| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
rdnetto/linux-TF101 | drivers/message/i2o/memory.c | 13205 | 8272 | /*
* Functions to handle I2O memory
*
* Pulled from the inlines in i2o headers and uninlined
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"
/* Protects our 32/64bit mask switching */
static DEFINE_MUTEX(mem_lock);
/**
* i2o_sg_tablesize - Calculate the maximum number of elements in an SGL
* @c: I2O controller for which the calculation should be done
* @body_size: maximum body size of the message, in 32-bit words.
*
* Returns the maximum number of SG elements in an SG list.
*/
u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
i2o_status_block *sb = c->status_block.virt;
u16 sg_count =
(sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
body_size;
if (c->pae_support) {
/*
* for 64-bit, an SG attribute element must be added, and each
* SG element needs 12 bytes instead of 8.
*/
sg_count -= 2;
sg_count /= 3;
} else
sg_count /= 2;
if (c->short_req && (sg_count > 8))
sg_count = 8;
return sg_count;
}
EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
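/*
 * Usage sketch (editorial addition, not part of the original file): a driver
 * sizing its SG list with the helper above. The body size of six 32-bit
 * words is a made-up value for illustration.
 */
static inline u16 example_sg_limit(struct i2o_controller *c)
{
	return i2o_sg_tablesize(c, 6 /* hypothetical message body, in words */);
}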
/**
* i2o_dma_map_single - Map pointer to controller and fill in I2O message.
* @c: I2O controller
* @ptr: pointer to the data which should be mapped
* @size: size of data in bytes
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
* @sg_ptr: pointer to the SG list inside the I2O message
*
* This function does all necessary DMA handling and also writes the I2O
* SGL elements into the I2O message. For details on DMA handling see also
* dma_map_single(). The pointer sg_ptr will only be advanced to the end of
* the SG list if the mapping was successful.
*
* Returns DMA address which must be checked for failures using
* dma_mapping_error().
*/
dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
u32 **sg_ptr)
{
u32 sg_flags;
u32 *mptr = *sg_ptr;
dma_addr_t dma_addr;
switch (direction) {
case DMA_TO_DEVICE:
sg_flags = 0xd4000000;
break;
case DMA_FROM_DEVICE:
sg_flags = 0xd0000000;
break;
default:
return 0;
}
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
*mptr++ = cpu_to_le32(0x7C020002);
*mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
*mptr++ = cpu_to_le32(sg_flags | size);
*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
*sg_ptr = mptr;
}
return dma_addr;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_single);
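/*
 * Usage sketch (editorial addition): mapping one driver buffer into an
 * outbound message. "msg_sgl" is a hypothetical pointer into the SG section
 * of an i2o_message under construction.
 */
static inline int example_map_buffer(struct i2o_controller *c, void *buf,
				     size_t len, u32 **msg_sgl)
{
	dma_addr_t addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE,
					     msg_sgl);

	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;	/* *msg_sgl was left untouched */
	return 0;		/* *msg_sgl now points past the written SGL */
}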
/**
* i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
* @c: I2O controller
* @sg: SG list to be mapped
* @sg_count: number of elements in the SG list
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
* @sg_ptr: pointer to the SG list inside the I2O message
*
* This function does all necessary DMA handling and also writes the I2O
* SGL elements into the I2O message. For details on DMA handling see also
* dma_map_sg(). The pointer sg_ptr will only be advanced to the end of the
* SG list if the mapping was successful.
*
* Returns 0 on failure or 1 on success.
*/
int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
int sg_count, enum dma_data_direction direction, u32 **sg_ptr)
{
u32 sg_flags;
u32 *mptr = *sg_ptr;
switch (direction) {
case DMA_TO_DEVICE:
sg_flags = 0x14000000;
break;
case DMA_FROM_DEVICE:
sg_flags = 0x10000000;
break;
default:
return 0;
}
sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
if (!sg_count)
return 0;
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
*mptr++ = cpu_to_le32(0x7C020002);
*mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
while (sg_count-- > 0) {
if (!sg_count)
sg_flags |= 0xC0000000;
*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
sg = sg_next(sg);
}
*sg_ptr = mptr;
return 1;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
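/*
 * Usage sketch (editorial addition): the scatterlist variant. A zero return
 * means nothing was mapped and *msg_sgl is untouched.
 */
static inline int example_map_sgl(struct i2o_controller *c,
				  struct scatterlist *sgl, int nents,
				  u32 **msg_sgl)
{
	return i2o_dma_map_sg(c, sgl, nents, DMA_FROM_DEVICE, msg_sgl) ?
	       0 : -ENOMEM;
}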
/**
* i2o_dma_alloc - Allocate DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which should get the DMA buffer
* @len: length of the new DMA memory
*
* Allocate a coherent DMA memory and write the pointers into addr.
*
* Returns 0 on success or -ENOMEM on failure.
*/
int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
struct pci_dev *pdev = to_pci_dev(dev);
int dma_64 = 0;
mutex_lock(&mem_lock);
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
dma_64 = 1;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
mutex_unlock(&mem_lock);
return -ENOMEM;
}
}
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
if ((sizeof(dma_addr_t) > 4) && dma_64)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
mutex_unlock(&mem_lock);
if (!addr->virt)
return -ENOMEM;
memset(addr->virt, 0, len);
addr->len = len;
return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_alloc);
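/*
 * Usage sketch (editorial addition): a coherent buffer tracked by
 * struct i2o_dma; the PAGE_SIZE length is arbitrary.
 */
static inline int example_dma_buffer(struct device *dev)
{
	struct i2o_dma buf;

	if (i2o_dma_alloc(dev, &buf, PAGE_SIZE))
		return -ENOMEM;
	/* ... use buf.virt (CPU address) and buf.phys (bus address) ... */
	i2o_dma_free(dev, &buf);
	return 0;
}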
/**
* i2o_dma_free - Free DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which contains the DMA buffer
*
* Free a coherent DMA memory and set virtual address of addr to NULL.
*/
void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
if (addr->virt) {
if (addr->phys)
dma_free_coherent(dev, addr->len, addr->virt,
addr->phys);
else
kfree(addr->virt);
addr->virt = NULL;
}
}
EXPORT_SYMBOL_GPL(i2o_dma_free);
/**
* i2o_dma_realloc - Realloc DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: pointer to a i2o_dma struct DMA buffer
* @len: new length of memory
*
* If there was something allocated in addr, free it first. If len > 0,
* then try to allocate the memory and write the addresses back into the
* addr structure. If len == 0, set the virtual address to NULL.
*
* Returns 0 on success or a negative error code on failure.
*/
int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
i2o_dma_free(dev, addr);
if (len)
return i2o_dma_alloc(dev, addr, len);
return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_realloc);
/*
* i2o_pool_alloc - Allocate a slab cache and mempool
* @pool: pointer to struct i2o_pool to write data into.
* @name: name which is used to identify cache
* @size: size of each object
* @min_nr: minimum number of objects
*
* First allocates a slab cache with name and size. Then allocates a
* mempool which uses the slab cache for allocation and freeing.
*
* Returns 0 on success or negative error code on failure.
*/
int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
size_t size, int min_nr)
{
pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
if (!pool->name)
goto exit;
strcpy(pool->name, name);
pool->slab =
kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!pool->slab)
goto free_name;
pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
if (!pool->mempool)
goto free_slab;
return 0;
free_slab:
kmem_cache_destroy(pool->slab);
free_name:
kfree(pool->name);
exit:
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(i2o_pool_alloc);
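/*
 * Usage sketch (editorial addition): the intended pairing of the pool with
 * mempool_alloc()/mempool_free(). Name, object size and minimum count are
 * made-up values.
 */
static inline int example_pool(struct i2o_pool *pool)
{
	void *obj;

	if (i2o_pool_alloc(pool, "example-msgs", 128, 4))
		return -ENOMEM;
	obj = mempool_alloc(pool->mempool, GFP_KERNEL);
	if (obj)
		mempool_free(obj, pool->mempool);
	i2o_pool_free(pool);	/* every object must be returned first */
	return 0;
}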
/*
* i2o_pool_free - Free slab cache and mempool again
* @pool: pointer to struct i2o_pool which should be freed
*
* Note that you have to return all objects to the mempool again before
* calling i2o_pool_free().
*/
void i2o_pool_free(struct i2o_pool *pool)
{
mempool_destroy(pool->mempool);
kmem_cache_destroy(pool->slab);
kfree(pool->name);
}
EXPORT_SYMBOL_GPL(i2o_pool_free);
| gpl-2.0 |
GeyerA/kernel_shamu | arch/x86/lib/mmx_32.c | 13461 | 8212 | /*
* MMX 3DNow! library helper functions
*
* To do:
* We can use MMX just for prefetch in IRQ's. This may be a win.
* (reported so on K6-III)
* We should use a better code neutral filler for the short jump
* leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
* We also want to clobber the filler register so we don't get any
* register forwarding stalls on the filler.
*
* Add *user handling. Checksums are not a win with MMX on any CPU
* tested so far for any MMX solution figured.
*
* 22/09/2000 - Arjan van de Ven
* Improved for non-engineering-sample Athlons
*
*/
#include <linux/hardirq.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/i387.h>
#include <asm/asm.h>
void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;
int i;
if (unlikely(in_interrupt()))
return __memcpy(to, from, len);
p = to;
i = len >> 6; /* len/64 */
kernel_fpu_begin();
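	/*
	 * Editorial note (best-effort reading of the fixup below): if the
	 * first prefetch faults on a CPU without the instruction, the fixup
	 * writes 0x1AEB ("jmp +26", little-endian) over label 1, turning the
	 * whole 28-byte prefetch set into a two-byte jump on later passes.
	 */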
__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */
" prefetch 64(%0)\n"
" prefetch 128(%0)\n"
" prefetch 192(%0)\n"
" prefetch 256(%0)\n"
"2: \n"
".section .fixup, \"ax\"\n"
"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: : "r" (from));
for ( ; i > 5; i--) {
__asm__ __volatile__ (
"1: prefetch 320(%0)\n"
"2: movq (%0), %%mm0\n"
" movq 8(%0), %%mm1\n"
" movq 16(%0), %%mm2\n"
" movq 24(%0), %%mm3\n"
" movq %%mm0, (%1)\n"
" movq %%mm1, 8(%1)\n"
" movq %%mm2, 16(%1)\n"
" movq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm0\n"
" movq 40(%0), %%mm1\n"
" movq 48(%0), %%mm2\n"
" movq 56(%0), %%mm3\n"
" movq %%mm0, 32(%1)\n"
" movq %%mm1, 40(%1)\n"
" movq %%mm2, 48(%1)\n"
" movq %%mm3, 56(%1)\n"
".section .fixup, \"ax\"\n"
"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: : "r" (from), "r" (to) : "memory");
from += 64;
to += 64;
}
for ( ; i > 0; i--) {
__asm__ __volatile__ (
" movq (%0), %%mm0\n"
" movq 8(%0), %%mm1\n"
" movq 16(%0), %%mm2\n"
" movq 24(%0), %%mm3\n"
" movq %%mm0, (%1)\n"
" movq %%mm1, 8(%1)\n"
" movq %%mm2, 16(%1)\n"
" movq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm0\n"
" movq 40(%0), %%mm1\n"
" movq 48(%0), %%mm2\n"
" movq 56(%0), %%mm3\n"
" movq %%mm0, 32(%1)\n"
" movq %%mm1, 40(%1)\n"
" movq %%mm2, 48(%1)\n"
" movq %%mm3, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from += 64;
to += 64;
}
/*
* Now do the tail of the block:
*/
__memcpy(to, from, len & 63);
kernel_fpu_end();
return p;
}
EXPORT_SYMBOL(_mmx_memcpy);
#ifdef CONFIG_MK7
/*
* The K7 has streaming cache bypass load/store. The Cyrix III, K6 and
* other MMX using processors do not.
*/
static void fast_clear_page(void *page)
{
int i;
kernel_fpu_begin();
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
);
for (i = 0; i < 4096/64; i++) {
__asm__ __volatile__ (
" movntq %%mm0, (%0)\n"
" movntq %%mm0, 8(%0)\n"
" movntq %%mm0, 16(%0)\n"
" movntq %%mm0, 24(%0)\n"
" movntq %%mm0, 32(%0)\n"
" movntq %%mm0, 40(%0)\n"
" movntq %%mm0, 48(%0)\n"
" movntq %%mm0, 56(%0)\n"
: : "r" (page) : "memory");
page += 64;
}
/*
* Since movntq is weakly-ordered, a "sfence" is needed to become
* ordered again:
*/
__asm__ __volatile__("sfence\n"::);
kernel_fpu_end();
}
static void fast_copy_page(void *to, void *from)
{
int i;
kernel_fpu_begin();
/*
* maybe the prefetch stuff can go before the expensive fnsave...
* but that is for later. -AV
*/
__asm__ __volatile__(
"1: prefetch (%0)\n"
" prefetch 64(%0)\n"
" prefetch 128(%0)\n"
" prefetch 192(%0)\n"
" prefetch 256(%0)\n"
"2: \n"
".section .fixup, \"ax\"\n"
"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b) : : "r" (from));
for (i = 0; i < (4096-320)/64; i++) {
__asm__ __volatile__ (
"1: prefetch 320(%0)\n"
"2: movq (%0), %%mm0\n"
" movntq %%mm0, (%1)\n"
" movq 8(%0), %%mm1\n"
" movntq %%mm1, 8(%1)\n"
" movq 16(%0), %%mm2\n"
" movntq %%mm2, 16(%1)\n"
" movq 24(%0), %%mm3\n"
" movntq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm4\n"
" movntq %%mm4, 32(%1)\n"
" movq 40(%0), %%mm5\n"
" movntq %%mm5, 40(%1)\n"
" movq 48(%0), %%mm6\n"
" movntq %%mm6, 48(%1)\n"
" movq 56(%0), %%mm7\n"
" movntq %%mm7, 56(%1)\n"
".section .fixup, \"ax\"\n"
"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
from += 64;
to += 64;
}
for (i = (4096-320)/64; i < 4096/64; i++) {
__asm__ __volatile__ (
"2: movq (%0), %%mm0\n"
" movntq %%mm0, (%1)\n"
" movq 8(%0), %%mm1\n"
" movntq %%mm1, 8(%1)\n"
" movq 16(%0), %%mm2\n"
" movntq %%mm2, 16(%1)\n"
" movq 24(%0), %%mm3\n"
" movntq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm4\n"
" movntq %%mm4, 32(%1)\n"
" movq 40(%0), %%mm5\n"
" movntq %%mm5, 40(%1)\n"
" movq 48(%0), %%mm6\n"
" movntq %%mm6, 48(%1)\n"
" movq 56(%0), %%mm7\n"
" movntq %%mm7, 56(%1)\n"
: : "r" (from), "r" (to) : "memory");
from += 64;
to += 64;
}
/*
* Since movntq is weakly-ordered, a "sfence" is needed to become
* ordered again:
*/
__asm__ __volatile__("sfence \n"::);
kernel_fpu_end();
}
#else /* CONFIG_MK7 */
/*
* Generic MMX implementation without K7 specific streaming
*/
static void fast_clear_page(void *page)
{
int i;
kernel_fpu_begin();
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
);
for (i = 0; i < 4096/128; i++) {
__asm__ __volatile__ (
" movq %%mm0, (%0)\n"
" movq %%mm0, 8(%0)\n"
" movq %%mm0, 16(%0)\n"
" movq %%mm0, 24(%0)\n"
" movq %%mm0, 32(%0)\n"
" movq %%mm0, 40(%0)\n"
" movq %%mm0, 48(%0)\n"
" movq %%mm0, 56(%0)\n"
" movq %%mm0, 64(%0)\n"
" movq %%mm0, 72(%0)\n"
" movq %%mm0, 80(%0)\n"
" movq %%mm0, 88(%0)\n"
" movq %%mm0, 96(%0)\n"
" movq %%mm0, 104(%0)\n"
" movq %%mm0, 112(%0)\n"
" movq %%mm0, 120(%0)\n"
: : "r" (page) : "memory");
page += 128;
}
kernel_fpu_end();
}
static void fast_copy_page(void *to, void *from)
{
int i;
kernel_fpu_begin();
__asm__ __volatile__ (
"1: prefetch (%0)\n"
" prefetch 64(%0)\n"
" prefetch 128(%0)\n"
" prefetch 192(%0)\n"
" prefetch 256(%0)\n"
"2: \n"
".section .fixup, \"ax\"\n"
"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b) : : "r" (from));
for (i = 0; i < 4096/64; i++) {
__asm__ __volatile__ (
"1: prefetch 320(%0)\n"
"2: movq (%0), %%mm0\n"
" movq 8(%0), %%mm1\n"
" movq 16(%0), %%mm2\n"
" movq 24(%0), %%mm3\n"
" movq %%mm0, (%1)\n"
" movq %%mm1, 8(%1)\n"
" movq %%mm2, 16(%1)\n"
" movq %%mm3, 24(%1)\n"
" movq 32(%0), %%mm0\n"
" movq 40(%0), %%mm1\n"
" movq 48(%0), %%mm2\n"
" movq 56(%0), %%mm3\n"
" movq %%mm0, 32(%1)\n"
" movq %%mm1, 40(%1)\n"
" movq %%mm2, 48(%1)\n"
" movq %%mm3, 56(%1)\n"
".section .fixup, \"ax\"\n"
"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: : "r" (from), "r" (to) : "memory");
from += 64;
to += 64;
}
kernel_fpu_end();
}
#endif /* !CONFIG_MK7 */
/*
* Favour MMX for page clear and copy:
*/
static void slow_zero_page(void *page)
{
int d0, d1;
__asm__ __volatile__(
"cld\n\t"
"rep ; stosl"
: "=&c" (d0), "=&D" (d1)
:"a" (0), "1" (page), "0" (1024)
:"memory");
}
void mmx_clear_page(void *page)
{
if (unlikely(in_interrupt()))
slow_zero_page(page);
else
fast_clear_page(page);
}
EXPORT_SYMBOL(mmx_clear_page);
static void slow_copy_page(void *to, void *from)
{
int d0, d1, d2;
__asm__ __volatile__(
"cld\n\t"
"rep ; movsl"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (1024), "1" ((long) to), "2" ((long) from)
: "memory");
}
void mmx_copy_page(void *to, void *from)
{
if (unlikely(in_interrupt()))
slow_copy_page(to, from);
else
fast_copy_page(to, from);
}
EXPORT_SYMBOL(mmx_copy_page);
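/*
 * Usage sketch (editorial addition): callers simply hand in page-aligned
 * kernel addresses; the helpers pick the fast or slow path themselves.
 */
static inline void example_duplicate_page(void *dst, void *src)
{
	/* Falls back to rep movsl when called in interrupt context,
	 * where FPU state cannot be saved. */
	mmx_copy_page(dst, src);
}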
| gpl-2.0 |
psomas/lguest64 | drivers/net/vxge/vxge-traffic.c | 150 | 68194 | /******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
* Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
#include <linux/etherdevice.h>
#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
*
* Enable vpath interrupts. The function is to be executed last in the
* vpath initialization sequence.
*
* See also: vxge_hw_vpath_intr_disable()
*/
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
vp_reg = vpath->vp_reg;
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_ppif_int_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->wrdma_alarm_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_status);
val64 = readq(&vp_reg->vpath_general_int_status);
/* Mask unwanted interrupts */
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_mask);
/* Unmask the individual interrupts */
writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
&vp_reg->general_errors_mask);
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
&vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
&vp_reg->prc_alarm_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
if (vpath->hldev->first_vp_id != vpath->vp_id)
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_mask);
else
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
&vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_pio_mem_write32_upper(0,
&vp_reg->vpath_general_int_mask);
exit:
return status;
}
/*
* vxge_hw_vpath_intr_disable - Disable vpath interrupts.
* @vp: Virtual Path handle.
*
* Disable vpath interrupts.
*
* See also: vxge_hw_vpath_intr_enable()
*/
enum vxge_hw_status vxge_hw_vpath_intr_disable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
vp_reg = vpath->vp_reg;
__vxge_hw_pio_mem_write32_upper(
(u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_general_int_mask);
val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_ppif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->wrdma_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_mask);
exit:
return status;
}
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct vxge_hw_vp_config *config;
u64 val64;
if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
return;
vp_reg = fifo->vp_reg;
config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
fifo->tim_tti_cfg1_saved = val64;
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
}
}
void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
u64 val64 = ring->tim_rti_cfg1_saved;
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
ring->tim_rti_cfg1_saved = val64;
writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}
void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
u64 val64 = fifo->tim_tti_cfg3_saved;
u64 timer = (fifo->rtimer * 1000) / 272;
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
if (timer)
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
/* tti_cfg3_saved is not updated again because it is
* initialized at one place only - init time.
*/
}
void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
u64 val64 = ring->tim_rti_cfg3_saved;
u64 timer = (ring->rtimer * 1000) / 272;
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
if (timer)
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
/* rti_cfg3_saved is not updated again because it is
* initialized at one place only - init time.
*/
}
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function masks the msix interrupt for the given msix_id.
*/
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->set_msix_mask_vect[msix_id%4]);
}
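/*
 * Worked example (editorial): for msix_id 9 the call above sets bit
 * vxge_mBIT(9 >> 2) = vxge_mBIT(2) in set_msix_mask_vect[9 % 4], i.e.
 * register index 1 -- consecutive vector IDs are striped across the four
 * mask registers.
 */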
/**
* vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function unmasks the msix interrupt for the given msix_id.
*/
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}
/**
* vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
* @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function clears the msix interrupt for the given msix_id
* if configured in MSIX one-shot mode.
*/
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}
/**
* vxge_hw_device_set_intr_type - Updates the configuration
* with new interrupt type.
* @hldev: HW device handle.
* @intr_mode: New interrupt type
*/
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
(intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
(intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
(intr_mode != VXGE_HW_INTR_MODE_DEF))
intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
hldev->config.intr_mode = intr_mode;
return intr_mode;
}
/**
* vxge_hw_device_intr_enable - Enable interrupts.
* @hldev: HW device handle.
*
* Enable Titan interrupts. The function is to be executed last in the
* Titan initialization sequence.
*
* See also: vxge_hw_device_intr_disable()
*/
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
u32 i;
u64 val64;
u32 val32;
vxge_hw_device_mask_all(hldev);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
vxge_hw_vpath_intr_enable(
VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
}
if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
if (val64 != 0) {
writeq(val64, &hldev->common_reg->tim_int_status0);
writeq(~val64, &hldev->common_reg->tim_int_mask0);
}
val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
if (val32 != 0) {
__vxge_hw_pio_mem_write32_upper(val32,
&hldev->common_reg->tim_int_status1);
__vxge_hw_pio_mem_write32_upper(~val32,
&hldev->common_reg->tim_int_mask1);
}
}
val64 = readq(&hldev->common_reg->titan_general_int_status);
vxge_hw_device_unmask_all(hldev);
}
/**
* vxge_hw_device_intr_disable - Disable Titan interrupts.
* @hldev: HW device handle.
*
* Disable Titan interrupts.
*
* See also: vxge_hw_device_intr_enable()
*/
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
u32 i;
vxge_hw_device_mask_all(hldev);
/* mask all the tim interrupts */
writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
&hldev->common_reg->tim_int_mask1);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
vxge_hw_vpath_intr_disable(
VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
}
}
/**
* vxge_hw_device_mask_all - Mask all device interrupts.
* @hldev: HW device handle.
*
* Mask all device interrupts.
*
* See also: vxge_hw_device_unmask_all()
*/
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
u64 val64;
val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
}
/**
* vxge_hw_device_unmask_all - Unmask all device interrupts.
* @hldev: HW device handle.
*
* Unmask all device interrupts.
*
* See also: vxge_hw_device_mask_all()
*/
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
u64 val64 = 0;
if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
}
/**
* vxge_hw_device_flush_io - Flush io writes.
* @hldev: HW device handle.
*
* The function performs a read operation to flush io writes.
*
* Returns: void
*/
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
u32 val32;
val32 = readl(&hldev->common_reg->titan_general_int_status);
}
/**
* __vxge_hw_device_handle_error - Handle error
* @hldev: HW device
* @vp_id: Vpath Id
* @type: Error type. Please see enum vxge_hw_event{}
*
* Handle error.
*/
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
enum vxge_hw_event type)
{
switch (type) {
case VXGE_HW_EVENT_UNKNOWN:
break;
case VXGE_HW_EVENT_RESET_START:
case VXGE_HW_EVENT_RESET_COMPLETE:
case VXGE_HW_EVENT_LINK_DOWN:
case VXGE_HW_EVENT_LINK_UP:
goto out;
case VXGE_HW_EVENT_ALARM_CLEARED:
goto out;
case VXGE_HW_EVENT_ECCERR:
case VXGE_HW_EVENT_MRPCIM_ECCERR:
goto out;
case VXGE_HW_EVENT_FIFO_ERR:
case VXGE_HW_EVENT_VPATH_ERR:
case VXGE_HW_EVENT_CRITICAL_ERR:
case VXGE_HW_EVENT_SERR:
break;
case VXGE_HW_EVENT_SRPCIM_SERR:
case VXGE_HW_EVENT_MRPCIM_SERR:
goto out;
case VXGE_HW_EVENT_SLOT_FREEZE:
break;
default:
vxge_assert(0);
goto out;
}
/* notify driver */
if (hldev->uld_callbacks.crit_err)
hldev->uld_callbacks.crit_err(
(struct __vxge_hw_device *)hldev,
type, vp_id);
out:
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_handle_link_down_ind
* @hldev: HW device handle.
*
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
* If the link state is already down, return.
*/
if (hldev->link_state == VXGE_HW_LINK_DOWN)
goto exit;
hldev->link_state = VXGE_HW_LINK_DOWN;
/* notify driver */
if (hldev->uld_callbacks.link_down)
hldev->uld_callbacks.link_down(hldev);
exit:
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_handle_link_up_ind
* @hldev: HW device handle.
*
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for a programmable amount of time.
*/
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
* If the link state is already up, return.
*/
if (hldev->link_state == VXGE_HW_LINK_UP)
goto exit;
hldev->link_state = VXGE_HW_LINK_UP;
/* notify driver */
if (hldev->uld_callbacks.link_up)
hldev->uld_callbacks.link_up(hldev);
exit:
return VXGE_HW_OK;
}
/*
* __vxge_hw_vpath_alarm_process - Process Alarms.
* @vpath: Virtual Path.
* @skip_alarms: Do not clear the alarms
*
* Process vpath alarms.
*
*/
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
u64 pic_status;
struct __vxge_hw_device *hldev = NULL;
enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
u64 mask64;
struct vxge_hw_vpath_stats_sw_info *sw_stats;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vpath == NULL) {
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
alarm_event);
goto out2;
}
hldev = vpath->hldev;
vp_reg = vpath->vp_reg;
alarm_status = readq(&vp_reg->vpath_general_int_status);
if (alarm_status == VXGE_HW_ALL_FOXES) {
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
alarm_event);
goto out;
}
sw_stats = vpath->sw_stats;
if (alarm_status & ~(
VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
sw_stats->error_stats.unknown_alarms++;
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
alarm_event);
goto out;
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
val64 = readq(&vp_reg->xgmac_vp_int_status);
if (val64 &
VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
))) {
sw_stats->error_stats.network_sustained_fault++;
writeq(
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
&vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_device_handle_link_down_ind(hldev);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_LINK_DOWN, alarm_event);
}
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
))) {
sw_stats->error_stats.network_sustained_ok++;
writeq(
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
&vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_device_handle_link_up_ind(hldev);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_LINK_UP, alarm_event);
}
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
if (skip_alarms)
return VXGE_HW_OK;
}
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
pic_status = readq(&vp_reg->vpath_ppif_int_status);
if (pic_status &
VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
val64 = readq(&vp_reg->general_errors_reg);
mask64 = readq(&vp_reg->general_errors_mask);
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
~mask64) {
sw_stats->error_stats.ini_serr_det++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_SERR, alarm_event);
}
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
~mask64) {
sw_stats->error_stats.dblgen_fifo0_overflow++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR, alarm_event);
}
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
~mask64)
sw_stats->error_stats.statsb_pif_chain_error++;
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
~mask64)
sw_stats->error_stats.statsb_drop_timeout++;
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
~mask64)
sw_stats->error_stats.target_illegal_access++;
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
if (pic_status &
VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
val64 = readq(&vp_reg->kdfcctl_errors_reg);
mask64 = readq(&vp_reg->kdfcctl_errors_mask);
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_poison++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->kdfcctl_errors_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
val64 = readq(&vp_reg->wrdma_alarm_status);
if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
val64 = readq(&vp_reg->prc_alarm_reg);
mask64 = readq(&vp_reg->prc_alarm_mask);
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
~mask64)
sw_stats->error_stats.prc_ring_bumps++;
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
~mask64) {
sw_stats->error_stats.prc_rxdcm_sc_err++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
& ~mask64) {
sw_stats->error_stats.prc_rxdcm_sc_abort++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
& ~mask64) {
sw_stats->error_stats.prc_quanta_size_err++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
}
out:
hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
(alarm_event == VXGE_HW_EVENT_UNKNOWN))
return VXGE_HW_OK;
__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
if (alarm_event == VXGE_HW_EVENT_SERR)
return VXGE_HW_ERR_CRITICAL;
return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
VXGE_HW_ERR_SLOT_FREEZE :
(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
VXGE_HW_ERR_VPATH;
}
/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
* @skip_alarms: Do not clear the alarms
* @reason: "Reason" for the interrupt, the value of Titan's
* general_int_status register.
*
* The function performs two actions: it first checks whether (on a shared
* IRQ line) the interrupt was raised by the device. Next, it masks the
* device interrupts.
*
* Note:
* vxge_hw_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially possible.
*
* Returns: 0, if the interrupt is not "ours" (note that in this case the
* device remains enabled).
* Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
* status.
*/
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
u32 skip_alarms, u64 *reason)
{
u32 i;
u64 val64;
u64 adapter_status;
u64 vpath_mask;
enum vxge_hw_status ret = VXGE_HW_OK;
val64 = readq(&hldev->common_reg->titan_general_int_status);
if (unlikely(!val64)) {
/* not Titan interrupt */
*reason = 0;
ret = VXGE_HW_ERR_WRONG_IRQ;
goto exit;
}
if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
adapter_status = readq(&hldev->common_reg->adapter_status);
if (adapter_status == VXGE_HW_ALL_FOXES) {
__vxge_hw_device_handle_error(hldev,
NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
*reason = 0;
ret = VXGE_HW_ERR_SLOT_FREEZE;
goto exit;
}
}
hldev->stats.sw_dev_info_stats.total_intr_cnt++;
*reason = val64;
vpath_mask = hldev->vpaths_deployed >>
(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
if (val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
return VXGE_HW_OK;
}
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
if (unlikely(val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
enum vxge_hw_status error_level = VXGE_HW_OK;
hldev->stats.sw_dev_err_stats.vpath_alarms++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
ret = __vxge_hw_vpath_alarm_process(
&hldev->virtual_paths[i], skip_alarms);
error_level = VXGE_HW_SET_LEVEL(ret, error_level);
if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
(ret == VXGE_HW_ERR_SLOT_FREEZE)))
break;
}
ret = error_level;
}
exit:
return ret;
}
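/*
 * Usage sketch (editorial addition, assumes <linux/interrupt.h>): the top
 * half an LL driver might build around vxge_hw_device_begin_irq().
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
	    VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, not our device */
	/* ... act on the reason bits / schedule NAPI here ... */
	vxge_hw_device_clear_tx_rx(hldev);
	return IRQ_HANDLED;
}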
/**
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and Rx interrupt.
* @hldev: HW device.
*
* Acknowledge (that is, clear) the condition that has caused
* the Tx and Rx interrupt.
* See also: vxge_hw_device_begin_irq(),
* vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
*/
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status0);
}
if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
__vxge_hw_pio_mem_write32_upper(
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
}
/*
* vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
* @channel: Channel
* @dtrh: Buffer to return the DTR pointer
*
* Allocates a dtr from the reserve array. If the reserve array is empty,
* it swaps the reserve and free arrays.
*
*/
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
return VXGE_HW_OK;
}
/* switch between empty and full arrays */
/* The idea behind such a design is that by keeping the free and reserve
* arrays separate we basically separate the irq and non-irq parts,
* i.e. no additional locking is needed when we free a resource. */
if (channel->length - channel->free_ptr > 0) {
tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
channel->free_arr = tmp_arr;
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
channel->stats->reserve_free_swaps_cnt++;
goto _alloc_after_swap;
}
channel->stats->full_cnt++;
*dtrh = NULL;
return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
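/*
 * Worked example (editorial): with length == 4, once all four dtrs have
 * been reserved and two of them freed, the reserve array is empty while
 * free_arr holds two entries. The swap above turns those two into the new
 * reserve array in O(1), with no lock shared between the alloc and free
 * paths.
 */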
/*
* vxge_hw_channel_dtr_post - Post a dtr to the channel
* @channel: Channel
* @dtrh: DTR pointer
*
* Posts a dtr to work array.
*
*/
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
channel->work_arr[channel->post_index++] = dtrh;
/* wrap-around */
if (channel->post_index == channel->length)
channel->post_index = 0;
}
/*
* vxge_hw_channel_dtr_try_complete - Returns next completed dtr
* @channel: Channel
* @dtr: Buffer to return the next completed DTR pointer
*
* Returns the next completed dtr without removing it from the work array
*
*/
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
vxge_assert(channel->compl_index < channel->length);
*dtrh = channel->work_arr[channel->compl_index];
prefetch(*dtrh);
}
/*
* vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
* @channel: Channel handle
*
* Removes the next completed dtr from work array
*
*/
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
channel->work_arr[channel->compl_index] = NULL;
/* wrap-around */
if (++channel->compl_index == channel->length)
channel->compl_index = 0;
channel->stats->total_compl_cnt++;
}
/*
* vxge_hw_channel_dtr_free - Frees a dtr
* @channel: Channel handle
* @dtr: DTR pointer
*
* Returns the dtr to free array
*
*/
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
channel->free_arr[--channel->free_ptr] = dtrh;
}
/*
* vxge_hw_channel_dtr_count
* @channel: Channel handle. Obtained via vxge_hw_channel_open().
*
* Retrieve the number of DTRs available. This function cannot be called
* from the data path; ring_initial_replenish() is the only user.
*/
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
return (channel->reserve_ptr - channel->reserve_top) +
(channel->length - channel->free_ptr);
}
/**
* vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
* @ring: Handle to the ring object used for receive
* @rxdh: Reserved descriptor. On success HW fills this "out" parameter
* with a valid handle.
*
* Reserve an Rx descriptor for subsequent filling-in by the driver
* and posting on the corresponding channel (@channelh)
* via vxge_hw_ring_rxd_post().
*
* Returns: VXGE_HW_OK - success.
* VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
*
*/
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
void **rxdh)
{
enum vxge_hw_status status;
struct __vxge_hw_channel *channel;
channel = &ring->channel;
status = vxge_hw_channel_dtr_alloc(channel, rxdh);
if (status == VXGE_HW_OK) {
struct vxge_hw_ring_rxd_1 *rxdp =
(struct vxge_hw_ring_rxd_1 *)*rxdh;
rxdp->control_0 = rxdp->control_1 = 0;
}
return status;
}
/**
* vxge_hw_ring_rxd_free - Free descriptor.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle.
*
* Free the reserved descriptor. This operation is "symmetrical" to
* vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
* lifecycle.
*
* After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
* be:
*
* - reserved (vxge_hw_ring_rxd_reserve);
*
* - posted (vxge_hw_ring_rxd_post);
*
* - completed (vxge_hw_ring_rxd_next_completed);
*
* - and recycled again (vxge_hw_ring_rxd_free).
*
* For alternative state transitions and more details please refer to
* the design doc.
*
*/
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
struct __vxge_hw_channel *channel;
channel = &ring->channel;
vxge_hw_channel_dtr_free(channel, rxdh);
}
/**
* vxge_hw_ring_rxd_pre_post - Prepare rxd and post
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle.
*
* This routine prepares an rxd and posts it.
*/
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
struct __vxge_hw_channel *channel;
channel = &ring->channel;
vxge_hw_channel_dtr_post(channel, rxdh);
}
/**
* vxge_hw_ring_rxd_post_post - Process rxd after post.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle.
*
* Processes rxd after post
*/
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
struct __vxge_hw_channel *channel;
channel = &ring->channel;
rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
}
/**
* vxge_hw_ring_rxd_post - Post descriptor on the ring.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
*
* Post descriptor on the ring.
* Prior to posting the descriptor should be filled in accordance with
* Host/Titan interface specification for a given service (LL, etc.).
*
*/
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
struct __vxge_hw_channel *channel;
channel = &ring->channel;
wmb();
rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(channel, rxdh);
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
}
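/*
 * Usage sketch (editorial addition): the reserve/post pairing a replenish
 * loop would use; programming of buffer addresses into the RxD is omitted.
 */
static inline void example_rx_replenish(struct __vxge_hw_ring *ring)
{
	void *rxdh;

	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
		/* ... fill the RxD buffer pointers here ... */
		vxge_hw_ring_rxd_post(ring, rxdh);
	}
}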
/**
* vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle.
*
* Processes rxd after post with memory barrier.
*/
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
wmb();
vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/**
* vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle. Returned by HW.
* @t_code: Transfer code, as per Titan User Guide,
* Receive Descriptor Format. Returned by HW.
*
* Retrieve the _next_ completed descriptor.
* HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
* driver of new completed descriptors. After that
* the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
* rest of the completions (the very first completion is passed by HW via
* vxge_hw_ring_callback_f).
*
* Implementation-wise, the driver is free to call
* vxge_hw_ring_rxd_next_completed either immediately from inside the
* ring callback, or in a deferred fashion and separate (from HW)
* context.
*
* Non-zero @t_code means failure to fill in receive buffer(s)
* of the descriptor.
* For instance, parity error detected during the data transfer.
* In this case Titan will complete the descriptor and indicate
* for the host that the received data is not to be used.
* For details please refer to Titan User Guide.
*
* Returns: VXGE_HW_OK - success.
* VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*
* See also: vxge_hw_ring_callback_f{},
* vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
*/
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
struct __vxge_hw_channel *channel;
struct vxge_hw_ring_rxd_1 *rxdp;
enum vxge_hw_status status = VXGE_HW_OK;
u64 control_0, own;
channel = &ring->channel;
vxge_hw_channel_dtr_try_complete(channel, rxdh);
rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
if (rxdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
}
control_0 = rxdp->control_0;
own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
++ring->cmpl_cnt;
vxge_hw_channel_dtr_complete(channel);
vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
ring->stats->common_stats.usage_cnt++;
if (ring->stats->common_stats.usage_max <
ring->stats->common_stats.usage_cnt)
ring->stats->common_stats.usage_max =
ring->stats->common_stats.usage_cnt;
status = VXGE_HW_OK;
goto exit;
}
/* Reset it, since we don't want to return
* garbage to the driver */
*rxdh = NULL;
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
return status;
}
/**
* vxge_hw_ring_handle_tcode - Handle transfer code.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor handle.
* @t_code: One of the enumerated (and documented in the Titan user guide)
* "transfer codes".
*
* Handle descriptor's transfer code. The latter comes with each completed
* descriptor.
*
* Returns: one of the enum vxge_hw_status{} enumerated types.
* VXGE_HW_OK - for success.
* VXGE_HW_ERR_CRITICAL - when encounters critical error.
*/
enum vxge_hw_status vxge_hw_ring_handle_tcode(
struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
channel = &ring->channel;
/* If the t_code is not supported and the t_code is other than 0x5
* (an unparseable packet, such as an unknown IPv6 header), drop it!
*/
if (t_code == VXGE_HW_RING_T_CODE_OK ||
t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
status = VXGE_HW_OK;
goto exit;
}
if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
return status;
}
/**
* __vxge_hw_non_offload_db_post - Post non offload doorbell
*
* @fifo: fifo handle
* @txdl_ptr: The starting location of the TxDL in host memory
* @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
* @no_snoop: No snoop flags
*
* This function posts a non-offload doorbell to the doorbell FIFO.
*
*/
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
&fifo->nofl_db->control_0);
mmiowb();
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
mmiowb();
}
/**
* vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
* the fifo
* @fifoh: Handle to the fifo object used for non offload send
*/
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/**
* vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
* @fifoh: Handle to the fifo object used for non offload send
* @txdlh: Reserved descriptor. On success HW fills this "out" parameter
* with a valid handle.
* @txdl_priv: Buffer to return the pointer to per txdl space
*
* Reserve a single TxDL (that is, fifo descriptor)
* for subsequent filling-in by the driver
* and posting on the corresponding channel (@channelh)
* via vxge_hw_fifo_txdl_post().
*
* Note: it is the responsibility of driver to reserve multiple descriptors
* for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
* carries up to configured number (fifo.max_frags) of contiguous buffers.
*
* Returns: VXGE_HW_OK - success;
* VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
*
*/
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
struct __vxge_hw_fifo *fifo,
void **txdlh, void **txdl_priv)
{
struct __vxge_hw_channel *channel;
enum vxge_hw_status status;
int i;
channel = &fifo->channel;
status = vxge_hw_channel_dtr_alloc(channel, txdlh);
if (status == VXGE_HW_OK) {
struct vxge_hw_fifo_txd *txdp =
(struct vxge_hw_fifo_txd *)*txdlh;
struct __vxge_hw_fifo_txdl_priv *priv;
priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
/* reset the TxDL's private */
priv->align_dma_offset = 0;
priv->align_vaddr_start = priv->align_vaddr;
priv->align_used_frags = 0;
priv->frags = 0;
priv->alloc_frags = fifo->config->max_frags;
priv->next_txdl_priv = NULL;
*txdl_priv = (void *)(size_t)txdp->host_control;
for (i = 0; i < fifo->config->max_frags; i++) {
txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
txdp->control_0 = txdp->control_1 = 0;
}
}
return status;
}
/**
* vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
* descriptor.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor handle.
* @frag_idx: Index of the data buffer in the caller's scatter-gather list
* (of buffers).
* @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
* @size: Size of the data buffer (in bytes).
*
* This API is part of the preparation of the transmit descriptor for posting
* (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
* vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
* All three APIs fill in the fields of the fifo descriptor,
* in accordance with the Titan specification.
*
*/
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
void *txdlh, u32 frag_idx,
dma_addr_t dma_pointer, u32 size)
{
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
struct vxge_hw_fifo_txd *txdp, *txdp_last;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
if (frag_idx != 0)
txdp->control_0 = txdp->control_1 = 0;
else {
txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
txdp->control_1 |= fifo->interrupt_type;
txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
fifo->tx_intr_num);
if (txdl_priv->frags) {
txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
(txdl_priv->frags - 1);
txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
}
}
vxge_assert(frag_idx < txdl_priv->alloc_frags);
txdp->buffer_pointer = (u64)dma_pointer;
txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
fifo->stats->total_buffers++;
txdl_priv->frags++;
}
/**
* vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
*
* Post descriptor on the 'fifo' type channel for transmission.
* Prior to posting the descriptor should be filled in accordance with
* Host/Titan interface specification for a given service (LL, etc.).
*
*/
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
struct vxge_hw_fifo_txd *txdp_last;
struct vxge_hw_fifo_txd *txdp_first;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
txdp_last->control_0 |=
VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
__vxge_hw_non_offload_db_post(fifo,
(u64)txdl_priv->dma_addr,
txdl_priv->frags - 1,
fifo->no_snoop_bits);
fifo->stats->total_posts++;
fifo->stats->common_stats.usage_cnt++;
if (fifo->stats->common_stats.usage_max <
fifo->stats->common_stats.usage_cnt)
fifo->stats->common_stats.usage_max =
fifo->stats->common_stats.usage_cnt;
}
/**
* vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor handle. Returned by HW.
* @t_code: Transfer code, as per Titan User Guide,
* Transmit Descriptor Format.
* Returned by HW.
*
* Retrieve the _next_ completed descriptor.
 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
*
* Implementation-wise, the driver is free to call
* vxge_hw_fifo_txdl_next_completed either immediately from inside the
* channel callback, or in a deferred fashion and separate (from HW)
* context.
*
* Non-zero @t_code means failure to process the descriptor.
* The failure could happen, for instance, when the link is
* down, in which case Titan completes the descriptor because it
* is not able to send the data out.
*
* For details please refer to Titan User Guide.
*
* Returns: VXGE_HW_OK - success.
* VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*
*/
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
struct __vxge_hw_fifo *fifo, void **txdlh,
enum vxge_hw_fifo_tcode *t_code)
{
struct __vxge_hw_channel *channel;
struct vxge_hw_fifo_txd *txdp;
enum vxge_hw_status status = VXGE_HW_OK;
channel = &fifo->channel;
vxge_hw_channel_dtr_try_complete(channel, txdlh);
txdp = (struct vxge_hw_fifo_txd *)*txdlh;
if (txdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
}
/* check whether host owns it */
if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
vxge_assert(txdp->host_control != 0);
vxge_hw_channel_dtr_complete(channel);
*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
if (fifo->stats->common_stats.usage_cnt > 0)
fifo->stats->common_stats.usage_cnt--;
status = VXGE_HW_OK;
goto exit;
}
/* no more completions */
*txdlh = NULL;
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
return status;
}
/**
* vxge_hw_fifo_handle_tcode - Handle transfer code.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor handle.
* @t_code: One of the enumerated (and documented in the Titan user guide)
* "transfer codes".
*
* Handle descriptor's transfer code. The latter comes with each completed
* descriptor.
*
* Returns: one of the enum vxge_hw_status{} enumerated types.
* VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
*/
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
void *txdlh,
enum vxge_hw_fifo_tcode t_code)
{
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
channel = &fifo->channel;
	/* the masked t_code can never be negative; only the upper bound matters */
	if ((t_code & 0x7) > 0x4) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
return status;
}
/**
* vxge_hw_fifo_txdl_free - Free descriptor.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor handle.
*
* Free the reserved descriptor. This operation is "symmetrical" to
* vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
* lifecycle.
*
 * After freeing, the descriptor can again be:
*
* - reserved (vxge_hw_fifo_txdl_reserve);
*
* - posted (vxge_hw_fifo_txdl_post);
*
* - completed (vxge_hw_fifo_txdl_next_completed);
*
* - and recycled again (vxge_hw_fifo_txdl_free).
*
* For alternative state transitions and more details please refer to
* the design doc.
*
*/
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
u32 max_frags;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
(struct vxge_hw_fifo_txd *)txdlh);
max_frags = fifo->config->max_frags;
vxge_hw_channel_dtr_free(channel, txdlh);
}
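/*
 * Illustrative sketch only: draining completed transmit descriptors
 * with the three calls above.  VXGE_HW_FIFO_T_CODE_OK is assumed to
 * be the zero "no error" transfer code from the matching header;
 * the example_ name is hypothetical.
 */
#if 0
static void example_drain_tx_completions(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		/* recycle the descriptor so it can be reserved again */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
#endif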
/**
* vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
* to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
* @duplicate_mode: Duplicate MAC address add mode. Please see
* enum vxge_hw_vpath_mac_addr_add_mode{}
*
* Adds the given mac address and mac address mask into the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
* vxge_hw_vpath_mac_addr_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vp,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN],
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
u32 i;
u64 data1 = 0ULL;
u64 data2 = 0ULL;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
for (i = 0; i < ETH_ALEN; i++) {
data1 <<= 8;
data1 |= (u8)macaddr[i];
data2 <<= 8;
data2 |= (u8)macaddr_mask[i];
}
switch (duplicate_mode) {
case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
i = 0;
break;
case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
i = 1;
break;
case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
i = 2;
break;
default:
i = 0;
break;
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0,
VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
return status;
}
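/*
 * Illustrative sketch only: adding one address.  For 00:11:22:33:44:55
 * the packing loop above yields data1 = 0x0000001122334455 (address
 * bytes big-endian in the low 48 bits).  The all-ones mask here is an
 * assumption about the hardware's match semantics, and the example_
 * name is hypothetical.
 */
#if 0
static enum vxge_hw_status
example_add_mac(struct __vxge_hw_vpath_handle *vp)
{
	u8 mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return vxge_hw_vpath_mac_addr_add(vp, mac, mask,
			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}
#endif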
/**
* vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
* from MAC address table.
* @vp: Vpath handle.
* @macaddr: First MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
* Returns the first mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vp,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN])
{
u32 i;
u64 data1 = 0ULL;
u64 data2 = 0ULL;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0, &data1, &data2);
if (status != VXGE_HW_OK)
goto exit;
data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
for (i = ETH_ALEN; i > 0; i--) {
macaddr[i-1] = (u8)(data1 & 0xFF);
data1 >>= 8;
macaddr_mask[i-1] = (u8)(data2 & 0xFF);
data2 >>= 8;
}
exit:
return status;
}
/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 * vpath from the MAC address table.
* @vp: Vpath handle.
* @macaddr: Next MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
* Returns the next mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get
*
*/
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vp,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN])
{
u32 i;
u64 data1 = 0ULL;
u64 data2 = 0ULL;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0, &data1, &data2);
if (status != VXGE_HW_OK)
goto exit;
data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
for (i = ETH_ALEN; i > 0; i--) {
macaddr[i-1] = (u8)(data1 & 0xFF);
data1 >>= 8;
macaddr_mask[i-1] = (u8)(data2 & 0xFF);
data2 >>= 8;
}
exit:
return status;
}
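/*
 * Illustrative sketch only: walking the whole table with the
 * get/get_next pair above until no further entry is returned.
 * The example_ name is hypothetical.
 */
#if 0
static void example_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 mac[ETH_ALEN], mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
	while (status == VXGE_HW_OK) {
		pr_info("vpath mac entry %pM\n", mac);
		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
	}
}
#endif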
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 * from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted from the list for this vpath
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
* see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
* vxge_hw_vpath_mac_addr_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vp,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN])
{
u32 i;
u64 data1 = 0ULL;
u64 data2 = 0ULL;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
for (i = 0; i < ETH_ALEN; i++) {
data1 <<= 8;
data1 |= (u8)macaddr[i];
data2 <<= 8;
data2 |= (u8)macaddr_mask[i];
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0,
VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
return status;
}
/**
* vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
* see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
* vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
return status;
}
/**
* vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
* from vlan id table.
* @vp: Vpath handle.
* @vid: Buffer to return vlan id
*
* Returns the first vlan id in the list for this vpath.
* see also: vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
u64 data;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, vid, &data);
*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
return status;
}
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 * from the vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted from the list for this vpath
 *
 * Deletes the given vlan id from the list for this vpath.
* see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
* vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
return status;
}
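/*
 * Illustrative sketch only: mirroring the stack's VLAN membership
 * state into the vpath filter with the add/delete pair above.
 * The example_ name is hypothetical.
 */
#if 0
static enum vxge_hw_status
example_sync_vlan(struct __vxge_hw_vpath_handle *vp, u64 vid, bool member)
{
	return member ? vxge_hw_vpath_vid_add(vp, vid) :
			vxge_hw_vpath_vid_delete(vp, vid);
}
#endif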
/**
* vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
* @vp: Vpath handle.
*
* Enable promiscuous mode of Titan-e operation.
*
* See also: vxge_hw_vpath_promisc_disable().
*/
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
/* Enable promiscuous mode for function 0 only */
if (!(vpath->hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
return VXGE_HW_OK;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
VXGE_HW_RXMAC_VCFG0_BCAST_EN |
VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
}
exit:
return status;
}
/**
* vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
* @vp: Vpath handle.
*
* Disable promiscuous mode of Titan-e operation.
*
* See also: vxge_hw_vpath_promisc_enable().
*/
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
}
exit:
return status;
}
/*
* vxge_hw_vpath_bcast_enable - Enable broadcast
* @vp: Vpath handle.
*
* Enable receiving broadcasts.
*/
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
}
exit:
return status;
}
/**
* vxge_hw_vpath_mcast_enable - Enable multicast addresses.
* @vp: Vpath handle.
*
* Enable Titan-e multicast addresses.
* Returns: VXGE_HW_OK on success.
*
*/
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
}
exit:
return status;
}
/**
* vxge_hw_vpath_mcast_disable - Disable multicast addresses.
* @vp: Vpath handle.
*
* Disable Titan-e multicast addresses.
* Returns: VXGE_HW_OK - success.
* VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
*
*/
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
}
exit:
return status;
}
/*
* vxge_hw_vpath_alarm_process - Process Alarms.
* @vpath: Virtual Path.
* @skip_alarms: Do not clear the alarms
*
* Process vpath alarms.
*
*/
enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vp,
u32 skip_alarms)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
return status;
}
/**
* vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 * alarms.
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number
 * of interrupts (can be repeated). If the fifo or ring is not enabled,
 * the MSIX vector for it should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
*/
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
int alarm_msix_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
u32 vp_id = vp->vpath->vp_id;
val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
(vp_id * 4) + tim_msix_id[0]) |
VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
(vp_id * 4) + tim_msix_id[1]);
writeq(val64, &vp_reg->interrupt_cfg0);
writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
&vp_reg->interrupt_cfg2);
if (vpath->hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
0, 32), &vp_reg->one_shot_vect0_en);
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
0, 32), &vp_reg->one_shot_vect1_en);
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
0, 32), &vp_reg->one_shot_vect2_en);
}
}
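/*
 * Illustrative sketch only: per the register writes above, TIM vectors
 * are programmed as (vp_id * 4) + tim_msix_id[i] and the alarm as
 * (first_vp_id * 4) + alarm_msix_id, i.e. four vectors per vpath.
 * The {0, 1} tx/rx split and alarm id 2 are assumptions, and the
 * example_ name is hypothetical.
 */
#if 0
static void example_wire_msix(struct __vxge_hw_vpath_handle *vp)
{
	int tim_msix_id[4] = { 0, 1, 0, 0 };	/* tx, rx, rest unused */
	int alarm_msix_id = 2;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
}
#endif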
/**
* vxge_hw_vpath_msix_mask - Mask MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSIX ID
*
 * The function masks the msix interrupt for the given msix_id.
*/
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
* vxge_hw_vpath_msix_clear - Clear MSIX Vector.
* @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
*/
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
__vxge_hw_pio_mem_write32_upper(
(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
else
__vxge_hw_pio_mem_write32_upper(
(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
*/
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
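/*
 * Illustrative sketch only: a typical mask/process/clear/unmask
 * sequence around completion handling; in one-shot mode the clear
 * above re-arms the vector.  The handler shape and the vector
 * number are hypothetical.
 */
#if 0
static irqreturn_t example_msix_handler(int irq, void *dev_id)
{
	struct __vxge_hw_vpath_handle *vp = dev_id;
	int msix_id = 1;		/* hypothetical rx vector */

	vxge_hw_vpath_msix_mask(vp, msix_id);
	/* ... process ring/fifo completions here ... */
	vxge_hw_vpath_msix_clear(vp, msix_id);
	vxge_hw_vpath_msix_unmask(vp, msix_id);
	return IRQ_HANDLED;
}
#endif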
/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
* Mask Tx and Rx vpath interrupts.
*
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
*/
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
u64 tim_int_mask0[4] = {[0 ...3] = 0};
u32 tim_int_mask1[4] = {[0 ...3] = 0};
u64 val64;
struct __vxge_hw_device *hldev = vp->vpath->hldev;
VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
tim_int_mask1, vp->vpath->vp_id);
val64 = readq(&hldev->common_reg->tim_int_mask0);
if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
&hldev->common_reg->tim_int_mask0);
}
val64 = readl(&hldev->common_reg->tim_int_mask1);
if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
__vxge_hw_pio_mem_write32_upper(
(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
&hldev->common_reg->tim_int_mask1);
}
}
/**
* vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
* Unmask Tx and Rx vpath interrupts.
*
* See also: vxge_hw_vpath_inta_mask_tx_rx()
*/
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
u64 tim_int_mask0[4] = {[0 ...3] = 0};
u32 tim_int_mask1[4] = {[0 ...3] = 0};
u64 val64;
struct __vxge_hw_device *hldev = vp->vpath->hldev;
VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
tim_int_mask1, vp->vpath->vp_id);
val64 = readq(&hldev->common_reg->tim_int_mask0);
if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
&hldev->common_reg->tim_int_mask0);
}
if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
__vxge_hw_pio_mem_write32_upper(
(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
&hldev->common_reg->tim_int_mask1);
}
}
/**
* vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
* descriptors and process the same.
* @ring: Handle to the ring object used for receive
*
* The function polls the Rx for the completed descriptors and calls
* the driver via supplied completion callback.
*
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
*/
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
u8 t_code;
enum vxge_hw_status status = VXGE_HW_OK;
void *first_rxdh;
u64 val64 = 0;
int new_count = 0;
ring->cmpl_cnt = 0;
status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
if (status == VXGE_HW_OK)
ring->callback(ring, first_rxdh,
t_code, ring->channel.userdata);
if (ring->cmpl_cnt != 0) {
ring->doorbell_cnt += ring->cmpl_cnt;
if (ring->doorbell_cnt >= ring->rxds_limit) {
/*
* Each RxD is of 4 qwords, update the number of
* qwords replenished
*/
new_count = (ring->doorbell_cnt * 4);
/* For each block add 4 more qwords */
ring->total_db_cnt += ring->doorbell_cnt;
if (ring->total_db_cnt >= ring->rxds_per_block) {
new_count += 4;
/* Reset total count */
ring->total_db_cnt %= ring->rxds_per_block;
}
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
&ring->vp_reg->prc_rxd_doorbell);
val64 =
readl(&ring->common_reg->titan_general_int_status);
ring->doorbell_cnt = 0;
}
}
return status;
}
/**
* vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
* the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: pointer into the caller's list of completed socket buffers,
 * advanced by the completion callback as buffers are handed back
 * @nr_skb: number of socket buffer slots available to the callback
 * @more: set by the callback if completions remain to be processed
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
*/
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
struct sk_buff ***skb_ptr, int nr_skb,
int *more)
{
enum vxge_hw_fifo_tcode t_code;
void *first_txdlh;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
status = vxge_hw_fifo_txdl_next_completed(fifo,
&first_txdlh, &t_code);
if (status == VXGE_HW_OK)
if (fifo->callback(fifo, first_txdlh, t_code,
channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
status = VXGE_HW_COMPLETIONS_REMAIN;
return status;
}
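/*
 * Illustrative sketch only: a poll cycle over both directions.  The
 * tx callback is expected to park completed skbs through skb_ptr and
 * flag leftovers via *more; the array size here is arbitrary and the
 * example_ name is hypothetical.
 */
#if 0
static int example_poll_cycle(struct __vxge_hw_ring *ring,
			      struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[32];
	struct sk_buff **skb_ptr = completed;
	int more = 0;

	vxge_hw_vpath_poll_rx(ring);
	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 32, &more);
	/* free everything between completed[] and skb_ptr here */
	return more;
}
#endif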
| gpl-2.0 |
0sc0d3r/enigma2 | lib/base/freesatv2.cpp | 150 | 6084 | /*
FreeSat Huffman decoder for VDR
Copyright (C) 2008 DOM http://www.rst38.org.uk/vdr/
Port to C++ / Enigma 2
Copyright (C) 2008 Martin Croome
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
#include "freesatv2.h"
#ifdef FREESATV2_DEBUG
# include "eerror.h"
#endif
#include <asm/types.h>
#include "cfile.h"
#define START '\0'
#define STOP '\0'
#define ESCAPE '\1'
#ifndef DATADIR
# define DATADIR "/usr/share"
#endif
#ifndef FREESAT_DATA_DIRECTORY
#define FREESAT_DATA_DIRECTORY DATADIR
#endif
#define TABLE1_FILENAME FREESAT_DATA_DIRECTORY "/enigma2/freesat.t1"
#define TABLE2_FILENAME FREESAT_DATA_DIRECTORY "/enigma2/freesat.t2"
static void loadFile(huffTableEntry **table, const char *filename);
struct huffTableEntry
{
uint32_t value;
uint16_t bits;
char next;
huffTableEntry * nextEntry;
huffTableEntry(unsigned int value, short bits, char next) : value(value), bits(bits), next(next), nextEntry(NULL)
{ }
};
freesatHuffmanDecoder::freesatHuffmanDecoder()
{
memset(m_tables, 0, sizeof(m_tables));
loadFile(&m_tables[0][0], TABLE1_FILENAME);
loadFile(&m_tables[1][0], TABLE2_FILENAME);
}
freesatHuffmanDecoder::~freesatHuffmanDecoder()
{
int i, j;
huffTableEntry *currentEntry, *nextEntry;
for ( j = 0 ; j < 2; j++ )
{
for ( i = 0 ; i < 256; i++ )
{
currentEntry = m_tables[j][i];
while ( currentEntry != NULL )
{
nextEntry = currentEntry->nextEntry;
delete currentEntry;
currentEntry = nextEntry;
}
m_tables[j][i] = NULL;
}
}
}
/** \brief Convert a textual character description into a value
 *
 * \param str - Encoded string: a "0xNN" hex escape, one of the
 * keywords "START", "STOP" and "ESCAPE", or a literal character
 *
 * \return Raw character
 */
static unsigned char resolveChar(const char *str)
{
const char *p = str;
unsigned c0 = *p++, c1 = *p++;
if (c1)
switch(c0|c1<<8)
{
case '0'|'x'<<8:
if ( sscanf(p,"%02x", &c1) == 1 )
c0 = c1;
break;
case 'E'|'S'<<8:
if ( !strcmp(p,"CAPE") )
c0 = ESCAPE;
break;
case 'S'|'T'<<8:
if ( !strcmp(p,"OP") )
c0 = STOP;
else if ( !strcmp(p,"ART") )
c0 = START;
break;
}
return c0;
}
/** \brief Decode a binary string into a value
*
* \param binary - Binary string to decode
*
* \return Decoded value
*/
static unsigned long decodeBinary(const char *binary)
{
unsigned long mask = 0x80000000;
unsigned long val = 0;
while (*binary)
{
if ( *binary == '1' )
{
val |= mask;
}
mask >>= 1;
++binary;
}
return val;
}
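/*
 * Illustrative example only: decodeBinary("101") sets bits from the
 * MSB downwards, 0x80000000 | 0x20000000 == 0xA0000000, so Huffman
 * codes are compared against the top bits of the 32-bit bit window.
 */
#if 0
static void exampleDecodeBinary()
{
	unsigned long v = decodeBinary("101");	/* == 0xA0000000 */
	(void)v;
}
#endif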
static void loadFile(huffTableEntry **table, const char *filename)
{
char buf[1024];
char *from;
char *to;
char *binary;
char *colon;
CFile fp(filename, "r");
if ( fp )
{
while ( fgets(buf,sizeof(buf),fp) != NULL )
{
// Tokenize string "in place"
from = buf;
colon = strchr(buf, ':');
if (colon == NULL)
continue;
binary = colon + 1;
*colon = 0;
colon = strchr(binary, ':');
if (colon == NULL)
continue;
*colon = 0;
to = colon + 1;
colon = strchr(to, ':');
if (colon != NULL)
*colon = 0;
{
int bin_len = strlen(binary);
int from_char = resolveChar(from);
char to_char = resolveChar(to);
unsigned long bin = decodeBinary(binary);
// Add entry to end of bucket
huffTableEntry **pCurrent = &table[from_char];
while ( *pCurrent != NULL )
{
pCurrent = &((*pCurrent)->nextEntry);
}
*pCurrent = new huffTableEntry(bin, bin_len, to_char);
}
}
}
#ifdef FREESATV2_DEBUG
else
{
eDebug("[FREESAT] Cannot load '%s'",filename);
}
#endif
}
/** \brief Decode an EPG string as necessary
*
* \param src - Possibly encoded string
* \param size - Size of the buffer
*
* \retval NULL - Can't decode
* \return A decoded string
*/
std::string freesatHuffmanDecoder::decode(const unsigned char *src, size_t size)
{
std::string uncompressed;
if (src[0] != 0x1f)
return uncompressed;
const unsigned int table_index = src[1] - 1;
if (table_index <= 1)
{
huffTableEntry **table = &m_tables[table_index][0];
unsigned int value = 0;
unsigned int byte = 2;
unsigned int bit = 0;
int lastch = START;
while (byte < 6 && byte < size)
{
value |= src[byte] << ((5-byte) * 8);
byte++;
}
do
{
int found = 0;
unsigned bitShift = 0;
if (lastch == ESCAPE)
{
char nextCh = (value >> 24) & 0xff;
found = 1;
// Encoded in the next 8 bits.
// Terminated by the first ASCII character.
bitShift = 8;
if ((nextCh & 0x80) == 0)
lastch = nextCh;
uncompressed.append(&nextCh, 1);
}
else
{
huffTableEntry * currentEntry = table[lastch];
while ( currentEntry != NULL )
{
unsigned mask = 0, maskbit = 0x80000000;
short kk;
for ( kk = 0; kk < currentEntry->bits; kk++)
{
mask |= maskbit;
maskbit >>= 1;
}
if ((value & mask) == currentEntry->value)
{
char nextCh = currentEntry->next;
bitShift = currentEntry->bits;
if (nextCh != STOP && nextCh != ESCAPE)
{
uncompressed.append(&nextCh, 1);
}
found = 1;
lastch = nextCh;
break;
}
currentEntry = currentEntry->nextEntry;
}
}
if (found)
{
// Shift up by the number of bits.
unsigned b;
for ( b = 0; b < bitShift; b++)
{
value = (value << 1) & 0xfffffffe;
if (byte < size)
value |= (src[byte] >> (7-bit)) & 1;
if (bit == 7)
{
bit = 0;
byte++;
}
else bit++;
}
}
else
{
#ifdef FREESATV2_DEBUG
eDebug("[FREESAT] Missing table %d entry: <%s>", table_index + 1, uncompressed.c_str());
#endif
return uncompressed;
}
} while (lastch != STOP && value != 0);
}
return uncompressed;
}
| gpl-2.0 |
toxxin/kernel_palm | drivers/video/fbdev/sm501fb.c | 150 | 54451 | /* linux/drivers/video/sm501fb.c
*
* Copyright (c) 2006 Simtec Electronics
* Vincent Sanders <vince@simtec.co.uk>
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Framebuffer driver for the Silicon Motion SM501
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#ifdef CONFIG_PM
#include <linux/pm.h>
#endif
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
#include "edid.h"
static char *fb_mode = "640x480-16@60";
static unsigned long default_bpp = 16;
static struct fb_videomode sm501_default_mode = {
.refresh = 60,
.xres = 640,
.yres = 480,
.pixclock = 20833,
.left_margin = 142,
.right_margin = 13,
.upper_margin = 21,
.lower_margin = 1,
.hsync_len = 69,
.vsync_len = 3,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.vmode = FB_VMODE_NONINTERLACED
};
#define NR_PALETTE 256
enum sm501_controller {
HEAD_CRT = 0,
HEAD_PANEL = 1,
};
/* SM501 memory address.
*
* This structure is used to track memory usage within the SM501 framebuffer
* allocation. The sm_addr field is stored as an offset as it is often used
* against both the physical and mapped addresses.
*/
struct sm501_mem {
unsigned long size;
unsigned long sm_addr; /* offset from base of sm501 fb. */
void __iomem *k_addr;
};
/* private data that is shared between all framebuffers */
struct sm501fb_info {
struct device *dev;
struct fb_info *fb[2]; /* fb info for both heads */
struct resource *fbmem_res; /* framebuffer resource */
struct resource *regs_res; /* registers resource */
struct resource *regs2d_res; /* 2d registers resource */
struct sm501_platdata_fb *pdata; /* our platform data */
unsigned long pm_crt_ctrl; /* pm: crt ctrl save */
int irq;
int swap_endian; /* set to swap rgb=>bgr */
void __iomem *regs; /* remapped registers */
void __iomem *regs2d; /* 2d remapped registers */
void __iomem *fbmem; /* remapped framebuffer */
size_t fbmem_len; /* length of remapped region */
u8 *edid_data;
};
/* per-framebuffer private data */
struct sm501fb_par {
u32 pseudo_palette[16];
enum sm501_controller head;
struct sm501_mem cursor;
struct sm501_mem screen;
struct fb_ops ops;
void *store_fb;
void *store_cursor;
void __iomem *cursor_regs;
struct sm501fb_info *info;
};
/* Helper functions */
static inline int h_total(struct fb_var_screeninfo *var)
{
return var->xres + var->left_margin +
var->right_margin + var->hsync_len;
}
static inline int v_total(struct fb_var_screeninfo *var)
{
return var->yres + var->upper_margin +
var->lower_margin + var->vsync_len;
}
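/*
 * Illustrative example only: for sm501_default_mode above,
 * h_total = 640 + 142 + 13 + 69 = 864 pixels per line and
 * v_total = 480 + 21 + 1 + 3 = 505 lines per frame.
 */
#if 0
static void example_totals(void)
{
	struct fb_var_screeninfo var = {
		.xres = 640, .left_margin = 142, .right_margin = 13,
		.hsync_len = 69, .yres = 480, .upper_margin = 21,
		.lower_margin = 1, .vsync_len = 3,
	};

	/* h_total(&var) == 864, v_total(&var) == 505 */
}
#endif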
/* sm501fb_sync_regs()
*
* This call is mainly for PCI bus systems where we need to
* ensure that any writes to the bus are completed before the
* next phase, or after completing a function.
*/
static inline void sm501fb_sync_regs(struct sm501fb_info *info)
{
smc501_readl(info->regs);
}
/* sm501_alloc_mem
*
* This is an attempt to lay out memory for the two framebuffers and
* everything else
*
* |fbmem_res->start fbmem_res->end|
* | |
* |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
* |-> fb[0].fix.smem_len <-| spare |-> fb[1].fix.smem_len <-|-> cursors <-|
*
* The "spare" space is for the 2d engine data
* the fixed is space for the cursors (2x1Kbyte)
*
* we need to allocate memory for the 2D acceleration engine
* command list and the data for the engine to deal with.
*
* - all allocations must be 128bit aligned
* - cursors are 64x64x2 bits (1Kbyte)
*
*/
#define SM501_MEMF_CURSOR (1)
#define SM501_MEMF_PANEL (2)
#define SM501_MEMF_CRT (4)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
unsigned int ptr;
unsigned int end;
switch (why) {
case SM501_MEMF_CURSOR:
ptr = inf->fbmem_len - size;
inf->fbmem_len = ptr; /* adjust available memory. */
break;
case SM501_MEMF_PANEL:
if (size > inf->fbmem_len)
return -ENOMEM;
ptr = inf->fbmem_len - size;
fbi = inf->fb[HEAD_CRT];
/* round down, some programs such as directfb do not draw
* 0,0 correctly unless the start is aligned to a page start.
*/
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
if (fbi && ptr < smem_len)
return -ENOMEM;
break;
case SM501_MEMF_CRT:
ptr = 0;
/* check to see if we have panel memory allocated
		 * which would put a limit on available memory. */
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
par = fbi->par;
end = par->screen.k_addr ? par->screen.sm_addr : inf->fbmem_len;
} else
end = inf->fbmem_len;
if ((ptr + size) > end)
return -ENOMEM;
break;
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
par = fbi->par;
end = par->screen.sm_addr;
} else
end = inf->fbmem_len;
if ((ptr + size) > end)
return -ENOMEM;
break;
default:
return -EINVAL;
}
mem->size = size;
mem->sm_addr = ptr;
mem->k_addr = inf->fbmem + ptr;
dev_dbg(inf->dev, "%s: result %08lx, %p - %u, %zd\n",
__func__, mem->sm_addr, mem->k_addr, why, size);
return 0;
}
/* sm501fb_ps_to_hz
*
* Converts a period in picoseconds to Hz.
*
* Note, we try to keep this in Hz to minimise rounding with
* the limited PLL settings on the SM501.
*/
static unsigned long sm501fb_ps_to_hz(unsigned long psvalue)
{
	unsigned long long numerator = 1000000000000ULL;
/* 10^12 / picosecond period gives frequency in Hz */
do_div(numerator, psvalue);
return (unsigned long)numerator;
}
/* sm501fb_hz_to_ps is identical to the opposite transform */
#define sm501fb_hz_to_ps(x) sm501fb_ps_to_hz(x)
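/*
 * Illustrative example only: the default mode's 20833 ps period
 * converts to 10^12 / 20833 ~= 48000768 Hz, i.e. a ~48 MHz pixel
 * clock, which is what gets requested from the SM501 PLL below.
 */
#if 0
static void example_ps_to_hz(void)
{
	unsigned long hz = sm501fb_ps_to_hz(20833);	/* ~48 MHz */
	(void)hz;
}
#endif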
/* sm501fb_setup_gamma
*
* Programs a linear 1.0 gamma ramp in case the gamma
* correction is enabled without programming anything else.
*/
static void sm501fb_setup_gamma(struct sm501fb_info *fbi,
unsigned long palette)
{
unsigned long value = 0;
int offset;
/* set gamma values */
for (offset = 0; offset < 256 * 4; offset += 4) {
smc501_writel(value, fbi->regs + palette + offset);
value += 0x010101; /* Advance RGB by 1,1,1.*/
}
}
/* sm501fb_check_var
*
* check common variables for both panel and crt
*/
static int sm501fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *sm = par->info;
unsigned long tmp;
/* check we can fit these values into the registers */
if (var->hsync_len > 255 || var->vsync_len > 63)
return -EINVAL;
/* hdisplay end and hsync start */
if ((var->xres + var->right_margin) > 4096)
return -EINVAL;
/* vdisplay end and vsync start */
if ((var->yres + var->lower_margin) > 2048)
return -EINVAL;
/* hard limits of device */
if (h_total(var) > 4096 || v_total(var) > 2048)
return -EINVAL;
/* check our line length is going to be 128 bit aligned */
tmp = (var->xres * var->bits_per_pixel) / 8;
if ((tmp & 15) != 0)
return -EINVAL;
/* check the virtual size */
if (var->xres_virtual > 4096 || var->yres_virtual > 2048)
return -EINVAL;
/* can cope with 8,16 or 32bpp */
if (var->bits_per_pixel <= 8)
var->bits_per_pixel = 8;
else if (var->bits_per_pixel <= 16)
var->bits_per_pixel = 16;
else if (var->bits_per_pixel == 24)
var->bits_per_pixel = 32;
/* set r/g/b positions and validate bpp */
switch(var->bits_per_pixel) {
case 8:
var->red.length = var->bits_per_pixel;
var->red.offset = 0;
var->green.length = var->bits_per_pixel;
var->green.offset = 0;
var->blue.length = var->bits_per_pixel;
var->blue.offset = 0;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 16:
if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) {
var->blue.offset = 11;
var->green.offset = 5;
var->red.offset = 0;
} else {
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
}
var->transp.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
var->transp.length = 0;
break;
case 32:
if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) {
var->transp.offset = 0;
var->red.offset = 8;
var->green.offset = 16;
var->blue.offset = 24;
} else {
var->transp.offset = 24;
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
}
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* sm501fb_check_var_crt():
*
* check the parameters for the CRT head, and either bring them
* back into range, or return -EINVAL.
*/
static int sm501fb_check_var_crt(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return sm501fb_check_var(var, info);
}
/* sm501fb_check_var_pnl():
*
 * check the parameters for the panel head, and either bring them
* back into range, or return -EINVAL.
*/
static int sm501fb_check_var_pnl(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return sm501fb_check_var(var, info);
}
/* sm501fb_set_par_common
*
* set common registers for framebuffers
*/
static int sm501fb_set_par_common(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
unsigned long pixclock; /* pixelclock in Hz */
unsigned long sm501pixclock; /* pixelclock the 501 can achieve in Hz */
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual);
switch (par->head) {
case HEAD_CRT:
mem_type = SM501_MEMF_CRT;
clock_type = SM501_CLOCK_V2XCLK;
head_addr = SM501_DC_CRT_FB_ADDR;
break;
case HEAD_PANEL:
mem_type = SM501_MEMF_PANEL;
clock_type = SM501_CLOCK_P2XCLK;
head_addr = SM501_DC_PANEL_FB_ADDR;
break;
default:
mem_type = 0; /* stop compiler warnings */
head_addr = 0;
clock_type = 0;
}
switch (var->bits_per_pixel) {
case 8:
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
break;
case 16:
info->fix.visual = FB_VISUAL_TRUECOLOR;
break;
case 32:
info->fix.visual = FB_VISUAL_TRUECOLOR;
break;
}
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
info->fix.smem_len = smem_len;
mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
/* set start of framebuffer to the screen */
smc501_writel(par->screen.sm_addr | SM501_ADDR_FLIP,
fbi->regs + head_addr);
/* program CRT clock */
pixclock = sm501fb_ps_to_hz(var->pixclock);
sm501pixclock = sm501_set_clock(fbi->dev->parent, clock_type,
pixclock);
/* update fb layer with actual clock used */
var->pixclock = sm501fb_hz_to_ps(sm501pixclock);
dev_dbg(fbi->dev, "%s: pixclock(ps) = %u, pixclock(Hz) = %lu, "
"sm501pixclock = %lu, error = %ld%%\n",
__func__, var->pixclock, pixclock, sm501pixclock,
((pixclock - sm501pixclock)*100)/pixclock);
return 0;
}
/* sm501fb_set_par_geometry
*
* set the geometry registers for specified framebuffer.
*/
static void sm501fb_set_par_geometry(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
void __iomem *base = fbi->regs;
unsigned long reg;
if (par->head == HEAD_CRT)
base += SM501_DC_CRT_H_TOT;
else
base += SM501_DC_PANEL_H_TOT;
/* set framebuffer width and display width */
reg = info->fix.line_length;
reg |= ((var->xres * var->bits_per_pixel)/8) << 16;
smc501_writel(reg, fbi->regs + (par->head == HEAD_CRT ?
SM501_DC_CRT_FB_OFFSET : SM501_DC_PANEL_FB_OFFSET));
/* program horizontal total */
reg = (h_total(var) - 1) << 16;
reg |= (var->xres - 1);
smc501_writel(reg, base + SM501_OFF_DC_H_TOT);
/* program horizontal sync */
reg = var->hsync_len << 16;
reg |= var->xres + var->right_margin - 1;
smc501_writel(reg, base + SM501_OFF_DC_H_SYNC);
/* program vertical total */
reg = (v_total(var) - 1) << 16;
reg |= (var->yres - 1);
smc501_writel(reg, base + SM501_OFF_DC_V_TOT);
/* program vertical sync */
reg = var->vsync_len << 16;
reg |= var->yres + var->lower_margin - 1;
smc501_writel(reg, base + SM501_OFF_DC_V_SYNC);
}
/* sm501fb_pan_crt
*
 * pan the CRT display output within a virtual framebuffer
*/
static int sm501fb_pan_crt(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
unsigned int bytes_pixel = info->var.bits_per_pixel / 8;
unsigned long reg;
unsigned long xoffs;
xoffs = var->xoffset * bytes_pixel;
reg = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
reg &= ~SM501_DC_CRT_CONTROL_PIXEL_MASK;
reg |= ((xoffs & 15) / bytes_pixel) << 4;
smc501_writel(reg, fbi->regs + SM501_DC_CRT_CONTROL);
reg = (par->screen.sm_addr + xoffs +
var->yoffset * info->fix.line_length);
smc501_writel(reg | SM501_ADDR_FLIP, fbi->regs + SM501_DC_CRT_FB_ADDR);
sm501fb_sync_regs(fbi);
return 0;
}
/* sm501fb_pan_pnl
*
 * pan the panel display output within a virtual framebuffer
*/
static int sm501fb_pan_pnl(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
unsigned long reg;
reg = var->xoffset | (info->var.xres_virtual << 16);
smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_WIDTH);
reg = var->yoffset | (info->var.yres_virtual << 16);
smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_HEIGHT);
sm501fb_sync_regs(fbi);
return 0;
}
/* sm501fb_set_par_crt
*
* Set the CRT video mode from the fb_info structure
*/
static int sm501fb_set_par_crt(struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
struct fb_var_screeninfo *var = &info->var;
unsigned long control; /* control register */
int ret;
/* activate new configuration */
dev_dbg(fbi->dev, "%s(%p)\n", __func__, info);
/* enable CRT DAC - note 0 is on!*/
sm501_misc_control(fbi->dev->parent, 0, SM501_MISC_DAC_POWER);
control = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
control &= (SM501_DC_CRT_CONTROL_PIXEL_MASK |
SM501_DC_CRT_CONTROL_GAMMA |
SM501_DC_CRT_CONTROL_BLANK |
SM501_DC_CRT_CONTROL_SEL |
SM501_DC_CRT_CONTROL_CP |
SM501_DC_CRT_CONTROL_TVP);
/* set the sync polarities before we check data source */
if ((var->sync & FB_SYNC_HOR_HIGH_ACT) == 0)
control |= SM501_DC_CRT_CONTROL_HSP;
if ((var->sync & FB_SYNC_VERT_HIGH_ACT) == 0)
control |= SM501_DC_CRT_CONTROL_VSP;
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
info->fix.smem_len);
goto out_update;
}
ret = sm501fb_set_par_common(info, var);
if (ret) {
dev_err(fbi->dev, "failed to set common parameters\n");
return ret;
}
sm501fb_pan_crt(var, info);
sm501fb_set_par_geometry(info, var);
control |= SM501_FIFO_3; /* fill if >3 free slots */
switch(var->bits_per_pixel) {
case 8:
control |= SM501_DC_CRT_CONTROL_8BPP;
break;
case 16:
control |= SM501_DC_CRT_CONTROL_16BPP;
sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE);
break;
case 32:
control |= SM501_DC_CRT_CONTROL_32BPP;
sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE);
break;
default:
BUG();
}
control |= SM501_DC_CRT_CONTROL_SEL; /* CRT displays CRT data */
control |= SM501_DC_CRT_CONTROL_TE; /* enable CRT timing */
control |= SM501_DC_CRT_CONTROL_ENABLE; /* enable CRT plane */
out_update:
dev_dbg(fbi->dev, "new control is %08lx\n", control);
smc501_writel(control, fbi->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(fbi);
return 0;
}
static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
{
unsigned long control;
void __iomem *ctrl_reg = fbi->regs + SM501_DC_PANEL_CONTROL;
struct sm501_platdata_fbsub *pd = fbi->pdata->fb_pnl;
control = smc501_readl(ctrl_reg);
if (to && (control & SM501_DC_PANEL_CONTROL_VDD) == 0) {
/* enable panel power */
control |= SM501_DC_PANEL_CONTROL_VDD; /* FPVDDEN */
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
control |= SM501_DC_PANEL_CONTROL_DATA; /* DATA */
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
/* VBIASEN */
if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
control &= ~SM501_DC_PANEL_CONTROL_BIAS;
else
control |= SM501_DC_PANEL_CONTROL_BIAS;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
control &= ~SM501_DC_PANEL_CONTROL_FPEN;
else
control |= SM501_DC_PANEL_CONTROL_FPEN;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
} else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
/* disable panel power */
if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
control |= SM501_DC_PANEL_CONTROL_FPEN;
else
control &= ~SM501_DC_PANEL_CONTROL_FPEN;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
control |= SM501_DC_PANEL_CONTROL_BIAS;
else
control &= ~SM501_DC_PANEL_CONTROL_BIAS;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
control &= ~SM501_DC_PANEL_CONTROL_DATA;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
control &= ~SM501_DC_PANEL_CONTROL_VDD;
smc501_writel(control, ctrl_reg);
sm501fb_sync_regs(fbi);
mdelay(10);
}
sm501fb_sync_regs(fbi);
}
/* sm501fb_set_par_pnl
*
* Set the panel video mode from the fb_info structure
*/
static int sm501fb_set_par_pnl(struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
struct fb_var_screeninfo *var = &info->var;
unsigned long control;
unsigned long reg;
int ret;
dev_dbg(fbi->dev, "%s(%p)\n", __func__, info);
/* activate this new configuration */
ret = sm501fb_set_par_common(info, var);
if (ret)
return ret;
sm501fb_pan_pnl(var, info);
sm501fb_set_par_geometry(info, var);
/* update control register */
control = smc501_readl(fbi->regs + SM501_DC_PANEL_CONTROL);
control &= (SM501_DC_PANEL_CONTROL_GAMMA |
SM501_DC_PANEL_CONTROL_VDD |
SM501_DC_PANEL_CONTROL_DATA |
SM501_DC_PANEL_CONTROL_BIAS |
SM501_DC_PANEL_CONTROL_FPEN |
SM501_DC_PANEL_CONTROL_CP |
SM501_DC_PANEL_CONTROL_CK |
SM501_DC_PANEL_CONTROL_HP |
SM501_DC_PANEL_CONTROL_VP |
SM501_DC_PANEL_CONTROL_HPD |
SM501_DC_PANEL_CONTROL_VPD);
control |= SM501_FIFO_3; /* fill if >3 free slots */
switch(var->bits_per_pixel) {
case 8:
control |= SM501_DC_PANEL_CONTROL_8BPP;
break;
case 16:
control |= SM501_DC_PANEL_CONTROL_16BPP;
sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE);
break;
case 32:
control |= SM501_DC_PANEL_CONTROL_32BPP;
sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE);
break;
default:
BUG();
}
smc501_writel(0x0, fbi->regs + SM501_DC_PANEL_PANNING_CONTROL);
/* panel plane top left and bottom right location */
smc501_writel(0x00, fbi->regs + SM501_DC_PANEL_TL_LOC);
reg = var->xres - 1;
reg |= (var->yres - 1) << 16;
smc501_writel(reg, fbi->regs + SM501_DC_PANEL_BR_LOC);
/* program panel control register */
control |= SM501_DC_PANEL_CONTROL_TE; /* enable PANEL timing */
control |= SM501_DC_PANEL_CONTROL_EN; /* enable PANEL gfx plane */
if ((var->sync & FB_SYNC_HOR_HIGH_ACT) == 0)
control |= SM501_DC_PANEL_CONTROL_HSP;
if ((var->sync & FB_SYNC_VERT_HIGH_ACT) == 0)
control |= SM501_DC_PANEL_CONTROL_VSP;
smc501_writel(control, fbi->regs + SM501_DC_PANEL_CONTROL);
sm501fb_sync_regs(fbi);
/* ensure the panel interface is not tristated at this point */
sm501_modify_reg(fbi->dev->parent, SM501_SYSTEM_CONTROL,
0, SM501_SYSCTRL_PANEL_TRISTATE);
/* power the panel up */
sm501fb_panel_power(fbi, 1);
return 0;
}
/* chan_to_field
*
* convert a colour value into a field position
*
* from pxafb.c
*/
static inline unsigned int chan_to_field(unsigned int chan,
struct fb_bitfield *bf)
{
chan &= 0xffff;
chan >>= 16 - bf->length;
return chan << bf->offset;
}
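/*
 * Illustrative example only: full-scale red (0xffff) into a 5-bit
 * field at offset 11 gives 0xffff >> 11 = 0x1f, then
 * 0x1f << 11 = 0xf800 - the familiar RGB565 red mask.
 */
#if 0
static void example_chan_to_field(void)
{
	struct fb_bitfield red = { .offset = 11, .length = 5 };
	unsigned int field = chan_to_field(0xffff, &red);	/* 0xf800 */
	(void)field;
}
#endif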
/* sm501fb_setcolreg
*
* set the colour mapping for modes that support palettised data
*/
static int sm501fb_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
void __iomem *base = fbi->regs;
unsigned int val;
if (par->head == HEAD_CRT)
base += SM501_DC_CRT_PALETTE;
else
base += SM501_DC_PANEL_PALETTE;
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
		/* true-colour, use pseudo-palette */
if (regno < 16) {
u32 *pal = par->pseudo_palette;
val = chan_to_field(red, &info->var.red);
val |= chan_to_field(green, &info->var.green);
val |= chan_to_field(blue, &info->var.blue);
pal[regno] = val;
}
break;
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
val = (red >> 8) << 16;
val |= (green >> 8) << 8;
val |= blue >> 8;
smc501_writel(val, base + (regno * 4));
}
break;
default:
return 1; /* unknown type */
}
return 0;
}
/* sm501fb_blank_pnl
*
* Blank or un-blank the panel interface
*/
static int sm501fb_blank_pnl(int blank_mode, struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
dev_dbg(fbi->dev, "%s(mode=%d, %p)\n", __func__, blank_mode, info);
switch (blank_mode) {
case FB_BLANK_POWERDOWN:
sm501fb_panel_power(fbi, 0);
break;
case FB_BLANK_UNBLANK:
sm501fb_panel_power(fbi, 1);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
default:
return 1;
}
return 0;
}
/* sm501fb_blank_crt
*
* Blank or un-blank the crt interface
*/
static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
unsigned long ctrl;
dev_dbg(fbi->dev, "%s(mode=%d, %p)\n", __func__, blank_mode, info);
ctrl = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL);
switch (blank_mode) {
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
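		/* fall through - powering down also blanks the display */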
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
break;
case FB_BLANK_UNBLANK:
ctrl &= ~SM501_DC_CRT_CONTROL_BLANK;
ctrl |= SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, 0, SM501_MISC_DAC_POWER);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
default:
return 1;
}
smc501_writel(ctrl, fbi->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(fbi);
return 0;
}
/* sm501fb_cursor
*
* set or change the hardware cursor parameters
*/
static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
void __iomem *base = fbi->regs;
unsigned long hwc_addr;
unsigned long fg, bg;
dev_dbg(fbi->dev, "%s(%p,%p)\n", __func__, info, cursor);
if (par->head == HEAD_CRT)
base += SM501_DC_CRT_HWC_BASE;
else
base += SM501_DC_PANEL_HWC_BASE;
/* check not being asked to exceed capabilities */
if (cursor->image.width > 64)
return -EINVAL;
if (cursor->image.height > 64)
return -EINVAL;
if (cursor->image.depth > 1)
return -EINVAL;
hwc_addr = smc501_readl(base + SM501_OFF_HWC_ADDR);
if (cursor->enable)
smc501_writel(hwc_addr | SM501_HWC_EN,
base + SM501_OFF_HWC_ADDR);
else
smc501_writel(hwc_addr & ~SM501_HWC_EN,
base + SM501_OFF_HWC_ADDR);
/* set data */
if (cursor->set & FB_CUR_SETPOS) {
unsigned int x = cursor->image.dx;
unsigned int y = cursor->image.dy;
if (x >= 2048 || y >= 2048 )
return -EINVAL;
dev_dbg(fbi->dev, "set position %d,%d\n", x, y);
//y += cursor->image.height;
smc501_writel(x | (y << 16), base + SM501_OFF_HWC_LOC);
}
if (cursor->set & FB_CUR_SETCMAP) {
unsigned int bg_col = cursor->image.bg_color;
unsigned int fg_col = cursor->image.fg_color;
dev_dbg(fbi->dev, "%s: update cmap (%08x,%08x)\n",
__func__, bg_col, fg_col);
bg = ((info->cmap.red[bg_col] & 0xF8) << 8) |
((info->cmap.green[bg_col] & 0xFC) << 3) |
((info->cmap.blue[bg_col] & 0xF8) >> 3);
fg = ((info->cmap.red[fg_col] & 0xF8) << 8) |
((info->cmap.green[fg_col] & 0xFC) << 3) |
((info->cmap.blue[fg_col] & 0xF8) >> 3);
dev_dbg(fbi->dev, "fgcol %08lx, bgcol %08lx\n", fg, bg);
smc501_writel(bg, base + SM501_OFF_HWC_COLOR_1_2);
smc501_writel(fg, base + SM501_OFF_HWC_COLOR_3);
}
if (cursor->set & FB_CUR_SETSIZE ||
cursor->set & (FB_CUR_SETIMAGE | FB_CUR_SETSHAPE)) {
/* SM501 cursor is a two bpp 64x64 bitmap this routine
* clears it to transparent then combines the cursor
* shape plane with the colour plane to set the
* cursor */
int x, y;
const unsigned char *pcol = cursor->image.data;
const unsigned char *pmsk = cursor->mask;
void __iomem *dst = par->cursor.k_addr;
unsigned char dcol = 0;
unsigned char dmsk = 0;
unsigned int op;
dev_dbg(fbi->dev, "%s: setting shape (%d,%d)\n",
__func__, cursor->image.width, cursor->image.height);
for (op = 0; op < (64*64*2)/8; op+=4)
smc501_writel(0x0, dst + op);
for (y = 0; y < cursor->image.height; y++) {
for (x = 0; x < cursor->image.width; x++) {
if ((x % 8) == 0) {
dcol = *pcol++;
dmsk = *pmsk++;
} else {
dcol >>= 1;
dmsk >>= 1;
}
if (dmsk & 1) {
op = (dcol & 1) ? 1 : 3;
op <<= ((x % 4) * 2);
op |= readb(dst + (x / 4));
writeb(op, dst + (x / 4));
}
}
dst += (64*2)/8;
}
}
sm501fb_sync_regs(fbi); /* ensure cursor data flushed */
return 0;
}
/* sm501fb_crtsrc_show
*
* device attribute code to show where the crt output is sourced from
*/
static ssize_t sm501fb_crtsrc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sm501fb_info *info = dev_get_drvdata(dev);
unsigned long ctrl;
ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
ctrl &= SM501_DC_CRT_CONTROL_SEL;
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl ? "crt" : "panel");
}
/* sm501fb_crtsrc_store
*
* device attribute code to set where the crt output is sourced from
*/
static ssize_t sm501fb_crtsrc_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct sm501fb_info *info = dev_get_drvdata(dev);
enum sm501_controller head;
unsigned long ctrl;
if (len < 1)
return -EINVAL;
if (strnicmp(buf, "crt", 3) == 0)
head = HEAD_CRT;
else if (strnicmp(buf, "panel", 5) == 0)
head = HEAD_PANEL;
else
return -EINVAL;
dev_info(dev, "setting crt source to head %d\n", head);
ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
if (head == HEAD_CRT) {
ctrl |= SM501_DC_CRT_CONTROL_SEL;
ctrl |= SM501_DC_CRT_CONTROL_ENABLE;
ctrl |= SM501_DC_CRT_CONTROL_TE;
} else {
ctrl &= ~SM501_DC_CRT_CONTROL_SEL;
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
ctrl &= ~SM501_DC_CRT_CONTROL_TE;
}
smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(info);
return len;
}
/* Prepare the device_attr for registration with sysfs later */
static DEVICE_ATTR(crt_src, 0664, sm501fb_crtsrc_show, sm501fb_crtsrc_store);
/* sm501fb_show_regs
*
* show the primary sm501 registers
*/
static int sm501fb_show_regs(struct sm501fb_info *info, char *ptr,
unsigned int start, unsigned int len)
{
void __iomem *mem = info->regs;
char *buf = ptr;
unsigned int reg;
for (reg = start; reg < (len + start); reg += 4)
ptr += sprintf(ptr, "%08x = %08x\n", reg,
smc501_readl(mem + reg));
return ptr - buf;
}
/* sm501fb_debug_show_crt
*
* show the crt control and cursor registers
*/
static ssize_t sm501fb_debug_show_crt(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sm501fb_info *info = dev_get_drvdata(dev);
char *ptr = buf;
ptr += sm501fb_show_regs(info, ptr, SM501_DC_CRT_CONTROL, 0x40);
ptr += sm501fb_show_regs(info, ptr, SM501_DC_CRT_HWC_BASE, 0x10);
return ptr - buf;
}
static DEVICE_ATTR(fbregs_crt, 0444, sm501fb_debug_show_crt, NULL);
/* sm501fb_debug_show_pnl
*
* show the panel control and cursor registers
*/
static ssize_t sm501fb_debug_show_pnl(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sm501fb_info *info = dev_get_drvdata(dev);
char *ptr = buf;
ptr += sm501fb_show_regs(info, ptr, 0x0, 0x40);
ptr += sm501fb_show_regs(info, ptr, SM501_DC_PANEL_HWC_BASE, 0x10);
return ptr - buf;
}
static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL);
/* acceleration operations */
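/* sm501fb_sync
 *
 * busy-wait (bounded) for the 2d engine to finish the previous
 * operation before any new acceleration registers are written
 */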
static int sm501fb_sync(struct fb_info *info)
{
int count = 1000000;
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
/* wait for the 2d engine to be ready */
while ((count > 0) &&
(smc501_readl(fbi->regs + SM501_SYSTEM_CONTROL) &
SM501_SYSCTRL_2D_ENGINE_STATUS) != 0)
count--;
if (count <= 0) {
dev_err(info->dev, "Timeout waiting for 2d engine sync\n");
return 1;
}
return 0;
}
static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
int width = area->width;
int height = area->height;
int sx = area->sx;
int sy = area->sy;
int dx = area->dx;
int dy = area->dy;
unsigned long rtl = 0;
/* source clip */
if ((sx >= info->var.xres_virtual) ||
(sy >= info->var.yres_virtual))
		/* Source area not within virtual screen, skipping */
return;
if ((sx + width) >= info->var.xres_virtual)
width = info->var.xres_virtual - sx - 1;
if ((sy + height) >= info->var.yres_virtual)
height = info->var.yres_virtual - sy - 1;
/* dest clip */
if ((dx >= info->var.xres_virtual) ||
(dy >= info->var.yres_virtual))
/* Destination Area not within virtual screen, skipping */
return;
if ((dx + width) >= info->var.xres_virtual)
width = info->var.xres_virtual - dx - 1;
if ((dy + height) >= info->var.yres_virtual)
height = info->var.yres_virtual - dy - 1;
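	/* if the destination lies to the right of or below the source the
	 * areas may overlap, so copy right-to-left/bottom-to-top to avoid
	 * overwriting source data before it has been read */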
if ((sx < dx) || (sy < dy)) {
rtl = 1 << 27;
sx += width - 1;
dx += width - 1;
sy += height - 1;
dy += height - 1;
}
if (sm501fb_sync(info))
return;
/* set the base addresses */
smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
smc501_writel(par->screen.sm_addr,
fbi->regs2d + SM501_2D_DESTINATION_BASE);
/* set the window width */
smc501_writel((info->var.xres << 16) | info->var.xres,
fbi->regs2d + SM501_2D_WINDOW_WIDTH);
/* set window stride */
smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
fbi->regs2d + SM501_2D_PITCH);
/* set data format */
switch (info->var.bits_per_pixel) {
case 8:
smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH);
break;
case 16:
smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
break;
case 32:
smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
break;
}
/* 2d compare mask */
smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
/* 2d mask */
smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
/* source and destination x y */
smc501_writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
smc501_writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
/* w/h */
smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
/* do area move */
smc501_writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
}
static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
int width = rect->width, height = rect->height;
if ((rect->dx >= info->var.xres_virtual) ||
(rect->dy >= info->var.yres_virtual))
/* Rectangle not within virtual screen, skipping */
return;
if ((rect->dx + width) >= info->var.xres_virtual)
width = info->var.xres_virtual - rect->dx - 1;
if ((rect->dy + height) >= info->var.yres_virtual)
height = info->var.yres_virtual - rect->dy - 1;
if (sm501fb_sync(info))
return;
/* set the base addresses */
smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
smc501_writel(par->screen.sm_addr,
fbi->regs2d + SM501_2D_DESTINATION_BASE);
/* set the window width */
smc501_writel((info->var.xres << 16) | info->var.xres,
fbi->regs2d + SM501_2D_WINDOW_WIDTH);
/* set window stride */
smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
fbi->regs2d + SM501_2D_PITCH);
/* set data format */
switch (info->var.bits_per_pixel) {
case 8:
smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH);
break;
case 16:
smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
break;
case 32:
smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
break;
}
/* 2d compare mask */
smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
/* 2d mask */
smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
/* colour */
smc501_writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
/* x y */
smc501_writel((rect->dx << 16) | rect->dy,
fbi->regs2d + SM501_2D_DESTINATION);
/* w/h */
smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
/* do rectangle fill */
smc501_writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
}
static struct fb_ops sm501fb_ops_crt = {
.owner = THIS_MODULE,
.fb_check_var = sm501fb_check_var_crt,
.fb_set_par = sm501fb_set_par_crt,
.fb_blank = sm501fb_blank_crt,
.fb_setcolreg = sm501fb_setcolreg,
.fb_pan_display = sm501fb_pan_crt,
.fb_cursor = sm501fb_cursor,
.fb_fillrect = sm501fb_fillrect,
.fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_sync = sm501fb_sync,
};
static struct fb_ops sm501fb_ops_pnl = {
.owner = THIS_MODULE,
.fb_check_var = sm501fb_check_var_pnl,
.fb_set_par = sm501fb_set_par_pnl,
.fb_pan_display = sm501fb_pan_pnl,
.fb_blank = sm501fb_blank_pnl,
.fb_setcolreg = sm501fb_setcolreg,
.fb_cursor = sm501fb_cursor,
.fb_fillrect = sm501fb_fillrect,
.fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_sync = sm501fb_sync,
};
/* sm501_init_cursor
*
* initialise hw cursor parameters
*/
static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
{
struct sm501fb_par *par;
struct sm501fb_info *info;
int ret;
if (fbi == NULL)
return 0;
par = fbi->par;
info = par->info;
par->cursor_regs = info->regs + reg_base;
ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
fbi->fix.smem_len);
if (ret < 0)
return ret;
	/* set the cursor base address and clear the location and
	 * colour registers */
smc501_writel(par->cursor.sm_addr,
par->cursor_regs + SM501_OFF_HWC_ADDR);
smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_LOC);
smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_1_2);
smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_3);
sm501fb_sync_regs(info);
return 0;
}
/* sm501fb_info_start
*
* fills the par structure claiming resources and remapping etc.
*/
static int sm501fb_start(struct sm501fb_info *info,
struct platform_device *pdev)
{
struct resource *res;
struct device *dev = &pdev->dev;
int k;
int ret;
info->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
/* we currently do not use the IRQ */
dev_warn(dev, "no irq for device\n");
}
/* allocate, reserve and remap resources for display
* controller registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no resource definition for registers\n");
ret = -ENOENT;
goto err_release;
}
info->regs_res = request_mem_region(res->start,
resource_size(res),
pdev->name);
if (info->regs_res == NULL) {
dev_err(dev, "cannot claim registers\n");
ret = -ENXIO;
goto err_release;
}
info->regs = ioremap(res->start, resource_size(res));
if (info->regs == NULL) {
dev_err(dev, "cannot remap registers\n");
ret = -ENXIO;
goto err_regs_res;
}
/* allocate, reserve and remap resources for 2d
* controller registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "no resource definition for 2d registers\n");
ret = -ENOENT;
goto err_regs_map;
}
info->regs2d_res = request_mem_region(res->start,
resource_size(res),
pdev->name);
if (info->regs2d_res == NULL) {
dev_err(dev, "cannot claim registers\n");
ret = -ENXIO;
goto err_regs_map;
}
info->regs2d = ioremap(res->start, resource_size(res));
if (info->regs2d == NULL) {
dev_err(dev, "cannot remap registers\n");
ret = -ENXIO;
goto err_regs2d_res;
}
/* allocate, reserve resources for framebuffer */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
ret = -ENXIO;
goto err_regs2d_map;
}
info->fbmem_res = request_mem_region(res->start,
resource_size(res),
pdev->name);
if (info->fbmem_res == NULL) {
dev_err(dev, "cannot claim framebuffer\n");
ret = -ENXIO;
goto err_regs2d_map;
}
	info->fbmem = ioremap(res->start, resource_size(res));
	if (info->fbmem == NULL) {
		dev_err(dev, "cannot remap framebuffer\n");
		ret = -ENXIO;
		goto err_mem_res;
	}
info->fbmem_len = resource_size(res);
/* clear framebuffer memory - avoids garbage data on unused fb */
memset(info->fbmem, 0, info->fbmem_len);
/* clear palette ram - undefined at power on */
for (k = 0; k < (256 * 3); k++)
smc501_writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4));
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
/* enable 2d controller */
sm501_unit_power(dev->parent, SM501_GATE_2D_ENGINE, 1);
/* setup cursors */
sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR);
sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR);
return 0; /* everything is setup */
err_mem_res:
release_mem_region(info->fbmem_res->start,
resource_size(info->fbmem_res));
err_regs2d_map:
iounmap(info->regs2d);
err_regs2d_res:
release_mem_region(info->regs2d_res->start,
resource_size(info->regs2d_res));
err_regs_map:
iounmap(info->regs);
err_regs_res:
release_mem_region(info->regs_res->start,
resource_size(info->regs_res));
err_release:
return ret;
}
static void sm501fb_stop(struct sm501fb_info *info)
{
/* disable display controller */
sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0);
iounmap(info->fbmem);
release_mem_region(info->fbmem_res->start,
resource_size(info->fbmem_res));
iounmap(info->regs2d);
release_mem_region(info->regs2d_res->start,
resource_size(info->regs2d_res));
iounmap(info->regs);
release_mem_region(info->regs_res->start,
resource_size(info->regs_res));
}
static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
const char *fbname)
{
struct sm501_platdata_fbsub *pd;
struct sm501fb_par *par = fb->par;
struct sm501fb_info *info = par->info;
unsigned long ctrl;
unsigned int enable;
int ret;
switch (head) {
case HEAD_CRT:
pd = info->pdata->fb_crt;
ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
enable = (ctrl & SM501_DC_CRT_CONTROL_ENABLE) ? 1 : 0;
/* ensure we set the correct source register */
if (info->pdata->fb_route != SM501_FB_CRT_PANEL) {
ctrl |= SM501_DC_CRT_CONTROL_SEL;
smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
}
break;
case HEAD_PANEL:
pd = info->pdata->fb_pnl;
ctrl = smc501_readl(info->regs + SM501_DC_PANEL_CONTROL);
enable = (ctrl & SM501_DC_PANEL_CONTROL_EN) ? 1 : 0;
break;
default:
pd = NULL; /* stop compiler warnings */
ctrl = 0;
enable = 0;
BUG();
}
dev_info(info->dev, "fb %s %sabled at start\n",
fbname, enable ? "en" : "dis");
/* check to see if our routing allows this */
if (head == HEAD_CRT && info->pdata->fb_route == SM501_FB_CRT_PANEL) {
ctrl &= ~SM501_DC_CRT_CONTROL_SEL;
smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
enable = 0;
}
strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
sizeof(struct fb_ops));
/* update ops dependent on what we've been passed */
if ((pd->flags & SM501FB_FLAG_USE_HWCURSOR) == 0)
par->ops.fb_cursor = NULL;
fb->fbops = &par->ops;
fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST |
FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
#if defined(CONFIG_OF)
#ifdef __BIG_ENDIAN
if (of_get_property(info->dev->parent->of_node, "little-endian", NULL))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#else
if (of_get_property(info->dev->parent->of_node, "big-endian", NULL))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#endif
#endif
/* fixed data */
fb->fix.type = FB_TYPE_PACKED_PIXELS;
fb->fix.type_aux = 0;
fb->fix.xpanstep = 1;
fb->fix.ypanstep = 1;
fb->fix.ywrapstep = 0;
fb->fix.accel = FB_ACCEL_NONE;
/* screenmode */
fb->var.nonstd = 0;
fb->var.activate = FB_ACTIVATE_NOW;
fb->var.accel_flags = 0;
fb->var.vmode = FB_VMODE_NONINTERLACED;
fb->var.bits_per_pixel = 16;
if (info->edid_data) {
/* Now build modedb from EDID */
fb_edid_to_monspecs(info->edid_data, &fb->monspecs);
fb_videomode_to_modelist(fb->monspecs.modedb,
fb->monspecs.modedb_len,
&fb->modelist);
}
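	/* the "&& 0" below deliberately disables this path until reading
	 * back the current display mode is implemented */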
if (enable && (pd->flags & SM501FB_FLAG_USE_INIT_MODE) && 0) {
/* TODO read the mode from the current display */
} else {
if (pd->def_mode) {
dev_info(info->dev, "using supplied mode\n");
fb_videomode_to_var(&fb->var, pd->def_mode);
fb->var.bits_per_pixel = pd->def_bpp ? pd->def_bpp : 8;
fb->var.xres_virtual = fb->var.xres;
fb->var.yres_virtual = fb->var.yres;
} else {
if (info->edid_data) {
ret = fb_find_mode(&fb->var, fb, fb_mode,
fb->monspecs.modedb,
fb->monspecs.modedb_len,
&sm501_default_mode, default_bpp);
/* edid_data is no longer needed, free it */
kfree(info->edid_data);
} else {
ret = fb_find_mode(&fb->var, fb,
NULL, NULL, 0, NULL, 8);
}
switch (ret) {
case 1:
dev_info(info->dev, "using mode specified in "
"@mode\n");
break;
case 2:
dev_info(info->dev, "using mode specified in "
"@mode with ignored refresh rate\n");
break;
case 3:
dev_info(info->dev, "using mode default "
"mode\n");
break;
case 4:
dev_info(info->dev, "using mode from list\n");
break;
default:
dev_info(info->dev, "ret = %d\n", ret);
dev_info(info->dev, "failed to find mode\n");
return -EINVAL;
}
}
}
/* initialise and set the palette */
if (fb_alloc_cmap(&fb->cmap, NR_PALETTE, 0)) {
dev_err(info->dev, "failed to allocate cmap memory\n");
return -ENOMEM;
}
fb_set_cmap(&fb->cmap, fb);
ret = (fb->fbops->fb_check_var)(&fb->var, fb);
if (ret)
dev_err(info->dev, "check_var() failed on initial setup?\n");
return 0;
}
/* default platform data if none is supplied (ie, PCI device) */
static struct sm501_platdata_fbsub sm501fb_pdata_crt = {
.flags = (SM501FB_FLAG_USE_INIT_MODE |
SM501FB_FLAG_USE_HWCURSOR |
SM501FB_FLAG_USE_HWACCEL |
SM501FB_FLAG_DISABLE_AT_EXIT),
};
static struct sm501_platdata_fbsub sm501fb_pdata_pnl = {
.flags = (SM501FB_FLAG_USE_INIT_MODE |
SM501FB_FLAG_USE_HWCURSOR |
SM501FB_FLAG_USE_HWACCEL |
SM501FB_FLAG_DISABLE_AT_EXIT),
};
static struct sm501_platdata_fb sm501fb_def_pdata = {
.fb_route = SM501_FB_OWN,
.fb_crt = &sm501fb_pdata_crt,
.fb_pnl = &sm501fb_pdata_pnl,
};
static char driver_name_crt[] = "sm501fb-crt";
static char driver_name_pnl[] = "sm501fb-panel";
static int sm501fb_probe_one(struct sm501fb_info *info,
enum sm501_controller head)
{
	const char *name = (head == HEAD_CRT) ? "crt" : "panel";
struct sm501_platdata_fbsub *pd;
struct sm501fb_par *par;
struct fb_info *fbi;
pd = (head == HEAD_CRT) ? info->pdata->fb_crt : info->pdata->fb_pnl;
/* Do not initialise if we've not been given any platform data */
if (pd == NULL) {
dev_info(info->dev, "no data for fb %s (disabled)\n", name);
return 0;
}
fbi = framebuffer_alloc(sizeof(struct sm501fb_par), info->dev);
if (fbi == NULL) {
dev_err(info->dev, "cannot allocate %s framebuffer\n", name);
return -ENOMEM;
}
par = fbi->par;
par->info = info;
par->head = head;
fbi->pseudo_palette = &par->pseudo_palette;
info->fb[head] = fbi;
return 0;
}
/* Free up anything allocated by sm501fb_init_fb */
static void sm501_free_init_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
fb_dealloc_cmap(&fbi->cmap);
}
static int sm501fb_start_one(struct sm501fb_info *info,
enum sm501_controller head, const char *drvname)
{
struct fb_info *fbi = info->fb[head];
int ret;
if (!fbi)
return 0;
mutex_init(&info->fb[head]->mm_lock);
ret = sm501fb_init_fb(info->fb[head], head, drvname);
if (ret) {
dev_err(info->dev, "cannot initialise fb %s\n", drvname);
return ret;
}
ret = register_framebuffer(info->fb[head]);
if (ret) {
dev_err(info->dev, "failed to register fb %s\n", drvname);
sm501_free_init_fb(info, head);
return ret;
}
dev_info(info->dev, "fb%d: %s frame buffer\n", fbi->node, fbi->fix.id);
return 0;
}
static int sm501fb_probe(struct platform_device *pdev)
{
struct sm501fb_info *info;
struct device *dev = &pdev->dev;
int ret;
/* allocate our framebuffers */
info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "failed to allocate state\n");
return -ENOMEM;
}
info->dev = dev = &pdev->dev;
platform_set_drvdata(pdev, info);
if (dev->parent->platform_data) {
struct sm501_platdata *pd = dev->parent->platform_data;
info->pdata = pd->fb;
}
if (info->pdata == NULL) {
int found = 0;
#if defined(CONFIG_OF)
struct device_node *np = pdev->dev.parent->of_node;
const u8 *prop;
const char *cp;
int len;
info->pdata = &sm501fb_def_pdata;
if (np) {
/* Get EDID */
cp = of_get_property(np, "mode", &len);
if (cp)
strcpy(fb_mode, cp);
prop = of_get_property(np, "edid", &len);
if (prop && len == EDID_LENGTH) {
info->edid_data = kmemdup(prop, EDID_LENGTH,
GFP_KERNEL);
if (info->edid_data)
found = 1;
}
}
#endif
if (!found) {
dev_info(dev, "using default configuration data\n");
info->pdata = &sm501fb_def_pdata;
}
}
/* probe for the presence of each panel */
ret = sm501fb_probe_one(info, HEAD_CRT);
if (ret < 0) {
dev_err(dev, "failed to probe CRT\n");
goto err_alloc;
}
ret = sm501fb_probe_one(info, HEAD_PANEL);
if (ret < 0) {
dev_err(dev, "failed to probe PANEL\n");
goto err_probed_crt;
}
	if (info->fb[HEAD_PANEL] == NULL &&
	    info->fb[HEAD_CRT] == NULL) {
		dev_err(dev, "no framebuffers found\n");
		ret = -ENODEV;
		goto err_alloc;
	}
/* get the resources for both of the framebuffers */
ret = sm501fb_start(info, pdev);
if (ret) {
dev_err(dev, "cannot initialise SM501\n");
goto err_probed_panel;
}
ret = sm501fb_start_one(info, HEAD_CRT, driver_name_crt);
if (ret) {
dev_err(dev, "failed to start CRT\n");
goto err_started;
}
ret = sm501fb_start_one(info, HEAD_PANEL, driver_name_pnl);
if (ret) {
dev_err(dev, "failed to start Panel\n");
goto err_started_crt;
}
/* create device files */
ret = device_create_file(dev, &dev_attr_crt_src);
if (ret)
goto err_started_panel;
ret = device_create_file(dev, &dev_attr_fbregs_pnl);
if (ret)
goto err_attached_crtsrc_file;
ret = device_create_file(dev, &dev_attr_fbregs_crt);
if (ret)
goto err_attached_pnlregs_file;
/* we registered, return ok */
return 0;
err_attached_pnlregs_file:
device_remove_file(dev, &dev_attr_fbregs_pnl);
err_attached_crtsrc_file:
device_remove_file(dev, &dev_attr_crt_src);
err_started_panel:
unregister_framebuffer(info->fb[HEAD_PANEL]);
sm501_free_init_fb(info, HEAD_PANEL);
err_started_crt:
unregister_framebuffer(info->fb[HEAD_CRT]);
sm501_free_init_fb(info, HEAD_CRT);
err_started:
sm501fb_stop(info);
err_probed_panel:
framebuffer_release(info->fb[HEAD_PANEL]);
err_probed_crt:
framebuffer_release(info->fb[HEAD_CRT]);
err_alloc:
kfree(info);
return ret;
}
/*
* Cleanup
*/
static int sm501fb_remove(struct platform_device *pdev)
{
struct sm501fb_info *info = platform_get_drvdata(pdev);
	struct fb_info *fbinfo_crt = info->fb[HEAD_CRT];
	struct fb_info *fbinfo_pnl = info->fb[HEAD_PANEL];
device_remove_file(&pdev->dev, &dev_attr_fbregs_crt);
device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl);
device_remove_file(&pdev->dev, &dev_attr_crt_src);
sm501_free_init_fb(info, HEAD_CRT);
sm501_free_init_fb(info, HEAD_PANEL);
unregister_framebuffer(fbinfo_crt);
unregister_framebuffer(fbinfo_pnl);
sm501fb_stop(info);
kfree(info);
framebuffer_release(fbinfo_pnl);
framebuffer_release(fbinfo_crt);
return 0;
}
#ifdef CONFIG_PM
static int sm501fb_suspend_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
struct sm501fb_par *par = fbi->par;
if (par->screen.size == 0)
return 0;
/* blank the relevant interface to ensure unit power minimised */
(par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi);
/* tell console/fb driver we are suspending */
console_lock();
fb_set_suspend(fbi, 1);
console_unlock();
/* backup copies in case chip is powered down over suspend */
par->store_fb = vmalloc(par->screen.size);
if (par->store_fb == NULL) {
dev_err(info->dev, "no memory to store screen\n");
return -ENOMEM;
}
par->store_cursor = vmalloc(par->cursor.size);
if (par->store_cursor == NULL) {
dev_err(info->dev, "no memory to store cursor\n");
goto err_nocursor;
}
dev_dbg(info->dev, "suspending screen to %p\n", par->store_fb);
dev_dbg(info->dev, "suspending cursor to %p\n", par->store_cursor);
memcpy_fromio(par->store_fb, par->screen.k_addr, par->screen.size);
memcpy_fromio(par->store_cursor, par->cursor.k_addr, par->cursor.size);
return 0;
err_nocursor:
vfree(par->store_fb);
par->store_fb = NULL;
return -ENOMEM;
}
static void sm501fb_resume_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
struct sm501fb_par *par = fbi->par;
if (par->screen.size == 0)
return;
/* re-activate the configuration */
(par->ops.fb_set_par)(fbi);
/* restore the data */
dev_dbg(info->dev, "restoring screen from %p\n", par->store_fb);
dev_dbg(info->dev, "restoring cursor from %p\n", par->store_cursor);
if (par->store_fb)
memcpy_toio(par->screen.k_addr, par->store_fb,
par->screen.size);
if (par->store_cursor)
memcpy_toio(par->cursor.k_addr, par->store_cursor,
par->cursor.size);
console_lock();
fb_set_suspend(fbi, 0);
console_unlock();
vfree(par->store_fb);
vfree(par->store_cursor);
}
/* suspend and resume support */
static int sm501fb_suspend(struct platform_device *pdev, pm_message_t state)
{
struct sm501fb_info *info = platform_get_drvdata(pdev);
/* store crt control to resume with */
info->pm_crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
sm501fb_suspend_fb(info, HEAD_CRT);
sm501fb_suspend_fb(info, HEAD_PANEL);
/* turn off the clocks, in case the device is not powered down */
sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0);
return 0;
}
#define SM501_CRT_CTRL_SAVE (SM501_DC_CRT_CONTROL_TVP | \
SM501_DC_CRT_CONTROL_SEL)
static int sm501fb_resume(struct platform_device *pdev)
{
struct sm501fb_info *info = platform_get_drvdata(pdev);
unsigned long crt_ctrl;
sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 1);
/* restore the items we want to be saved for crt control */
crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
crt_ctrl &= ~SM501_CRT_CTRL_SAVE;
crt_ctrl |= info->pm_crt_ctrl & SM501_CRT_CTRL_SAVE;
smc501_writel(crt_ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_resume_fb(info, HEAD_CRT);
sm501fb_resume_fb(info, HEAD_PANEL);
return 0;
}
#else
#define sm501fb_suspend NULL
#define sm501fb_resume NULL
#endif
static struct platform_driver sm501fb_driver = {
.probe = sm501fb_probe,
.remove = sm501fb_remove,
.suspend = sm501fb_suspend,
.resume = sm501fb_resume,
.driver = {
.name = "sm501-fb",
.owner = THIS_MODULE,
},
};
module_platform_driver(sm501fb_driver);
module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
		 "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
module_param_named(bpp, default_bpp, ulong, 0);
MODULE_PARM_DESC(bpp, "Specify bits-per-pixel if not given in the mode string");
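/*
 * Example (parameter values are illustrative):
 *	modprobe sm501fb mode=1024x768-16@60
 */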
MODULE_AUTHOR("Ben Dooks, Vincent Sanders");
MODULE_DESCRIPTION("SM501 Framebuffer driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
desaishivam26/android_kernel_motorola_msm8916 | arch/s390/mm/fault.c | 918 | 17785 | /*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
#define VM_FAULT_SIGNAL 0x080000
static unsigned long store_indication __read_mostly;
#ifdef CONFIG_64BIT
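/*
 * With facility bit 75 the translation exception code indicates
 * whether the faulting access was a store, so do_exception() can set
 * FAULT_FLAG_WRITE right away instead of taking a second fault.
 */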
static int __init fault_init(void)
{
if (test_facility(75))
store_indication = 0xc00;
return 0;
}
early_initcall(fault_init);
#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Unlock any spinlocks which will prevent us from getting the
* message out.
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
int loglevel_save = console_loglevel;
console_unblank();
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15;
printk(" ");
console_loglevel = loglevel_save;
}
}
/*
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
static inline int user_space_fault(unsigned long trans_exc_code)
{
/*
* The lowest two bits of the translation exception
* identification indicate which paging table was used.
*/
trans_exc_code &= 3;
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
if (s390_user_mode == HOME_SPACE_MODE)
/* User space if the access has been done via home space. */
return trans_exc_code == 3;
/*
* If the user space is not the home space the kernel runs in home
* space. Access via secondary space has already been covered,
* access via primary space or access register is from user space
* and access via home space is from the kernel.
*/
return trans_exc_code != 3;
}
static inline void report_user_fault(struct pt_regs *regs, long signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk(KERN_ALERT "User process fault: interruption code 0x%X ",
regs->int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk(KERN_CONT "\n");
printk(KERN_ALERT "failing address: %lX\n",
regs->int_parm_long & __FAIL_ADDR_MASK);
show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
struct siginfo si;
report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = si_code;
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
}
static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long address;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
address = regs->int_parm_long & __FAIL_ADDR_MASK;
if (!user_space_fault(regs->int_parm_long))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
die(regs, "Oops");
do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/* A low-address protection hit in kernel mode means a
	   NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
die (regs, "Low-address protection");
do_exit(SIGKILL);
}
do_no_context(regs);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGBUS, &si, tsk);
}
static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, si_code);
return;
}
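		/* fall through: a kernel-mode bad access is handled
		 * like a bad context */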
case VM_FAULT_BADCONTEXT:
do_no_context(regs);
break;
case VM_FAULT_SIGNAL:
if (!user_mode(regs))
do_no_context(regs);
break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
if (!user_mode(regs))
do_no_context(regs);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
do_no_context(regs);
else
do_sigbus(regs);
} else
BUG();
break;
}
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* interruption code (int_code):
 * 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
static inline int do_exception(struct pt_regs *regs, int access)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
int fault;
tsk = current;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
if (notify_page_fault(regs))
return 0;
mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
#ifdef CONFIG_PGSTE
if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
address = __gmap_fault(address,
(struct gmap *) S390_lowcore.gmap);
if (address == -EFAULT) {
fault = VM_FAULT_BADMAP;
goto out_up;
}
if (address == -ENOMEM) {
fault = VM_FAULT_OOM;
goto out_up;
}
}
#endif
retry:
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
if (expand_stack(vma, address))
goto out_up;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
down_read(&mm->mmap_sem);
goto retry;
}
}
fault = 0;
out_up:
up_read(&mm->mmap_sem);
out:
return fault;
}
void __kprobes do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
int fault;
trans_exc_code = regs->int_parm_long;
/*
* Protection exceptions are suppressing, decrement psw address.
* The exception to this rule are aborted transactions, for these
* the PSW already points to the correct location.
*/
if (!(regs->int_code & 0x200))
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
do_low_address(regs);
return;
}
fault = do_exception(regs, VM_WRITE);
if (unlikely(fault))
do_fault_error(regs, fault);
}
void __kprobes do_dat_exception(struct pt_regs *regs)
{
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access);
if (unlikely(fault))
do_fault_error(regs, fault);
}
#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long trans_exc_code;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(current, TIF_PER_TRAP);
trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
up_read(&mm->mmap_sem);
if (vma) {
update_mm(mm, current);
return;
}
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_sigsegv(regs, SEGV_MAPERR);
return;
}
no_context:
do_no_context(regs);
}
#endif
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
/* Emulate a uaccess fault from kernel mode. */
regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
regs.int_code = pgm_int_code;
regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
access = write ? VM_WRITE : VM_READ;
	fault = do_exception(&regs, access);
/*
* Since the fault happened in kernel mode while performing a uaccess
* all we need to do now is emulating a fixup in case "fault" is not
* zero.
* For the calling uaccess functions this results always in -EFAULT.
*/
return fault ? -EFAULT : 0;
}
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
*/
static int pfault_disable;
static int __init nopfault(char *str)
{
pfault_disable = 1;
return 1;
}
__setup("nopfault", nopfault);
struct pfault_refbk {
u16 refdiagc;
u16 reffcode;
u16 refdwlen;
u16 refversn;
u64 refgaddr;
u64 refselmk;
u64 refcmpmk;
u64 reserved;
} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_CURRENT_PID,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD };
int rc;
if (pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
"0: j 2f\n"
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
return rc;
}
void pfault_fini(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
if (pfault_disable)
return;
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
static void pfault_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
pid_t pid;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
*/
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
if (!tsk)
return;
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
list_del(&tsk->thread.list);
wake_up_process(tsk);
put_task_struct(tsk);
} else {
/* Completion interrupt was faster than initial
* interrupt. Set pfault_wait to -1 so the initial
* interrupt doesn't put the task to sleep.
* If the task is not running, ignore the completion
* interrupt since it must be a leftover of a PFAULT
* CANCEL operation which didn't remove all pending
* completion interrupts. */
if (tsk->state == TASK_RUNNING)
tsk->thread.pfault_wait = -1;
}
} else {
/* signal bit not set -> a real page is missing. */
if (WARN_ON_ONCE(tsk != current))
goto out;
if (tsk->thread.pfault_wait == 1) {
/* Already on the list with a reference: put to sleep */
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
} else if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
* back to zero and exit. */
tsk->thread.pfault_wait = 0;
} else {
/* Initial interrupt arrived before completion
* interrupt. Let the task sleep.
* An extra task reference is needed since a different
* cpu may set the task state to TASK_RUNNING again
* before the scheduler is reached. */
get_task_struct(tsk);
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
out:
spin_unlock(&pfault_lock);
put_task_struct(tsk);
}
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
put_task_struct(tsk);
}
spin_unlock_irq(&pfault_lock);
break;
default:
break;
}
return NOTIFY_OK;
}
static int __init pfault_irq_init(void)
{
int rc;
rc = register_external_interrupt(0x2603, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
service_subclass_irq_register();
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
| gpl-2.0 |
Keff/skull-kernel | drivers/target/iscsi/iscsi_target_configfs.c | 1174 | 51930 | /*******************************************************************************
* This file contains the configfs implementation for iSCSI Target mode
* from the LIO-Target Project.
*
 * © Copyright 2007-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/configfs.h>
#include <linux/export.h>
#include <linux/inet.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_nodeattrib.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_stat.h"
#include "iscsi_target_configfs.h"
struct target_fabric_configfs *lio_target_fabric_configfs;
struct lio_target_configfs_attribute {
struct configfs_attribute attr;
ssize_t (*show)(void *, char *);
ssize_t (*store)(void *, const char *, size_t);
};
struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
struct config_item *item,
struct iscsi_tiqn **tiqn_out)
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
int ret;
if (!tpg) {
pr_err("Unable to locate struct iscsi_portal_group "
"pointer\n");
return NULL;
}
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return NULL;
*tiqn_out = tpg->tpg_tiqn;
return tpg;
}
/* Start items for lio_target_portal_cit */
static ssize_t lio_target_np_show_sctp(
struct se_tpg_np *se_tpg_np,
char *page)
{
struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
struct iscsi_tpg_np, se_tpg_np);
struct iscsi_tpg_np *tpg_np_sctp;
ssize_t rb;
tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
if (tpg_np_sctp)
rb = sprintf(page, "1\n");
else
rb = sprintf(page, "0\n");
return rb;
}
static ssize_t lio_target_np_store_sctp(
struct se_tpg_np *se_tpg_np,
const char *page,
size_t count)
{
struct iscsi_np *np;
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
struct iscsi_tpg_np, se_tpg_np);
struct iscsi_tpg_np *tpg_np_sctp = NULL;
char *endptr;
u32 op;
int ret;
op = simple_strtoul(page, &endptr, 0);
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
}
np = tpg_np->tpg_np;
if (!np) {
pr_err("Unable to locate struct iscsi_np from"
" struct iscsi_tpg_np\n");
return -EINVAL;
}
tpg = tpg_np->tpg;
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
if (op) {
/*
* Use existing np->np_sockaddr for SCTP network portal reference
*/
tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
np->np_ip, tpg_np, ISCSI_SCTP_TCP);
if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
goto out;
} else {
tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
if (!tpg_np_sctp)
goto out;
ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
if (ret < 0)
goto out;
}
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
return -EINVAL;
}
TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_portal_attrs[] = {
&lio_target_np_sctp.attr,
NULL,
};
/* Stop items for lio_target_portal_cit */
/* Start items for lio_target_np_cit */
#define MAX_PORTAL_LEN 256
struct se_tpg_np *lio_target_call_addnptotpg(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
char *str, *str2, *ip_str, *port_str;
struct __kernel_sockaddr_storage sockaddr;
struct sockaddr_in *sock_in;
struct sockaddr_in6 *sock_in6;
unsigned long port;
int ret;
char buf[MAX_PORTAL_LEN + 1];
if (strlen(name) > MAX_PORTAL_LEN) {
pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
(int)strlen(name), MAX_PORTAL_LEN);
return ERR_PTR(-EOVERFLOW);
}
memset(buf, 0, MAX_PORTAL_LEN + 1);
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
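	/*
	 * Two portal formats are accepted: "[ipv6-address]:port" when a
	 * leading '[' is present, otherwise "ipv4-address:port".
	 */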
str = strstr(buf, "[");
if (str) {
const char *end;
str2 = strstr(str, "]");
if (!str2) {
pr_err("Unable to locate trailing \"]\""
" in IPv6 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
str++; /* Skip over leading "[" */
*str2 = '\0'; /* Terminate the IPv6 address */
str2++; /* Skip over the "]" */
port_str = strstr(str2, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
" in IPv6 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
ret = strict_strtoul(port_str, 0, &port);
if (ret < 0) {
pr_err("strict_strtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in6 = (struct sockaddr_in6 *)&sockaddr;
sock_in6->sin6_family = AF_INET6;
sock_in6->sin6_port = htons((unsigned short)port);
ret = in6_pton(str, IPV6_ADDRESS_SPACE,
(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
if (ret <= 0) {
pr_err("in6_pton returned: %d\n", ret);
return ERR_PTR(-EINVAL);
}
} else {
str = ip_str = &buf[0];
port_str = strstr(ip_str, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
" in IPv4 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
ret = strict_strtoul(port_str, 0, &port);
if (ret < 0) {
pr_err("strict_strtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in = (struct sockaddr_in *)&sockaddr;
sock_in->sin_family = AF_INET;
sock_in->sin_port = htons((unsigned short)port);
sock_in->sin_addr.s_addr = in_aton(ip_str);
}
tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
" PORTAL: %s\n",
config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
tpg->tpgt, name);
/*
* Assume ISCSI_TCP by default. Other network portals for other
* iSCSI fabrics:
*
* Traditional iSCSI over SCTP (initial support)
* iSER/TCP (TODO, hardware available)
* iSER/SCTP (TODO, software emulation with osc-iwarp)
* iSER/IB (TODO, hardware available)
*
 * can be enabled with attributes under
* sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
ISCSI_TCP);
if (IS_ERR(tpg_np)) {
iscsit_put_tpg(tpg);
return ERR_CAST(tpg_np);
}
pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
iscsit_put_tpg(tpg);
return &tpg_np->se_tpg_np;
}
static void lio_target_call_delnpfromtpg(
struct se_tpg_np *se_tpg_np)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
struct se_portal_group *se_tpg;
int ret;
tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
tpg = tpg_np->tpg;
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return;
se_tpg = &tpg->tpg_se_tpg;
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
" PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
if (ret < 0)
goto out;
pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
out:
iscsit_put_tpg(tpg);
}
/* End items for lio_target_np_cit */
/* Start items for lio_target_nacl_attrib_cit */
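/*
 * Generate a matched pair of configfs show/store handlers for one
 * iscsi_node_attrib field; the store side funnels the value through
 * the corresponding iscsit_na_<name>() setter so range checking stays
 * in one place.
 */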
#define DEF_NACL_ATTRIB(name) \
static ssize_t iscsi_nacl_attrib_show_##name( \
struct se_node_acl *se_nacl, \
char *page) \
{ \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
\
return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
} \
\
static ssize_t iscsi_nacl_attrib_store_##name( \
struct se_node_acl *se_nacl, \
const char *page, \
size_t count) \
{ \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
char *endptr; \
u32 val; \
int ret; \
\
val = simple_strtoul(page, &endptr, 0); \
ret = iscsit_na_##name(nacl, val); \
if (ret < 0) \
return ret; \
\
return count; \
}
#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
/*
* Define iscsi_node_attrib_s_dataout_timeout
*/
DEF_NACL_ATTRIB(dataout_timeout);
NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_dataout_timeout_retries
*/
DEF_NACL_ATTRIB(dataout_timeout_retries);
NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_default_erl
*/
DEF_NACL_ATTRIB(default_erl);
NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_nopin_timeout
*/
DEF_NACL_ATTRIB(nopin_timeout);
NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_nopin_response_timeout
*/
DEF_NACL_ATTRIB(nopin_response_timeout);
NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_random_datain_pdu_offsets
*/
DEF_NACL_ATTRIB(random_datain_pdu_offsets);
NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_random_datain_seq_offsets
*/
DEF_NACL_ATTRIB(random_datain_seq_offsets);
NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
/*
* Define iscsi_node_attrib_s_random_r2t_offsets
*/
DEF_NACL_ATTRIB(random_r2t_offsets);
NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_dataout_timeout.attr,
&iscsi_nacl_attrib_dataout_timeout_retries.attr,
&iscsi_nacl_attrib_default_erl.attr,
&iscsi_nacl_attrib_nopin_timeout.attr,
&iscsi_nacl_attrib_nopin_response_timeout.attr,
&iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
&iscsi_nacl_attrib_random_datain_seq_offsets.attr,
&iscsi_nacl_attrib_random_r2t_offsets.attr,
NULL,
};
/* End items for lio_target_nacl_attrib_cit */
/* Start items for lio_target_nacl_auth_cit */
#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
static ssize_t __iscsi_##prefix##_show_##name( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
static ssize_t __iscsi_##prefix##_store_##name( \
struct iscsi_node_acl *nacl, \
const char *page, \
size_t count) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
else \
auth->naf_flags |= flags; \
\
if ((auth->naf_flags & NAF_USERID_IN_SET) && \
(auth->naf_flags & NAF_PASSWORD_IN_SET)) \
auth->authenticate_target = 1; \
else \
auth->authenticate_target = 0; \
\
return count; \
}
#define __DEF_NACL_AUTH_INT(prefix, name) \
static ssize_t __iscsi_##prefix##_show_##name( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
struct iscsi_node_auth *auth = &nacl->node_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
#define DEF_NACL_AUTH_STR(name, flags) \
__DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
static ssize_t iscsi_nacl_auth_show_##name( \
struct se_node_acl *nacl, \
char *page) \
{ \
return __iscsi_nacl_auth_show_##name(container_of(nacl, \
struct iscsi_node_acl, se_node_acl), page); \
} \
static ssize_t iscsi_nacl_auth_store_##name( \
struct se_node_acl *nacl, \
const char *page, \
size_t count) \
{ \
return __iscsi_nacl_auth_store_##name(container_of(nacl, \
struct iscsi_node_acl, se_node_acl), page, count); \
}
#define DEF_NACL_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(nacl_auth, name) \
static ssize_t iscsi_nacl_auth_show_##name( \
struct se_node_acl *nacl, \
char *page) \
{ \
return __iscsi_nacl_auth_show_##name(container_of(nacl, \
struct iscsi_node_acl, se_node_acl), page); \
}
#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
/*
* One-way authentication userid
*/
DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
/*
* One-way authentication password
*/
DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
AUTH_ATTR(password, S_IRUGO | S_IWUSR);
/*
* Enforce mutual authentication
*/
DEF_NACL_AUTH_INT(authenticate_target);
AUTH_ATTR_RO(authenticate_target);
/*
* Mutual authentication userid
*/
DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
/*
* Mutual authentication password
*/
DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
&iscsi_nacl_auth_userid.attr,
&iscsi_nacl_auth_password.attr,
&iscsi_nacl_auth_authenticate_target.attr,
&iscsi_nacl_auth_userid_mutual.attr,
&iscsi_nacl_auth_password_mutual.attr,
NULL,
};
/* End items for lio_target_nacl_auth_cit */
/* Start items for lio_target_nacl_param_cit */
#define DEF_NACL_PARAM(name) \
static ssize_t iscsi_nacl_param_show_##name( \
struct se_node_acl *se_nacl, \
char *page) \
{ \
struct iscsi_session *sess; \
struct se_session *se_sess; \
ssize_t rb; \
\
spin_lock_bh(&se_nacl->nacl_sess_lock); \
se_sess = se_nacl->nacl_sess; \
if (!se_sess) { \
rb = snprintf(page, PAGE_SIZE, \
"No Active iSCSI Session\n"); \
} else { \
sess = se_sess->fabric_sess_ptr; \
rb = snprintf(page, PAGE_SIZE, "%u\n", \
(u32)sess->sess_ops->name); \
} \
spin_unlock_bh(&se_nacl->nacl_sess_lock); \
\
return rb; \
}
#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
DEF_NACL_PARAM(MaxConnections);
NACL_PARAM_ATTR(MaxConnections);
DEF_NACL_PARAM(InitialR2T);
NACL_PARAM_ATTR(InitialR2T);
DEF_NACL_PARAM(ImmediateData);
NACL_PARAM_ATTR(ImmediateData);
DEF_NACL_PARAM(MaxBurstLength);
NACL_PARAM_ATTR(MaxBurstLength);
DEF_NACL_PARAM(FirstBurstLength);
NACL_PARAM_ATTR(FirstBurstLength);
DEF_NACL_PARAM(DefaultTime2Wait);
NACL_PARAM_ATTR(DefaultTime2Wait);
DEF_NACL_PARAM(DefaultTime2Retain);
NACL_PARAM_ATTR(DefaultTime2Retain);
DEF_NACL_PARAM(MaxOutstandingR2T);
NACL_PARAM_ATTR(MaxOutstandingR2T);
DEF_NACL_PARAM(DataPDUInOrder);
NACL_PARAM_ATTR(DataPDUInOrder);
DEF_NACL_PARAM(DataSequenceInOrder);
NACL_PARAM_ATTR(DataSequenceInOrder);
DEF_NACL_PARAM(ErrorRecoveryLevel);
NACL_PARAM_ATTR(ErrorRecoveryLevel);
static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
&iscsi_nacl_param_MaxConnections.attr,
&iscsi_nacl_param_InitialR2T.attr,
&iscsi_nacl_param_ImmediateData.attr,
&iscsi_nacl_param_MaxBurstLength.attr,
&iscsi_nacl_param_FirstBurstLength.attr,
&iscsi_nacl_param_DefaultTime2Wait.attr,
&iscsi_nacl_param_DefaultTime2Retain.attr,
&iscsi_nacl_param_MaxOutstandingR2T.attr,
&iscsi_nacl_param_DataPDUInOrder.attr,
&iscsi_nacl_param_DataSequenceInOrder.attr,
&iscsi_nacl_param_ErrorRecoveryLevel.attr,
NULL,
};
/* End items for lio_target_nacl_param_cit */
/* Start items for lio_target_acl_cit */
static ssize_t lio_target_nacl_show_info(
struct se_node_acl *se_nacl,
char *page)
{
struct iscsi_session *sess;
struct iscsi_conn *conn;
struct se_session *se_sess;
ssize_t rb = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
if (sess->sess_ops->InitiatorName)
rb += sprintf(page+rb, "InitiatorName: %s\n",
sess->sess_ops->InitiatorName);
if (sess->sess_ops->InitiatorAlias)
rb += sprintf(page+rb, "InitiatorAlias: %s\n",
sess->sess_ops->InitiatorAlias);
rb += sprintf(page+rb, "LIO Session ID: %u "
"ISID: 0x%02x %02x %02x %02x %02x %02x "
"TSIH: %hu ", sess->sid,
sess->isid[0], sess->isid[1], sess->isid[2],
sess->isid[3], sess->isid[4], sess->isid[5],
sess->tsih);
rb += sprintf(page+rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
rb += sprintf(page+rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
rb += sprintf(page+rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
rb += sprintf(page+rb, "ERROR: Unknown Session"
" State!\n");
break;
}
rb += sprintf(page+rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
(sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
sess->exp_cmd_sn, sess->max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
rb += sprintf(page+rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
rb += sprintf(page+rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
rb += sprintf(page+rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
rb += sprintf(page+rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
rb += sprintf(page+rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
rb += sprintf(page+rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
rb += sprintf(page+rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
rb += sprintf(page+rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
rb += sprintf(page+rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
rb += sprintf(page+rb,
"ERROR: Unknown Connection State!\n");
break;
}
rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
rb += sprintf(page+rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return rb;
}
TF_NACL_BASE_ATTR_RO(lio_target, info);
static ssize_t lio_target_nacl_show_cmdsn_depth(
struct se_node_acl *se_nacl,
char *page)
{
return sprintf(page, "%u\n", se_nacl->queue_depth);
}
static ssize_t lio_target_nacl_store_cmdsn_depth(
struct se_node_acl *se_nacl,
const char *page,
size_t count)
{
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
char *endptr;
u32 cmdsn_depth = 0;
int ret;
cmdsn_depth = simple_strtoul(page, &endptr, 0);
if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Passed cmdsn_depth: %u exceeds"
" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
TA_DEFAULT_CMDSN_DEPTH_MAX);
return -EINVAL;
}
acl_ci = &se_nacl->acl_group.cg_item;
if (!acl_ci) {
pr_err("Unable to locatel acl_ci\n");
return -EINVAL;
}
tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
if (!tpg_ci) {
pr_err("Unable to locate tpg_ci\n");
return -EINVAL;
}
wwn_ci = &tpg_ci->ci_group->cg_item;
if (!wwn_ci) {
pr_err("Unable to locate config_item wwn_ci\n");
return -EINVAL;
}
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
/*
* iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
*/
ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
config_item_name(acl_ci), cmdsn_depth, 1);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
"InitiatorName: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci), cmdsn_depth,
config_item_name(acl_ci));
iscsit_put_tpg(tpg);
return (!ret) ? count : (ssize_t)ret;
}
TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_info.attr,
&lio_target_nacl_cmdsn_depth.attr,
NULL,
};
static struct se_node_acl *lio_tpg_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
struct iscsi_node_acl *acl;
acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
if (!acl) {
pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
return NULL;
}
return &acl->se_node_acl;
}
static struct se_node_acl *lio_target_make_nodeacl(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct config_group *stats_cg;
struct iscsi_node_acl *acl;
struct se_node_acl *se_nacl_new, *se_nacl;
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
u32 cmdsn_depth;
se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
* when converting a NodeACL from demo mode -> explicit
*/
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
name, cmdsn_depth);
if (IS_ERR(se_nacl))
return se_nacl;
acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
stats_cg = &se_nacl->acl_fabric_stat_group;
stats_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
" stats_cg->default_groups\n");
core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
kfree(acl);
return ERR_PTR(-ENOMEM);
}
stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
stats_cg->default_groups[1] = NULL;
config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
return se_nacl;
}
static void lio_target_drop_nodeacl(
struct se_node_acl *se_nacl)
{
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct iscsi_node_acl *acl = container_of(se_nacl,
struct iscsi_node_acl, se_node_acl);
struct config_item *df_item;
struct config_group *stats_cg;
int i;
stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
for (i = 0; stats_cg->default_groups[i]; i++) {
df_item = &stats_cg->default_groups[i]->cg_item;
stats_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
kfree(stats_cg->default_groups);
core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
kfree(acl);
}
/* End items for lio_target_acl_cit */
/* Start items for lio_target_tpg_attrib_cit */
#define DEF_TPG_ATTRIB(name) \
\
static ssize_t iscsi_tpg_attrib_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
\
static ssize_t iscsi_tpg_attrib_store_##name( \
struct se_portal_group *se_tpg, \
const char *page, \
size_t count) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
char *endptr; \
u32 val; \
int ret; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
val = simple_strtoul(page, &endptr, 0); \
ret = iscsit_ta_##name(tpg, val); \
if (ret < 0) \
goto out; \
\
iscsit_put_tpg(tpg); \
return count; \
out: \
iscsit_put_tpg(tpg); \
return ret; \
}
#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
/*
* Define iscsi_tpg_attrib_s_authentication
*/
DEF_TPG_ATTRIB(authentication);
TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_login_timeout
*/
DEF_TPG_ATTRIB(login_timeout);
TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_netif_timeout
*/
DEF_TPG_ATTRIB(netif_timeout);
TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_generate_node_acls
*/
DEF_TPG_ATTRIB(generate_node_acls);
TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_default_cmdsn_depth
*/
DEF_TPG_ATTRIB(default_cmdsn_depth);
TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_cache_dynamic_acls
*/
DEF_TPG_ATTRIB(cache_dynamic_acls);
TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_demo_mode_write_protect
*/
DEF_TPG_ATTRIB(demo_mode_write_protect);
TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
/*
* Define iscsi_tpg_attrib_s_prod_mode_write_protect
*/
DEF_TPG_ATTRIB(prod_mode_write_protect);
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
&iscsi_tpg_attrib_login_timeout.attr,
&iscsi_tpg_attrib_netif_timeout.attr,
&iscsi_tpg_attrib_generate_node_acls.attr,
&iscsi_tpg_attrib_default_cmdsn_depth.attr,
&iscsi_tpg_attrib_cache_dynamic_acls.attr,
&iscsi_tpg_attrib_demo_mode_write_protect.attr,
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
NULL,
};
/* End items for lio_target_tpg_attrib_cit */
/* Start items for lio_target_tpg_param_cit */
#define DEF_TPG_PARAM(name) \
static ssize_t iscsi_tpg_param_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
param = iscsi_find_param_from_key(__stringify(name), \
tpg->param_list); \
if (!param) { \
iscsit_put_tpg(tpg); \
return -EINVAL; \
} \
rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
\
iscsit_put_tpg(tpg); \
return rb; \
} \
static ssize_t iscsi_tpg_param_store_##name( \
struct se_portal_group *se_tpg, \
const char *page, \
size_t count) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
char *buf; \
int ret; \
\
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
if (!buf) \
return -ENOMEM; \
snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
\
if (iscsit_get_tpg(tpg) < 0) { \
kfree(buf); \
return -EINVAL; \
} \
\
ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
if (ret < 0) \
goto out; \
\
kfree(buf); \
iscsit_put_tpg(tpg); \
return count; \
out: \
kfree(buf); \
iscsit_put_tpg(tpg); \
return -EINVAL; \
}
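/*
 * Worked example (added comment, not in the original source): writing
 * "CHAP,None\n" to the AuthMethod attribute makes the store routine
 * build buf = "AuthMethod=CHAP,None" (snprintf() prepends the "key="
 * prefix and the trailing newline is overwritten with '\0') before
 * handing it to iscsi_change_param_value() for parsing.
 */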
#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
DEF_TPG_PARAM(AuthMethod);
TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(HeaderDigest);
TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(DataDigest);
TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(MaxConnections);
TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(TargetAlias);
TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(InitialR2T);
TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(ImmediateData);
TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(MaxRecvDataSegmentLength);
TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(MaxBurstLength);
TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(FirstBurstLength);
TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(DefaultTime2Wait);
TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(DefaultTime2Retain);
TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(MaxOutstandingR2T);
TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(DataPDUInOrder);
TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(DataSequenceInOrder);
TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(ErrorRecoveryLevel);
TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(IFMarker);
TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(OFMarker);
TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(IFMarkInt);
TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(OFMarkInt);
TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
&iscsi_tpg_param_AuthMethod.attr,
&iscsi_tpg_param_HeaderDigest.attr,
&iscsi_tpg_param_DataDigest.attr,
&iscsi_tpg_param_MaxConnections.attr,
&iscsi_tpg_param_TargetAlias.attr,
&iscsi_tpg_param_InitialR2T.attr,
&iscsi_tpg_param_ImmediateData.attr,
&iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
&iscsi_tpg_param_MaxBurstLength.attr,
&iscsi_tpg_param_FirstBurstLength.attr,
&iscsi_tpg_param_DefaultTime2Wait.attr,
&iscsi_tpg_param_DefaultTime2Retain.attr,
&iscsi_tpg_param_MaxOutstandingR2T.attr,
&iscsi_tpg_param_DataPDUInOrder.attr,
&iscsi_tpg_param_DataSequenceInOrder.attr,
&iscsi_tpg_param_ErrorRecoveryLevel.attr,
&iscsi_tpg_param_IFMarker.attr,
&iscsi_tpg_param_OFMarker.attr,
&iscsi_tpg_param_IFMarkInt.attr,
&iscsi_tpg_param_OFMarkInt.attr,
NULL,
};
/* End items for lio_target_tpg_param_cit */
/* Start items for lio_target_tpg_cit */
static ssize_t lio_target_tpg_show_enable(
struct se_portal_group *se_tpg,
char *page)
{
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
ssize_t len;
spin_lock(&tpg->tpg_state_lock);
len = sprintf(page, "%d\n",
(tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
spin_unlock(&tpg->tpg_state_lock);
return len;
}
static ssize_t lio_target_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
char *endptr;
u32 op;
int ret = 0;
op = simple_strtoul(page, &endptr, 0);
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
}
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return -EINVAL;
if (op) {
ret = iscsit_tpg_enable_portal_group(tpg);
if (ret < 0)
goto out;
} else {
/*
* iscsit_tpg_disable_portal_group() assumes force=1
*/
ret = iscsit_tpg_disable_portal_group(tpg, 1);
if (ret < 0)
goto out;
}
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
return -EINVAL;
}
TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrs[] = {
&lio_target_tpg_enable.attr,
NULL,
};
/* End items for lio_target_tpg_cit */
/* Start items for lio_target_tiqn_cit */
struct se_portal_group *lio_target_tiqn_addtpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
char *tpgt_str, *end_ptr;
int ret = 0;
unsigned short int tpgt;
tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
/*
* Only tpgt_# directory groups can be created below
* target/iscsi/iqn.superturbodiskarray/
*/
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return NULL;
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
tpg = iscsit_alloc_portal_group(tiqn, tpgt);
if (!tpg)
return NULL;
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
wwn, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
goto out;
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
name);
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
return NULL;
}
void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
*/
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
iscsit_tpg_del_portal_group(tiqn, tpg, 1);
}
/* End items for lio_target_tiqn_cit */
/* Start LIO-Target TIQN struct config_item lio_target_cit */
static ssize_t lio_target_wwn_show_attr_lio_version(
struct target_fabric_configfs *tf,
char *page)
{
return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
}
TF_WWN_ATTR_RO(lio_target, lio_version);
static struct configfs_attribute *lio_target_wwn_attrs[] = {
&lio_target_wwn_lio_version.attr,
NULL,
};
struct se_wwn *lio_target_call_coreaddtiqn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct config_group *stats_cg;
struct iscsi_tiqn *tiqn;
tiqn = iscsit_add_tiqn((unsigned char *)name);
if (IS_ERR(tiqn))
return ERR_CAST(tiqn);
/*
* Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
*/
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
stats_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
" stats_cg->default_groups\n");
iscsit_del_tiqn(tiqn);
return ERR_PTR(-ENOMEM);
}
stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
stats_cg->default_groups[5] = NULL;
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
" %s\n", name);
return &tiqn->tiqn_wwn;
}
void lio_target_call_coredeltiqn(
struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
struct config_item *df_item;
struct config_group *stats_cg;
int i;
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
for (i = 0; stats_cg->default_groups[i]; i++) {
df_item = &stats_cg->default_groups[i]->cg_item;
stats_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
kfree(stats_cg->default_groups);
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
tiqn->tiqn);
iscsit_del_tiqn(tiqn);
}
/* End LIO-Target TIQN struct config_item lio_target_cit */
/* Start lio_target_discovery_auth_cit */
#define DEF_DISC_AUTH_STR(name, flags) \
__DEF_NACL_AUTH_STR(disc, name, flags) \
static ssize_t iscsi_disc_show_##name( \
struct target_fabric_configfs *tf, \
char *page) \
{ \
return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
page); \
} \
static ssize_t iscsi_disc_store_##name( \
struct target_fabric_configfs *tf, \
const char *page, \
size_t count) \
{ \
return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
page, count); \
}
#define DEF_DISC_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(disc, name) \
static ssize_t iscsi_disc_show_##name( \
struct target_fabric_configfs *tf, \
char *page) \
{ \
return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
page); \
}
#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
/*
* One-way authentication userid
*/
DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
/*
* One-way authentication password
*/
DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
/*
* Enforce mutual authentication
*/
DEF_DISC_AUTH_INT(authenticate_target);
DISC_AUTH_ATTR_RO(authenticate_target);
/*
* Mutual authentication userid
*/
DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
/*
* Mutual authentication password
*/
DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
/*
* enforce_discovery_auth
*/
static ssize_t iscsi_disc_show_enforce_discovery_auth(
struct target_fabric_configfs *tf,
char *page)
{
struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
}
static ssize_t iscsi_disc_store_enforce_discovery_auth(
struct target_fabric_configfs *tf,
const char *page,
size_t count)
{
struct iscsi_param *param;
struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
char *endptr;
u32 op;
op = simple_strtoul(page, &endptr, 0);
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for enforce_discovery_auth:"
" %u\n", op);
return -EINVAL;
}
if (!discovery_tpg) {
pr_err("iscsit_global->discovery_tpg is NULL\n");
return -EINVAL;
}
param = iscsi_find_param_from_key(AUTHMETHOD,
discovery_tpg->param_list);
if (!param)
return -EINVAL;
if (op) {
/*
* Reset the AuthMethod key to CHAP.
*/
if (iscsi_update_param_value(param, CHAP) < 0)
return -EINVAL;
discovery_tpg->tpg_attrib.authentication = 1;
iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
pr_debug("LIO-CORE[0] Successfully enabled"
" authentication enforcement for iSCSI"
" Discovery TPG\n");
} else {
/*
* Reset the AuthMethod key to CHAP,None
*/
if (iscsi_update_param_value(param, "CHAP,None") < 0)
return -EINVAL;
discovery_tpg->tpg_attrib.authentication = 0;
iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
pr_debug("LIO-CORE[0] Successfully disabled"
" authentication enforcement for iSCSI"
" Discovery TPG\n");
}
return count;
}
DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
&iscsi_disc_userid.attr,
&iscsi_disc_password.attr,
&iscsi_disc_authenticate_target.attr,
&iscsi_disc_userid_mutual.attr,
&iscsi_disc_password_mutual.attr,
&iscsi_disc_enforce_discovery_auth.attr,
NULL,
};
/* End lio_target_discovery_auth_cit */
/* Start functions for target_core_fabric_ops */
static char *iscsi_get_fabric_name(void)
{
return "iSCSI";
}
static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
return cmd->init_task_tag;
}
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
return cmd->i_state;
}
static u32 lio_sess_get_index(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
return sess->session_index;
}
static u32 lio_sess_get_initiator_sid(
struct se_session *se_sess,
unsigned char *buf,
u32 size)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
/*
* iSCSI Initiator Session Identifier from RFC-3720.
*/
return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
sess->isid[0], sess->isid[1], sess->isid[2],
sess->isid[3], sess->isid[4], sess->isid[5]);
}
static int lio_queue_data_in(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_DATAIN;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
static int lio_write_pending(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
if (!cmd->immediate_data && !cmd->unsolicited_data)
return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
return 0;
}
static int lio_write_pending_status(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
int ret;
spin_lock_bh(&cmd->istate_lock);
ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
spin_unlock_bh(&cmd->istate_lock);
return ret;
}
static int lio_queue_status(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
{
unsigned char *buffer = se_cmd->sense_buffer;
/*
* From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
* 16-bit SenseLength.
*/
buffer[0] = ((sense_length >> 8) & 0xff);
buffer[1] = (sense_length & 0xff);
/*
* Return two byte offset into allocated sense_buffer.
*/
return 2;
}
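/*
 * Worked example (added comment): for sense_length = 0x0012 the
 * big-endian SenseLength header becomes buffer[0] = 0x00 and
 * buffer[1] = 0x12, and the SCSI sense data itself starts at the
 * returned offset of two bytes.
 */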
static u16 lio_get_fabric_sense_len(void)
{
/*
* Return two byte offset into allocated sense_buffer.
*/
return 2;
}
static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return &tpg->tpg_tiqn->tiqn[0];
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return tpg->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
}
static void lio_tpg_release_fabric_acl(
struct se_portal_group *se_tpg,
struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = container_of(se_acl,
struct iscsi_node_acl, se_node_acl);
kfree(acl);
}
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock) held.
*
* Also, this function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
iscsit_stop_session(sess, 1, 1);
return 1;
}
/*
* Calls iscsit_dec_session_usage_count() as inverse of
* lio_tpg_shutdown_session()
*/
static void lio_tpg_close_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
/*
* If the iSCSI Session for the iSCSI Initiator Node exists,
* forcefully shut down the iSCSI NEXUS.
*/
iscsit_close_session(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return tpg->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
se_node_acl);
ISCSI_NODE_ATTRIB(acl)->nacl = acl;
iscsit_set_default_node_attribues(acl);
}
static void lio_release_cmd(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
iscsit_release_cmd(cmd);
}
/* End functions for target_core_fabric_ops */
int iscsi_target_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
lio_target_fabric_configfs = NULL;
fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
if (IS_ERR(fabric)) {
pr_err("target_fabric_configfs_init() for"
" LIO-Target failed!\n");
return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod.
*/
fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
fabric->tf_ops.tpg_get_pr_transport_id_len =
&iscsi_get_pr_transport_id_len;
fabric->tf_ops.tpg_parse_pr_out_transport_id =
&iscsi_parse_pr_out_transport_id;
fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
fabric->tf_ops.tpg_check_demo_mode_cache =
&lio_tpg_check_demo_mode_cache;
fabric->tf_ops.tpg_check_demo_mode_write_protect =
&lio_tpg_check_demo_mode_write_protect;
fabric->tf_ops.tpg_check_prod_mode_write_protect =
&lio_tpg_check_prod_mode_write_protect;
fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
fabric->tf_ops.release_cmd = &lio_release_cmd;
fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
fabric->tf_ops.close_session = &lio_tpg_close_session;
fabric->tf_ops.sess_get_index = &lio_sess_get_index;
fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
fabric->tf_ops.write_pending = &lio_write_pending;
fabric->tf_ops.write_pending_status = &lio_write_pending_status;
fabric->tf_ops.set_default_node_attributes =
&lio_set_default_node_attributes;
fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
fabric->tf_ops.queue_data_in = &lio_queue_data_in;
fabric->tf_ops.queue_status = &lio_queue_status;
fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
*/
fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
fabric->tf_ops.fabric_post_link = NULL;
fabric->tf_ops.fabric_pre_unlink = NULL;
fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
* struct config_item_types
*/
TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
pr_err("target_fabric_configfs_register() for"
" LIO-Target failed!\n");
target_fabric_configfs_free(fabric);
return ret;
}
lio_target_fabric_configfs = fabric;
pr_debug("LIO_TARGET[0] - Set fabric ->"
" lio_target_fabric_configfs\n");
return 0;
}
void iscsi_target_deregister_configfs(void)
{
if (!lio_target_fabric_configfs)
return;
/*
* Shutdown discovery sessions and disable discovery TPG
*/
if (iscsit_global->discovery_tpg)
iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
target_fabric_configfs_deregister(lio_target_fabric_configfs);
lio_target_fabric_configfs = NULL;
pr_debug("LIO_TARGET[0] - Cleared"
" lio_target_fabric_configfs\n");
}
| gpl-2.0 |
gbtian/linux_mpip | arch/arm64/kvm/reset.c | 1686 | 2696 | /*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/reset.c
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <kvm/arm_arch_timer.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
/*
* ARMv8 Reset Values
*/
static const struct kvm_regs default_regs_reset = {
.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
PSR_F_BIT | PSR_D_BIT),
};
static const struct kvm_regs default_regs_reset32 = {
.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};
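/*
 * Note (added comment, based on the architectural definitions): the
 * 64-bit reset value selects EL1h with the D, A, I and F exception
 * masks set, and the 32-bit value selects SVC mode with asynchronous
 * aborts, IRQs and FIQs masked, so in both cases the guest starts with
 * all interrupts masked, as a physical CPU would out of reset.
 */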
static const struct kvm_irq_level default_vtimer_irq = {
.irq = 27,
.level = 1,
};
static bool cpu_has_32bit_el1(void)
{
u64 pfr0;
pfr0 = read_cpuid(ID_AA64PFR0_EL1);
return !!(pfr0 & 0x20);
}
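/*
 * Note (added comment, based on the ARMv8 register layout): bits [7:4]
 * of ID_AA64PFR0_EL1 hold the EL1 field, where 0x1 means AArch64 only
 * and 0x2 means AArch64 plus AArch32. Testing bit 5 (pfr0 & 0x20) is
 * therefore equivalent to checking for 32-bit EL1 support.
 */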
int kvm_arch_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_ARM_EL1_32BIT:
r = cpu_has_32bit_el1();
break;
default:
r = 0;
}
return r;
}
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
* values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
const struct kvm_irq_level *cpu_vtimer_irq;
const struct kvm_regs *cpu_reset;
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
cpu_vtimer_irq = &default_vtimer_irq;
break;
}
/* Reset core registers */
memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
/* Reset timer */
kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
return 0;
}
| gpl-2.0 |
imoseyon/leanKernel-shamu | arch/sparc/kernel/leon_pci_grpci1.c | 2454 | 19468 | /*
* leon_pci_grpci1.c: GRPCI1 Host PCI driver
*
* Copyright (C) 2013 Aeroflex Gaisler AB
*
* This GRPCI1 driver does not support PCI interrupts taken from
* GPIO pins. Interrupt generation at PCI parity and system error
* detection is by default turned off since some GRPCI1 cores do
* not support detection. It can be turned on from the bootloader
* using the all_pci_errors property.
*
* Contributors: Daniel Hellstrom <daniel@gaisler.com>
*/
#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/leon_pci.h>
#include <asm/sections.h>
#include <asm/vaddrs.h>
#include <asm/leon.h>
#include <asm/io.h>
#include "irq.h"
/* Enable/Disable Debugging Configuration Space Access */
#undef GRPCI1_DEBUG_CFGACCESS
/*
* GRPCI1 APB Register MAP
*/
struct grpci1_regs {
unsigned int cfg_stat; /* 0x00 Configuration / Status */
unsigned int bar0; /* 0x04 BAR0 (RO) */
unsigned int page0; /* 0x08 PAGE0 (RO) */
unsigned int bar1; /* 0x0C BAR1 (RO) */
unsigned int page1; /* 0x10 PAGE1 */
unsigned int iomap; /* 0x14 IO Map */
unsigned int stat_cmd; /* 0x18 PCI Status & Command (RO) */
unsigned int irq; /* 0x1C Interrupt register */
};
#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
#define PAGE0_BTEN_BIT 0
#define PAGE0_BTEN (1 << PAGE0_BTEN_BIT)
#define CFGSTAT_HOST_BIT 13
#define CFGSTAT_CTO_BIT 8
#define CFGSTAT_HOST (1 << CFGSTAT_HOST_BIT)
#define CFGSTAT_CTO (1 << CFGSTAT_CTO_BIT)
#define IRQ_DPE (1 << 9)
#define IRQ_SSE (1 << 8)
#define IRQ_RMA (1 << 7)
#define IRQ_RTA (1 << 6)
#define IRQ_STA (1 << 5)
#define IRQ_DPED (1 << 4)
#define IRQ_INTD (1 << 3)
#define IRQ_INTC (1 << 2)
#define IRQ_INTB (1 << 1)
#define IRQ_INTA (1 << 0)
#define IRQ_DEF_ERRORS (IRQ_RMA | IRQ_RTA | IRQ_STA)
#define IRQ_ALL_ERRORS (IRQ_DPED | IRQ_DEF_ERRORS | IRQ_SSE | IRQ_DPE)
#define IRQ_INTX (IRQ_INTA | IRQ_INTB | IRQ_INTC | IRQ_INTD)
#define IRQ_MASK_BIT 16
#define DEF_PCI_ERRORS (PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \
PCI_STATUS_REC_MASTER_ABORT)
#define ALL_PCI_ERRORS (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | DEF_PCI_ERRORS)
#define TGT 256
struct grpci1_priv {
struct leon_pci_info info; /* must be on top of this structure */
struct grpci1_regs *regs; /* GRPCI register map */
struct device *dev;
int pci_err_mask; /* STATUS register error mask */
int irq; /* LEON irqctrl GRPCI IRQ */
unsigned char irq_map[4]; /* GRPCI nexus PCI INTX# IRQs */
unsigned int irq_err; /* GRPCI nexus Virt Error IRQ */
/* AHB PCI Windows */
unsigned long pci_area; /* MEMORY */
unsigned long pci_area_end;
unsigned long pci_io; /* I/O */
unsigned long pci_conf; /* CONFIGURATION */
unsigned long pci_conf_end;
unsigned long pci_io_va;
};
static struct grpci1_priv *grpci1priv;
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val);
int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci1_priv *priv = dev->bus->sysdata;
int irq_group;
/* Use default IRQ decoding on PCI BUS0 according slot numbering */
irq_group = slot & 0x3;
pin = ((pin - 1) + irq_group) & 0x3;
return priv->irq_map[pin];
}
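/*
 * Worked example (added comment): a device in slot 2 asserting INTA#
 * (pin = 1) gives irq_group = 2 and a swizzled index of
 * ((1 - 1) + 2) & 0x3 = 2, so the device is routed to the system IRQ
 * behind INTC#, i.e. priv->irq_map[2].
 */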
static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 *pci_conf, tmp, cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
/* do read access */
pci_conf = (u32 *) (priv->pci_conf | (devfn << 8) | (where & 0xfc));
tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
/* check if master abort was received */
if (REGLOAD(priv->regs->cfg_stat) & CFGSTAT_CTO) {
*val = 0xffffffff;
/* Clear Master abort bit in PCI cfg space (is set) */
tmp = REGLOAD(priv->regs->stat_cmd);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
} else {
/* Bus always little endian (unaffected by byte-swapping) */
*val = flip_dword(tmp);
}
return 0;
}
static int grpci1_cfg_r16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xffff & (v >> (8 * (where & 0x3)));
return ret;
}
static int grpci1_cfg_r8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xff & (v >> (8 * (where & 3)));
return ret;
}
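/*
 * Worked example (added comment): a byte read at where = 0x0e fetches
 * the aligned dword at offset 0x0c and extracts bits [23:16]:
 * (0x0e & 3) = 2, so *val = 0xff & (v >> 16).
 */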
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
unsigned int *pci_conf;
u32 cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
return 0;
}
static int grpci1_cfg_w16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where&~3, &v);
if (ret)
return ret;
v = (v & ~(0xffff << (8 * (where & 0x3)))) |
((0xffff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
static int grpci1_cfg_w8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
if (ret != 0)
return ret;
v = (v & ~(0xff << (8 * (where & 0x3)))) |
((0xff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
/* Read from Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci1_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
int ret;
if (PCI_SLOT(devfn) > 15 || busno > 15) {
*val = ~0;
return 0;
}
switch (size) {
case 1:
ret = grpci1_cfg_r8(priv, busno, devfn, where, val);
break;
case 2:
ret = grpci1_cfg_r16(priv, busno, devfn, where, val);
break;
case 4:
ret = grpci1_cfg_r32(priv, busno, devfn, where, val);
break;
default:
ret = -EINVAL;
break;
}
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_read_config: [%02x:%02x:%x] ofs=%d val=%x size=%d\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size);
#endif
return ret;
}
/* Write to Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci1_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
if (PCI_SLOT(devfn) > 15 || busno > 15)
return 0;
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_write_config: [%02x:%02x:%x] ofs=%d size=%d val=%x\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
#endif
switch (size) {
default:
return -EINVAL;
case 1:
return grpci1_cfg_w8(priv, busno, devfn, where, val);
case 2:
return grpci1_cfg_w16(priv, busno, devfn, where, val);
case 4:
return grpci1_cfg_w32(priv, busno, devfn, where, val);
}
}
static struct pci_ops grpci1_ops = {
.read = grpci1_read_config,
.write = grpci1_write_config,
};
/* GENIRQ IRQ chip implementation for grpci1 irqmode=0..2. In configuration
* 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller
* this is not needed and the standard IRQ controller can be used.
*/
static void grpci1_mask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only mask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) & ~(1 << irqidx));
}
static void grpci1_unmask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only unmask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) | (1 << irqidx));
}
static unsigned int grpci1_startup_irq(struct irq_data *data)
{
grpci1_unmask_irq(data);
return 0;
}
static void grpci1_shutdown_irq(struct irq_data *data)
{
grpci1_mask_irq(data);
}
static struct irq_chip grpci1_irq = {
.name = "grpci1",
.irq_startup = grpci1_startup_irq,
.irq_shutdown = grpci1_shutdown_irq,
.irq_mask = grpci1_mask_irq,
.irq_unmask = grpci1_unmask_irq,
};
/* Handle one or multiple IRQs from the PCI core */
static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
{
struct grpci1_priv *priv = grpci1priv;
int i, ack = 0;
unsigned int irqreg;
irqreg = REGLOAD(priv->regs->irq);
irqreg = (irqreg >> IRQ_MASK_BIT) & irqreg;
/* Error Interrupt? */
if (irqreg & IRQ_ALL_ERRORS) {
generic_handle_irq(priv->irq_err);
ack = 1;
}
/* PCI Interrupt? */
if (irqreg & IRQ_INTX) {
/* Call respective PCI Interrupt handler */
for (i = 0; i < 4; i++) {
if (irqreg & (1 << i))
generic_handle_irq(priv->irq_map[i]);
}
ack = 1;
}
/*
* Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
* Controller, this must be done after IRQ sources have been handled to
* avoid double IRQ generation
*/
if (ack)
desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
/* Create a virtual IRQ */
static unsigned int grpci1_build_device_irq(unsigned int irq)
{
unsigned int virq = 0, pil;
pil = 1 << 8;
virq = irq_alloc(irq, pil);
if (virq == 0)
goto out;
irq_set_chip_and_handler_name(virq, &grpci1_irq, handle_simple_irq,
"pcilvl");
irq_set_chip_data(virq, (void *)irq);
out:
return virq;
}
/*
* Initialize mappings AMBA<->PCI, clear IRQ state, setup PCI interface
*
* Target BARs:
* BAR0: unused in this implementation
* BAR1: peripheral DMA to host's memory (size at least 256MByte)
* BAR2..BAR5: not implemented in hardware
*/
void grpci1_hw_init(struct grpci1_priv *priv)
{
u32 ahbadr, bar_sz, data, pciadr;
struct grpci1_regs *regs = priv->regs;
/* set 1:1 mapping between AHB -> PCI memory space */
REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
/* map PCI accesses to target BAR1 to Linux kernel memory 1:1 */
ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN((unsigned long) &_end));
REGSTORE(regs->page1, ahbadr);
/* translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
REGSTORE(regs->iomap, REGLOAD(regs->iomap) & 0x0000ffff);
/* disable and clear pending interrupts */
REGSTORE(regs->irq, 0);
/* Setup BAR0 outside access range so that it does not conflict with
* peripheral DMA. There is no need to set up the PAGE0 register.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
grpci1_cfg_r32(priv, TGT, 0, PCI_BASE_ADDRESS_0, &bar_sz);
bar_sz = ~bar_sz + 1;
pciadr = priv->pci_area - bar_sz;
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, pciadr);
/*
* Setup the Host's PCI Target BAR1 for other peripherals to access,
* and do DMA to the host's memory.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_1, ahbadr);
/*
* Setup Latency Timer and cache line size. Default cache line
* size will result in poor performance (256 word fetches), 0xff
* will set it according to the max size of the PCI FIFO.
*/
grpci1_cfg_w8(priv, TGT, 0, PCI_CACHE_LINE_SIZE, 0xff);
grpci1_cfg_w8(priv, TGT, 0, PCI_LATENCY_TIMER, 0x40);
/* set as bus master, enable pci memory responses, clear status bits */
grpci1_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
}
static irqreturn_t grpci1_jump_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
dev_err(priv->dev, "Jump IRQ happened\n");
return IRQ_NONE;
}
/* Handle GRPCI1 Error Interrupt */
static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
u32 status;
grpci1_cfg_r16(priv, TGT, 0, PCI_STATUS, &status);
status &= priv->pci_err_mask;
if (status == 0)
return IRQ_NONE;
if (status & PCI_STATUS_PARITY)
dev_err(priv->dev, "Data Parity Error\n");
if (status & PCI_STATUS_SIG_TARGET_ABORT)
dev_err(priv->dev, "Signalled Target Abort\n");
if (status & PCI_STATUS_REC_TARGET_ABORT)
dev_err(priv->dev, "Received Target Abort\n");
if (status & PCI_STATUS_REC_MASTER_ABORT)
dev_err(priv->dev, "Received Master Abort\n");
if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
dev_err(priv->dev, "Signalled System Error\n");
if (status & PCI_STATUS_DETECTED_PARITY)
dev_err(priv->dev, "Parity Error\n");
/* Clear handled INT TYPE IRQs */
grpci1_cfg_w16(priv, TGT, 0, PCI_STATUS, status);
return IRQ_HANDLED;
}
static int grpci1_of_probe(struct platform_device *ofdev)
{
struct grpci1_regs *regs;
struct grpci1_priv *priv;
int err, len;
const int *tmp;
u32 cfg, size, err_mask;
struct resource *res;
if (grpci1priv) {
dev_err(&ofdev->dev, "only one GRPCI1 supported\n");
return -ENODEV;
}
if (ofdev->num_resources < 3) {
dev_err(&ofdev->dev, "not enough APB/AHB resources\n");
return -EIO;
}
priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&ofdev->dev, "memory allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(ofdev, priv);
priv->dev = &ofdev->dev;
/* find device register base address */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&ofdev->dev, res);
if (IS_ERR(regs))
return PTR_ERR(regs);
/*
* check that we're in Host Slot and that we can act as a Host Bridge
* and not only as target/peripheral.
*/
cfg = REGLOAD(regs->cfg_stat);
if ((cfg & CFGSTAT_HOST) == 0) {
dev_err(&ofdev->dev, "not in host system slot\n");
return -EIO;
}
/* check that BAR1 support 256 MByte so that we can map kernel space */
REGSTORE(regs->page1, 0xffffffff);
size = ~REGLOAD(regs->page1) + 1;
if (size < 0x10000000) {
dev_err(&ofdev->dev, "BAR1 must be at least 256MByte\n");
return -EIO;
}
/* hardware must support little-endian PCI (byte-twisting) */
if ((REGLOAD(regs->page0) & PAGE0_BTEN) == 0) {
dev_err(&ofdev->dev, "byte-twisting is required\n");
return -EIO;
}
priv->regs = regs;
priv->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
dev_info(&ofdev->dev, "host found at 0x%p, irq%d\n", regs, priv->irq);
/* Find PCI Memory, I/O and Configuration Space Windows */
priv->pci_area = ofdev->resource[1].start;
priv->pci_area_end = ofdev->resource[1].end+1;
priv->pci_io = ofdev->resource[2].start;
priv->pci_conf = ofdev->resource[2].start + 0x10000;
priv->pci_conf_end = priv->pci_conf + 0x10000;
priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
if (!priv->pci_io_va) {
dev_err(&ofdev->dev, "unable to map PCI I/O area\n");
return -EIO;
}
printk(KERN_INFO
"GRPCI1: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
" I/O SPACE [0x%08lx - 0x%08lx]\n"
" CONFIG SPACE [0x%08lx - 0x%08lx]\n",
priv->pci_area, priv->pci_area_end-1,
priv->pci_io, priv->pci_conf-1,
priv->pci_conf, priv->pci_conf_end-1);
/*
* I/O Space resources in I/O Window mapped into Virtual Adr Space
* We never use low 4KB because some devices seem to have problems using
* address 0.
*/
priv->info.io_space.name = "GRPCI1 PCI I/O Space";
priv->info.io_space.start = priv->pci_io_va + 0x1000;
priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
priv->info.io_space.flags = IORESOURCE_IO;
/*
* grpci1 has no prefetchable memory, map everything as
* non-prefetchable memory
*/
priv->info.mem_space.name = "GRPCI1 PCI MEM Space";
priv->info.mem_space.start = priv->pci_area;
priv->info.mem_space.end = priv->pci_area_end - 1;
priv->info.mem_space.flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI memory area\n");
err = -ENOMEM;
goto err1;
}
if (request_resource(&ioport_resource, &priv->info.io_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI I/O area\n");
err = -ENOMEM;
goto err2;
}
/* setup maximum supported PCI buses */
priv->info.busn.name = "GRPCI1 busn";
priv->info.busn.start = 0;
priv->info.busn.end = 15;
grpci1priv = priv;
/* Initialize hardware */
grpci1_hw_init(priv);
/*
* Get PCI Interrupt to System IRQ mapping and setup IRQ handling
* Error IRQ. All PCI and PCI-Error interrupts are shared using the
* same system IRQ.
*/
leon_update_virq_handling(priv->irq, grpci1_pci_flow_irq, "pcilvl", 0);
priv->irq_map[0] = grpci1_build_device_irq(1);
priv->irq_map[1] = grpci1_build_device_irq(2);
priv->irq_map[2] = grpci1_build_device_irq(3);
priv->irq_map[3] = grpci1_build_device_irq(4);
priv->irq_err = grpci1_build_device_irq(5);
printk(KERN_INFO " PCI INTA..D#: IRQ%d, IRQ%d, IRQ%d, IRQ%d\n",
priv->irq_map[0], priv->irq_map[1], priv->irq_map[2],
priv->irq_map[3]);
/* Enable IRQs on LEON IRQ controller */
err = devm_request_irq(&ofdev->dev, priv->irq, grpci1_jump_interrupt, 0,
"GRPCI1_JUMP", priv);
if (err) {
dev_err(&ofdev->dev, "ERR IRQ request failed: %d\n", err);
goto err3;
}
/* Setup IRQ handler for access errors */
err = devm_request_irq(&ofdev->dev, priv->irq_err,
grpci1_err_interrupt, IRQF_SHARED, "GRPCI1_ERR",
priv);
if (err) {
dev_err(&ofdev->dev, "ERR VIRQ request failed: %d\n", err);
goto err3;
}
tmp = of_get_property(ofdev->dev.of_node, "all_pci_errors", &len);
if (tmp && (len == 4)) {
priv->pci_err_mask = ALL_PCI_ERRORS;
err_mask = IRQ_ALL_ERRORS << IRQ_MASK_BIT;
} else {
priv->pci_err_mask = DEF_PCI_ERRORS;
err_mask = IRQ_DEF_ERRORS << IRQ_MASK_BIT;
}
/*
* Enable Error Interrupts. PCI interrupts are unmasked once request_irq
* is called by the PCI Device drivers
*/
REGSTORE(regs->irq, err_mask);
/* Init common layer and scan buses */
priv->info.ops = &grpci1_ops;
priv->info.map_irq = grpci1_map_irq;
leon_pci_init(ofdev, &priv->info);
return 0;
err3:
release_resource(&priv->info.io_space);
err2:
release_resource(&priv->info.mem_space);
err1:
iounmap((void *)priv->pci_io_va);
grpci1priv = NULL;
return err;
}
static struct of_device_id grpci1_of_match[] = {
{
.name = "GAISLER_PCIFBRG",
},
{
.name = "01_014",
},
{},
};
static struct platform_driver grpci1_of_driver = {
.driver = {
.name = "grpci1",
.owner = THIS_MODULE,
.of_match_table = grpci1_of_match,
},
.probe = grpci1_of_probe,
};
static int __init grpci1_init(void)
{
return platform_driver_register(&grpci1_of_driver);
}
subsys_initcall(grpci1_init);
| gpl-2.0 |
MpApQ/kernel_huawei | drivers/ide/at91_ide.c | 3990 | 10514 | /*
* IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
* with Compact Flash True IDE logic
*
* Copyright (c) 2008, 2009 Kelvatek Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ide.h>
#include <linux/platform_device.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#define DRV_NAME "at91_ide"
#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
/*
* Access to IDE device is possible through EBI Static Memory Controller
* with Compact Flash logic. For details see EBI and SMC datasheet sections
* of any microcontroller from AT91SAM9 family.
*
* Within SMC chip select address space, lines A[23:21] distinguish Compact
* Flash modes (I/O, common memory, attribute memory, True IDE). IDE modes are:
* 0x00c0000 - True IDE
* 0x00e0000 - Alternate True IDE (Alt Status Register)
*
* On True IDE mode Task File and Data Register are mapped at the same address.
* To distinguish access between these two different bus data width is used:
* 8Bit for Task File, 16Bit for Data I/O.
*
* After initialization we do 8/16 bit flipping (changes in SMC MODE register)
* only inside IDE callback routines which are serialized by IDE layer,
* so no additional locking needed.
*/
#define TASK_FILE 0x00c00000
#define ALT_MODE 0x00e00000
#define REGS_SIZE 8
#define enter_16bit(cs, mode) do { \
mode = at91_sys_read(AT91_SMC_MODE(cs)); \
at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16); \
} while (0)
#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode);
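/*
 * Usage sketch (added comment): the data-path helpers below bracket
 * every 16-bit transfer with this pair, restoring the saved 8-bit
 * Task File mode afterwards:
 *
 *	unsigned long mode;
 *
 *	enter_16bit(chipselect, mode);
 *	readsw((void __iomem *)io_ports->data_addr, buf, len / 2);
 *	leave_16bit(chipselect, mode);
 */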
static void set_smc_timings(const u8 chipselect, const u16 cycle,
const u16 setup, const u16 pulse,
const u16 data_float, int use_iordy)
{
unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
AT91_SMC_BAT_SELECT;
/* disable or enable waiting for IORDY signal */
if (use_iordy)
mode |= AT91_SMC_EXNWMODE_READY;
/* add data float cycles if needed */
if (data_float)
mode |= AT91_SMC_TDF_(data_float);
at91_sys_write(AT91_SMC_MODE(chipselect), mode);
/* setup timings in SMC */
at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
AT91_SMC_NCS_WRSETUP_(0) |
AT91_SMC_NRDSETUP_(setup) |
AT91_SMC_NCS_RDSETUP_(0));
at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
AT91_SMC_NCS_WRPULSE_(cycle) |
AT91_SMC_NRDPULSE_(pulse) |
AT91_SMC_NCS_RDPULSE_(cycle));
at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
AT91_SMC_NRDCYCLE_(cycle));
}
static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
{
u64 tmp = ns;
tmp *= mck_hz;
tmp += 1000*1000*1000 - 1; /* round up */
do_div(tmp, 1000*1000*1000);
return (unsigned int) tmp;
}
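/*
 * Worked example (added comment): with mck_hz = 100000000 (100 MHz,
 * 10 ns per cycle) and ns = 25, tmp = 25 * 100000000 + 999999999 =
 * 3499999999, and the divide by 10^9 yields 3, i.e. 25 ns is rounded
 * up to 3 MCK cycles.
 */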
static void apply_timings(const u8 chipselect, const u8 pio,
const struct ide_timing *timing, int use_iordy)
{
unsigned int t0, t1, t2, t6z;
unsigned int cycle, setup, pulse, data_float;
unsigned int mck_hz;
struct clk *mck;
/* see table 22 of the Compact Flash standard 4.1 for the meaning;
* we do not stretch the active (t2) time, so setup (t1) + hold time (th)
* must ensure at least the minimal recovery (t2i) time */
t0 = timing->cyc8b;
t1 = timing->setup;
t2 = timing->act8b;
t6z = (pio < 5) ? 30 : 20;
pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
mck = clk_get(NULL, "mck");
BUG_ON(IS_ERR(mck));
mck_hz = clk_get_rate(mck);
pdbg("mck_hz=%u\n", mck_hz);
cycle = calc_mck_cycles(t0, mck_hz);
setup = calc_mck_cycles(t1, mck_hz);
pulse = calc_mck_cycles(t2, mck_hz);
data_float = calc_mck_cycles(t6z, mck_hz);
pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
cycle, setup, pulse, data_float);
set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
}
static void at91_ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
u8 chipselect = hwif->select_data;
unsigned long mode;
pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
len++; /* round an odd byte count up to a whole number of 16-bit words */
enter_16bit(chipselect, mode);
readsw((void __iomem *)io_ports->data_addr, buf, len / 2);
leave_16bit(chipselect, mode);
}
static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
u8 chipselect = hwif->select_data;
unsigned long mode;
pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
enter_16bit(chipselect, mode);
writesw((void __iomem *)io_ports->data_addr, buf, len / 2);
leave_16bit(chipselect, mode);
}
static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct ide_timing *timing;
u8 chipselect = hwif->select_data;
int use_iordy = 0;
const u8 pio = drive->pio_mode - XFER_PIO_0;
pdbg("chipselect %u pio %u\n", chipselect, pio);
timing = ide_timing_find_mode(XFER_PIO_0 + pio);
BUG_ON(!timing);
if (ide_pio_need_iordy(drive, pio))
use_iordy = 1;
apply_timings(chipselect, pio, timing, use_iordy);
}
static const struct ide_tp_ops at91_ide_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = ide_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = at91_ide_input_data,
.output_data = at91_ide_output_data,
};
static const struct ide_port_ops at91_ide_port_ops = {
.set_pio_mode = at91_ide_set_pio_mode,
};
static const struct ide_port_info at91_ide_port_info __initdata = {
.port_ops = &at91_ide_port_ops,
.tp_ops = &at91_ide_tp_ops,
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO6,
.chipset = ide_generic,
};
/*
* If the interrupt is delivered through a GPIO, IRQs are triggered on both
* the falling and the rising edge of the signal, whereas the IDE device
* requests an interrupt on high level (the rising edge in our case). This
* means we get spurious interrupts, so we have to check the interrupt pin
* and return from the ISR immediately when the line is low.
*/
irqreturn_t at91_irq_handler(int irq, void *dev_id)
{
int ntries = 8;
int pin_val1, pin_val2;
/* additional deglitch; the line can be noisy on a badly designed PCB */
do {
pin_val1 = at91_get_gpio_value(irq);
pin_val2 = at91_get_gpio_value(irq);
} while (pin_val1 != pin_val2 && --ntries > 0);
if (pin_val1 == 0 || ntries <= 0)
return IRQ_HANDLED;
return ide_intr(irq, dev_id);
}
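/*
* Added note on the handler above: the GPIO level is sampled twice until
* two consecutive reads agree (a cheap deglitch), and a low line means the
* falling-edge (spurious) interrupt, which is consumed by returning
* IRQ_HANDLED without calling ide_intr().
*/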
static int __init at91_ide_probe(struct platform_device *pdev)
{
int ret;
struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
unsigned long tf_base = 0, ctl_base = 0;
struct at91_cf_data *board = pdev->dev.platform_data;
if (!board)
return -ENODEV;
if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
perr("no device detected\n");
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
perr("can't get memory resource\n");
return -ENODEV;
}
if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
REGS_SIZE, "ide") ||
!devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
REGS_SIZE, "alt")) {
perr("memory resources in use\n");
return -EBUSY;
}
pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
board->irq_pin, (unsigned long) res->start);
tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
REGS_SIZE);
ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
REGS_SIZE);
if (!tf_base || !ctl_base) {
perr("can't map memory regions\n");
return -EBUSY;
}
memset(&hw, 0, sizeof(hw));
if (board->flags & AT91_IDE_SWAP_A0_A2) {
/* workaround for stupid hardware bug */
hw.io_ports.data_addr = tf_base + 0;
hw.io_ports.error_addr = tf_base + 4;
hw.io_ports.nsect_addr = tf_base + 2;
hw.io_ports.lbal_addr = tf_base + 6;
hw.io_ports.lbam_addr = tf_base + 1;
hw.io_ports.lbah_addr = tf_base + 5;
hw.io_ports.device_addr = tf_base + 3;
hw.io_ports.command_addr = tf_base + 7;
hw.io_ports.ctl_addr = ctl_base + 3;
} else
ide_std_init_ports(&hw, tf_base, ctl_base + 6);
hw.irq = board->irq_pin;
hw.dev = &pdev->dev;
host = ide_host_alloc(&at91_ide_port_info, hws, 1);
if (!host) {
perr("failed to allocate ide host\n");
return -ENOMEM;
}
/* setup Static Memory Controller - PIO 0 as default */
apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
/* with GPIO interrupt we have to do quirks in handler */
if (board->irq_pin >= PIN_BASE)
host->irq_handler = at91_irq_handler;
host->ports[0]->select_data = board->chipselect;
ret = ide_host_register(host, &at91_ide_port_info, hws);
if (ret) {
perr("failed to register ide host\n");
goto err_free_host;
}
platform_set_drvdata(pdev, host);
return 0;
err_free_host:
ide_host_free(host);
return ret;
}
static int __exit at91_ide_remove(struct platform_device *pdev)
{
struct ide_host *host = platform_get_drvdata(pdev);
ide_host_remove(host);
return 0;
}
static struct platform_driver at91_ide_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.remove = __exit_p(at91_ide_remove),
};
static int __init at91_ide_init(void)
{
return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
}
static void __exit at91_ide_exit(void)
{
platform_driver_unregister(&at91_ide_driver);
}
module_init(at91_ide_init);
module_exit(at91_ide_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
| gpl-2.0 |
h8rift/android_kernel_htc_holiday-htcsrc | drivers/ide/at91_ide.c | 3990 | 10514 | /*
* IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
* with Compact Flash True IDE logic
*
* Copyright (c) 2008, 2009 Kelvatek Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ide.h>
#include <linux/platform_device.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#define DRV_NAME "at91_ide"
#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
/*
* Access to IDE device is possible through EBI Static Memory Controller
* with Compact Flash logic. For details see EBI and SMC datasheet sections
* of any microcontroller from AT91SAM9 family.
*
* Within SMC chip select address space, lines A[23:21] distinguish Compact
* Flash modes (I/O, common memory, attribute memory, True IDE). IDE modes are:
* 0x00c00000 - True IDE
* 0x00e00000 - Alternate True IDE (Alt Status Register)
*
* In True IDE mode the Task File and the Data Register are mapped at the
* same address; accesses to the two are distinguished by bus data width:
* 8-bit for the Task File, 16-bit for Data I/O.
*
* After initialization we do 8/16-bit flipping (changes in the SMC MODE
* register) only inside IDE callback routines, which are serialized by the
* IDE layer, so no additional locking is needed.
*/
#define TASK_FILE 0x00c00000
#define ALT_MODE 0x00e00000
#define REGS_SIZE 8
#define enter_16bit(cs, mode) do { \
mode = at91_sys_read(AT91_SMC_MODE(cs)); \
at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16); \
} while (0)
#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode)
static void set_smc_timings(const u8 chipselect, const u16 cycle,
const u16 setup, const u16 pulse,
const u16 data_float, int use_iordy)
{
unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
AT91_SMC_BAT_SELECT;
/* disable or enable waiting for IORDY signal */
if (use_iordy)
mode |= AT91_SMC_EXNWMODE_READY;
/* add data float cycles if needed */
if (data_float)
mode |= AT91_SMC_TDF_(data_float);
at91_sys_write(AT91_SMC_MODE(chipselect), mode);
/* setup timings in SMC */
at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
AT91_SMC_NCS_WRSETUP_(0) |
AT91_SMC_NRDSETUP_(setup) |
AT91_SMC_NCS_RDSETUP_(0));
at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
AT91_SMC_NCS_WRPULSE_(cycle) |
AT91_SMC_NRDPULSE_(pulse) |
AT91_SMC_NCS_RDPULSE_(cycle));
at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
AT91_SMC_NRDCYCLE_(cycle));
}
static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
{
u64 tmp = ns;
tmp *= mck_hz;
tmp += 1000*1000*1000 - 1; /* round up */
do_div(tmp, 1000*1000*1000);
return (unsigned int) tmp;
}
static void apply_timings(const u8 chipselect, const u8 pio,
const struct ide_timing *timing, int use_iordy)
{
unsigned int t0, t1, t2, t6z;
unsigned int cycle, setup, pulse, data_float;
unsigned int mck_hz;
struct clk *mck;
/* see table 22 of the Compact Flash standard 4.1 for the meaning;
* we do not stretch the active (t2) time, so setup (t1) + hold time (th)
* must ensure at least the minimal recovery (t2i) time */
t0 = timing->cyc8b;
t1 = timing->setup;
t2 = timing->act8b;
t6z = (pio < 5) ? 30 : 20;
pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
mck = clk_get(NULL, "mck");
BUG_ON(IS_ERR(mck));
mck_hz = clk_get_rate(mck);
pdbg("mck_hz=%u\n", mck_hz);
cycle = calc_mck_cycles(t0, mck_hz);
setup = calc_mck_cycles(t1, mck_hz);
pulse = calc_mck_cycles(t2, mck_hz);
data_float = calc_mck_cycles(t6z, mck_hz);
pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
cycle, setup, pulse, data_float);
set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
}
static void at91_ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
u8 chipselect = hwif->select_data;
unsigned long mode;
pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
len++; /* round an odd byte count up to a whole number of 16-bit words */
enter_16bit(chipselect, mode);
readsw((void __iomem *)io_ports->data_addr, buf, len / 2);
leave_16bit(chipselect, mode);
}
static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
u8 chipselect = hwif->select_data;
unsigned long mode;
pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
enter_16bit(chipselect, mode);
writesw((void __iomem *)io_ports->data_addr, buf, len / 2);
leave_16bit(chipselect, mode);
}
static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct ide_timing *timing;
u8 chipselect = hwif->select_data;
int use_iordy = 0;
const u8 pio = drive->pio_mode - XFER_PIO_0;
pdbg("chipselect %u pio %u\n", chipselect, pio);
timing = ide_timing_find_mode(XFER_PIO_0 + pio);
BUG_ON(!timing);
if (ide_pio_need_iordy(drive, pio))
use_iordy = 1;
apply_timings(chipselect, pio, timing, use_iordy);
}
static const struct ide_tp_ops at91_ide_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = ide_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = at91_ide_input_data,
.output_data = at91_ide_output_data,
};
static const struct ide_port_ops at91_ide_port_ops = {
.set_pio_mode = at91_ide_set_pio_mode,
};
static const struct ide_port_info at91_ide_port_info __initdata = {
.port_ops = &at91_ide_port_ops,
.tp_ops = &at91_ide_tp_ops,
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO6,
.chipset = ide_generic,
};
/*
* If the interrupt is delivered through a GPIO, IRQs are triggered on both
* the falling and the rising edge of the signal, whereas the IDE device
* requests an interrupt on high level (the rising edge in our case). This
* means we get spurious interrupts, so we have to check the interrupt pin
* and return from the ISR immediately when the line is low.
*/
irqreturn_t at91_irq_handler(int irq, void *dev_id)
{
int ntries = 8;
int pin_val1, pin_val2;
/* additional deglitch; the line can be noisy on a badly designed PCB */
do {
pin_val1 = at91_get_gpio_value(irq);
pin_val2 = at91_get_gpio_value(irq);
} while (pin_val1 != pin_val2 && --ntries > 0);
if (pin_val1 == 0 || ntries <= 0)
return IRQ_HANDLED;
return ide_intr(irq, dev_id);
}
static int __init at91_ide_probe(struct platform_device *pdev)
{
int ret;
struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
unsigned long tf_base = 0, ctl_base = 0;
struct at91_cf_data *board = pdev->dev.platform_data;
if (!board)
return -ENODEV;
if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
perr("no device detected\n");
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
perr("can't get memory resource\n");
return -ENODEV;
}
if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
REGS_SIZE, "ide") ||
!devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
REGS_SIZE, "alt")) {
perr("memory resources in use\n");
return -EBUSY;
}
pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
board->irq_pin, (unsigned long) res->start);
tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
REGS_SIZE);
ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
REGS_SIZE);
if (!tf_base || !ctl_base) {
perr("can't map memory regions\n");
return -EBUSY;
}
memset(&hw, 0, sizeof(hw));
if (board->flags & AT91_IDE_SWAP_A0_A2) {
/* workaround for stupid hardware bug */
hw.io_ports.data_addr = tf_base + 0;
hw.io_ports.error_addr = tf_base + 4;
hw.io_ports.nsect_addr = tf_base + 2;
hw.io_ports.lbal_addr = tf_base + 6;
hw.io_ports.lbam_addr = tf_base + 1;
hw.io_ports.lbah_addr = tf_base + 5;
hw.io_ports.device_addr = tf_base + 3;
hw.io_ports.command_addr = tf_base + 7;
hw.io_ports.ctl_addr = ctl_base + 3;
} else
ide_std_init_ports(&hw, tf_base, ctl_base + 6);
hw.irq = board->irq_pin;
hw.dev = &pdev->dev;
host = ide_host_alloc(&at91_ide_port_info, hws, 1);
if (!host) {
perr("failed to allocate ide host\n");
return -ENOMEM;
}
/* setup Static Memory Controller - PIO 0 as default */
apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
/* with GPIO interrupt we have to do quirks in handler */
if (board->irq_pin >= PIN_BASE)
host->irq_handler = at91_irq_handler;
host->ports[0]->select_data = board->chipselect;
ret = ide_host_register(host, &at91_ide_port_info, hws);
if (ret) {
perr("failed to register ide host\n");
goto err_free_host;
}
platform_set_drvdata(pdev, host);
return 0;
err_free_host:
ide_host_free(host);
return ret;
}
static int __exit at91_ide_remove(struct platform_device *pdev)
{
struct ide_host *host = platform_get_drvdata(pdev);
ide_host_remove(host);
return 0;
}
static struct platform_driver at91_ide_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.remove = __exit_p(at91_ide_remove),
};
static int __init at91_ide_init(void)
{
return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
}
static void __exit at91_ide_exit(void)
{
platform_driver_unregister(&at91_ide_driver);
}
module_init(at91_ide_init);
module_exit(at91_ide_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
| gpl-2.0 |
npf-ati/linux-2.6-imx | arch/microblaze/kernel/exceptions.c | 4246 | 4189 | /*
* HW exception handling
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008 PetaLogix
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
/*
* This file handles the architecture-dependent parts of hardware exceptions
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>
#include <asm/cacheflush.h>
#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
#define MICROBLAZE_IBUS_EXCEPTION 0x03
#define MICROBLAZE_DBUS_EXCEPTION 0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
#define MICROBLAZE_FPU_EXCEPTION 0x06
#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
static DEFINE_SPINLOCK(die_lock);
void die(const char *str, struct pt_regs *fp, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
}
/* for user application debugging */
asmlinkage void sw_exception(struct pt_regs *regs)
{
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
flush_dcache_range(regs->r16, regs->r16 + 0x4);
flush_icache_range(regs->r16, regs->r16 + 0x4);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
if (kernel_mode(regs))
die("Exception in kernel mode", regs, signr);
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
info.si_addr = (void __user *) addr;
force_sig_info(signr, &info, current);
}
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
#ifdef CONFIG_MMU
addr = regs->pc;
#endif
#if 0
pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
(unsigned int) regs->pc, (unsigned int) regs->esr);
#endif
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Illegal opcode exception in user mode\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
pr_warn("Illegal opcode exception in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Instruction bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
pr_warn("Instruction bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DBUS_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Data bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
pr_warn("Data bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Divide by zero exception in user mode\n");
_exception(SIGFPE, regs, FPE_INTDIV, addr);
return;
}
pr_warn("Divide by zero exception in kernel mode.\n");
die("Divide by zero exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
pr_debug("FPU exception\n");
/* IEEE FP exception */
/* reuse the fsr argument to hold the FPE si_code passed to _exception() */
if (fsr & FSR_IO)
fsr = FPE_FLTINV;
else if (fsr & FSR_OF)
fsr = FPE_FLTOVF;
else if (fsr & FSR_UF)
fsr = FPE_FLTUND;
else if (fsr & FSR_DZ)
fsr = FPE_FLTDIV;
else if (fsr & FSR_DO)
fsr = FPE_FLTRES;
_exception(SIGFPE, regs, fsr, addr);
break;
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
pr_debug("Privileged exception\n");
_exception(SIGILL, regs, ILL_PRVOPC, addr);
break;
#endif
default:
/* FIXME what to do in unexpected exception */
pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
type, (unsigned int) addr,
kernel_mode(regs) ? "kernel" : "user");
}
return;
}
| gpl-2.0 |
somcom3x/tw_herc_kernel | drivers/hid/hid-gyration.c | 8086 | 3121 | /*
* HID driver for some gyration "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
* Copyright (c) 2006-2008 Jiri Kosina
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#define gy_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
/* Reported on Gyration MCE Remote */
case 0x00d: gy_map_key_clear(KEY_HOME); break;
case 0x024: gy_map_key_clear(KEY_DVD); break;
case 0x025: gy_map_key_clear(KEY_PVR); break;
case 0x046: gy_map_key_clear(KEY_MEDIA); break;
case 0x047: gy_map_key_clear(KEY_MP3); break;
case 0x048: gy_map_key_clear(KEY_MEDIA); break;
case 0x049: gy_map_key_clear(KEY_CAMERA); break;
case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
case 0x05a: gy_map_key_clear(KEY_TEXT); break;
case 0x05b: gy_map_key_clear(KEY_RED); break;
case 0x05c: gy_map_key_clear(KEY_GREEN); break;
case 0x05d: gy_map_key_clear(KEY_YELLOW); break;
case 0x05e: gy_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
return 1;
}
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
input_sync(input);
return 1;
}
return 0;
}
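/*
* Added note: GenDesk usage 0x82 is the HID "System Sleep" control; the
* remote reports it as a one-shot event, so the handler above synthesizes
* a full press + release pair for the mapped key.
*/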
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
static struct hid_driver gyration_driver = {
.name = "gyration",
.id_table = gyration_devices,
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
static int __init gyration_init(void)
{
return hid_register_driver(&gyration_driver);
}
static void __exit gyration_exit(void)
{
hid_unregister_driver(&gyration_driver);
}
module_init(gyration_init);
module_exit(gyration_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Naoya-Horiguchi/linux | drivers/clk/ti/fapll.c | 151 | 15681 | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
/* FAPLL Control Register PLL_CTRL */
#define FAPLL_MAIN_MULT_N_SHIFT 16
#define FAPLL_MAIN_DIV_P_SHIFT 8
#define FAPLL_MAIN_LOCK BIT(7)
#define FAPLL_MAIN_PLLEN BIT(3)
#define FAPLL_MAIN_BP BIT(2)
#define FAPLL_MAIN_LOC_CTL BIT(0)
#define FAPLL_MAIN_MAX_MULT_N 0xffff
#define FAPLL_MAIN_MAX_DIV_P 0xff
#define FAPLL_MAIN_CLEAR_MASK \
((FAPLL_MAIN_MAX_MULT_N << FAPLL_MAIN_MULT_N_SHIFT) | \
(FAPLL_MAIN_DIV_P_SHIFT << FAPLL_MAIN_DIV_P_SHIFT) | \
FAPLL_MAIN_LOC_CTL)
/* FAPLL powerdown register PWD */
#define FAPLL_PWD_OFFSET 4
#define MAX_FAPLL_OUTPUTS 7
#define FAPLL_MAX_RETRIES 1000
#define to_fapll(_hw) container_of(_hw, struct fapll_data, hw)
#define to_synth(_hw) container_of(_hw, struct fapll_synth, hw)
/* The bypass bit is inverted on the ddr_pll. */
#define fapll_is_ddr_pll(va) (((u32)(va) & 0xffff) == 0x0440)
/*
* The audio_pll_clk1 input is hardwired to the 27MHz bypass clock,
* and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
*/
#define is_ddr_pll_clk1(va) (((u32)(va) & 0xffff) == 0x044c)
#define is_audio_pll_clk1(va) (((u32)(va) & 0xffff) == 0x04a8)
/* Synthesizer divider register */
#define SYNTH_LDMDIV1 BIT(8)
/* Synthesizer frequency register */
#define SYNTH_LDFREQ BIT(31)
#define SYNTH_PHASE_K 8
#define SYNTH_MAX_INT_DIV 0xf
#define SYNTH_MAX_DIV_M 0xff
struct fapll_data {
struct clk_hw hw;
void __iomem *base;
const char *name;
struct clk *clk_ref;
struct clk *clk_bypass;
struct clk_onecell_data outputs;
bool bypass_bit_inverted;
};
struct fapll_synth {
struct clk_hw hw;
struct fapll_data *fd;
int index;
void __iomem *freq;
void __iomem *div;
const char *name;
struct clk *clk_pll;
};
static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
{
u32 v = readl_relaxed(fd->base);
if (fd->bypass_bit_inverted)
return !(v & FAPLL_MAIN_BP);
else
return !!(v & FAPLL_MAIN_BP);
}
static void ti_fapll_set_bypass(struct fapll_data *fd)
{
u32 v = readl_relaxed(fd->base);
if (fd->bypass_bit_inverted)
v &= ~FAPLL_MAIN_BP;
else
v |= FAPLL_MAIN_BP;
writel_relaxed(v, fd->base);
}
static void ti_fapll_clear_bypass(struct fapll_data *fd)
{
u32 v = readl_relaxed(fd->base);
if (fd->bypass_bit_inverted)
v |= FAPLL_MAIN_BP;
else
v &= ~FAPLL_MAIN_BP;
writel_relaxed(v, fd->base);
}
static int ti_fapll_wait_lock(struct fapll_data *fd)
{
int retries = FAPLL_MAX_RETRIES;
u32 v;
while ((v = readl_relaxed(fd->base))) {
if (v & FAPLL_MAIN_LOCK)
return 0;
if (retries-- <= 0)
break;
udelay(1);
}
pr_err("%s failed to lock\n", fd->name);
return -ETIMEDOUT;
}
static int ti_fapll_enable(struct clk_hw *hw)
{
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
v |= FAPLL_MAIN_PLLEN;
writel_relaxed(v, fd->base);
ti_fapll_wait_lock(fd);
return 0;
}
static void ti_fapll_disable(struct clk_hw *hw)
{
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
v &= ~FAPLL_MAIN_PLLEN;
writel_relaxed(v, fd->base);
}
static int ti_fapll_is_enabled(struct clk_hw *hw)
{
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
return v & FAPLL_MAIN_PLLEN;
}
static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct fapll_data *fd = to_fapll(hw);
u32 fapll_n, fapll_p, v;
u64 rate;
if (ti_fapll_clock_is_bypass(fd))
return parent_rate;
rate = parent_rate;
/* PLL pre-divider is P and multiplier is N */
v = readl_relaxed(fd->base);
fapll_p = (v >> 8) & 0xff;
if (fapll_p)
do_div(rate, fapll_p);
fapll_n = v >> 16;
if (fapll_n)
rate *= fapll_n;
return rate;
}
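/*
* Worked example for the recalc above (illustrative register contents, not
* from a real board): with parent_rate = 20 MHz and PLL_CTRL holding
* N = 60 (bits 31:16) and P = 2 (bits 15:8),
*
*	rate = 20000000 / 2 * 60 = 600000000 (600 MHz)
*/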
static u8 ti_fapll_get_parent(struct clk_hw *hw)
{
struct fapll_data *fd = to_fapll(hw);
if (ti_fapll_clock_is_bypass(fd))
return 1;
return 0;
}
static int ti_fapll_set_div_mult(unsigned long rate,
unsigned long parent_rate,
u32 *pre_div_p, u32 *mult_n)
{
/*
* So far no luck getting a decent clock with the PLL divider;
* PLL does not seem to lock and the signal does not look
* right. It seems the divider can only be used together
* with the multiplier?
*/
if (rate < parent_rate) {
pr_warn("FAPLL main divider rates unsupported\n");
return -EINVAL;
}
*mult_n = rate / parent_rate;
if (*mult_n > FAPLL_MAIN_MAX_MULT_N)
return -EINVAL;
*pre_div_p = 1;
return 0;
}
static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
u32 pre_div_p, mult_n;
int error;
if (!rate)
return -EINVAL;
error = ti_fapll_set_div_mult(rate, *parent_rate,
&pre_div_p, &mult_n);
if (error)
return error;
rate = *parent_rate / pre_div_p;
rate *= mult_n;
return rate;
}
static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct fapll_data *fd = to_fapll(hw);
u32 pre_div_p, mult_n, v;
int error;
if (!rate)
return -EINVAL;
error = ti_fapll_set_div_mult(rate, parent_rate,
&pre_div_p, &mult_n);
if (error)
return error;
ti_fapll_set_bypass(fd);
v = readl_relaxed(fd->base);
v &= ~FAPLL_MAIN_CLEAR_MASK;
v |= pre_div_p << FAPLL_MAIN_DIV_P_SHIFT;
v |= mult_n << FAPLL_MAIN_MULT_N_SHIFT;
writel_relaxed(v, fd->base);
if (ti_fapll_is_enabled(hw))
ti_fapll_wait_lock(fd);
ti_fapll_clear_bypass(fd);
return 0;
}
static const struct clk_ops ti_fapll_ops = {
.enable = ti_fapll_enable,
.disable = ti_fapll_disable,
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
.round_rate = ti_fapll_round_rate,
.set_rate = ti_fapll_set_rate,
};
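/*
* Consumer-side sketch (assumed usage; the "fapll" lookup name below is
* made up): the ops above are exercised through the generic clk API, e.g.
*
*	struct clk *pll = clk_get(dev, "fapll");
*	clk_set_rate(pll, 600000000);	ends up in ti_fapll_round_rate()
*					and ti_fapll_set_rate()
*	clk_prepare_enable(pll);	ends up in ti_fapll_enable(), which
*					waits for the lock bit
*/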
static int ti_fapll_synth_enable(struct clk_hw *hw)
{
struct fapll_synth *synth = to_synth(hw);
u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
v &= ~(1 << synth->index);
writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
return 0;
}
static void ti_fapll_synth_disable(struct clk_hw *hw)
{
struct fapll_synth *synth = to_synth(hw);
u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
v |= 1 << synth->index;
writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
}
static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
{
struct fapll_synth *synth = to_synth(hw);
u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
return !(v & (1 << synth->index));
}
/*
* See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
*/
static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct fapll_synth *synth = to_synth(hw);
u32 synth_div_m;
u64 rate;
/* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
if (!synth->div)
return 32768;
/*
* PLL in bypass sets the synths in bypass mode too. The PLL rate
* can also be set to 27MHz, so we can't use parent_rate to
* check for bypass mode.
*/
if (ti_fapll_clock_is_bypass(synth->fd))
return parent_rate;
rate = parent_rate;
/*
* Synth frequency integer and fractional divider.
* Note that the phase output K is 8, so the result needs
* to be multiplied by SYNTH_PHASE_K.
*/
if (synth->freq) {
u32 v, synth_int_div, synth_frac_div, synth_div_freq;
v = readl_relaxed(synth->freq);
synth_int_div = (v >> 24) & 0xf;
synth_frac_div = v & 0xffffff;
synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
rate *= 10000000;
do_div(rate, synth_div_freq);
rate *= SYNTH_PHASE_K;
}
/* Synth post-divider M */
synth_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
return DIV_ROUND_UP_ULL(rate, synth_div_m);
}
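/*
* Worked example for the synthesizer math above (illustrative values
* only): with parent_rate = 1440 MHz, an integer divider of 15, a zero
* fractional part and post divider M = 16,
*
*	rate = 1440000000 * SYNTH_PHASE_K / (15 * 16)
*	     = 1440000000 * 8 / 240 = 48000000 (48 MHz)
*/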
static unsigned long ti_fapll_synth_get_frac_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct fapll_synth *synth = to_synth(hw);
unsigned long current_rate, frac_rate;
u32 post_div_m;
current_rate = ti_fapll_synth_recalc_rate(hw, parent_rate);
post_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
frac_rate = current_rate * post_div_m;
return frac_rate;
}
static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
unsigned long rate,
unsigned long parent_rate)
{
u32 post_div_m, synth_int_div = 0, synth_frac_div = 0, v;
post_div_m = DIV_ROUND_UP_ULL((u64)parent_rate * SYNTH_PHASE_K, rate);
post_div_m = post_div_m / SYNTH_MAX_INT_DIV;
if (post_div_m > SYNTH_MAX_DIV_M)
return -EINVAL;
if (!post_div_m)
post_div_m = 1;
for (; post_div_m < SYNTH_MAX_DIV_M; post_div_m++) {
synth_int_div = DIV_ROUND_UP_ULL((u64)parent_rate *
SYNTH_PHASE_K *
10000000,
rate * post_div_m);
synth_frac_div = synth_int_div % 10000000;
synth_int_div /= 10000000;
if (synth_int_div <= SYNTH_MAX_INT_DIV)
break;
}
if (synth_int_div > SYNTH_MAX_INT_DIV)
return -EINVAL;
v = readl_relaxed(synth->freq);
v &= ~0x1fffffff;
v |= (synth_int_div & SYNTH_MAX_INT_DIV) << 24;
v |= (synth_frac_div & 0xffffff);
v |= SYNTH_LDFREQ;
writel_relaxed(v, synth->freq);
return post_div_m;
}
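/*
* Worked example for the search above (illustrative values only): asking
* for 48 MHz from a 1440 MHz parent gives a first guess of
* post_div_m = ceil(1440000000 * 8 / 48000000) / 15 = 240 / 15 = 16; the
* first loop pass then finds synth_int_div = 15, synth_frac_div = 0, i.e.
* a 15.0 frequency divider combined with M = 16.
*/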
static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long r;
if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
return -EINVAL;
/* Only post divider m available with no fractional divider? */
if (!synth->freq) {
unsigned long frac_rate;
u32 synth_post_div_m;
frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
goto out;
}
r = *parent_rate * SYNTH_PHASE_K;
if (rate > r)
goto out;
r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
if (rate < r)
goto out;
r = rate;
out:
return r;
}
static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long frac_rate, post_rate = 0;
u32 post_div_m = 0, v;
if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
return -EINVAL;
/* Produce the rate with just post divider M? */
frac_rate = ti_fapll_synth_get_frac_rate(hw, parent_rate);
if (frac_rate < rate) {
if (!synth->freq)
return -EINVAL;
} else {
post_div_m = DIV_ROUND_UP(frac_rate, rate);
if (post_div_m && (post_div_m <= SYNTH_MAX_DIV_M))
post_rate = DIV_ROUND_UP(frac_rate, post_div_m);
if (!synth->freq && !post_rate)
return -EINVAL;
}
/* Need to recalculate the fractional divider? */
if ((post_rate != rate) && synth->freq)
post_div_m = ti_fapll_synth_set_frac_rate(synth,
rate,
parent_rate);
v = readl_relaxed(synth->div);
v &= ~SYNTH_MAX_DIV_M;
v |= post_div_m;
v |= SYNTH_LDMDIV1;
writel_relaxed(v, synth->div);
return 0;
}
static const struct clk_ops ti_fapll_synt_ops = {
.enable = ti_fapll_synth_enable,
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
.round_rate = ti_fapll_synth_round_rate,
.set_rate = ti_fapll_synth_set_rate,
};
static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
void __iomem *freq,
void __iomem *div,
int index,
const char *name,
const char *parent,
struct clk *pll_clk)
{
struct clk_init_data *init;
struct fapll_synth *synth;
struct clk *clk = ERR_PTR(-ENOMEM);
init = kzalloc(sizeof(*init), GFP_KERNEL);
if (!init)
return ERR_PTR(-ENOMEM);
init->ops = &ti_fapll_synt_ops;
init->name = name;
init->parent_names = &parent;
init->num_parents = 1;
synth = kzalloc(sizeof(*synth), GFP_KERNEL);
if (!synth)
goto free;
synth->fd = fd;
synth->index = index;
synth->freq = freq;
synth->div = div;
synth->name = name;
synth->hw.init = init;
synth->clk_pll = pll_clk;
clk = clk_register(NULL, &synth->hw);
if (IS_ERR(clk)) {
pr_err("failed to register clock\n");
goto free;
}
return clk;
free:
kfree(synth);
kfree(init);
return clk;
}
static void __init ti_fapll_setup(struct device_node *node)
{
struct fapll_data *fd;
struct clk_init_data *init = NULL;
const char *parent_name[2];
struct clk *pll_clk;
int i;
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (!fd)
return;
fd->outputs.clks = kzalloc(sizeof(struct clk *) *
MAX_FAPLL_OUTPUTS + 1,
GFP_KERNEL);
if (!fd->outputs.clks)
goto free;
init = kzalloc(sizeof(*init), GFP_KERNEL);
if (!init)
goto free;
init->ops = &ti_fapll_ops;
init->name = node->name;
init->num_parents = of_clk_get_parent_count(node);
if (init->num_parents != 2) {
pr_err("%pOFn must have two parents\n", node);
goto free;
}
of_clk_parent_fill(node, parent_name, 2);
init->parent_names = parent_name;
fd->clk_ref = of_clk_get(node, 0);
if (IS_ERR(fd->clk_ref)) {
pr_err("%pOFn could not get clk_ref\n", node);
goto free;
}
fd->clk_bypass = of_clk_get(node, 1);
if (IS_ERR(fd->clk_bypass)) {
pr_err("%pOFn could not get clk_bypass\n", node);
goto free;
}
fd->base = of_iomap(node, 0);
if (!fd->base) {
pr_err("%pOFn could not get IO base\n", node);
goto free;
}
if (fapll_is_ddr_pll(fd->base))
fd->bypass_bit_inverted = true;
fd->name = node->name;
fd->hw.init = init;
/* Register the parent PLL */
pll_clk = clk_register(NULL, &fd->hw);
if (IS_ERR(pll_clk))
goto unmap;
fd->outputs.clks[0] = pll_clk;
fd->outputs.clk_num++;
/*
* Set up the child synthesizers starting at index 1 as the
* PLL output is at index 0. We need to check the clock-indices
* for numbering in case there are holes in the synth mapping,
* and then probe the synth register to see if it has a FREQ
* register available.
*/
for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
const char *output_name;
void __iomem *freq, *div;
struct clk *synth_clk;
int output_instance;
u32 v;
if (of_property_read_string_index(node, "clock-output-names",
i, &output_name))
continue;
if (of_property_read_u32_index(node, "clock-indices", i,
&output_instance))
output_instance = i;
freq = fd->base + (output_instance * 8);
div = freq + 4;
/* Check for hardwired audio_pll_clk1 */
if (is_audio_pll_clk1(freq)) {
freq = NULL;
div = NULL;
} else {
/* Does the synthesizer have a FREQ register? */
v = readl_relaxed(freq);
if (!v)
freq = NULL;
}
synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
output_name, node->name,
pll_clk);
if (IS_ERR(synth_clk))
continue;
fd->outputs.clks[output_instance] = synth_clk;
fd->outputs.clk_num++;
clk_register_clkdev(synth_clk, output_name, NULL);
}
/* Register the child synthesizers as the FAPLL outputs */
of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
/* Add clock alias for the outputs */
kfree(init);
return;
unmap:
iounmap(fd->base);
free:
if (fd->clk_bypass)
clk_put(fd->clk_bypass);
if (fd->clk_ref)
clk_put(fd->clk_ref);
kfree(fd->outputs.clks);
kfree(fd);
kfree(init);
}
CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
| gpl-2.0 |
stefansaraev/linux | kernel/irq/chip.c | 151 | 25176 | /*
* linux/kernel/irq/chip.c
*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the core interrupt handling code, for irq-chip
* based architectures.
*
* Detailed information is available in Documentation/DocBook/genericirq
*/
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <trace/events/irq.h>
#include "internals.h"
/**
* irq_set_chip - set the irq chip for an irq
* @irq: irq number
* @chip: pointer to irq chip description structure
*/
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
if (!chip)
chip = &no_irq_chip;
desc->irq_data.chip = chip;
irq_put_desc_unlock(desc, flags);
/*
* For !CONFIG_SPARSE_IRQ make the irq show up in
* allocated_irqs.
*/
irq_mark_irq(irq);
return 0;
}
EXPORT_SYMBOL(irq_set_chip);
/**
* irq_set_irq_type - set the irq trigger type for an irq
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
return -EINVAL;
type &= IRQ_TYPE_SENSE_MASK;
ret = __irq_set_trigger(desc, irq, type);
irq_put_desc_busunlock(desc, flags);
return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
/**
* irq_set_handler_data - set irq handler data for an irq
* @irq: Interrupt number
* @data: Pointer to interrupt specific data
*
* Set the hardware irq controller data for an irq
*/
int irq_set_handler_data(unsigned int irq, void *data)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->irq_data.handler_data = data;
irq_put_desc_unlock(desc, flags);
return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
/**
* irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
* @irq_base: Interrupt number base
* @irq_offset: Interrupt number offset
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq at offset
*/
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->irq_data.msi_desc = entry;
if (entry && !irq_offset)
entry->irq = irq_base;
irq_put_desc_unlock(desc, flags);
return 0;
}
/**
* irq_set_msi_desc - set MSI descriptor data for an irq
* @irq: Interrupt number
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq
*/
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
return irq_set_msi_desc_off(irq, 0, entry);
}
/**
* irq_set_chip_data - set irq chip data for an irq
* @irq: Interrupt number
* @data: Pointer to chip specific data
*
* Set the hardware irq chip data for an irq
*/
int irq_set_chip_data(unsigned int irq, void *data)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->irq_data.chip_data = data;
irq_put_desc_unlock(desc, flags);
return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
struct irq_data *irq_get_irq_data(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
static void irq_state_clr_disabled(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
static void irq_state_set_disabled(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
static void irq_state_clr_masked(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
static void irq_state_set_masked(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
int irq_startup(struct irq_desc *desc, bool resend)
{
int ret = 0;
irq_state_clr_disabled(desc);
desc->depth = 0;
irq_domain_activate_irq(&desc->irq_data);
if (desc->irq_data.chip->irq_startup) {
ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
irq_state_clr_masked(desc);
} else {
irq_enable(desc);
}
if (resend)
check_irq_resend(desc, desc->irq_data.irq);
return ret;
}
void irq_shutdown(struct irq_desc *desc)
{
irq_state_set_disabled(desc);
desc->depth = 1;
if (desc->irq_data.chip->irq_shutdown)
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
else if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
irq_domain_deactivate_irq(&desc->irq_data);
irq_state_set_masked(desc);
}
void irq_enable(struct irq_desc *desc)
{
irq_state_clr_disabled(desc);
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
}
/**
* irq_disable - Mark interrupt disabled
* @desc: irq descriptor which should be disabled
*
* If the chip does not implement the irq_disable callback, we
* use a lazy disable approach. That means we mark the interrupt
* disabled, but leave the hardware unmasked. That's an
* optimization because we avoid the hardware access for the
* common case where no interrupt happens after we marked it
* disabled. If an interrupt happens, then the interrupt flow
* handler masks the line at the hardware level and marks it
* pending.
*/
void irq_disable(struct irq_desc *desc)
{
irq_state_set_disabled(desc);
if (desc->irq_data.chip->irq_disable) {
desc->irq_data.chip->irq_disable(&desc->irq_data);
irq_state_set_masked(desc);
}
}
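/*
* Illustrative lazy-disable sequence (added sketch of the behaviour the
* comment above describes, assuming a chip without an irq_disable
* callback):
*
*	irq_disable(desc);	only IRQD_IRQ_DISABLED gets set
*	<device raises the line>
*	the flow handler sees irqd_irq_disabled(), sets IRQS_PENDING and
*	masks the line (e.g. mask_ack_irq() in handle_level_irq())
*	enable_irq() later re-enables it and check_irq_resend() replays
*	the pending interrupt
*/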
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
cpumask_set_cpu(cpu, desc->percpu_enabled);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask_ack)
desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
else {
desc->irq_data.chip->irq_mask(&desc->irq_data);
if (desc->irq_data.chip->irq_ack)
desc->irq_data.chip->irq_ack(&desc->irq_data);
}
irq_state_set_masked(desc);
}
void mask_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask) {
desc->irq_data.chip->irq_mask(&desc->irq_data);
irq_state_set_masked(desc);
}
}
void unmask_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_unmask) {
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
}
}
void unmask_threaded_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
if (chip->flags & IRQCHIP_EOI_THREADED)
chip->irq_eoi(&desc->irq_data);
if (chip->irq_unmask) {
chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
}
}
/*
* handle_nested_irq - Handle a nested irq from an irq thread
* @irq: the interrupt number
*
* Handle interrupts which are nested into a threaded interrupt
* handler. The handler function is called inside the calling
* thread's context.
*/
void handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
irqreturn_t action_ret;
might_sleep();
raw_spin_lock_irq(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
raw_spin_lock_irq(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
out_unlock:
raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
static bool irq_check_poll(struct irq_desc *desc)
{
if (!(desc->istate & IRQS_POLL_INPROGRESS))
return false;
return irq_wait_for_poll(desc);
}
static bool irq_may_run(struct irq_desc *desc)
{
unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
/*
* If the interrupt is not in progress and is not an armed
* wakeup interrupt, proceed.
*/
if (!irqd_has_set(&desc->irq_data, mask))
return true;
/*
* If the interrupt is an armed wakeup source, mark it pending
* and suspended, disable it and notify the pm core about the
* event.
*/
if (irq_pm_check_wakeup(desc))
return false;
/*
* Handle a potential concurrent poll on a different core.
*/
return irq_check_poll(desc);
}
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Simple interrupts are either sent from a demultiplexing interrupt
* handler or come from hardware, where no interrupt hardware control
* is necessary.
*
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
handle_irq_event(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
/*
* Called unconditionally from handle_level_irq() and only for oneshot
* interrupts from handle_fasteoi_irq()
*/
static void cond_unmask_irq(struct irq_desc *desc)
{
/*
* We need to unmask in the following cases:
* - Standard level irq (IRQF_ONESHOT is not set)
* - Oneshot irq which did not wake the thread (caused by a
* spurious interrupt or a primary handler handling it
* completely).
*/
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
unmask_irq(desc);
}
/**
* handle_level_irq - Level type irq handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Level type interrupts are active as long as the hardware line has
* the active level. This may require masking the interrupt and unmasking
* it after the associated handler has acknowledged the device, so that
* the interrupt line goes back to inactive.
*/
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
/*
* If its disabled or no action available
* keep it masked and get out of here
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
handle_irq_event(desc);
cond_unmask_irq(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
if (desc->preflow_handler)
desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
if (!(desc->istate & IRQS_ONESHOT)) {
chip->irq_eoi(&desc->irq_data);
return;
}
/*
* We need to unmask in the following cases:
* - Oneshot irq which did not wake the thread (caused by a
* spurious interrupt or a primary handler handling it
* completely).
*/
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
chip->irq_eoi(&desc->irq_data);
unmask_irq(desc);
} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
chip->irq_eoi(&desc->irq_data);
}
}
/**
* handle_fasteoi_irq - irq handler for transparent controllers
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Only a single callback will be issued to the chip: an ->eoi()
* call when the interrupt has been serviced. This enables support
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
/*
* If its disabled or no action available
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
preflow_handler(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
* handle_edge_irq - edge type IRQ handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* The interrupt occurs on the falling and/or rising edge of a hardware
* signal. The occurrence is latched into the irq controller hardware
* and must be acked in order to be re-enabled. After the ack another
* interrupt can happen on the same source even before the first one
* is handled by the associated event handler. If this happens it
* might be necessary to disable (mask) the interrupt depending on the
* controller hardware. This requires re-enabling the interrupt inside
* the loop which handles the interrupts that arrived while the handler
* was running. If all pending interrupts are handled, the loop is left.
*/
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (!irq_may_run(desc)) {
desc->istate |= IRQS_PENDING;
mask_ack_irq(desc);
goto out_unlock;
}
/*
* If its disabled or no action available then mask it and get
* out of here.
*/
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
desc->istate |= IRQS_PENDING;
mask_ack_irq(desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
do {
if (unlikely(!desc->action)) {
mask_irq(desc);
goto out_unlock;
}
/*
* When another irq arrived while we were handling
* one, we could have masked the irq.
* Re-enable it, if it was not disabled in the meantime.
*/
if (unlikely(desc->istate & IRQS_PENDING)) {
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data))
unmask_irq(desc);
}
handle_irq_event(desc);
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
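/*
* Illustrative timeline for the loop in handle_edge_irq() (assumed
* hardware behaviour):
*
*	edge #1 latched -> irq_ack(), handle_irq_event() runs unlocked
*	edge #2 fires while the handler runs -> the nested invocation
*	fails irq_may_run(), sets IRQS_PENDING and calls mask_ack_irq()
*	the loop sees IRQS_PENDING, unmasks the line and services
*	edge #2 before leaving
*/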
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
* handle_edge_eoi_irq - edge eoi type IRQ handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Similar to handle_edge_irq above, but using eoi and w/o the
* mask/unmask logic.
*/
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (!irq_may_run(desc)) {
desc->istate |= IRQS_PENDING;
goto out_eoi;
}
/*
* If its disabled or no action available then mask it and get
* out of here.
*/
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
desc->istate |= IRQS_PENDING;
goto out_eoi;
}
kstat_incr_irqs_this_cpu(irq, desc);
do {
if (unlikely(!desc->action))
goto out_eoi;
handle_irq_event(desc);
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_eoi:
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
#endif
/**
* handle_percpu_irq - Per CPU local irq handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Per CPU interrupts on SMP machines without locking requirements
*/
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
kstat_incr_irqs_this_cpu(irq, desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
handle_irq_event_percpu(desc, desc->action);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
/**
* handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Per CPU interrupts on SMP machines without locking requirements. Same as
* handle_percpu_irq() above but with the following extras:
*
* action->percpu_dev_id is a pointer to percpu variables which
* contain the real device id for the cpu on which this handler is
* called
*/
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
irqreturn_t res;
kstat_incr_irqs_this_cpu(irq, desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
trace_irq_handler_entry(irq, action);
res = action->handler(irq, dev_id);
trace_irq_handler_exit(irq, action, res);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return;
if (!handle) {
handle = handle_bad_irq;
} else {
struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/*
* With hierarchical domains we might run into a
* situation where the outermost chip is not yet set
* up, but the inner chips are there. Instead of
* bailing we install the handler, but obviously we
* cannot enable/startup the interrupt at this point.
*/
while (irq_data) {
if (irq_data->chip != &no_irq_chip)
break;
/*
* Bail out if the outer chip is not set up
* and the interrupt is supposed to be started
* right away.
*/
if (WARN_ON(is_chained))
goto out;
/* Try the parent */
irq_data = irq_data->parent_data;
}
#endif
if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
goto out;
}
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->irq_data.chip != &no_irq_chip)
mask_ack_irq(desc);
irq_state_set_disabled(desc);
desc->depth = 1;
}
desc->handle_irq = handle;
desc->name = name;
if (handle != handle_bad_irq && is_chained) {
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
irq_startup(desc, true);
}
out:
irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
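/*
 * A minimal usage sketch: most callers do not use __irq_set_handler()
 * directly but go through the inline wrappers in <linux/irq.h>, e.g.
 *
 *	irq_set_handler(irq, handle_level_irq);		// is_chained = 0
 *	irq_set_chained_handler(irq, my_demux_handler);	// is_chained = 1
 *
 * where my_demux_handler is a hypothetical irqchip demultiplex handler.
 * The chained variant marks the irq noprobe/norequest/nothread and
 * starts it up immediately, as implemented above.
 */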
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handle, const char *name)
{
irq_set_chip(irq, chip);
__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return;
irq_settings_clr_and_set(desc, clr, set);
irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
if (irq_settings_has_no_balance_set(desc))
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
if (irq_settings_is_per_cpu(desc))
irqd_set(&desc->irq_data, IRQD_PER_CPU);
if (irq_settings_can_move_pcntxt(desc))
irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
if (irq_settings_is_level(desc))
irqd_set(&desc->irq_data, IRQD_LEVEL);
irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
/**
* irq_cpu_online - Invoke all irq_cpu_online functions.
*
* Iterate through all irqs and invoke the chip.irq_cpu_online()
* for each.
*/
void irq_cpu_online(void)
{
struct irq_desc *desc;
struct irq_chip *chip;
unsigned long flags;
unsigned int irq;
for_each_active_irq(irq) {
desc = irq_to_desc(irq);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
chip = irq_data_get_irq_chip(&desc->irq_data);
if (chip && chip->irq_cpu_online &&
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
!irqd_irq_disabled(&desc->irq_data)))
chip->irq_cpu_online(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
/**
* irq_cpu_offline - Invoke all irq_cpu_offline functions.
*
* Iterate through all irqs and invoke the chip.irq_cpu_offline()
* for each.
*/
void irq_cpu_offline(void)
{
struct irq_desc *desc;
struct irq_chip *chip;
unsigned long flags;
unsigned int irq;
for_each_active_irq(irq) {
desc = irq_to_desc(irq);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
chip = irq_data_get_irq_chip(&desc->irq_data);
if (chip && chip->irq_cpu_offline &&
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
!irqd_irq_disabled(&desc->irq_data)))
chip->irq_cpu_offline(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* irq_chip_ack_parent - Acknowledge the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_ack_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_ack(data);
}
/**
* irq_chip_mask_parent - Mask the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_mask_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_mask(data);
}
/**
* irq_chip_unmask_parent - Unmask the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_unmask_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_unmask(data);
}
/**
* irq_chip_eoi_parent - Invoke EOI on the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_eoi_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_eoi(data);
}
/**
* irq_chip_set_affinity_parent - Set affinity on the parent interrupt
* @data: Pointer to interrupt specific data
* @dest: The affinity mask to set
* @force: Flag to enforce setting (disable online checks)
*
 * Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest, bool force)
{
data = data->parent_data;
if (data->chip->irq_set_affinity)
return data->chip->irq_set_affinity(data, dest, force);
return -ENOSYS;
}
/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
*
* Iterate through the domain hierarchy of the interrupt and check
* whether a hw retrigger function exists. If yes, invoke it.
*/
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
for (data = data->parent_data; data; data = data->parent_data)
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
return -ENOSYS;
}
/**
* irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
* @data: Pointer to interrupt specific data
* @on: Whether to set or reset the wake-up capability of this irq
*
* Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
data = data->parent_data;
if (data->chip->irq_set_wake)
return data->chip->irq_set_wake(data, on);
return -ENOSYS;
}
#endif
/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
*/
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct irq_data *pos = NULL;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
for (; data; data = data->parent_data)
#endif
if (data->chip && data->chip->irq_compose_msi_msg)
pos = data;
if (!pos)
return -ENOSYS;
pos->chip->irq_compose_msi_msg(pos, msg);
return 0;
}
| gpl-2.0 |
bigzz/linux | arch/arm/mach-orion5x/tsx09-common.c | 151 | 2553 | /*
* QNAP TS-x09 Boards common functions
*
* Maintainers: Lennert Buytenhek <buytenh@marvell.com>
* Byron Bradley <byron.bbradley@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mv643xx_eth.h>
#include <linux/timex.h>
#include <linux/serial_reg.h>
#include <mach/orion5x.h>
#include "tsx09-common.h"
#include "common.h"
/*****************************************************************************
* QNAP TS-x09 specific power off method via UART1-attached PIC
****************************************************************************/
#define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2))
void qnap_tsx09_power_off(void)
{
/* 19200 baud divisor */
const unsigned divisor = ((orion5x_tclk + (8 * 19200)) / (16 * 19200));
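/*
 * The "+ (8 * 19200)" term rounds the divisor to the nearest integer
 * rather than truncating. As a worked example (assuming the common
 * 166 MHz Orion5x TCLK; the actual value is board specific):
 * (166666667 + 153600) / 307200 = 543, and 166666667 / (16 * 543)
 * is roughly 19184 baud, well within tolerance of 19200.
 */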
pr_info("%s: triggering power-off...\n", __func__);
/* hijack uart1 and reset into sane state (19200,8n1) */
writel(0x83, UART1_REG(LCR));
writel(divisor & 0xff, UART1_REG(DLL));
writel((divisor >> 8) & 0xff, UART1_REG(DLM));
writel(0x03, UART1_REG(LCR));
writel(0x00, UART1_REG(IER));
writel(0x00, UART1_REG(FCR));
writel(0x00, UART1_REG(MCR));
/* send the power-off command 'A' to PIC */
writel('A', UART1_REG(TX));
}
/*****************************************************************************
* Ethernet
****************************************************************************/
struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
{
u_int8_t addr[6];
if (!mac_pton(addr_str, addr))
return -1;
printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6);
return 0;
}
/*
* The 'NAS Config' flash partition has an ext2 filesystem which
* contains a file that has the ethernet MAC address in plain text
* (format "xx:xx:xx:xx:xx:xx\n").
*/
void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
{
unsigned long addr;
for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
void __iomem *nor_page;
int ret = 0;
nor_page = ioremap(addr, 1024);
if (nor_page != NULL) {
ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
iounmap(nor_page);
}
if (ret == 0)
break;
}
}
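/*
 * A minimal usage sketch: a board setup file would point this at the flash
 * window holding the 'NAS Config' partition (the base and size names below
 * are hypothetical placeholders for the board's actual partition layout):
 *
 *	qnap_tsx09_find_mac_addr(QNAP_TSX09_NOR_BASE + CONFIG_PART_OFFSET,
 *				 CONFIG_PART_SIZE);
 *
 * which probes the region in 1 KiB steps until a line formatted as
 * "xx:xx:xx:xx:xx:xx\n" parses via mac_pton().
 */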
| gpl-2.0 |
mangelajo/linux-2.6.28.2-lpc313x-nbee | arch/x86/boot/compressed/misc.c | 151 | 11235 | /*
* misc.c
*
* This is a collection of several routines from gzip-1.0.3
* adapted for Linux.
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
* puts by Nick Holloway 1993, better puts by Martin Mares 1995
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
/*
* we have to be careful, because no indirections are allowed here, and
* paravirt_ops is a kind of one. As it will only run in baremetal anyway,
* we just keep it from happening
*/
#undef CONFIG_PARAVIRT
#ifdef CONFIG_X86_32
#define _ASM_X86_DESC_H 1
#endif
#ifdef CONFIG_X86_64
#define _LINUX_STRING_H_ 1
#define __LINUX_BITMAP_H 1
#endif
#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
/* WARNING!!
* This code is compiled with -fPIC and it is relocated dynamically
* at run time, but no relocation processing is performed.
* This means that it is not safe to place pointers in static structures.
*/
/*
 * Getting to provably safe in-place decompression is hard.
* Worst case behaviours need to be analyzed.
* Background information:
*
* The file layout is:
* magic[2]
* method[1]
* flags[1]
* timestamp[4]
* extraflags[1]
* os[1]
* compressed data blocks[N]
* crc[4] orig_len[4]
*
* resulting in 18 bytes of non compressed data overhead.
*
* Files divided into blocks
* 1 bit (last block flag)
* 2 bits (block type)
*
 * A block occurs every 32K - 1 bytes or when 50% compression
 * has been achieved. The smallest block type encoding is always used.
*
* stored:
* 32 bits length in bytes.
*
* fixed:
* magic fixed tree.
* symbols.
*
* dynamic:
* dynamic tree encoding.
* symbols.
*
*
* The buffer for decompression in place is the length of the
* uncompressed data, plus a small amount extra to keep the algorithm safe.
* The compressed data is placed at the end of the buffer. The output
* pointer is placed at the start of the buffer and the input pointer
* is placed where the compressed data starts. Problems will occur
* when the output pointer overruns the input pointer.
*
* The output pointer can only overrun the input pointer if the input
* pointer is moving faster than the output pointer. A condition only
* triggered by data whose compressed form is larger than the uncompressed
* form.
*
* The worst case at the block level is a growth of the compressed data
* of 5 bytes per 32767 bytes.
*
* The worst case internal to a compressed block is very hard to figure.
 * The worst case can at least be bounded by having one bit that represents
* 32764 bytes and then all of the rest of the bytes representing the very
* very last byte.
*
* All of which is enough to compute an amount of extra data that is required
* to be safe. To avoid problems at the block level allocating 5 extra bytes
 * per 32767 bytes of data is sufficient. To avoid problems internal to a
* block adding an extra 32767 bytes (the worst case uncompressed block size)
* is sufficient, to ensure that in the worst case the decompressed data for
* block will stop the byte before the compressed data for a block begins.
* To avoid problems with the compressed data's meta information an extra 18
* bytes are needed. Leading to the formula:
*
* extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
*
* Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
* Adding 32768 instead of 32767 just makes for round numbers.
 * Adding the decompressor_size is necessary as it must live after all
* of the data as well. Last I measured the decompressor is about 14K.
* 10K of actual data and 4K of bss.
*
*/
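/*
 * As a worked example of the formula above (assuming a hypothetical 4 MiB
 * uncompressed kernel and the ~14K decompressor measured above):
 *
 *	extra_bytes = (0x400000 >> 12) + 32768 + 18 + 14336
 *		    = 1024 + 32768 + 18 + 14336
 *		    = 48146 bytes, i.e. roughly 47 KiB of slack.
 */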
/*
* gzip declarations
*/
#define OF(args) args
#define STATIC static
#undef memset
#undef memcpy
#define memzero(s, n) memset((s), 0, (n))
typedef unsigned char uch;
typedef unsigned short ush;
typedef unsigned long ulg;
/*
* Window size must be at least 32k, and a power of two.
 * We don't actually have a window, just a huge output buffer,
* so we report a 2G window size, as that should always be
* larger than our output buffer:
*/
#define WSIZE 0x80000000
/* Input buffer: */
static unsigned char *inbuf;
/* Sliding window buffer (and final output buffer): */
static unsigned char *window;
/* Valid bytes in inbuf: */
static unsigned insize;
/* Index of next byte to be processed in inbuf: */
static unsigned inptr;
/* Bytes in output buffer: */
static unsigned outcnt;
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
#define RESERVED 0xC0 /* bit 6, 7: reserved */
#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
/* Diagnostic functions */
#ifdef DEBUG
# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
# define Trace(x) do { fprintf x; } while (0)
# define Tracev(x) do { if (verbose) fprintf x ; } while (0)
# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0)
# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0)
#else
# define Assert(cond, msg)
# define Trace(x)
# define Tracev(x)
# define Tracevv(x)
# define Tracec(c, x)
# define Tracecv(c, x)
#endif
static int fill_inbuf(void);
static void flush_window(void);
static void error(char *m);
/*
* This is set up by the setup-routine at boot-time
*/
static struct boot_params *real_mode; /* Pointer to real-mode data */
static int quiet;
extern unsigned char input_data[];
extern int input_len;
static long bytes_out;
static void *memset(void *s, int c, unsigned n);
static void *memcpy(void *dest, const void *src, unsigned n);
static void __putstr(int, const char *);
#define putstr(__x) __putstr(0, __x)
#ifdef CONFIG_X86_64
#define memptr long
#else
#define memptr unsigned
#endif
static memptr free_mem_ptr;
static memptr free_mem_end_ptr;
static char *vidmem;
static int vidport;
static int lines, cols;
#include "../../../../lib/inflate.c"
static void scroll(void)
{
int i;
memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
vidmem[i] = ' ';
}
static void __putstr(int error, const char *s)
{
int x, y, pos;
char c;
#ifndef CONFIG_X86_VERBOSE_BOOTUP
if (!error)
return;
#endif
#ifdef CONFIG_X86_32
if (real_mode->screen_info.orig_video_mode == 0 &&
lines == 0 && cols == 0)
return;
#endif
x = real_mode->screen_info.orig_x;
y = real_mode->screen_info.orig_y;
while ((c = *s++) != '\0') {
if (c == '\n') {
x = 0;
if (++y >= lines) {
scroll();
y--;
}
} else {
vidmem[(x + cols * y) * 2] = c;
if (++x >= cols) {
x = 0;
if (++y >= lines) {
scroll();
y--;
}
}
}
}
real_mode->screen_info.orig_x = x;
real_mode->screen_info.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */
outb(14, vidport);
outb(0xff & (pos >> 9), vidport+1);
outb(15, vidport);
outb(0xff & (pos >> 1), vidport+1);
}
static void *memset(void *s, int c, unsigned n)
{
int i;
char *ss = s;
for (i = 0; i < n; i++)
ss[i] = c;
return s;
}
static void *memcpy(void *dest, const void *src, unsigned n)
{
int i;
const char *s = src;
char *d = dest;
for (i = 0; i < n; i++)
d[i] = s[i];
return dest;
}
/* ===========================================================================
* Fill the input buffer. This is called only when the buffer is empty
* and at least one byte is really needed.
*/
static int fill_inbuf(void)
{
error("ran out of input data");
return 0;
}
/* ===========================================================================
* Write the output window window[0..outcnt-1] and update crc and bytes_out.
* (Used for the decompressed data only.)
*/
static void flush_window(void)
{
/* With my window equal to my output buffer
* I only need to compute the crc here.
*/
unsigned long c = crc; /* temporary variable */
unsigned n;
unsigned char *in, ch;
in = window;
for (n = 0; n < outcnt; n++) {
ch = *in++;
c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
}
crc = c;
bytes_out += (unsigned long)outcnt;
outcnt = 0;
}
static void error(char *x)
{
__putstr(1, "\n\n");
__putstr(1, x);
__putstr(1, "\n\n -- System halted");
while (1)
asm("hlt");
}
static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
Elf64_Ehdr ehdr;
Elf64_Phdr *phdrs, *phdr;
#else
Elf32_Ehdr ehdr;
Elf32_Phdr *phdrs, *phdr;
#endif
void *dest;
int i;
memcpy(&ehdr, output, sizeof(ehdr));
if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
ehdr.e_ident[EI_MAG3] != ELFMAG3) {
error("Kernel is not a valid ELF file");
return;
}
if (!quiet)
putstr("Parsing ELF... ");
phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
if (!phdrs)
error("Failed to allocate space for phdrs");
memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
for (i = 0; i < ehdr.e_phnum; i++) {
phdr = &phdrs[i];
switch (phdr->p_type) {
case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
dest = output;
dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
dest = (void *)(phdr->p_paddr);
#endif
memcpy(dest,
output + phdr->p_offset,
phdr->p_filesz);
break;
default: /* Ignore other PT_* */ break;
}
}
}
asmlinkage void decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output)
{
real_mode = rmode;
if (real_mode->hdr.loadflags & QUIET_FLAG)
quiet = 1;
if (real_mode->screen_info.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
} else {
vidmem = (char *) 0xb8000;
vidport = 0x3d4;
}
lines = real_mode->screen_info.orig_video_lines;
cols = real_mode->screen_info.orig_video_cols;
window = output; /* Output buffer (Normally at 1M) */
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
inbuf = input_data; /* Input buffer */
insize = input_len;
inptr = 0;
#ifdef CONFIG_X86_64
if ((unsigned long)output & (__KERNEL_ALIGN - 1))
error("Destination address not 2M aligned");
if ((unsigned long)output >= 0xffffffffffUL)
error("Destination address too large");
#else
if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1))
error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
error("Destination address too large");
#ifndef CONFIG_RELOCATABLE
if ((u32)output != LOAD_PHYSICAL_ADDR)
error("Wrong destination address");
#endif
#endif
makecrc();
if (!quiet)
putstr("\nDecompressing Linux... ");
gunzip();
parse_elf(output);
if (!quiet)
putstr("done.\nBooting the kernel.\n");
return;
}
| gpl-2.0 |
zCraig/android_kernel_acer_tf30 | drivers/tty/serial/msm_smd_tty.c | 151 | 5679 | /* drivers/tty/serial/msm_smd_tty.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <mach/msm_smd.h>
#define MAX_SMD_TTYS 32
struct smd_tty_info {
struct tty_port port;
smd_channel_t *ch;
};
struct smd_tty_channel_desc {
int id;
const char *name;
};
static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
static const struct smd_tty_channel_desc smd_default_tty_channels[] = {
{ .id = 0, .name = "SMD_DS" },
{ .id = 27, .name = "SMD_GPSNMEA" },
};
static const struct smd_tty_channel_desc *smd_tty_channels =
smd_default_tty_channels;
static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels);
static void smd_tty_notify(void *priv, unsigned event)
{
unsigned char *ptr;
int avail;
struct smd_tty_info *info = priv;
struct tty_struct *tty;
if (event != SMD_EVENT_DATA)
return;
tty = tty_port_tty_get(&info->port);
if (!tty)
return;
for (;;) {
if (test_bit(TTY_THROTTLED, &tty->flags))
break;
avail = smd_read_avail(info->ch);
if (avail == 0)
break;
avail = tty_prepare_flip_string(tty, &ptr, avail);
if (smd_read(info->ch, ptr, avail) != avail) {
/* shouldn't be possible since we're in interrupt
** context here and nobody else could 'steal' our
** characters.
*/
pr_err("OOPS - smd_tty_buffer mismatch?!");
}
tty_flip_buffer_push(tty);
}
/* XXX only when writable and necessary */
tty_wakeup(tty);
tty_kref_put(tty);
}
static int smd_tty_port_activate(struct tty_port *tport, struct tty_struct *tty)
{
int i, res = 0;
int n = tty->index;
const char *name = NULL;
struct smd_tty_info *info = smd_tty + n;
for (i = 0; i < smd_tty_channels_len; i++) {
if (smd_tty_channels[i].id == n) {
name = smd_tty_channels[i].name;
break;
}
}
if (!name)
return -ENODEV;
if (info->ch)
smd_kick(info->ch);
else
res = smd_open(name, &info->ch, info, smd_tty_notify);
if (!res)
tty->driver_data = info;
return res;
}
static void smd_tty_port_shutdown(struct tty_port *tport)
{
struct smd_tty_info *info;
struct tty_struct *tty = tty_port_tty_get(tport);
info = tty->driver_data;
if (info->ch) {
smd_close(info->ch);
info->ch = 0;
}
tty->driver_data = 0;
tty_kref_put(tty);
}
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
struct smd_tty_info *info = smd_tty + tty->index;
return tty_port_open(&info->port, tty, f);
}
static void smd_tty_close(struct tty_struct *tty, struct file *f)
{
struct smd_tty_info *info = tty->driver_data;
tty_port_close(&info->port, tty, f);
}
static int smd_tty_write(struct tty_struct *tty,
const unsigned char *buf, int len)
{
struct smd_tty_info *info = tty->driver_data;
int avail;
/* if we're writing to a packet channel we will
** never be able to write more data than there
** is currently space for
*/
avail = smd_write_avail(info->ch);
if (len > avail)
len = avail;
return smd_write(info->ch, buf, len);
}
static int smd_tty_write_room(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
return smd_write_avail(info->ch);
}
static int smd_tty_chars_in_buffer(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
return smd_read_avail(info->ch);
}
static void smd_tty_unthrottle(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
smd_kick(info->ch);
}
static const struct tty_port_operations smd_tty_port_ops = {
.shutdown = smd_tty_port_shutdown,
.activate = smd_tty_port_activate,
};
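/*
 * The tty_port layer invokes .activate on the first open and .shutdown
 * on the last close, so smd_tty_open() and smd_tty_close() below can
 * simply delegate to tty_port_open()/tty_port_close() and let the core
 * serialize concurrent opens against the SMD channel setup above.
 */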
static const struct tty_operations smd_tty_ops = {
.open = smd_tty_open,
.close = smd_tty_close,
.write = smd_tty_write,
.write_room = smd_tty_write_room,
.chars_in_buffer = smd_tty_chars_in_buffer,
.unthrottle = smd_tty_unthrottle,
};
static struct tty_driver *smd_tty_driver;
static int __init smd_tty_init(void)
{
int ret, i;
smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
if (smd_tty_driver == 0)
return -ENOMEM;
smd_tty_driver->owner = THIS_MODULE;
smd_tty_driver->driver_name = "smd_tty_driver";
smd_tty_driver->name = "smd";
smd_tty_driver->major = 0;
smd_tty_driver->minor_start = 0;
smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
smd_tty_driver->init_termios = tty_std_termios;
smd_tty_driver->init_termios.c_iflag = 0;
smd_tty_driver->init_termios.c_oflag = 0;
smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
smd_tty_driver->init_termios.c_lflag = 0;
smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(smd_tty_driver, &smd_tty_ops);
ret = tty_register_driver(smd_tty_driver);
if (ret)
return ret;
for (i = 0; i < smd_tty_channels_len; i++) {
tty_port_init(&smd_tty[smd_tty_channels[i].id].port);
smd_tty[smd_tty_channels[i].id].port.ops = &smd_tty_port_ops;
tty_register_device(smd_tty_driver, smd_tty_channels[i].id, 0);
}
return 0;
}
module_init(smd_tty_init);
| gpl-2.0 |
OptiPop/kernel_asus_grouper | drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 407 | 53889 | /******************************************************************************
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "dm_common.h"
#include "phy_common.h"
#include "../pci.h"
#include "../base.h"
struct dig_t dm_digtable;
static struct ps_t dm_pstable;
#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
#define RTLPRIV (struct rtl_priv *)
#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
((RTLPRIV(_priv))->mac80211.opmode == \
NL80211_IFTYPE_ADHOC) ? \
((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb)
static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
0x7f8001fe,
0x788001e2,
0x71c001c7,
0x6b8001ae,
0x65400195,
0x5fc0017f,
0x5a400169,
0x55400155,
0x50800142,
0x4c000130,
0x47c0011f,
0x43c0010f,
0x40000100,
0x3c8000f2,
0x390000e4,
0x35c000d7,
0x32c000cb,
0x300000c0,
0x2d4000b5,
0x2ac000ab,
0x288000a2,
0x26000098,
0x24000090,
0x22000088,
0x20000080,
0x1e400079,
0x1c800072,
0x1b00006c,
0x19800066,
0x18000060,
0x16c0005b,
0x15800056,
0x14400051,
0x1300004c,
0x12000048,
0x11000044,
0x10000040,
};
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
};
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
dm_digtable.dig_enable_flag = true;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable.cur_igvalue = 0x20;
dm_digtable.pre_igvalue = 0x0;
dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
dm_digtable.rx_gain_range_max = DM_DIG_MAX;
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
long rssi_val_min = 0;
if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
(dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
rssi_val_min =
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
rtlpriv->dm.undecorated_smoothed_pwdb) ?
rtlpriv->dm.undecorated_smoothed_pwdb :
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
else
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
} else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
} else if (dm_digtable.curmultista_connectstate ==
DIG_MULTISTA_CONNECT) {
rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
}
return (u8) rssi_val_min;
}
static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_cck_fail);
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
"cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
falsealm_cnt->cnt_parity_fail,
falsealm_cnt->cnt_rate_illegal,
falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
falsealm_cnt->cnt_ofdm_fail,
falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value_igi = dm_digtable.cur_igvalue;
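/*
 * Step the initial gain index (IGI) against the false alarm count:
 * below DM_DIG_FA_TH0 back it off by one, between TH0 and TH1 hold,
 * between TH1 and TH2 raise it by one, at or above TH2 raise it by
 * two, then clamp to [DM_DIG_FA_LOWER, DM_DIG_FA_UPPER]. A count
 * above 10000 forces a fixed 0x32 as a last resort.
 */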
if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
value_igi--;
else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
value_igi += 0;
else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
value_igi++;
else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
if (value_igi > DM_DIG_FA_UPPER)
value_igi = DM_DIG_FA_UPPER;
else if (value_igi < DM_DIG_FA_LOWER)
value_igi = DM_DIG_FA_LOWER;
if (rtlpriv->falsealm_cnt.cnt_all > 10000)
value_igi = 0x32;
dm_digtable.cur_igvalue = value_igi;
rtl92c_dm_write_dig(hw);
}
static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
if ((dm_digtable.backoff_val - 2) <
dm_digtable.backoff_val_range_min)
dm_digtable.backoff_val =
dm_digtable.backoff_val_range_min;
else
dm_digtable.backoff_val -= 2;
} else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
if ((dm_digtable.backoff_val + 2) >
dm_digtable.backoff_val_range_max)
dm_digtable.backoff_val =
dm_digtable.backoff_val_range_max;
else
dm_digtable.backoff_val += 2;
}
if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
dm_digtable.rx_gain_range_max)
dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
else if ((dm_digtable.rssi_val_min + 10 -
dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
else
dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
dm_digtable.backoff_val;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("rssi_val_min = %x backoff_val %x\n",
dm_digtable.rssi_val_min, dm_digtable.backoff_val));
rtl92c_dm_write_dig(hw);
}
static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
{
static u8 initialized; /* initialized to false */
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
bool multi_sta = false;
if (mac->opmode == NL80211_IFTYPE_ADHOC)
multi_sta = true;
if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
DIG_STA_DISCONNECT)) {
initialized = false;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
} else if (initialized == false) {
initialized = true;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
dm_digtable.cur_igvalue = 0x20;
rtl92c_dm_write_dig(hw);
}
if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
(dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
if (dm_digtable.dig_ext_port_stage ==
DIG_EXT_PORT_STAGE_2) {
dm_digtable.cur_igvalue = 0x20;
rtl92c_dm_write_dig(hw);
}
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
} else if (rssi_strength > dm_digtable.rssi_highthresh) {
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
rtl92c_dm_ctrl_initgain_by_fa(hw);
}
} else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
dm_digtable.cur_igvalue = 0x20;
rtl92c_dm_write_dig(hw);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("curmultista_connectstate = "
"%x dig_ext_port_stage %x\n",
dm_digtable.curmultista_connectstate,
dm_digtable.dig_ext_port_stage));
}
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("presta_connectstate = %x,"
" cursta_connectctate = %x\n",
dm_digtable.presta_connectstate,
dm_digtable.cursta_connectctate));
if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
|| dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
|| dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
dm_digtable.rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
rtl92c_dm_ctrl_initgain_by_rssi(hw);
}
} else {
dm_digtable.rssi_val_min = 0;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
dm_digtable.cur_igvalue = 0x20;
dm_digtable.pre_igvalue = 0;
rtl92c_dm_write_dig(hw);
}
}
static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
if (dm_digtable.rssi_val_min <= 25)
dm_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LowRssi;
else
dm_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HighRssi;
} else {
if (dm_digtable.rssi_val_min <= 20)
dm_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LowRssi;
else
dm_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HighRssi;
}
} else {
dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
dm_digtable.cur_cck_fa_state =
CCK_FA_STAGE_High;
else
dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
if (dm_digtable.pre_cck_fa_state !=
dm_digtable.cur_cck_fa_state) {
if (dm_digtable.cur_cck_fa_state ==
CCK_FA_STAGE_Low)
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
0x83);
else
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
0xcd);
dm_digtable.pre_cck_fa_state =
dm_digtable.cur_cck_fa_state;
}
rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
if (IS_92C_SERIAL(rtlhal->version))
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
MASKBYTE2, 0xd7);
} else {
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
if (IS_92C_SERIAL(rtlhal->version))
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
MASKBYTE2, 0xd3);
}
dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
}
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
if (mac->act_scanning)
return;
if (mac->link_state >= MAC80211_LINKED)
dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
else
dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
rtl92c_dm_initial_gain_sta(hw);
rtl92c_dm_initial_gain_multi_sta(hw);
rtl92c_dm_cck_packet_detection_thresh(hw);
dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
}
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.dm_initialgain_enable == false)
return;
if (dm_digtable.dig_enable_flag == false)
return;
rtl92c_dm_ctrl_initgain_by_twoport(hw);
}
static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dynamic_txpower_enable = false;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
("cur_igvalue = 0x%x, "
"pre_igvalue = 0x%x, backoff_val = %d\n",
dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
dm_digtable.backoff_val));
if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
dm_digtable.cur_igvalue);
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
dm_digtable.cur_igvalue);
dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
}
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
u8 h2c_parameter[3] = { 0 };
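/*
 * NOTE: the bare return below stubs this function out; the PWDB
 * bookkeeping and the H2C RSSI report that follow are unreachable.
 */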
return;
if (tmpentry_max_pwdb != 0) {
rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
tmpentry_max_pwdb;
} else {
rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
}
if (tmpentry_min_pwdb != 0xff) {
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
tmpentry_min_pwdb;
} else {
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
}
h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
h2c_parameter[0] = 0;
rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
}
EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
static u64 last_rxok_cnt;
static u32 last_bt_edca_ul;
static u32 last_bt_edca_dl;
u64 cur_txok_cnt = 0;
u64 cur_rxok_cnt = 0;
u32 edca_be_ul = 0x5ea42b;
u32 edca_be_dl = 0x5ea42b;
bool bt_change_edca = false;
if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
(last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
rtlpriv->dm.current_turbo_edca = false;
last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
}
if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
bt_change_edca = true;
}
if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
edca_be_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
bt_change_edca = true;
}
if (mac->link_state != MAC80211_LINKED) {
rtlpriv->dm.current_turbo_edca = false;
return;
}
if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
if (!(edca_be_dl & 0xffff0000))
edca_be_dl |= 0x005e0000;
}
if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting))) {
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
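/*
 * Heuristic: if unicast RX traffic in the last polling interval is
 * more than 4x the TX traffic, treat the link as download dominant
 * and program the downlink EDCA parameter set, otherwise the uplink
 * set. The register is only rewritten when the state changes.
 */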
if (cur_rxok_cnt > 4 * cur_txok_cnt) {
if (!rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv,
REG_EDCA_BE_PARAM,
edca_be_dl);
rtlpriv->dm.is_cur_rdlstate = true;
}
} else {
if (rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv,
REG_EDCA_BE_PARAM,
edca_be_ul);
rtlpriv->dm.is_cur_rdlstate = false;
}
}
rtlpriv->dm.current_turbo_edca = true;
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
(u8 *) (&tmp));
rtlpriv->dm.current_turbo_edca = false;
}
}
rtlpriv->dm.is_any_nonbepkts = false;
last_txok_cnt = rtlpriv->stats.txbytesunicast;
last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 thermalvalue, delta, delta_lck, delta_iqk;
long ele_a, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
s8 txpwr_level[2] = {0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
"eeprom_thermalmeter 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter));
rtl92c_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
if (is2t)
rf = 2;
else
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("Initial pathA ele_d reg0x%x = 0x%lx, "
"ofdm_index=0x%x\n",
ROFDM0_XATXIQIMBALANCE,
ele_d, ofdm_index_old[0]));
break;
}
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
if (ele_d == (ofdmswing_table[i] &
MASKOFDM_D)) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
("Initial pathB ele_d reg0x%x = "
"0x%lx, ofdm_index=0x%x\n",
ROFDM0_XBTXIQIMBALANCE, ele_d,
ofdm_index_old[1]));
break;
}
}
}
temp_cck =
rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
if (memcmp((void *)&temp_cck,
(void *)&cckswing_table_ch14[i][2],
4) == 0) {
cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
("Initial reg0x%x = 0x%lx, "
"cck_index=0x%x, ch 14 %d\n",
RCCK0_TXFILTER2, temp_cck,
cck_index_old,
rtlpriv->dm.cck_inch14));
break;
}
} else {
if (memcmp((void *)&temp_cck,
(void *)
&cckswing_table_ch1ch13[i][2],
4) == 0) {
cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
("Initial reg0x%x = 0x%lx, "
"cck_index=0x%x, ch14 %d\n",
RCCK0_TXFILTER2, temp_cck,
cck_index_old,
rtlpriv->dm.cck_inch14));
break;
}
}
}
if (!rtlpriv->dm.thermalvalue) {
rtlpriv->dm.thermalvalue =
rtlefuse->eeprom_thermalmeter;
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
(rtlpriv->dm.thermalvalue - thermalvalue);
delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
(thermalvalue - rtlpriv->dm.thermalvalue_lck) :
(rtlpriv->dm.thermalvalue_lck - thermalvalue);
delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
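/*
 * The deltas are absolute differences against the last recorded
 * thermal readings: a drift of more than 1 triggers LC calibration
 * below, more than 3 re-runs IQ calibration, and any non-zero delta
 * re-indexes the OFDM/CCK swing tables.
 */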
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
"eeprom_thermalmeter 0x%x delta 0x%x "
"delta_lck 0x%x delta_iqk 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter, delta, delta_lck,
delta_iqk));
if (delta_lck > 1) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92c_phy_lc_calibrate(hw);
}
if (delta > 0 && rtlpriv->dm.txpower_track_control) {
if (thermalvalue > rtlpriv->dm.thermalvalue) {
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] -= delta;
rtlpriv->dm.cck_index -= delta;
} else {
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] += delta;
rtlpriv->dm.cck_index += delta;
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("temp OFDM_A_index=0x%x, "
"OFDM_B_index=0x%x,"
"cck_index=0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.ofdm_index[1],
rtlpriv->dm.cck_index));
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("temp OFDM_A_index=0x%x,"
"cck_index=0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.cck_index));
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
for (i = 0; i < rf; i++)
ofdm_index[i] =
rtlpriv->dm.ofdm_index[i]
+ 1;
cck_index = rtlpriv->dm.cck_index + 1;
} else {
for (i = 0; i < rf; i++)
ofdm_index[i] =
rtlpriv->dm.ofdm_index[i];
cck_index = rtlpriv->dm.cck_index;
}
for (i = 0; i < rf; i++) {
if (txpwr_level[i] >= 0 &&
txpwr_level[i] <= 26) {
if (thermalvalue >
rtlefuse->eeprom_thermalmeter) {
if (delta < 5)
ofdm_index[i] -= 1;
else
ofdm_index[i] -= 2;
} else if (delta > 5 && thermalvalue <
rtlefuse->
eeprom_thermalmeter) {
ofdm_index[i] += 1;
}
} else if (txpwr_level[i] >= 27 &&
txpwr_level[i] <= 32
&& thermalvalue >
rtlefuse->eeprom_thermalmeter) {
if (delta < 5)
ofdm_index[i] -= 1;
else
ofdm_index[i] -= 2;
} else if (txpwr_level[i] >= 32 &&
txpwr_level[i] <= 38 &&
thermalvalue >
rtlefuse->eeprom_thermalmeter
&& delta > 5) {
ofdm_index[i] -= 1;
}
}
if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
if (thermalvalue >
rtlefuse->eeprom_thermalmeter) {
if (delta < 5)
cck_index -= 1;
else
cck_index -= 2;
} else if (delta > 5 && thermalvalue <
rtlefuse->eeprom_thermalmeter) {
cck_index += 1;
}
} else if (txpwr_level[i] >= 27 &&
txpwr_level[i] <= 32 &&
thermalvalue >
rtlefuse->eeprom_thermalmeter) {
if (delta < 5)
cck_index -= 1;
else
cck_index -= 2;
} else if (txpwr_level[i] >= 32 &&
txpwr_level[i] <= 38 &&
thermalvalue > rtlefuse->eeprom_thermalmeter
&& delta > 5) {
cck_index -= 1;
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
ofdm_index[i] = OFDM_TABLE_SIZE - 1;
else if (ofdm_index[i] < ofdm_min_index)
ofdm_index[i] = ofdm_min_index;
}
if (cck_index > CCK_TABLE_SIZE - 1)
cck_index = CCK_TABLE_SIZE - 1;
else if (cck_index < 0)
cck_index = 0;
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("new OFDM_A_index=0x%x, "
"OFDM_B_index=0x%x,"
"cck_index=0x%x\n",
ofdm_index[0], ofdm_index[1],
cck_index));
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("new OFDM_A_index=0x%x,"
"cck_index=0x%x\n",
ofdm_index[0], cck_index));
}
}
if (rtlpriv->dm.txpower_track_control && delta != 0) {
ele_d =
(ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
val_x = rtlphy->reg_e94;
val_y = rtlphy->reg_e9c;
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
if ((val_y & 0x00000200) != 0)
val_y = val_y | 0xFFFFFC00;
ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
value32 = (ele_d << 22) |
((ele_c & 0x3F) << 16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(31), value32);
value32 = ((val_y * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(29), value32);
} else {
rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD,
ofdmswing_table[ofdm_index[0]]);
rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(31) | BIT(29), 0x00);
}
if (!rtlpriv->dm.cck_inch14) {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch1ch13[cck_index]
[0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch1ch13[cck_index]
[1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch1ch13[cck_index]
[2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch1ch13[cck_index]
[3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch1ch13[cck_index]
[4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch1ch13[cck_index]
[5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch1ch13[cck_index]
[6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch1ch13[cck_index]
[7]);
} else {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch14[cck_index]
[0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch14[cck_index]
[1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch14[cck_index]
[2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch14[cck_index]
[3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch14[cck_index]
[4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch14[cck_index]
[5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch14[cck_index]
[6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch14[cck_index]
[7]);
}
if (is2t) {
ele_d = (ofdmswing_table[ofdm_index[1]] &
0xFFC00000) >> 22;
val_x = rtlphy->reg_eb4;
val_y = rtlphy->reg_ebc;
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
ele_a = ((val_x * ele_d) >> 8) &
0x000003FF;
if ((val_y & 0x00000200) != 0)
val_y = val_y | 0xFFFFFC00;
ele_c = ((val_y * ele_d) >> 8) &
0x000003FF;
value32 = (ele_d << 22) |
((ele_c & 0x3F) << 16) | ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTXIQIMBALANCE,
MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
MASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(27), value32);
value32 = ((val_y * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(25), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTXIQIMBALANCE,
MASKDWORD,
ofdmswing_table[ofdm_index
[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
MASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(27) | BIT(25), 0x00);
}
}
}
if (delta_iqk > 3) {
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtl92c_phy_iq_calibrate(hw, false);
}
if (rtlpriv->dm.txpower_track_control)
rtlpriv->dm.thermalvalue = thermalvalue;
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
}
static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("pMgntInfo->txpower_tracking = %d\n",
rtlpriv->dm.txpower_tracking));
}
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
}
static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
{
rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
}
static void rtl92c_dm_check_txpower_tracking_thermal_meter(
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
static u8 tm_trigger;
if (!rtlpriv->dm.txpower_tracking)
return;
if (!tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
0x60);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("Trigger 92S Thermal Meter!!\n"));
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
("Schedule TxPowerTracking direct call!!\n"));
rtl92c_dm_txpower_tracking_directcall(hw);
tm_trigger = 0;
}
}
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
}
EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rate_adaptive *p_ra = &(rtlpriv->ra);
p_ra->ratr_state = DM_RATR_STA_INIT;
p_ra->pre_ratr_state = DM_RATR_STA_INIT;
if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
rtlpriv->dm.useramask = true;
else
rtlpriv->dm.useramask = false;
}
EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rate_adaptive *p_ra = &(rtlpriv->ra);
u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
("<---- driver is going to unload\n"));
return;
}
if (!rtlpriv->dm.useramask) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
("<---- driver does not control rate adaptive mask\n"));
return;
}
if (mac->link_state == MAC80211_LINKED &&
mac->opmode == NL80211_IFTYPE_STATION) {
switch (p_ra->pre_ratr_state) {
case DM_RATR_STA_HIGH:
high_rssithresh_for_ra = 50;
low_rssithresh_for_ra = 20;
break;
case DM_RATR_STA_MIDDLE:
high_rssithresh_for_ra = 55;
low_rssithresh_for_ra = 20;
break;
case DM_RATR_STA_LOW:
high_rssithresh_for_ra = 50;
low_rssithresh_for_ra = 25;
break;
default:
high_rssithresh_for_ra = 50;
low_rssithresh_for_ra = 20;
break;
}
if (rtlpriv->dm.undecorated_smoothed_pwdb >
(long)high_rssithresh_for_ra)
p_ra->ratr_state = DM_RATR_STA_HIGH;
else if (rtlpriv->dm.undecorated_smoothed_pwdb >
(long)low_rssithresh_for_ra)
p_ra->ratr_state = DM_RATR_STA_MIDDLE;
else
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
("RSSI = %ld\n",
rtlpriv->dm.undecorated_smoothed_pwdb));
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
("RSSI_LEVEL = %d\n", p_ra->ratr_state));
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
("PreState = %d, CurState = %d\n",
p_ra->pre_ratr_state, p_ra->ratr_state));
rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;
rcu_read_unlock();
}
}
}
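/*
 * Illustrative sketch (not part of the driver): the threshold table in
 * rtl92c_dm_refresh_rate_adaptive_mask() implements hysteresis.  The
 * HIGH/MIDDLE boundary is 50 when approached from below but 55 once
 * MIDDLE is the current state, and the MIDDLE/LOW boundary is 20 or 25
 * likewise, so a noisy RSSI near a boundary cannot flap the rate state
 * on every watchdog tick.
 */
#if 0
static u8 classify_rssi(long rssi, u8 prev_state)
{
	/* widen the band that must be left before the state changes */
	long high = (prev_state == DM_RATR_STA_MIDDLE) ? 55 : 50;
	long low = (prev_state == DM_RATR_STA_LOW) ? 25 : 20;

	if (rssi > high)
		return DM_RATR_STA_HIGH;
	if (rssi > low)
		return DM_RATR_STA_MIDDLE;
	return DM_RATR_STA_LOW;
}
#endif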
static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
dm_pstable.pre_ccastate = CCA_MAX;
dm_pstable.cur_ccasate = CCA_MAX;
dm_pstable.pre_rfstate = RF_MAX;
dm_pstable.cur_rfstate = RF_MAX;
dm_pstable.rssi_val_min = 0;
}
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
static u8 initialize;
static u32 reg_874, reg_c70, reg_85c, reg_a74;
if (initialize == 0) {
reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
MASKDWORD) & 0x1CC000) >> 14;
reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
MASKDWORD) & BIT(3)) >> 3;
reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
MASKDWORD) & 0xFF000000) >> 24;
reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
initialize = 1;
}
if (!bforce_in_normal) {
if (dm_pstable.rssi_val_min != 0) {
if (dm_pstable.pre_rfstate == RF_NORMAL) {
if (dm_pstable.rssi_val_min >= 30)
dm_pstable.cur_rfstate = RF_SAVE;
else
dm_pstable.cur_rfstate = RF_NORMAL;
} else {
if (dm_pstable.rssi_val_min <= 25)
dm_pstable.cur_rfstate = RF_NORMAL;
else
dm_pstable.cur_rfstate = RF_SAVE;
}
} else {
dm_pstable.cur_rfstate = RF_MAX;
}
} else {
dm_pstable.cur_rfstate = RF_NORMAL;
}
if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
if (dm_pstable.cur_rfstate == RF_SAVE) {
rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
0x1C0000, 0x2);
rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
0xFF000000, 0x63);
rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
0xC000, 0x2);
rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
} else {
rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
0x1CC000, reg_874);
rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
reg_c70);
rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
reg_85c);
rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
}
dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
}
}
EXPORT_SYMBOL(rtl92c_dm_rf_saving);
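/*
 * Illustrative sketch (not part of the driver): rtl92c_dm_rf_saving()
 * follows a snapshot-and-restore pattern.  On its first call it reads
 * the affected baseband register fields once into statics; entering
 * RF_SAVE then writes fixed low-power values, and leaving it writes
 * the snapshot back.  read_reg(), write_reg() and LOW_POWER_VAL are
 * assumed stand-ins.
 */
#if 0
static void save_or_restore(bool enter_save)
{
	static bool snapshot_taken;
	static u32 saved;

	if (!snapshot_taken) {
		saved = read_reg();	/* one-time snapshot of the field */
		snapshot_taken = true;
	}
	if (enter_save)
		write_reg(LOW_POWER_VAL);
	else
		write_reg(saved);	/* put the original value back */
}
#endif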
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	if ((mac->link_state == MAC80211_NOLINK) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
dm_pstable.rssi_val_min = 0;
		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
("Not connected to any\n"));
}
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable.rssi_val_min =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
("AP Client PWDB = 0x%lx\n",
dm_pstable.rssi_val_min));
} else {
dm_pstable.rssi_val_min =
rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
("STA Default Port PWDB = 0x%lx\n",
dm_pstable.rssi_val_min));
}
} else {
dm_pstable.rssi_val_min =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
("AP Ext Port PWDB = 0x%lx\n",
dm_pstable.rssi_val_min));
}
if (IS_92C_SERIAL(rtlhal->version))
;/* rtl92c_dm_1r_cca(hw); */
else
rtl92c_dm_rf_saving(hw, false);
}
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl92c_dm_diginit(hw);
rtl92c_dm_init_dynamic_txpower(hw);
rtl92c_dm_init_edca_turbo(hw);
rtl92c_dm_init_rate_adaptive_mask(hw);
rtl92c_dm_initialize_txpower_tracking(hw);
rtl92c_dm_init_dynamic_bb_powersaving(hw);
}
EXPORT_SYMBOL(rtl92c_dm_init);
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undecorated_smoothed_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
("Not connected to any\n"));
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("AP Client PWDB = 0x%lx\n",
undecorated_smoothed_pwdb));
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("STA Default Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb));
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("AP Ext Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb));
}
if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
} else if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("TXHIGHPWRLEVEL_NORMAL\n"));
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
("PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel));
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
bool fw_ps_awake = true;
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *) (&fw_current_inpsmode));
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
(u8 *) (&fw_ps_awake));
if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
fw_ps_awake)
&& (!ppsc->rfchange_inprogress)) {
rtl92c_dm_pwdb_monitor(hw);
rtl92c_dm_dig(hw);
rtl92c_dm_false_alarm_counter_statistics(hw);
rtl92c_dm_dynamic_bb_powersaving(hw);
rtl92c_dm_dynamic_txpower(hw);
rtl92c_dm_check_txpower_tracking(hw);
rtl92c_dm_refresh_rate_adaptive_mask(hw);
rtl92c_dm_bt_coexist(hw);
rtl92c_dm_check_edca_turbo(hw);
}
}
EXPORT_SYMBOL(rtl92c_dm_watchdog);
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
long undecorated_smoothed_pwdb;
u8 curr_bt_rssi_state = 0x00;
if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
undecorated_smoothed_pwdb =
GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
} else {
if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
undecorated_smoothed_pwdb = 100;
else
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
}
/* Check RSSI to determine HighPower/NormalPower state for
* BT coexistence. */
if (undecorated_smoothed_pwdb >= 67)
curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
else if (undecorated_smoothed_pwdb < 62)
curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
/* Check RSSI to determine AMPDU setting for BT coexistence. */
if (undecorated_smoothed_pwdb >= 40)
curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
else if (undecorated_smoothed_pwdb <= 32)
curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
	/* Record the RSSI state; it will be used to determine the BT
	 * coexistence setting later. */
if (undecorated_smoothed_pwdb < 35)
curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
/* Set Tx Power according to BT status. */
if (undecorated_smoothed_pwdb >= 30)
curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
else if (undecorated_smoothed_pwdb < 25)
curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
/* Check BT state related to BT_Idle in B/G mode. */
if (undecorated_smoothed_pwdb < 15)
curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
return true;
} else {
return false;
}
}
EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u32 polling, ratio_tx, ratio_pri;
u32 bt_tx, bt_pri;
u8 bt_state;
u8 cur_service_type;
if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
return false;
bt_state = rtl_read_byte(rtlpriv, 0x4fd);
bt_tx = rtl_read_dword(rtlpriv, 0x488);
bt_tx = bt_tx & 0x00ffffff;
bt_pri = rtl_read_dword(rtlpriv, 0x48c);
bt_pri = bt_pri & 0x00ffffff;
polling = rtl_read_dword(rtlpriv, 0x490);
if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
polling == 0xffffffff && bt_state == 0xff)
return false;
bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
bt_state = bt_state |
((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
BIT_OFFSET_LEN_MASK_32(2, 1);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
}
return true;
}
ratio_tx = bt_tx * 1000 / polling;
ratio_pri = bt_pri * 1000 / polling;
rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
if ((ratio_tx < 30) && (ratio_pri < 30))
cur_service_type = BT_IDLE;
else if ((ratio_pri > 110) && (ratio_pri < 250))
cur_service_type = BT_SCO;
else if ((ratio_tx >= 200) && (ratio_pri >= 200))
cur_service_type = BT_BUSY;
else if ((ratio_tx >= 350) && (ratio_tx < 500))
cur_service_type = BT_OTHERBUSY;
else if (ratio_tx >= 500)
cur_service_type = BT_PAN;
else
cur_service_type = BT_OTHER_ACTION;
if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
rtlpcipriv->bt_coexist.bt_service = cur_service_type;
bt_state = bt_state |
((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
			/* Enable interrupt migration when BT is not in
			 * the idle (no traffic) state. */
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
rtl_write_word(rtlpriv, 0x504, 0x0ccc);
rtl_write_byte(rtlpriv, 0x506, 0x54);
rtl_write_byte(rtlpriv, 0x507, 0x54);
} else {
rtl_write_byte(rtlpriv, 0x506, 0x00);
rtl_write_byte(rtlpriv, 0x507, 0x00);
}
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
return true;
}
}
return false;
}
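/*
 * Illustrative sketch (not part of the driver): bt_tx, bt_pri and
 * polling above are raw hardware counters; scaling by 1000 and
 * dividing by the polling count turns them into per-mille activity
 * ratios, which the ladder above buckets into BT service types.  For
 * example bt_tx = 45 with polling = 100 gives ratio_tx = 450, which
 * lands in the BT_OTHERBUSY band (350 <= ratio_tx < 500).
 */
#if 0
static u32 per_mille(u32 count, u32 polling)
{
	/* guard the division the driver performs unconditionally */
	return polling ? count * 1000 / polling : 0;
}
#endif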
static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
static bool media_connect;
if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
media_connect = false;
} else {
if (!media_connect) {
media_connect = true;
return true;
}
media_connect = true;
}
return false;
}
static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
} else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
} else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
}
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
}
if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
(rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
(rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
BT_RSSI_STATE_BG_EDCA_LOW)) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
}
}
static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
/* Only enable HW BT coexist when BT in "Busy" state. */
if (rtlpriv->mac80211.vendor == PEER_CISCO &&
rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else {
if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else if ((rtlpcipriv->bt_coexist.bt_service ==
BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
WIRELESS_MODE_N_24G) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
}
}
if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
else
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
if (rtlpcipriv->bt_coexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER) {
rtl92c_bt_set_normal(hw);
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
}
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A,
0x1e,
0xf0, 0xf);
} else {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A, 0x1e, 0xf0,
rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
}
if (!rtlpriv->dm.dynamic_txpower_enable) {
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
if (rtlpcipriv->bt_coexist.bt_rssi_state &
BT_RSSI_STATE_TXPOWER_LOW) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_BT2;
} else {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_BT1;
}
} else {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
}
rtl92c_phy_set_txpower_level(hw,
rtlpriv->phy.current_channel);
}
}
static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if (rtlpcipriv->bt_coexist.bt_cur_state) {
if (rtlpcipriv->bt_coexist.bt_ant_isolation)
rtl92c_bt_ant_isolation(hw);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
}
}
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
bool wifi_connect_change;
bool bt_state_change;
bool rssi_state_change;
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
bt_state_change = rtl92c_bt_state_change(hw);
rssi_state_change = rtl92c_bt_rssi_state_change(hw);
if (wifi_connect_change || bt_state_change || rssi_state_change)
rtl92c_check_bt_change(hw);
}
}
EXPORT_SYMBOL(rtl92c_dm_bt_coexist);
| gpl-2.0 |
daver18qc/android_kernel_samsung_kylessopen | drivers/md/md.c | 1175 | 198779 | /*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
Changes:
- RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- kmod support by: Cyrus Durgin
- RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
- lots of fixes and improvements to the RAID1/RAID5 and generic
RAID code (such as request based resynchronization):
Neil Brown <neilb@cse.unsw.edu.au>.
- persistent bitmap code
Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
* mddev->thread when the mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
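/*
 * Illustrative sketch (not part of md): halving the error count for
 * every hour between read errors is an exponential decay, so only a
 * sustained error rate can accumulate toward the 20-error ejection
 * threshold above.
 */
#if 0
static int decay_read_errors(int errors, unsigned int hours_elapsed)
{
	while (hours_elapsed-- && errors)
		errors /= 2;	/* halve once per elapsed hour */
	return errors;
}
#endif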
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(mddev_t *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;
static ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{ }
};
static ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
{ }
};
static ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
{ }
};
static const struct block_device_operations md_fops;
static int start_readonly;
/* bio_clone_mddev
* like bio_clone, but with a local bio set
*/
static void mddev_bio_destructor(struct bio *bio)
{
mddev_t *mddev, **mddevp;
mddevp = (void*)bio;
mddev = mddevp[-1];
bio_free(bio, mddev->bio_set);
}
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
mddev_t *mddev)
{
struct bio *b;
mddev_t **mddevp;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
b = bio_alloc_bioset(gfp_mask, nr_iovecs,
mddev->bio_set);
if (!b)
return NULL;
mddevp = (void*)b;
mddevp[-1] = mddev;
b->bi_destructor = mddev_bio_destructor;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
mddev_t *mddev)
{
struct bio *b;
mddev_t **mddevp;
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
mddev->bio_set);
if (!b)
return NULL;
mddevp = (void*)b;
mddevp[-1] = mddev;
b->bi_destructor = mddev_bio_destructor;
__bio_clone(b, bio);
if (bio_integrity(bio)) {
int ret;
ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
if (ret < 0) {
bio_put(b);
return NULL;
}
}
return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
* can use 'poll' or 'select' to find out when the event
* count increases.
*
* Events are:
* start array, stop array, error, add device, remove device,
* start build, activate spare
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
static void md_new_event_inintr(mddev_t *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
* all_mddevs_lock protects this list.
*/
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
* the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still holds
 * a reference to the current mddev and must mddev_put() it.
*/
#define for_each_mddev(mddev,tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
tmp = all_mddevs.next; \
mddev = NULL;}); \
({ if (tmp != &all_mddevs) \
mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
spin_unlock(&all_mddevs_lock); \
if (mddev) mddev_put(mddev); \
mddev = list_entry(tmp, mddev_t, all_mddevs); \
tmp != &all_mddevs;}); \
({ spin_lock(&all_mddevs_lock); \
tmp = tmp->next;}) \
)
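/*
 * Illustrative sketch (not part of md): a typical traversal with the
 * macro above.  The macro takes all_mddevs_lock around each list step
 * and pins the current mddev with a refcount while the body runs
 * unlocked, so the body is allowed to sleep.
 */
#if 0
static int count_arrays_example(void)
{
	mddev_t *mddev;
	struct list_head *tmp;
	int n = 0;

	for_each_mddev(mddev, tmp)
		n++;	/* mddev is held by a reference here */
	return n;
}
#endif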
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
* We hold a refcount over the call to ->make_request. By the time that
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
static int md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
int rv;
int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return 0;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
break;
rcu_read_unlock();
schedule();
rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
/*
* save the sectors now since our bio can
* go away inside make_request
*/
sectors = bio_sectors(bio);
rv = mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
return rv;
}
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
* Once ->stop is called and completes, the module will be completely
* unused.
*/
void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(mddev_t *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(mddev_t *mddev, int bits)
{
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
* Generic flush handling for md
*/
static void md_end_flush(struct bio *bio, int err)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
static void submit_flushes(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, flush_work);
mdk_rdev_t *rdev;
INIT_WORK(&mddev->flush_work, md_submit_flush_data);
atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
/* Take two references, one is dropped
* when request finishes, one after
* we reclaim rcu_read_lock
*/
struct bio *bi;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
atomic_inc(&mddev->flush_pending);
submit_bio(WRITE_FLUSH, bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, flush_work);
struct bio *bio = mddev->flush_bio;
if (bio->bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
}
mddev->flush_bio = NULL;
wake_up(&mddev->sb_wait);
}
void md_flush_request(mddev_t *mddev, struct bio *bio)
{
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
mddev->write_lock, /*nothing*/);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
/* Support for plugging.
* This mirrors the plugging support in request_queue, but does not
* require having a whole queue or request structures.
* We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on. This links to the private plug_handle structure in the
* personality data where we keep a count of the number of outstanding
* plugs so other code can see if a plug is active.
*/
struct md_plug_cb {
struct blk_plug_cb cb;
mddev_t *mddev;
};
static void plugger_unplug(struct blk_plug_cb *cb)
{
struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
md_wakeup_thread(mdcb->mddev->thread);
kfree(mdcb);
}
/* Check that an unplug wakeup will come shortly.
 * If not (no plug is active), the caller should wake the md thread itself.
 */
int mddev_check_plugged(mddev_t *mddev)
{
struct blk_plug *plug = current->plug;
struct md_plug_cb *mdcb;
if (!plug)
return 0;
list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
if (mdcb->cb.callback == plugger_unplug &&
mdcb->mddev == mddev) {
/* Already on the list, move to top */
if (mdcb != list_first_entry(&plug->cb_list,
struct md_plug_cb,
cb.list))
list_move(&mdcb->cb.list, &plug->cb_list);
return 1;
}
}
/* Not currently on the callback list */
mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
if (!mdcb)
return 0;
mdcb->mddev = mddev;
mdcb->cb.callback = plugger_unplug;
atomic_inc(&mddev->plug_cnt);
list_add(&mdcb->cb.list, &plug->cb_list);
return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
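/*
 * Illustrative sketch (not part of md): how a personality might use
 * mddev_check_plugged().  If a plug is active, plugger_unplug() will
 * wake the md thread when the caller unplugs, so the wakeup can be
 * deferred; otherwise wake it immediately.
 */
#if 0
static void kick_thread_example(mddev_t *mddev)
{
	if (!mddev_check_plugged(mddev))
		md_wakeup_thread(mddev->thread);
}
#endif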
static inline mddev_t *mddev_get(mddev_t *mddev)
{
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
void mddev_init(mddev_t *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
init_timer(&mddev->safemode_timer);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
if (unit) {
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == unit) {
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
kfree(new);
return mddev;
}
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
new->hold_active = UNTIL_IOCTL;
return new;
}
} else if (new) {
/* find an unused unit number */
static int next_minor = 512;
int start = next_minor;
int is_free = 0;
int dev = 0;
while (!is_free) {
dev = MKDEV(MD_MAJOR, next_minor);
next_minor++;
if (next_minor > MINORMASK)
next_minor = 0;
if (next_minor == start) {
/* Oh dear, all in use. */
spin_unlock(&all_mddevs_lock);
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
is_free = 0;
break;
}
}
new->unit = dev;
new->md_minor = MINOR(dev);
new->hold_active = UNTIL_STOP;
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
}
spin_unlock(&all_mddevs_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
mddev_init(new);
goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
static inline int mddev_is_locked(mddev_t *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(mddev_t * mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
static struct attribute_group md_redundancy_group;
static void mddev_unlock(mddev_t * mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
		 * So we set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or
* otherwise change the sysfs namespace will fail with
* -EBUSY if sysfs_active is still set.
* We set sysfs_active under reconfig_mutex and elsewhere
* test it under the same mutex to ensure its correct value
* is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
if (mddev->kobj.sd) {
if (to_remove != &md_redundancy_group)
sysfs_remove_group(&mddev->kobj, to_remove);
if (mddev->pers == NULL ||
mddev->pers->sync_request == NULL) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
}
}
mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
mdk_rdev_t *rdev;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
mdk_rdev_t *rdev;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct mdk_personality *find_pers(int level, char *clevel)
{
struct mdk_personality *pers;
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
if (strcmp(pers->name, clevel)==0)
return pers;
}
return NULL;
}
/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
{
sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
if (rdev->sb_page)
MD_BUG();
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
return -ENOMEM;
}
return 0;
}
static void free_disk_sb(mdk_rdev_t * rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_start = 0;
rdev->sectors = 0;
}
}
static void super_written(struct bio *bio, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
error, test_bit(BIO_UPTODATE, &bio->bi_flags));
WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
}
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page)
{
	/* write the first "size" bytes of "page" to "sector" of rdev.
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}
void md_super_wait(mddev_t *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
DEFINE_WAIT(wq);
for(;;) {
prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
if (atomic_read(&mddev->pending_writes)==0)
break;
schedule();
}
finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
complete((struct completion*)bio->bi_private);
}
int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
int ret;
rw |= REQ_SYNC;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_sector = sector + rdev->sb_start;
else
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
init_completion(&event);
bio->bi_private = &event;
bio->bi_end_io = bi_complete;
submit_bio(rw, bio);
wait_for_completion(&event);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
char b[BDEVNAME_SIZE];
if (!rdev->sb_page) {
MD_BUG();
return -EINVAL;
}
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
fail:
printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
*tmp1 = *sb1;
*tmp2 = *sb2;
/*
* nr_disks is not constant
*/
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
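/*
 * Worked example: md_csum_fold() reduces a 32-bit sum to 16 bits with
 * end-around carry, folding twice because the first addition can
 * itself carry into bit 16.  For csum = 0xffff0001 the first fold
 * gives 0xffff + 0x0001 = 0x10000, and the second gives
 * 0x0000 + 0x0001 = 0x0001.
 */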
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
for (i = 0; i < MD_SB_BYTES/4 ; i++)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
* different architectures. It isn't critical that we get exactly
* the same return value as before (we always csum_fold before
* testing, and that removes any differences). However as we
* know that csum_partial always returned a 16bit value on
* alphas, do a fold to maximise conformity to previous behaviour.
*/
sb->sb_csum = md_csum_fold(disk_csum);
#else
sb->sb_csum = disk_csum;
#endif
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
* so we have a common interface to them all, and an array of
* different handlers.
* We rely on user-space to write the initial superblock, and support
* reading and updating of superblocks.
* Interface methods are:
* int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
* loads and validates a superblock on dev.
* if refdev != NULL, compare superblocks on both devices
* Return:
* 0 - dev has a superblock that is compatible with refdev
* 1 - dev has a superblock that is compatible and newer than refdev
* so dev should be used as the refdev in future
* -EINVAL superblock incompatible or invalid
* -othererror e.g. -EIO
*
* int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
* Verify that dev is acceptable into mddev.
* The first time, mddev->raid_disks will be 0, and data from
* dev should be merged in. Subsequent calls check that dev
* is new enough. Return 0 or -EINVAL
*
* void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
* Update the superblock for rdev with data in mddev
* This does not write to disc.
*
*/
struct super_type {
char *name;
struct module *owner;
int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
int minor_version);
int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev,
sector_t num_sectors);
};
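/*
 * Illustrative sketch (not part of this excerpt): a handler table
 * entry wiring the 0.90 methods defined below into the common
 * interface above; md's own table lives elsewhere in this file, so
 * this only shows the shape.
 */
#if 0
static struct super_type example_super_types[] = {
	{
		.name		  = "0.90.0",
		.owner		  = THIS_MODULE,
		.load_super	  = super_90_load,
		.validate_super	  = super_90_validate,
		.sync_super	  = super_90_sync,
		.rdev_size_change = super_90_rdev_size_change,
	},
};
#endif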
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(mddev_t *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
/*
	 * Calculate the position of the superblock (in 512-byte sectors);
	 * it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
ret = -EINVAL;
bdevname(rdev->bdev, b);
sb = (mdp_super_t*)page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version,
b);
goto abort;
}
if (sb->raid_disks <= 0)
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
}
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
rdev->desc_nr = sb->this_disk.number;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID"
" but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that */
if (rdev->sectors >= (2ULL << 32))
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
abort:
return ret;
}
/*
* validate_super for 0.90.0
*/
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
mddev->recovery_cp = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_info.file == NULL)
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
* for spares (which don't need an event count) */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) /* &&
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
* that is, so set to zero for now */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
}
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
/*
* sync_super for 0.90.0
*/
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
mdp_super_t *sb;
mdk_rdev_t *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
* 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
* 3/ any empty disks < next_spare become removed
*
* disks[0] gets initialised to REMOVED because
* we cannot be sure from other fields if it has
* been initialised or not.
*/
int i;
int active=0, working=0,failed=0,spare=0,nr_disks=0;
rdev->sb_size = MD_SB_BYTES;
sb = (mdp_super_t*)page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
sb->md_magic = MD_SB_MAGIC;
sb->major_version = mddev->major_version;
sb->patch_version = mddev->patch_version;
sb->gvalid_words = 0; /* ignored */
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
memcpy(&sb->set_uuid3, mddev->uuid+12,4);
sb->ctime = mddev->ctime;
sb->level = mddev->level;
sb->size = mddev->dev_sectors / 2;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
sb->events_lo = (u32)mddev->events;
if (mddev->reshape_position == MaxSector)
sb->minor_version = 90;
else {
sb->minor_version = 91;
sb->reshape_position = mddev->reshape_position;
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
list_for_each_entry(rdev2, &mddev->disks, same_set) {
mdp_disk_t *d;
int desc_nr;
int is_active = test_bit(In_sync, &rdev2->flags);
if (rdev2->raid_disk >= 0 &&
sb->minor_version >= 91)
/* we have nowhere to store the recovery_offset,
* but if it is not below the reshape_position,
* we can piggy-back on that.
*/
is_active = 1;
if (rdev2->raid_disk < 0 ||
test_bit(Faulty, &rdev2->flags))
is_active = 0;
if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
if (test_bit(In_sync, &rdev2->flags))
d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
d->state = 0;
spare++;
working++;
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
d->state = (1<<MD_DISK_REMOVED);
d->state |= (1<<MD_DISK_FAULTY);
failed++;
}
}
sb->nr_disks = nr_disks;
sb->active_disks = active;
sb->working_disks = working;
sb->failed_disks = failed;
sb->spare_disks = spare;
sb->this_disk = sb->disks[rdev->desc_nr];
sb->sb_csum = calc_sb_csum(sb);
}
/*
* rdev_size_change for 0.90.0
*/
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
if (num_sectors >= (2ULL << 32))
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
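/*
 * Worked arithmetic for the 4TB clamp above: the 0.90 superblock
 * records the component size as a 32-bit count of 1K blocks, and
 * (2^32 - 1) KB is 2 * (2^32 - 1) = (2ULL << 32) - 2 sectors of
 * 512 bytes, i.e. just under 2^42 bytes = 4TB.  The clamp is therefore
 * the largest sector count whose KB value still fits in 32 bits.
 */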
/*
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
__le32 disk_csum;
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
int i;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
for (i=0; size>=4; size -= 4 )
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and,
	 * depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
* 1: At start of device
* 2: 4K from start of device.
*/
switch(minor_version) {
case 0:
sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
sb_start = 0;
break;
case 2:
sb_start = 8;
break;
default:
return -EINVAL;
}
rdev->sb_start = sb_start;
	/* The superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4K, so we do that.
*/
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
printk("md: data_size too small on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb =
(struct mdp_superblock_1*)page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different"
" superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
if (minor_version)
rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
le64_to_cpu(sb->data_offset);
else
rdev->sectors = rdev->sb_start;
if (rdev->sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
if (le64_to_cpu(sb->size) > rdev->sectors)
return -EINVAL;
return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL )
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
* spares (which don't need an event count) */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = 0xffff;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
break;
case 0xfffe: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
if ((le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_OFFSET))
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH devices are always in_sync */
set_bit(In_sync, &rdev->flags);
return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
struct mdp_superblock_1 *sb;
mdk_rdev_t *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad1, 0, sizeof(sb->pad1));
memset(sb->pad2, 0, sizeof(sb->pad2));
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
}
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
}
max_dev = 0;
list_for_each_entry(rdev2, &mddev->disks, same_set)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
list_for_each_entry(rdev2, &mddev->disks, same_set) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
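/*
 * Illustrative note (not from the original source): after the loops
 * above, each dev_roles[] slot holds 0xfffe for a faulty or unused
 * entry, 0xffff for a spare, or the raid slot number for an active
 * member. A two-disk RAID1 plus one spare would, for example, end up
 * with dev_roles = { 0, 1, 0xffff }.
 */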
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
.load_super = super_90_load,
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
.load_super = super_1_load,
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
},
};
static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
{
if (mddev->sync_super) {
mddev->sync_super(mddev, rdev);
return;
}
BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
super_types[mddev->major_version].sync_super(mddev, rdev);
}
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
mdk_rdev_t *rdev, *rdev2;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev1)
rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
rcu_read_unlock();
return 0;
}
static LIST_HEAD(pending_raid_disks);
/*
* Try to register data integrity profile for an mddev
*
* This is called when an array is started and after a disk has been kicked
* from the array. It only succeeds if all working and active component devices
* are integrity capable with matching profiles.
*/
int md_integrity_register(mddev_t *mddev)
{
mdk_rdev_t *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
list_for_each_entry(rdev, &mddev->disks, same_set) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
if (blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev)) != 0) {
printk(KERN_ERR "md: failed to register integrity for %s\n",
mdname(mddev));
return -EINVAL;
}
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return;
if (rdev->raid_disk < 0) /* skip spares */
return;
if (bi_rdev && blk_integrity_compare(mddev->gendisk,
rdev->bdev->bd_disk) >= 0)
return;
printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
int err;
if (rdev->mddev) {
MD_BUG();
return -EINVAL;
}
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
if (rdev->sectors && (mddev->dev_sectors == 0 ||
rdev->sectors < mddev->dev_sectors)) {
if (mddev->pers) {
/* Cannot change size, so fail
* If mddev->level <= 0, then we don't care
* about aligning sizes (e.g. linear)
*/
if (mddev->level > 0)
return -ENOSPC;
} else
mddev->dev_sectors = rdev->sectors;
}
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
* check number is not in use
*/
if (rdev->desc_nr < 0) {
int choice = 0;
if (mddev->pers) choice = mddev->raid_disks;
while (find_rdev_nr(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
if (find_rdev_nr(mddev, rdev->desc_nr))
return -EBUSY;
}
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
while ( (s=strchr(b, '/')) != NULL)
*s = '!';
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if (sysfs_create_link(&rdev->kobj, ko, "block"))
/* failure here is OK */;
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled = 0;
return 0;
fail:
printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
b, mdname(mddev));
return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
char b[BDEVNAME_SIZE];
if (!rdev->mddev) {
MD_BUG();
return;
}
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
/* We need to delay this, otherwise we can deadlock when
* writing 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
*/
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
queue_work(md_misc_wq, &rdev->del_work);
}
/*
* prevent the device from being mounted, repartitioned or
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
return err;
}
static void unlock_rdev(mdk_rdev_t *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
static void export_rdev(mdk_rdev_t * rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
if (rdev->mddev)
MD_BUG();
free_disk_sb(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
static void export_array(mddev_t *mddev)
{
mdk_rdev_t *rdev, *tmp;
rdev_for_each(rdev, tmp, mddev) {
if (!rdev->mddev) {
MD_BUG();
continue;
}
kick_rdev_from_array(rdev);
}
if (!list_empty(&mddev->disks))
MD_BUG();
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb_90(mdp_super_t *sb)
{
int i;
printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
sb->ctime);
printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
sb->level, sb->size, sb->nr_disks, sb->raid_disks,
sb->md_minor, sb->layout, sb->chunk_size);
printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
" FD:%d SD:%d CSUM:%08x E:%08lx\n",
sb->utime, sb->state, sb->active_disks, sb->working_disks,
sb->failed_disks, sb->spare_disks,
sb->sb_csum, (unsigned long)sb->events_lo);
printk(KERN_INFO);
for (i = 0; i < MD_SB_DISKS; i++) {
mdp_disk_t *desc;
desc = sb->disks + i;
if (desc->number || desc->major || desc->minor ||
desc->raid_disk || (desc->state && (desc->state != 4))) {
printk(" D %2d: ", i);
print_desc(desc);
}
}
printk(KERN_INFO "md: THIS: ");
print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
__u8 *uuid;
uuid = sb->set_uuid;
printk(KERN_INFO
"md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
uuid = sb->device_uuid;
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
"md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
(unsigned long long)le64_to_cpu(sb->size),
le32_to_cpu(sb->raid_disks),
le32_to_cpu(sb->layout),
le32_to_cpu(sb->chunksize),
(unsigned long long)le64_to_cpu(sb->data_offset),
(unsigned long long)le64_to_cpu(sb->data_size),
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
(unsigned long long)le64_to_cpu(sb->resync_offset),
le32_to_cpu(sb->sb_csum),
le32_to_cpu(sb->max_dev)
);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
break;
case 1:
print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
break;
}
} else
printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
struct list_head *tmp;
mdk_rdev_t *rdev;
mddev_t *mddev;
char b[BDEVNAME_SIZE];
printk("\n");
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
list_for_each_entry(rdev, &mddev->disks, same_set)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
list_for_each_entry(rdev, &mddev->disks, same_set)
print_rdev(rdev, mddev->major_version);
}
printk("md: **********************************\n");
printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
* have the right event counter, or have one earlier
* (which would mean they aren't being marked as dirty
* with the rest of the array)
*/
mdk_rdev_t *rdev;
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
mdk_rdev_t *rdev;
int sync_req;
int nospares = 0;
repeat:
/* First make sure individual recovery_offsets are correct */
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!mddev->external)
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
return;
}
spin_lock_irq(&mddev->write_lock);
mddev->utime = get_seconds();
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
/* just a clean<->dirty transition, possibly leave spares alone,
* though if the events count isn't the right even/odd, we will
* have to update the spares after all
*/
nospares = 1;
if (force_change)
nospares = 0;
if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
* Pointless because if there are any spares to skip,
* then a recovery will happen and soon that array won't
* be degraded any more, and the spare can then go back to sleep.
*/
nospares = 0;
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
mddev->events ++;
mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
/*
* oops, this 64-bit counter should never wrap.
* Either we are in around ~1 trillion A.C., assuming
* 1 reboot per second, or we have a bug:
*/
MD_BUG();
mddev->events --;
}
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
dprintk(KERN_INFO
"md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev),mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
list_for_each_entry(rdev, &mddev->disks, same_set) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (test_bit(Faulty, &rdev->flags))
dprintk("(skipping faulty ");
dprintk("%s ", bdevname(rdev->bdev,b));
if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
bdevname(rdev->bdev,b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
} else
dprintk(")\n");
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
break;
}
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock_irq(&mddev->write_lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept either case. For this we use cmd_match.
*/
static int cmd_match(const char *cmd, const char *str)
{
/* See if cmd, written into a sysfs file, matches
* str. They must either be the same, or cmd can
* have a trailing newline
*/
while (*cmd && *str && *cmd == *str) {
cmd++;
str++;
}
if (*cmd == '\n')
cmd++;
if (*str || *cmd)
return 0;
return 1;
}
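/*
 * Illustrative behaviour sketch (not in the original source):
 */
#if 0
cmd_match("idle\n", "idle");	/* 1: trailing newline accepted */
cmd_match("idle", "idle");	/* 1: exact match */
cmd_match("idler", "idle");	/* 0: no partial matches */
cmd_match("idl", "idle");	/* 0: cmd must not be shorter */
#endif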
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(mdk_rdev_t *, char *);
ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
char *sep = "";
size_t len = 0;
if (test_bit(Faulty, &rdev->flags)) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
if (test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
if (test_bit(WriteMostly, &rdev->flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
if (test_bit(Blocked, &rdev->flags)) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
return len+sprintf(page+len, "\n");
}
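/*
 * Example outputs (illustrative only): an active, healthy member reads
 * "in_sync\n"; a write-mostly device that is still being rebuilt reads
 * "write_mostly,spare\n"; a failed device reads "faulty\n", possibly
 * combined with the other flags, comma separated.
 */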
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
/* can write
* faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag
* insync - sets In_sync provided the device isn't active
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
err = 0;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
mddev_t *mddev = rdev->mddev;
kick_rdev_from_array(rdev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
clear_bit(Blocked, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
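/*
 * Illustrative usage from userspace (the sysfs path is an assumption
 * based on the kobject names used in bind_rdev_to_array()):
 *
 *   echo writemostly  > /sys/block/md0/md/dev-sda1/state
 *   echo -writemostly > /sys/block/md0/md/dev-sda1/state
 *   echo remove       > /sys/block/md0/md/dev-sda1/state
 */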
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}
static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&rdev->corrected_errors, n);
return len;
}
return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
int err;
char nm[20];
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
else if (e==buf || (*e && *e!= '\n'))
return -EINVAL;
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk);
if (err)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
mdk_rdev_t *rdev2;
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
return -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
return -EBUSY;
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
if (rdev2->raid_disk == slot)
return -EEXIST;
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
else
rdev->saved_raid_disk = -1;
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}
static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
unsigned long long offset = simple_strtoull(buf, &e, 10);
if (e==buf || (*e && *e != '\n'))
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
/* Must set offset before size, so overlap checks
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
return len;
}
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
/* check if two start/length pairs overlap */
if (s1+l1 <= s2)
return 0;
if (s2+l2 <= s1)
return 0;
return 1;
}
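/*
 * Worked example (illustrative): the regions are treated as half-open
 * intervals [s, s+l). overlaps(0, 100, 100, 50) is 0, since
 * 0 + 100 <= 100; overlaps(0, 100, 99, 50) is 1, since neither
 * 0 + 100 <= 99 nor 99 + 50 <= 0 holds.
 */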
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
unsigned long long blocks;
sector_t new;
if (strict_strtoull(buf, 10, &blocks) < 0)
return -EINVAL;
if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
return -EINVAL; /* sector conversion overflow */
new = blocks * 2;
if (new != blocks * 2)
return -EINVAL; /* unsigned long long to sector_t overflow */
*sectors = new;
return 0;
}
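/*
 * Illustrative note (not from the original source): input is in 1K
 * blocks, output in 512-byte sectors, so "1024" yields 2048 sectors
 * (1MB). The top-bit test rejects values whose doubling would wrap
 * the 64-bit count, and the "new != blocks * 2" test catches
 * truncation when sector_t is only 32 bits wide.
 */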
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
mddev_t *my_mddev = rdev->mddev;
sector_t oldsectors = rdev->sectors;
sector_t sectors;
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
rdev_size_change(rdev, sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
/* need to check that all other rdevs with the same ->bdev
* do not overlap. We need to unlock the mddev to avoid
* a deadlock. We have already changed rdev->sectors, and if
* we have to change it back, we will have the lock again.
*/
mddev_t *mddev;
int overlap = 0;
struct list_head *tmp;
mddev_unlock(my_mddev);
for_each_mddev(mddev, tmp) {
mdk_rdev_t *rdev2;
mddev_lock(mddev);
list_for_each_entry(rdev2, &mddev->disks, same_set)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
overlaps(rdev->data_offset, rdev->sectors,
rdev2->data_offset,
rdev2->sectors)) {
overlap = 1;
break;
}
mddev_unlock(mddev);
if (overlap) {
mddev_put(mddev);
break;
}
}
mddev_lock(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
* We put oldsectors back because we *know* it is
* safe, and trust userspace not to race with
* itself
*/
rdev->sectors = oldsectors;
return -EBUSY;
}
}
return len;
}
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
if (test_bit(In_sync, &rdev->flags) ||
recovery_start == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", recovery_start);
}
static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (strict_strtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
mddev_t *mddev = rdev->mddev;
ssize_t rv;
if (!entry->show)
return -EIO;
rv = mddev ? mddev_lock(mddev) : -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->show(rdev, page);
mddev_unlock(mddev);
}
return rv;
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
ssize_t rv;
mddev_t *mddev = rdev->mddev;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev ? mddev_lock(mddev): -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void rdev_free(struct kobject *ko)
{
mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
.default_attrs = rdev_default_attrs,
};
void md_rdev_init(mdk_rdev_t *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
* mark the device faulty if:
*
* - the device is nonexistent (zero size)
* - the device has no valid superblock
*
* a faulty rdev _never_ has rdev->sb set.
*/
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
char b[BDEVNAME_SIZE];
int err;
mdk_rdev_t *rdev;
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
md_rdev_init(rdev);
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
kobject_init(&rdev->kobj, &rdev_ktype);
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
}
if (super_format >= 0) {
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
printk(KERN_WARNING
"md: %s does not have a valid v%d.%d "
"superblock, not importing!\n",
bdevname(rdev->bdev,b),
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
}
return rdev;
abort_free:
if (rdev->sb_page) {
if (rdev->bdev)
unlock_rdev(rdev);
free_disk_sb(rdev);
}
kfree(rdev);
return ERR_PTR(err);
}
/*
* Check a full RAID array for plausibility
*/
static void analyze_sbs(mddev_t * mddev)
{
int i;
mdk_rdev_t *rdev, *freshest, *tmp;
char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
printk(KERN_ERR
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
i = 0;
rdev_for_each(rdev, tmp, mddev) {
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest)
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
continue;
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
}
}
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
* multiplying that number by 10^'scale', all without any
* floating-point arithmetic.
*/
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
decimals++;
}
cp++;
}
if (*cp == '\n')
cp++;
if (*cp)
return -EINVAL;
if (decimals < 0)
decimals = 0;
while (decimals < scale) {
result *= 10;
decimals ++;
}
*res = result;
return 0;
}
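/*
 * Illustrative behaviour sketch (not in the original source),
 * assuming scale = 3:
 */
#if 0
unsigned long v;
strict_strtoul_scaled("1.253", &v, 3);	/* v == 1253 */
strict_strtoul_scaled("2", &v, 3);	/* v == 2000 */
strict_strtoul_scaled("0.5", &v, 3);	/* v == 500 */
strict_strtoul_scaled("1.2345", &v, 3);	/* v == 1234: excess digits dropped */
#endif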
static void md_safemode_timeout(unsigned long data);
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
if (mddev->safemode_delay < old_delay)
md_safemode_timeout((unsigned long)mddev);
}
return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
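/*
 * Worked example (illustrative): with HZ == 250, writing "0.1" parses
 * to msec == 100, so safemode_delay becomes 100 * 250 / 1000 == 25
 * jiffies; reading back computes 25 * 1000 / 250 == 100 and prints
 * "0.100". Non-zero values that would round down to 0 jiffies are
 * clamped to 1.
 */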
static ssize_t
level_show(mddev_t *mddev, char *page)
{
struct mdk_personality *p = mddev->pers;
if (p)
return sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
return sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
return sprintf(page, "%d\n", mddev->level);
else
return 0;
}
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv = len;
struct mdk_personality *pers;
long level;
void *priv;
mdk_rdev_t *rdev;
if (mddev->pers == NULL) {
if (len == 0)
return 0;
if (len >= sizeof(mddev->clevel))
return -ENOSPC;
strncpy(mddev->clevel, buf, len);
if (mddev->clevel[len-1] == '\n')
len--;
mddev->clevel[len] = 0;
mddev->level = LEVEL_NONE;
return rv;
}
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
* - old personality can be suspended
* - the new personality can take over the array.
*/
if (mddev->sync_thread ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
return -EBUSY;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
return -EINVAL;
}
/* Now find the new personality */
if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
strncpy(clevel, buf, len);
if (clevel[len-1] == '\n')
len--;
clevel[len] = 0;
if (strict_strtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
return rv;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
return -EINVAL;
}
list_for_each_entry(rdev, &mddev->disks, same_set)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
if (mddev->pers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
* externally so we need to be sure that writes
* won't block due to a need to transition
* clean->dirty
* until external management is started.
*/
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
char nm[20];
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
printk("md: cannot register %s for %s after level change\n",
nm, mdname(mddev));
}
}
module_put(mddev->pers->owner);
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
}
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
/* just a number, not meaningful for all levels */
if (mddev->reshape_position != MaxSector &&
mddev->layout != mddev->new_layout)
return sprintf(page, "%d (%d)\n",
mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_layout = mddev->layout;
return err;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
if (mddev->reshape_position != MaxSector &&
mddev->delta_disks != 0)
return sprintf(page, "%d (%d)\n", mddev->raid_disks,
mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
static int update_raid_disks(mddev_t *mddev, int raid_disks);
static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
int rv = 0;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers)
rv = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
int olddisks = mddev->raid_disks - mddev->delta_disks;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
} else
mddev->raid_disks = n;
return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n",
mddev->new_chunk_sectors << 9,
mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_chunk_sectors = mddev->chunk_sectors;
return err;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
if (mddev->recovery_cp == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}
static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long n = simple_strtoull(buf, &e, 10);
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
return -EBUSY;
if (cmd_match(buf, "none"))
n = MaxSector;
else if (!*buf || (*e && *e != '\n'))
return -EINVAL;
mddev->recovery_cp = n;
return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
* The array state can be:
*
* clear
* No devices, no size, no level
* Equivalent to STOP_ARRAY ioctl
* inactive
* May have some settings, but array is not active
* all IO results in error
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
* Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
* read-auto
* like readonly, but behaves like 'clean' on a write request.
*
* clean - no pending writes, but otherwise active.
* When written to inactive array, starts without resync
* If a write request arrives then
* if metadata is known, mark 'dirty' and switch to 'active'.
* if not known, block and switch to write-pending
* If written to an active array that has pending writes, then fails.
* active
* fully active: IO and resync can be happening.
* When written to inactive array, starts with resync
*
* write-pending
* clean, but writes are blocked waiting for 'active' to be written.
*
* active-idle
* like active, but no writes have been seen for a while (100msec).
*
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
int n;
for (n=0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
}
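/*
 * Illustrative behaviour sketch (not in the original source):
 */
#if 0
match_word("read-auto\n", array_states);	/* == read_auto */
match_word("bogus", array_states);	/* == bad_word (hits the NULL entry) */
#endif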
static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
enum array_state st = inactive;
if (mddev->pers)
switch(mddev->ro) {
case 1:
st = readonly;
break;
case 2:
st = read_auto;
break;
case 0:
if (mddev->in_sync)
st = clean;
else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
st = active;
}
else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
st = clear;
else
st = inactive;
}
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
static int md_set_readonly(mddev_t * mddev, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
int err = -EINVAL;
enum array_state st = match_word(buf, array_states);
switch(st) {
case bad_word:
break;
case clear:
/* stopping an active array */
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 0, 0);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 2, 0);
} else
err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
case readonly:
if (mddev->pers)
err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
err = do_md_run(mddev);
}
break;
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
} else {
mddev->ro = 2;
err = do_md_run(mddev);
}
break;
case clean:
if (mddev->pers) {
restart_array(mddev);
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
spin_unlock_irq(&mddev->write_lock);
} else
err = -EINVAL;
break;
case active:
if (mddev->pers) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
mddev->ro = 0;
set_disk_ro(mddev->gendisk, 0);
err = do_md_run(mddev);
}
break;
case write_pending:
case active_idle:
/* these cannot be set */
break;
}
if (err)
return err;
else {
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%d\n",
atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
return -EINVAL;
}
static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
max_corrected_read_errors_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
return -EINVAL;
}
static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
/* buf must be %d:%d\n? giving major and minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
mdk_rdev_t *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
mdk_rdev_t, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
return err ? err : len;
}
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
return len;
}
static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->dev_sectors / 2);
}
static int update_size(mddev_t *mddev, sector_t num_sectors);
static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
/* If array is inactive, we can reduce the component size, but
* not increase it (except from 0).
* If array is active, we can try an on-line resize
*/
sector_t sectors;
int err = strict_blocks_to_sectors(buf, &sectors);
if (err < 0)
return err;
if (mddev->pers) {
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
mddev->dev_sectors = sectors;
else
err = -ENOSPC;
}
return err ? err : len;
}
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
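/*
 * Illustrative usage (assumed sysfs path): component_size is read and
 * written in 1K blocks, so
 *
 *   echo 1048576 > /sys/block/md0/md/component_size
 *
 * requests a 1GiB component (2097152 sectors internally).
 */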
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
else if (mddev->external)
return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
return -EBUSY;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
mddev->metadata_type[namelen] = 0;
if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev->metadata_type[--namelen] = 0;
mddev->persistent = 0;
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
major = simple_strtoul(buf, &e, 10);
if (e==buf || *e != '.')
return -EINVAL;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
return -EINVAL;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
return -ENOENT;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
return len;
}
static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
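/*
 * Illustrative writes (the version strings follow the parser above;
 * "imsm" is only an example external type name):
 *
 *   echo 0.90          > metadata_version   # in-kernel v0.90 format
 *   echo 1.2           > metadata_version   # in-kernel v1.x, minor 2
 *   echo external:imsm > metadata_version   # externally managed
 *   echo none          > metadata_version   # no persistent metadata
 */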
static ssize_t
action_show(mddev_t *mddev, char *page)
{
char *type = "idle";
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
type = "resync";
else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
type = "check";
else
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
}
static void reap_sync_thread(mddev_t *mddev);
static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
reap_sync_thread(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
else if (cmd_match(page, "recover")) {
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev->pers->start_reshape(mddev);
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!cmd_match(page, "repair"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long) mddev->resync_mismatches);
}
static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
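/*
 * Illustrative usage (assumed sysfs path):
 *
 *   echo check  > /sys/block/md0/md/sync_action   # read-only scrub
 *   echo repair > /sys/block/md0/md/sync_action   # scrub and rewrite
 *   echo idle   > /sys/block/md0/md/sync_action   # interrupt current action
 *   echo frozen > /sys/block/md0/md/sync_action   # interrupt and block new ones
 */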
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
}
static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
int min;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_min = 0;
return len;
}
min = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || min <= 0)
return -EINVAL;
mddev->sync_speed_min = min;
return len;
}
static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
}
static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
int max;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_max = 0;
return len;
}
max = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || max <= 0)
return -EINVAL;
mddev->sync_speed_max = max;
return len;
}
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->parallel_resync);
}
static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
long n;
if (strict_strtol(buf, 10, &n))
return -EINVAL;
if (n != 0 && n != 1)
return -EINVAL;
mddev->parallel_resync = n;
if (mddev->sync_thread)
wake_up(&resync_wait);
return len;
}
/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
unsigned long resync, dt, db;
if (mddev->curr_resync == 0)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
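/*
* Worked example for sync_speed_show() (illustrative numbers): with
* db = 409600 sectors written since the last mark and dt = 10 seconds,
* db/dt/2 = 20480, i.e. 20480 KiB/sec, since two 512-byte sectors make
* one KiB.
*/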
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
unsigned long long min;
if (strict_strtoull(buf, 10, &min))
return -EINVAL;
if (min > mddev->resync_max)
return -EINVAL;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = min;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_min = min;
return len;
}
static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
return sprintf(page, "max\n");
else
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
if (strict_strtoull(buf, 10, &max))
return -EINVAL;
if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = max;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
return len;
}
static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}
static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}
static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_hi;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
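/*
* Note on the suspend_lo/suspend_hi pair above: together they bound a
* sector range (roughly [suspend_lo, suspend_hi)) in which writes are
* held off. The quiesce(mddev, 1)/quiesce(mddev, 0) pair forces a full
* drain when the region grows, so in-flight requests finish before the
* new bound takes effect; quiesce(mddev, 2) is the cheaper hint used
* when the region only shrank. Illustrative usage, with hypothetical
* values, suspending the first 2048 sectors of md0:
*
*	echo 0 > /sys/block/md0/md/suspend_lo
*	echo 2048 > /sys/block/md0/md/suspend_hi
*/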
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
if (mddev->reshape_position != MaxSector)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->reshape_position);
strcpy(page, "none\n");
return 5;
}
static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
if (mddev->pers)
return -EBUSY;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
return len;
}
static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
array_size_show(mddev_t *mddev, char *page)
{
if (mddev->external_size)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->array_sectors/2);
else
return sprintf(page, "default\n");
}
static ssize_t
array_size_store(mddev_t *mddev, const char *buf, size_t len)
{
sector_t sectors;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
else
sectors = mddev->array_sectors;
mddev->external_size = 0;
} else {
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
return -E2BIG;
mddev->external_size = 1;
}
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
return len;
}
static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
array_size_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
&md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
&md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
ssize_t rv;
if (!entry->show)
return -EIO;
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
ssize_t rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev_lock(mddev);
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void md_free(struct kobject *ko)
{
mddev_t *mddev = container_of(ko, mddev_t, kobj);
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
if (mddev->queue)
blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
};
int mdp_major = 0;
static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
mddev_t *mddev = mddev_find(dev);
struct gendisk *disk;
int partitioned;
int shift;
int unit;
int error;
if (!mddev)
return -ENODEV;
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
/* wait for any previous instance of this device to be
* completely removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
if (mddev->gendisk)
goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
*/
mddev_t *mddev2;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
goto abort;
}
spin_unlock(&all_mddevs_lock);
}
error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
sprintf(disk->disk_name, "md_d%d", unit);
else
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
*/
mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
md_alloc(dev, NULL);
return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
/* val must be "md_*" where * is not all digits.
* We allocate an array with a large free minor number, and
* set the name to val. val must not already be an active name.
*/
int len = strlen(val);
char buf[DISK_NAME_LEN];
while (len && val[len-1] == '\n')
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
strlcpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) != 0)
return -EINVAL;
return md_alloc(0, buf);
}
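/*
* Illustrative usage, assuming this handler is wired up as the md_mod
* "new_array" module parameter (as it is later in this file):
*
*	echo md_home > /sys/module/md_mod/parameters/new_array
*
* allocates an array whose gendisk is literally named "md_home", with a
* dynamically chosen minor, since md_alloc() is called with dev == 0.
*/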
static void md_safemode_timeout(unsigned long data)
{
mddev_t *mddev = (mddev_t *) data;
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
struct mdk_personality *pers;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices. */
return -EINVAL;
if (mddev->pers)
return -EBUSY;
/* Cannot run until previous stop completes properly */
if (mddev->sysfs_active)
return -EBUSY;
/*
* Analyze all RAID superblock(s)
*/
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
analyze_sbs(mddev);
}
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
/*
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
*/
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
/* perform some consistency tests on the device.
* We don't want the data to overlap the metadata;
* internal bitmap issues have been handled elsewhere.
*/
if (rdev->meta_bdev) {
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
if (mddev->bio_set == NULL)
mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
mddev->level);
else
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
mddev->clevel);
return -EINVAL;
}
mddev->pers = pers;
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
mddev->new_level = pers->level;
}
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
mddev->pers = NULL;
module_put(pers->owner);
return -EINVAL;
}
if (pers->sync_request) {
/* Warn if this is a potentially silly
* configuration.
*/
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdk_rdev_t *rdev2;
int warned = 0;
list_for_each_entry(rdev, &mddev->disks, same_set)
list_for_each_entry(rdev2, &mddev->disks, same_set) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
printk(KERN_WARNING
"%s: WARNING: %s appears to be"
" on the same physical disk as"
" %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
printk(KERN_WARNING
"True protection against single-disk"
" failure might be compromised.\n");
}
mddev->recovery = 0;
/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
(unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
mddev->pers->stop(mddev);
}
if (err == 0 && mddev->pers->sync_request) {
err = bitmap_create(mddev);
if (err) {
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
mddev->pers->stop(mddev);
}
}
if (err) {
module_put(mddev->pers->owner);
mddev->pers = NULL;
bitmap_destroy(mddev);
return err;
}
if (mddev->pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
mddev->ready = 1;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
/* failure here is OK */;
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(mddev_t *mddev)
{
int err;
err = md_run(mddev);
if (err)
goto out;
err = bitmap_load(mddev);
if (err) {
bitmap_destroy(mddev);
goto out;
}
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
}
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
/* Complain if it has no devices */
if (list_empty(&mddev->disks))
return -ENXIO;
if (!mddev->pers)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
printk(KERN_INFO "md: %s switched to read-write mode.\n",
mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
* to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
if (atomic_read(&inode->i_writecount) > 1) {
spin_unlock(&inode->i_lock);
return -ETXTBSY;
}
atomic_set(&inode->i_writecount, -1);
spin_unlock(&inode->i_lock);
return 0;
}
void restore_bitmap_write_access(struct file *file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
atomic_set(&inode->i_writecount, 1);
spin_unlock(&inode->i_lock);
}
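/*
* Both helpers above rely on the same convention as deny_write_access():
* a negative i_writecount means "writes denied". Setting it to -1 while
* md holds its own reference blocks any other open-for-write attempt;
* setting it back to 1 re-accounts for the single writer (md itself).
* The "> 1" test, rather than "!= 0", is what tolerates our own open
* file.
*/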
static void md_clean(mddev_t *mddev)
{
mddev->array_sectors = 0;
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
mddev->resync_mismatches = 0;
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
}
static void __md_stop_writes(mddev_t *mddev)
{
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
bitmap_flush(mddev);
md_super_wait(mddev);
if (!mddev->in_sync || mddev->flags) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
}
void md_stop_writes(mddev_t *mddev)
{
mddev_lock(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
void md_stop(mddev_t *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
module_put(mddev->pers->owner);
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(mddev_t *mddev, int is_open)
{
int err = 0;
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > is_open) {
printk("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
/* mode:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
struct gendisk *disk = mddev->gendisk;
mdk_rdev_t *rdev;
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > is_open ||
mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
} else
mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
}
mddev->bitmap_info.offset = 0;
export_array(mddev);
md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
mdk_rdev_t *rdev;
int err;
if (list_empty(&mddev->disks))
return;
printk(KERN_INFO "md: running: ");
list_for_each_entry(rdev, &mddev->disks, same_set) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
printk("\n");
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, 0);
}
}
/*
* let's try to run arrays based on all disks that have arrived
* until now. (those are in pending_raid_disks)
*
* the method: pick the first pending disk, collect all disks with
* the same UUID, remove all from the pending list and put them into
* the 'same_array' list. Then order this list based on superblock
* update time (freshest comes first), kick out 'old' disks and
* compare superblocks. If everything's fine then run it.
*
* If "unit" is allocated, then bump its reference count
*/
static void autorun_devices(int part)
{
mdk_rdev_t *rdev0, *rdev, *tmp;
mddev_t *mddev;
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
mdk_rdev_t, same_set);
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
* now we have a set of devices, with all of them having
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
if (part) {
dev = MKDEV(mdp_major,
rdev0->preferred_minor << MdpMinorShift);
unit = MINOR(dev) >> MdpMinorShift;
} else {
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
}
autorun_array(mddev);
mddev_unlock(mddev);
}
/* on success, candidates will be empty; on error
* it won't be.
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
export_rdev(rdev);
}
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
mdu_version_t ver;
ver.major = MD_MAJOR_VERSION;
ver.minor = MD_MINOR_VERSION;
ver.patchlevel = MD_PATCHLEVEL_VERSION;
if (copy_to_user(arg, &ver, sizeof(ver)))
return -EFAULT;
return 0;
}
static int get_array_info(mddev_t * mddev, void __user * arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
mdk_rdev_t *rdev;
nr=working=insync=failed=spare=0;
list_for_each_entry(rdev, &mddev->disks, same_set) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
}
info.major_version = mddev->major_version;
info.minor_version = mddev->minor_version;
info.patch_version = MD_PATCHLEVEL_VERSION;
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->dev_sectors / 2;
if (info.size != mddev->dev_sectors / 2) /* overflow */
info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
info.not_persistent = !mddev->persistent;
info.utime = mddev->utime;
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
info.layout = mddev->layout;
info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr, *buf = NULL;
int err = -ENOMEM;
if (md_allow_write(mddev))
file = kmalloc(sizeof(*file), GFP_NOIO);
else
file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
goto out;
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap || !mddev->bitmap->file) {
file->pathname[0] = '\0';
goto copy_out;
}
buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
if (!buf)
goto out;
ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
if (IS_ERR(ptr))
goto out;
strcpy(file->pathname, ptr);
copy_out:
err = 0;
if (copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
out:
kfree(buf);
kfree(file);
return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
mdu_disk_info_t info;
mdk_rdev_t *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
rdev = find_rdev_nr(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
info.state = (1<<MD_DISK_REMOVED);
}
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
dev_t dev = MKDEV(info->major,info->minor);
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
if (!mddev->raid_disks) {
int err;
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
if (!list_empty(&mddev->disks)) {
mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
mdk_rdev_t, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
return err;
}
/*
* add_new_disk can be used once the array is assembled
* to add "hot spares". They must already have a superblock
* written.
*/
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
(!test_bit(In_sync, &rdev->flags) ||
rdev->raid_disk != info->raid_disk)) {
/* This was a hot-add request, but the events don't
* match, so reject it.
*/
export_rdev(rdev);
return -EINVAL;
}
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = rdev->raid_disk;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (!err && !mddev->pers->hot_remove_disk) {
/* If there is hot_add_disk but no hot_remove_disk
* then added disks are for geometry changes,
* and should be added immediately.
*/
super_types[mddev->major_version].
validate_super(mddev, rdev);
err = mddev->pers->hot_add_disk(mddev, rdev);
if (err)
unbind_rdev_from_array(rdev);
}
if (err)
export_rdev(rdev);
else
sysfs_notify_dirent_safe(rdev->sysfs_state);
md_update_sb(mddev, 1);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (!err)
md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return err;
}
/* otherwise, add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
mdname(mddev));
return -EINVAL;
}
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
rdev->desc_nr = info->number;
if (info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
else
rdev->raid_disk = -1;
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
export_rdev(rdev);
return err;
}
}
return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
if (rdev->raid_disk >= 0)
goto busy;
kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
mdk_rdev_t *rdev;
if (!mddev->pers)
return -ENODEV;
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
" version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_export;
/*
* The rest had better be atomic; we can have disk failures
* noticed in interrupt contexts ...
*/
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
return 0;
abort_export:
export_rdev(rdev);
return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
int err;
if (mddev->pers) {
if (!mddev->pers->quiesce)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
/* we should be able to change the bitmap. */
}
if (fd >= 0) {
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
mddev->bitmap_info.file = fget(fd);
if (mddev->bitmap_info.file == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
err = deny_bitmap_write_access(mddev->bitmap_info.file);
if (err) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
return err;
}
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
err = bitmap_create(mddev);
if (!err)
err = bitmap_load(mddev);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
}
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
}
mddev->bitmap_info.file = NULL;
}
return err;
}
/*
* set_array_info is used in two different ways.
* The original usage is when creating a new array.
* In this usage, raid_disks is > 0 and it together with
* level, size, not_persistent, layout and chunksize determine the
* shape of the array.
* This will always create an array with a type-0.90.0 superblock.
* The newer usage is when assembling an array.
* In this case raid_disks will be 0, and the major_version field is
* used to determine which style of superblocks are to be found on the devices.
* The minor and patch _version numbers are also kept in case the
* super_block handler wishes to interpret them.
*/
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
mddev->major_version = info->major_version;
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
/* ensure mddev_put doesn't delete this now that there
* is some minimal configuration.
*/
mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
mddev->minor_version = MD_MINOR_VERSION;
mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev->ctime = get_seconds();
mddev->level = info->level;
mddev->clevel[0] = 0;
mddev->dev_sectors = 2 * (sector_t)info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor; it is determined by which /dev/md* was
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
if (mddev->persistent)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
/*
* Generate a 128 bit UUID
*/
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
return 0;
}
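/*
* Illustrative userspace counterpart (a sketch, not part of the kernel):
* creating an array through SET_ARRAY_INFO looks roughly like
*
*	#include <fcntl.h>
*	#include <sys/ioctl.h>
*	#include <linux/raid/md_u.h>
*
*	int fd = open("/dev/md0", O_RDWR);
*	mdu_array_info_t info = { 0 };
*	info.level = 1;			(RAID1)
*	info.raid_disks = 2;
*	info.size = 0;			(0 means use whole devices)
*	ioctl(fd, SET_ARRAY_INFO, &info);
*
* raid_disks > 0 takes the "create" branch above; an assemble passes
* raid_disks == 0 and only the version fields.
*/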
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
if (mddev->external_size)
return;
mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
mdk_rdev_t *rdev;
int rv;
int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
* is used. This can only make sense for arrays with redundancy.
* linear and raid0 always use whatever space is available. We can only
* consider changing this number if no resync or reconstruction is
* happening, and if the new size is acceptable. It must fit before the
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
if (mddev->sync_thread)
return -EBUSY;
if (mddev->bitmap)
/* Sorry, cannot grow a bitmap yet, just remove it,
* grow, and re-add.
*/
return -EBUSY;
list_for_each_entry(rdev, &mddev->disks, same_set) {
sector_t avail = rdev->sectors;
if (fit && (num_sectors == 0 || num_sectors > avail))
num_sectors = avail;
if (avail < num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv)
revalidate_disk(mddev->gendisk);
return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
int rv;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
mddev->delta_disks = raid_disks - mddev->raid_disks;
rv = mddev->pers->check_reshape(mddev);
if (rv < 0)
mddev->delta_disks = 0;
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
* The version, ctime, level, size, raid_disks, not_persistent, layout
* and chunk_size fields in the info are checked against the array.
* Any differences that cannot be handled will cause an error.
* Normally, only one change can be managed at a time.
*/
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
int rv = 0;
int cnt = 0;
int state = 0;
/* calculate expected state, ignoring low bits */
if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
return -EINVAL;
/* Check there is only one change */
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
cnt++;
if (mddev->raid_disks != info->raid_disks)
cnt++;
if (mddev->layout != info->layout)
cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
cnt++;
if (cnt == 0)
return 0;
if (cnt > 1)
return -EINVAL;
if (mddev->layout != info->layout) {
/* Change layout
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
else {
mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
}
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL)
return -EINVAL;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
/* add the bitmap */
if (mddev->bitmap)
return -EEXIST;
if (mddev->bitmap_info.default_offset == 0)
return -EINVAL;
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (!rv)
rv = bitmap_load(mddev);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
if (!mddev->bitmap)
return -ENOENT;
if (mddev->bitmap->file)
return -EINVAL;
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
mdk_rdev_t *rdev;
if (mddev->pers == NULL)
return -ENODEV;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENODEV;
md_error(mddev, rdev);
return 0;
}
/*
* We have a problem here: there is no easy way to give a CHS
* virtual geometry. We currently pretend that we have 2 heads and
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
mddev_t *mddev = bdev->bd_disk->private_data;
geo->heads = 2;
geo->sectors = 4;
geo->cylinders = mddev->array_sectors / 8;
return 0;
}
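/*
* Worked example: with heads = 2 and sectors = 4, each "cylinder" is 8
* sectors, so a 1 TiB array (2^31 sectors of 512 bytes) reports
* 2^31 / 8 = 268435456 cylinders; harmlessly absurd, but enough to keep
* HDIO_GETGEO callers happy.
*/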
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
switch (cmd)
{
case RAID_VERSION:
err = get_version(argp);
goto done;
case PRINT_RAID_DEBUG:
err = 0;
md_print_devices();
goto done;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
goto done;
#endif
default:;
}
/*
* Commands creating/starting a new array:
*/
mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto abort;
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto abort;
}
switch (cmd)
{
case SET_ARRAY_INFO:
{
mdu_array_info_t info;
if (!arg)
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
goto abort_unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
goto abort_unlock;
}
goto done_unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto abort_unlock;
}
}
goto done_unlock;
default:;
}
/*
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto abort_unlock;
}
/*
* Commands even a read-only array can execute:
*/
switch (cmd)
{
case GET_ARRAY_INFO:
err = get_array_info(mddev, argp);
goto done_unlock;
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
goto done_unlock;
case GET_DISK_INFO:
err = get_disk_info(mddev, argp);
goto done_unlock;
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto done_unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, 1);
goto done_unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, 1);
goto done_unlock;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
goto done_unlock;
}
err = -EINVAL;
/* if the bdev is going readonly the value of mddev->ro
* does not matter; no writes are coming
*/
if (ro)
goto done_unlock;
/* are we already prepared for writes? */
if (mddev->ro != 1)
goto done_unlock;
/* transitioning to read-auto need only happen for
* arrays that call md_write_start
*/
if (mddev->pers) {
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
}
goto done_unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
* However non-MD ioctls (e.g. get-size) will still come through
* here and hit the 'default' below, so only disallow
* 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
} else {
err = -EROFS;
goto abort_unlock;
}
}
switch (cmd)
{
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
goto done_unlock;
}
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto done_unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
goto done_unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
goto done_unlock;
default:
err = -EINVAL;
goto abort_unlock;
}
done_unlock:
abort_unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
return err;
done:
if (err)
MD_BUG();
abort:
return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
/* These take an integer arg, do not convert */
break;
default:
arg = (unsigned long)compat_ptr(arg);
break;
}
return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
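/*
* The split above matters because a 32-bit process passes a 32-bit value
* in 'arg': compat_ptr() widens it into a valid 64-bit user pointer,
* whereas commands such as HOT_ADD_DISK pack a dev_t straight into 'arg'
* and must not be treated as a pointer at all.
*/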
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
mddev_t *mddev = mddev_find(bdev->bd_dev);
int err;
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
return 0;
}
static int md_media_changed(struct gendisk *disk)
{
mddev_t *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
mddev_t *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
mdk_thread_t *thread = arg;
/*
* md_thread is a 'system-thread'; its priority should be very
* high. We avoid resource deadlocks individually in each
* raid personality. (RAID5 does preallocation.) We also use RR and
* the very same RT priority as kswapd, thus we will never get
* into a priority inversion deadlock.
*
* We definitely have to have equal or higher priority than
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
* That means we need to be sure no signals are
* pending
*/
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
if (!kthread_should_stop())
thread->run(thread->mddev);
}
return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
if (thread) {
dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
const char *name)
{
mdk_thread_t *thread;
thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
if (!thread)
return NULL;
init_waitqueue_head(&thread->wqueue);
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name ?: mddev->pers->name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
return thread;
}
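/*
* Typical caller pattern (a sketch; personalities do this from their
* run() methods):
*
*	mddev->thread = md_register_thread(raid1d, mddev, NULL);
*	if (!mddev->thread)
*		goto abort;
*
* With name == NULL the kthread is named "<mdname>_<personality name>",
* e.g. "md0_raid1".
*/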
void md_unregister_thread(mdk_thread_t **threadp)
{
mdk_thread_t *thread = *threadp;
if (!thread)
return;
dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
if (!mddev) {
MD_BUG();
return;
}
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
if (mddev->external)
set_bit(Blocked, &rdev->flags);
/*
dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
mdname(mddev),
MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
__builtin_return_address(0),__builtin_return_address(1),
__builtin_return_address(2),__builtin_return_address(3));
*/
if (!mddev->pers)
return;
if (!mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
/* seq_file implementation for /proc/mdstat */
static void status_unused(struct seq_file *seq)
{
int i = 0;
mdk_rdev_t *rdev;
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
bdevname(rdev->bdev,b));
}
if (!i)
seq_printf(seq, "<none>");
seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
sector_t rt;
int scale;
unsigned int per_milli;
resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
/*
* Should not happen.
*/
if (!max_sectors) {
MD_BUG();
return;
}
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
* Thus 'scale' must be at least 10.
*/
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
while ( max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
sector_div(res, (u32)((max_sectors>>scale)+1));
per_milli = res;
{
int i, x = per_milli/50, y = 20-x;
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
seq_printf(seq, ">");
for (i = 0; i < y; i++)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
/*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*
* rt is a sector_t, so it could be 32-bit or 64-bit.
* So we divide before multiply in case it is 32bit and close
* to the limit.
* We scale the divisor (db) by 32 to avoid losing precision
* near the end of resync when the number of remaining sectors
* is close to 'db'.
* We then divide rt by 32 after multiplying by db to compensate.
* The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- mddev->resync_mark_cnt;
rt = max_sectors - resync; /* number of remaining sectors */
sector_div(rt, db/32+1);
rt *= dt;
rt >>= 5;
seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
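/*
* Worked example of the finish-time estimate above (illustrative
* numbers): with rt = 1000000 sectors remaining and db = 64000 sectors
* in dt = 10 seconds, sector_div(rt, db/32 + 1) gives 1000000/2001 = 499;
* then rt * dt >> 5 = 499 * 10 / 32 = 155 seconds, printed as
* "finish=2.5min", close to the exact 156.25s.
*/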
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
struct list_head *tmp;
loff_t l = *pos;
mddev_t *mddev;
if (l >= 0x10000)
return NULL;
if (!l--)
/* header */
return (void*)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, mddev_t, all_mddevs);
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
return mddev;
}
spin_unlock(&all_mddevs_lock);
if (!l--)
return (void*)2;/* tail */
return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
mddev_t *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
if (v == (void*)1)
tmp = all_mddevs.next;
else
tmp = mddev->all_mddevs.next;
if (tmp != &all_mddevs)
next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
mddev_put(mddev);
return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
mddev_t *mddev = v;
if (mddev && v != (void*)1 && v != (void*)2)
mddev_put(mddev);
}
struct mdstat_info {
int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
mddev_t *mddev = v;
sector_t sectors;
mdk_rdev_t *rdev;
struct mdstat_info *mi = seq->private;
struct bitmap *bitmap;
if (v == (void*)1) {
struct mdk_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
mi->event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
status_unused(seq);
return 0;
}
if (mddev_lock(mddev) < 0)
return -EINTR;
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro==1)
seq_printf(seq, " (read-only)");
if (mddev->ro==2)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
sectors = 0;
list_for_each_entry(rdev, &mddev->disks, same_set) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
} else if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
sectors += rdev->sectors;
}
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
(unsigned long long)
mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
(unsigned long long)sectors / 2);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
seq_printf(seq," super %d.%d",
mddev->major_version,
mddev->minor_version);
}
} else if (mddev->external)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
seq_printf(seq, "\tresync=DELAYED\n ");
else if (mddev->recovery_cp < MaxSector)
seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
if ((bitmap = mddev->bitmap)) {
unsigned long chunk_kb;
unsigned long flags;
spin_lock_irqsave(&bitmap->lock, flags);
chunk_kb = mddev->bitmap_info.chunksize >> 10;
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
"%lu%s chunk",
bitmap->pages - bitmap->missing_pages,
bitmap->pages,
(bitmap->pages - bitmap->missing_pages)
<< (PAGE_SHIFT - 10),
chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
seq_path(seq, &bitmap->file->f_path, " \t\n");
}
seq_printf(seq, "\n");
spin_unlock_irqrestore(&bitmap->lock, flags);
}
seq_printf(seq, "\n");
}
mddev_unlock(mddev);
return 0;
}
static const struct seq_operations md_seq_ops = {
.start = md_seq_start,
.next = md_seq_next,
.stop = md_seq_stop,
.show = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
int error;
struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
if (mi == NULL)
return -ENOMEM;
error = seq_open(file, &md_seq_ops);
if (error)
kfree(mi);
else {
struct seq_file *p = file->private_data;
p->private = mi;
mi->event = atomic_read(&md_event_count);
}
return error;
}
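/* Userspace (mdadm's monitor mode, for example) can poll() an open
* /proc/mdstat: POLLPRI|POLLERR below report that md_event_count has
* moved since mi->event was last snapshotted, i.e. that array state
* changed and the file should be re-read.
*/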
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *m = filp->private_data;
struct mdstat_info *mi = m->private;
int mask;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
mask = POLLIN | POLLRDNORM;
if (mi->event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.poll = mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
spin_unlock(&pers_lock);
return 0;
}
int unregister_md_personality(struct mdk_personality *p)
{
printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
static int is_mddev_idle(mddev_t *mddev, int init)
{
mdk_rdev_t * rdev;
int idle;
int curr_events;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stats to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
* the state where there is little or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
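/* E.g. a burst of more than 64 sectors of non-resync IO since
* last_events was recorded trips the test below, marking the
* array non-idle so that resync throttles back.
*/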
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rcu_read_unlock();
return idle;
}
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
/* another "blocks" (512byte) blocks have been synced */
atomic_sub(blocks, &mddev->recovery_active);
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
/* stop recovery, signal do_sync ... */
}
}
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
*/
void md_write_start(mddev_t *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
/* need to switch to read/write */
mddev->ro = 0;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
* In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(mddev_t *mddev)
{
if (!mddev->pers)
return 0;
if (mddev->ro)
return 0;
if (!mddev->pers->sync_request)
return 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
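/* Hypothetical caller sketch (illustration only, not from this file),
* run under mddev_lock:
*
*	err = md_allow_write(mddev);
*	if (err)
*		return err;	(-EAGAIN: userspace was already notified)
*	ptr = kmalloc(sz, GFP_KERNEL);	(now safe while holding the lock)
*/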
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
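/* With SYNC_MARKS == 10 marks taken every SYNC_MARK_STEP (3*HZ), the
* resync_mark/resync_mark_cnt pair trails "now" by up to ~30 seconds,
* so the speed printed in /proc/mdstat is roughly a 30s moving average.
*/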
void md_do_sync(mddev_t *mddev)
{
mddev_t *mddev2;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
mdk_rdev_t *rdev;
char *desc;
/* just in case the thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if (mddev->ro) /* never try to sync a read-only array */
return;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
desc = "data-check";
else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
desc = "requested-resync";
else
desc = "resync";
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
desc = "reshape";
else
desc = "recovery";
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
* 1 == like 2, but have yielded to allow conflicting resync to
* commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
* we wait on resync_wait. To avoid deadlock, we reduce curr_resync
* to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
* This will mean we have to start checking from the beginning again.
*
*/
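/* Example: if arrays A and B share a disk and both reach
* curr_resync == 2, the one at the lower address (say A) drops to 1
* and yields; B sees A's curr_resync (1) below its own (2) and
* proceeds, while A sleeps on resync_wait and re-checks from the
* beginning once B finishes.
*/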
do {
mddev->curr_resync = 2;
try_again:
if (kthread_should_stop())
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
if (mddev < mddev2 && mddev->curr_resync == 2) {
/* arbitrarily yield */
mddev->curr_resync = 1;
wake_up(&resync_wait);
}
if (mddev > mddev2 && mddev->curr_resync == 1)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
continue;
/* We need to wait 'interruptible' so as not to
* contribute to the load average, and not to
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
finish_wait(&resync_wait, &wq);
}
}
} while (mddev->curr_resync < 2);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
mddev->resync_mismatches = 0;
/* we don't use the checkpoint if there's a bitmap */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->resync_min;
else if (!mddev->bitmap)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->dev_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
rcu_read_lock();
list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
" %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
mark_cnt[m] = io_sectors;
}
last_mark = 0;
mddev->resync_mark = mark[last_mark];
mddev->resync_mark_cnt = mark_cnt[last_mark];
/*
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
}
mddev->curr_resync_completed = j;
while (j < max_sectors) {
sector_t sectors;
skipped = 0;
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
while (j >= mddev->resync_max && !kthread_should_stop()) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
*/
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());
}
if (kthread_should_stop())
goto interrupted;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out;
}
if (!skipped) { /* actual IO requested */
io_sectors += sectors;
atomic_add(sectors, &mddev->recovery_active);
}
j += sectors;
if (j>1) mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
last_check = io_sectors;
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
mddev->resync_mark = mark[next];
mddev->resync_mark_cnt = mark_cnt[next];
mark[next] = jiffies;
mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
last_mark = next;
}
if (kthread_should_stop())
goto interrupted;
/*
* this loop exits only if we are slower than the 'hard'
* speed limit, or if the system was IO-idle for a jiffy.
* the system might be non-idle CPU-wise, but we only care
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev, 0)) {
msleep(500);
goto repeat;
}
}
}
printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
/*
* this also signals 'finished resyncing' to md_stop
*/
out:
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
mddev->recovery_cp = mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
rcu_read_lock();
list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
}
set_bit(MD_CHANGE_DEVS, &mddev->flags);
skip:
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
interrupted:
/*
* got a signal, exit.
*/
printk(KERN_INFO
"md: md_do_sync() got signal ... exiting\n");
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
mdk_rdev_t *rdev;
int spares = 0;
mddev->curr_resync_completed = 0;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
char nm[20];
sprintf(nm,"rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
rdev->raid_disk = -1;
}
}
if (mddev->degraded && !mddev->recovery_disabled) {
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(Blocked, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
/* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
} else
break;
}
}
}
return spares;
}
static void reap_sync_thread(mddev_t *mddev)
{
mdk_rdev_t *rdev;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
sysfs_notify(&mddev->kobj, NULL,
"degraded");
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
md_update_sb(mddev, 1);
/* if the array is no longer degraded, then any saved_raid_disk
* information must be scrapped
*/
if (!mddev->degraded)
list_for_each_entry(rdev, &mddev->disks, same_set)
rdev->saved_raid_disk = -1;
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
* Raid personalities that don't have a thread (linear/raid0) do not
* need this as they never do any recovery or update the superblock.
*
* It does not do any resync itself, but rather "forks" off other threads
* to do that as needed.
* When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
* "->recovery" and create a thread at ->sync_thread.
* When the thread finishes it sets MD_RECOVERY_DONE
* and wakes up this thread, which will reap the sync thread and finish up.
* This thread also removes any faulty devices (with nr_pending == 0).
*
* The overall approach is:
* 1/ if the superblock needs updating, update it.
* 2/ If a recovery thread is running, don't do anything else.
* 3/ If recovery has finished, clean up, possibly marking spares active.
* 4/ If there are any faulty devices, remove them.
* 5/ If the array is degraded, try to add spare devices
* 6/ If array has spares or is not in-sync, start a resync thread.
*/
void md_check_recovery(mddev_t *mddev)
{
if (mddev->suspended)
return;
if (mddev->bitmap)
bitmap_daemon_work(mddev);
if (mddev->ro)
return;
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
}
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector)
))
return;
if (mddev_trylock(mddev)) {
int spares = 0;
if (mddev->ro) {
/* Only thing we do on a ro array is remove
* failed devices.
*/
mdk_rdev_t *rdev;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
test_bit(Faulty, &rdev->flags) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
char nm[20];
sprintf(nm,"rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
rdev->raid_disk = -1;
}
}
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (mddev->sync_thread) {
reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
* any transients in the value of "sync_action".
*/
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto unlock;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
* Spares are also removed and re-added, to allow
* the personality to fail the re-add.
*/
if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto unlock;
if (mddev->pers->sync_request) {
if (spares && mddev->bitmap && ! mddev->bitmap->file) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
bitmap_write_all(mddev->bitmap);
}
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
unlock:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
mddev_unlock(mddev);
}
}
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct list_head *tmp;
mddev_t *mddev;
if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
printk(KERN_INFO "md: stopping all md devices.\n");
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
/* Force a switch to readonly even if the array
* appears to still be in use. Hence
* the '100'.
*/
md_set_readonly(mddev, 100);
mddev_unlock(mddev);
}
/*
* certain more exotic SCSI devices are known to be
* volatile with respect to premature system reboots. While the
* right place to handle this issue is the given
* driver, we do want to have a safe RAID driver ...
*/
mdelay(1000*1);
}
return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
int ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
md_misc_wq = alloc_workqueue("md_misc", 0, 0);
if (!md_misc_wq)
goto err_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
if ((ret = register_blkdev(0, "mdp")) < 0)
goto err_mdp;
mdp_major = ret;
blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
return 0;
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
return ret;
}
#ifndef MODULE
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
struct detected_devices_node *node_detected_dev;
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
list_add_tail(&node_detected_dev->list, &all_detected_devices);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
static void autostart_arrays(int part)
{
mdk_rdev_t *rdev;
struct detected_devices_node *node_detected_dev;
dev_t dev;
int i_scanned, i_passed;
i_scanned = 0;
i_passed = 0;
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
struct detected_devices_node, list);
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
if (test_bit(Faulty, &rdev->flags)) {
MD_BUG();
continue;
}
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
mddev_t *mddev;
struct list_head *tmp;
blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
char *e;
int num = simple_strtoul(val, &e, 10);
if (*val && (*e == '\0' || *e == '\n')) {
start_readonly = num;
return 0;
}
return -EINVAL;
}
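/* The parameters declared below are typically exposed under
* /sys/module/md_mod/parameters/; e.g. writing "1" to .../start_ro
* (as root) makes newly assembled arrays start auto-read-only until
* the first write arrives.
*/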
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
| gpl-2.0 |
zarboz/aozp-ville | arch/arm/mach-lpc32xx/irq.c | 1431 | 11515 | /*
* arch/arm/mach-lpc32xx/irq.c
*
* Author: Kevin Wells <kevin.wells@nxp.com>
*
* Copyright (C) 2010 NXP Semiconductors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/io.h>
#include <mach/irqs.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include "common.h"
/*
* Default value representing the Activation polarity of all internal
* interrupt sources
*/
#define MIC_APR_DEFAULT 0x3FF0EFE0
#define SIC1_APR_DEFAULT 0xFBD27186
#define SIC2_APR_DEFAULT 0x801810C0
/*
* Default value representing the Activation Type of all internal
* interrupt sources. All are level sensitive.
*/
#define MIC_ATR_DEFAULT 0x00000000
#define SIC1_ATR_DEFAULT 0x00026000
#define SIC2_ATR_DEFAULT 0x00000000
struct lpc32xx_event_group_regs {
void __iomem *enab_reg;
void __iomem *edge_reg;
void __iomem *maskstat_reg;
void __iomem *rawstat_reg;
};
static const struct lpc32xx_event_group_regs lpc32xx_event_int_regs = {
.enab_reg = LPC32XX_CLKPWR_INT_ER,
.edge_reg = LPC32XX_CLKPWR_INT_AP,
.maskstat_reg = LPC32XX_CLKPWR_INT_SR,
.rawstat_reg = LPC32XX_CLKPWR_INT_RS,
};
static const struct lpc32xx_event_group_regs lpc32xx_event_pin_regs = {
.enab_reg = LPC32XX_CLKPWR_PIN_ER,
.edge_reg = LPC32XX_CLKPWR_PIN_AP,
.maskstat_reg = LPC32XX_CLKPWR_PIN_SR,
.rawstat_reg = LPC32XX_CLKPWR_PIN_RS,
};
struct lpc32xx_event_info {
const struct lpc32xx_event_group_regs *event_group;
u32 mask;
};
/*
* Maps an IRQ number to and event mask and register
*/
static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = {
[IRQ_LPC32XX_GPI_08] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_08_BIT,
},
[IRQ_LPC32XX_GPI_09] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_09_BIT,
},
[IRQ_LPC32XX_GPI_19] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_19_BIT,
},
[IRQ_LPC32XX_GPI_07] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_07_BIT,
},
[IRQ_LPC32XX_GPI_00] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_00_BIT,
},
[IRQ_LPC32XX_GPI_01] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_01_BIT,
},
[IRQ_LPC32XX_GPI_02] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_02_BIT,
},
[IRQ_LPC32XX_GPI_03] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_03_BIT,
},
[IRQ_LPC32XX_GPI_04] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_04_BIT,
},
[IRQ_LPC32XX_GPI_05] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_05_BIT,
},
[IRQ_LPC32XX_GPI_06] = {
.event_group = &lpc32xx_event_pin_regs,
.mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT,
},
[IRQ_LPC32XX_GPIO_00] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT,
},
[IRQ_LPC32XX_GPIO_01] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_01_BIT,
},
[IRQ_LPC32XX_GPIO_02] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_02_BIT,
},
[IRQ_LPC32XX_GPIO_03] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_03_BIT,
},
[IRQ_LPC32XX_GPIO_04] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_04_BIT,
},
[IRQ_LPC32XX_GPIO_05] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_GPIO_05_BIT,
},
[IRQ_LPC32XX_KEY] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_KEY_BIT,
},
[IRQ_LPC32XX_USB_OTG_ATX] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_USBATXINT_BIT,
},
[IRQ_LPC32XX_USB_HOST] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_USB_BIT,
},
[IRQ_LPC32XX_RTC] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_RTC_BIT,
},
[IRQ_LPC32XX_MSTIMER] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_MSTIMER_BIT,
},
[IRQ_LPC32XX_TS_AUX] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_TS_AUX_BIT,
},
[IRQ_LPC32XX_TS_P] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_TS_P_BIT,
},
[IRQ_LPC32XX_TS_IRQ] = {
.event_group = &lpc32xx_event_int_regs,
.mask = LPC32XX_CLKPWR_INTSRC_ADC_BIT,
},
};
static void get_controller(unsigned int irq, unsigned int *base,
unsigned int *irqbit)
{
if (irq < 32) {
*base = LPC32XX_MIC_BASE;
*irqbit = 1 << irq;
} else if (irq < 64) {
*base = LPC32XX_SIC1_BASE;
*irqbit = 1 << (irq - 32);
} else {
*base = LPC32XX_SIC2_BASE;
*irqbit = 1 << (irq - 64);
}
}
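/* E.g. irq 35 maps to SIC1 with irqbit = 1 << 3 and irq 70 to SIC2
* with irqbit = 1 << 6; irqs 0-31 stay on the main controller (MIC).
*/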
static void lpc32xx_mask_irq(struct irq_data *d)
{
unsigned int reg, ctrl, mask;
get_controller(d->irq, &ctrl, &mask);
reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) & ~mask;
__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
}
static void lpc32xx_unmask_irq(struct irq_data *d)
{
unsigned int reg, ctrl, mask;
get_controller(d->irq, &ctrl, &mask);
reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) | mask;
__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
}
static void lpc32xx_ack_irq(struct irq_data *d)
{
unsigned int ctrl, mask;
get_controller(d->irq, &ctrl, &mask);
__raw_writel(mask, LPC32XX_INTC_RAW_STAT(ctrl));
/* Also need to clear pending wake event */
if (lpc32xx_events[d->irq].mask != 0)
__raw_writel(lpc32xx_events[d->irq].mask,
lpc32xx_events[d->irq].event_group->rawstat_reg);
}
static void __lpc32xx_set_irq_type(unsigned int irq, int use_high_level,
int use_edge)
{
unsigned int reg, ctrl, mask;
get_controller(irq, &ctrl, &mask);
/* Activation level, high or low */
reg = __raw_readl(LPC32XX_INTC_POLAR(ctrl));
if (use_high_level)
reg |= mask;
else
reg &= ~mask;
__raw_writel(reg, LPC32XX_INTC_POLAR(ctrl));
/* Activation type, edge or level */
reg = __raw_readl(LPC32XX_INTC_ACT_TYPE(ctrl));
if (use_edge)
reg |= mask;
else
reg &= ~mask;
__raw_writel(reg, LPC32XX_INTC_ACT_TYPE(ctrl));
/* Use same polarity for the wake events */
if (lpc32xx_events[irq].mask != 0) {
reg = __raw_readl(lpc32xx_events[irq].event_group->edge_reg);
if (use_high_level)
reg |= lpc32xx_events[irq].mask;
else
reg &= ~lpc32xx_events[irq].mask;
__raw_writel(reg, lpc32xx_events[irq].event_group->edge_reg);
}
}
static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type)
{
switch (type) {
case IRQ_TYPE_EDGE_RISING:
/* Rising edge sensitive */
__lpc32xx_set_irq_type(d->irq, 1, 1);
break;
case IRQ_TYPE_EDGE_FALLING:
/* Falling edge sensitive */
__lpc32xx_set_irq_type(d->irq, 0, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
/* Low level sensitive */
__lpc32xx_set_irq_type(d->irq, 0, 0);
break;
case IRQ_TYPE_LEVEL_HIGH:
/* High level sensitive */
__lpc32xx_set_irq_type(d->irq, 1, 0);
break;
/* Other modes are not supported */
default:
return -EINVAL;
}
/* Ok to use the level handler for all types */
irq_set_handler(d->irq, handle_level_irq);
return 0;
}
static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
{
unsigned long eventreg;
if (lpc32xx_events[d->irq].mask != 0) {
eventreg = __raw_readl(lpc32xx_events[d->irq].
event_group->enab_reg);
if (state)
eventreg |= lpc32xx_events[d->irq].mask;
else
eventreg &= ~lpc32xx_events[d->irq].mask;
__raw_writel(eventreg,
lpc32xx_events[d->irq].event_group->enab_reg);
return 0;
}
/* No wake event exists for this IRQ: its mask is 0 and its
* event_group pointer is NULL, so it must not be dereferenced;
* wakeup is simply unsupported here.
*/
return -ENODEV;
}
static void __init lpc32xx_set_default_mappings(unsigned int apr,
unsigned int atr, unsigned int offset)
{
unsigned int i;
/* Set activation levels for each interrupt */
i = 0;
while (i < 32) {
__lpc32xx_set_irq_type(offset + i, ((apr >> i) & 0x1),
((atr >> i) & 0x1));
i++;
}
}
static struct irq_chip lpc32xx_irq_chip = {
.irq_ack = lpc32xx_ack_irq,
.irq_mask = lpc32xx_mask_irq,
.irq_unmask = lpc32xx_unmask_irq,
.irq_set_type = lpc32xx_set_irq_type,
.irq_set_wake = lpc32xx_irq_wake
};
static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));
while (ints != 0) {
int irqno = fls(ints) - 1;
ints &= ~(1 << irqno);
generic_handle_irq(LPC32XX_SIC1_IRQ(irqno));
}
}
static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));
while (ints != 0) {
int irqno = fls(ints) - 1;
ints &= ~(1 << irqno);
generic_handle_irq(LPC32XX_SIC2_IRQ(irqno));
}
}
void __init lpc32xx_init_irq(void)
{
unsigned int i;
/* Setup MIC */
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_MIC_BASE));
__raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_MIC_BASE));
__raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_MIC_BASE));
/* Setup SIC1 */
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE));
__raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
__raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
/* Setup SIC2 */
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
__raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
__raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
/* Configure supported IRQ's */
for (i = 0; i < NR_IRQS; i++) {
irq_set_chip_and_handler(i, &lpc32xx_irq_chip,
handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
/* Set default mappings */
lpc32xx_set_default_mappings(MIC_APR_DEFAULT, MIC_ATR_DEFAULT, 0);
lpc32xx_set_default_mappings(SIC1_APR_DEFAULT, SIC1_ATR_DEFAULT, 32);
lpc32xx_set_default_mappings(SIC2_APR_DEFAULT, SIC2_ATR_DEFAULT, 64);
/* mask all interrupts except SUBIRQ */
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_MIC_BASE));
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE));
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
/* MIC SUBIRQx interrupts will route handling to the chain handlers */
irq_set_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler);
irq_set_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler);
/* Initially disable all wake events */
__raw_writel(0, LPC32XX_CLKPWR_P01_ER);
__raw_writel(0, LPC32XX_CLKPWR_INT_ER);
__raw_writel(0, LPC32XX_CLKPWR_PIN_ER);
/*
* Default wake activation polarities, all pin sources are low edge
* triggered
*/
__raw_writel(LPC32XX_CLKPWR_INTSRC_TS_P_BIT |
LPC32XX_CLKPWR_INTSRC_MSTIMER_BIT |
LPC32XX_CLKPWR_INTSRC_RTC_BIT,
LPC32XX_CLKPWR_INT_AP);
__raw_writel(0, LPC32XX_CLKPWR_PIN_AP);
/* Clear latched wake event states */
__raw_writel(__raw_readl(LPC32XX_CLKPWR_PIN_RS),
LPC32XX_CLKPWR_PIN_RS);
__raw_writel(__raw_readl(LPC32XX_CLKPWR_INT_RS),
LPC32XX_CLKPWR_INT_RS);
}
| gpl-2.0 |
krinkin/linux | drivers/hwmon/g760a.c | 1687 | 5719 | /*
* g760a - Driver for the Global Mixed-mode Technology Inc. G760A
* fan speed PWM controller chip
*
* Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
*
* Complete datasheet is available at GMT's website:
* http://www.gmt.com.tw/product/datasheet/EDS-760A.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
enum g760a_regs {
G760A_REG_SET_CNT = 0x00,
G760A_REG_ACT_CNT = 0x01,
G760A_REG_FAN_STA = 0x02
};
#define G760A_REG_FAN_STA_RPM_OFF 0x1 /* +/-20% off */
#define G760A_REG_FAN_STA_RPM_LOW 0x2 /* below 1920rpm */
/* register data is read (and cached) at most once per second */
#define G760A_UPDATE_INTERVAL (HZ)
struct g760a_data {
struct i2c_client *client;
struct mutex update_lock;
/* board specific parameters */
u32 clk; /* default 32kHz */
u16 fan_div; /* default P=2 */
/* g760a register cache */
unsigned int valid:1;
unsigned long last_updated; /* In jiffies */
u8 set_cnt; /* PWM (period) count number; 0xff stops fan */
u8 act_cnt; /* formula: cnt = (CLK * 30)/(rpm * P) */
u8 fan_sta; /* bit 0: set when actual fan speed more than 20%
* outside requested fan speed
* bit 1: set when fan speed below 1920 rpm
*/
};
#define G760A_DEFAULT_CLK 32768
#define G760A_DEFAULT_FAN_DIV 2
#define PWM_FROM_CNT(cnt) (0xff-(cnt))
#define PWM_TO_CNT(pwm) (0xff-(pwm))
static inline unsigned int rpm_from_cnt(u8 val, u32 clk, u16 div)
{
return ((val == 0x00) ? 0 : ((clk*30)/(val*div)));
}
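/* E.g. with the default clk = 32768 and fan_div = 2, an ACT_CNT of
* 0x80 (128) reads back as 32768 * 30 / (128 * 2) = 3840 rpm; a count
* of 0x00 is reported as 0 rpm instead of dividing by zero.
*/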
/* read/write wrappers */
static int g760a_read_value(struct i2c_client *client, enum g760a_regs reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static int g760a_write_value(struct i2c_client *client, enum g760a_regs reg,
u16 value)
{
return i2c_smbus_write_byte_data(client, reg, value);
}
/*
* sysfs attributes
*/
static struct g760a_data *g760a_update_client(struct device *dev)
{
struct g760a_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + G760A_UPDATE_INTERVAL)
|| !data->valid) {
dev_dbg(&client->dev, "Starting g760a update\n");
data->set_cnt = g760a_read_value(client, G760A_REG_SET_CNT);
data->act_cnt = g760a_read_value(client, G760A_REG_ACT_CNT);
data->fan_sta = g760a_read_value(client, G760A_REG_FAN_STA);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
static ssize_t show_fan(struct device *dev, struct device_attribute *da,
char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
unsigned int rpm = 0;
mutex_lock(&data->update_lock);
if (!(data->fan_sta & G760A_REG_FAN_STA_RPM_LOW))
rpm = rpm_from_cnt(data->act_cnt, data->clk, data->fan_div);
mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", rpm);
}
static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
int fan_alarm = (data->fan_sta & G760A_REG_FAN_STA_RPM_OFF) ? 1 : 0;
return sprintf(buf, "%d\n", fan_alarm);
}
static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
return sprintf(buf, "%d\n", PWM_FROM_CNT(data->set_cnt));
}
static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct g760a_data *data = g760a_update_client(dev);
struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
data->set_cnt = PWM_TO_CNT(clamp_val(val, 0, 255));
g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
static struct attribute *g760a_attrs[] = {
&dev_attr_pwm1.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_alarm.attr,
NULL
};
ATTRIBUTE_GROUPS(g760a);
/*
* new-style driver model code
*/
static int g760a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct g760a_data *data;
struct device *hwmon_dev;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
data = devm_kzalloc(dev, sizeof(struct g760a_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
mutex_init(&data->update_lock);
/* setup default configuration for now */
data->fan_div = G760A_DEFAULT_FAN_DIV;
data->clk = G760A_DEFAULT_CLK;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data,
g760a_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id g760a_id[] = {
{ "g760a", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, g760a_id);
static struct i2c_driver g760a_driver = {
.driver = {
.name = "g760a",
},
.probe = g760a_probe,
.id_table = g760a_id,
};
module_i2c_driver(g760a_driver);
MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
MODULE_DESCRIPTION("GMT G760A driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Bogdacutu/STLinux-Kernel | net/ipv6/ipv6_sockglue.c | 1943 | 28608 | /*
* IPv6 BSD socket options interface
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/net/ipv4/ip_sockglue.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* FIXME: Make the setsockopt code POSIX compliant: That is
*
* o Truncate getsockopt returns
* o Return an optlen of the truncated length if need be
*
* Changes:
* David L Stevens <dlstevens@us.ibm.com>:
* - added multicast source filtering API for MLDv2
*/
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/xfrm.h>
#include <net/compat.h>
#include <asm/uaccess.h>
struct ip6_ra_chain *ip6_ra_chain;
DEFINE_RWLOCK(ip6_ra_lock);
int ip6_ra_control(struct sock *sk, int sel)
{
struct ip6_ra_chain *ra, *new_ra, **rap;
/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW)
return -ENOPROTOOPT;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
write_lock_bh(&ip6_ra_lock);
for (rap = &ip6_ra_chain; (ra=*rap) != NULL; rap = &ra->next) {
if (ra->sk == sk) {
if (sel>=0) {
write_unlock_bh(&ip6_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
}
*rap = ra->next;
write_unlock_bh(&ip6_ra_lock);
sock_put(sk);
kfree(ra);
return 0;
}
}
if (new_ra == NULL) {
write_unlock_bh(&ip6_ra_lock);
return -ENOBUFS;
}
new_ra->sk = sk;
new_ra->sel = sel;
new_ra->next = ra;
*rap = new_ra;
sock_hold(sk);
write_unlock_bh(&ip6_ra_lock);
return 0;
}
static
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt)
{
if (inet_sk(sk)->is_icsk) {
if (opt &&
!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) {
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
opt = xchg(&inet6_sk(sk)->opt, opt);
} else {
spin_lock(&sk->sk_dst_lock);
opt = xchg(&inet6_sk(sk)->opt, opt);
spin_unlock(&sk->sk_dst_lock);
}
sk_dst_reset(sk);
return opt;
}
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
int val, valbool;
int retv = -ENOPROTOOPT;
if (optval == NULL)
val=0;
else {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else
val = 0;
}
valbool = (val!=0);
if (ip6_mroute_opt(optname))
return ip6_mroute_setsockopt(sk, optname, optval, optlen);
lock_sock(sk);
switch (optname) {
case IPV6_ADDRFORM:
if (optlen < sizeof(int))
goto e_inval;
if (val == PF_INET) {
struct ipv6_txoptions *opt;
struct sk_buff *pktopt;
if (sk->sk_type == SOCK_RAW)
break;
if (sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_UDPLITE) {
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET6) {
retv = -EBUSY;
break;
}
} else if (sk->sk_protocol != IPPROTO_TCP)
break;
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
}
if (ipv6_only_sock(sk) ||
!ipv6_addr_v4mapped(&np->daddr)) {
retv = -EADDRNOTAVAIL;
break;
}
fl6_free_socklist(sk);
ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
* remove it from the refcnt debug socks count in the
* original family...
*/
sk_refcnt_debug_dec(sk);
if (sk->sk_protocol == IPPROTO_TCP) {
struct inet_connection_sock *icsk = inet_csk(sk);
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, &tcp_prot, 1);
local_bh_enable();
sk->sk_prot = &tcp_prot;
icsk->icsk_af_ops = &ipv4_specific;
sk->sk_socket->ops = &inet_stream_ops;
sk->sk_family = PF_INET;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct proto *prot = &udp_prot;
if (sk->sk_protocol == IPPROTO_UDPLITE)
prot = &udplite_prot;
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, prot, 1);
local_bh_enable();
sk->sk_prot = prot;
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
}
opt = xchg(&np->opt, NULL);
if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
sk->sk_destruct = inet_sock_destruct;
/*
* ... and add it to the refcnt debug socks count
* in the new family. -acme
*/
sk_refcnt_debug_inc(sk);
module_put(THIS_MODULE);
retv = 0;
break;
}
goto e_inval;
case IPV6_V6ONLY:
if (optlen < sizeof(int) ||
inet_sk(sk)->inet_num)
goto e_inval;
np->ipv6only = valbool;
retv = 0;
break;
case IPV6_RECVPKTINFO:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxinfo = valbool;
retv = 0;
break;
case IPV6_2292PKTINFO:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxoinfo = valbool;
retv = 0;
break;
case IPV6_RECVHOPLIMIT:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxhlim = valbool;
retv = 0;
break;
case IPV6_2292HOPLIMIT:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxohlim = valbool;
retv = 0;
break;
case IPV6_RECVRTHDR:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.srcrt = valbool;
retv = 0;
break;
case IPV6_2292RTHDR:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.osrcrt = valbool;
retv = 0;
break;
case IPV6_RECVHOPOPTS:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.hopopts = valbool;
retv = 0;
break;
case IPV6_2292HOPOPTS:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.ohopopts = valbool;
retv = 0;
break;
case IPV6_RECVDSTOPTS:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.dstopts = valbool;
retv = 0;
break;
case IPV6_2292DSTOPTS:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.odstopts = valbool;
retv = 0;
break;
case IPV6_TCLASS:
if (optlen < sizeof(int))
goto e_inval;
if (val < -1 || val > 0xff)
goto e_inval;
/* RFC 3542, 6.5: default traffic class of 0x0 */
if (val == -1)
val = 0;
np->tclass = val;
retv = 0;
break;
case IPV6_RECVTCLASS:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxtclass = valbool;
retv = 0;
break;
case IPV6_FLOWINFO:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxflow = valbool;
retv = 0;
break;
case IPV6_RECVPATHMTU:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxpmtu = valbool;
retv = 0;
break;
case IPV6_TRANSPARENT:
if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
retv = -EPERM;
break;
}
if (optlen < sizeof(int))
goto e_inval;
/* we don't have a separate transparent bit for IPv6; we use the one in the IPv4 socket */
inet_sk(sk)->transparent = valbool;
retv = 0;
break;
case IPV6_RECVORIGDSTADDR:
if (optlen < sizeof(int))
goto e_inval;
np->rxopt.bits.rxorigdstaddr = valbool;
retv = 0;
break;
case IPV6_HOPOPTS:
case IPV6_RTHDRDSTOPTS:
case IPV6_RTHDR:
case IPV6_DSTOPTS:
{
struct ipv6_txoptions *opt;
/* remove any sticky options header with a zero option
* length, per RFC3542.
*/
if (optlen == 0)
optval = NULL;
else if (optval == NULL)
goto e_inval;
else if (optlen < sizeof(struct ipv6_opt_hdr) ||
optlen & 0x7 || optlen > 8 * 255)
goto e_inval;
/* hop-by-hop / destination options are privileged option */
retv = -EPERM;
if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
break;
opt = ipv6_renew_options(sk, np->opt, optname,
(struct ipv6_opt_hdr __user *)optval,
optlen);
if (IS_ERR(opt)) {
retv = PTR_ERR(opt);
break;
}
/* routing header option needs extra check */
retv = -EINVAL;
if (optname == IPV6_RTHDR && opt && opt->srcrt) {
struct ipv6_rt_hdr *rthdr = opt->srcrt;
switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPV6_SRCRT_TYPE_2:
if (rthdr->hdrlen != 2 ||
rthdr->segments_left != 1)
goto sticky_done;
break;
#endif
default:
goto sticky_done;
}
}
retv = 0;
opt = ipv6_update_options(sk, opt);
sticky_done:
if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
break;
}
case IPV6_PKTINFO:
{
struct in6_pktinfo pkt;
if (optlen == 0)
goto e_inval;
else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
goto e_inval;
if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
retv = -EFAULT;
break;
}
if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
goto e_inval;
np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
retv = 0;
break;
}
case IPV6_2292PKTOPTIONS:
{
struct ipv6_txoptions *opt = NULL;
struct msghdr msg;
struct flowi6 fl6;
int junk;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
if (optlen == 0)
goto update;
/* 1K would surely not be enough: at 2K per standard header,
* eight headers already come to 16K. Allow up to 64K here.
*/
retv = -EINVAL;
if (optlen > 64*1024)
break;
opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
retv = -ENOBUFS;
if (opt == NULL)
break;
memset(opt, 0, sizeof(*opt));
opt->tot_len = sizeof(*opt) + optlen;
retv = -EFAULT;
if (copy_from_user(opt+1, optval, optlen))
goto done;
msg.msg_controllen = optlen;
msg.msg_control = (void *)(opt + 1);
retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
&junk, &junk);
if (retv)
goto done;
update:
retv = 0;
opt = ipv6_update_options(sk, opt);
done:
if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
break;
}
case IPV6_UNICAST_HOPS:
if (optlen < sizeof(int))
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
np->hop_limit = val;
retv = 0;
break;
case IPV6_MULTICAST_HOPS:
if (sk->sk_type == SOCK_STREAM)
break;
if (optlen < sizeof(int))
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
retv = 0;
break;
case IPV6_MULTICAST_LOOP:
if (optlen < sizeof(int))
goto e_inval;
if (val != valbool)
goto e_inval;
np->mc_loop = valbool;
retv = 0;
break;
case IPV6_UNICAST_IF:
{
struct net_device *dev = NULL;
int ifindex;
if (optlen != sizeof(int))
goto e_inval;
ifindex = (__force int)ntohl((__force __be32)val);
if (ifindex == 0) {
np->ucast_oif = 0;
retv = 0;
break;
}
dev = dev_get_by_index(net, ifindex);
retv = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
retv = -EINVAL;
if (sk->sk_bound_dev_if)
break;
np->ucast_oif = ifindex;
retv = 0;
break;
}
case IPV6_MULTICAST_IF:
if (sk->sk_type == SOCK_STREAM)
break;
if (optlen < sizeof(int))
goto e_inval;
if (val) {
struct net_device *dev;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
goto e_inval;
dev = dev_get_by_index(net, val);
if (!dev) {
retv = -ENODEV;
break;
}
dev_put(dev);
}
np->mcast_oif = val;
retv = 0;
break;
case IPV6_ADD_MEMBERSHIP:
case IPV6_DROP_MEMBERSHIP:
{
struct ipv6_mreq mreq;
if (optlen < sizeof(struct ipv6_mreq))
goto e_inval;
retv = -EPROTO;
if (inet_sk(sk)->is_icsk)
break;
retv = -EFAULT;
if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
break;
if (optname == IPV6_ADD_MEMBERSHIP)
retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
else
retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
break;
}
case IPV6_JOIN_ANYCAST:
case IPV6_LEAVE_ANYCAST:
{
struct ipv6_mreq mreq;
if (optlen < sizeof(struct ipv6_mreq))
goto e_inval;
retv = -EFAULT;
if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
break;
if (optname == IPV6_JOIN_ANYCAST)
retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
else
retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
break;
}
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct group_req greq;
struct sockaddr_in6 *psin6;
if (optlen < sizeof(struct group_req))
goto e_inval;
retv = -EFAULT;
if (copy_from_user(&greq, optval, sizeof(struct group_req)))
break;
if (greq.gr_group.ss_family != AF_INET6) {
retv = -EADDRNOTAVAIL;
break;
}
psin6 = (struct sockaddr_in6 *)&greq.gr_group;
if (optname == MCAST_JOIN_GROUP)
retv = ipv6_sock_mc_join(sk, greq.gr_interface,
&psin6->sin6_addr);
else
retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
&psin6->sin6_addr);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
int omode, add;
if (optlen < sizeof(struct group_source_req))
goto e_inval;
if (copy_from_user(&greqs, optval, sizeof(greqs))) {
retv = -EFAULT;
break;
}
if (greqs.gsr_group.ss_family != AF_INET6 ||
greqs.gsr_source.ss_family != AF_INET6) {
retv = -EADDRNOTAVAIL;
break;
}
if (optname == MCAST_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == MCAST_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
struct sockaddr_in6 *psin6;
psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
&psin6->sin6_addr);
/* prior join w/ different source is ok */
if (retv && retv != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
add = 1;
} else /* MCAST_LEAVE_SOURCE_GROUP */ {
omode = MCAST_INCLUDE;
add = 0;
}
retv = ip6_mc_source(add, omode, sk, &greqs);
break;
}
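/*
* Userspace sketch (fd, ifindex, grp6 and src6 are placeholders): the
* source-specific cases above all take a struct group_source_req with
* both sockaddr_storage members in AF_INET6 form:
*
*	struct group_source_req gsr = { .gsr_interface = ifindex };
*
*	memcpy(&gsr.gsr_group, &grp6, sizeof(grp6));
*	memcpy(&gsr.gsr_source, &src6, sizeof(src6));
*	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
*		   &gsr, sizeof(gsr));
*/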
case MCAST_MSFILTER:
{
struct group_filter *gsf;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
retv = -ENOBUFS;
break;
}
gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
retv = -ENOBUFS;
break;
}
retv = -EFAULT;
if (copy_from_user(gsf, optval, optlen)) {
kfree(gsf);
break;
}
/* numsrc >= (4G-140)/128 would overflow GROUP_FILTER_SIZE() in 32 bits */
if (gsf->gf_numsrc >= 0x1ffffffU ||
gsf->gf_numsrc > sysctl_mld_max_msf) {
kfree(gsf);
retv = -ENOBUFS;
break;
}
if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
kfree(gsf);
retv = -EINVAL;
break;
}
retv = ip6_mc_msfilter(sk, gsf);
kfree(gsf);
break;
}
case IPV6_ROUTER_ALERT:
if (optlen < sizeof(int))
goto e_inval;
retv = ip6_ra_control(sk, val);
break;
case IPV6_MTU_DISCOVER:
if (optlen < sizeof(int))
goto e_inval;
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
np->pmtudisc = val;
retv = 0;
break;
case IPV6_MTU:
if (optlen < sizeof(int))
goto e_inval;
if (val && val < IPV6_MIN_MTU)
goto e_inval;
np->frag_size = val;
retv = 0;
break;
case IPV6_RECVERR:
if (optlen < sizeof(int))
goto e_inval;
np->recverr = valbool;
if (!val)
skb_queue_purge(&sk->sk_error_queue);
retv = 0;
break;
case IPV6_FLOWINFO_SEND:
if (optlen < sizeof(int))
goto e_inval;
np->sndflow = valbool;
retv = 0;
break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
break;
case IPV6_IPSEC_POLICY:
case IPV6_XFRM_POLICY:
retv = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
retv = xfrm_user_policy(sk, optname, optval, optlen);
break;
case IPV6_ADDR_PREFERENCES:
{
unsigned int pref = 0;
unsigned int prefmask = ~0;
if (optlen < sizeof(int))
goto e_inval;
retv = -EINVAL;
/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
switch (val & (IPV6_PREFER_SRC_PUBLIC|
IPV6_PREFER_SRC_TMP|
IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
case IPV6_PREFER_SRC_PUBLIC:
pref |= IPV6_PREFER_SRC_PUBLIC;
break;
case IPV6_PREFER_SRC_TMP:
pref |= IPV6_PREFER_SRC_TMP;
break;
case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
break;
case 0:
goto pref_skip_pubtmp;
default:
goto e_inval;
}
prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:
/* check HOME/COA conflicts */
switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
case IPV6_PREFER_SRC_HOME:
break;
case IPV6_PREFER_SRC_COA:
pref |= IPV6_PREFER_SRC_COA;
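/* fall through: pref already carries the COA bit, so the
* prefmask clear below can safely be skipped
*/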
case 0:
goto pref_skip_coa;
default:
goto e_inval;
}
prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:
/* check CGA/NONCGA conflicts */
switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
case IPV6_PREFER_SRC_CGA:
case IPV6_PREFER_SRC_NONCGA:
case 0:
break;
default:
goto e_inval;
}
np->srcprefs = (np->srcprefs & prefmask) | pref;
retv = 0;
break;
}
case IPV6_MINHOPCOUNT:
if (optlen < sizeof(int))
goto e_inval;
if (val < 0 || val > 255)
goto e_inval;
np->min_hopcount = val;
retv = 0;
break;
case IPV6_DONTFRAG:
np->dontfrag = valbool;
retv = 0;
break;
}
release_sock(sk);
return retv;
e_inval:
release_sock(sk);
return -EINVAL;
}
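/*
* Userspace sketch (fd is a placeholder for an IPv6 socket): most of
* the boolean options handled above are toggled with a plain int, e.g.
* to request hop-limit ancillary data on receive:
*
*	int on = 1;
*
*	if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT,
*		       &on, sizeof(on)) < 0)
*		perror("setsockopt(IPV6_RECVHOPLIMIT)");
*/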
int ipv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.setsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
optname != IPV6_XFRM_POLICY) {
lock_sock(sk);
err = nf_setsockopt(sk, PF_INET6, optname, optval,
optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(ipv6_setsockopt);
#ifdef CONFIG_COMPAT
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
if (udp_prot.compat_setsockopt != NULL)
return udp_prot.compat_setsockopt(sk, level, optname,
optval, optlen);
return udp_prot.setsockopt(sk, level, optname, optval, optlen);
}
if (level != SOL_IPV6)
return -ENOPROTOOPT;
if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
return compat_mc_setsockopt(sk, level, optname, optval, optlen,
ipv6_setsockopt);
err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
optname != IPV6_XFRM_POLICY) {
lock_sock(sk);
err = compat_nf_setsockopt(sk, PF_INET6, optname,
optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ipv6_setsockopt);
#endif
static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
int optname, char __user *optval, int len)
{
struct ipv6_opt_hdr *hdr;
if (!opt)
return 0;
switch (optname) {
case IPV6_HOPOPTS:
hdr = opt->hopopt;
break;
case IPV6_RTHDRDSTOPTS:
hdr = opt->dst0opt;
break;
case IPV6_RTHDR:
hdr = (struct ipv6_opt_hdr *)opt->srcrt;
break;
case IPV6_DSTOPTS:
hdr = opt->dst1opt;
break;
default:
return -EINVAL; /* should not happen */
}
if (!hdr)
return 0;
len = min_t(unsigned int, len, ipv6_optlen(hdr));
if (copy_to_user(optval, hdr, len))
return -EFAULT;
return len;
}
static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen, unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
int len;
int val;
if (ip6_mroute_opt(optname))
return ip6_mroute_getsockopt(sk, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
case IPV6_ADDRFORM:
if (sk->sk_protocol != IPPROTO_UDP &&
sk->sk_protocol != IPPROTO_UDPLITE &&
sk->sk_protocol != IPPROTO_TCP)
return -ENOPROTOOPT;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
val = sk->sk_family;
break;
case MCAST_MSFILTER:
{
struct group_filter gsf;
int err;
if (len < GROUP_FILTER_SIZE(0))
return -EINVAL;
if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0)))
return -EFAULT;
if (gsf.gf_group.ss_family != AF_INET6)
return -EADDRNOTAVAIL;
lock_sock(sk);
err = ip6_mc_msfget(sk, &gsf,
(struct group_filter __user *)optval, optlen);
release_sock(sk);
return err;
}
case IPV6_2292PKTOPTIONS:
{
struct msghdr msg;
struct sk_buff *skb;
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
msg.msg_controllen = len;
msg.msg_flags = flags;
lock_sock(sk);
skb = np->pktoptions;
if (skb)
atomic_inc(&skb->users);
release_sock(sk);
if (skb) {
int err = ip6_datagram_recv_ctl(sk, &msg, skb);
kfree_skb(skb);
if (err)
return err;
} else {
if (np->rxopt.bits.rxinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxhlim) {
int hlim = np->mcast_hops;
put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.rxtclass) {
int tclass = np->rcv_tclass;
put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
}
if (np->rxopt.bits.rxoinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
int hlim = np->mcast_hops;
put_cmsg(&msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
}
len -= msg.msg_controllen;
return put_user(len, optlen);
}
case IPV6_MTU:
{
struct dst_entry *dst;
val = 0;
rcu_read_lock();
dst = __sk_dst_get(sk);
if (dst)
val = dst_mtu(dst);
rcu_read_unlock();
if (!val)
return -ENOTCONN;
break;
}
case IPV6_V6ONLY:
val = np->ipv6only;
break;
case IPV6_RECVPKTINFO:
val = np->rxopt.bits.rxinfo;
break;
case IPV6_2292PKTINFO:
val = np->rxopt.bits.rxoinfo;
break;
case IPV6_RECVHOPLIMIT:
val = np->rxopt.bits.rxhlim;
break;
case IPV6_2292HOPLIMIT:
val = np->rxopt.bits.rxohlim;
break;
case IPV6_RECVRTHDR:
val = np->rxopt.bits.srcrt;
break;
case IPV6_2292RTHDR:
val = np->rxopt.bits.osrcrt;
break;
case IPV6_HOPOPTS:
case IPV6_RTHDRDSTOPTS:
case IPV6_RTHDR:
case IPV6_DSTOPTS:
{
lock_sock(sk);
len = ipv6_getsockopt_sticky(sk, np->opt,
optname, optval, len);
release_sock(sk);
/* check if ipv6_getsockopt_sticky() returned an error code */
if (len < 0)
return len;
return put_user(len, optlen);
}
case IPV6_RECVHOPOPTS:
val = np->rxopt.bits.hopopts;
break;
case IPV6_2292HOPOPTS:
val = np->rxopt.bits.ohopopts;
break;
case IPV6_RECVDSTOPTS:
val = np->rxopt.bits.dstopts;
break;
case IPV6_2292DSTOPTS:
val = np->rxopt.bits.odstopts;
break;
case IPV6_TCLASS:
val = np->tclass;
break;
case IPV6_RECVTCLASS:
val = np->rxopt.bits.rxtclass;
break;
case IPV6_FLOWINFO:
val = np->rxopt.bits.rxflow;
break;
case IPV6_RECVPATHMTU:
val = np->rxopt.bits.rxpmtu;
break;
case IPV6_PATHMTU:
{
struct dst_entry *dst;
struct ip6_mtuinfo mtuinfo;
if (len < sizeof(mtuinfo))
return -EINVAL;
len = sizeof(mtuinfo);
memset(&mtuinfo, 0, sizeof(mtuinfo));
rcu_read_lock();
dst = __sk_dst_get(sk);
if (dst)
mtuinfo.ip6m_mtu = dst_mtu(dst);
rcu_read_unlock();
if (!mtuinfo.ip6m_mtu)
return -ENOTCONN;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &mtuinfo, len))
return -EFAULT;
return 0;
}
case IPV6_TRANSPARENT:
val = inet_sk(sk)->transparent;
break;
case IPV6_RECVORIGDSTADDR:
val = np->rxopt.bits.rxorigdstaddr;
break;
case IPV6_UNICAST_HOPS:
case IPV6_MULTICAST_HOPS:
{
struct dst_entry *dst;
if (optname == IPV6_UNICAST_HOPS)
val = np->hop_limit;
else
val = np->mcast_hops;
if (val < 0) {
rcu_read_lock();
dst = __sk_dst_get(sk);
if (dst)
val = ip6_dst_hoplimit(dst);
rcu_read_unlock();
}
if (val < 0)
val = sock_net(sk)->ipv6.devconf_all->hop_limit;
break;
}
case IPV6_MULTICAST_LOOP:
val = np->mc_loop;
break;
case IPV6_MULTICAST_IF:
val = np->mcast_oif;
break;
case IPV6_UNICAST_IF:
val = (__force int)htonl((__u32) np->ucast_oif);
break;
case IPV6_MTU_DISCOVER:
val = np->pmtudisc;
break;
case IPV6_RECVERR:
val = np->recverr;
break;
case IPV6_FLOWINFO_SEND:
val = np->sndflow;
break;
case IPV6_ADDR_PREFERENCES:
val = 0;
if (np->srcprefs & IPV6_PREFER_SRC_TMP)
val |= IPV6_PREFER_SRC_TMP;
else if (np->srcprefs & IPV6_PREFER_SRC_PUBLIC)
val |= IPV6_PREFER_SRC_PUBLIC;
else {
/* XXX: should we return system default? */
val |= IPV6_PREFER_SRC_PUBTMP_DEFAULT;
}
if (np->srcprefs & IPV6_PREFER_SRC_COA)
val |= IPV6_PREFER_SRC_COA;
else
val |= IPV6_PREFER_SRC_HOME;
break;
case IPV6_MINHOPCOUNT:
val = np->min_hopcount;
break;
case IPV6_DONTFRAG:
val = np->dontfrag;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = nf_getsockopt(sk, PF_INET6, optname, optval,
&len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
}
#endif
return err;
}
EXPORT_SYMBOL(ipv6_getsockopt);
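/*
* Userspace sketch (fd is a placeholder for a connected IPv6 socket):
* the IPV6_PATHMTU case above copies out a struct ip6_mtuinfo:
*
*	struct ip6_mtuinfo mtuinfo;
*	socklen_t len = sizeof(mtuinfo);
*
*	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PATHMTU, &mtuinfo, &len) == 0)
*		printf("path MTU: %u\n", mtuinfo.ip6m_mtu);
*/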
#ifdef CONFIG_COMPAT
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
if (udp_prot.compat_getsockopt != NULL)
return udp_prot.compat_getsockopt(sk, level, optname,
optval, optlen);
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
}
if (level != SOL_IPV6)
return -ENOPROTOOPT;
if (optname == MCAST_MSFILTER)
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ipv6_getsockopt);
err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET6,
optname, optval, &len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ipv6_getsockopt);
#endif
| gpl-2.0 |
ZdrowyGosciu/kernel_lge_d802_v30d | fs/ceph/debugfs.c | 3991 | 6550 | #include <linux/ceph/ceph_debug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
#include "super.h"
#ifdef CONFIG_DEBUG_FS
#include "mds_client.h"
static int mdsmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_fs_client *fsc = s->private;
if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
return 0;
seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
seq_printf(s, "session_timeout %d\n",
fsc->mdsc->mdsmap->m_session_timeout);
seq_printf(s, "session_autoclose %d\n",
fsc->mdsc->mdsmap->m_session_autoclose);
for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
struct ceph_entity_addr *addr =
&fsc->mdsc->mdsmap->m_info[i].addr;
int state = fsc->mdsc->mdsmap->m_info[i].state;
seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
ceph_pr_addr(&addr->in_addr),
ceph_mds_state_name(state));
}
return 0;
}
/*
* mdsc debugfs
*/
static int mdsc_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
int pathlen;
u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
req = rb_entry(rp, struct ceph_mds_request, r_node);
if (req->r_request && req->r_session)
seq_printf(s, "%lld\tmds%d\t", req->r_tid,
req->r_session->s_mds);
else if (!req->r_request)
seq_printf(s, "%lld\t(no request)\t", req->r_tid);
else
seq_printf(s, "%lld\t(no session)\t", req->r_tid);
seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
if (req->r_got_unsafe)
seq_printf(s, "\t(unsafe)");
else
seq_printf(s, "\t");
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_dentry->d_parent->d_inode),
req->r_dentry->d_name.len,
req->r_dentry->d_name.name,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
kfree(path);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
}
if (req->r_old_dentry) {
path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_old_dentry_dir),
req->r_old_dentry->d_name.len,
req->r_old_dentry->d_name.name,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
kfree(path);
} else if (req->r_path2) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
req->r_path2);
else
seq_printf(s, " %s", req->r_path2);
}
seq_printf(s, "\n");
}
mutex_unlock(&mdsc->mutex);
return 0;
}
static int caps_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
int total, avail, used, reserved, min;
ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
"avail\t\t%d\n"
"used\t\t%d\n"
"reserved\t%d\n"
"min\t%d\n",
total, avail, used, reserved, min);
return 0;
}
static int dentry_lru_show(struct seq_file *s, void *ptr)
{
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_dentry_info *di;
spin_lock(&mdsc->dentry_lru_lock);
list_for_each_entry(di, &mdsc->dentry_lru, lru) {
struct dentry *dentry = di->dentry;
seq_printf(s, "%p %p\t%.*s\n",
di, dentry, dentry->d_name.len, dentry->d_name.name);
}
spin_unlock(&mdsc->dentry_lru_lock);
return 0;
}
CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
CEPH_DEFINE_SHOW_FUNC(mdsc_show)
CEPH_DEFINE_SHOW_FUNC(caps_show)
CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
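/*
* Note (a sketch of what the macro is assumed to generate, based on the
* <name>_fops references below): CEPH_DEFINE_SHOW_FUNC(name) wraps a
* seq_file show function in a single_open() helper plus a matching
* file_operations, roughly:
*
*	static int name_open(struct inode *inode, struct file *file)
*	{
*		return single_open(file, name, inode->i_private);
*	}
*
*	static const struct file_operations name_fops = {
*		.open		= name_open,
*		.read		= seq_read,
*		.llseek		= seq_lseek,
*		.release	= single_release,
*	};
*/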
/*
* debugfs
*/
static int congestion_kb_set(void *data, u64 val)
{
struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
fsc->mount_options->congestion_kb = (int)val;
return 0;
}
static int congestion_kb_get(void *data, u64 *val)
{
struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
*val = (u64)fsc->mount_options->congestion_kb;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
congestion_kb_set, "%llu\n");
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
dout("ceph_fs_debugfs_cleanup\n");
debugfs_remove(fsc->debugfs_bdi);
debugfs_remove(fsc->debugfs_congestion_kb);
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_caps);
debugfs_remove(fsc->debugfs_mdsc);
debugfs_remove(fsc->debugfs_dentry_lru);
}
int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
char name[100];
int err = -ENOMEM;
dout("ceph_fs_debugfs_init\n");
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
fsc->client->debugfs_dir,
fsc,
&congestion_kb_fops);
if (!fsc->debugfs_congestion_kb)
goto out;
snprintf(name, sizeof(name), "../../bdi/%s",
dev_name(fsc->backing_dev_info.dev));
fsc->debugfs_bdi =
debugfs_create_symlink("bdi",
fsc->client->debugfs_dir,
name);
if (!fsc->debugfs_bdi)
goto out;
fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
0600,
fsc->client->debugfs_dir,
fsc,
&mdsmap_show_fops);
if (!fsc->debugfs_mdsmap)
goto out;
fsc->debugfs_mdsc = debugfs_create_file("mdsc",
0600,
fsc->client->debugfs_dir,
fsc,
&mdsc_show_fops);
if (!fsc->debugfs_mdsc)
goto out;
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
fsc,
&caps_show_fops);
if (!fsc->debugfs_caps)
goto out;
fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
0600,
fsc->client->debugfs_dir,
fsc,
&dentry_lru_show_fops);
if (!fsc->debugfs_dentry_lru)
goto out;
return 0;
out:
ceph_fs_debugfs_cleanup(fsc);
return err;
}
#else /* CONFIG_DEBUG_FS */
int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
return 0;
}
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
}
#endif /* CONFIG_DEBUG_FS */
| gpl-2.0 |
eagleeyetom/android_kernel_oppo_msm8974 | arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 4503 | 26958 | /*
* SH7786 Setup
*
* Copyright (C) 2009 - 2011 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
* Paul Mundt <paul.mundt@renesas.com>
*
* Based on SH7785 Setup
*
* Copyright (C) 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <cpu/dma-register.h>
#include <asm/mmzone.h>
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffea0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 40, 41, 43, 42 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.dev = {
.platform_data = &scif0_platform_data,
},
};
/*
* The rest of these all have multiplexed IRQs
*/
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xffeb0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 44, 44, 44, 44 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xffec0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 50, 50, 50, 50 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.mapbase = 0xffed0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 51, 51, 51, 51 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.mapbase = 0xffee0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 52, 52, 52, 52 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.mapbase = 0xffef0000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 53, 53, 53, 53 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 16,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu0_device = {
.name = "sh_tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 17,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu1_device = {
.name = "sh_tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu2_resources[] = {
[0] = {
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 18,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu2_device = {
.name = "sh_tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct sh_timer_config tmu3_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
};
static struct resource tmu3_resources[] = {
[0] = {
.start = 0xffda0008,
.end = 0xffda0013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 20,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu3_device = {
.name = "sh_tmu",
.id = 3,
.dev = {
.platform_data = &tmu3_platform_data,
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
};
static struct sh_timer_config tmu4_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
};
static struct resource tmu4_resources[] = {
[0] = {
.start = 0xffda0014,
.end = 0xffda001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 21,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu4_device = {
.name = "sh_tmu",
.id = 4,
.dev = {
.platform_data = &tmu4_platform_data,
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
};
static struct sh_timer_config tmu5_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu5_resources[] = {
[0] = {
.start = 0xffda0020,
.end = 0xffda002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 22,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu5_device = {
.name = "sh_tmu",
.id = 5,
.dev = {
.platform_data = &tmu5_platform_data,
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
};
static struct sh_timer_config tmu6_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
};
static struct resource tmu6_resources[] = {
[0] = {
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 45,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu6_device = {
.name = "sh_tmu",
.id = 6,
.dev = {
.platform_data = &tmu6_platform_data,
},
.resource = tmu6_resources,
.num_resources = ARRAY_SIZE(tmu6_resources),
};
static struct sh_timer_config tmu7_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
};
static struct resource tmu7_resources[] = {
[0] = {
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 45,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu7_device = {
.name = "sh_tmu",
.id = 7,
.dev = {
.platform_data = &tmu7_platform_data,
},
.resource = tmu7_resources,
.num_resources = ARRAY_SIZE(tmu7_resources),
};
static struct sh_timer_config tmu8_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu8_resources[] = {
[0] = {
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 45,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu8_device = {
.name = "sh_tmu",
.id = 8,
.dev = {
.platform_data = &tmu8_platform_data,
},
.resource = tmu8_resources,
.num_resources = ARRAY_SIZE(tmu8_resources),
};
static struct sh_timer_config tmu9_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
};
static struct resource tmu9_resources[] = {
[0] = {
.start = 0xffde0008,
.end = 0xffde0013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 46,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu9_device = {
.name = "sh_tmu",
.id = 9,
.dev = {
.platform_data = &tmu9_platform_data,
},
.resource = tmu9_resources,
.num_resources = ARRAY_SIZE(tmu9_resources),
};
static struct sh_timer_config tmu10_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
};
static struct resource tmu10_resources[] = {
[0] = {
.start = 0xffde0014,
.end = 0xffde001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 46,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu10_device = {
.name = "sh_tmu",
.id = 10,
.dev = {
.platform_data = &tmu10_platform_data,
},
.resource = tmu10_resources,
.num_resources = ARRAY_SIZE(tmu10_resources),
};
static struct sh_timer_config tmu11_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu11_resources[] = {
[0] = {
.start = 0xffde0020,
.end = 0xffde002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 46,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu11_device = {
.name = "sh_tmu",
.id = 11,
.dev = {
.platform_data = &tmu11_platform_data,
},
.resource = tmu11_resources,
.num_resources = ARRAY_SIZE(tmu11_resources),
};
static const struct sh_dmae_channel dmac0_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = dmac0_channels,
.channel_num = ARRAY_SIZE(dmac0_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
/* Resource order is important! */
static struct resource dmac0_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe008020,
.end = 0xfe00808f,
.flags = IORESOURCE_MEM,
}, {
/* DMARSx */
.start = 0xfe009000,
.end = 0xfe00900b,
.flags = IORESOURCE_MEM,
}, {
.name = "error_irq",
.start = evt2irq(0x5c0),
.end = evt2irq(0x5c0),
.flags = IORESOURCE_IRQ,
}, {
/* IRQ for channels 0-5 */
.start = evt2irq(0x500),
.end = evt2irq(0x5a0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = dmac0_resources,
.num_resources = ARRAY_SIZE(dmac0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
#define USB_EHCI_START 0xffe70000
#define USB_OHCI_START 0xffe70400
static struct resource usb_ehci_resources[] = {
[0] = {
.start = USB_EHCI_START,
.end = USB_EHCI_START + 0x3ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 77,
.end = 77,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb_ehci_device = {
.name = "sh_ehci",
.id = -1,
.dev = {
.dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_ehci_resources),
.resource = usb_ehci_resources,
};
static struct resource usb_ohci_resources[] = {
[0] = {
.start = USB_OHCI_START,
.end = USB_OHCI_START + 0x3ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 77,
.end = 77,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb_ohci_device = {
.name = "sh_ohci",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
};
static struct platform_device *sh7786_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
&tmu4_device,
&tmu5_device,
&tmu6_device,
&tmu7_device,
&tmu8_device,
&tmu9_device,
&tmu10_device,
&tmu11_device,
};
static struct platform_device *sh7786_devices[] __initdata = {
&dma0_device,
&usb_ehci_device,
&usb_ohci_device,
};
/*
* Call this function from your platform board code if the board
* uses an external clock for USB.
*/
#define USBCTL0 0xffe70858
#define CLOCK_MODE_MASK 0xffffff7f
#define EXT_CLOCK_MODE 0x00000080
void __init sh7786_usb_use_exclock(void)
{
u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
__raw_writel(val | EXT_CLOCK_MODE, USBCTL0);
}
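/*
* Usage sketch (myboard_setup is a hypothetical board hook): a board
* that feeds the USB block from an external clock would call the
* helper above during setup, e.g.:
*
*	static void __init myboard_setup(char **cmdline_p)
*	{
*		sh7786_usb_use_exclock();
*	}
*/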
#define USBINITREG1 0xffe70094
#define USBINITREG2 0xffe7009c
#define USBINITVAL1 0x00ff0040
#define USBINITVAL2 0x00000001
#define USBPCTL1 0xffe70804
#define USBST 0xffe70808
#define PHY_ENB 0x00000001
#define PLL_ENB 0x00000002
#define PHY_RST 0x00000004
#define ACT_PLL_STATUS 0xc0000000
static void __init sh7786_usb_setup(void)
{
int i = 1000000;
/*
* USB initial settings
*
* The following settings are necessary
* for using the USB modules.
*
* see "USB Initial Settings" for detail
*/
__raw_writel(USBINITVAL1, USBINITREG1);
__raw_writel(USBINITVAL2, USBINITREG2);
/*
* Set the PHY and PLL enable bit
*/
__raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
while (i--) {
if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
/* Set the PHY RST bit */
__raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
printk(KERN_INFO "sh7786 usb setup done\n");
break;
}
cpu_relax();
}
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
WDT,
TMU0_0, TMU0_1, TMU0_2, TMU0_3,
TMU1_0, TMU1_1, TMU1_2,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
HUDI1, HUDI0,
DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
TMU2, TMU3,
SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
USB,
I2C0, I2C1,
DU,
SSI0, SSI1, SSI2, SSI3,
PCIeC2_0, PCIeC2_1, PCIeC2_2,
HAC0, HAC1,
FLCTL,
HSPI,
GPIO0, GPIO1,
Thermal,
INTICI0, INTICI1, INTICI2, INTICI3,
INTICI4, INTICI5, INTICI6, INTICI7,
/* Muxed sub-events */
TXI1, BRI1, RXI1, ERI1,
};
static struct intc_vect sh7786_vectors[] __initdata = {
INTC_VECT(WDT, 0x3e0),
INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0),
INTC_VECT(TMU1_2, 0x4c0),
INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520),
INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560),
INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0),
INTC_VECT(DMAC0_6, 0x5c0),
INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600),
INTC_VECT(DMAC1_0, 0x620), INTC_VECT(DMAC1_1, 0x640),
INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680),
INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0),
INTC_VECT(HPB_2, 0x6e0),
INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720),
INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760),
INTC_VECT(SCIF1, 0x780),
INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0),
INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860),
INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0),
INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0),
INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00),
INTC_VECT(PCIeC0_2, 0xb20),
INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 0xb60),
INTC_VECT(PCIeC1_2, 0xb80),
INTC_VECT(USB, 0xba0),
INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0),
INTC_VECT(DU, 0xd00),
INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40),
INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80),
INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0),
INTC_VECT(PCIeC2_2, 0xde0),
INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20),
INTC_VECT(FLCTL, 0xe40),
INTC_VECT(HSPI, 0xe80),
INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0),
INTC_VECT(Thermal, 0xee0),
INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
};
#define CnINTMSK0 0xfe410030
#define CnINTMSK1 0xfe410040
#define CnINTMSKCLR0 0xfe410050
#define CnINTMSKCLR1 0xfe410060
#define CnINT2MSKR0 0xfe410a20
#define CnINT2MSKR1 0xfe410a24
#define CnINT2MSKR2 0xfe410a28
#define CnINT2MSKR3 0xfe410a2c
#define CnINT2MSKCR0 0xfe410a30
#define CnINT2MSKCR1 0xfe410a34
#define CnINT2MSKCR2 0xfe410a38
#define CnINT2MSKCR3 0xfe410a3c
#define INTMSK2 0xfe410068
#define INTMSKCLR2 0xfe41006c
#define INTDISTCR0 0xfe4100b0
#define INTDISTCR1 0xfe4100b4
#define INT2DISTCR0 0xfe410900
#define INT2DISTCR1 0xfe410904
#define INT2DISTCR2 0xfe410908
#define INT2DISTCR3 0xfe41090c
static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
{ CnINTMSK0, CnINTMSKCLR0, 32,
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
INTC_SMP_BALANCING(INTDISTCR0) },
{ INTMSK2, INTMSKCLR2, 32,
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ CnINT2MSKR0, CnINT2MSKCR0, 32,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
INTC_SMP_BALANCING(INT2DISTCR0) },
{ CnINT2MSKR1, CnINT2MSKCR1, 32,
{ TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
HUDI1, HUDI0,
DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
{ CnINT2MSKR2, CnINT2MSKCR2, 32,
{ 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
{ CnINT2MSKR3, CnINT2MSKCR3, 32,
{ 0, 0, 0, 0, 0, 0,
I2C0, I2C1,
DU, SSI0, SSI1, SSI2, SSI3,
PCIeC2_0, PCIeC2_1, PCIeC2_2,
HAC0, HAC1,
FLCTL, 0,
HSPI, GPIO0, GPIO1, Thermal,
0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
};
static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
{ 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
{ 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1,
TMU0_2, TMU0_3 } },
{ 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1,
TMU1_2, 0 } },
{ 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1,
DMAC0_2, DMAC0_3 } },
{ 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5,
DMAC0_6, HUDI1 } },
{ 0xfe410814, 0, 32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0,
DMAC1_1, DMAC1_2 } },
{ 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0,
HPB_1, HPB_2 } },
{ 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1,
SCIF0_2, SCIF0_3 } },
{ 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } },
{ 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } },
{ 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5,
Eth_0, Eth_1 } },
{ 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } },
{ 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } },
{ 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } },
{ 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } },
{ 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1 } },
{ 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } },
{ 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } },
{ 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } },
{ 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } },
{ 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0,
PCIeC2_1, PCIeC2_2 } },
{ 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } },
{ 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0,
GPIO1, Thermal } },
{ 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } },
{ 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } },
{ 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
{ INTICI7, INTICI6, INTICI5, INTICI4,
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
};
static struct intc_subgroup sh7786_subgroups[] __initdata = {
{ 0xfe410c20, 32, SCIF1,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
};
static struct intc_desc sh7786_intc_desc __initdata = {
.name = "sh7786",
.hw = {
.vectors = sh7786_vectors,
.nr_vectors = ARRAY_SIZE(sh7786_vectors),
.mask_regs = sh7786_mask_registers,
.nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
.subgroups = sh7786_subgroups,
.nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
.prio_regs = sh7786_prio_registers,
.nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
},
};
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
};
static struct intc_vect vectors_irq4567[] __initdata = {
INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
};
static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
{ 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
{ 0xfe410024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
vectors_irq0123, NULL, sh7786_mask_registers,
sh7786_prio_registers, sh7786_sense_registers,
sh7786_ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
vectors_irq4567, NULL, sh7786_mask_registers,
sh7786_prio_registers, sh7786_sense_registers,
sh7786_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl0123[] __initdata = {
INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
INTC_VECT(IRL0_HHHL, 0x3c0),
};
static struct intc_vect vectors_irl4567[] __initdata = {
INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920),
INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960),
INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0),
INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0),
INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20),
INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60),
INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0),
INTC_VECT(IRL4_HHHL, 0xac0),
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
NULL, sh7786_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
NULL, sh7786_mask_registers, NULL, NULL);
#define INTC_ICR0 0xfe410000
#define INTC_INTMSK0 CnINTMSK0
#define INTC_INTMSK1 CnINTMSK1
#define INTC_INTMSK2 INTMSK2
#define INTC_INTMSKCLR1 CnINTMSKCLR1
#define INTC_INTMSKCLR2 INTMSKCLR2
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
register_intc_controller(&sh7786_intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
BUG();
}
}
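/*
* Usage sketch (hypothetical board code): a board with its external
* interrupt pins 3-0 wired for individual IRQ mode would call, from
* its machine vector IRQ init hook:
*
*	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
*/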
void __init plat_mem_setup(void)
{
}
static int __init sh7786_devices_setup(void)
{
int ret, irq;
sh7786_usb_setup();
/*
* De-mux SCIF1 IRQs if possible
*/
irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
if (irq > 0) {
scif1_platform_data.irqs[SCIx_TXI_IRQ] = irq;
scif1_platform_data.irqs[SCIx_ERI_IRQ] =
intc_irq_lookup(sh7786_intc_desc.name, ERI1);
scif1_platform_data.irqs[SCIx_BRI_IRQ] =
intc_irq_lookup(sh7786_intc_desc.name, BRI1);
scif1_platform_data.irqs[SCIx_RXI_IRQ] =
intc_irq_lookup(sh7786_intc_desc.name, RXI1);
}
ret = platform_add_devices(sh7786_early_devices,
ARRAY_SIZE(sh7786_early_devices));
if (unlikely(ret != 0))
return ret;
return platform_add_devices(sh7786_devices,
ARRAY_SIZE(sh7786_devices));
}
arch_initcall(sh7786_devices_setup);
void __init plat_early_device_setup(void)
{
early_platform_add_devices(sh7786_early_devices,
ARRAY_SIZE(sh7786_early_devices));
}
| gpl-2.0 |
Split-Screen/android_kernel_samsung_jf | arch/sh/boards/board-magicpanelr2.c | 4503 | 10719 | /*
* linux/arch/sh/boards/magicpanel/setup.c
*
* Copyright (C) 2007 Markus Brunner, Mark Jonas
*
* Magic Panel Release 2 board setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/smsc911x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/map.h>
#include <mach/magicpanelr2.h>
#include <asm/heartbeat.h>
#include <cpu/sh7720.h>
#define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL)
/* Wait until the reset has finished. Timeout is 100 ms. */
static int __init ethernet_reset_finished(void)
{
int i;
if (LAN9115_READY)
return 1;
for (i = 0; i < 10; ++i) {
mdelay(10);
if (LAN9115_READY)
return 1;
}
return 0;
}
static void __init reset_ethernet(void)
{
/* PMDR: LAN_RESET=on */
CLRBITS_OUTB(0x10, PORT_PMDR);
udelay(200);
/* PMDR: LAN_RESET=off */
SETBITS_OUTB(0x10, PORT_PMDR);
}
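/*
* Reset sequence as wired on this board: clearing PMDR bit 4 asserts
* LAN_RESET and setting it releases the line, so reset_ethernet()
* pulses the reset for 200 us; ethernet_reset_finished() then polls
* the LAN9115 READY bit for up to 100 ms before board setup reports
* "Ethernet not ready".
*/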
static void __init setup_chip_select(void)
{
/* CS2: LAN (0x08000000 - 0x0bffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x36db0400, CS2BCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x000003c0, CS2WCR);
/* CS4: CAN1 (0xb0000000 - 0xb3ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS4BCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS4WCR);
/* CS5a: CAN2 (0xb4000000 - 0xb5ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS5ABCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS5AWCR);
/* CS5b: CAN3 (0xb6000000 - 0xb7ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS5BBCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS5BWCR);
/* CS6a: Rotary (0xb8000000 - 0xb9ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS6ABCR);
/* (SW:1.5 WR:3 HW:1.5), no ext. wait */
__raw_writel(0x001009C1, CS6AWCR);
}
static void __init setup_port_multiplexing(void)
{
/* A7 GPO(LED8); A6 GPO(LED7); A5 GPO(LED6); A4 GPO(LED5);
* A3 GPO(LED4); A2 GPO(LED3); A1 GPO(LED2); A0 GPO(LED1);
*/
__raw_writew(0x5555, PORT_PACR); /* 01 01 01 01 01 01 01 01 */
/* B7 GPO(RST4); B6 GPO(RST3); B5 GPO(RST2); B4 GPO(RST1);
* B3 GPO(PB3); B2 GPO(PB2); B1 GPO(PB1); B0 GPO(PB0);
*/
__raw_writew(0x5555, PORT_PBCR); /* 01 01 01 01 01 01 01 01 */
/* C7 GPO(PC7); C6 GPO(PC6); C5 GPO(PC5); C4 GPO(PC4);
* C3 LCD_DATA3; C2 LCD_DATA2; C1 LCD_DATA1; C0 LCD_DATA0;
*/
__raw_writew(0x5500, PORT_PCCR); /* 01 01 01 01 00 00 00 00 */
/* D7 GPO(PD7); D6 GPO(PD6); D5 GPO(PD5); D4 GPO(PD4);
* D3 GPO(PD3); D2 GPO(PD2); D1 GPO(PD1); D0 GPO(PD0);
*/
__raw_writew(0x5555, PORT_PDCR); /* 01 01 01 01 01 01 01 01 */
/* E7 (x); E6 GPI(nu); E5 GPI(nu); E4 LCD_M_DISP;
* E3 LCD_CL1; E2 LCD_CL2; E1 LCD_DON; E0 LCD_FLM;
*/
__raw_writew(0x3C00, PORT_PECR); /* 00 11 11 00 00 00 00 00 */
/* F7 (x); F6 DA1(VLCD); F5 DA0(nc); F4 AN3;
* F3 AN2(MID_AD); F2 AN1(EARTH_AD); F1 AN0(TEMP); F0 GPI+(nc);
*/
__raw_writew(0x0002, PORT_PFCR); /* 00 00 00 00 00 00 00 10 */
/* G7 (x); G6 IRQ5(TOUCH_BUSY); G5 IRQ4(TOUCH_IRQ); G4 GPI(KEY2);
* G3 GPI(KEY1); G2 GPO(LED11); G1 GPO(LED10); G0 GPO(LED9);
*/
__raw_writew(0x03D5, PORT_PGCR); /* 00 00 00 11 11 01 01 01 */
/* H7 (x); H6 /RAS(BRAS); H5 /CAS(BCAS); H4 CKE(BCKE);
* H3 GPO(EARTH_OFF); H2 GPO(EARTH_TEST); H1 USB2_PWR; H0 USB1_PWR;
*/
__raw_writew(0x0050, PORT_PHCR); /* 00 00 00 00 01 01 00 00 */
/* J7 (x); J6 AUDCK; J5 ASEBRKAK; J4 AUDATA3;
* J3 AUDATA2; J2 AUDATA1; J1 AUDATA0; J0 AUDSYNC;
*/
__raw_writew(0x0000, PORT_PJCR); /* 00 00 00 00 00 00 00 00 */
/* K7 (x); K6 (x); K5 (x); K4 (x);
* K3 PINT7(/PWR2); K2 PINT6(/PWR1); K1 PINT5(nu); K0 PINT4(FLASH_READY)
*/
__raw_writew(0x00FF, PORT_PKCR); /* 00 00 00 00 11 11 11 11 */
/* L7 TRST; L6 TMS; L5 TDO; L4 TDI;
* L3 TCK; L2 (x); L1 (x); L0 (x);
*/
__raw_writew(0x0000, PORT_PLCR); /* 00 00 00 00 00 00 00 00 */
/* M7 GPO(CURRENT_SINK); M6 GPO(PWR_SWITCH); M5 GPO(LAN_SPEED);
* M4 GPO(LAN_RESET); M3 GPO(BUZZER); M2 GPO(LCD_BL);
* M1 CS5B(CAN3_CS); M0 GPI+(nc);
*/
__raw_writew(0x5552, PORT_PMCR); /* 01 01 01 01 01 01 00 10 */
/* CURRENT_SINK=off, PWR_SWITCH=off, LAN_SPEED=100MBit,
* LAN_RESET=off, BUZZER=off, LCD_BL=off
*/
#if CONFIG_SH_MAGIC_PANEL_R2_VERSION == 2
__raw_writeb(0x30, PORT_PMDR);
#elif CONFIG_SH_MAGIC_PANEL_R2_VERSION == 3
__raw_writeb(0xF0, PORT_PMDR);
#else
#error Unknown revision of PLATFORM_MP_R2
#endif
/* P7 (x); P6 (x); P5 (x);
* P4 GPO(nu); P3 IRQ3(LAN_IRQ); P2 IRQ2(CAN3_IRQ);
* P1 IRQ1(CAN2_IRQ); P0 IRQ0(CAN1_IRQ)
*/
__raw_writew(0x0100, PORT_PPCR); /* 00 00 00 01 00 00 00 00 */
__raw_writeb(0x10, PORT_PPDR);
/* R7 A25; R6 A24; R5 A23; R4 A22;
* R3 A21; R2 A20; R1 A19; R0 A0;
*/
gpio_request(GPIO_FN_A25, NULL);
gpio_request(GPIO_FN_A24, NULL);
gpio_request(GPIO_FN_A23, NULL);
gpio_request(GPIO_FN_A22, NULL);
gpio_request(GPIO_FN_A21, NULL);
gpio_request(GPIO_FN_A20, NULL);
gpio_request(GPIO_FN_A19, NULL);
gpio_request(GPIO_FN_A0, NULL);
/* S7 (x); S6 (x); S5 (x); S4 GPO(EEPROM_CS2);
* S3 GPO(EEPROM_CS1); S2 SIOF0_TXD; S1 SIOF0_RXD; S0 SIOF0_SCK;
*/
__raw_writew(0x0140, PORT_PSCR); /* 00 00 00 01 01 00 00 00 */
/* T7 (x); T6 (x); T5 (x); T4 COM1_CTS;
* T3 COM1_RTS; T2 COM1_TXD; T1 COM1_RXD; T0 GPO(WDOG)
*/
__raw_writew(0x0001, PORT_PTCR); /* 00 00 00 00 00 00 00 01 */
/* U7 (x); U6 (x); U5 (x); U4 GPI+(/AC_FAULT);
* U3 GPO(TOUCH_CS); U2 TOUCH_TXD; U1 TOUCH_RXD; U0 TOUCH_SCK;
*/
__raw_writew(0x0240, PORT_PUCR); /* 00 00 00 10 01 00 00 00 */
/* V7 (x); V6 (x); V5 (x); V4 GPO(MID2);
* V3 GPO(MID1); V2 CARD_TxD; V1 CARD_RxD; V0 GPI+(/BAT_FAULT);
*/
__raw_writew(0x0142, PORT_PVCR); /* 00 00 00 01 01 00 00 10 */
}
static void __init mpr2_setup(char **cmdline_p)
{
/* set Pin Select Register A:
* /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2,
* /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND
*/
__raw_writew(0xAABC, PORT_PSELA);
/* set Pin Select Register B:
* /SCIF0_RTS, /SCIF0_CTS, LCD_VCPWC,
* LCD_VEPWC, IIC_SDA, IIC_SCL, Reserved
*/
__raw_writew(0x3C00, PORT_PSELB);
/* set Pin Select Register C:
* SIOF1_SCK, SIOF1_RxD, SCIF1_RxD, SCIF1_TxD, Reserved
*/
__raw_writew(0x0000, PORT_PSELC);
/* set Pin Select Register D: Reserved, SIOF1_TxD, Reserved, SIOF1_MCLK,
* Reserved, SIOF1_SYNC, Reserved, SCIF1_SCK, Reserved
*/
__raw_writew(0x0000, PORT_PSELD);
/* set USB TxRx Control: Reserved, DRV, Reserved, USB_TRANS, USB_SEL */
__raw_writew(0x0101, PORT_UTRCTL);
/* set USB Clock Control: USSCS, USSTB, Reserved (HighByte always A5) */
__raw_writew(0xA5C0, PORT_UCLKCR_W);
setup_chip_select();
setup_port_multiplexing();
reset_ethernet();
printk(KERN_INFO "Magic Panel Release 2 A.%i\n",
CONFIG_SH_MAGIC_PANEL_R2_VERSION);
if (ethernet_reset_finished() == 0)
printk(KERN_WARNING "Ethernet not ready\n");
}
static struct resource smsc911x_resources[] = {
[0] = {
.start = 0xa8000000,
.end = 0xabffffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 35,
.end = 35,
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_32BIT,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct resource heartbeat_resources[] = {
[0] = {
.start = PA_LED,
.end = PA_LED,
.flags = IORESOURCE_MEM,
},
};
static struct heartbeat_data heartbeat_data = {
.flags = HEARTBEAT_INVERTED,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.dev = {
.platform_data = &heartbeat_data,
},
.num_resources = ARRAY_SIZE(heartbeat_resources),
.resource = heartbeat_resources,
};
static struct mtd_partition mpr2_partitions[] = {
/* Reserved for bootloader, read-only */
{
.name = "Bootloader",
.offset = 0x00000000UL,
.size = MPR2_MTD_BOOTLOADER_SIZE,
.mask_flags = MTD_WRITEABLE,
},
/* Reserved for kernel image */
{
.name = "Kernel",
.offset = MTDPART_OFS_NXTBLK,
.size = MPR2_MTD_KERNEL_SIZE,
},
/* Rest is used for Flash FS */
{
.name = "Flash_FS",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
}
};
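/*
* Resulting layout, as a worked example of MTDPART_OFS_NXTBLK: each
* partition starts at the next erase-block boundary after its
* predecessor, and MTDPART_SIZ_FULL extends the last one to the end of
* the chip (sizes come from the constants above):
*
*    Bootloader: 0x00000000 .. MPR2_MTD_BOOTLOADER_SIZE (read-only)
*    Kernel:     next erase block .. + MPR2_MTD_KERNEL_SIZE
*    Flash_FS:   next erase block .. end of flash
*/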
static struct physmap_flash_data flash_data = {
.parts = mpr2_partitions,
.nr_parts = ARRAY_SIZE(mpr2_partitions),
.width = 2,
};
static struct resource flash_resource = {
.start = 0x00000000,
.end = 0x2000000UL,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device = {
.name = "physmap-flash",
.id = -1,
.resource = &flash_resource,
.num_resources = 1,
.dev = {
.platform_data = &flash_data,
},
};
/*
* Add all resources to the platform_device
*/
static struct platform_device *mpr2_devices[] __initdata = {
&heartbeat_device,
&smsc911x_device,
&flash_device,
};
static int __init mpr2_devices_setup(void)
{
return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices));
}
device_initcall(mpr2_devices_setup);
/*
* Initialize IRQ setting
*/
static void __init init_mpr2_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */
irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */
irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */
irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */
irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */
irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */
irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */
intc_set_priority(32, 13); /* IRQ0 CAN1 */
intc_set_priority(33, 13); /* IRQ1 CAN2 */
intc_set_priority(34, 13); /* IRQ2 CAN3 */
intc_set_priority(35, 6); /* IRQ3 SMSC9115 */
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_mpr2 __initmv = {
.mv_name = "mpr2",
.mv_setup = mpr2_setup,
.mv_init_irq = init_mpr2_IRQ,
};
| gpl-2.0 |
FrozenCow/msm | arch/arm/mach-tegra/pinmux-tegra20-tables.c | 4759 | 17127 | /*
* linux/arch/arm/mach-tegra/pinmux-tegra20-tables.c
*
* Common pinmux configurations for Tegra20 SoCs
*
* Copyright (C) 2010 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/string.h>
#include <mach/iomap.h>
#include <mach/pinmux.h>
#include <mach/pinmux-tegra20.h>
#include <mach/suspend.h>
#define TRISTATE_REG_A 0x14
#define PIN_MUX_CTL_REG_A 0x80
#define PULLUPDOWN_REG_A 0xa0
#define PINGROUP_REG_A 0x868
#define DRIVE_PINGROUP(pg_name, r) \
[TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
.name = #pg_name, \
.reg_bank = 3, \
.reg = ((r) - PINGROUP_REG_A) \
}
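/*
* Worked example of the offset arithmetic in DRIVE_PINGROUP: .reg is
* stored relative to PINGROUP_REG_A (0x868), so the first entry below,
* DRIVE_PINGROUP(AO1, 0x868), records
*
*    .reg = 0x868 - 0x868 = 0
*
* and the shared pinmux core is then expected to add the bank 3 base
* back when it accesses the register.
*/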
static const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
DRIVE_PINGROUP(AO1, 0x868),
DRIVE_PINGROUP(AO2, 0x86c),
DRIVE_PINGROUP(AT1, 0x870),
DRIVE_PINGROUP(AT2, 0x874),
DRIVE_PINGROUP(CDEV1, 0x878),
DRIVE_PINGROUP(CDEV2, 0x87c),
DRIVE_PINGROUP(CSUS, 0x880),
DRIVE_PINGROUP(DAP1, 0x884),
DRIVE_PINGROUP(DAP2, 0x888),
DRIVE_PINGROUP(DAP3, 0x88c),
DRIVE_PINGROUP(DAP4, 0x890),
DRIVE_PINGROUP(DBG, 0x894),
DRIVE_PINGROUP(LCD1, 0x898),
DRIVE_PINGROUP(LCD2, 0x89c),
DRIVE_PINGROUP(SDMMC2, 0x8a0),
DRIVE_PINGROUP(SDMMC3, 0x8a4),
DRIVE_PINGROUP(SPI, 0x8a8),
DRIVE_PINGROUP(UAA, 0x8ac),
DRIVE_PINGROUP(UAB, 0x8b0),
DRIVE_PINGROUP(UART2, 0x8b4),
DRIVE_PINGROUP(UART3, 0x8b8),
DRIVE_PINGROUP(VI1, 0x8bc),
DRIVE_PINGROUP(VI2, 0x8c0),
DRIVE_PINGROUP(XM2A, 0x8c4),
DRIVE_PINGROUP(XM2C, 0x8c8),
DRIVE_PINGROUP(XM2D, 0x8cc),
DRIVE_PINGROUP(XM2CLK, 0x8d0),
DRIVE_PINGROUP(MEMCOMP, 0x8d4),
DRIVE_PINGROUP(SDIO1, 0x8e0),
DRIVE_PINGROUP(CRT, 0x8ec),
DRIVE_PINGROUP(DDC, 0x8f0),
DRIVE_PINGROUP(GMA, 0x8f4),
DRIVE_PINGROUP(GMB, 0x8f8),
DRIVE_PINGROUP(GMC, 0x8fc),
DRIVE_PINGROUP(GMD, 0x900),
DRIVE_PINGROUP(GME, 0x904),
DRIVE_PINGROUP(OWR, 0x908),
DRIVE_PINGROUP(UAD, 0x90c),
};
#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \
tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
[TEGRA_PINGROUP_ ## pg_name] = { \
.name = #pg_name, \
.vddio = TEGRA_VDDIO_ ## vdd, \
.funcs = { \
TEGRA_MUX_ ## f0, \
TEGRA_MUX_ ## f1, \
TEGRA_MUX_ ## f2, \
TEGRA_MUX_ ## f3, \
}, \
.func_safe = TEGRA_MUX_ ## f_safe, \
.tri_bank = 0, \
.tri_reg = ((tri_r) - TRISTATE_REG_A), \
.tri_bit = tri_b, \
.mux_bank = 1, \
.mux_reg = ((mux_r) - PIN_MUX_CTL_REG_A), \
.mux_bit = mux_b, \
.pupd_bank = 2, \
.pupd_reg = ((pupd_r) - PULLUPDOWN_REG_A), \
.pupd_bit = pupd_b, \
.lock_bit = -1, \
.od_bit = -1, \
.ioreset_bit = -1, \
.io_default = -1, \
}
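/*
* Worked expansion of the first entry below, as a reading aid:
* PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24,
* 0xA0, 0) becomes (register offsets relative to the respective _REG_A
* bases)
*
*    .name = "ATA", .vddio = TEGRA_VDDIO_NAND,
*    .funcs = { TEGRA_MUX_IDE, TEGRA_MUX_NAND,
*               TEGRA_MUX_GMI, TEGRA_MUX_RSVD },
*    .func_safe = TEGRA_MUX_IDE,
*    .tri_reg = 0x14 - 0x14 = 0, .tri_bit = 0,
*    .mux_reg = 0x80 - 0x80 = 0, .mux_bit = 24,
*    .pupd_reg = 0xA0 - 0xA0 = 0, .pupd_bit = 0,
*/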
static const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),
PINGROUP(ATB, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),
PINGROUP(ATC, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),
PINGROUP(ATD, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 3, 0x80, 20, 0xA0, 6),
PINGROUP(ATE, NAND, IDE, NAND, GMI, RSVD, IDE, 0x18, 25, 0x80, 12, 0xA0, 8),
PINGROUP(CDEV1, AUDIO, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, OSC, 0x14, 4, 0x88, 2, 0xA8, 0),
PINGROUP(CDEV2, AUDIO, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, OSC, 0x14, 5, 0x88, 4, 0xA8, 2),
PINGROUP(CRTP, LCD, CRT, RSVD, RSVD, RSVD, RSVD, 0x20, 14, 0x98, 20, 0xA4, 24),
PINGROUP(CSUS, VI, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, PLLC_OUT1, 0x14, 6, 0x88, 6, 0xAC, 24),
PINGROUP(DAP1, AUDIO, DAP1, RSVD, GMI, SDIO2, DAP1, 0x14, 7, 0x88, 20, 0xA0, 10),
PINGROUP(DAP2, AUDIO, DAP2, TWC, RSVD, GMI, DAP2, 0x14, 8, 0x88, 22, 0xA0, 12),
PINGROUP(DAP3, BB, DAP3, RSVD, RSVD, RSVD, DAP3, 0x14, 9, 0x88, 24, 0xA0, 14),
PINGROUP(DAP4, UART, DAP4, RSVD, GMI, RSVD, DAP4, 0x14, 10, 0x88, 26, 0xA0, 16),
PINGROUP(DDC, LCD, I2C2, RSVD, RSVD, RSVD, RSVD4, 0x18, 31, 0x88, 0, 0xB0, 28),
PINGROUP(DTA, VI, RSVD, SDIO2, VI, RSVD, RSVD4, 0x14, 11, 0x84, 20, 0xA0, 18),
PINGROUP(DTB, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 12, 0x84, 22, 0xA0, 20),
PINGROUP(DTC, VI, RSVD, RSVD, VI, RSVD, RSVD1, 0x14, 13, 0x84, 26, 0xA0, 22),
PINGROUP(DTD, VI, RSVD, SDIO2, VI, RSVD, RSVD1, 0x14, 14, 0x84, 28, 0xA0, 24),
PINGROUP(DTE, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 15, 0x84, 30, 0xA0, 26),
PINGROUP(DTF, VI, I2C3, RSVD, VI, RSVD, RSVD4, 0x20, 12, 0x98, 30, 0xA0, 28),
PINGROUP(GMA, NAND, UARTE, SPI3, GMI, SDIO4, SPI3, 0x14, 28, 0x84, 0, 0xB0, 20),
PINGROUP(GMB, NAND, IDE, NAND, GMI, GMI_INT, GMI, 0x18, 29, 0x88, 28, 0xB0, 22),
PINGROUP(GMC, NAND, UARTD, SPI4, GMI, SFLASH, SPI4, 0x14, 29, 0x84, 2, 0xB0, 24),
PINGROUP(GMD, NAND, RSVD, NAND, GMI, SFLASH, GMI, 0x18, 30, 0x88, 30, 0xB0, 26),
PINGROUP(GME, NAND, RSVD, DAP5, GMI, SDIO4, GMI, 0x18, 0, 0x8C, 0, 0xA8, 24),
PINGROUP(GPU, UART, PWM, UARTA, GMI, RSVD, RSVD4, 0x14, 16, 0x8C, 4, 0xA4, 20),
PINGROUP(GPU7, SYS, RTCK, RSVD, RSVD, RSVD, RTCK, 0x20, 11, 0x98, 28, 0xA4, 6),
PINGROUP(GPV, SD, PCIE, RSVD, RSVD, RSVD, PCIE, 0x14, 17, 0x8C, 2, 0xA0, 30),
PINGROUP(HDINT, LCD, HDMI, RSVD, RSVD, RSVD, HDMI, 0x1C, 23, 0x84, 4, 0xAC, 22),
PINGROUP(I2CP, SYS, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 18, 0x88, 8, 0xA4, 2),
PINGROUP(IRRX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 20, 0x88, 18, 0xA8, 22),
PINGROUP(IRTX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 19, 0x88, 16, 0xA8, 20),
PINGROUP(KBCA, SYS, KBC, NAND, SDIO2, EMC_TEST0_DLL, KBC, 0x14, 22, 0x88, 10, 0xA4, 8),
PINGROUP(KBCB, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x14, 21, 0x88, 12, 0xA4, 10),
PINGROUP(KBCC, SYS, KBC, NAND, TRACE, EMC_TEST1_DLL, KBC, 0x18, 26, 0x88, 14, 0xA4, 12),
PINGROUP(KBCD, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x20, 10, 0x98, 26, 0xA4, 14),
PINGROUP(KBCE, SYS, KBC, NAND, OWR, RSVD, KBC, 0x14, 26, 0x80, 28, 0xB0, 2),
PINGROUP(KBCF, SYS, KBC, NAND, TRACE, MIO, KBC, 0x14, 27, 0x80, 26, 0xB0, 0),
PINGROUP(LCSN, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 31, 0x90, 12, 0xAC, 20),
PINGROUP(LD0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 0, 0x94, 0, 0xAC, 12),
PINGROUP(LD1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 1, 0x94, 2, 0xAC, 12),
PINGROUP(LD10, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 10, 0x94, 20, 0xAC, 12),
PINGROUP(LD11, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 11, 0x94, 22, 0xAC, 12),
PINGROUP(LD12, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 12, 0x94, 24, 0xAC, 12),
PINGROUP(LD13, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 13, 0x94, 26, 0xAC, 12),
PINGROUP(LD14, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 14, 0x94, 28, 0xAC, 12),
PINGROUP(LD15, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 15, 0x94, 30, 0xAC, 12),
PINGROUP(LD16, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 16, 0x98, 0, 0xAC, 12),
PINGROUP(LD17, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 17, 0x98, 2, 0xAC, 12),
PINGROUP(LD2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 2, 0x94, 4, 0xAC, 12),
PINGROUP(LD3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 3, 0x94, 6, 0xAC, 12),
PINGROUP(LD4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 4, 0x94, 8, 0xAC, 12),
PINGROUP(LD5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 5, 0x94, 10, 0xAC, 12),
PINGROUP(LD6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 6, 0x94, 12, 0xAC, 12),
PINGROUP(LD7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 7, 0x94, 14, 0xAC, 12),
PINGROUP(LD8, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 8, 0x94, 16, 0xAC, 12),
PINGROUP(LD9, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 9, 0x94, 18, 0xAC, 12),
PINGROUP(LDC, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 30, 0x90, 14, 0xAC, 20),
PINGROUP(LDI, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 6, 0x98, 16, 0xAC, 18),
PINGROUP(LHP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 18, 0x98, 10, 0xAC, 16),
PINGROUP(LHP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 19, 0x98, 4, 0xAC, 14),
PINGROUP(LHP2, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 20, 0x98, 6, 0xAC, 14),
PINGROUP(LHS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x20, 7, 0x90, 22, 0xAC, 22),
PINGROUP(LM0, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 24, 0x90, 26, 0xAC, 22),
PINGROUP(LM1, LCD, DISPLAYA, DISPLAYB, RSVD, CRT, RSVD3, 0x1C, 25, 0x90, 28, 0xAC, 22),
PINGROUP(LPP, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 8, 0x98, 14, 0xAC, 18),
PINGROUP(LPW0, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 3, 0x90, 0, 0xAC, 20),
PINGROUP(LPW1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 4, 0x90, 2, 0xAC, 20),
PINGROUP(LPW2, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 5, 0x90, 4, 0xAC, 20),
PINGROUP(LSC0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 27, 0x90, 18, 0xAC, 22),
PINGROUP(LSC1, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 28, 0x90, 20, 0xAC, 20),
PINGROUP(LSCK, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 29, 0x90, 16, 0xAC, 20),
PINGROUP(LSDA, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 1, 0x90, 8, 0xAC, 20),
PINGROUP(LSDI, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, DISPLAYA, 0x20, 2, 0x90, 6, 0xAC, 20),
PINGROUP(LSPI, LCD, DISPLAYA, DISPLAYB, XIO, HDMI, DISPLAYA, 0x20, 0, 0x90, 10, 0xAC, 22),
PINGROUP(LVP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 21, 0x90, 30, 0xAC, 22),
PINGROUP(LVP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 22, 0x98, 8, 0xAC, 16),
PINGROUP(LVS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 26, 0x90, 24, 0xAC, 22),
PINGROUP(OWC, SYS, OWR, RSVD, RSVD, RSVD, OWR, 0x14, 31, 0x84, 8, 0xB0, 30),
PINGROUP(PMC, SYS, PWR_ON, PWR_INTR, RSVD, RSVD, PWR_ON, 0x14, 23, 0x98, 18, -1, -1),
PINGROUP(PTA, NAND, I2C2, HDMI, GMI, RSVD, RSVD4, 0x14, 24, 0x98, 22, 0xA4, 4),
PINGROUP(RM, UART, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 25, 0x80, 14, 0xA4, 0),
PINGROUP(SDB, SD, UARTA, PWM, SDIO3, SPI2, PWM, 0x20, 15, 0x8C, 10, -1, -1),
PINGROUP(SDC, SD, PWM, TWC, SDIO3, SPI3, TWC, 0x18, 1, 0x8C, 12, 0xAC, 28),
PINGROUP(SDD, SD, UARTA, PWM, SDIO3, SPI3, PWM, 0x18, 2, 0x8C, 14, 0xAC, 30),
PINGROUP(SDIO1, BB, SDIO1, RSVD, UARTE, UARTA, RSVD2, 0x14, 30, 0x80, 30, 0xB0, 18),
PINGROUP(SLXA, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 3, 0x84, 6, 0xA4, 22),
PINGROUP(SLXC, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 5, 0x84, 10, 0xA4, 26),
PINGROUP(SLXD, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 6, 0x84, 12, 0xA4, 28),
PINGROUP(SLXK, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 7, 0x84, 14, 0xA4, 30),
PINGROUP(SPDI, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 8, 0x8C, 8, 0xA4, 16),
PINGROUP(SPDO, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 9, 0x8C, 6, 0xA4, 18),
PINGROUP(SPIA, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 10, 0x8C, 30, 0xA8, 4),
PINGROUP(SPIB, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 11, 0x8C, 28, 0xA8, 6),
PINGROUP(SPIC, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 12, 0x8C, 26, 0xA8, 8),
PINGROUP(SPID, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 13, 0x8C, 24, 0xA8, 10),
PINGROUP(SPIE, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 14, 0x8C, 22, 0xA8, 12),
PINGROUP(SPIF, AUDIO, SPI3, SPI1, SPI2, RSVD, RSVD4, 0x18, 15, 0x8C, 20, 0xA8, 14),
PINGROUP(SPIG, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 16, 0x8C, 18, 0xA8, 16),
PINGROUP(SPIH, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 17, 0x8C, 16, 0xA8, 18),
PINGROUP(UAA, BB, SPI3, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 18, 0x80, 0, 0xAC, 0),
PINGROUP(UAB, BB, SPI2, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 19, 0x80, 2, 0xAC, 2),
PINGROUP(UAC, BB, OWR, RSVD, RSVD, RSVD, RSVD4, 0x18, 20, 0x80, 4, 0xAC, 4),
PINGROUP(UAD, UART, IRDA, SPDIF, UARTA, SPI4, SPDIF, 0x18, 21, 0x80, 6, 0xAC, 6),
PINGROUP(UCA, UART, UARTC, RSVD, GMI, RSVD, RSVD4, 0x18, 22, 0x84, 16, 0xAC, 8),
PINGROUP(UCB, UART, UARTC, PWM, GMI, RSVD, RSVD4, 0x18, 23, 0x84, 18, 0xAC, 10),
PINGROUP(UDA, BB, SPI1, RSVD, UARTD, ULPI, RSVD2, 0x20, 13, 0x80, 8, 0xB0, 16),
/* these pin groups only have pullup and pull down control */
PINGROUP(CK32, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 14),
PINGROUP(DDRC, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xAC, 26),
PINGROUP(PMCA, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 4),
PINGROUP(PMCB, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 6),
PINGROUP(PMCC, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 8),
PINGROUP(PMCD, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 10),
PINGROUP(PMCE, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 12),
PINGROUP(XM2C, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),
PINGROUP(XM2D, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),
};
void __devinit tegra20_pinmux_init(const struct tegra_pingroup_desc **pg,
int *pg_max, const struct tegra_drive_pingroup_desc **pgdrive,
int *pgdrive_max)
{
*pg = tegra_soc_pingroups;
*pg_max = TEGRA_MAX_PINGROUP;
*pgdrive = tegra_soc_drive_pingroups;
*pgdrive_max = TEGRA_MAX_DRIVE_PINGROUP;
}
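/*
* Minimal caller sketch (the exact calling convention of the shared
* pinmux core is assumed): the SoC-specific init hands out pointers to
* the const tables above together with their sizes.
*
*    const struct tegra_pingroup_desc *pg;
*    const struct tegra_drive_pingroup_desc *pgdrive;
*    int pg_max, pgdrive_max;
*
*    tegra20_pinmux_init(&pg, &pg_max, &pgdrive, &pgdrive_max);
*/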
| gpl-2.0 |
PyYoshi/android_kernel_sharp_303sh | arch/sparc/kernel/auxio_64.c | 7575 | 3195 | /* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/auxio.h>
void __iomem *auxio_register = NULL;
EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
AUXIO_TYPE_SBUS,
AUXIO_TYPE_EBUS
};
static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
static DEFINE_SPINLOCK(auxio_lock);
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
if (auxio_register) {
unsigned long flags;
u8 regval, newval;
spin_lock_irqsave(&auxio_lock, flags);
regval = (ebus ?
(u8) readl(auxio_register) :
sbus_readb(auxio_register));
newval = regval | bits_on;
newval &= ~bits_off;
if (!ebus)
newval &= ~AUXIO_AUX1_MASK;
if (ebus)
writel((u32) newval, auxio_register);
else
sbus_writeb(newval, auxio_register);
spin_unlock_irqrestore(&auxio_lock, flags);
}
}
static void __auxio_set_bit(u8 bit, int on, int ebus)
{
u8 bits_on = bit;
u8 bits_off = 0;
if (!on) {
u8 tmp = bits_off;
bits_off = bits_on;
bits_on = tmp;
}
__auxio_rmw(bits_on, bits_off, ebus);
}
void auxio_set_led(int on)
{
int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
u8 bit;
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
EXPORT_SYMBOL(auxio_set_led);
static void __auxio_sbus_set_lte(int on)
{
__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
}
void auxio_set_lte(int on)
{
switch(auxio_devtype) {
case AUXIO_TYPE_SBUS:
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
/* FALL-THROUGH */
default:
break;
}
}
EXPORT_SYMBOL(auxio_set_lte);
static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
{},
};
MODULE_DEVICE_TABLE(of, auxio_match);
static int __devinit auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
if (!strcmp(dp->parent->name, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
} else if (!strcmp(dp->parent->name, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
printk("auxio: Unknown parent bus type [%s]\n",
dp->parent->name);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
printk(KERN_INFO "AUXIO: Found device at %s\n",
dp->full_name);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
return 0;
}
static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
.owner = THIS_MODULE,
.of_match_table = auxio_match,
},
};
static int __init auxio_init(void)
{
return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the floppy driver
* need to use the AUXIO register.
*/
fs_initcall(auxio_init);
| gpl-2.0 |
sssangram14/android_kernel_samsung_arubaslim | arch/sparc/kernel/auxio_64.c | 7575 | 3195 | /* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/auxio.h>
void __iomem *auxio_register = NULL;
EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
AUXIO_TYPE_SBUS,
AUXIO_TYPE_EBUS
};
static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
static DEFINE_SPINLOCK(auxio_lock);
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
if (auxio_register) {
unsigned long flags;
u8 regval, newval;
spin_lock_irqsave(&auxio_lock, flags);
regval = (ebus ?
(u8) readl(auxio_register) :
sbus_readb(auxio_register));
newval = regval | bits_on;
newval &= ~bits_off;
if (!ebus)
newval &= ~AUXIO_AUX1_MASK;
if (ebus)
writel((u32) newval, auxio_register);
else
sbus_writeb(newval, auxio_register);
spin_unlock_irqrestore(&auxio_lock, flags);
}
}
static void __auxio_set_bit(u8 bit, int on, int ebus)
{
u8 bits_on = bit;
u8 bits_off = 0;
if (!on) {
u8 tmp = bits_off;
bits_off = bits_on;
bits_on = tmp;
}
__auxio_rmw(bits_on, bits_off, ebus);
}
void auxio_set_led(int on)
{
int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
u8 bit;
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
EXPORT_SYMBOL(auxio_set_led);
static void __auxio_sbus_set_lte(int on)
{
__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
}
void auxio_set_lte(int on)
{
switch(auxio_devtype) {
case AUXIO_TYPE_SBUS:
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
/* FALL-THROUGH */
default:
break;
}
}
EXPORT_SYMBOL(auxio_set_lte);
static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
{},
};
MODULE_DEVICE_TABLE(of, auxio_match);
static int __devinit auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
if (!strcmp(dp->parent->name, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
} else if (!strcmp(dp->parent->name, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
printk("auxio: Unknown parent bus type [%s]\n",
dp->parent->name);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
printk(KERN_INFO "AUXIO: Found device at %s\n",
dp->full_name);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
return 0;
}
static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
.owner = THIS_MODULE,
.of_match_table = auxio_match,
},
};
static int __init auxio_init(void)
{
return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the floppy driver
* need to use the AUXIO register.
*/
fs_initcall(auxio_init);
| gpl-2.0 |
sub-b/android_kernel_samsung_matissewifi-old | drivers/isdn/mISDN/dsp_tones.c | 9623 | 17374 | /*
* Audio support data for ISDN4Linux.
*
* Copyright Andreas Eversberg (jolly@eversberg.eu)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/gfp.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
#define DATA_S sample_silence
#define SIZE_S (&sizeof_silence)
#define DATA_GA sample_german_all
#define SIZE_GA (&sizeof_german_all)
#define DATA_GO sample_german_old
#define SIZE_GO (&sizeof_german_old)
#define DATA_DT sample_american_dialtone
#define SIZE_DT (&sizeof_american_dialtone)
#define DATA_RI sample_american_ringing
#define SIZE_RI (&sizeof_american_ringing)
#define DATA_BU sample_american_busy
#define SIZE_BU (&sizeof_american_busy)
#define DATA_S1 sample_special1
#define SIZE_S1 (&sizeof_special1)
#define DATA_S2 sample_special2
#define SIZE_S2 (&sizeof_special2)
#define DATA_S3 sample_special3
#define SIZE_S3 (&sizeof_special3)
/***************/
/* tones loops */
/***************/
/* all tones are alaw encoded */
/* the last sample+1 is in phase with the first sample. the error is low */
static u8 sample_german_all[] = {
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
};
static u32 sizeof_german_all = sizeof(sample_german_all);
static u8 sample_german_old[] = {
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
};
static u32 sizeof_german_old = sizeof(sample_german_old);
static u8 sample_american_dialtone[] = {
0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c,
0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d,
0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0,
0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67,
0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67,
0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef,
0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8,
0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61,
0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e,
0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30,
0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d,
0x6d, 0x91, 0x19,
};
static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone);
static u8 sample_american_ringing[] = {
0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90,
0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed,
0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c,
0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d,
0x2d, 0x41, 0x89, 0x9e, 0x20, 0x70, 0x2c, 0xec,
0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11,
0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00,
0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39,
0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6,
0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3,
0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b,
0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f,
0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56,
0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59,
0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30,
0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d,
0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c,
0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd,
0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc,
0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d,
0x4d, 0xbd, 0x0d, 0xad, 0xe1,
};
static u32 sizeof_american_ringing = sizeof(sample_american_ringing);
static u8 sample_american_busy[] = {
0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66,
0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96,
0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57,
0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f,
0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40,
0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d,
0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c,
0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d,
0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40,
0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7,
0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a,
0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7,
0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40,
0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d,
0x4d, 0x4d, 0x6d, 0x01,
};
static u32 sizeof_american_busy = sizeof(sample_american_busy);
static u8 sample_special1[] = {
0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d,
0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd,
0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd,
0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd,
0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed,
0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41,
0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 0x70, 0xd7,
0x6d, 0xbd, 0x2d,
};
static u32 sizeof_special1 = sizeof(sample_special1);
static u8 sample_special2[] = {
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
};
static u32 sizeof_special2 = sizeof(sample_special2);
static u8 sample_special3[] = {
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
};
static u32 sizeof_special3 = sizeof(sample_special3);
static u8 sample_silence[] = {
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
};
static u32 sizeof_silence = sizeof(sample_silence);
struct tones_samples {
u32 *len;
u8 *data;
};
static struct tones_samples samples[] = {
{&sizeof_german_all, sample_german_all},
{&sizeof_german_old, sample_german_old},
{&sizeof_american_dialtone, sample_american_dialtone},
{&sizeof_american_ringing, sample_american_ringing},
{&sizeof_american_busy, sample_american_busy},
{&sizeof_special1, sample_special1},
{&sizeof_special2, sample_special2},
{&sizeof_special3, sample_special3},
{NULL, NULL},
};
/***********************************
* generate ulaw from alaw samples *
***********************************/
void
dsp_audio_generate_ulaw_samples(void)
{
int i, j;
i = 0;
while (samples[i].len) {
j = 0;
while (j < (*samples[i].len)) {
samples[i].data[j] =
dsp_audio_alaw_to_ulaw[samples[i].data[j]];
j++;
}
i++;
}
}
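/*
* Minimal usage sketch (the init path lives outside this file; the
* option flag name is an assumption): the conversion must run exactly
* once, before any tone is streamed, because it rewrites the sample
* tables in place.
*
*    if (dsp_options & DSP_OPT_ULAW)
*        dsp_audio_generate_ulaw_samples();
*/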
/****************************
* tone sequence definition *
****************************/
static struct pattern {
int tone;
u8 *data[10];
u32 *siz[10];
u32 seq[10];
} pattern[] = {
{TONE_GERMAN_DIALTONE,
{DATA_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALTONE,
{DATA_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALTONE,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_DIALPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALPBX,
{DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_RINGING,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGING,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGING,
{DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_RINGPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGPBX,
{DATA_RI, DATA_S, DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_BUSY,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDBUSY,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_BUSY,
{DATA_BU, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_BU, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_HANGUP,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDHANGUP,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_HANGUP,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_SPECIAL_INFO,
{DATA_S1, DATA_S2, DATA_S3, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_GASSENBESETZT,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_AUFSCHALTTON,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} },
{0,
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
/******************
* copy tone data *
******************/
/* The tone data is copied from the pattern tables into the given buffer.
 * The count will be changed and may begin from 0 each pattern period.
 * The trick is to precalculate the pointers and lengths so that only one
 * memcpy is needed per function call, or two memcpys if the tone
 * sequence wraps around.
 *
 * pattern - the type of the pattern
 * count - the sample from the beginning of the pattern (phase)
 * len - the number of bytes
 *
 * The buffer is filled in place; there is no return value.
 *
 * if the tone has finished (e.g. knocking tone), dsp->tones is turned off
 */
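/*
* Worked example with TONE_GERMAN_DIALTONE (one 76-byte sample looped for
* a 1900-sample period): a call with len = 80 at count = 1890 first
* copies the 10 samples left in the period (start = 1890 % 76 = 66,
* num = 10), then wraps count back to 0 and copies the remaining 70
* samples from the start of the table - the "two memcpy" case described
* above.
*/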
void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
{
int index, count, start, num;
struct pattern *pat;
struct dsp_tone *tone = &dsp->tone;
/* if we have no tone, we copy silence */
if (!tone->tone) {
memset(data, dsp_silence, len);
return;
}
/* process pattern */
pat = (struct pattern *)tone->pattern;
/* points to the current pattern */
index = tone->index; /* gives current sequence index */
count = tone->count; /* gives current sample */
/* copy sample */
while (len) {
/* find sample to start with */
while (42) {
/* wrap around */
if (!pat->seq[index]) {
count = 0;
index = 0;
}
/* check if we are currently playing this tone */
if (count < pat->seq[index])
break;
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: reaching next sequence "
"(index=%d)\n", __func__, index);
count -= pat->seq[index];
index++;
}
/* calculate start and number of samples */
start = count % (*(pat->siz[index]));
num = len;
if (num + count > pat->seq[index])
num = pat->seq[index] - count;
if (num + start > (*(pat->siz[index])))
num = (*(pat->siz[index])) - start;
/* copy memory */
memcpy(data, pat->data[index] + start, num);
/* reduce length */
data += num;
count += num;
len -= num;
}
tone->index = index;
tone->count = count;
return;
}
/*******************************
* send HW message to hfc card *
*******************************/
static void
dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
{
struct sk_buff *nskb;
/* unlocking is not required, because we don't expect a response */
nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
(len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,
GFP_ATOMIC);
if (nskb) {
if (dsp->ch.peer) {
if (dsp->ch.recv(dsp->ch.peer, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
}
/*****************
* timer expires *
*****************/
void
dsp_tone_timeout(void *arg)
{
struct dsp *dsp = arg;
struct dsp_tone *tone = &dsp->tone;
struct pattern *pat = (struct pattern *)tone->pattern;
int index = tone->index;
if (!tone->tone)
return;
index++;
if (!pat->seq[index])
index = 0;
tone->index = index;
/* set next tone */
if (pat->data[index] == DATA_S)
dsp_tone_hw_message(dsp, NULL, 0);
else
dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
/* set timer */
init_timer(&tone->tl);
tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
add_timer(&tone->tl);
}
/********************
* set/release tone *
********************/
/*
* tones are realized by streaming or by special loop commands if supported
* by hardware. when hardware is used, the patterns will be controlled by
* timers.
*/
int
dsp_tone(struct dsp *dsp, int tone)
{
struct pattern *pat;
int i;
struct dsp_tone *tonet = &dsp->tone;
tonet->software = 0;
tonet->hardware = 0;
/* we turn off the tone */
if (!tone) {
if (dsp->features.hfc_loops && timer_pending(&tonet->tl))
del_timer(&tonet->tl);
if (dsp->features.hfc_loops)
dsp_tone_hw_message(dsp, NULL, 0);
tonet->tone = 0;
return 0;
}
pat = NULL;
i = 0;
while (pattern[i].tone) {
if (pattern[i].tone == tone) {
pat = &pattern[i];
break;
}
i++;
}
if (!pat) {
printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n", tone);
return -EINVAL;
}
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
__func__, tone, 0);
tonet->tone = tone;
tonet->pattern = pat;
tonet->index = 0;
tonet->count = 0;
if (dsp->features.hfc_loops) {
tonet->hardware = 1;
/* set first tone */
dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
/* set timer */
if (timer_pending(&tonet->tl))
del_timer(&tonet->tl);
init_timer(&tonet->tl);
tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
add_timer(&tonet->tl);
} else {
tonet->software = 1;
}
return 0;
}
| gpl-2.0 |
htc-mirror/endeavoru-2.6.39-86aa44d | drivers/net/cnic.c | 152 | 142020 | /* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
* Modified and maintained by: Michael Chan <mchan@broadcom.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME "cnic"
static char version[] __devinitdata =
"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
"Chen (zongxi@broadcom.com");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
return rcu_dereference_protected(cnic_ulp_tbl[type],
lockdep_is_held(&cnic_lock));
}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2,
.cnic_ctl = cnic_ctl,
};
static struct cnic_ops cnic_bnx2x_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2x,
.cnic_ctl = cnic_ctl,
};
static struct workqueue_struct *cnic_wq;
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
struct cnic_dev *dev;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (udev->uio_dev != -1)
return -EBUSY;
rtnl_lock();
dev = udev->dev;
if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
rtnl_unlock();
return -ENODEV;
}
udev->uio_dev = iminor(inode);
cnic_shutdown_rings(dev);
cnic_init_rings(dev);
rtnl_unlock();
return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
udev->uio_dev = -1;
return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
atomic_inc(&dev->ref_count);
}
static inline void cnic_put(struct cnic_dev *dev)
{
atomic_dec(&dev->ref_count);
}
static inline void csk_hold(struct cnic_sock *csk)
{
atomic_inc(&csk->ref_count);
}
static inline void csk_put(struct cnic_sock *csk)
{
atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
struct cnic_dev *cdev;
read_lock(&cnic_dev_lock);
list_for_each_entry(cdev, &cnic_dev_list, list) {
if (netdev == cdev->netdev) {
cnic_hold(cdev);
read_unlock(&cnic_dev_lock);
return cdev;
}
}
read_unlock(&cnic_dev_lock);
return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
atomic_inc(&ulp_ops->ref_count);
}
static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
info.cmd = DRV_CTL_STOP_L2_CMD;
ring->cid = cid;
ring->client_id = cl_id;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
return 0;
}
}
return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
struct cnic_sock *csk)
{
struct iscsi_path path_req;
char *buf = NULL;
u16 len = 0;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
struct cnic_ulp_ops *ulp_ops;
struct cnic_uio_dev *udev = cp->udev;
int rc = 0, retry = 0;
if (!udev || udev->uio_dev == -1)
return -ENODEV;
if (csk) {
len = sizeof(path_req);
buf = (char *) &path_req;
memset(&path_req, 0, len);
msg_type = ISCSI_KEVENT_PATH_REQ;
path_req.handle = (u64) csk->l5_cid;
if (test_bit(SK_F_IPV6, &csk->flags)) {
memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
sizeof(struct in6_addr));
path_req.ip_addr_len = 16;
} else {
memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
sizeof(struct in_addr));
path_req.ip_addr_len = 4;
}
path_req.vlan_id = csk->vlan_id;
path_req.pmtu = csk->mtu;
}
while (retry < 3) {
rc = 0;
rcu_read_lock();
ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
msg_type, buf, len);
rcu_read_unlock();
if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
break;
msleep(100);
retry++;
}
return 0;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
char *buf, u16 len)
{
int rc = -EINVAL;
switch (msg_type) {
case ISCSI_UEVENT_PATH_UPDATE: {
struct cnic_local *cp;
u32 l5_cid;
struct cnic_sock *csk;
struct iscsi_path *path_resp;
if (len < sizeof(*path_resp))
break;
path_resp = (struct iscsi_path *) buf;
cp = dev->cnic_priv;
l5_cid = (u32) path_resp->handle;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
rcu_read_lock();
if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
rc = -ENODEV;
rcu_read_unlock();
break;
}
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (cnic_in_use(csk) &&
test_bit(SK_F_CONNECT_START, &csk->flags)) {
memcpy(csk->ha, path_resp->mac_addr, 6);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
else
memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
sizeof(struct in_addr));
if (is_valid_ether_addr(csk->ha)) {
cnic_cm_set_pg(csk);
} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
clear_bit(SK_F_CONNECT_START, &csk->flags);
}
}
csk_put(csk);
rcu_read_unlock();
rc = 0;
}
}
return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
return 0;
if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
return 0;
}
return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
return 1;
}
return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
return 1;
}
return 0;
}
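/*
* The prep helpers above use SK_F_OFFLD_SCHED as a simple busy flag
* around offload state transitions. The generic shape of the pattern,
* as a sketch (BUSY_BIT is a placeholder, not a cnic flag):
*
*    while (test_and_set_bit(BUSY_BIT, &flags))
*        msleep(1);
*    ... perform the state transition ...
*    clear_bit(BUSY_BIT, &flags);
*/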
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
struct cnic_dev *dev;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
}
read_unlock(&cnic_dev_lock);
atomic_set(&ulp_ops->ref_count, 0);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
mutex_unlock(&cnic_lock);
/* Prevent race conditions with netdev_event */
rtnl_lock();
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_init(dev);
}
rtnl_unlock();
return 0;
}
int cnic_unregister_driver(int ulp_type)
{
struct cnic_dev *dev;
struct cnic_ulp_ops *ulp_ops;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
goto out_unlock;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d still has devices registered\n",
__func__, ulp_type);
read_unlock(&cnic_dev_lock);
goto out_unlock;
}
}
read_unlock(&cnic_dev_lock);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
msleep(100);
i++;
}
if (atomic_read(&ulp_ops->ref_count) != 0)
pr_warn("%s: Failed waiting for ref count to go to zero\n", __func__);
return 0;
out_unlock:
mutex_unlock(&cnic_lock);
return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d has already been registered to this device\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
mutex_unlock(&cnic_lock);
return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EINVAL;
}
mutex_unlock(&cnic_lock);
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
synchronize_rcu();
while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
i < 20) {
msleep(100);
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
id_tbl->start = start_id;
id_tbl->max = size;
id_tbl->next = 0;
spin_lock_init(&id_tbl->lock);
id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
kfree(id_tbl->table);
id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
int ret = -1;
id -= id_tbl->start;
if (id >= id_tbl->max)
return ret;
spin_lock(&id_tbl->lock);
if (!test_bit(id, id_tbl->table)) {
set_bit(id, id_tbl->table);
ret = 0;
}
spin_unlock(&id_tbl->lock);
return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
u32 id;
spin_lock(&id_tbl->lock);
id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
if (id >= id_tbl->max) {
id = -1;
if (id_tbl->next != 0) {
id = find_first_zero_bit(id_tbl->table, id_tbl->next);
if (id >= id_tbl->next)
id = -1;
}
}
if (id < id_tbl->max) {
set_bit(id, id_tbl->table);
id_tbl->next = (id + 1) & (id_tbl->max - 1);
id += id_tbl->start;
}
spin_unlock(&id_tbl->lock);
return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
if (id == -1)
return;
id -= id_tbl->start;
if (id >= id_tbl->max)
return;
clear_bit(id, id_tbl->table);
}
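/*
* Usage sketch for the id-table helpers above (sizes illustrative; note
* that cnic_alloc_new_id() assumes a power-of-two max because of the
* "& (max - 1)" wrap):
*
*    struct cnic_id_tbl tbl;
*
*    if (!cnic_init_id_tbl(&tbl, 256, 0x100)) {
*        u32 id = cnic_alloc_new_id(&tbl);    - returns 0x100 first
*        if (id != -1)
*            cnic_free_id(&tbl, id);
*        cnic_free_id_tbl(&tbl);
*    }
*/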
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
if (!dma->pg_arr)
return;
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
}
if (dma->pgtbl) {
dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
dma->pgtbl, dma->pgtbl_map);
dma->pgtbl = NULL;
}
kfree(dma->pg_arr);
dma->pg_arr = NULL;
dma->num_pages = 0;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in big endian format. */
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in little endian format. */
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
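/*
* Worked example of the two layouts above for the 64-bit DMA address
* 0x0000000123456780: the "big endian" variant stores the high word
* first (0x00000001, then 0x23456780), the "little endian" variant
* stores the low word first - each 32-bit half itself being written as
* a cpu_to_le32() value.
*/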
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
int pages, int use_pg_tbl)
{
int i, size;
struct cnic_local *cp = dev->cnic_priv;
size = pages * (sizeof(void *) + sizeof(dma_addr_t));
dma->pg_arr = kzalloc(size, GFP_ATOMIC);
if (dma->pg_arr == NULL)
return -ENOMEM;
dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
dma->num_pages = pages;
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
goto error;
}
if (!use_pg_tbl)
return 0;
dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
~(BCM_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
goto error;
cp->setup_pgtbl(dev, dma);
return 0;
error:
cnic_free_dma(dev, dma);
return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
for (i = 0; i < cp->ctx_blks; i++) {
if (cp->ctx_arr[i].ctx) {
dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
cp->ctx_arr[i].ctx,
cp->ctx_arr[i].mapping);
cp->ctx_arr[i].ctx = NULL;
}
}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
uio_unregister_device(&udev->cnic_uinfo);
if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map);
udev->l2_buf = NULL;
}
if (udev->l2_ring) {
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
udev->l2_ring = NULL;
}
pci_dev_put(udev->pdev);
kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
if (!udev)
return;
write_lock(&cnic_dev_lock);
list_del_init(&udev->list);
write_unlock(&cnic_dev_lock);
__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
if (udev) {
udev->dev = NULL;
cp->udev = NULL;
}
cnic_free_context(dev);
kfree(cp->ctx_arr);
cp->ctx_arr = NULL;
cp->ctx_blks = 0;
cnic_free_dma(dev, &cp->gbl_buf_info);
cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
cnic_free_dma(dev, &cp->kcq2.dma);
cnic_free_dma(dev, &cp->kcq1.dma);
kfree(cp->iscsi_tbl);
cp->iscsi_tbl = NULL;
kfree(cp->ctx_tbl);
cp->ctx_tbl = NULL;
cnic_free_id_tbl(&cp->fcoe_cid_tbl);
cnic_free_id_tbl(&cp->cid_tbl);
}
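/* 5709-specific context setup: context memory apparently lives in
* host pages (one page per block, 128 bytes per context), with the
* CID ranges for the PG and iSCSI maps read back through indirect
* registers. */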
static int cnic_alloc_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (CHIP_NUM(cp) == CHIP_NUM_5709) {
int i, k, arr_size;
cp->ctx_blk_size = BCM_PAGE_SIZE;
cp->cids_per_blk = BCM_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
k = 0;
for (i = 0; i < 2; i++) {
u32 j, reg, off, lo, hi;
if (i == 0)
off = BNX2_PG_CTX_MAP;
else
off = BNX2_ISCSI_CTX_MAP;
reg = cnic_reg_rd_ind(dev, off);
lo = reg >> 16;
hi = reg & 0xffff;
for (j = lo; j < hi; j += cp->cids_per_blk, k++)
cp->ctx_arr[k].cid = j;
}
cp->ctx_blks = k;
if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
cp->ctx_blks = 0;
return -ENOMEM;
}
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
}
}
return 0;
}
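/* Allocate a kernel completion queue (KCQ) ring. bnx2 devices use a
* hardware page table for the ring; on bnx2x the pages are chained
* instead, by writing each successor page's bus address into the
* last entry of the previous page (those entries are skipped by
* cnic_bnx2x_next_idx()). */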
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
int err, i, is_bnx2 = 0;
struct kcqe **kcq;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
is_bnx2 = 1;
err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
if (err)
return err;
kcq = (struct kcqe **) info->dma.pg_arr;
info->kcq = kcq;
if (is_bnx2)
return 0;
for (i = 0; i < KCQ_PAGE_CNT; i++) {
struct bnx2x_bd_chain_next *next =
(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
int j = i + 1;
if (j >= KCQ_PAGE_CNT)
j = 0;
next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
}
return 0;
}
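/* Find or create the UIO device that backs the userspace L2 path.
* An existing cnic_uio_dev for the same PCI function is reused so
* that the userspace mapping survives a restart of the device;
* otherwise new L2 ring and buffer areas are allocated and the new
* udev is put on the global list. */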
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
cp->udev = udev;
read_unlock(&cnic_dev_lock);
return 0;
}
}
read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
return -ENOMEM;
udev->uio_dev = -1;
udev->dev = dev;
udev->pdev = dev->pcidev;
udev->l2_ring_size = pages * BCM_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_ring)
goto err_udev;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_buf)
goto err_dma;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
cp->udev = udev;
return 0;
err_dma:
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
err_udev:
kfree(udev);
return -ENOMEM;
}
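/* Export device resources through UIO: mem[0] is the register BAR
* (physical), mem[1] the status block, mem[2]/mem[3] the L2 ring and
* buffers (logical). The UIO device itself is registered only once;
* when an existing udev is reused only the rings are reinitialized. */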
static int cnic_init_uio(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct uio_info *uinfo;
int ret = 0;
if (!udev)
return -ENOMEM;
uinfo = &udev->cnic_uinfo;
uinfo->mem[0].addr = dev->netdev->base_addr;
uinfo->mem[0].internal_addr = dev->regview;
uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
}
uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
uinfo->mem[2].size = udev->l2_ring_size;
uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
uinfo->mem[3].size = udev->l2_buf_size;
uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
uinfo->open = cnic_uio_open;
uinfo->release = cnic_uio_close;
if (udev->uio_dev == -1) {
if (!uinfo->priv) {
uinfo->priv = udev;
ret = uio_register_device(&udev->pdev->dev, uinfo);
}
} else {
cnic_init_rings(dev);
}
return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ret;
ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
if (ret)
goto error;
cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
ret = cnic_alloc_context(dev);
if (ret)
goto error;
ret = cnic_alloc_uio_rings(dev, 2);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return ret;
}
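/* Allocate bnx2x context memory in blocks of the size advertised by
* the ethernet driver. On the 57710 each block must also be aligned
* to its own size: if dma_alloc_coherent() returns a misaligned
* block, everything is freed, the block size is grown by one
* alignment unit and the loop restarts (i is reset to -1). */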
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
blks = total_mem / ctx_blk_size;
if (total_mem % ctx_blk_size)
blks++;
if (blks > cp->ethdev->ctx_tbl_len)
return -ENOMEM;
cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
if (!BNX2X_CHIP_IS_57710(cp->chip_id))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
for (i = 0; i < blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
cnic_free_context(dev);
cp->ctx_blk_size += cp->ctx_align;
i = -1;
continue;
}
}
}
return 0;
}
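/* Top-level bnx2x resource allocation: lay out the iSCSI and FCoE
* CID spaces, carve per-connection KWQE-16 data slots out of DMA
* pages, allocate both KCQs, the connection and global buffers, the
* context memory and the UIO rings. Failures unwind through
* cnic_free_resc(). */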
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
cp->iro_arr = ethdev->iro_arr;
cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
cp->fcoe_init_cid = 0x10;
}
if (start_cid < BNX2X_ISCSI_START_CID) {
u32 delta = BNX2X_ISCSI_START_CID - start_cid;
cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
cp->fcoe_start_cid += delta;
cp->max_cid_space += delta;
}
cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
GFP_KERNEL);
if (!cp->iscsi_tbl)
goto error;
cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
cp->max_cid_space, GFP_KERNEL);
if (!cp->ctx_tbl)
goto error;
for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
}
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
goto error;
n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
off;
if ((i % n) == (n - 1))
j++;
}
ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2);
if (ret)
goto error;
}
pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
ret = cnic_alloc_bnx2x_context(dev);
if (ret)
goto error;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
cp->l2_rx_ring_size = 15;
ret = cnic_alloc_uio_rings(dev, 4);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return -ENOMEM;
}
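/* Free slots on the KWQ ring: the maximum index minus the in-flight
* count (producer minus consumer, wrapped by the index mask). */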
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
return cp->max_kwq_idx -
((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
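/* Copy the caller's KWQEs into the bnx2 KWQ ring and ring the
* doorbell with the new producer index. Returns -EAGAIN when the
* device is down or the ring lacks space, except right after ring
* initialization when the space check is bypassed once. */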
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct kwqe *prod_qe;
u16 prod, sw_prod, i;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
!test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
for (i = 0; i < num_wqes; i++) {
prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
prod++;
sw_prod = prod & MAX_KWQ_IDX;
}
cp->kwq_prod_idx = prod;
CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
spin_unlock_bh(&cp->cnic_ulp_lock);
return 0;
}
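/* Return the per-connection scratch buffer reserved for KWQE-16
* ramrod data and report its bus address through @l5_data. */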
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
union l5cm_specific_data *l5_data)
{
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
dma_addr_t map;
map = ctx->kwqe_data_mapping;
l5_data->phy_address.lo = (u64) map & 0xffffffff;
l5_data->phy_address.hi = (u64) map >> 32;
return ctx->kwqe_data;
}
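/* Build a single slow-path element (ramrod) and hand it to the
* ethernet driver. drv_submit_kwqes_16() returns the number of
* elements it consumed, so 1 means success here. */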
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
int ret;
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
BNX2X_HW_CID(cp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
kwq[0] = (struct kwqe_16 *) &kwqe;
spin_lock_bh(&cp->cnic_ulp_lock);
ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
spin_unlock_bh(&cp->cnic_ulp_lock);
if (ret == 1)
return 0;
return -EBUSY;
}
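/* Deliver locally generated completions to the registered upper
* layer driver (iSCSI or FCoE), just as if they had arrived on the
* KCQ. */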
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
struct kcqe *cqes[], u32 num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (likely(ulp_ops)) {
ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
cqes, num_cqes);
}
rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
u32 pfid = cp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
cp->num_iscsi_tasks;
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
return 0;
/* init Tstorm RAM */
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
/* init Ustorm RAM */
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
if (!dev->max_iscsi_conn) {
kcqe.completion_status =
ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
goto done;
}
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
done:
kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
cnic_free_dma(dev, &iscsi->hq_info);
cnic_free_dma(dev, &iscsi->r2tq_info);
cnic_free_dma(dev, &iscsi->task_array_info);
cnic_free_id(&cp->cid_tbl, ctx->cid);
} else {
cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
}
ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
u32 cid;
int ret, pages;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
if (cid == -1) {
ret = -ENOMEM;
goto error;
}
ctx->cid = cid;
return 0;
}
cid = cnic_alloc_new_id(&cp->cid_tbl);
if (cid == -1) {
ret = -ENOMEM;
goto error;
}
ctx->cid = cid;
pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
return 0;
error:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
return ret;
}
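/* Map a CID to its slice of context memory: locate the block and the
* offset within it, compensate for any alignment padding at the head
* of the block, and return both the CPU pointer and (through
* @ctx_addr) the bus address. */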
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
struct regpair *ctx_addr)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
unsigned long align_off = 0;
dma_addr_t ctx_map;
void *ctx;
if (cp->ctx_align) {
unsigned long mask = cp->ctx_align - 1;
if (cp->ctx_arr[blk].mapping & mask)
align_off = cp->ctx_align -
(cp->ctx_arr[blk].mapping & mask);
}
ctx_map = cp->ctx_arr[blk].mapping + align_off +
(off * BNX2X_CONTEXT_MEM_SIZE);
ctx = cp->ctx_arr[blk].ctx + align_off +
(off * BNX2X_CONTEXT_MEM_SIZE);
if (init)
memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
ctx_addr->lo = ctx_map & 0xffffffff;
ctx_addr->hi = (u64) ctx_map >> 32;
return ctx;
}
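/* Initialize the iSCSI context of an offloaded connection from the
* OFFLOAD_CONN1/2/3 KWQEs: program the per-storm sections (X/T/U/C)
* with the SQ/HQ/R2TQ/task-array page tables and set up the
* completion queues. */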
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_offload1 *req1 =
(struct iscsi_kwqe_conn_offload1 *) wqes[0];
struct iscsi_kwqe_conn_offload2 *req2 =
(struct iscsi_kwqe_conn_offload2 *) wqes[1];
struct iscsi_kwqe_conn_offload3 *req3;
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
u32 hw_cid = BNX2X_HW_CID(cp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
return -EINVAL;
n_max = req2->num_additional_wqes + 2;
ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
if (ictx == NULL)
return -ENOMEM;
req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
ictx->xstorm_ag_context.hq_prod = 1;
ictx->xstorm_st_context.iscsi.first_burst_length =
ISCSI_DEF_FIRST_BURST_LEN;
ictx->xstorm_st_context.iscsi.max_send_pdu_length =
ISCSI_DEF_MAX_RECV_SEG_LEN;
ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
req1->sq_page_table_addr_lo;
ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
req1->sq_page_table_addr_hi;
ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
iscsi->hq_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
(u64) iscsi->hq_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
iscsi->hq_info.pgtbl[0];
ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
iscsi->hq_info.pgtbl[1];
ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
iscsi->r2tq_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
(u64) iscsi->r2tq_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
iscsi->r2tq_info.pgtbl[0];
ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
iscsi->r2tq_info.pgtbl[1];
ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
BNX2X_ISCSI_PBL_NOT_CACHED;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of the RQ DB, not the PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
req2->rq_page_table_addr_lo & PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
ictx->tstorm_st_context.tcp.flags2 |=
TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
ictx->tstorm_st_context.tcp.ooo_support_mode =
TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
ictx->ustorm_st_context.ring.rq.pbl_base.lo =
req2->rq_page_table_addr_lo;
ictx->ustorm_st_context.ring.rq.pbl_base.hi =
req2->rq_page_table_addr_hi;
ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
iscsi->r2tq_info.pgtbl_map & 0xffffffff;
ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
(u64) iscsi->r2tq_info.pgtbl_map >> 32;
ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
iscsi->r2tq_info.pgtbl[0];
ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
iscsi->r2tq_info.pgtbl[1];
ictx->ustorm_st_context.ring.cq_pbl_base.lo =
req1->cq_page_table_addr_lo;
ictx->ustorm_st_context.ring.cq_pbl_base.hi =
req1->cq_page_table_addr_hi;
ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
ictx->ustorm_st_context.task_pbe_cache_index =
BNX2X_ISCSI_PBL_NOT_CACHED;
ictx->ustorm_st_context.task_pdu_cache_index =
BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
if (j == 3) {
if (n >= n_max)
break;
req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
j = 0;
}
ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
req3->qp_first_pte[j].hi;
ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
req3->qp_first_pte[j].lo;
}
ictx->ustorm_st_context.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->ustorm_st_context.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
ictx->ustorm_st_context.tce_phy_addr.lo =
iscsi->task_array_info.pgtbl[0];
ictx->ustorm_st_context.tce_phy_addr.hi =
iscsi->task_array_info.pgtbl[1];
ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->ustorm_st_context.num_cqs = cp->num_cqs;
ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
ictx->ustorm_st_context.negotiated_rx_and_flags |=
ISCSI_DEF_MAX_BURST_LEN;
ictx->ustorm_st_context.negotiated_rx |=
ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
ictx->cstorm_st_context.hq_pbl_base.lo =
iscsi->hq_info.pgtbl_map & 0xffffffff;
ictx->cstorm_st_context.hq_pbl_base.hi =
(u64) iscsi->hq_info.pgtbl_map >> 32;
ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
ictx->cstorm_st_context.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->cstorm_st_context.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
/* CSTORM and USTORM initialization differ: CSTORM requires the
* CQ DB base address, not the PTE address */
ictx->cstorm_st_context.cq_db_base.lo =
req1->cq_page_table_addr_lo & PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
for (i = 0; i < cp->num_cqs; i++) {
ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
ISCSI_INITIAL_SN;
ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
ISCSI_INITIAL_SN;
}
ictx->xstorm_ag_context.cdu_reserved =
CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
ISCSI_CONNECTION_TYPE);
ictx->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
ISCSI_CONNECTION_TYPE);
return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
u32 l5_cid;
int ret = 0;
if (num < 2) {
*work = num;
return -EINVAL;
}
req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
if ((num - 2) < req2->num_additional_wqes) {
*work = num;
return -EINVAL;
}
*work = 2 + req2->num_additional_wqes;
l5_cid = req1->iscsi_conn_id;
if (l5_cid >= MAX_ISCSI_TBL_SZ)
return -EINVAL;
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
kcqe.iscsi_conn_id = l5_cid;
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
kcqe.completion_status =
ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
goto done;
}
if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
atomic_dec(&cp->iscsi_conn);
goto done;
}
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
if (ret < 0) {
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
goto done;
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_update *req =
(struct iscsi_kwqe_conn_update *) kwqe;
void *data;
union l5cm_specific_data l5_data;
u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
int ret;
if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
return -EINVAL;
data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!data)
return -ENOMEM;
memcpy(data, kwqe, sizeof(struct kwqe));
ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
u32 hw_cid;
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
hw_cid = BNX2X_HW_CID(cp, ctx->cid);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
if (ret == 0)
wait_event(ctx->waitq, ctx->wait_cond);
return ret;
}
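/* Destroy an iSCSI connection. The CFC delete ramrod is apparently
* only safe some time after the last use of the context, so requests
* arriving within 2 seconds of ctx->timestamp are deferred to the
* delete_task workqueue instead of being issued immediately. */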
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_destroy *req =
(struct iscsi_kwqe_conn_destroy *) kwqe;
u32 l5_cid = req->reserved0;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
int ret = 0;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
goto skip_cfc_delete;
if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
if (delta > (2 * HZ))
delta = 0;
set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
queue_delayed_work(cnic_wq, &cp->delete_task, delta);
goto destroy_reply;
}
ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
skip_cfc_delete:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
destroy_reply:
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
kcqe.iscsi_conn_id = l5_cid;
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
kcqe.iscsi_conn_context_id = req->context_id;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return ret;
}
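/* Fill the xstorm/tstorm connection buffers for a TCP connect
* ramrod: context address, receive/send buffer sizes, keepalive
* parameters, and a pseudo-header checksum computed over the
* IPv6-sized address fields (zero-padded for IPv4). */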
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
struct l4_kwq_connect_req1 *kwqe1,
struct l4_kwq_connect_req3 *kwqe3,
struct l5cm_active_conn_buffer *conn_buf)
{
struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
struct l5cm_xstorm_conn_buffer *xstorm_buf =
&conn_buf->xstorm_conn_buffer;
struct l5cm_tstorm_conn_buffer *tstorm_buf =
&conn_buf->tstorm_conn_buffer;
struct regpair context_addr;
u32 cid = BNX2X_SW_CID(kwqe1->cid);
struct in6_addr src_ip, dst_ip;
int i;
u32 *addrp;
addrp = (u32 *) &conn_addr->local_ip_addr;
for (i = 0; i < 4; i++, addrp++)
src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
addrp = (u32 *) &conn_addr->remote_ip_addr;
for (i = 0; i < 4; i++, addrp++)
dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
xstorm_buf->context_addr.hi = context_addr.hi;
xstorm_buf->context_addr.lo = context_addr.lo;
xstorm_buf->mss = 0xffff;
xstorm_buf->rcv_buf = kwqe3->rcv_buf;
if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
xstorm_buf->pseudo_header_checksum =
swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
tstorm_buf->params |=
L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
if (kwqe3->ka_timeout) {
tstorm_buf->ka_enable = 1;
tstorm_buf->ka_timeout = kwqe3->ka_timeout;
tstorm_buf->ka_interval = kwqe3->ka_interval;
tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
}
tstorm_buf->rcv_buf = kwqe3->rcv_buf;
tstorm_buf->snd_buf = kwqe3->snd_buf;
tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[4]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[2]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
mac[1]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
struct cnic_local *cp = dev->cnic_priv;
u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
u16 tstorm_flags = 0;
if (tcp_ts) {
xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
}
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
struct cnic_local *cp = dev->cnic_priv;
struct l4_kwq_connect_req1 *kwqe1 =
(struct l4_kwq_connect_req1 *) wqes[0];
struct l4_kwq_connect_req3 *kwqe3;
struct l5cm_active_conn_buffer *conn_buf;
struct l5cm_conn_addr_params *conn_addr;
union l5cm_specific_data l5_data;
u32 l5_cid = kwqe1->pg_cid;
struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
int ret;
if (num < 2) {
*work = num;
return -EINVAL;
}
if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
*work = 3;
else
*work = 2;
if (num < *work) {
*work = num;
return -EINVAL;
}
if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "conn_buf size too big\n");
return -ENOMEM;
}
conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!conn_buf)
return -ENOMEM;
memset(conn_buf, 0, sizeof(*conn_buf));
conn_addr = &conn_buf->conn_addr_buf;
conn_addr->remote_addr_0 = csk->ha[0];
conn_addr->remote_addr_1 = csk->ha[1];
conn_addr->remote_addr_2 = csk->ha[2];
conn_addr->remote_addr_3 = csk->ha[3];
conn_addr->remote_addr_4 = csk->ha[4];
conn_addr->remote_addr_5 = csk->ha[5];
if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
struct l4_kwq_connect_req2 *kwqe2 =
(struct l4_kwq_connect_req2 *) wqes[1];
conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
}
kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
conn_addr->local_tcp_port = kwqe1->src_port;
conn_addr->remote_tcp_port = kwqe1->dst_port;
conn_addr->pmtu = kwqe3->pmtu;
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
cnic_bnx2x_set_tcp_timestamp(dev,
kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
if (!ret)
set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
union l5cm_specific_data l5_data;
int ret;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
union l5cm_specific_data l5_data;
int ret;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
struct l4_kcq kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
kcqe.pg_host_opaque = req->host_opaque;
kcqe.pg_cid = req->host_opaque;
kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
struct l4_kcq kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
kcqe.pg_host_opaque = req->pg_host_opaque;
kcqe.pg_cid = req->pg_cid;
kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_stat *req;
struct fcoe_stat_ramrod_params *fcoe_stat;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
int ret;
u32 cid;
req = (struct fcoe_kwqe_stat *) kwqe;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_stat)
return -ENOMEM;
memset(fcoe_stat, 0, sizeof(*fcoe_stat));
memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
int ret;
struct cnic_local *cp = dev->cnic_priv;
u32 cid;
struct fcoe_init_ramrod_params *fcoe_init;
struct fcoe_kwqe_init1 *req1;
struct fcoe_kwqe_init2 *req2;
struct fcoe_kwqe_init3 *req3;
union l5cm_specific_data l5_data;
if (num < 3) {
*work = num;
return -EINVAL;
}
req1 = (struct fcoe_kwqe_init1 *) wqes[0];
req2 = (struct fcoe_kwqe_init2 *) wqes[1];
req3 = (struct fcoe_kwqe_init3 *) wqes[2];
if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
*work = 1;
return -EINVAL;
}
if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
*work = 2;
return -EINVAL;
}
if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_init size too big\n");
return -ENOMEM;
}
fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_init)
return -ENOMEM;
memset(fcoe_init, 0, sizeof(*fcoe_init));
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
fcoe_init->eq_next_page_addr.lo =
cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
fcoe_init->eq_next_page_addr.hi =
(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
fcoe_init->sb_num = cp->status_blk_num;
fcoe_init->eq_prod = MAX_KCQ_IDX;
fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
cp->kcq2.sw_prod_idx = 0;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
return ret;
}
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
int ret = 0;
u32 cid = -1, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
struct fcoe_kwqe_conn_offload1 *req1;
struct fcoe_kwqe_conn_offload2 *req2;
struct fcoe_kwqe_conn_offload3 *req3;
struct fcoe_kwqe_conn_offload4 *req4;
struct fcoe_conn_offload_ramrod_params *fcoe_offload;
struct cnic_context *ctx;
struct fcoe_context *fctx;
struct regpair ctx_addr;
union l5cm_specific_data l5_data;
struct fcoe_kcqe kcqe;
struct kcqe *cqes[1];
if (num < 4) {
*work = num;
return -EINVAL;
}
req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
*work = 4;
l5_cid = req1->fcoe_conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
goto err_reply;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
goto err_reply;
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
ret = 0;
goto err_reply;
}
cid = ctx->cid;
fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
if (fctx) {
u32 hw_cid = BNX2X_HW_CID(cp, cid);
u32 val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
FCOE_CONNECTION_TYPE);
fctx->xstorm_ag_context.cdu_reserved = val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
FCOE_CONNECTION_TYPE);
fctx->ustorm_ag_context.cdu_usage = val;
}
if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_offload size too big\n");
goto err_reply;
}
fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_offload)
goto err_reply;
memset(fcoe_offload, 0, sizeof(*fcoe_offload));
memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
cid = BNX2X_HW_CID(cp, cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (!ret)
set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
return ret;
err_reply:
if (cid != -1)
cnic_free_bnx2x_conn_resc(dev, l5_cid);
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
kcqe.fcoe_conn_id = req1->fcoe_conn_id;
kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
return ret;
}
static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_enable_disable *req;
struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_enable size too big\n");
return -ENOMEM;
}
fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_enable)
return -ENOMEM;
memset(fcoe_enable, 0, sizeof(*fcoe_enable));
memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_enable_disable *req;
struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_disable size too big\n");
return -ENOMEM;
}
fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_disable)
return -ENOMEM;
memset(fcoe_disable, 0, sizeof(*fcoe_disable));
memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_destroy *req;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx;
struct fcoe_kcqe kcqe;
struct kcqe *cqes[1];
req = (struct fcoe_kwqe_conn_destroy *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
ctx = &cp->ctx_tbl[l5_cid];
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (ret == 0) {
wait_event(ctx->waitq, ctx->wait_cond);
set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
queue_delayed_work(cnic_wq, &cp->delete_task,
msecs_to_jiffies(2000));
}
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
kcqe.fcoe_conn_id = req->conn_id;
kcqe.fcoe_conn_context_id = cid;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
return ret;
}
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
int ret;
u32 cid;
req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
for (i = 0; i < num_wqes; ) {
kwqe = wqes[i];
opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
work = 1;
switch (opcode) {
case ISCSI_KWQE_OPCODE_INIT1:
ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_INIT2:
ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
num_wqes - i, &work);
break;
case ISCSI_KWQE_OPCODE_UPDATE_CONN:
ret = cnic_bnx2x_iscsi_update(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_DESTROY_CONN:
ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_CONNECT1:
ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
&work);
break;
case L4_KWQE_OPCODE_VALUE_CLOSE:
ret = cnic_bnx2x_close(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_RESET:
ret = cnic_bnx2x_reset(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
ret = cnic_bnx2x_offload_pg(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
ret = cnic_bnx2x_update_pg(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
ret = 0;
break;
default:
ret = 0;
netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
opcode);
break;
}
if (ret < 0)
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
i += work;
}
return 0;
}
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
struct cnic_local *cp = dev->cnic_priv;
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
return -EINVAL;
for (i = 0; i < num_wqes; ) {
kwqe = wqes[i];
opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
work = 1;
switch (opcode) {
case FCOE_KWQE_OPCODE_INIT1:
ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
num_wqes - i, &work);
break;
case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
num_wqes - i, &work);
break;
case FCOE_KWQE_OPCODE_ENABLE_CONN:
ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DISABLE_CONN:
ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DESTROY_CONN:
ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DESTROY:
ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_STAT:
ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
break;
default:
ret = 0;
netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
opcode);
break;
}
if (ret < 0)
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
i += work;
}
return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes)
{
int ret = -EINVAL;
u32 layer_code;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
if (!num_wqes)
return 0;
layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
switch (layer_code) {
case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
case KWQE_FLAGS_LAYER_MASK_L4:
case KWQE_FLAGS_LAYER_MASK_L2:
ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
break;
case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
break;
}
return ret;
}
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
return KCQE_FLAGS_LAYER_MASK_L4;
return opflag & KCQE_FLAGS_LAYER_MASK;
}
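/* Fan completed KCQEs out to the upper layer drivers. Consecutive
* entries belonging to the same protocol layer are batched into one
* indicate_kcqes() call; ramrod completions are counted so their SPQ
* credits can be returned to the ethernet driver afterwards. */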
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
int i, j, comp = 0;
i = 0;
j = 1;
while (num_cqes) {
struct cnic_ulp_ops *ulp_ops;
int ulp_type;
u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
comp++;
while (j < num_cqes) {
u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
break;
if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
comp++;
j++;
}
if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
ulp_type = CNIC_ULP_RDMA;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
ulp_type = CNIC_ULP_ISCSI;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
ulp_type = CNIC_ULP_FCOE;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
ulp_type = CNIC_ULP_L4;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
goto end;
else {
netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
kcqe_op_flag);
goto end;
}
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (likely(ulp_ops)) {
ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
cp->completed_kcq + i, j);
}
rcu_read_unlock();
end:
num_cqes -= j;
i += j;
j = 1;
}
if (unlikely(comp))
cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
return idx + 1;
}
static u16 cnic_bnx2_hw_idx(u16 idx)
{
return idx;
}
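/* bnx2x index helpers: the last entry of every KCQ page holds the
* next-page pointer (set up in cnic_alloc_kcq()), so valid indices
* skip any value whose low bits equal MAX_KCQE_CNT. */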
static u16 cnic_bnx2x_next_idx(u16 idx)
{
idx++;
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
static u16 cnic_bnx2x_hw_idx(u16 idx)
{
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
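/* Harvest new KCQEs between the software producer and the hardware
* producer into cp->completed_kcq[]. An entry flagged with
* KCQE_FLAGS_NEXT is continued by the following entry, so last_cnt
* only advances on sequence boundaries and incomplete sequences are
* left for the next pass. */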
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
struct cnic_local *cp = dev->cnic_priv;
u16 i, ri, hw_prod, last;
struct kcqe *kcqe;
int kcqe_cnt = 0, last_cnt = 0;
i = ri = last = info->sw_prod_idx;
ri &= MAX_KCQ_IDX;
hw_prod = *info->hw_prod_idx_ptr;
hw_prod = cp->hw_idx(hw_prod);
while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
cp->completed_kcq[kcqe_cnt++] = kcqe;
i = cp->next_idx(i);
ri = i & MAX_KCQ_IDX;
if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
last_cnt = kcqe_cnt;
last = i;
}
}
info->sw_prod_idx = last;
return last_cnt;
}
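/* Scan the L2 receive completion ring for ramrod CQEs (client setup
* or halt) and count them, so the teardown path can tell when its L2
* ring operations have completed. */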
static int cnic_l2_completion(struct cnic_local *cp)
{
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
u32 cmd;
int comp = 0;
if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
return 0;
hw_cons = *cp->rx_cons_ptr;
if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
hw_cons++;
sw_cons = cp->rx_cons;
while (sw_cons != hw_cons) {
u8 cqe_fp_flags;
cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
cmd == RAMROD_CMD_ID_ETH_HALT)
comp++;
}
sw_cons = BNX2X_NEXT_RCQE(sw_cons);
}
return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
u16 rx_cons, tx_cons;
int comp = 0;
if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
rx_cons = *cp->rx_cons_ptr;
tx_cons = *cp->tx_cons_ptr;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
comp = cnic_l2_completion(cp);
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
if (cp->udev)
uio_event_notify(&cp->udev->cnic_uinfo);
}
if (comp)
clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
/* status block index must be read before reading other fields */
rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that status_blk fields can change. */
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
/* status block index must be read first */
rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
}
CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
cnic_chk_pkt_rings(cp);
return status_idx;
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
struct status_block *sblk = status_blk;
return sblk->status_idx;
}
return cnic_service_bnx2_queues(dev);
}
static void cnic_service_bnx2_msix(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_doirq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
tasklet_schedule(&cp->cnic_irq_task);
}
}
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
struct cnic_dev *dev = dev_instance;
struct cnic_local *cp = dev->cnic_priv;
if (cp->ack_int)
cp->ack_int(dev);
cnic_doirq(dev);
return IRQ_HANDLED;
}
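/* Interrupt acknowledgement helpers: cnic_ack_bnx2x_int() writes the
* ack through the HC command register space, cnic_ack_igu_sb()
* directly into the IGU BAR (used on E2 chips); both optionally
* update the status block index and enable or disable the
* interrupt. */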
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
u16 index, u8 op, u8 update)
{
struct cnic_local *cp = dev->cnic_priv;
u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
igu_ack.status_block_index = index;
igu_ack.sb_id_and_flags =
((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
(storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
struct igu_regular cmd_data;
u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
cmd_data.sb_id_and_flags =
(index << IGU_REGULAR_SB_INDEX_SHIFT) |
(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
(update << IGU_REGULAR_BUPDATE_SHIFT) |
(op << IGU_REGULAR_ENABLE_INT_SHIFT);
CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
IGU_INT_DISABLE, 0);
}
static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
IGU_INT_DISABLE, 0);
}
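/* Drain one bnx2x KCQ until the status block index stops changing,
* i.e. no new completions arrived while the previous batch was being
* serviced. Returns the last status index seen. */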
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
/* status block index must be read before reading the KCQ */
rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that sblk fields can change. */
barrier();
if (last_status == *info->status_idx_ptr)
break;
last_status = *info->status_idx_ptr;
/* status block index must be read before reading the KCQ */
rmb();
}
return last_status;
}
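/* Bottom half for bnx2x interrupts: service kcq1 (and kcq2 on E2
* chips) and only re-arm the IGU once both queues are quiescent at
* the same status index. */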
static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
while (1) {
status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
status_idx, IGU_INT_ENABLE, 1);
break;
}
new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
if (new_status_idx != status_idx)
continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
break;
}
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
struct cnic_local *cp = dev->cnic_priv;
if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
cnic_doirq(dev);
cnic_chk_pkt_rings(cp);
return 0;
}
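/* Quiesce or restart the registered upper layer drivers around a
* hardware stop/start. ULP_F_CALL_PENDING flags the callback as in
* progress, presumably so the unregister path can wait it out. */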
static void cnic_ulp_stop(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int if_type;
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
}
static void cnic_ulp_start(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int if_type;
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_start(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
}
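/* Control entry point invoked by the bnx2/bnx2x ethernet driver:
* stop or start the cnic side of the device, or signal ramrod
* completion to a context waiting in cnic_bnx2x_destroy_ramrod() or
* cnic_bnx2x_fcoe_destroy(). */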
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
cnic_hold(dev);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
cnic_put(dev);
break;
case CNIC_CTL_START_CMD:
cnic_hold(dev);
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
cnic_put(dev);
break;
case CNIC_CTL_COMPLETION_CMD: {
u32 cid = BNX2X_SW_CID(info->data.comp.cid);
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
break;
}
default:
return -EINVAL;
}
return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
int i;
struct cnic_local *cp = dev->cnic_priv;
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_init(dev);
ulp_put(ulp_ops);
}
}
static void cnic_ulp_exit(struct cnic_dev *dev)
{
int i;
struct cnic_local *cp = dev->cnic_priv;
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_exit(dev);
ulp_put(ulp_ops);
}
}
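/* The cnic_cm_* functions below build L4 (TCP offload) kernel work queue
 * entries.  OFFLOAD_PG sets up the L2 path (PG) context carrying the
 * Ethernet header -- destination/source MACs and an optional VLAN tag --
 * that subsequent offloaded TCP connections are routed through.
 */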
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_offload_pg *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
l4kwqe->l2hdr_nbytes = ETH_HLEN;
l4kwqe->da0 = csk->ha[0];
l4kwqe->da1 = csk->ha[1];
l4kwqe->da2 = csk->ha[2];
l4kwqe->da3 = csk->ha[3];
l4kwqe->da4 = csk->ha[4];
l4kwqe->da5 = csk->ha[5];
l4kwqe->sa0 = dev->mac_addr[0];
l4kwqe->sa1 = dev->mac_addr[1];
l4kwqe->sa2 = dev->mac_addr[2];
l4kwqe->sa3 = dev->mac_addr[3];
l4kwqe->sa4 = dev->mac_addr[4];
l4kwqe->sa5 = dev->mac_addr[5];
l4kwqe->etype = ETH_P_IP;
l4kwqe->ipid_start = DEF_IPID_START;
l4kwqe->host_opaque = csk->l5_cid;
if (csk->vlan_id) {
l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
l4kwqe->vlan_tag = csk->vlan_id;
l4kwqe->l2hdr_nbytes += 4;
}
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_update_pg *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
l4kwqe->pg_cid = csk->pg_cid;
l4kwqe->da0 = csk->ha[0];
l4kwqe->da1 = csk->ha[1];
l4kwqe->da2 = csk->ha[2];
l4kwqe->da3 = csk->ha[3];
l4kwqe->da4 = csk->ha[4];
l4kwqe->da5 = csk->ha[5];
l4kwqe->pg_host_opaque = csk->l5_cid;
l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_upload *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->pg_cid;
return dev->submit_kwqes(dev, wqes, 1);
}
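/* Build a TCP connect request.  IPv4 needs two KWQEs (CONNECT1 and
 * CONNECT3); IPv6 inserts CONNECT2 between them to carry the remaining
 * 96 bits of the source and destination addresses.
 */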
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_connect_req1 *l4kwqe1;
struct l4_kwq_connect_req2 *l4kwqe2;
struct l4_kwq_connect_req3 *l4kwqe3;
struct kwqe *wqes[3];
u8 tcp_flags = 0;
int num_wqes = 2;
l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
memset(l4kwqe1, 0, sizeof(*l4kwqe1));
memset(l4kwqe2, 0, sizeof(*l4kwqe2));
memset(l4kwqe3, 0, sizeof(*l4kwqe3));
l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
l4kwqe3->flags =
L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
l4kwqe3->ka_timeout = csk->ka_timeout;
l4kwqe3->ka_interval = csk->ka_interval;
l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
l4kwqe3->tos = csk->tos;
l4kwqe3->ttl = csk->ttl;
l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
l4kwqe3->pmtu = csk->mtu;
l4kwqe3->rcv_buf = csk->rcv_buf;
l4kwqe3->snd_buf = csk->snd_buf;
l4kwqe3->seed = csk->seed;
wqes[0] = (struct kwqe *) l4kwqe1;
if (test_bit(SK_F_IPV6, &csk->flags)) {
wqes[1] = (struct kwqe *) l4kwqe2;
wqes[2] = (struct kwqe *) l4kwqe3;
num_wqes = 3;
l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
l4kwqe2->flags =
L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
sizeof(struct tcphdr);
} else {
wqes[1] = (struct kwqe *) l4kwqe3;
l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
sizeof(struct tcphdr);
}
l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
l4kwqe1->flags =
(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
l4kwqe1->cid = csk->cid;
l4kwqe1->pg_cid = csk->pg_cid;
l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
l4kwqe1->src_port = be16_to_cpu(csk->src_port);
l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
if (csk->tcp_flags & SK_TCP_NAGLE)
tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
if (csk->tcp_flags & SK_TCP_TIMESTAMP)
tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
if (csk->tcp_flags & SK_TCP_SACK)
tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
if (csk->tcp_flags & SK_TCP_SEG_SCALING)
tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
l4kwqe1->tcp_flags = tcp_flags;
return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_close_req *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->cid;
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_abort_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_reset_req *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->cid;
return dev->submit_kwqes(dev, wqes, 1);
}
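/* Claim the socket slot indexed by l5_cid.  The slot must be completely
 * idle (no offload still winding down, zero refcount, INUSE bit clear)
 * before it is reinitialized with the default TCP parameters.
 */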
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
u32 l5_cid, struct cnic_sock **csk, void *context)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_sock *csk1;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
return -EINVAL;
if (cp->ctx_tbl) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
return -EAGAIN;
}
csk1 = &cp->csk_tbl[l5_cid];
if (atomic_read(&csk1->ref_count))
return -EAGAIN;
if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
return -EBUSY;
csk1->dev = dev;
csk1->cid = cid;
csk1->l5_cid = l5_cid;
csk1->ulp_type = ulp_type;
csk1->context = context;
csk1->ka_timeout = DEF_KA_TIMEOUT;
csk1->ka_interval = DEF_KA_INTERVAL;
csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
csk1->tos = DEF_TOS;
csk1->ttl = DEF_TTL;
csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
csk1->rcv_buf = DEF_RCV_BUF;
csk1->snd_buf = DEF_SND_BUF;
csk1->seed = DEF_SEED;
*csk = csk1;
return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
if (csk->src_port) {
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
csk->src_port = 0;
}
}
static void cnic_close_conn(struct cnic_sock *csk)
{
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
cnic_cm_upload_pg(csk);
clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
}
cnic_cm_cleanup(csk);
}
static int cnic_cm_destroy(struct cnic_sock *csk)
{
if (!cnic_in_use(csk))
return -EINVAL;
csk_hold(csk);
clear_bit(SK_F_INUSE, &csk->flags);
smp_mb__after_clear_bit();
while (atomic_read(&csk->ref_count) != 1)
msleep(1);
cnic_cm_cleanup(csk);
csk->flags = 0;
csk_put(csk);
return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
struct net_device **vlan_dev)
{
if (dev->priv_flags & IFF_802_1Q_VLAN) {
*vlan_dev = vlan_dev_real_dev(dev);
return vlan_dev_vlan_id(dev);
}
*vlan_dev = dev;
return 0;
}
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_INET)
struct rtable *rt;
rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
}
return PTR_ERR(rt);
#else
return -ENETUNREACH;
#endif
}
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
*dst = ip6_route_output(&init_net, NULL, &fl6);
if (*dst)
return 0;
#endif
return -ENETUNREACH;
}
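/* Route the destination address and map the egress netdev (through any
 * VLAN) back to its cnic_dev.  The reference taken by cnic_from_netdev()
 * is dropped before returning, so callers that keep the device must take
 * their own hold.
 */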
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
int ulp_type)
{
struct cnic_dev *dev = NULL;
struct dst_entry *dst;
struct net_device *netdev = NULL;
int err = -ENETUNREACH;
if (dst_addr->sin_family == AF_INET)
err = cnic_get_v4_route(dst_addr, &dst);
else if (dst_addr->sin_family == AF_INET6) {
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *) dst_addr;
err = cnic_get_v6_route(dst_addr6, &dst);
} else
return NULL;
if (err)
return NULL;
if (!dst->dev)
goto done;
cnic_get_vlan(dst->dev, &netdev);
dev = cnic_from_netdev(netdev);
done:
dst_release(dst);
if (dev)
cnic_put(dev);
return dev;
}
static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
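/* Fill in the socket's addressing from the caller-supplied sockaddr pair:
 * destination IP/port, VLAN id and MTU from the route (when it egresses
 * through our netdev), and a local port -- either the one requested, if it
 * falls in the CNIC range and is free, or a newly allocated one.
 */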
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
int is_v6, rc = 0;
struct dst_entry *dst = NULL;
struct net_device *realdev;
__be16 local_port;
u32 port_id;
if (saddr->local.v6.sin6_family == AF_INET6 &&
saddr->remote.v6.sin6_family == AF_INET6)
is_v6 = 1;
else if (saddr->local.v4.sin_family == AF_INET &&
saddr->remote.v4.sin_family == AF_INET)
is_v6 = 0;
else
return -EINVAL;
clear_bit(SK_F_IPV6, &csk->flags);
if (is_v6) {
set_bit(SK_F_IPV6, &csk->flags);
cnic_get_v6_route(&saddr->remote.v6, &dst);
memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
sizeof(struct in6_addr));
csk->dst_port = saddr->remote.v6.sin6_port;
local_port = saddr->local.v6.sin6_port;
} else {
cnic_get_v4_route(&saddr->remote.v4, &dst);
csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
csk->dst_port = saddr->remote.v4.sin_port;
local_port = saddr->local.v4.sin_port;
}
csk->vlan_id = 0;
csk->mtu = dev->netdev->mtu;
if (dst && dst->dev) {
u16 vlan = cnic_get_vlan(dst->dev, &realdev);
if (realdev == dev->netdev) {
csk->vlan_id = vlan;
csk->mtu = dst_mtu(dst);
}
}
port_id = be16_to_cpu(local_port);
if (port_id >= CNIC_LOCAL_PORT_MIN &&
port_id < CNIC_LOCAL_PORT_MAX) {
if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
port_id = 0;
} else
port_id = 0;
if (!port_id) {
port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
if (port_id == -1) {
rc = -ENOMEM;
goto err_out;
}
local_port = cpu_to_be16(port_id);
}
csk->src_port = local_port;
err_out:
dst_release(dst);
return rc;
}
static void cnic_init_csk_state(struct cnic_sock *csk)
{
csk->state = 0;
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
int err = 0;
if (!cnic_in_use(csk))
return -EINVAL;
if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
return -EINVAL;
cnic_init_csk_state(csk);
err = cnic_get_route(csk, saddr);
if (err)
goto err_out;
err = cnic_resolve_addr(csk, saddr);
if (!err)
return 0;
err_out:
clear_bit(SK_F_CONNECT_START, &csk->flags);
return err;
}
static int cnic_cm_abort(struct cnic_sock *csk)
{
struct cnic_local *cp = csk->dev->cnic_priv;
u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
if (!cnic_in_use(csk))
return -EINVAL;
if (cnic_abort_prep(csk))
return cnic_cm_abort_req(csk);
/* Getting here means that we haven't started connect, or
* connect was not successful.
*/
cp->close_conn(csk, opcode);
if (csk->state != opcode)
return -EALREADY;
return 0;
}
static int cnic_cm_close(struct cnic_sock *csk)
{
if (!cnic_in_use(csk))
return -EINVAL;
	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return -EALREADY;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
u8 opcode)
{
struct cnic_ulp_ops *ulp_ops;
int ulp_type = csk->ulp_type;
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (ulp_ops) {
if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
ulp_ops->cm_connect_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
ulp_ops->cm_close_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
ulp_ops->cm_remote_abort(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
ulp_ops->cm_abort_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
ulp_ops->cm_remote_close(csk);
}
rcu_read_unlock();
}
static int cnic_cm_set_pg(struct cnic_sock *csk)
{
if (cnic_offld_prep(csk)) {
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cnic_cm_update_pg(csk);
else
cnic_cm_offload_pg(csk);
}
return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
u32 l5_cid = kcqe->pg_host_opaque;
u8 opcode = kcqe->op_code;
struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (!cnic_in_use(csk))
goto done;
if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
goto done;
}
/* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
goto done;
}
csk->pg_cid = kcqe->pg_cid;
set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
cnic_cm_conn_req(csk);
done:
csk_put(csk);
}
static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
ctx->timestamp = jiffies;
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
u8 opcode = l4kcqe->op_code;
u32 l5_cid;
struct cnic_sock *csk;
if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
cnic_process_fcoe_term_conn(dev, kcqe);
return;
}
if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
cnic_cm_process_offld_pg(dev, l4kcqe);
return;
}
l5_cid = l4kcqe->conn_id;
if (opcode & 0x80)
l5_cid = l4kcqe->cid;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
return;
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (!cnic_in_use(csk)) {
csk_put(csk);
return;
}
switch (opcode) {
case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
if (l4kcqe->status != 0) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
}
break;
case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
if (l4kcqe->status == 0)
set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
smp_mb__before_clear_bit();
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk, opcode);
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
cp->close_conn(csk, opcode);
break;
case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
cnic_cm_upcall(cp, csk, opcode);
break;
}
csk_put(csk);
}
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
struct cnic_dev *dev = data;
int i;
for (i = 0; i < num; i++)
cnic_cm_process_kcqe(dev, kcqe[i]);
}
static struct cnic_ulp_ops cm_ulp_ops = {
.indicate_kcqes = cnic_cm_indicate_kcqe,
};
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
kfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN)) {
cnic_cm_free_mem(dev);
return -ENOMEM;
}
return 0;
}
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
/* Unsolicited RESET_COMP or RESET_RECEIVED */
opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
csk->state = opcode;
}
	/* 1. If the event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
if (opcode == csk->state || csk->state == 0 ||
csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
if (csk->state == 0)
csk->state = opcode;
return 1;
}
}
return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
cnic_cm_upcall(cp, csk, opcode);
return;
}
clear_bit(SK_F_CONNECT_START, &csk->flags);
cnic_close_conn(csk);
csk->state = opcode;
cnic_cm_upcall(cp, csk, opcode);
}
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}
static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
get_random_bytes(&seed, 4);
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
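/* bnx2x close sequence: a close/reset completion first issues a
 * SEARCHER_DELETE ramrod (if the PG was offloaded); its completion then
 * issues TERMINATE_OFFLOAD, and only that completion finishes the close
 * and notifies the ULP.
 */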
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
union l5cm_specific_data l5_data;
u32 cmd = 0;
int close_complete = 0;
switch (opcode) {
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
if (cnic_ready_to_close(csk, opcode)) {
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
else
close_complete = 1;
}
break;
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
break;
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
close_complete = 1;
break;
}
if (cmd) {
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
&l5_data);
} else if (close_complete) {
ctx->timestamp = jiffies;
cnic_close_conn(csk);
cnic_cm_upcall(cp, csk, csk->state);
}
}
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
if (!cp->ctx_tbl)
return;
if (!netif_running(dev->netdev))
return;
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
msleep(10);
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
netdev_warn(dev->netdev, "CID %x not deleted\n",
ctx->cid);
}
cancel_delayed_work(&cp->delete_task);
flush_workqueue(cnic_wq);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
u32 port = CNIC_PORT(cp);
cnic_init_bnx2x_mac(dev);
cnic_bnx2x_set_tcp_timestamp(dev, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
DEF_MAX_DA_COUNT);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
DEF_MAX_CWND);
return 0;
}
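/* Delayed-work handler that destroys offloaded bnx2x connections.  A
 * context must be at least 2 seconds past its last completion before its
 * destroy ramrod is sent; younger contexts cause the work to requeue
 * itself and retry.
 */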
static void cnic_delete_task(struct work_struct *work)
{
struct cnic_local *cp;
struct cnic_dev *dev;
u32 i;
int need_resched = 0;
cp = container_of(work, struct cnic_local, delete_task.work);
dev = cp->dev;
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
!test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
need_resched = 1;
continue;
}
if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
cnic_bnx2x_destroy_ramrod(dev, i);
cnic_free_bnx2x_conn_resc(dev, i);
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
}
if (need_resched)
queue_delayed_work(cnic_wq, &cp->delete_task,
msecs_to_jiffies(10));
}
static int cnic_cm_open(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int err;
err = cnic_cm_alloc_mem(dev);
if (err)
return err;
err = cp->start_cm(dev);
if (err)
goto err_out;
INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
dev->cm_create = cnic_cm_create;
dev->cm_destroy = cnic_cm_destroy;
dev->cm_connect = cnic_cm_connect;
dev->cm_abort = cnic_cm_abort;
dev->cm_close = cnic_cm_close;
dev->cm_select_dev = cnic_cm_select_dev;
cp->ulp_handle[CNIC_ULP_L4] = dev;
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
return 0;
err_out:
cnic_cm_free_mem(dev);
return err;
}
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
cp->stop_cm(dev);
if (!cp->csk_tbl)
return 0;
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
struct cnic_sock *csk = &cp->csk_tbl[i];
clear_bit(SK_F_INUSE, &csk->flags);
cnic_cm_cleanup(csk);
}
cnic_cm_free_mem(dev);
return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
u32 cid_addr;
int i;
cid_addr = GET_CID_ADDR(cid);
for (i = 0; i < CTX_SIZE; i += 4)
cnic_ctx_wr(dev, cid_addr, i, 0);
}
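/* On the 5709, context memory is paged in host memory.  Program each
 * page's DMA address into the host page table and poll for the WRITE_REQ
 * bit to clear; a bit that never clears means the chip did not accept
 * the entry.
 */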
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
struct cnic_local *cp = dev->cnic_priv;
int ret = 0, i;
u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
if (CHIP_NUM(cp) != CHIP_NUM_5709)
return 0;
for (i = 0; i < cp->ctx_blks; i++) {
int j;
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
(u64) cp->ctx_arr[i].mapping >> 32);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
for (j = 0; j < 10; j++) {
val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
break;
udelay(5);
}
if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
ret = -EBUSY;
break;
}
}
return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
tasklet_kill(&cp->cnic_irq_task);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
static int cnic_request_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
tasklet_disable(&cp->cnic_irq_task);
return err;
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
int err, i = 0;
int sblk_num = cp->status_blk_num;
u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
BNX2_HC_SB_CONFIG_1;
CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
(unsigned long) dev);
err = cnic_request_irq(dev);
if (err)
return err;
while (cp->status_blk.bnx2->status_completion_producer_index &&
i < 10) {
CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
1 << (11 + sblk_num));
udelay(10);
i++;
barrier();
}
if (cp->status_blk.bnx2->status_completion_producer_index) {
cnic_free_irq(dev);
goto failed;
}
} else {
struct status_block *sblk = cp->status_blk.gen;
u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
int i = 0;
while (sblk->status_completion_producer_index && i < 10) {
CNIC_WR(dev, BNX2_HC_COMMAND,
hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
udelay(10);
i++;
barrier();
}
if (sblk->status_completion_producer_index)
goto failed;
}
return 0;
failed:
netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
return;
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
{
u32 max_conn;
max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
dev->max_iscsi_conn = max_conn;
}
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
return;
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, tx_cid, sb_id;
u32 val, offset0, offset1, offset2, offset3;
int i;
struct tx_bd *txbd;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
tx_cid = 20;
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
(TX_TSS_CID << 7));
cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
}
cp->tx_cons = *cp->tx_cons_ptr;
cid_addr = GET_CID_ADDR(tx_cid);
if (CHIP_NUM(cp) == CHIP_NUM_5709) {
u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
for (i = 0; i < PHY_CTX_SIZE; i += 4)
cnic_ctx_wr(dev, cid_addr2, i, 0);
offset0 = BNX2_L2CTX_TYPE_XI;
offset1 = BNX2_L2CTX_CMD_TYPE_XI;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else {
cnic_init_context(dev, tx_cid);
cnic_init_context(dev, tx_cid + 1);
offset0 = BNX2_L2CTX_TYPE;
offset1 = BNX2_L2CTX_CMD_TYPE;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
}
val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
cnic_ctx_wr(dev, cid_addr, offset0, val);
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
cnic_ctx_wr(dev, cid_addr, offset1, val);
txbd = (struct tx_bd *) udev->l2_ring;
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
val = (u64) ring_map >> 32;
cnic_ctx_wr(dev, cid_addr, offset2, val);
txbd->tx_bd_haddr_hi = val;
val = (u64) ring_map & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, offset3, val);
txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
struct rx_bd *rxbd;
struct status_block *s_blk = cp->status_blk.gen;
dma_addr_t ring_map = udev->l2_ring_map;
sb_id = cp->status_blk_num;
cnic_init_context(dev, 2);
cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
coal_reg = BNX2_HC_COMMAND;
coal_val = CNIC_RD(dev, coal_reg);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
coal_reg = BNX2_HC_COALESCE_NOW;
coal_val = 1 << (11 + sb_id);
}
i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
CNIC_WR(dev, coal_reg, coal_val);
udelay(10);
i++;
barrier();
}
cp->rx_cons = *cp->rx_cons_ptr;
cid_addr = GET_CID_ADDR(2);
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
if (sb_id == 0)
val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
else
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->rx_bd_len = cp->l2_single_buf_size;
rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
struct kwqe *wqes[1], l2kwqe;
memset(&l2kwqe, 0, sizeof(l2kwqe));
wqes[0] = &l2kwqe;
l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
(L2_KWQE_OPCODE_VALUE_FLUSH <<
KWQE_OPCODE_SHIFT) | 2;
dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 val;
val = cp->func << 2;
cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
val = cnic_reg_rd_ind(dev, cp->shmem_base +
BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
dev->mac_addr[0] = (u8) (val >> 8);
dev->mac_addr[1] = (u8) val;
CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
val = cnic_reg_rd_ind(dev, cp->shmem_base +
BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
dev->mac_addr[2] = (u8) (val >> 24);
dev->mac_addr[3] = (u8) (val >> 16);
dev->mac_addr[4] = (u8) (val >> 8);
dev->mac_addr[5] = (u8) val;
CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
if (CHIP_NUM(cp) != CHIP_NUM_5709)
val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
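/* Full bnx2 bring-up: program the MQ kernel-bypass block size, set up
 * 5709 context paging, initialize the KWQ and KCQ contexts (rebinding
 * them to the MSI-X status block when one is in use), and finally release
 * the CP and COM processors via their scratchpad doorbells.
 */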
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct status_block *sblk = cp->status_blk.gen;
u32 val, kcq_cid_addr, kwq_cid_addr;
int err;
cnic_set_bnx2_mac(dev);
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
if (BCM_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
val |= (BCM_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
err = cnic_setup_5709_context(dev, 1);
if (err)
return err;
cnic_init_context(dev, KWQ_CID);
cnic_init_context(dev, KCQ_CID);
kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
cp->max_kwq_idx = MAX_KWQ_IDX;
cp->kwq_prod_idx = 0;
cp->kwq_con_idx = 0;
set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
else
cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
val = (u32) cp->kwq_info.pgtbl_map;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
(u16 *) &sblk->status_completion_producer_index;
cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
val = (u32) cp->kcq1.dma.pgtbl_map;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
cp->int_num = 0;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *msblk = cp->status_blk.bnx2;
u32 sb_id = cp->status_blk_num;
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
(u16 *) &msblk->status_completion_producer_index;
cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
}
	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
/* Enable Command Scheduler notification when we write to either
* the Send Queue or Receive Queue producer indexes of the kernel
* bypass contexts. */
CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
	/* Notify COM when the driver posts an application buffer. */
CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
err = cnic_init_bnx2_irq(dev);
if (err) {
netdev_err(dev->netdev, "cnic_init_irq failed\n");
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
return err;
}
cnic_get_bnx2_iscsi_info(dev);
return 0;
}
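/* Publish the context blocks to the bnx2x context table, first rounding
 * each block's DMA address up to the required alignment.
 */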
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_offset = ethdev->ctx_tbl_offset;
int i;
for (i = 0; i < cp->ctx_blks; i++) {
struct cnic_ctx *ctx = &cp->ctx_arr[i];
dma_addr_t map = ctx->mapping;
if (cp->ctx_align) {
unsigned long mask = cp->ctx_align - 1;
map = (map + mask) & ~mask;
}
cnic_ctx_tbl_wr(dev, start_offset + i, map);
}
}
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
(unsigned long) dev);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
u16 sb_id, u8 sb_index,
u8 disable)
{
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*sb_index +
offsetof(struct hc_index_data, flags);
u16 flags = CNIC_RD16(dev, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
HC_INDEX_DATA_HC_ENABLED);
CNIC_WR16(dev, addr, flags);
}
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u8 sb_id = cp->status_blk_num;
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
offsetof(struct hc_index_data, timeout), 64 / 12);
cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int port = CNIC_PORT(cp);
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
memset(txbd, 0, BCM_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
reg_bd->addr_hi = start_bd->addr_hi;
reg_bd->addr_lo = start_bd->addr_lo + 0x10;
start_bd->nbytes = cpu_to_le16(0x10);
start_bd->nbd = cpu_to_le16(3);
start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
start_bd->general_data = (UNICAST_ADDRESS <<
ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
}
val = (u64) ring_map >> 32;
txbd->next_bd.addr_hi = cpu_to_le32(val);
data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
val = (u64) ring_map & 0xffffffff;
txbd->next_bd.addr_lo = cpu_to_le32(val);
data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
/* Other ramrod params */
data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
/* reset xstorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_XSTRORM_INTMEM +
XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
}
cp->tx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BCM_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
int port = CNIC_PORT(cp);
u32 cli = cp->ethdev->iscsi_l2_client_id;
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
/* General data */
data->general.client_id = cli;
data->general.statistics_en_flg = 1;
data->general.statistics_counter_id = cli;
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
/* Other ramrod params */
data->rx.client_qzone_id = cl_qzone_id;
data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
data->rx.status_block_id = BNX2X_DEF_SB_ID;
data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
data->rx.outer_vlan_removal_enable_flg = 1;
/* reset tstorm and ustorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_TSTRORM_INTMEM +
TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
val = BAR_USTRORM_INTMEM +
USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
}
cp->rx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
cp->rx_cons = *cp->rx_cons_ptr;
}
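/* Point kcq1 (the iSCSI event queue) and, on E2 chips, kcq2 (the FCoE
 * event queue) at their producer and status index slots inside the
 * chip's status block.
 */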
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
cp->kcq1.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
} else {
struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
cp->kcq1.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
}
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
USTORM_FCOE_EQ_PROD_OFFSET(pfid);
cp->kcq2.sw_prod_idx = 0;
cp->kcq2.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
cp->kcq2.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
}
}
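/* bnx2x bring-up: derive the PF id (port-mode dependent on E2), create
 * the iSCSI and FCoE CID tables, and program the single iSCSI event
 * queue's page addresses and status-block binding into CSTORM.
 */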
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int func = CNIC_FUNC(cp), ret, i;
u32 pfid;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
val = (val >> 1) & 1;
if (val)
cp->pfid = func >> 1;
else
cp->pfid = func & 0x6;
} else {
cp->pfid = func;
}
pfid = cp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid);
if (ret)
return -ENOMEM;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
BNX2X_FCOE_NUM_CONNECTIONS,
cp->fcoe_start_cid);
if (ret)
return -ENOMEM;
}
cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
cnic_init_bnx2x_kcq(dev);
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
HC_INDEX_ISCSI_EQ_CONS);
for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
cp->conn_buf_info.pgtbl[2 * i]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
cp->conn_buf_info.pgtbl[(2 * i) + 1]);
}
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
cnic_setup_bnx2x_context(dev);
ret = cnic_init_bnx2x_irq(dev);
if (ret)
return ret;
return 0;
}
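/* Bring up the L2 rings.  For bnx2x this primes the rx producers, issues
 * a CLIENT_SETUP ramrod built from the data prepared above, and waits
 * briefly for its completion before enabling the ring.
 */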
static void cnic_init_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 cid = cp->ethdev->iscsi_l2_cid;
u32 cl_qzone_id;
struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0};
u32 off, i;
rx_prods.bd_prod = 0;
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
off = BAR_USTRORM_INTMEM +
(BNX2X_CHIP_IS_E2(cp->chip_id) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
data = udev->l2_buf;
memset(data, 0, sizeof(*data));
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
msleep(1);
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
}
}
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_shutdown_bnx2_rx_ring(dev);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 cid = cp->ethdev->iscsi_l2_cid;
union l5cm_specific_data l5_data;
int i;
cnic_ring_ctl(dev, cid, cli, 0);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
msleep(1);
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_HALT did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
cid, NONE_CONNECTION_TYPE, &l5_data);
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
static int cnic_register_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
if (!ethdev)
return -ENODEV;
if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
return 0;
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err)
netdev_err(dev->netdev, "register_cnic failed\n");
return err;
}
static void cnic_unregister_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!ethdev)
return;
ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EALREADY;
dev->regview = ethdev->io_base;
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
if (err) {
netdev_err(dev->netdev, "allocate resource failure\n");
goto err1;
}
err = cp->start_hw(dev);
if (err)
goto err1;
err = cnic_cm_open(dev);
if (err)
goto err1;
set_bit(CNIC_F_CNIC_UP, &dev->flags);
cp->enable_int(dev);
return 0;
err1:
cp->free_resc(dev);
pci_dev_put(dev->pcidev);
return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
cnic_disable_bnx2_int_sync(dev);
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
cnic_init_context(dev, KWQ_CID);
cnic_init_context(dev, KCQ_CID);
cnic_setup_5709_context(dev, 0);
cnic_free_irq(dev);
cnic_free_resc(dev);
}
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_free_irq(dev);
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
static void cnic_stop_hw(struct cnic_dev *dev)
{
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
/* Need to wait for the ring shutdown event to complete
* before clearing the CNIC_UP flag.
*/
while (cp->udev->uio_dev != -1 && i < 15) {
msleep(100);
i++;
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
pci_dev_put(dev->pcidev);
}
}
static void cnic_free_dev(struct cnic_dev *dev)
{
int i = 0;
while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
msleep(100);
i++;
}
if (atomic_read(&dev->ref_count) != 0)
netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
netdev_info(dev->netdev, "Removed CNIC device\n");
dev_put(dev->netdev);
kfree(dev);
}
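/* The cnic_dev and its cnic_local private area come from one allocation;
 * cnic_priv simply points just past the cnic_dev header.
 */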
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
struct pci_dev *pdev)
{
struct cnic_dev *cdev;
struct cnic_local *cp;
int alloc_size;
alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
cdev = kzalloc(alloc_size , GFP_KERNEL);
if (cdev == NULL) {
netdev_err(dev, "allocate dev struct failure\n");
return NULL;
}
cdev->netdev = dev;
cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
cdev->register_device = cnic_register_device;
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cp = cdev->cnic_priv;
cp->dev = cdev;
cp->l2_single_buf_size = 0x400;
cp->l2_rx_ring_size = 3;
spin_lock_init(&cp->cnic_ulp_lock);
netdev_info(dev, "Added CNIC device\n");
return cdev;
}
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put(bnx2_cnic_probe);
}
if (!ethdev)
return NULL;
pdev = ethdev->pdev;
if (!pdev)
return NULL;
dev_hold(dev);
pci_dev_get(pdev);
if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
(pdev->revision < 0x10)) {
pci_dev_put(pdev);
goto cnic_err;
}
pci_dev_put(pdev);
cdev = cnic_alloc_dev(dev, pdev);
if (cdev == NULL)
goto cnic_err;
set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
cp->cnic_ops = &cnic_bnx2_ops;
cp->start_hw = cnic_start_bnx2_hw;
cp->stop_hw = cnic_stop_bnx2_hw;
cp->setup_pgtbl = cnic_setup_page_tbl;
cp->alloc_resc = cnic_alloc_bnx2_resc;
cp->free_resc = cnic_free_resc;
cp->start_cm = cnic_cm_init_bnx2_hw;
cp->stop_cm = cnic_cm_stop_bnx2_hw;
cp->enable_int = cnic_enable_bnx2_int;
cp->disable_int_sync = cnic_disable_bnx2_int_sync;
cp->close_conn = cnic_close_bnx2_conn;
cp->next_idx = cnic_bnx2_next_idx;
cp->hw_idx = cnic_bnx2_hw_idx;
return cdev;
cnic_err:
dev_put(dev);
return NULL;
}
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
probe = symbol_get(bnx2x_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put(bnx2x_cnic_probe);
}
if (!ethdev)
return NULL;
pdev = ethdev->pdev;
if (!pdev)
return NULL;
dev_hold(dev);
cdev = cnic_alloc_dev(dev, pdev);
if (cdev == NULL) {
dev_put(dev);
return NULL;
}
set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
cp->setup_pgtbl = cnic_setup_page_tbl_le;
cp->alloc_resc = cnic_alloc_bnx2x_resc;
cp->free_resc = cnic_free_resc;
cp->start_cm = cnic_cm_init_bnx2x_hw;
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
if (BNX2X_CHIP_IS_E2(cp->chip_id))
cp->ack_int = cnic_ack_bnx2x_e2_msix;
else
cp->ack_int = cnic_ack_bnx2x_msix;
cp->close_conn = cnic_close_bnx2x_conn;
cp->next_idx = cnic_bnx2x_next_idx;
cp->hw_idx = cnic_bnx2x_hw_idx;
return cdev;
}
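/* Recognize CNIC-capable netdevs by their ethtool driver name ("bnx2" or
 * "bnx2x") and instantiate the matching cnic_dev.
 */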
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
struct ethtool_drvinfo drvinfo;
struct cnic_dev *cdev = NULL;
if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
memset(&drvinfo, 0, sizeof(drvinfo));
dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
if (!strcmp(drvinfo.driver, "bnx2"))
cdev = init_bnx2_cnic(dev);
if (!strcmp(drvinfo.driver, "bnx2x"))
cdev = init_bnx2x_cnic(dev);
if (cdev) {
write_lock(&cnic_dev_lock);
list_add(&cdev->list, &cnic_dev_list);
write_unlock(&cnic_dev_lock);
}
}
return cdev;
}
/* netdev event handler: tracks register/up/going-down/unregister events
 * on candidate netdevs, creating, starting, stopping or freeing the
 * associated cnic_dev and notifying registered ULPs.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *netdev = ptr;
struct cnic_dev *dev;
int if_type;
int new_dev = 0;
dev = cnic_from_netdev(netdev);
if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
new_dev = 1;
cnic_hold(dev);
}
}
if (dev) {
struct cnic_local *cp = dev->cnic_priv;
if (new_dev)
cnic_ulp_init(dev);
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
}
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
}
rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
void *ctx;
ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
if (!ulp_ops || !ulp_ops->indicate_netevent)
continue;
ctx = cp->ulp_handle[if_type];
ulp_ops->indicate_netevent(ctx, event);
}
rcu_read_unlock();
if (event == NETDEV_GOING_DOWN) {
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
cnic_unregister_netdev(dev);
} else if (event == NETDEV_UNREGISTER) {
write_lock(&cnic_dev_lock);
list_del_init(&dev->list);
write_unlock(&cnic_dev_lock);
cnic_put(dev);
cnic_free_dev(dev);
goto done;
}
cnic_put(dev);
}
done:
return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
.notifier_call = cnic_netdev_event
};
static void cnic_release(void)
{
struct cnic_dev *dev;
struct cnic_uio_dev *udev;
while (!list_empty(&cnic_dev_list)) {
dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
}
cnic_ulp_exit(dev);
cnic_unregister_netdev(dev);
list_del_init(&dev->list);
cnic_free_dev(dev);
}
while (!list_empty(&cnic_udev_list)) {
udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
list);
cnic_free_uio(udev);
}
}
static int __init cnic_init(void)
{
int rc = 0;
pr_info("%s", version);
rc = register_netdevice_notifier(&cnic_netdev_notifier);
if (rc) {
cnic_release();
return rc;
}
cnic_wq = create_singlethread_workqueue("cnic_wq");
if (!cnic_wq) {
cnic_release();
unregister_netdevice_notifier(&cnic_netdev_notifier);
return -ENOMEM;
}
return 0;
}
static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
destroy_workqueue(cnic_wq);
}
module_init(cnic_init);
module_exit(cnic_exit);
| gpl-2.0 |
subingangadharan/rpmsg | drivers/net/cnic.c | 152 | 142020 | /* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
* Modified and maintained by: Michael Chan <mchan@broadcom.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME "cnic"
static char version[] __devinitdata =
"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
"Chen (zongxi@broadcom.com");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
return rcu_dereference_protected(cnic_ulp_tbl[type],
lockdep_is_held(&cnic_lock));
}
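/*
 * Usage sketch (illustrative, not part of the driver): cnic_lock must be
 * held across the lookup, otherwise the lockdep_is_held() check inside
 * rcu_dereference_protected() will complain:
 *
 *	mutex_lock(&cnic_lock);
 *	ulp_ops = cnic_ulp_tbl_prot(CNIC_ULP_ISCSI);
 *	if (ulp_ops)
 *		ulp_get(ulp_ops);
 *	mutex_unlock(&cnic_lock);
 */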
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2,
.cnic_ctl = cnic_ctl,
};
static struct cnic_ops cnic_bnx2x_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2x,
.cnic_ctl = cnic_ctl,
};
static struct workqueue_struct *cnic_wq;
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
struct cnic_dev *dev;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (udev->uio_dev != -1)
return -EBUSY;
rtnl_lock();
dev = udev->dev;
if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
rtnl_unlock();
return -ENODEV;
}
udev->uio_dev = iminor(inode);
cnic_shutdown_rings(dev);
cnic_init_rings(dev);
rtnl_unlock();
return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
udev->uio_dev = -1;
return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
atomic_inc(&dev->ref_count);
}
static inline void cnic_put(struct cnic_dev *dev)
{
atomic_dec(&dev->ref_count);
}
static inline void csk_hold(struct cnic_sock *csk)
{
atomic_inc(&csk->ref_count);
}
static inline void csk_put(struct cnic_sock *csk)
{
atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
struct cnic_dev *cdev;
read_lock(&cnic_dev_lock);
list_for_each_entry(cdev, &cnic_dev_list, list) {
if (netdev == cdev->netdev) {
cnic_hold(cdev);
read_unlock(&cnic_dev_lock);
return cdev;
}
}
read_unlock(&cnic_dev_lock);
return NULL;
}
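/*
 * cnic_from_netdev() returns the matching device with its reference
 * count already raised, so every successful lookup must be paired with
 * cnic_put(). Minimal sketch (illustrative only):
 *
 *	struct cnic_dev *cdev = cnic_from_netdev(netdev);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		cnic_put(cdev);
 *	}
 */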
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
atomic_inc(&ulp_ops->ref_count);
}
static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
info.cmd = DRV_CTL_STOP_L2_CMD;
ring->cid = cid;
ring->client_id = cl_id;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
return 0;
}
}
return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
struct cnic_sock *csk)
{
struct iscsi_path path_req;
char *buf = NULL;
u16 len = 0;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
struct cnic_ulp_ops *ulp_ops;
struct cnic_uio_dev *udev = cp->udev;
int rc = 0, retry = 0;
if (!udev || udev->uio_dev == -1)
return -ENODEV;
if (csk) {
len = sizeof(path_req);
buf = (char *) &path_req;
memset(&path_req, 0, len);
msg_type = ISCSI_KEVENT_PATH_REQ;
path_req.handle = (u64) csk->l5_cid;
if (test_bit(SK_F_IPV6, &csk->flags)) {
memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
sizeof(struct in6_addr));
path_req.ip_addr_len = 16;
} else {
memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
sizeof(struct in_addr));
path_req.ip_addr_len = 4;
}
path_req.vlan_id = csk->vlan_id;
path_req.pmtu = csk->mtu;
}
while (retry < 3) {
rc = 0;
rcu_read_lock();
ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
msg_type, buf, len);
rcu_read_unlock();
if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
break;
msleep(100);
retry++;
}
return 0;
}
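/*
 * Retry policy note: only ISCSI_KEVENT_PATH_REQ messages are retried
 * (up to three attempts, 100 ms apart), presumably to give the iSCSI
 * upper layer time to register; an IF_DOWN event is sent once, and the
 * function returns 0 regardless of the final rc.
 */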
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
char *buf, u16 len)
{
int rc = -EINVAL;
switch (msg_type) {
case ISCSI_UEVENT_PATH_UPDATE: {
struct cnic_local *cp;
u32 l5_cid;
struct cnic_sock *csk;
struct iscsi_path *path_resp;
if (len < sizeof(*path_resp))
break;
path_resp = (struct iscsi_path *) buf;
cp = dev->cnic_priv;
l5_cid = (u32) path_resp->handle;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
rcu_read_lock();
if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
rc = -ENODEV;
rcu_read_unlock();
break;
}
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (cnic_in_use(csk) &&
test_bit(SK_F_CONNECT_START, &csk->flags)) {
memcpy(csk->ha, path_resp->mac_addr, 6);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
else
memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
sizeof(struct in_addr));
if (is_valid_ether_addr(csk->ha)) {
cnic_cm_set_pg(csk);
} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
clear_bit(SK_F_CONNECT_START, &csk->flags);
}
}
csk_put(csk);
rcu_read_unlock();
rc = 0;
}
}
return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
return 0;
if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
return 0;
}
return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
return 1;
}
return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
return 1;
}
return 0;
}
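/*
 * cnic_close_prep() and cnic_abort_prep() spin on SK_F_OFFLD_SCHED, which
 * doubles as a hand-rolled lock against a concurrent offload: clearing
 * SK_F_CONNECT_START first (with the barrier) ensures cnic_offld_prep()
 * either observes the cleared bit and backs out, releasing the bit, or
 * the offload path completes and releases it later for the msleep()
 * waiter here.
 */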
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
struct cnic_dev *dev;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
}
read_unlock(&cnic_dev_lock);
atomic_set(&ulp_ops->ref_count, 0);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
mutex_unlock(&cnic_lock);
/* Prevent race conditions with netdev_event */
rtnl_lock();
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_init(dev);
}
rtnl_unlock();
return 0;
}
int cnic_unregister_driver(int ulp_type)
{
struct cnic_dev *dev;
struct cnic_ulp_ops *ulp_ops;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
goto out_unlock;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d still has devices registered\n",
__func__, ulp_type);
read_unlock(&cnic_dev_lock);
goto out_unlock;
}
}
read_unlock(&cnic_dev_lock);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
msleep(100);
i++;
}
if (atomic_read(&ulp_ops->ref_count) != 0)
netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
return 0;
out_unlock:
mutex_unlock(&cnic_lock);
return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d has already been registered to this device\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
mutex_unlock(&cnic_lock);
return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EINVAL;
}
mutex_unlock(&cnic_lock);
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
synchronize_rcu();
while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
i < 20) {
msleep(100);
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
id_tbl->start = start_id;
id_tbl->max = size;
id_tbl->next = 0;
spin_lock_init(&id_tbl->lock);
id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
return 0;
}
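/*
 * Sizing note: the bitmap reserves one bit per id, rounded up to whole
 * 32-bit words, i.e. DIV_ROUND_UP(size, 32) * 4 bytes. Worked example:
 * size = 100 ids -> DIV_ROUND_UP(100, 32) = 4 words -> 16 bytes. Since
 * test_bit()/set_bit() operate on unsigned long, rounding up to whole
 * longs (BITS_TO_LONGS) would arguably be safer on 64-bit hosts.
 */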
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
kfree(id_tbl->table);
id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
int ret = -1;
id -= id_tbl->start;
if (id >= id_tbl->max)
return ret;
spin_lock(&id_tbl->lock);
if (!test_bit(id, id_tbl->table)) {
set_bit(id, id_tbl->table);
ret = 0;
}
spin_unlock(&id_tbl->lock);
return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
u32 id;
spin_lock(&id_tbl->lock);
id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
if (id >= id_tbl->max) {
id = -1;
if (id_tbl->next != 0) {
id = find_first_zero_bit(id_tbl->table, id_tbl->next);
if (id >= id_tbl->next)
id = -1;
}
}
if (id < id_tbl->max) {
set_bit(id, id_tbl->table);
id_tbl->next = (id + 1) & (id_tbl->max - 1);
id += id_tbl->start;
}
spin_unlock(&id_tbl->lock);
return id;
}
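/*
 * The hint update above, (id + 1) & (id_tbl->max - 1), equals
 * (id + 1) % max only when max is a power of two; for other sizes the
 * masked value is still a valid index but not a true wrap-around.
 * Allocation stays correct either way because find_next_zero_bit() plus
 * the find_first_zero_bit() fallback cover the whole bitmap; the hint
 * only keeps allocation roughly round-robin.
 */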
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
if (id == -1)
return;
id -= id_tbl->start;
if (id >= id_tbl->max)
return;
clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
if (!dma->pg_arr)
return;
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
}
if (dma->pgtbl) {
dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
dma->pgtbl, dma->pgtbl_map);
dma->pgtbl = NULL;
}
kfree(dma->pg_arr);
dma->pg_arr = NULL;
dma->num_pages = 0;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in big endian format. */
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in little endian format. */
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
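/*
 * The two page-table builders differ only in word order. Illustrative
 * layout for pg_map_arr[i] = 0x0000000122334455:
 *
 *	cnic_setup_page_tbl()    (bnx2):  pgtbl[2*i] = 0x00000001,
 *	                                  pgtbl[2*i+1] = 0x22334455
 *	cnic_setup_page_tbl_le() (bnx2x): pgtbl[2*i] = 0x22334455,
 *	                                  pgtbl[2*i+1] = 0x00000001
 *
 * Each 32-bit word is additionally passed through cpu_to_le32(), so on a
 * big endian host the stored bytes are swapped relative to the CPU value.
 */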
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
int pages, int use_pg_tbl)
{
int i, size;
struct cnic_local *cp = dev->cnic_priv;
size = pages * (sizeof(void *) + sizeof(dma_addr_t));
dma->pg_arr = kzalloc(size, GFP_ATOMIC);
if (dma->pg_arr == NULL)
return -ENOMEM;
dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
dma->num_pages = pages;
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
goto error;
}
if (!use_pg_tbl)
return 0;
dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
~(BCM_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
goto error;
cp->setup_pgtbl(dev, dma);
return 0;
error:
cnic_free_dma(dev, dma);
return -ENOMEM;
}
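/*
 * Minimal usage sketch for the DMA pair (illustrative only): pages is the
 * page count and use_pg_tbl selects whether cp->setup_pgtbl builds a page
 * table. cnic_alloc_dma() frees everything itself on failure, so callers
 * only release on the success path:
 *
 *	struct cnic_dma dma;
 *
 *	memset(&dma, 0, sizeof(dma));
 *	if (cnic_alloc_dma(dev, &dma, 4, 1))
 *		return -ENOMEM;
 *	... use dma.pg_arr[i] / dma.pg_map_arr[i] ...
 *	cnic_free_dma(dev, &dma);
 */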
static void cnic_free_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
for (i = 0; i < cp->ctx_blks; i++) {
if (cp->ctx_arr[i].ctx) {
dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
cp->ctx_arr[i].ctx,
cp->ctx_arr[i].mapping);
cp->ctx_arr[i].ctx = NULL;
}
}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
uio_unregister_device(&udev->cnic_uinfo);
if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map);
udev->l2_buf = NULL;
}
if (udev->l2_ring) {
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
udev->l2_ring = NULL;
}
pci_dev_put(udev->pdev);
kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
if (!udev)
return;
write_lock(&cnic_dev_lock);
list_del_init(&udev->list);
write_unlock(&cnic_dev_lock);
__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
if (udev) {
udev->dev = NULL;
cp->udev = NULL;
}
cnic_free_context(dev);
kfree(cp->ctx_arr);
cp->ctx_arr = NULL;
cp->ctx_blks = 0;
cnic_free_dma(dev, &cp->gbl_buf_info);
cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
cnic_free_dma(dev, &cp->kcq2.dma);
cnic_free_dma(dev, &cp->kcq1.dma);
kfree(cp->iscsi_tbl);
cp->iscsi_tbl = NULL;
kfree(cp->ctx_tbl);
cp->ctx_tbl = NULL;
cnic_free_id_tbl(&cp->fcoe_cid_tbl);
cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (CHIP_NUM(cp) == CHIP_NUM_5709) {
int i, k, arr_size;
cp->ctx_blk_size = BCM_PAGE_SIZE;
cp->cids_per_blk = BCM_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
k = 0;
for (i = 0; i < 2; i++) {
u32 j, reg, off, lo, hi;
if (i == 0)
off = BNX2_PG_CTX_MAP;
else
off = BNX2_ISCSI_CTX_MAP;
reg = cnic_reg_rd_ind(dev, off);
lo = reg >> 16;
hi = reg & 0xffff;
for (j = lo; j < hi; j += cp->cids_per_blk, k++)
cp->ctx_arr[k].cid = j;
}
cp->ctx_blks = k;
if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
cp->ctx_blks = 0;
return -ENOMEM;
}
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
}
}
return 0;
}
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
int err, i, is_bnx2 = 0;
struct kcqe **kcq;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
is_bnx2 = 1;
err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
if (err)
return err;
kcq = (struct kcqe **) info->dma.pg_arr;
info->kcq = kcq;
if (is_bnx2)
return 0;
for (i = 0; i < KCQ_PAGE_CNT; i++) {
struct bnx2x_bd_chain_next *next =
(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
int j = i + 1;
if (j >= KCQ_PAGE_CNT)
j = 0;
next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
}
return 0;
}
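/*
 * On bnx2x the loop above chains the KCQ pages into a ring: the
 * bnx2x_bd_chain_next element at the tail of page i holds the DMA address
 * of page i + 1, and the last page points back to page 0, so the chip can
 * follow the chain without host help. The bnx2 path instead relies on the
 * separate page table requested via use_pg_tbl (is_bnx2) in
 * cnic_alloc_dma().
 */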
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
cp->udev = udev;
read_unlock(&cnic_dev_lock);
return 0;
}
}
read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
return -ENOMEM;
udev->uio_dev = -1;
udev->dev = dev;
udev->pdev = dev->pcidev;
udev->l2_ring_size = pages * BCM_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_ring)
goto err_udev;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_buf)
goto err_dma;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
cp->udev = udev;
return 0;
err_dma:
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
err_udev:
kfree(udev);
return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct uio_info *uinfo;
int ret = 0;
if (!udev)
return -ENOMEM;
uinfo = &udev->cnic_uinfo;
uinfo->mem[0].addr = dev->netdev->base_addr;
uinfo->mem[0].internal_addr = dev->regview;
uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
}
uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
uinfo->mem[2].size = udev->l2_ring_size;
uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
uinfo->mem[3].size = udev->l2_buf_size;
uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
uinfo->open = cnic_uio_open;
uinfo->release = cnic_uio_close;
if (udev->uio_dev == -1) {
if (!uinfo->priv) {
uinfo->priv = udev;
ret = uio_register_device(&udev->pdev->dev, uinfo);
}
} else {
cnic_init_rings(dev);
}
return ret;
}
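/*
 * Memory map handed to user space through UIO, as set up above: mem[0] is
 * the device register BAR (physical), mem[1] the status block, mem[2] the
 * L2 ring and mem[3] the L2 buffers (all logical kernel memory). A
 * process mmap()ing these regions drives the rings directly; the open
 * handler resets them via cnic_shutdown_rings()/cnic_init_rings().
 */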
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ret;
ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
if (ret)
goto error;
cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
ret = cnic_alloc_context(dev);
if (ret)
goto error;
ret = cnic_alloc_uio_rings(dev, 2);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
blks = total_mem / ctx_blk_size;
if (total_mem % ctx_blk_size)
blks++;
if (blks > cp->ethdev->ctx_tbl_len)
return -ENOMEM;
cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
if (!BNX2X_CHIP_IS_57710(cp->chip_id))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
for (i = 0; i < blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
cnic_free_context(dev);
cp->ctx_blk_size += cp->ctx_align;
i = -1;
continue;
}
}
}
return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
cp->iro_arr = ethdev->iro_arr;
cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
cp->fcoe_init_cid = 0x10;
}
if (start_cid < BNX2X_ISCSI_START_CID) {
u32 delta = BNX2X_ISCSI_START_CID - start_cid;
cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
cp->fcoe_start_cid += delta;
cp->max_cid_space += delta;
}
cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
GFP_KERNEL);
if (!cp->iscsi_tbl)
goto error;
cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
cp->max_cid_space, GFP_KERNEL);
if (!cp->ctx_tbl)
goto error;
for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
}
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
goto error;
n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
off;
if ((i % n) == (n - 1))
j++;
}
ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2);
if (ret)
goto error;
}
pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
ret = cnic_alloc_bnx2x_context(dev);
if (ret)
goto error;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
cp->l2_rx_ring_size = 15;
ret = cnic_alloc_uio_rings(dev, 4);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
return cp->max_kwq_idx -
((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
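/*
 * Standard ring arithmetic, assuming max_kwq_idx is a power-of-two-minus-
 * one mask: (prod - con) & mask is the occupied slot count, and the free
 * space is the mask minus that, so one slot always stays in reserve and
 * prod == con unambiguously means empty. Worked example with mask = 0xff:
 * prod = 0x105, con = 0x100 -> 5 occupied -> 0xfa available.
 */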
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct kwqe *prod_qe;
u16 prod, sw_prod, i;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
!test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
for (i = 0; i < num_wqes; i++) {
prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
prod++;
sw_prod = prod & MAX_KWQ_IDX;
}
cp->kwq_prod_idx = prod;
CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
spin_unlock_bh(&cp->cnic_ulp_lock);
return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
union l5cm_specific_data *l5_data)
{
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
dma_addr_t map;
map = ctx->kwqe_data_mapping;
l5_data->phy_address.lo = (u64) map & 0xffffffff;
l5_data->phy_address.hi = (u64) map >> 32;
return ctx->kwqe_data;
}
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
int ret;
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
BNX2X_HW_CID(cp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
kwq[0] = (struct kwqe_16 *) &kwqe;
spin_lock_bh(&cp->cnic_ulp_lock);
ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
spin_unlock_bh(&cp->cnic_ulp_lock);
if (ret == 1)
return 0;
return -EBUSY;
}
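/*
 * drv_submit_kwqes_16() returns the number of WQEs the bnx2x driver
 * accepted; for the single-entry array built here, 1 means success and
 * anything else is mapped to -EBUSY so the caller can retry.
 */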
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
struct kcqe *cqes[], u32 num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (likely(ulp_ops)) {
ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
cqes, num_cqes);
}
rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
u32 pfid = cp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
cp->num_iscsi_tasks;
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
return 0;
/* init Tstorm RAM */
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
/* init Ustorm RAM */
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
if (!dev->max_iscsi_conn) {
kcqe.completion_status =
ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
goto done;
}
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
done:
kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
cnic_free_dma(dev, &iscsi->hq_info);
cnic_free_dma(dev, &iscsi->r2tq_info);
cnic_free_dma(dev, &iscsi->task_array_info);
cnic_free_id(&cp->cid_tbl, ctx->cid);
} else {
cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
}
ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
u32 cid;
int ret, pages;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
if (cid == -1) {
ret = -ENOMEM;
goto error;
}
ctx->cid = cid;
return 0;
}
cid = cnic_alloc_new_id(&cp->cid_tbl);
if (cid == -1) {
ret = -ENOMEM;
goto error;
}
ctx->cid = cid;
pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
return 0;
error:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
struct regpair *ctx_addr)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
unsigned long align_off = 0;
dma_addr_t ctx_map;
void *ctx;
if (cp->ctx_align) {
unsigned long mask = cp->ctx_align - 1;
if (cp->ctx_arr[blk].mapping & mask)
align_off = cp->ctx_align -
(cp->ctx_arr[blk].mapping & mask);
}
ctx_map = cp->ctx_arr[blk].mapping + align_off +
(off * BNX2X_CONTEXT_MEM_SIZE);
ctx = cp->ctx_arr[blk].ctx + align_off +
(off * BNX2X_CONTEXT_MEM_SIZE);
if (init)
memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
ctx_addr->lo = ctx_map & 0xffffffff;
ctx_addr->hi = (u64) ctx_map >> 32;
return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_offload1 *req1 =
(struct iscsi_kwqe_conn_offload1 *) wqes[0];
struct iscsi_kwqe_conn_offload2 *req2 =
(struct iscsi_kwqe_conn_offload2 *) wqes[1];
struct iscsi_kwqe_conn_offload3 *req3;
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
u32 hw_cid = BNX2X_HW_CID(cp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
return -EINVAL;
n_max = req2->num_additional_wqes + 2;
ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
if (ictx == NULL)
return -ENOMEM;
req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
ictx->xstorm_ag_context.hq_prod = 1;
ictx->xstorm_st_context.iscsi.first_burst_length =
ISCSI_DEF_FIRST_BURST_LEN;
ictx->xstorm_st_context.iscsi.max_send_pdu_length =
ISCSI_DEF_MAX_RECV_SEG_LEN;
ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
req1->sq_page_table_addr_lo;
ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
req1->sq_page_table_addr_hi;
ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
iscsi->hq_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
(u64) iscsi->hq_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
iscsi->hq_info.pgtbl[0];
ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
iscsi->hq_info.pgtbl[1];
ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
iscsi->r2tq_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
(u64) iscsi->r2tq_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
iscsi->r2tq_info.pgtbl[0];
ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
iscsi->r2tq_info.pgtbl[1];
ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
BNX2X_ISCSI_PBL_NOT_CACHED;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
req2->rq_page_table_addr_lo & PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
ictx->tstorm_st_context.tcp.flags2 |=
TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
ictx->tstorm_st_context.tcp.ooo_support_mode =
TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
ictx->ustorm_st_context.ring.rq.pbl_base.lo =
req2->rq_page_table_addr_lo;
ictx->ustorm_st_context.ring.rq.pbl_base.hi =
req2->rq_page_table_addr_hi;
ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
iscsi->r2tq_info.pgtbl_map & 0xffffffff;
ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
(u64) iscsi->r2tq_info.pgtbl_map >> 32;
ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
iscsi->r2tq_info.pgtbl[0];
ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
iscsi->r2tq_info.pgtbl[1];
ictx->ustorm_st_context.ring.cq_pbl_base.lo =
req1->cq_page_table_addr_lo;
ictx->ustorm_st_context.ring.cq_pbl_base.hi =
req1->cq_page_table_addr_hi;
ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
ictx->ustorm_st_context.task_pbe_cache_index =
BNX2X_ISCSI_PBL_NOT_CACHED;
ictx->ustorm_st_context.task_pdu_cache_index =
BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
if (j == 3) {
if (n >= n_max)
break;
req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
j = 0;
}
ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
req3->qp_first_pte[j].hi;
ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
req3->qp_first_pte[j].lo;
}
ictx->ustorm_st_context.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->ustorm_st_context.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
ictx->ustorm_st_context.tce_phy_addr.lo =
iscsi->task_array_info.pgtbl[0];
ictx->ustorm_st_context.tce_phy_addr.hi =
iscsi->task_array_info.pgtbl[1];
ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->ustorm_st_context.num_cqs = cp->num_cqs;
ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
ictx->ustorm_st_context.negotiated_rx_and_flags |=
ISCSI_DEF_MAX_BURST_LEN;
ictx->ustorm_st_context.negotiated_rx |=
ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
ictx->cstorm_st_context.hq_pbl_base.lo =
iscsi->hq_info.pgtbl_map & 0xffffffff;
ictx->cstorm_st_context.hq_pbl_base.hi =
(u64) iscsi->hq_info.pgtbl_map >> 32;
ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
ictx->cstorm_st_context.task_pbl_base.lo =
iscsi->task_array_info.pgtbl_map & 0xffffffff;
ictx->cstorm_st_context.task_pbl_base.hi =
(u64) iscsi->task_array_info.pgtbl_map >> 32;
/* CSTORM and USTORM initialization differ: CSTORM requires the CQ
* doorbell base address & not the PTE address */
ictx->cstorm_st_context.cq_db_base.lo =
req1->cq_page_table_addr_lo & PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
for (i = 0; i < cp->num_cqs; i++) {
ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
ISCSI_INITIAL_SN;
ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
ISCSI_INITIAL_SN;
}
ictx->xstorm_ag_context.cdu_reserved =
CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
ISCSI_CONNECTION_TYPE);
ictx->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
ISCSI_CONNECTION_TYPE);
return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
u32 l5_cid;
int ret = 0;
if (num < 2) {
*work = num;
return -EINVAL;
}
req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
if ((num - 2) < req2->num_additional_wqes) {
*work = num;
return -EINVAL;
}
*work = 2 + req2->num_additional_wqes;
l5_cid = req1->iscsi_conn_id;
if (l5_cid >= MAX_ISCSI_TBL_SZ)
return -EINVAL;
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
kcqe.iscsi_conn_id = l5_cid;
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
kcqe.completion_status =
ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
goto done;
}
if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
atomic_dec(&cp->iscsi_conn);
goto done;
}
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
if (ret < 0) {
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
goto done;
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_update *req =
(struct iscsi_kwqe_conn_update *) kwqe;
void *data;
union l5cm_specific_data l5_data;
u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
int ret;
if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
return -EINVAL;
data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!data)
return -ENOMEM;
memcpy(data, kwqe, sizeof(struct kwqe));
ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
u32 hw_cid;
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
hw_cid = BNX2X_HW_CID(cp, ctx->cid);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
if (ret == 0)
wait_event(ctx->waitq, ctx->wait_cond);
return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_destroy *req =
(struct iscsi_kwqe_conn_destroy *) kwqe;
u32 l5_cid = req->reserved0;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
int ret = 0;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
goto skip_cfc_delete;
if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
if (delta > (2 * HZ))
delta = 0;
set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
queue_delayed_work(cnic_wq, &cp->delete_task, delta);
goto destroy_reply;
}
ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
skip_cfc_delete:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
destroy_reply:
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
kcqe.iscsi_conn_id = l5_cid;
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
kcqe.iscsi_conn_context_id = req->context_id;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
struct l4_kwq_connect_req1 *kwqe1,
struct l4_kwq_connect_req3 *kwqe3,
struct l5cm_active_conn_buffer *conn_buf)
{
struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
struct l5cm_xstorm_conn_buffer *xstorm_buf =
&conn_buf->xstorm_conn_buffer;
struct l5cm_tstorm_conn_buffer *tstorm_buf =
&conn_buf->tstorm_conn_buffer;
struct regpair context_addr;
u32 cid = BNX2X_SW_CID(kwqe1->cid);
struct in6_addr src_ip, dst_ip;
int i;
u32 *addrp;
addrp = (u32 *) &conn_addr->local_ip_addr;
for (i = 0; i < 4; i++, addrp++)
src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
addrp = (u32 *) &conn_addr->remote_ip_addr;
for (i = 0; i < 4; i++, addrp++)
dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
xstorm_buf->context_addr.hi = context_addr.hi;
xstorm_buf->context_addr.lo = context_addr.lo;
xstorm_buf->mss = 0xffff;
xstorm_buf->rcv_buf = kwqe3->rcv_buf;
if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
xstorm_buf->pseudo_header_checksum =
swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
tstorm_buf->params |=
L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
if (kwqe3->ka_timeout) {
tstorm_buf->ka_enable = 1;
tstorm_buf->ka_timeout = kwqe3->ka_timeout;
tstorm_buf->ka_interval = kwqe3->ka_interval;
tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
}
tstorm_buf->rcv_buf = kwqe3->rcv_buf;
tstorm_buf->snd_buf = kwqe3->snd_buf;
tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[4]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[2]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
mac[1]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
struct cnic_local *cp = dev->cnic_priv;
u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
u16 tstorm_flags = 0;
if (tcp_ts) {
xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
}
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
struct cnic_local *cp = dev->cnic_priv;
struct l4_kwq_connect_req1 *kwqe1 =
(struct l4_kwq_connect_req1 *) wqes[0];
struct l4_kwq_connect_req3 *kwqe3;
struct l5cm_active_conn_buffer *conn_buf;
struct l5cm_conn_addr_params *conn_addr;
union l5cm_specific_data l5_data;
u32 l5_cid = kwqe1->pg_cid;
struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
int ret;
if (num < 2) {
*work = num;
return -EINVAL;
}
if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
*work = 3;
else
*work = 2;
if (num < *work) {
*work = num;
return -EINVAL;
}
if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "conn_buf size too big\n");
return -ENOMEM;
}
conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!conn_buf)
return -ENOMEM;
memset(conn_buf, 0, sizeof(*conn_buf));
conn_addr = &conn_buf->conn_addr_buf;
conn_addr->remote_addr_0 = csk->ha[0];
conn_addr->remote_addr_1 = csk->ha[1];
conn_addr->remote_addr_2 = csk->ha[2];
conn_addr->remote_addr_3 = csk->ha[3];
conn_addr->remote_addr_4 = csk->ha[4];
conn_addr->remote_addr_5 = csk->ha[5];
if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
struct l4_kwq_connect_req2 *kwqe2 =
(struct l4_kwq_connect_req2 *) wqes[1];
conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
}
kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
conn_addr->local_tcp_port = kwqe1->src_port;
conn_addr->remote_tcp_port = kwqe1->dst_port;
conn_addr->pmtu = kwqe3->pmtu;
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
cnic_bnx2x_set_tcp_timestamp(dev,
kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
if (!ret)
set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
union l5cm_specific_data l5_data;
int ret;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
union l5cm_specific_data l5_data;
int ret;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
struct l4_kcq kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
kcqe.pg_host_opaque = req->host_opaque;
kcqe.pg_cid = req->host_opaque;
kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
struct l4_kcq kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
kcqe.pg_host_opaque = req->pg_host_opaque;
kcqe.pg_cid = req->pg_cid;
kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_stat *req;
struct fcoe_stat_ramrod_params *fcoe_stat;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
int ret;
u32 cid;
req = (struct fcoe_kwqe_stat *) kwqe;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_stat)
return -ENOMEM;
memset(fcoe_stat, 0, sizeof(*fcoe_stat));
memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
int ret;
struct cnic_local *cp = dev->cnic_priv;
u32 cid;
struct fcoe_init_ramrod_params *fcoe_init;
struct fcoe_kwqe_init1 *req1;
struct fcoe_kwqe_init2 *req2;
struct fcoe_kwqe_init3 *req3;
union l5cm_specific_data l5_data;
if (num < 3) {
*work = num;
return -EINVAL;
}
req1 = (struct fcoe_kwqe_init1 *) wqes[0];
req2 = (struct fcoe_kwqe_init2 *) wqes[1];
req3 = (struct fcoe_kwqe_init3 *) wqes[2];
if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
*work = 1;
return -EINVAL;
}
if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
*work = 2;
return -EINVAL;
}
if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_init size too big\n");
return -ENOMEM;
}
fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_init)
return -ENOMEM;
memset(fcoe_init, 0, sizeof(*fcoe_init));
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
fcoe_init->eq_next_page_addr.lo =
cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
fcoe_init->eq_next_page_addr.hi =
(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
fcoe_init->sb_num = cp->status_blk_num;
fcoe_init->eq_prod = MAX_KCQ_IDX;
fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
cp->kcq2.sw_prod_idx = 0;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
return ret;
}
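/*
 * FCoE connection offload consumes four consecutive KWQEs.  On any
 * failure, a KCQE with CTX_ALLOC_FAILURE status is synthesized so the
 * upper layer still sees a completion for the request.
 */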
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
int ret = 0;
u32 cid = -1, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
struct fcoe_kwqe_conn_offload1 *req1;
struct fcoe_kwqe_conn_offload2 *req2;
struct fcoe_kwqe_conn_offload3 *req3;
struct fcoe_kwqe_conn_offload4 *req4;
struct fcoe_conn_offload_ramrod_params *fcoe_offload;
struct cnic_context *ctx;
struct fcoe_context *fctx;
struct regpair ctx_addr;
union l5cm_specific_data l5_data;
struct fcoe_kcqe kcqe;
struct kcqe *cqes[1];
if (num < 4) {
*work = num;
return -EINVAL;
}
req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
*work = 4;
l5_cid = req1->fcoe_conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
goto err_reply;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
goto err_reply;
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
ret = 0;
goto err_reply;
}
cid = ctx->cid;
fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
if (fctx) {
u32 hw_cid = BNX2X_HW_CID(cp, cid);
u32 val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
FCOE_CONNECTION_TYPE);
fctx->xstorm_ag_context.cdu_reserved = val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
FCOE_CONNECTION_TYPE);
fctx->ustorm_ag_context.cdu_usage = val;
}
if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_offload size too big\n");
goto err_reply;
}
fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_offload)
goto err_reply;
memset(fcoe_offload, 0, sizeof(*fcoe_offload));
memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
cid = BNX2X_HW_CID(cp, cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (!ret)
set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
return ret;
err_reply:
if (cid != -1)
cnic_free_bnx2x_conn_resc(dev, l5_cid);
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
kcqe.fcoe_conn_id = req1->fcoe_conn_id;
kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
return ret;
}
static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_enable_disable *req;
struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_enable size too big\n");
return -ENOMEM;
}
fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_enable)
return -ENOMEM;
memset(fcoe_enable, 0, sizeof(*fcoe_enable));
memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_enable_disable *req;
struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
netdev_err(dev->netdev, "fcoe_disable size too big\n");
return -ENOMEM;
}
fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
if (!fcoe_disable)
return -ENOMEM;
memset(fcoe_disable, 0, sizeof(*fcoe_disable));
memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_conn_destroy *req;
union l5cm_specific_data l5_data;
int ret;
u32 cid, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx;
struct fcoe_kcqe kcqe;
struct kcqe *cqes[1];
req = (struct fcoe_kwqe_conn_destroy *) kwqe;
cid = req->context_id;
l5_cid = req->conn_id;
if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
return -EINVAL;
l5_cid += BNX2X_FCOE_L5_CID_BASE;
ctx = &cp->ctx_tbl[l5_cid];
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (ret == 0) {
wait_event(ctx->waitq, ctx->wait_cond);
set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
queue_delayed_work(cnic_wq, &cp->delete_task,
msecs_to_jiffies(2000));
}
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
kcqe.fcoe_conn_id = req->conn_id;
kcqe.fcoe_conn_context_id = cid;
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
return ret;
}
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
int ret;
u32 cid;
req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
for (i = 0; i < num_wqes; ) {
kwqe = wqes[i];
opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
work = 1;
switch (opcode) {
case ISCSI_KWQE_OPCODE_INIT1:
ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_INIT2:
ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
num_wqes - i, &work);
break;
case ISCSI_KWQE_OPCODE_UPDATE_CONN:
ret = cnic_bnx2x_iscsi_update(dev, kwqe);
break;
case ISCSI_KWQE_OPCODE_DESTROY_CONN:
ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_CONNECT1:
ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
&work);
break;
case L4_KWQE_OPCODE_VALUE_CLOSE:
ret = cnic_bnx2x_close(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_RESET:
ret = cnic_bnx2x_reset(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
ret = cnic_bnx2x_offload_pg(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
ret = cnic_bnx2x_update_pg(dev, kwqe);
break;
case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
ret = 0;
break;
default:
ret = 0;
netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
opcode);
break;
}
if (ret < 0)
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
i += work;
}
return 0;
}
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
struct cnic_local *cp = dev->cnic_priv;
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
return -EINVAL;
for (i = 0; i < num_wqes; ) {
kwqe = wqes[i];
opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
work = 1;
switch (opcode) {
case FCOE_KWQE_OPCODE_INIT1:
ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
num_wqes - i, &work);
break;
case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
num_wqes - i, &work);
break;
case FCOE_KWQE_OPCODE_ENABLE_CONN:
ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DISABLE_CONN:
ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DESTROY_CONN:
ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_DESTROY:
ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
break;
case FCOE_KWQE_OPCODE_STAT:
ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
break;
default:
ret = 0;
netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
opcode);
break;
}
if (ret < 0)
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
i += work;
}
return 0;
}
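/*
 * Dispatch a batch of KWQEs on bnx2x devices.  The layer code of the
 * first KWQE selects the handler: iSCSI, L4, and L2 work goes through
 * the iSCSI path, FCoE work through the FCoE path.
 */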
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes)
{
int ret = -EINVAL;
u32 layer_code;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2x is down */
if (!num_wqes)
return 0;
layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
switch (layer_code) {
case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
case KWQE_FLAGS_LAYER_MASK_L4:
case KWQE_FLAGS_LAYER_MASK_L2:
ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
break;
case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
break;
}
return ret;
}
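/* FCoE TERMINATE_CONN completions are steered to the L4/CM handler. */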
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
return KCQE_FLAGS_LAYER_MASK_L4;
return opflag & KCQE_FLAGS_LAYER_MASK;
}
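/*
 * Deliver completed KCQEs to the upper-layer drivers.  Consecutive
 * KCQEs sharing the same layer mask are batched into a single
 * indicate_kcqes() call.  Ramrod completions are counted so that the
 * SPQ credits can be returned in one shot at the end.
 */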
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
int i, j, comp = 0;
i = 0;
j = 1;
while (num_cqes) {
struct cnic_ulp_ops *ulp_ops;
int ulp_type;
u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
comp++;
while (j < num_cqes) {
u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
break;
if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
comp++;
j++;
}
if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
ulp_type = CNIC_ULP_RDMA;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
ulp_type = CNIC_ULP_ISCSI;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
ulp_type = CNIC_ULP_FCOE;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
ulp_type = CNIC_ULP_L4;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
goto end;
else {
netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
kcqe_op_flag);
goto end;
}
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (likely(ulp_ops)) {
ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
cp->completed_kcq + i, j);
}
rcu_read_unlock();
end:
num_cqes -= j;
i += j;
j = 1;
}
if (unlikely(comp))
cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
return idx + 1;
}
static u16 cnic_bnx2_hw_idx(u16 idx)
{
return idx;
}
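/*
 * On bnx2x, the last entry of each KCQ page does not hold a KCQE (it
 * is used to link to the next page), so ring indices that land on it
 * are skipped.
 */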
static u16 cnic_bnx2x_next_idx(u16 idx)
{
idx++;
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
static u16 cnic_bnx2x_hw_idx(u16 idx)
{
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
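/*
 * Collect new KCQEs between the software producer index and the
 * hardware producer index into cp->completed_kcq[].  Only complete
 * sequences are counted: entries flagged KCQE_FLAGS_NEXT belong to a
 * multi-KCQE group and are held back until the final entry arrives.
 */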
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
struct cnic_local *cp = dev->cnic_priv;
u16 i, ri, hw_prod, last;
struct kcqe *kcqe;
int kcqe_cnt = 0, last_cnt = 0;
i = ri = last = info->sw_prod_idx;
ri &= MAX_KCQ_IDX;
hw_prod = *info->hw_prod_idx_ptr;
hw_prod = cp->hw_idx(hw_prod);
while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
cp->completed_kcq[kcqe_cnt++] = kcqe;
i = cp->next_idx(i);
ri = i & MAX_KCQ_IDX;
if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
last_cnt = kcqe_cnt;
last = i;
}
}
info->sw_prod_idx = last;
return last_cnt;
}
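/*
 * Scan the L2 receive completion ring for ramrod completions (client
 * setup or halt) and return how many were found.  Only meaningful on
 * bnx2x class devices.
 */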
static int cnic_l2_completion(struct cnic_local *cp)
{
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
u32 cmd;
int comp = 0;
if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
return 0;
hw_cons = *cp->rx_cons_ptr;
if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
hw_cons++;
sw_cons = cp->rx_cons;
while (sw_cons != hw_cons) {
u8 cqe_fp_flags;
cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
cmd == RAMROD_CMD_ID_ETH_HALT)
comp++;
}
sw_cons = BNX2X_NEXT_RCQE(sw_cons);
}
return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
u16 rx_cons, tx_cons;
int comp = 0;
if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
rx_cons = *cp->rx_cons_ptr;
tx_cons = *cp->tx_cons_ptr;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
comp = cnic_l2_completion(cp);
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
if (cp->udev)
uio_event_notify(&cp->udev->cnic_uinfo);
}
if (comp)
clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
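/*
 * Drain kcq1, re-reading the status block index after each pass so
 * that KCQEs arriving during servicing are not missed, then write the
 * new software producer index back to the hardware.
 */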
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
/* status block index must be read before reading other fields */
rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that status_blk fields can change. */
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
/* status block index must be read first */
rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
}
CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
cnic_chk_pkt_rings(cp);
return status_idx;
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
struct status_block *sblk = status_blk;
return sblk->status_idx;
}
return cnic_service_bnx2_queues(dev);
}
static void cnic_service_bnx2_msix(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
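/* Prefetch the status block and next KCQE, then defer to the tasklet. */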
static void cnic_doirq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
tasklet_schedule(&cp->cnic_irq_task);
}
}
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
struct cnic_dev *dev = dev_instance;
struct cnic_local *cp = dev->cnic_priv;
if (cp->ack_int)
cp->ack_int(dev);
cnic_doirq(dev);
return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
u16 index, u8 op, u8 update)
{
struct cnic_local *cp = dev->cnic_priv;
u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
igu_ack.status_block_index = index;
igu_ack.sb_id_and_flags =
((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
(storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
struct igu_regular cmd_data;
u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
cmd_data.sb_id_and_flags =
(index << IGU_REGULAR_SB_INDEX_SHIFT) |
(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
(update << IGU_REGULAR_BUPDATE_SHIFT) |
(op << IGU_REGULAR_ENABLE_INT_SHIFT);
CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
IGU_INT_DISABLE, 0);
}
static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
IGU_INT_DISABLE, 0);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
/* status block index must be read before reading the KCQ */
rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that sblk fields can change. */
barrier();
if (last_status == *info->status_idx_ptr)
break;
last_status = *info->status_idx_ptr;
/* status block index must be read before reading the KCQ */
rmb();
}
return last_status;
}
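/*
 * Bottom-half handler for bnx2x: service kcq1 (and kcq2 on E2 chips),
 * looping until the status block index is stable, then re-enable the
 * interrupt through the HC or IGU.
 */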
static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
while (1) {
status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
status_idx, IGU_INT_ENABLE, 1);
break;
}
new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
if (new_status_idx != status_idx)
continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
break;
}
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
struct cnic_local *cp = dev->cnic_priv;
if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
cnic_doirq(dev);
cnic_chk_pkt_rings(cp);
return 0;
}
static void cnic_ulp_stop(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int if_type;
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
}
static void cnic_ulp_start(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int if_type;
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_start(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
}
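/*
 * Control entry point invoked by the bnx2/bnx2x ethernet driver for
 * stop, start, and ramrod-completion events.
 */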
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
cnic_hold(dev);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
cnic_put(dev);
break;
case CNIC_CTL_START_CMD:
cnic_hold(dev);
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
cnic_put(dev);
break;
case CNIC_CTL_COMPLETION_CMD: {
u32 cid = BNX2X_SW_CID(info->data.comp.cid);
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
break;
}
default:
return -EINVAL;
}
return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
int i;
struct cnic_local *cp = dev->cnic_priv;
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_init(dev);
ulp_put(ulp_ops);
}
}
static void cnic_ulp_exit(struct cnic_dev *dev)
{
int i;
struct cnic_local *cp = dev->cnic_priv;
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_exit(dev);
ulp_put(ulp_ops);
}
}
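/*
 * Build and submit an OFFLOAD_PG KWQE carrying the L2 parameters for
 * the connection: destination and source MAC addresses, EtherType, and
 * an optional VLAN tag (which adds 4 bytes to the L2 header length).
 */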
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_offload_pg *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
l4kwqe->l2hdr_nbytes = ETH_HLEN;
l4kwqe->da0 = csk->ha[0];
l4kwqe->da1 = csk->ha[1];
l4kwqe->da2 = csk->ha[2];
l4kwqe->da3 = csk->ha[3];
l4kwqe->da4 = csk->ha[4];
l4kwqe->da5 = csk->ha[5];
l4kwqe->sa0 = dev->mac_addr[0];
l4kwqe->sa1 = dev->mac_addr[1];
l4kwqe->sa2 = dev->mac_addr[2];
l4kwqe->sa3 = dev->mac_addr[3];
l4kwqe->sa4 = dev->mac_addr[4];
l4kwqe->sa5 = dev->mac_addr[5];
l4kwqe->etype = ETH_P_IP;
l4kwqe->ipid_start = DEF_IPID_START;
l4kwqe->host_opaque = csk->l5_cid;
if (csk->vlan_id) {
l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
l4kwqe->vlan_tag = csk->vlan_id;
l4kwqe->l2hdr_nbytes += 4;
}
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_update_pg *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
l4kwqe->pg_cid = csk->pg_cid;
l4kwqe->da0 = csk->ha[0];
l4kwqe->da1 = csk->ha[1];
l4kwqe->da2 = csk->ha[2];
l4kwqe->da3 = csk->ha[3];
l4kwqe->da4 = csk->ha[4];
l4kwqe->da5 = csk->ha[5];
l4kwqe->pg_host_opaque = csk->l5_cid;
l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_upload *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
l4kwqe->flags =
L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->pg_cid;
return dev->submit_kwqes(dev, wqes, 1);
}
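/*
 * Build the TCP connect request.  An IPv4 connect needs CONNECT1 and
 * CONNECT3 KWQEs; IPv6 inserts a CONNECT2 KWQE in between to carry the
 * upper 96 bits of the addresses.  The MSS is derived from the path
 * MTU minus the IP and TCP header sizes.
 */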
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_connect_req1 *l4kwqe1;
struct l4_kwq_connect_req2 *l4kwqe2;
struct l4_kwq_connect_req3 *l4kwqe3;
struct kwqe *wqes[3];
u8 tcp_flags = 0;
int num_wqes = 2;
l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
memset(l4kwqe1, 0, sizeof(*l4kwqe1));
memset(l4kwqe2, 0, sizeof(*l4kwqe2));
memset(l4kwqe3, 0, sizeof(*l4kwqe3));
l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
l4kwqe3->flags =
L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
l4kwqe3->ka_timeout = csk->ka_timeout;
l4kwqe3->ka_interval = csk->ka_interval;
l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
l4kwqe3->tos = csk->tos;
l4kwqe3->ttl = csk->ttl;
l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
l4kwqe3->pmtu = csk->mtu;
l4kwqe3->rcv_buf = csk->rcv_buf;
l4kwqe3->snd_buf = csk->snd_buf;
l4kwqe3->seed = csk->seed;
wqes[0] = (struct kwqe *) l4kwqe1;
if (test_bit(SK_F_IPV6, &csk->flags)) {
wqes[1] = (struct kwqe *) l4kwqe2;
wqes[2] = (struct kwqe *) l4kwqe3;
num_wqes = 3;
l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
l4kwqe2->flags =
L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
sizeof(struct tcphdr);
} else {
wqes[1] = (struct kwqe *) l4kwqe3;
l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
sizeof(struct tcphdr);
}
l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
l4kwqe1->flags =
(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
l4kwqe1->cid = csk->cid;
l4kwqe1->pg_cid = csk->pg_cid;
l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
l4kwqe1->src_port = be16_to_cpu(csk->src_port);
l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
if (csk->tcp_flags & SK_TCP_NAGLE)
tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
if (csk->tcp_flags & SK_TCP_TIMESTAMP)
tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
if (csk->tcp_flags & SK_TCP_SACK)
tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
if (csk->tcp_flags & SK_TCP_SEG_SCALING)
tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
l4kwqe1->tcp_flags = tcp_flags;
return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_close_req *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->cid;
return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_abort_req(struct cnic_sock *csk)
{
struct cnic_dev *dev = csk->dev;
struct l4_kwq_reset_req *l4kwqe;
struct kwqe *wqes[1];
l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
memset(l4kwqe, 0, sizeof(*l4kwqe));
wqes[0] = (struct kwqe *) l4kwqe;
l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
l4kwqe->cid = csk->cid;
return dev->submit_kwqes(dev, wqes, 1);
}
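/*
 * Allocate a socket from the per-device table, indexed by l5_cid, and
 * initialize it with the default TCP parameters.  Fails with -EAGAIN
 * if the slot is still referenced or mid-offload, and -EBUSY if it is
 * already marked in use.
 */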
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
u32 l5_cid, struct cnic_sock **csk, void *context)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_sock *csk1;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
return -EINVAL;
if (cp->ctx_tbl) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
return -EAGAIN;
}
csk1 = &cp->csk_tbl[l5_cid];
if (atomic_read(&csk1->ref_count))
return -EAGAIN;
if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
return -EBUSY;
csk1->dev = dev;
csk1->cid = cid;
csk1->l5_cid = l5_cid;
csk1->ulp_type = ulp_type;
csk1->context = context;
csk1->ka_timeout = DEF_KA_TIMEOUT;
csk1->ka_interval = DEF_KA_INTERVAL;
csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
csk1->tos = DEF_TOS;
csk1->ttl = DEF_TTL;
csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
csk1->rcv_buf = DEF_RCV_BUF;
csk1->snd_buf = DEF_SND_BUF;
csk1->seed = DEF_SEED;
*csk = csk1;
return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
if (csk->src_port) {
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
csk->src_port = 0;
}
}
static void cnic_close_conn(struct cnic_sock *csk)
{
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
cnic_cm_upload_pg(csk);
clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
}
cnic_cm_cleanup(csk);
}
static int cnic_cm_destroy(struct cnic_sock *csk)
{
if (!cnic_in_use(csk))
return -EINVAL;
csk_hold(csk);
clear_bit(SK_F_INUSE, &csk->flags);
smp_mb__after_clear_bit();
while (atomic_read(&csk->ref_count) != 1)
msleep(1);
cnic_cm_cleanup(csk);
csk->flags = 0;
csk_put(csk);
return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
struct net_device **vlan_dev)
{
if (dev->priv_flags & IFF_802_1Q_VLAN) {
*vlan_dev = vlan_dev_real_dev(dev);
return vlan_dev_vlan_id(dev);
}
*vlan_dev = dev;
return 0;
}
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_INET)
struct rtable *rt;
rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
}
return PTR_ERR(rt);
#else
return -ENETUNREACH;
#endif
}
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
*dst = ip6_route_output(&init_net, NULL, &fl6);
if (*dst)
return 0;
#endif
return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
int ulp_type)
{
struct cnic_dev *dev = NULL;
struct dst_entry *dst;
struct net_device *netdev = NULL;
int err = -ENETUNREACH;
if (dst_addr->sin_family == AF_INET)
err = cnic_get_v4_route(dst_addr, &dst);
else if (dst_addr->sin_family == AF_INET6) {
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *) dst_addr;
err = cnic_get_v6_route(dst_addr6, &dst);
} else
return NULL;
if (err)
return NULL;
if (!dst->dev)
goto done;
cnic_get_vlan(dst->dev, &netdev);
dev = cnic_from_netdev(netdev);
done:
dst_release(dst);
if (dev)
cnic_put(dev);
return dev;
}
static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
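/*
 * Resolve the destination route, record the remote address and ports,
 * pick up the VLAN id and path MTU when the route egresses through our
 * netdev, and reserve a local source port from the port table.
 */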
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
int is_v6, rc = 0;
struct dst_entry *dst = NULL;
struct net_device *realdev;
__be16 local_port;
u32 port_id;
if (saddr->local.v6.sin6_family == AF_INET6 &&
saddr->remote.v6.sin6_family == AF_INET6)
is_v6 = 1;
else if (saddr->local.v4.sin_family == AF_INET &&
saddr->remote.v4.sin_family == AF_INET)
is_v6 = 0;
else
return -EINVAL;
clear_bit(SK_F_IPV6, &csk->flags);
if (is_v6) {
set_bit(SK_F_IPV6, &csk->flags);
cnic_get_v6_route(&saddr->remote.v6, &dst);
memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
sizeof(struct in6_addr));
csk->dst_port = saddr->remote.v6.sin6_port;
local_port = saddr->local.v6.sin6_port;
} else {
cnic_get_v4_route(&saddr->remote.v4, &dst);
csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
csk->dst_port = saddr->remote.v4.sin_port;
local_port = saddr->local.v4.sin_port;
}
csk->vlan_id = 0;
csk->mtu = dev->netdev->mtu;
if (dst && dst->dev) {
u16 vlan = cnic_get_vlan(dst->dev, &realdev);
if (realdev == dev->netdev) {
csk->vlan_id = vlan;
csk->mtu = dst_mtu(dst);
}
}
port_id = be16_to_cpu(local_port);
if (port_id >= CNIC_LOCAL_PORT_MIN &&
port_id < CNIC_LOCAL_PORT_MAX) {
if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
port_id = 0;
} else
port_id = 0;
if (!port_id) {
port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
if (port_id == -1) {
rc = -ENOMEM;
goto err_out;
}
local_port = cpu_to_be16(port_id);
}
csk->src_port = local_port;
err_out:
dst_release(dst);
return rc;
}
static void cnic_init_csk_state(struct cnic_sock *csk)
{
csk->state = 0;
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
int err = 0;
if (!cnic_in_use(csk))
return -EINVAL;
if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
return -EINVAL;
cnic_init_csk_state(csk);
err = cnic_get_route(csk, saddr);
if (err)
goto err_out;
err = cnic_resolve_addr(csk, saddr);
if (!err)
return 0;
err_out:
clear_bit(SK_F_CONNECT_START, &csk->flags);
return err;
}
static int cnic_cm_abort(struct cnic_sock *csk)
{
struct cnic_local *cp = csk->dev->cnic_priv;
u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
if (!cnic_in_use(csk))
return -EINVAL;
if (cnic_abort_prep(csk))
return cnic_cm_abort_req(csk);
/* Getting here means that we haven't started connect, or
* connect was not successful.
*/
cp->close_conn(csk, opcode);
if (csk->state != opcode)
return -EALREADY;
return 0;
}
static int cnic_cm_close(struct cnic_sock *csk)
{
if (!cnic_in_use(csk))
return -EINVAL;
if (cnic_close_prep(csk)) {
csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
return cnic_cm_close_req(csk);
} else {
return -EALREADY;
}
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
u8 opcode)
{
struct cnic_ulp_ops *ulp_ops;
int ulp_type = csk->ulp_type;
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (ulp_ops) {
if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
ulp_ops->cm_connect_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
ulp_ops->cm_close_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
ulp_ops->cm_remote_abort(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
ulp_ops->cm_abort_complete(csk);
else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
ulp_ops->cm_remote_close(csk);
}
rcu_read_unlock();
}
static int cnic_cm_set_pg(struct cnic_sock *csk)
{
if (cnic_offld_prep(csk)) {
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cnic_cm_update_pg(csk);
else
cnic_cm_offload_pg(csk);
}
return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
u32 l5_cid = kcqe->pg_host_opaque;
u8 opcode = kcqe->op_code;
struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (!cnic_in_use(csk))
goto done;
if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
goto done;
}
/* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
goto done;
}
csk->pg_cid = kcqe->pg_cid;
set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
cnic_cm_conn_req(csk);
done:
csk_put(csk);
}
static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
ctx->timestamp = jiffies;
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
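/*
 * Demultiplex an L4/CM KCQE to its socket.  FCoE terminate and PG
 * offload/update completions are handled separately; for the rest,
 * opcodes with bit 7 set carry the l5 cid in the cid field instead of
 * conn_id.
 */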
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
u8 opcode = l4kcqe->op_code;
u32 l5_cid;
struct cnic_sock *csk;
if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
cnic_process_fcoe_term_conn(dev, kcqe);
return;
}
if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
cnic_cm_process_offld_pg(dev, l4kcqe);
return;
}
l5_cid = l4kcqe->conn_id;
if (opcode & 0x80)
l5_cid = l4kcqe->cid;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
return;
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (!cnic_in_use(csk)) {
csk_put(csk);
return;
}
switch (opcode) {
case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
if (l4kcqe->status != 0) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
}
break;
case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
if (l4kcqe->status == 0)
set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
smp_mb__before_clear_bit();
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk, opcode);
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
cp->close_conn(csk, opcode);
break;
case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
cnic_cm_upcall(cp, csk, opcode);
break;
}
csk_put(csk);
}
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
struct cnic_dev *dev = data;
int i;
for (i = 0; i < num; i++)
cnic_cm_process_kcqe(dev, kcqe[i]);
}
static struct cnic_ulp_ops cm_ulp_ops = {
.indicate_kcqes = cnic_cm_indicate_kcqe,
};
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
kfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN)) {
cnic_cm_free_mem(dev);
return -ENOMEM;
}
return 0;
}
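/*
 * Returns 1, setting SK_F_CLOSING exactly once, when the close event
 * is acceptable for the current socket state (see the rules below).
 */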
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
/* Unsolicited RESET_COMP or RESET_RECEIVED */
opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
csk->state = opcode;
}
/* 1. If event opcode matches the expected event in csk->state
* 2. If the expected event is CLOSE_COMP, we accept any event
* 3. If the expected event is 0, meaning the connection was never
* established, we accept the opcode from cm_abort.
*/
if (opcode == csk->state || csk->state == 0 ||
csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
if (csk->state == 0)
csk->state = opcode;
return 1;
}
}
return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
cnic_cm_upcall(cp, csk, opcode);
return;
}
clear_bit(SK_F_CONNECT_START, &csk->flags);
cnic_close_conn(csk);
csk->state = opcode;
cnic_cm_upcall(cp, csk, opcode);
}
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}
static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
get_random_bytes(&seed, 4);
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
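/*
 * bnx2x close state machine: a close/reset completion first triggers a
 * SEARCHER_DELETE ramrod (if the PG offload completed), which in turn
 * triggers TERMINATE_OFFLOAD; only then is the close reported to the
 * upper layer.
 */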
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
union l5cm_specific_data l5_data;
u32 cmd = 0;
int close_complete = 0;
switch (opcode) {
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
if (cnic_ready_to_close(csk, opcode)) {
if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
else
close_complete = 1;
}
break;
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
break;
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
close_complete = 1;
break;
}
if (cmd) {
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
&l5_data);
} else if (close_complete) {
ctx->timestamp = jiffies;
cnic_close_conn(csk);
cnic_cm_upcall(cp, csk, csk->state);
}
}
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
if (!cp->ctx_tbl)
return;
if (!netif_running(dev->netdev))
return;
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
msleep(10);
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
netdev_warn(dev->netdev, "CID %x not deleted\n",
ctx->cid);
}
cancel_delayed_work(&cp->delete_task);
flush_workqueue(cnic_wq);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
u32 port = CNIC_PORT(cp);
cnic_init_bnx2x_mac(dev);
cnic_bnx2x_set_tcp_timestamp(dev, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
DEF_MAX_DA_COUNT);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
DEF_MAX_CWND);
return 0;
}
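/*
 * Delayed-work handler that destroys contexts flagged for deletion
 * once they have aged at least 2 seconds, freeing their connection
 * resources; it reschedules itself while younger contexts remain.
 */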
static void cnic_delete_task(struct work_struct *work)
{
struct cnic_local *cp;
struct cnic_dev *dev;
u32 i;
int need_resched = 0;
cp = container_of(work, struct cnic_local, delete_task.work);
dev = cp->dev;
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
!test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
need_resched = 1;
continue;
}
if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
cnic_bnx2x_destroy_ramrod(dev, i);
cnic_free_bnx2x_conn_resc(dev, i);
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
}
if (need_resched)
queue_delayed_work(cnic_wq, &cp->delete_task,
msecs_to_jiffies(10));
}
static int cnic_cm_open(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int err;
err = cnic_cm_alloc_mem(dev);
if (err)
return err;
err = cp->start_cm(dev);
if (err)
goto err_out;
INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
dev->cm_create = cnic_cm_create;
dev->cm_destroy = cnic_cm_destroy;
dev->cm_connect = cnic_cm_connect;
dev->cm_abort = cnic_cm_abort;
dev->cm_close = cnic_cm_close;
dev->cm_select_dev = cnic_cm_select_dev;
cp->ulp_handle[CNIC_ULP_L4] = dev;
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
return 0;
err_out:
cnic_cm_free_mem(dev);
return err;
}
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
cp->stop_cm(dev);
if (!cp->csk_tbl)
return 0;
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
struct cnic_sock *csk = &cp->csk_tbl[i];
clear_bit(SK_F_INUSE, &csk->flags);
cnic_cm_cleanup(csk);
}
cnic_cm_free_mem(dev);
return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
u32 cid_addr;
int i;
cid_addr = GET_CID_ADDR(cid);
for (i = 0; i < CTX_SIZE; i += 4)
cnic_ctx_wr(dev, cid_addr, i, 0);
}
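/*
 * On 5709 chips the context memory lives in host pages; program the
 * page table entries and poll for each write request to complete.
 */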
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
struct cnic_local *cp = dev->cnic_priv;
int ret = 0, i;
u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
if (CHIP_NUM(cp) != CHIP_NUM_5709)
return 0;
for (i = 0; i < cp->ctx_blks; i++) {
int j;
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
(u64) cp->ctx_arr[i].mapping >> 32);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
for (j = 0; j < 10; j++) {
val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
break;
udelay(5);
}
if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
ret = -EBUSY;
break;
}
}
return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
tasklet_kill(&cp->cnic_irq_task);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
static int cnic_request_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
tasklet_disable(&cp->cnic_irq_task);
return err;
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
int err, i = 0;
int sblk_num = cp->status_blk_num;
u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
BNX2_HC_SB_CONFIG_1;
CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
(unsigned long) dev);
err = cnic_request_irq(dev);
if (err)
return err;
while (cp->status_blk.bnx2->status_completion_producer_index &&
i < 10) {
CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
1 << (11 + sblk_num));
udelay(10);
i++;
barrier();
}
if (cp->status_blk.bnx2->status_completion_producer_index) {
cnic_free_irq(dev);
goto failed;
}
} else {
struct status_block *sblk = cp->status_blk.gen;
u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
int i = 0;
while (sblk->status_completion_producer_index && i < 10) {
CNIC_WR(dev, BNX2_HC_COMMAND,
hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
udelay(10);
i++;
barrier();
}
if (sblk->status_completion_producer_index)
goto failed;
}
return 0;
failed:
netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
return;
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
{
u32 max_conn;
max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
dev->max_iscsi_conn = max_conn;
}
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
return;
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, tx_cid, sb_id;
u32 val, offset0, offset1, offset2, offset3;
int i;
struct tx_bd *txbd;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
tx_cid = 20;
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
(TX_TSS_CID << 7));
cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
}
cp->tx_cons = *cp->tx_cons_ptr;
cid_addr = GET_CID_ADDR(tx_cid);
if (CHIP_NUM(cp) == CHIP_NUM_5709) {
u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
for (i = 0; i < PHY_CTX_SIZE; i += 4)
cnic_ctx_wr(dev, cid_addr2, i, 0);
offset0 = BNX2_L2CTX_TYPE_XI;
offset1 = BNX2_L2CTX_CMD_TYPE_XI;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else {
cnic_init_context(dev, tx_cid);
cnic_init_context(dev, tx_cid + 1);
offset0 = BNX2_L2CTX_TYPE;
offset1 = BNX2_L2CTX_CMD_TYPE;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
}
val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
cnic_ctx_wr(dev, cid_addr, offset0, val);
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
cnic_ctx_wr(dev, cid_addr, offset1, val);
txbd = (struct tx_bd *) udev->l2_ring;
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
val = (u64) ring_map >> 32;
cnic_ctx_wr(dev, cid_addr, offset2, val);
txbd->tx_bd_haddr_hi = val;
val = (u64) ring_map & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, offset3, val);
txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
struct rx_bd *rxbd;
struct status_block *s_blk = cp->status_blk.gen;
dma_addr_t ring_map = udev->l2_ring_map;
sb_id = cp->status_blk_num;
cnic_init_context(dev, 2);
cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
coal_reg = BNX2_HC_COMMAND;
coal_val = CNIC_RD(dev, coal_reg);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
coal_reg = BNX2_HC_COALESCE_NOW;
coal_val = 1 << (11 + sb_id);
}
i = 0;
while (*cp->rx_cons_ptr == 0 && i < 10) {
CNIC_WR(dev, coal_reg, coal_val);
udelay(10);
i++;
barrier();
}
cp->rx_cons = *cp->rx_cons_ptr;
cid_addr = GET_CID_ADDR(2);
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
if (sb_id == 0)
val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
else
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->rx_bd_len = cp->l2_single_buf_size;
rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
struct kwqe *wqes[1], l2kwqe;
memset(&l2kwqe, 0, sizeof(l2kwqe));
wqes[0] = &l2kwqe;
l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
(L2_KWQE_OPCODE_VALUE_FLUSH <<
KWQE_OPCODE_SHIFT) | 2;
dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 val;
val = cp->func << 2;
cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
val = cnic_reg_rd_ind(dev, cp->shmem_base +
BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
dev->mac_addr[0] = (u8) (val >> 8);
dev->mac_addr[1] = (u8) val;
CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
val = cnic_reg_rd_ind(dev, cp->shmem_base +
BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
dev->mac_addr[2] = (u8) (val >> 24);
dev->mac_addr[3] = (u8) (val >> 16);
dev->mac_addr[4] = (u8) (val >> 8);
dev->mac_addr[5] = (u8) val;
CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
if (CHIP_NUM(cp) != CHIP_NUM_5709)
val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
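/*
 * Bring up the bnx2 kernel queues: program the MQ and HC registers,
 * set up the KWQ and KCQ contexts and their page tables, hook up the
 * MSI-X status block if used, and ring the CP/COM doorbells before
 * initializing the L2 rings and the IRQ.
 */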
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct status_block *sblk = cp->status_blk.gen;
u32 val, kcq_cid_addr, kwq_cid_addr;
int err;
cnic_set_bnx2_mac(dev);
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
if (BCM_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
val |= (BCM_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
err = cnic_setup_5709_context(dev, 1);
if (err)
return err;
cnic_init_context(dev, KWQ_CID);
cnic_init_context(dev, KCQ_CID);
kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
cp->max_kwq_idx = MAX_KWQ_IDX;
cp->kwq_prod_idx = 0;
cp->kwq_con_idx = 0;
set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
else
cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
val = (u32) cp->kwq_info.pgtbl_map;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
(u16 *) &sblk->status_completion_producer_index;
cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
val = (u32) cp->kcq1.dma.pgtbl_map;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
cp->int_num = 0;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *msblk = cp->status_blk.bnx2;
u32 sb_id = cp->status_blk_num;
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
(u16 *) &msblk->status_completion_producer_index;
cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
}
/* Enable Command Scheduler notification when we write to the
* host producer index of the kernel contexts. */
CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
/* Enable Command Scheduler notification when we write to either
* the Send Queue or Receive Queue producer indexes of the kernel
* bypass contexts. */
CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
/* Notify COM when the driver posts an application buffer. */
CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
/* Set the CP and COM doorbells. These two processors poll the
* doorbell for a non-zero value before running. This must be done
* after setting up the kernel queue contexts. */
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
err = cnic_init_bnx2_irq(dev);
if (err) {
netdev_err(dev->netdev, "cnic_init_irq failed\n");
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
return err;
}
cnic_get_bnx2_iscsi_info(dev);
return 0;
}
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_offset = ethdev->ctx_tbl_offset;
int i;
for (i = 0; i < cp->ctx_blks; i++) {
struct cnic_ctx *ctx = &cp->ctx_arr[i];
dma_addr_t map = ctx->mapping;
if (cp->ctx_align) {
unsigned long mask = cp->ctx_align - 1;
map = (map + mask) & ~mask;
}
cnic_ctx_tbl_wr(dev, start_offset + i, map);
}
}
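/* Illustrative sketch (an addition, not part of the original driver):
 * the power-of-two align-up idiom used in cnic_setup_bnx2x_context()
 * above, i.e. (addr + align - 1) & ~(align - 1). The helper name is
 * hypothetical and exists only to demonstrate the arithmetic.
 */
static inline dma_addr_t example_ctx_align_up(dma_addr_t map,
					      unsigned long align)
{
	unsigned long mask = align - 1;	/* align must be a power of two */

	return (map + mask) & ~mask;	/* e.g. (0x1001, 0x1000) -> 0x2000 */
}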
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
(unsigned long) dev);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
u16 sb_id, u8 sb_index,
u8 disable)
{
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*sb_index +
offsetof(struct hc_index_data, flags);
u16 flags = CNIC_RD16(dev, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
HC_INDEX_DATA_HC_ENABLED);
CNIC_WR16(dev, addr, flags);
}
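/* Hedged sketch (not from the driver): the clear-then-set read/modify/
 * write pattern used by cnic_storm_memset_hc_disable() above, reduced
 * to a self-contained helper. Name and signature are illustrative.
 */
static inline u16 example_update_masked_field(u16 old, u16 mask, u16 val)
{
	return (old & ~mask) | (val & mask);	/* clear field, then set it */
}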
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u8 sb_id = cp->status_blk_num;
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
offsetof(struct hc_index_data, timeout), 64 / 12);
cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int port = CNIC_PORT(cp);
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
memset(txbd, 0, BCM_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
reg_bd->addr_hi = start_bd->addr_hi;
reg_bd->addr_lo = start_bd->addr_lo + 0x10;
start_bd->nbytes = cpu_to_le16(0x10);
start_bd->nbd = cpu_to_le16(3);
start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
start_bd->general_data = (UNICAST_ADDRESS <<
ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
}
val = (u64) ring_map >> 32;
txbd->next_bd.addr_hi = cpu_to_le32(val);
data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
val = (u64) ring_map & 0xffffffff;
txbd->next_bd.addr_lo = cpu_to_le32(val);
data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
/* Other ramrod params */
data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
/* reset xstorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_XSTRORM_INTMEM +
XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
}
cp->tx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BCM_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
int port = CNIC_PORT(cp);
u32 cli = cp->ethdev->iscsi_l2_client_id;
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
/* General data */
data->general.client_id = cli;
data->general.statistics_en_flg = 1;
data->general.statistics_counter_id = cli;
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
/* Other ramrod params */
data->rx.client_qzone_id = cl_qzone_id;
data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
data->rx.status_block_id = BNX2X_DEF_SB_ID;
data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
data->rx.outer_vlan_removal_enable_flg = 1;
/* reset tstorm and ustorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_TSTRORM_INTMEM +
TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
val = BAR_USTRORM_INTMEM +
USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
}
cp->rx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
cp->rx_cons = *cp->rx_cons_ptr;
}
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
cp->kcq1.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
} else {
struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
cp->kcq1.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
}
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
USTORM_FCOE_EQ_PROD_OFFSET(pfid);
cp->kcq2.sw_prod_idx = 0;
cp->kcq2.hw_prod_idx_ptr =
&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
cp->kcq2.status_idx_ptr =
&sb->sb.running_index[SM_RX_ID];
}
}
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int func = CNIC_FUNC(cp), ret, i;
u32 pfid;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
val = (val >> 1) & 1;
if (val)
cp->pfid = func >> 1;
else
cp->pfid = func & 0x6;
} else {
cp->pfid = func;
}
pfid = cp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid);
if (ret)
return -ENOMEM;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
BNX2X_FCOE_NUM_CONNECTIONS,
cp->fcoe_start_cid);
if (ret)
return -ENOMEM;
}
cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
cnic_init_bnx2x_kcq(dev);
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
HC_INDEX_ISCSI_EQ_CONS);
for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
cp->conn_buf_info.pgtbl[2 * i]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
cp->conn_buf_info.pgtbl[(2 * i) + 1]);
}
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
cnic_setup_bnx2x_context(dev);
ret = cnic_init_bnx2x_irq(dev);
if (ret)
return ret;
return 0;
}
static void cnic_init_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 cid = cp->ethdev->iscsi_l2_cid;
u32 cl_qzone_id;
struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0};
u32 off, i;
rx_prods.bd_prod = 0;
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
off = BAR_USTRORM_INTMEM +
(BNX2X_CHIP_IS_E2(cp->chip_id) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
data = udev->l2_buf;
memset(data, 0, sizeof(*data));
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
msleep(1);
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
}
}
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_shutdown_bnx2_rx_ring(dev);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 cid = cp->ethdev->iscsi_l2_cid;
union l5cm_specific_data l5_data;
int i;
cnic_ring_ctl(dev, cid, cli, 0);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
msleep(1);
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_HALT did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
cid, NONE_CONNECTION_TYPE, &l5_data);
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
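/* Illustrative helper (an assumption, not cnic code): the bounded
 * poll-for-completion idiom used by cnic_init_rings() and
 * cnic_shutdown_rings() while waiting for a ramrod completion handler
 * to clear CNIC_LCL_FL_L2_WAIT. Returns -EBUSY on timeout.
 */
static inline int example_wait_bit_cleared(unsigned long *flags, int bit,
					   int max_ms)
{
	int i = 0;

	while (test_bit(bit, flags) && ++i < max_ms)
		msleep(1);

	return test_bit(bit, flags) ? -EBUSY : 0;
}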
static int cnic_register_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
if (!ethdev)
return -ENODEV;
if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
return 0;
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err)
netdev_err(dev->netdev, "register_cnic failed\n");
return err;
}
static void cnic_unregister_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!ethdev)
return;
ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EALREADY;
dev->regview = ethdev->io_base;
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
if (err) {
netdev_err(dev->netdev, "allocate resource failure\n");
goto err1;
}
err = cp->start_hw(dev);
if (err)
goto err1;
err = cnic_cm_open(dev);
if (err)
goto err1;
set_bit(CNIC_F_CNIC_UP, &dev->flags);
cp->enable_int(dev);
return 0;
err1:
cp->free_resc(dev);
pci_dev_put(dev->pcidev);
return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
cnic_disable_bnx2_int_sync(dev);
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
cnic_init_context(dev, KWQ_CID);
cnic_init_context(dev, KCQ_CID);
cnic_setup_5709_context(dev, 0);
cnic_free_irq(dev);
cnic_free_resc(dev);
}
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
cnic_free_irq(dev);
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
static void cnic_stop_hw(struct cnic_dev *dev)
{
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
/* Need to wait for the ring shutdown event to complete
* before clearing the CNIC_UP flag.
*/
while (cp->udev->uio_dev != -1 && i < 15) {
msleep(100);
i++;
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
pci_dev_put(dev->pcidev);
}
}
static void cnic_free_dev(struct cnic_dev *dev)
{
int i = 0;
while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
msleep(100);
i++;
}
if (atomic_read(&dev->ref_count) != 0)
netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
netdev_info(dev->netdev, "Removed CNIC device\n");
dev_put(dev->netdev);
kfree(dev);
}
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
struct pci_dev *pdev)
{
struct cnic_dev *cdev;
struct cnic_local *cp;
int alloc_size;
alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
cdev = kzalloc(alloc_size , GFP_KERNEL);
if (cdev == NULL) {
netdev_err(dev, "allocate dev struct failure\n");
return NULL;
}
cdev->netdev = dev;
cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
cdev->register_device = cnic_register_device;
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cp = cdev->cnic_priv;
cp->dev = cdev;
cp->l2_single_buf_size = 0x400;
cp->l2_rx_ring_size = 3;
spin_lock_init(&cp->cnic_ulp_lock);
netdev_info(dev, "Added CNIC device\n");
return cdev;
}
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put(bnx2_cnic_probe);
}
if (!ethdev)
return NULL;
pdev = ethdev->pdev;
if (!pdev)
return NULL;
dev_hold(dev);
pci_dev_get(pdev);
if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
(pdev->revision < 0x10)) {
pci_dev_put(pdev);
goto cnic_err;
}
pci_dev_put(pdev);
cdev = cnic_alloc_dev(dev, pdev);
if (cdev == NULL)
goto cnic_err;
set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
cp->cnic_ops = &cnic_bnx2_ops;
cp->start_hw = cnic_start_bnx2_hw;
cp->stop_hw = cnic_stop_bnx2_hw;
cp->setup_pgtbl = cnic_setup_page_tbl;
cp->alloc_resc = cnic_alloc_bnx2_resc;
cp->free_resc = cnic_free_resc;
cp->start_cm = cnic_cm_init_bnx2_hw;
cp->stop_cm = cnic_cm_stop_bnx2_hw;
cp->enable_int = cnic_enable_bnx2_int;
cp->disable_int_sync = cnic_disable_bnx2_int_sync;
cp->close_conn = cnic_close_bnx2_conn;
cp->next_idx = cnic_bnx2_next_idx;
cp->hw_idx = cnic_bnx2_hw_idx;
return cdev;
cnic_err:
dev_put(dev);
return NULL;
}
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
probe = symbol_get(bnx2x_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put(bnx2x_cnic_probe);
}
if (!ethdev)
return NULL;
pdev = ethdev->pdev;
if (!pdev)
return NULL;
dev_hold(dev);
cdev = cnic_alloc_dev(dev, pdev);
if (cdev == NULL) {
dev_put(dev);
return NULL;
}
set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
cp->setup_pgtbl = cnic_setup_page_tbl_le;
cp->alloc_resc = cnic_alloc_bnx2x_resc;
cp->free_resc = cnic_free_resc;
cp->start_cm = cnic_cm_init_bnx2x_hw;
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
if (BNX2X_CHIP_IS_E2(cp->chip_id))
cp->ack_int = cnic_ack_bnx2x_e2_msix;
else
cp->ack_int = cnic_ack_bnx2x_msix;
cp->close_conn = cnic_close_bnx2x_conn;
cp->next_idx = cnic_bnx2x_next_idx;
cp->hw_idx = cnic_bnx2x_hw_idx;
return cdev;
}
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
struct ethtool_drvinfo drvinfo;
struct cnic_dev *cdev = NULL;
if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
memset(&drvinfo, 0, sizeof(drvinfo));
dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
if (!strcmp(drvinfo.driver, "bnx2"))
cdev = init_bnx2_cnic(dev);
if (!strcmp(drvinfo.driver, "bnx2x"))
cdev = init_bnx2x_cnic(dev);
if (cdev) {
write_lock(&cnic_dev_lock);
list_add(&cdev->list, &cnic_dev_list);
write_unlock(&cnic_dev_lock);
}
}
return cdev;
}
/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *netdev = ptr;
struct cnic_dev *dev;
int if_type;
int new_dev = 0;
dev = cnic_from_netdev(netdev);
if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
new_dev = 1;
cnic_hold(dev);
}
}
if (dev) {
struct cnic_local *cp = dev->cnic_priv;
if (new_dev)
cnic_ulp_init(dev);
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
}
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
}
rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
void *ctx;
ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
if (!ulp_ops || !ulp_ops->indicate_netevent)
continue;
ctx = cp->ulp_handle[if_type];
ulp_ops->indicate_netevent(ctx, event);
}
rcu_read_unlock();
if (event == NETDEV_GOING_DOWN) {
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
cnic_unregister_netdev(dev);
} else if (event == NETDEV_UNREGISTER) {
write_lock(&cnic_dev_lock);
list_del_init(&dev->list);
write_unlock(&cnic_dev_lock);
cnic_put(dev);
cnic_free_dev(dev);
goto done;
}
cnic_put(dev);
}
done:
return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
.notifier_call = cnic_netdev_event
};
static void cnic_release(void)
{
struct cnic_dev *dev;
struct cnic_uio_dev *udev;
while (!list_empty(&cnic_dev_list)) {
dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
}
cnic_ulp_exit(dev);
cnic_unregister_netdev(dev);
list_del_init(&dev->list);
cnic_free_dev(dev);
}
while (!list_empty(&cnic_udev_list)) {
udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
list);
cnic_free_uio(udev);
}
}
static int __init cnic_init(void)
{
int rc = 0;
pr_info("%s", version);
rc = register_netdevice_notifier(&cnic_netdev_notifier);
if (rc) {
cnic_release();
return rc;
}
cnic_wq = create_singlethread_workqueue("cnic_wq");
if (!cnic_wq) {
cnic_release();
unregister_netdevice_notifier(&cnic_netdev_notifier);
return -ENOMEM;
}
return 0;
}
static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
destroy_workqueue(cnic_wq);
}
module_init(cnic_init);
module_exit(cnic_exit);
| gpl-2.0 |
rperier/linux-rockchip | drivers/net/ethernet/ti/am65-cpsw-qos.c | 152 | 16684 | // SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
*
* quality of service module includes:
* Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
*/
#include <linux/pm_runtime.h>
#include <linux/time.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_PN_REG_CTL 0x004
#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
#define AM65_CPSW_PN_REG_EST_CTL 0x060
/* AM65_CPSW_REG_CTL register fields */
#define AM65_CPSW_CTL_EST_EN BIT(18)
/* AM65_CPSW_PN_REG_CTL register fields */
#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
/* AM65_CPSW_PN_REG_EST_CTL register fields */
#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
#define AM65_CPSW_PN_EST_TS_EN BIT(2)
#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
/* EST FETCH COMMAND RAM */
#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
#define AM65_CPSW_FETCH_CNT_OFFSET 8
#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
enum timer_act {
TACT_PROG, /* need program timer */
TACT_NEED_STOP, /* need stop first */
TACT_SKIP_PROG, /* just buffer can be updated */
};
static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
return port->qos.est_oper || port->qos.est_admin;
}
static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
u32 val;
val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
if (enable)
val |= AM65_CPSW_CTL_EST_EN;
else
val &= ~AM65_CPSW_CTL_EST_EN;
writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
common->est_enabled = enable;
}
static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
u32 val;
val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
if (enable)
val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
else
val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}
/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
int buf_num)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 val;
val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
if (buf_num)
val |= AM65_CPSW_PN_EST_BUFSEL;
else
val &= ~AM65_CPSW_PN_EST_BUFSEL;
writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}
/* am65_cpsw_port_est_is_swapped() - Indicate whether h/w has transitioned
* admin -> oper or not
*
* Return true if already transitioned, i.e. oper equals admin and the buf
* numbers match (est_oper->buf matches est_admin->buf).
* Return false if still before the transition, i.e. oper is not equal to
* admin: a previous admin command is waiting to transition to oper state,
* and est_oper->buf does not match est_admin->buf.
*/
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
int *admin)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 val;
val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
return *admin == *oper;
}
/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
* Admin to program the new schedule.
*
* The logic is as follows:
* If oper is the same as admin, return the other buffer (!oper) as the admin
* buffer. If oper differs, the driver lets the current oper continue, as it
* is in the process of transitioning from admin -> oper; keep the oper by
* selecting the same oper buffer via the EST_BUFSEL bit in the EST CTL
* register. On the second iteration they will match and the code returns.
* The actual buffer to write commands to is selected later, just before it
* is ready to update the schedule.
*/
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
int oper, admin;
int roll = 2;
while (roll--) {
if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
return !oper;
/* admin is not set, so stall the transition by targeting the same
* oper buf, because in-flight buffer memory must not be touched.
*/
am65_cpsw_port_est_assign_buf_num(ndev, oper);
dev_info(&ndev->dev,
"Prev. EST admin cycle is in transit %d -> %d\n",
oper, admin);
}
return admin;
}
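/* Sketch of the two-buffer handshake above (illustrative only, not
 * driver code): with buffers {0, 1}, once oper == admin the free
 * buffer is simply the complement of oper; while a swap is still in
 * flight, the caller re-targets the oper buffer and retries.
 */
static inline int example_est_free_buf(int oper, int admin)
{
	return (oper == admin) ? !oper : admin;	/* 0,0 -> 1; 1,1 -> 0 */
}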
static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
if (port->qos.est_oper)
devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = port->qos.est_admin;
port->qos.est_admin = NULL;
}
static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 val;
val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
val &= ~AM65_CPSW_PN_EST_ONEBUF;
writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
/* a rolled buf num means the buf changed while configuring */
if (port->qos.est_oper && port->qos.est_admin &&
est_new->buf == port->qos.est_oper->buf)
am65_cpsw_admin_to_oper(ndev);
}
static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpsw_common *common = port->common;
int common_enable = 0;
int i;
am65_cpsw_port_est_enable(port, enable);
for (i = 0; i < common->port_num; i++)
common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
common_enable |= enable;
am65_cpsw_est_enable(common, common_enable);
}
/* This update is meant to be called from any routine before reading the real
* state of the admin -> oper transition; in particular, generic routines use
* it to report the real state back to the Taprio Qdisc.
*/
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int oper, admin;
if (!port->qos.est_admin)
return;
if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
return;
am65_cpsw_admin_to_oper(ndev);
}
/* The fetch command count is a number of bytes in Gigabit mode, or nibbles in
* 10/100Mb mode. So, given the link speed and a time in ns, convert the ns to
* the number of bytes/nibbles that can be sent on the wire at that speed.
*/
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
u64 temp;
temp = ns * link_speed;
if (link_speed < SPEED_1000)
temp <<= 1;
return DIV_ROUND_UP(temp, 8 * 1000);
}
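/* Worked example (an addition for illustration, mirroring the rounding
 * in am65_est_cmd_ns_to_cnt() above): at 1000Mb/s one byte occupies
 * 8ns on the wire, so a 1000ns interval yields a fetch count of 125
 * bytes; below 1000Mb/s the unit is nibbles, hence the extra shift.
 */
static inline u64 example_est_cnt(u64 ns, u64 mbps)
{
	u64 temp = ns * mbps;		/* caller avoids overflow, as above */

	if (mbps < 1000)
		temp <<= 1;		/* nibbles instead of bytes */

	return DIV_ROUND_UP(temp, 8 * 1000);	/* (1000, 1000) -> 125 */
}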
static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
int fetch_cnt,
int fetch_allow)
{
u32 prio_mask, cmd_fetch_cnt, cmd;
do {
if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
} else {
cmd_fetch_cnt = fetch_cnt;
/* a non-zero fetch count is clamped to a minimum of 16 */
if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
cmd_fetch_cnt = 16;
fetch_cnt = 0;
}
prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
writel(cmd, addr);
addr += 4;
} while (fetch_cnt);
return addr;
}
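/* Hedged sketch (not part of the driver): the number of RAM commands a
 * single schedule entry consumes in the loop above is just the fetch
 * count split at AM65_CPSW_FETCH_CNT_MAX (0x3fff), e.g. a count of
 * 0x5000 is emitted as 0x3fff + 0x1001, two commands. Note the driver
 * still counts at least one command for a zero fetch count.
 */
static inline u32 example_cmds_per_entry(u32 fetch_cnt)
{
	return DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);	/* 0x5000 -> 2 */
}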
static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
struct tc_taprio_qopt_offload *taprio,
int link_speed)
{
int i, cmd_cnt, cmd_sum = 0;
u32 fetch_cnt;
for (i = 0; i < taprio->num_entries; i++) {
if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
dev_err(&ndev->dev, "Only SET command is supported");
return -EINVAL;
}
fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
link_speed);
cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
if (!cmd_cnt)
cmd_cnt++;
cmd_sum += cmd_cnt;
if (!fetch_cnt)
break;
}
return cmd_sum;
}
static int am65_cpsw_est_check_scheds(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int cmd_num;
cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
port->qos.link_speed);
if (cmd_num < 0)
return cmd_num;
if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
dev_err(&ndev->dev, "No fetch RAM");
return -ENOMEM;
}
return 0;
}
static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
void __iomem *ram_addr, *max_ram_addr;
struct tc_taprio_sched_entry *entry;
int i, ram_size;
ram_addr = port->fetch_ram_base;
ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
ram_addr += est_new->buf * ram_size;
max_ram_addr = ram_size + ram_addr;
for (i = 0; i < est_new->taprio.num_entries; i++) {
entry = &est_new->taprio.entries[i];
fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
port->qos.link_speed);
fetch_allow = entry->gate_mask;
if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
fetch_allow);
ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
fetch_allow);
if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
dev_info(&ndev->dev,
"next scheds after %d have no impact", i + 1);
break;
}
all_fetch_allow |= fetch_allow;
}
/* end cmd: enable non-timed queues in case tx runs past the cycle time */
if (ram_addr < max_ram_addr)
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpsw_common *common = port->common;
struct am65_cpts *cpts = common->cpts;
struct am65_cpts_estf_cfg cfg;
cfg.ns_period = est_new->taprio.cycle_time;
cfg.ns_start = est_new->taprio.base_time;
return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}
static void am65_cpsw_timer_stop(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = port->common->cpts;
am65_cpts_estf_disable(cpts, port->port_id - 1);
}
static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = port->common->cpts;
u64 cur_time;
s64 diff;
if (!port->qos.est_oper)
return TACT_PROG;
taprio_new = &est_new->taprio;
taprio_oper = &port->qos.est_oper->taprio;
if (taprio_new->cycle_time != taprio_oper->cycle_time)
return TACT_NEED_STOP;
/* in order to avoid a timer reset, get base_time from the oper taprio */
if (!taprio_new->base_time && taprio_oper)
taprio_new->base_time = taprio_oper->base_time;
if (taprio_new->base_time == taprio_oper->base_time)
return TACT_SKIP_PROG;
/* base times are cycle synchronized */
diff = taprio_new->base_time - taprio_oper->base_time;
diff = diff < 0 ? -diff : diff;
if (diff % taprio_new->cycle_time)
return TACT_NEED_STOP;
cur_time = am65_cpts_ns_gettime(cpts);
if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
return TACT_SKIP_PROG;
/* TODO: Admin schedule at future time is not currently supported */
return TACT_NEED_STOP;
}
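/* Illustrative check (an assumption, distilled from the logic above):
 * two base times are "cycle synchronized" iff their difference is an
 * integral number of cycle times, which is what the diff % cycle_time
 * test in am65_cpsw_timer_act() verifies.
 */
static inline bool example_cycle_synced(s64 a, s64 b, s64 cycle_time)
{
	s64 diff = a > b ? a - b : b - a;

	return (diff % cycle_time) == 0;
}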
static void am65_cpsw_stop_est(struct net_device *ndev)
{
am65_cpsw_est_set(ndev, 0);
am65_cpsw_timer_stop(ndev);
}
static void am65_cpsw_purge_est(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
am65_cpsw_stop_est(ndev);
if (port->qos.est_admin)
devm_kfree(&ndev->dev, port->qos.est_admin);
if (port->qos.est_oper)
devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = NULL;
port->qos.est_admin = NULL;
}
static int am65_cpsw_configure_taprio(struct net_device *ndev,
struct am65_cpsw_est *est_new)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpts *cpts = common->cpts;
int ret = 0, tact = TACT_PROG;
am65_cpsw_est_update_state(ndev);
if (!est_new->taprio.enable) {
am65_cpsw_stop_est(ndev);
return ret;
}
ret = am65_cpsw_est_check_scheds(ndev, est_new);
if (ret < 0)
return ret;
tact = am65_cpsw_timer_act(ndev, est_new);
if (tact == TACT_NEED_STOP) {
dev_err(&ndev->dev,
"Can't toggle estf timer, stop taprio first");
return -EINVAL;
}
if (tact == TACT_PROG)
am65_cpsw_timer_stop(ndev);
if (!est_new->taprio.base_time)
est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
am65_cpsw_port_est_get_buf_num(ndev, est_new);
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
am65_cpsw_est_set(ndev, est_new->taprio.enable);
if (tact == TACT_PROG) {
ret = am65_cpsw_timer_set(ndev, est_new);
if (ret) {
dev_err(&ndev->dev, "Failed to set cycle time");
return ret;
}
}
return ret;
}
static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
struct tc_taprio_qopt_offload *to)
{
int i;
*to = *from;
for (i = 0; i < from->num_entries; i++)
to->entries[i] = from->entries[i];
}
static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct tc_taprio_qopt_offload *taprio = type_data;
struct am65_cpsw_est *est_new;
int ret = 0;
if (taprio->cycle_time_extension) {
dev_err(&ndev->dev, "Failed to set cycle time extension");
return -EOPNOTSUPP;
}
est_new = devm_kzalloc(&ndev->dev,
struct_size(est_new, taprio.entries, taprio->num_entries),
GFP_KERNEL);
if (!est_new)
return -ENOMEM;
am65_cpsw_cp_taprio(taprio, &est_new->taprio);
ret = am65_cpsw_configure_taprio(ndev, est_new);
if (!ret) {
if (taprio->enable) {
if (port->qos.est_admin)
devm_kfree(&ndev->dev, port->qos.est_admin);
port->qos.est_admin = est_new;
} else {
devm_kfree(&ndev->dev, est_new);
am65_cpsw_purge_est(ndev);
}
} else {
devm_kfree(&ndev->dev, est_new);
}
return ret;
}
static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
ktime_t cur_time;
s64 delta;
port->qos.link_speed = link_speed;
if (!am65_cpsw_port_est_enabled(port))
return;
if (port->qos.link_down_time) {
cur_time = ktime_get();
delta = ktime_us_delta(cur_time, port->qos.link_down_time);
if (delta > USEC_PER_SEC) {
dev_err(&ndev->dev,
"Link has been lost too long, stopping TAS");
goto purge_est;
}
}
return;
purge_est:
am65_cpsw_purge_est(ndev);
}
static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpsw_common *common = port->common;
if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
return -ENODEV;
if (!netif_running(ndev)) {
dev_err(&ndev->dev, "interface is down, link speed unknown\n");
return -ENETDOWN;
}
if (common->pf_p0_rx_ptype_rrobin) {
dev_err(&ndev->dev,
"p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
return -EINVAL;
}
if (port->qos.link_speed == SPEED_UNKNOWN)
return -ENOLINK;
return am65_cpsw_set_taprio(ndev, type_data);
}
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
default:
return -EOPNOTSUPP;
}
}
void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
return;
am65_cpsw_est_link_up(ndev, link_speed);
port->qos.link_down_time = 0;
}
void am65_cpsw_qos_link_down(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
return;
if (!port->qos.link_down_time)
port->qos.link_down_time = ktime_get();
port->qos.link_speed = SPEED_UNKNOWN;
}
| gpl-2.0 |
kumasento/linux | drivers/staging/rtl8192e/rtllib_module.c | 408 | 6967 | /*******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include "rtllib.h"
u32 rt_global_debug_component = COMP_ERR;
EXPORT_SYMBOL(rt_global_debug_component);
void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
{
ptimer->function = fun;
ptimer->data = data;
init_timer(ptimer);
}
static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
{
if (ieee->networks)
return 0;
ieee->networks = kzalloc(
MAX_NETWORK_COUNT * sizeof(struct rtllib_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
ieee->dev->name);
return -ENOMEM;
}
return 0;
}
static inline void rtllib_networks_free(struct rtllib_device *ieee)
{
if (!ieee->networks)
return;
kfree(ieee->networks);
ieee->networks = NULL;
}
static inline void rtllib_networks_initialize(struct rtllib_device *ieee)
{
int i;
INIT_LIST_HEAD(&ieee->network_free_list);
INIT_LIST_HEAD(&ieee->network_list);
for (i = 0; i < MAX_NETWORK_COUNT; i++)
list_add_tail(&ieee->networks[i].list,
&ieee->network_free_list);
}
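/* Usage sketch (an illustration, not part of rtllib): how the free/
 * active list pair initialized above is meant to be consumed - pull a
 * network off the free pool and move it onto the active list. The
 * helper name is hypothetical.
 */
static inline struct rtllib_network *
example_get_free_network(struct rtllib_device *ieee)
{
	struct rtllib_network *net;

	if (list_empty(&ieee->network_free_list))
		return NULL;	/* pool exhausted */

	net = list_entry(ieee->network_free_list.next,
			 struct rtllib_network, list);
	list_del(&net->list);				/* off the free pool */
	list_add_tail(&net->list, &ieee->network_list);	/* now in use */
	return net;
}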
struct net_device *alloc_rtllib(int sizeof_priv)
{
struct rtllib_device *ieee = NULL;
struct net_device *dev;
int i, err;
RTLLIB_DEBUG_INFO("Initializing...\n");
dev = alloc_etherdev(sizeof(struct rtllib_device) + sizeof_priv);
if (!dev) {
RTLLIB_ERROR("Unable to network device.\n");
return NULL;
}
ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
memset(ieee, 0, sizeof(struct rtllib_device)+sizeof_priv);
ieee->dev = dev;
err = rtllib_networks_allocate(ieee);
if (err) {
RTLLIB_ERROR("Unable to allocate beacon storage: %d\n",
err);
goto failed;
}
rtllib_networks_initialize(ieee);
/* Default fragmentation threshold is maximum payload size */
ieee->fts = DEFAULT_FTS;
ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
ieee->open_wep = 1;
/* Default to enabling full open WEP with host based encrypt/decrypt */
ieee->host_encrypt = 1;
ieee->host_decrypt = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
spin_lock_init(&ieee->lock);
spin_lock_init(&ieee->wpax_suitlist_lock);
spin_lock_init(&ieee->bw_spinlock);
spin_lock_init(&ieee->reorder_spinlock);
atomic_set(&(ieee->atm_chnlop), 0);
atomic_set(&(ieee->atm_swbw), 0);
/* SAM FIXME */
lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
ieee->bHalfNMode = false;
ieee->wpa_enabled = 0;
ieee->tkip_countermeasures = 0;
ieee->drop_unencrypted = 0;
ieee->privacy_invoked = 0;
ieee->ieee802_1x = 1;
ieee->raw_tx = 0;
ieee->hwsec_active = 0;
memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
rtllib_softmac_init(ieee);
ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
if (ieee->pHTInfo == NULL) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for HTInfo\n");
return NULL;
}
HTUpdateDefaultSetting(ieee);
HTInitializeHTInfo(ieee);
TSInitialize(ieee);
for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
for (i = 0; i < 17; i++) {
ieee->last_rxseq_num[i] = -1;
ieee->last_rxfrag_num[i] = -1;
ieee->last_packet_time[i] = 0;
}
return dev;
failed:
free_netdev(dev);
return NULL;
}
EXPORT_SYMBOL(alloc_rtllib);
void free_rtllib(struct net_device *dev)
{
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
kfree(ieee->pHTInfo);
ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
lib80211_crypt_info_free(&ieee->crypt_info);
rtllib_networks_free(ieee);
free_netdev(dev);
}
EXPORT_SYMBOL(free_rtllib);
u32 rtllib_debug_level;
static int debug = RTLLIB_DL_ERR;
static struct proc_dir_entry *rtllib_proc;
static int show_debug_level(struct seq_file *m, void *v)
{
return seq_printf(m, "0x%08X\n", rtllib_debug_level);
}
static ssize_t write_debug_level(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
unsigned long val;
int err = kstrtoul_from_user(buffer, count, 0, &val);
if (err)
return err;
rtllib_debug_level = val;
return count;
}
static int open_debug_level(struct inode *inode, struct file *file)
{
return single_open(file, show_debug_level, NULL);
}
static const struct file_operations fops = {
.open = open_debug_level,
.read = seq_read,
.llseek = seq_lseek,
.write = write_debug_level,
.release = single_release,
};
static int __init rtllib_init(void)
{
struct proc_dir_entry *e;
rtllib_debug_level = debug;
rtllib_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
if (rtllib_proc == NULL) {
RTLLIB_ERROR("Unable to create " DRV_NAME
" proc directory\n");
return -EIO;
}
e = proc_create("debug_level", S_IRUGO | S_IWUSR, rtllib_proc, &fops);
if (!e) {
remove_proc_entry(DRV_NAME, init_net.proc_net);
rtllib_proc = NULL;
return -EIO;
}
return 0;
}
static void __exit rtllib_exit(void)
{
if (rtllib_proc) {
remove_proc_entry("debug_level", rtllib_proc);
remove_proc_entry(DRV_NAME, init_net.proc_net);
rtllib_proc = NULL;
}
}
module_init(rtllib_init);
module_exit(rtllib_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AmperificSuperKANG/one_m7 | arch/powerpc/platforms/85xx/p1022_ds.c | 408 | 14889 | /*
* P1022DS board specific routines
*
* Authors: Travis Wheatley <travis.wheatley@freescale.com>
* Dave Liu <daveliu@freescale.com>
* Timur Tabi <timur@freescale.com>
*
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This file is taken from the Freescale P1022DS BSP, with modifications:
* 1) No AMP support
* 2) No PCI endpoint support
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <asm/div64.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/udbg.h>
#include <asm/fsl_guts.h>
#include <asm/fsl_lbc.h>
#include "smp.h"
#include "mpc85xx.h"
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
#define PMUXCR_ELBCDIU_MASK 0xc0000000
#define PMUXCR_ELBCDIU_NOR16 0x80000000
#define PMUXCR_ELBCDIU_DIU 0x40000000
/*
* Board-specific initialization of the DIU. This code should probably be
* executed when the DIU is opened, rather than in arch code, but the DIU
* driver does not have a mechanism for this (yet).
*
* This is especially problematic on the P1022DS because the local bus (eLBC)
* and the DIU video signals share the same pins, which means that enabling the
* DIU will disable access to NOR flash.
*/
/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
#define CLKDVDR_PXCKEN 0x80000000
#define CLKDVDR_PXCKINV 0x10000000
#define CLKDVDR_PXCKDLY 0x06000000
#define CLKDVDR_PXCLK_MASK 0x00FF0000
/* Some ngPIXIS register definitions */
#define PX_CTL 3
#define PX_BRDCFG0 8
#define PX_BRDCFG1 9
#define PX_BRDCFG0_ELBC_SPI_MASK 0xc0
#define PX_BRDCFG0_ELBC_SPI_ELBC 0x00
#define PX_BRDCFG0_ELBC_SPI_NULL 0xc0
#define PX_BRDCFG0_ELBC_DIU 0x02
#define PX_BRDCFG1_DVIEN 0x80
#define PX_BRDCFG1_DFPEN 0x40
#define PX_BRDCFG1_BACKLIGHT 0x20
#define PX_BRDCFG1_DDCEN 0x10
#define PX_CTL_ALTACC 0x80
/*
* DIU Area Descriptor
*
* Note that we need to byte-swap the value before it's written to the AD
* register. So even though the registers don't look like they're in the same
* bit positions as they are on the MPC8610, the same value is written to the
* AD register on the MPC8610 and on the P1022.
*/
#define AD_BYTE_F 0x10000000
#define AD_ALPHA_C_MASK 0x0E000000
#define AD_ALPHA_C_SHIFT 25
#define AD_BLUE_C_MASK 0x01800000
#define AD_BLUE_C_SHIFT 23
#define AD_GREEN_C_MASK 0x00600000
#define AD_GREEN_C_SHIFT 21
#define AD_RED_C_MASK 0x00180000
#define AD_RED_C_SHIFT 19
#define AD_PALETTE 0x00040000
#define AD_PIXEL_S_MASK 0x00030000
#define AD_PIXEL_S_SHIFT 16
#define AD_COMP_3_MASK 0x0000F000
#define AD_COMP_3_SHIFT 12
#define AD_COMP_2_MASK 0x00000F00
#define AD_COMP_2_SHIFT 8
#define AD_COMP_1_MASK 0x000000F0
#define AD_COMP_1_SHIFT 4
#define AD_COMP_0_MASK 0x0000000F
#define AD_COMP_0_SHIFT 0
#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
(blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
(red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
(c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
(c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
/**
* p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth
*
* The Area Descriptor is a 32-bit value that determines which bits in each
* pixel are to be used for each color.
*/
static u32 p1022ds_get_pixel_format(enum fsl_diu_monitor_port port,
unsigned int bits_per_pixel)
{
switch (bits_per_pixel) {
case 32:
/* 0x88883316 */
return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8);
case 24:
/* 0x88082219 */
return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8);
case 16:
/* 0x65053118 */
return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0);
default:
pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
return 0;
}
}
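/* Hypothetical usage sketch (not part of the board file): reproducing
 * the 16bpp RGB565 descriptor from the switch above with MAKE_AD().
 * The arguments are (alpha, red, blue, green, size, c0, c1, c2, c3);
 * this is the value noted as 0x65053118 before the byte swap.
 */
static inline __le32 example_rgb565_area_descriptor(void)
{
	return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0);
}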
/**
* p1022ds_set_gamma_table: update the gamma table, if necessary
*
* On some boards, the gamma table for some ports may need to be modified.
* This is not the case on the P1022DS, so we do nothing.
*/
static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
char *gamma_table_base)
{
}
struct fsl_law {
u32 lawbar;
u32 reserved1;
u32 lawar;
u32 reserved[5];
};
#define LAWBAR_MASK 0x00F00000
#define LAWBAR_SHIFT 12
#define LAWAR_EN 0x80000000
#define LAWAR_TGT_MASK 0x01F00000
#define LAW_TRGT_IF_LBC (0x04 << 20)
#define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK)
#define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC)
#define BR_BA 0xFFFF8000
/*
* Map a BRx value to a physical address
*
* The localbus BRx registers only store the lower 32 bits of the address. To
* obtain the upper four bits, we need to scan the LAW table. The entry which
* maps to the localbus will contain the upper four bits.
*/
static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br)
{
#ifndef CONFIG_PHYS_64BIT
/*
* If we only have 32-bit addressing, then the BRx address *is* the
* physical address.
*/
return br & BR_BA;
#else
const struct fsl_law *law = ecm + 0xc08;
unsigned int i;
for (i = 0; i < count; i++) {
u64 lawbar = in_be32(&law[i].lawbar);
u32 lawar = in_be32(&law[i].lawar);
if ((lawar & LAWAR_MASK) == LAWAR_MATCH)
/* Extract the upper four bits */
return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12);
}
return 0;
#endif
}
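/* Illustrative helper (an assumption, not from the BSP): combining a
 * 32-bit BRx base with the upper bits recovered from a matching LAW
 * entry, as lbc_br_to_phys() does above. The << 12 moves LAWBAR bits
 * [23:20] up to physical address bits [35:32] on 36-bit parts.
 */
static inline u64 example_lbc_phys(u32 br, u32 lawbar)
{
	return (br & BR_BA) | (((u64)(lawbar & LAWBAR_MASK)) << 12);
}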
/**
* p1022ds_set_monitor_port: switch the output to a different monitor port
*/
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
struct device_node *guts_node;
struct device_node *lbc_node = NULL;
struct device_node *law_node = NULL;
struct ccsr_guts __iomem *guts;
struct fsl_lbc_regs *lbc = NULL;
void *ecm = NULL;
u8 __iomem *lbc_lcs0_ba = NULL;
u8 __iomem *lbc_lcs1_ba = NULL;
phys_addr_t cs0_addr, cs1_addr;
const __be32 *iprop;
unsigned int num_laws;
u8 b;
/* Map the global utilities registers. */
guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_node) {
pr_err("p1022ds: missing global utilties device node\n");
return;
}
guts = of_iomap(guts_node, 0);
if (!guts) {
pr_err("p1022ds: could not map global utilties device\n");
goto exit;
}
lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
if (!lbc_node) {
pr_err("p1022ds: missing localbus node\n");
goto exit;
}
lbc = of_iomap(lbc_node, 0);
if (!lbc) {
pr_err("p1022ds: could not map localbus node\n");
goto exit;
}
law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
if (!law_node) {
pr_err("p1022ds: missing local access window node\n");
goto exit;
}
ecm = of_iomap(law_node, 0);
if (!ecm) {
pr_err("p1022ds: could not map local access window node\n");
goto exit;
}
iprop = of_get_property(law_node, "fsl,num-laws", 0);
if (!iprop) {
pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
goto exit;
}
num_laws = be32_to_cpup(iprop);
cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
lbc_lcs0_ba = ioremap(cs0_addr, 1);
lbc_lcs1_ba = ioremap(cs1_addr, 1);
/* Make sure we're in indirect mode first. */
if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
PMUXCR_ELBCDIU_DIU) {
struct device_node *pixis_node;
void __iomem *pixis;
pixis_node =
of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
if (!pixis_node) {
pr_err("p1022ds: missing pixis node\n");
goto exit;
}
pixis = of_iomap(pixis_node, 0);
of_node_put(pixis_node);
if (!pixis) {
pr_err("p1022ds: could not map pixis registers\n");
goto exit;
}
/* Enable indirect PIXIS mode. */
setbits8(pixis + PX_CTL, PX_CTL_ALTACC);
iounmap(pixis);
/* Switch the board mux to the DIU */
out_8(lbc_lcs0_ba, PX_BRDCFG0); /* BRDCFG0 */
b = in_8(lbc_lcs1_ba);
b |= PX_BRDCFG0_ELBC_DIU;
out_8(lbc_lcs1_ba, b);
/* Set the chip mux to DIU mode. */
clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK,
PMUXCR_ELBCDIU_DIU);
in_be32(&guts->pmuxcr);
}
switch (port) {
case FSL_DIU_PORT_DVI:
/* Enable the DVI port, disable the DFP and the backlight */
out_8(lbc_lcs0_ba, PX_BRDCFG1);
b = in_8(lbc_lcs1_ba);
b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT);
b |= PX_BRDCFG1_DVIEN;
out_8(lbc_lcs1_ba, b);
break;
case FSL_DIU_PORT_LVDS:
/*
* LVDS also needs backlight enabled, otherwise the display
* will be blank.
*/
/* Enable the DFP port, disable the DVI and the backlight */
out_8(lbc_lcs0_ba, PX_BRDCFG1);
b = in_8(lbc_lcs1_ba);
b &= ~PX_BRDCFG1_DVIEN;
b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT;
out_8(lbc_lcs1_ba, b);
break;
default:
pr_err("p1022ds: unsupported monitor port %i\n", port);
}
exit:
if (lbc_lcs1_ba)
iounmap(lbc_lcs1_ba);
if (lbc_lcs0_ba)
iounmap(lbc_lcs0_ba);
if (lbc)
iounmap(lbc);
if (ecm)
iounmap(ecm);
if (guts)
iounmap(guts);
of_node_put(law_node);
of_node_put(lbc_node);
of_node_put(guts_node);
}
/**
* p1022ds_set_pixel_clock: program the DIU's clock
*
* @pixclock: the period, in picoseconds, of the pixel clock
*/
void p1022ds_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts;
unsigned long freq;
u64 temp;
u32 pxclk;
/* Map the global utilities registers. */
guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_np) {
pr_err("p1022ds: missing global utilties device node\n");
return;
}
guts = of_iomap(guts_np, 0);
of_node_put(guts_np);
if (!guts) {
pr_err("p1022ds: could not map global utilties device\n");
return;
}
/* Convert pixclock from a period to a frequency */
temp = 1000000000000ULL;
do_div(temp, pixclock);
freq = temp;
/*
* 'pxclk' is the ratio of the platform clock to the pixel clock.
* This number is programmed into the CLKDVDR register, and the valid
* range of values is 2-255.
*/
pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
pxclk = clamp_t(u32, pxclk, 2, 255);
/* Disable the pixel clock, and set it to non-inverted and no delay */
clrbits32(&guts->clkdvdr,
CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
/* Enable the clock and set the pxclk */
setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
iounmap(guts);
}
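/* Worked example (illustrative only): the period-to-divider conversion
 * performed by p1022ds_set_pixel_clock() above. A 40000ps pixclock is
 * 25 MHz; with a hypothetical 400 MHz platform clock the divider comes
 * out as 16, clamped to the valid CLKDVDR range of 2-255.
 */
static inline u32 example_pxclk_divider(unsigned int pixclock_ps,
					unsigned long sys_freq_hz)
{
	u64 freq = 1000000000000ULL;	/* picoseconds per second */

	do_div(freq, pixclock_ps);	/* 40000ps -> 25000000 Hz */

	return clamp_t(u32, DIV_ROUND_CLOSEST(sys_freq_hz,
					      (unsigned long)freq),
		       2, 255);		/* 400MHz / 25MHz -> 16 */
}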
/**
* p1022ds_valid_monitor_port: set the monitor port for sysfs
*/
enum fsl_diu_monitor_port
p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
{
switch (port) {
case FSL_DIU_PORT_DVI:
case FSL_DIU_PORT_LVDS:
return port;
default:
return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */
}
}
#endif
void __init p1022_ds_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
}
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
/*
* Disables a node in the device tree.
*
* This function is called before kmalloc() is available, so the 'new' object
* should be allocated in the global area. The easiest way is to do that is
* to allocate one static local variable for each call to this function.
*/
static void __init disable_one_node(struct device_node *np, struct property *new)
{
struct property *old;
old = of_find_property(np, new->name, NULL);
if (old)
prom_update_property(np, new, old);
else
prom_add_property(np, new);
}
/* TRUE if there is a "video=fslfb" command-line parameter. */
static bool fslfb;
/*
* Search for a "video=fslfb" command-line parameter, and set 'fslfb' to
* true if we find it.
*
* We need to use early_param() instead of __setup() because the normal
* __setup() gets called too late. However, early_param() gets called very
* early, before the device tree is unflattened, so all we can do now is set a
* global variable. Later on, p1022_ds_setup_arch() will use that variable
* to determine if we need to update the device tree.
*/
static int __init early_video_setup(char *options)
{
fslfb = (strncmp(options, "fslfb:", 6) == 0);
return 0;
}
early_param("video", early_video_setup);
#endif
/*
* Setup the architecture
*/
static void __init p1022_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
#endif
dma_addr_t max = 0xffffffff;
if (ppc_md.progress)
ppc_md.progress("p1022_ds_setup_arch()", 0);
#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "fsl,p1022-pcie") {
struct resource rsrc;
struct pci_controller *hose;
of_address_to_resource(np, 0, &rsrc);
if ((rsrc.start & 0xfffff) == 0x8000)
fsl_add_bridge(np, 1);
else
fsl_add_bridge(np, 0);
hose = pci_find_hose_for_OF_device(np);
max = min(max, hose->dma_window_base_cur +
hose->dma_window_size);
}
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diu_ops.get_pixel_format = p1022ds_get_pixel_format;
diu_ops.set_gamma_table = p1022ds_set_gamma_table;
diu_ops.set_monitor_port = p1022ds_set_monitor_port;
diu_ops.set_pixel_clock = p1022ds_set_pixel_clock;
diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
/*
* Disable the NOR flash node if there is a video=fslfb... command-line
* parameter. When the DIU is active, NOR flash is unavailable, so we
* have to disable the node before the MTD driver loads.
*/
if (fslfb) {
struct device_node *np =
of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
if (np) {
np = of_find_compatible_node(np, NULL, "cfi-flash");
if (np) {
static struct property nor_status = {
.name = "status",
.value = "disabled",
.length = sizeof("disabled"),
};
pr_info("p1022ds: disabling %s node\n",
np->full_name);
disable_one_node(np, &nor_status);
of_node_put(np);
}
}
}
#endif
mpc85xx_smp_init();
#ifdef CONFIG_SWIOTLB
if (memblock_end_of_DRAM() > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#endif
pr_info("Freescale P1022 DS reference board\n");
}
machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p1022_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "fsl,p1022ds");
}
define_machine(p1022_ds) {
.name = "P1022 DS",
.probe = p1022_ds_probe,
.setup_arch = p1022_ds_setup_arch,
.init_IRQ = p1022_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
blueskycoco/sq-linux | drivers/s390/crypto/zcrypt_pcicc.c | 664 | 18945 | /*
* linux/drivers/s390/crypto/zcrypt_pcicc.c
*
* zcrypt 2.1.0
*
* Copyright (C) 2001, 2006 IBM Corporation
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_pcicc.h"
#include "zcrypt_cca_key.h"
#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
/*
* PCICC cards need a speed rating of 0. This keeps them at the end of
* the zcrypt device list (see zcrypt_api.c). PCICC cards are only
* used if no other cards are present because they are slow and can only
* cope with PKCS12 padded requests. The logic is queer. PKCS11 padded
* requests are rejected. The modexpo function encrypts PKCS12 padded data
* and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
* that it's encrypted PKCS12 data. The modexpo_crt function always decrypts
* the data on the assumption that it's PKCS12 encrypted data.
*/
#define PCICC_SPEED_RATING 0
#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
#define PCICC_CLEANUP_TIME (15*HZ)
static struct ap_device_id zcrypt_pcicc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
{ /* end of list */ },
};
#ifndef CONFIG_ZCRYPT_MONOLITHIC
MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
"Copyright 2001, 2006 IBM Corporation");
MODULE_LICENSE("GPL");
#endif
static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
struct ap_message *);
static struct ap_driver zcrypt_pcicc_driver = {
.probe = zcrypt_pcicc_probe,
.remove = zcrypt_pcicc_remove,
.receive = zcrypt_pcicc_receive,
.ids = zcrypt_pcicc_ids,
.request_timeout = PCICC_CLEANUP_TIME,
};
/**
* The following is used to initialize the CPRB passed to the PCICC card
* in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain. Note that all three
* fields are *little*-endian. Actually, everything about this interface
* is ascii/little-endian, since the device has 'Intel inside'.
*
* The CPRB is followed immediately by the parm block.
* The parm block contains:
* - function code ('PD' 0x5044 or 'PK' 0x504B)
* - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
* - VUD block
*/
static struct CPRB static_cprb = {
.cprb_len = __constant_cpu_to_le16(0x0070),
.cprb_ver_id = 0x41,
.func_id = {0x54,0x32},
.checkpoint_flag= 0x01,
.svr_namel = __constant_cpu_to_le16(0x0008),
.svr_name = {'I','C','S','F',' ',' ',' ',' '}
};
/**
* Check the message for PKCS11 padding.
*/
static inline int is_PKCS11_padded(unsigned char *buffer, int length)
{
int i;
if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
return 0;
for (i = 2; i < length; i++)
if (buffer[i] != 0xFF)
break;
if (i < 10 || i == length)
return 0;
if (buffer[i] != 0x00)
return 0;
return 1;
}
/**
* Check the message for PKCS12 padding.
*/
static inline int is_PKCS12_padded(unsigned char *buffer, int length)
{
int i;
if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
return 0;
for (i = 2; i < length; i++)
if (buffer[i] == 0x00)
break;
if ((i < 10) || (i == length))
return 0;
if (buffer[i] != 0x00)
return 0;
return 1;
}
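/*
* Illustrative self-check (an assumption, not part of the original
* driver): a minimal PKCS#1 block type 02 message is 0x00 0x02, at
* least eight nonzero padding bytes, a 0x00 separator and the
* payload. Such a buffer passes is_PKCS12_padded() and fails
* is_PKCS11_padded(); the function name below is hypothetical.
*/
static inline void zcrypt_pcicc_padding_example(void)
{
static unsigned char blk[] = {
0x00, 0x02, /* block type 02 */
0x11, 0x22, 0x33, 0x44, /* eight nonzero */
0x55, 0x66, 0x77, 0x88, /* padding bytes */
0x00, /* separator */
'd', 'a', 't', 'a' /* payload */
};
/* evaluates to 1: valid PKCS12-style padding */
(void) is_PKCS12_padded(blk, sizeof(blk));
/* evaluates to 0: the block type is 0x02, not 0x01 */
(void) is_PKCS11_padded(blk, sizeof(blk));
}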
/**
* Convert an ICAMEX message to a type6 MEX message.
*
* @zdev: crypto device pointer
* @ap_msg: crypto request pointer
* @mex: pointer to user input data
*
* Returns 0 on success or a negative error code on failure.
*/
static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
static struct type6_hdr static_type6_hdr = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
.function_code = {'P','K'},
};
static struct function_and_rules_block static_pke_function_and_rules ={
.function_code = {'P','K'},
.ulen = __constant_cpu_to_le16(10),
.only_rule = {'P','K','C','S','-','1','.','2'}
};
struct {
struct type6_hdr hdr;
struct CPRB cprb;
struct function_and_rules_block fr;
unsigned short length;
char text[0];
} __attribute__((packed)) *msg = ap_msg->message;
int vud_len, pad_len, size;
/* VUD.ciphertext */
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
return -EFAULT;
if (is_PKCS11_padded(msg->text, mex->inputdatalength))
return -EINVAL;
/* static message header and f&r */
msg->hdr = static_type6_hdr;
msg->fr = static_pke_function_and_rules;
if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
/* strip the padding and adjust the data length */
pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
if (pad_len <= 9 || pad_len >= mex->inputdatalength)
return -ENODEV;
vud_len = mex->inputdatalength - pad_len;
memmove(msg->text, msg->text + pad_len, vud_len);
msg->length = cpu_to_le16(vud_len + 2);
/* Set up key after the variable length text. */
size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
if (size < 0)
return size;
size += sizeof(*msg) + vud_len; /* total size of msg */
} else {
vud_len = mex->inputdatalength;
msg->length = cpu_to_le16(2 + vud_len);
msg->hdr.function_code[1] = 'D';
msg->fr.function_code[1] = 'D';
/* Set up key after the variable length text. */
size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
if (size < 0)
return size;
size += sizeof(*msg) + vud_len; /* total size of msg */
}
/* message header, cprb and f&r */
msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprb = static_cprb;
msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
sizeof(msg->cprb));
msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
ap_msg->length = (size + 3) & -4;
return 0;
}
/**
* Convert an ICACRT message to a type6 CRT message.
*
* @zdev: crypto device pointer
* @ap_msg: crypto request pointer
* @crt: pointer to user input data
*
* Returns 0 on success or a negative error code on failure.
*/
static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
static struct type6_hdr static_type6_hdr = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
.function_code = {'P','D'},
};
static struct function_and_rules_block static_pkd_function_and_rules ={
.function_code = {'P','D'},
.ulen = __constant_cpu_to_le16(10),
.only_rule = {'P','K','C','S','-','1','.','2'}
};
struct {
struct type6_hdr hdr;
struct CPRB cprb;
struct function_and_rules_block fr;
unsigned short length;
char text[0];
} __attribute__((packed)) *msg = ap_msg->message;
int size;
/* VUD.ciphertext */
msg->length = cpu_to_le16(2 + crt->inputdatalength);
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
return -EFAULT;
if (is_PKCS11_padded(msg->text, crt->inputdatalength))
return -EINVAL;
/* Set up key after the variable length text. */
size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
if (size < 0)
return size;
size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
/* message header, cprb and f&r */
msg->hdr = static_type6_hdr;
msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprb = static_cprb;
msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
msg->cprb.req_parml = msg->cprb.rpl_parml =
cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
msg->fr = static_pkd_function_and_rules;
ap_msg->length = (size + 3) & -4;
return 0;
}
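/*
* Note on the length arithmetic above (an illustration, not from the
* original source): "(size + 3) & -4" rounds size up to the next
* multiple of four, since -4 is ...11111100 in two's complement.
* For example 0x71 + 3 = 0x74 masks to 0x74, while 0x74 + 3 = 0x77
* masks back down to 0x74. The same idiom sizes hdr.ToCardLen1 and
* ap_msg->length in both message builders.
*/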
/**
* Copy results from a type 86 reply message back to user space.
*
* @zdev: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
struct type86_reply {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRB cprb;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
unsigned short length;
char text[0];
} __attribute__((packed));
static int convert_type86(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
static unsigned char static_pad[] = {
0x00,0x02,
0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
};
struct type86_reply *msg = reply->message;
unsigned short service_rc, service_rs;
unsigned int reply_len, pad_len;
char *data;
service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
if (unlikely(service_rc != 0)) {
service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
if (service_rc == 8 && service_rs == 66)
return -EINVAL;
if (service_rc == 8 && service_rs == 65)
return -EINVAL;
if (service_rc == 8 && service_rs == 770) {
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
if (service_rc == 8 && service_rs == 783) {
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
reply_len = le16_to_cpu(msg->length) - 2;
if (reply_len > outputdatalength)
return -EINVAL;
/*
* For all encipher requests, the length of the ciphertext (reply_len)
* will always equal the modulus length. For MEX decipher requests
* the output needs to get padded. Minimum pad size is 10.
*
* Currently, the cases where padding will be added are:
* - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
* ZERO-PAD and CRT is only supported for PKD requests)
* - PCICC, always
*/
pad_len = outputdatalength - reply_len;
if (pad_len > 0) {
if (pad_len < 10)
return -EINVAL;
/* 'restore' padding left in the PCICC/PCIXCC card. */
if (copy_to_user(outputdata, static_pad, pad_len - 1))
return -EFAULT;
if (put_user(0, outputdata + pad_len - 1))
return -EFAULT;
}
/* Copy the crypto response to user space. */
if (copy_to_user(outputdata + pad_len, data, reply_len))
return -EFAULT;
return 0;
}
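/*
* Worked example for the padding restore above (illustrative): with a
* 2048-bit key, outputdatalength == 256. If the card returns
* reply_len == 245 cleartext bytes, pad_len == 11: bytes 0-9 of the
* output come from static_pad, byte 10 is the 0x00 separator, and the
* 245 reply bytes follow from offset 11.
*/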
static int convert_response(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return convert_error(zdev, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return convert_error(zdev, reply);
if (msg->cprb.cprb_ver_id == 0x01)
return convert_type86(zdev, reply,
outputdata, outputdatalength);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
/**
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
* @ap_dev: pointer to the AP device
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
struct ap_message *msg,
struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct type86_reply *t86r;
int length;
/* Copy the reply message to the request message buffer. */
if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
goto out;
}
t86r = reply->message;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprb.cprb_ver_id == 0x01) {
length = sizeof(struct type86_reply) + t86r->length - 2;
length = min(PCICC_MAX_RESPONSE_SIZE, length);
memcpy(msg->message, reply->message, length);
} else
memcpy(msg->message, reply->message, sizeof error_reply);
out:
complete((struct completion *) msg->private);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
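/*
* Note (an observation, not from the original source): the psmid
* built in the functions below combines the caller's pid with this
* sequence counter, e.g. pid 0x1234 and zcrypt_step 5 give psmid
* 0x0000123400000005, so the AP bus can match the asynchronous reply
* to the request that is waiting on the completion.
*/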
/**
* The request distributor calls this function if it picked the PCICC
* device to handle a modexpo request.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCICC device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
struct completion work;
int rc;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.length = PAGE_SIZE;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0)
rc = convert_response(zdev, &ap_msg, mex->outputdata,
mex->outputdatalength);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* The request distributor calls this function if it picked the PCICC
* device to handle a modexpo_crt request.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCICC device to the request distributor
* @crt: pointer to the modexpo_crt request buffer
*/
static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
struct completion work;
int rc;
ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.length = PAGE_SIZE;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0)
rc = convert_response(zdev, &ap_msg, crt->outputdata,
crt->outputdatalength);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* The crypto operations for a PCICC card.
*/
static struct zcrypt_ops zcrypt_pcicc_ops = {
.rsa_modexpo = zcrypt_pcicc_modexpo,
.rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
};
/**
* Probe function for PCICC cards. It always accepts the AP device
* since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev;
int rc;
zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
zdev->ops = &zcrypt_pcicc_ops;
zdev->online = 1;
zdev->user_space_type = ZCRYPT_PCICC;
zdev->type_string = "PCICC";
zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
zdev->speed_rating = PCICC_SPEED_RATING;
zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
ap_dev->reply = &zdev->reply;
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc)
goto out_free;
return 0;
out_free:
ap_dev->private = NULL;
zcrypt_device_free(zdev);
return rc;
}
/**
* This is called to remove the extended PCICC driver information
* if an AP device is removed.
*/
static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev = ap_dev->private;
zcrypt_device_unregister(zdev);
}
int __init zcrypt_pcicc_init(void)
{
return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
}
void zcrypt_pcicc_exit(void)
{
ap_driver_unregister(&zcrypt_pcicc_driver);
}
#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(zcrypt_pcicc_init);
module_exit(zcrypt_pcicc_exit);
#endif
| gpl-2.0 |
slz/samsung-exhibit-ii-kernel | fs/befs/linuxvfs.c | 1176 | 24525 | /*
* linux/fs/befs/linuxvfs.c
*
* Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/nls.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/parser.h>
#include <linux/namei.h>
#include "befs.h"
#include "btree.h"
#include "inode.h"
#include "datastream.h"
#include "super.h"
#include "io.h"
MODULE_DESCRIPTION("BeOS File System (BeFS) driver");
MODULE_AUTHOR("Will Dyson");
MODULE_LICENSE("GPL");
/* The units the vfs expects inode->i_blocks to be in */
#define VFS_BLOCK_SIZE 512
static int befs_readdir(struct file *, void *, filldir_t);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int befs_readpage(struct file *file, struct page *page);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *);
static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_destroy_inode(struct inode *inode);
static int befs_init_inodecache(void);
static void befs_destroy_inodecache(void);
static void *befs_follow_link(struct dentry *, struct nameidata *);
static void befs_put_link(struct dentry *, struct nameidata *, void *);
static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
static int befs_statfs(struct dentry *, struct kstatfs *);
static int parse_options(char *, befs_mount_options *);
static const struct super_operations befs_sops = {
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
.destroy_inode = befs_destroy_inode, /* deallocate an inode */
.put_super = befs_put_super, /* uninit super */
.statfs = befs_statfs, /* statfs */
.remount_fs = befs_remount,
.show_options = generic_show_options,
};
/* slab cache for befs_inode_info objects */
static struct kmem_cache *befs_inode_cachep;
static const struct file_operations befs_dir_operations = {
.read = generic_read_dir,
.readdir = befs_readdir,
.llseek = generic_file_llseek,
};
static const struct inode_operations befs_dir_inode_operations = {
.lookup = befs_lookup,
};
static const struct address_space_operations befs_aops = {
.readpage = befs_readpage,
.sync_page = block_sync_page,
.bmap = befs_bmap,
};
static const struct inode_operations befs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = befs_follow_link,
.put_link = befs_put_link,
};
/*
* Called by generic_file_read() to read a page of data
*
* In turn, simply calls a generic block read function and
* passes it the address of befs_get_block, for mapping file
* positions to disk blocks.
*/
static int
befs_readpage(struct file *file, struct page *page)
{
return block_read_full_page(page, befs_get_block);
}
static sector_t
befs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, befs_get_block);
}
/*
* Generic function to map a file position (block) to a
* disk offset (passed back in bh_result).
*
* Used by many higher level functions.
*
* Calls befs_fblock2brun() in datastream.c to do the real work.
*
* -WD 10-26-01
*/
static int
befs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
befs_block_run run = BAD_IADDR;
int res = 0;
ulong disk_off;
befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld",
inode->i_ino, block);
if (block < 0) {
befs_error(sb, "befs_get_block() was asked for a block "
"number less than zero: block %ld in inode %lu",
block, inode->i_ino);
return -EIO;
}
if (create) {
befs_error(sb, "befs_get_block() was asked to write to "
"block %ld in inode %lu", block, inode->i_ino);
return -EPERM;
}
res = befs_fblock2brun(sb, ds, block, &run);
if (res != BEFS_OK) {
befs_error(sb,
"<--- befs_get_block() for inode %lu, block "
"%ld ERROR", inode->i_ino, block);
return -EFBIG;
}
disk_off = (ulong) iaddr2blockno(sb, &run);
map_bh(bh_result, inode->i_sb, disk_off);
befs_debug(sb, "<--- befs_get_block() for inode %lu, block %ld, "
"disk address %lu", inode->i_ino, block, disk_off);
return 0;
}
static struct dentry *
befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = NULL;
struct super_block *sb = dir->i_sb;
befs_data_stream *ds = &BEFS_I(dir)->i_data.ds;
befs_off_t offset;
int ret;
int utfnamelen;
char *utfname;
const char *name = dentry->d_name.name;
befs_debug(sb, "---> befs_lookup() "
"name %s inode %ld", dentry->d_name.name, dir->i_ino);
/* Convert to UTF-8 */
if (BEFS_SB(sb)->nls) {
ret =
befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen);
if (ret < 0) {
befs_debug(sb, "<--- befs_lookup() ERROR");
return ERR_PTR(ret);
}
ret = befs_btree_find(sb, ds, utfname, &offset);
kfree(utfname);
} else {
ret = befs_btree_find(sb, ds, dentry->d_name.name, &offset);
}
if (ret == BEFS_BT_NOT_FOUND) {
befs_debug(sb, "<--- befs_lookup() %s not found",
dentry->d_name.name);
return ERR_PTR(-ENOENT);
} else if (ret != BEFS_OK || offset == 0) {
befs_warning(sb, "<--- befs_lookup() Error");
return ERR_PTR(-ENODATA);
}
inode = befs_iget(dir->i_sb, (ino_t) offset);
if (IS_ERR(inode))
return ERR_CAST(inode);
d_add(dentry, inode);
befs_debug(sb, "<--- befs_lookup()");
return NULL;
}
static int
befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block *sb = inode->i_sb;
befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
befs_off_t value;
int result;
size_t keysize;
unsigned char d_type;
char keybuf[BEFS_NAME_LEN + 1];
char *nlsname;
int nlsnamelen;
const char *dirname = filp->f_path.dentry->d_name.name;
befs_debug(sb, "---> befs_readdir() "
"name %s, inode %ld, filp->f_pos %Ld",
dirname, inode->i_ino, filp->f_pos);
result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1,
keybuf, &keysize, &value);
if (result == BEFS_ERR) {
befs_debug(sb, "<--- befs_readdir() ERROR");
befs_error(sb, "IO error reading %s (inode %lu)",
dirname, inode->i_ino);
return -EIO;
} else if (result == BEFS_BT_END) {
befs_debug(sb, "<--- befs_readdir() END");
return 0;
} else if (result == BEFS_BT_EMPTY) {
befs_debug(sb, "<--- befs_readdir() Empty directory");
return 0;
}
d_type = DT_UNKNOWN;
/* Convert to NLS */
if (BEFS_SB(sb)->nls) {
result =
befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
if (result < 0) {
befs_debug(sb, "<--- befs_readdir() ERROR");
return result;
}
result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos,
(ino_t) value, d_type);
kfree(nlsname);
} else {
result = filldir(dirent, keybuf, keysize, filp->f_pos,
(ino_t) value, d_type);
}
filp->f_pos++;
befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
return 0;
}
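/*
* Note (an observation, not from the original source): this readdir
* emits at most one directory entry per call, using filp->f_pos as
* the btree entry index and advancing it by one each time, until
* befs_btree_read() reports BEFS_BT_END.
*/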
static struct inode *
befs_alloc_inode(struct super_block *sb)
{
struct befs_inode_info *bi;
bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
GFP_KERNEL);
if (!bi)
return NULL;
return &bi->vfs_inode;
}
static void
befs_destroy_inode(struct inode *inode)
{
kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
}
static void init_once(void *foo)
{
struct befs_inode_info *bi = (struct befs_inode_info *) foo;
inode_init_once(&bi->vfs_inode);
}
static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
{
struct buffer_head *bh = NULL;
befs_inode *raw_inode = NULL;
befs_sb_info *befs_sb = BEFS_SB(sb);
befs_inode_info *befs_ino = NULL;
struct inode *inode;
long ret = -EIO;
befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino);
inode = iget_locked(sb, ino);
if (IS_ERR(inode))
return inode;
if (!(inode->i_state & I_NEW))
return inode;
befs_ino = BEFS_I(inode);
/* convert from vfs's inode number to befs's inode number */
befs_ino->i_inode_num = blockno2iaddr(sb, inode->i_ino);
befs_debug(sb, " real inode number [%u, %hu, %hu]",
befs_ino->i_inode_num.allocation_group,
befs_ino->i_inode_num.start, befs_ino->i_inode_num.len);
bh = befs_bread(sb, inode->i_ino);
if (!bh) {
befs_error(sb, "unable to read inode block - "
"inode = %lu", inode->i_ino);
goto unacquire_none;
}
raw_inode = (befs_inode *) bh->b_data;
befs_dump_inode(sb, raw_inode);
if (befs_check_inode(sb, raw_inode, inode->i_ino) != BEFS_OK) {
befs_error(sb, "Bad inode: %lu", inode->i_ino);
goto unacquire_bh;
}
inode->i_mode = (umode_t) fs32_to_cpu(sb, raw_inode->mode);
/*
* Set the uid and gid. BeOS is a single-user OS, so the on-disk
* values can be overridden with the "uid" and "gid" mount options.
*/
inode->i_uid = befs_sb->mount_opts.use_uid ?
befs_sb->mount_opts.uid : (uid_t) fs32_to_cpu(sb, raw_inode->uid);
inode->i_gid = befs_sb->mount_opts.use_gid ?
befs_sb->mount_opts.gid : (gid_t) fs32_to_cpu(sb, raw_inode->gid);
inode->i_nlink = 1;
/*
* BEFS's time is 64 bits, but current VFS is 32 bits...
* BeFS doesn't have an access time or an inode change time, and the
* VFS doesn't have a creation time.
* Also, the lower 16 bits of the last_modified_time and
* create_time are just a counter to help ensure uniqueness
* for indexing purposes. (PFD, page 54)
*/
inode->i_mtime.tv_sec =
fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
inode->i_ctime = inode->i_mtime;
inode->i_atime = inode->i_mtime;
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
befs_ino->i_attribute = fsrun_to_cpu(sb, raw_inode->attributes);
befs_ino->i_flags = fs32_to_cpu(sb, raw_inode->flags);
if (S_ISLNK(inode->i_mode) && !(befs_ino->i_flags & BEFS_LONG_SYMLINK)){
inode->i_size = 0;
inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE;
strncpy(befs_ino->i_data.symlink, raw_inode->data.symlink,
BEFS_SYMLINK_LEN - 1);
befs_ino->i_data.symlink[BEFS_SYMLINK_LEN - 1] = '\0';
} else {
int num_blks;
befs_ino->i_data.ds =
fsds_to_cpu(sb, raw_inode->data.datastream);
num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds);
inode->i_blocks =
num_blks * (befs_sb->block_size / VFS_BLOCK_SIZE);
inode->i_size = befs_ino->i_data.ds.size;
}
inode->i_mapping->a_ops = &befs_aops;
if (S_ISREG(inode->i_mode)) {
inode->i_fop = &generic_ro_fops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &befs_dir_inode_operations;
inode->i_fop = &befs_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &befs_symlink_inode_operations;
} else {
befs_error(sb, "Inode %lu is not a regular file, "
"directory or symlink. THAT IS WRONG! BeFS has no "
"on disk special files", inode->i_ino);
goto unacquire_bh;
}
brelse(bh);
befs_debug(sb, "<--- befs_read_inode()");
unlock_new_inode(inode);
return inode;
unacquire_bh:
brelse(bh);
unacquire_none:
iget_failed(inode);
befs_debug(sb, "<--- befs_read_inode() - Bad inode");
return ERR_PTR(ret);
}
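/*
* Worked example for the timestamp conversion above (illustrative): a
* raw last_modified_time of (1000000000ULL << 16) | 0x0007 shifts
* down to 1000000000 seconds; the low 16 bits (here 0x0007) are only
* a uniqueness counter and are discarded.
*/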
/* Initialize the inode cache. Called at fs setup.
*
* Taken from NFS implementation by Al Viro.
*/
static int
befs_init_inodecache(void)
{
befs_inode_cachep = kmem_cache_create("befs_inode_cache",
sizeof (struct befs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (befs_inode_cachep == NULL) {
printk(KERN_ERR "befs_init_inodecache: "
"Couldn't initialize inode slabcache\n");
return -ENOMEM;
}
return 0;
}
/* Called at fs teardown.
*
* Taken from NFS implementation by Al Viro.
*/
static void
befs_destroy_inodecache(void)
{
kmem_cache_destroy(befs_inode_cachep);
}
/*
* The inode of a symbolic link is special: the data stream area holds
* the link name itself, unless the LONG_SYMLINK flag is set, in which
* case the name must be read from the data stream blocks.
*/
static void *
befs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
char *link;
if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
struct super_block *sb = dentry->d_sb;
befs_data_stream *data = &befs_ino->i_data.ds;
befs_off_t len = data->size;
befs_debug(sb, "Follow long symlink");
link = kmalloc(len, GFP_NOFS);
if (!link) {
link = ERR_PTR(-ENOMEM);
} else if (befs_read_lsymlink(sb, data, link, len) != len) {
kfree(link);
befs_error(sb, "Failed to read entire long symlink");
link = ERR_PTR(-EIO);
} else {
link[len - 1] = '\0';
}
} else {
link = befs_ino->i_data.symlink;
}
nd_set_link(nd, link);
return NULL;
}
static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
char *link = nd_get_link(nd);
if (!IS_ERR(link))
kfree(link);
}
}
/*
* UTF-8 to NLS charset convert routine
*
*
* Changed 8/10/01 by Will Dyson. Now use uni2char() / char2uni() rather than
* the nls tables directly
*/
static int
befs_utf2nls(struct super_block *sb, const char *in,
int in_len, char **out, int *out_len)
{
struct nls_table *nls = BEFS_SB(sb)->nls;
int i, o;
unicode_t uni;
int unilen, utflen;
char *result;
/* The utf8->nls conversion won't make the final nls string bigger
* than the utf8 one, but if the string is pure ASCII both have the
* same length, so an extra byte is needed to store the trailing \0
*/
int maxlen = in_len + 1;
befs_debug(sb, "---> utf2nls()");
if (!nls) {
befs_error(sb, "befs_utf2nls called with no NLS table loaded");
return -EINVAL;
}
*out = result = kmalloc(maxlen, GFP_NOFS);
if (!*out) {
befs_error(sb, "befs_utf2nls() cannot allocate memory");
*out_len = 0;
return -ENOMEM;
}
for (i = o = 0; i < in_len; i += utflen, o += unilen) {
/* convert from UTF-8 to Unicode */
utflen = utf8_to_utf32(&in[i], in_len - i, &uni);
if (utflen < 0)
goto conv_err;
/* convert from Unicode to nls */
if (uni > MAX_WCHAR_T)
goto conv_err;
unilen = nls->uni2char(uni, &result[o], in_len - o);
if (unilen < 0)
goto conv_err;
}
result[o] = '\0';
*out_len = o;
befs_debug(sb, "<--- utf2nls()");
return o;
conv_err:
befs_error(sb, "Name using character set %s contains a character that "
"cannot be converted to unicode.", nls->charset);
befs_debug(sb, "<--- utf2nls()");
kfree(result);
return -EILSEQ;
}
/**
* befs_nls2utf - Convert an NLS string to UTF-8 encoding
* @sb: Superblock
* @in: Input string buffer in NLS format
* @in_len: Length of input string in bytes
* @out: The output string in UTF-8 format
* @out_len: Length of the output buffer
*
* Converts input string @in, which is in the format of the loaded NLS map,
* into a utf8 string.
*
* The destination string @out is allocated by this function and the caller is
* responsible for freeing it with kfree().
*
* On return, *@out_len is the length of @out in bytes.
*
* On success, the return value is the number of utf8 characters written to
* the output buffer @out.
*
* On failure, a negative number corresponding to the error code is returned.
*/
static int
befs_nls2utf(struct super_block *sb, const char *in,
int in_len, char **out, int *out_len)
{
struct nls_table *nls = BEFS_SB(sb)->nls;
int i, o;
wchar_t uni;
int unilen, utflen;
char *result;
/* Some NLS characters translate to UTF-8 sequences of up to three
* bytes, and an additional byte is needed to store the final \0
* in special cases */
int maxlen = (3 * in_len) + 1;
befs_debug(sb, "---> nls2utf()\n");
if (!nls) {
befs_error(sb, "befs_nls2utf called with no NLS table loaded.");
return -EINVAL;
}
*out = result = kmalloc(maxlen, GFP_NOFS);
if (!*out) {
befs_error(sb, "befs_nls2utf() cannot allocate memory");
*out_len = 0;
return -ENOMEM;
}
for (i = o = 0; i < in_len; i += unilen, o += utflen) {
/* convert from nls to unicode */
unilen = nls->char2uni(&in[i], in_len - i, &uni);
if (unilen < 0)
goto conv_err;
/* convert from unicode to UTF-8 */
utflen = utf32_to_utf8(uni, &result[o], 3);
if (utflen <= 0)
goto conv_err;
}
result[o] = '\0';
*out_len = o;
befs_debug(sb, "<--- nls2utf()");
return i;
conv_err:
befs_error(sb, "Name using character set %s contains a character that "
"cannot be converted to unicode.", nls->charset);
befs_debug(sb, "<--- nls2utf()");
kfree(result);
return -EILSEQ;
}
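/*
* Sizing note for the converters above (illustrative): with, say, the
* iso8859-1 NLS table, the single byte 0xE9 (e with acute accent)
* becomes the two-byte UTF-8 sequence 0xC3 0xA9; no character can
* expand past three bytes here because utf32_to_utf8() is called with
* a limit of 3, which is why befs_nls2utf() allocates 3 * in_len + 1
* bytes.
*/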
/*
* Mount option tokens recognized by parse_options() below.
*/
enum {
Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
};
static const match_table_t befs_tokens = {
{Opt_uid, "uid=%d"},
{Opt_gid, "gid=%d"},
{Opt_charset, "iocharset=%s"},
{Opt_debug, "debug"},
{Opt_err, NULL}
};
static int
parse_options(char *options, befs_mount_options * opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
/* Initialize options */
opts->uid = 0;
opts->gid = 0;
opts->use_uid = 0;
opts->use_gid = 0;
opts->iocharset = NULL;
opts->debug = 0;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, befs_tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return 0;
if (option < 0) {
printk(KERN_ERR "BeFS: Invalid uid %d, "
"using default\n", option);
break;
}
opts->uid = option;
opts->use_uid = 1;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
if (option < 0) {
printk(KERN_ERR "BeFS: Invalid gid %d, "
"using default\n", option);
break;
}
opts->gid = option;
opts->use_gid = 1;
break;
case Opt_charset:
kfree(opts->iocharset);
opts->iocharset = match_strdup(&args[0]);
if (!opts->iocharset) {
printk(KERN_ERR "BeFS: allocation failure for "
"iocharset string\n");
return 0;
}
break;
case Opt_debug:
opts->debug = 1;
break;
default:
printk(KERN_ERR "BeFS: Unrecognized mount option \"%s\" "
"or missing value\n", p);
return 0;
}
}
return 1;
}
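/*
* Example (illustrative, not from the original source): mounting with
* -o uid=1000,iocharset=utf8,debug leaves opts->uid == 1000,
* opts->use_uid == 1, opts->iocharset == "utf8" (allocated by
* match_strdup()) and opts->debug == 1. An unknown option or a
* malformed value makes parse_options() return 0 and the mount fail.
*/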
/* This function has the responsibility of getting the
* filesystem ready for unmounting.
* Basically, we free everything that we allocated in
* befs_read_inode
*/
static void
befs_put_super(struct super_block *sb)
{
kfree(BEFS_SB(sb)->mount_opts.iocharset);
BEFS_SB(sb)->mount_opts.iocharset = NULL;
unload_nls(BEFS_SB(sb)->nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
/* Allocate private field of the superblock, fill it.
*
* Finish filling the public superblock fields
* Make the root directory
* Load a set of NLS translations if needed.
*/
static int
befs_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head *bh;
befs_sb_info *befs_sb;
befs_super_block *disk_sb;
struct inode *root;
long ret = -EINVAL;
const unsigned long sb_block = 0;
const off_t x86_sb_off = 512;
save_mount_options(sb, data);
sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL);
if (sb->s_fs_info == NULL) {
printk(KERN_ERR
"BeFS(%s): Unable to allocate memory for private "
"portion of superblock. Bailing.\n", sb->s_id);
goto unacquire_none;
}
befs_sb = BEFS_SB(sb);
memset(befs_sb, 0, sizeof(befs_sb_info));
if (!parse_options((char *) data, &befs_sb->mount_opts)) {
befs_error(sb, "cannot parse mount options");
goto unacquire_priv_sbp;
}
befs_debug(sb, "---> befs_fill_super()");
#ifndef CONFIG_BEFS_RW
if (!(sb->s_flags & MS_RDONLY)) {
befs_warning(sb,
"No write support. Marking filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
#endif /* CONFIG_BEFS_RW */
/*
* Set dummy blocksize to read super block.
* Will be set to real fs blocksize later.
*
* Linux 2.4.10 and later refuse to read blocks smaller than
* the hardsect size for the device. But we also need to read at
* least 1k to get the second 512 bytes of the volume.
* -WD 10-26-01
*/
sb_min_blocksize(sb, 1024);
if (!(bh = sb_bread(sb, sb_block))) {
befs_error(sb, "unable to read superblock");
goto unacquire_priv_sbp;
}
/* account for offset of super block on x86 */
disk_sb = (befs_super_block *) bh->b_data;
if ((disk_sb->magic1 == BEFS_SUPER_MAGIC1_LE) ||
(disk_sb->magic1 == BEFS_SUPER_MAGIC1_BE)) {
befs_debug(sb, "Using PPC superblock location");
} else {
befs_debug(sb, "Using x86 superblock location");
disk_sb =
(befs_super_block *) ((void *) bh->b_data + x86_sb_off);
}
if (befs_load_sb(sb, disk_sb) != BEFS_OK)
goto unacquire_bh;
befs_dump_super_block(sb, disk_sb);
brelse(bh);
if (befs_check_sb(sb) != BEFS_OK)
goto unacquire_priv_sbp;
if (befs_sb->num_blocks > ~((sector_t)0)) {
befs_error(sb, "blocks count: %Lu "
"is larger than the host can use",
befs_sb->num_blocks);
goto unacquire_priv_sbp;
}
/*
* set up enough so that it can read an inode
* Fill in kernel superblock fields from private sb
*/
sb->s_magic = BEFS_SUPER_MAGIC;
/* Set real blocksize of fs */
sb_set_blocksize(sb, (ulong) befs_sb->block_size);
sb->s_op = &befs_sops;
root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto unacquire_priv_sbp;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
iput(root);
befs_error(sb, "get root inode failed");
goto unacquire_priv_sbp;
}
/* load nls library */
if (befs_sb->mount_opts.iocharset) {
befs_debug(sb, "Loading nls: %s",
befs_sb->mount_opts.iocharset);
befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset);
if (!befs_sb->nls) {
befs_warning(sb, "Cannot load nls %s"
" loading default nls",
befs_sb->mount_opts.iocharset);
befs_sb->nls = load_nls_default();
}
/* load default nls if none is specified in mount options */
} else {
befs_debug(sb, "Loading default nls");
befs_sb->nls = load_nls_default();
}
return 0;
/*****************/
unacquire_bh:
brelse(bh);
unacquire_priv_sbp:
kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
unacquire_none:
sb->s_fs_info = NULL;
return ret;
}
static int
befs_remount(struct super_block *sb, int *flags, char *data)
{
if (!(*flags & MS_RDONLY))
return -EINVAL;
return 0;
}
static int
befs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
befs_debug(sb, "---> befs_statfs()");
buf->f_type = BEFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = BEFS_SB(sb)->num_blocks;
buf->f_bfree = BEFS_SB(sb)->num_blocks - BEFS_SB(sb)->used_blocks;
buf->f_bavail = buf->f_bfree;
buf->f_files = 0; /* UNKNOWN */
buf->f_ffree = 0; /* UNKNOWN */
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = BEFS_NAME_LEN;
befs_debug(sb, "<--- befs_statfs()");
return 0;
}
static int
befs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name,
void *data, struct vfsmount *mnt)
{
return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super,
mnt);
}
static struct file_system_type befs_fs_type = {
.owner = THIS_MODULE,
.name = "befs",
.get_sb = befs_get_sb,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
static int __init
init_befs_fs(void)
{
int err;
printk(KERN_INFO "BeFS version: %s\n", BEFS_VERSION);
err = befs_init_inodecache();
if (err)
goto unacquire_none;
err = register_filesystem(&befs_fs_type);
if (err)
goto unacquire_inodecache;
return 0;
unacquire_inodecache:
befs_destroy_inodecache();
unacquire_none:
return err;
}
static void __exit
exit_befs_fs(void)
{
befs_destroy_inodecache();
unregister_filesystem(&befs_fs_type);
}
/*
Macros that typecheck the init and exit functions,
ensure that they are called at init and cleanup,
and eliminate warnings about unused functions.
*/
module_init(init_befs_fs)
module_exit(exit_befs_fs)
| gpl-2.0 |
aaronknister/linux-stable | drivers/media/tuners/tda18271-maps.c | 1176 | 45486 | /*
tda18271-maps.c - driver for the Philips / NXP TDA18271 silicon tuner
Copyright (C) 2007, 2008 Michael Krufky <mkrufky@linuxtv.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "tda18271-priv.h"
struct tda18271_pll_map {
u32 lomax;
u8 pd; /* post div */
u8 d; /* div */
};
struct tda18271_map {
u32 rfmax;
u8 val;
};
/*---------------------------------------------------------------------*/
static struct tda18271_pll_map tda18271c1_main_pll[] = {
{ .lomax = 32000, .pd = 0x5f, .d = 0xf0 },
{ .lomax = 35000, .pd = 0x5e, .d = 0xe0 },
{ .lomax = 37000, .pd = 0x5d, .d = 0xd0 },
{ .lomax = 41000, .pd = 0x5c, .d = 0xc0 },
{ .lomax = 44000, .pd = 0x5b, .d = 0xb0 },
{ .lomax = 49000, .pd = 0x5a, .d = 0xa0 },
{ .lomax = 54000, .pd = 0x59, .d = 0x90 },
{ .lomax = 61000, .pd = 0x58, .d = 0x80 },
{ .lomax = 65000, .pd = 0x4f, .d = 0x78 },
{ .lomax = 70000, .pd = 0x4e, .d = 0x70 },
{ .lomax = 75000, .pd = 0x4d, .d = 0x68 },
{ .lomax = 82000, .pd = 0x4c, .d = 0x60 },
{ .lomax = 89000, .pd = 0x4b, .d = 0x58 },
{ .lomax = 98000, .pd = 0x4a, .d = 0x50 },
{ .lomax = 109000, .pd = 0x49, .d = 0x48 },
{ .lomax = 123000, .pd = 0x48, .d = 0x40 },
{ .lomax = 131000, .pd = 0x3f, .d = 0x3c },
{ .lomax = 141000, .pd = 0x3e, .d = 0x38 },
{ .lomax = 151000, .pd = 0x3d, .d = 0x34 },
{ .lomax = 164000, .pd = 0x3c, .d = 0x30 },
{ .lomax = 179000, .pd = 0x3b, .d = 0x2c },
{ .lomax = 197000, .pd = 0x3a, .d = 0x28 },
{ .lomax = 219000, .pd = 0x39, .d = 0x24 },
{ .lomax = 246000, .pd = 0x38, .d = 0x20 },
{ .lomax = 263000, .pd = 0x2f, .d = 0x1e },
{ .lomax = 282000, .pd = 0x2e, .d = 0x1c },
{ .lomax = 303000, .pd = 0x2d, .d = 0x1a },
{ .lomax = 329000, .pd = 0x2c, .d = 0x18 },
{ .lomax = 359000, .pd = 0x2b, .d = 0x16 },
{ .lomax = 395000, .pd = 0x2a, .d = 0x14 },
{ .lomax = 438000, .pd = 0x29, .d = 0x12 },
{ .lomax = 493000, .pd = 0x28, .d = 0x10 },
{ .lomax = 526000, .pd = 0x1f, .d = 0x0f },
{ .lomax = 564000, .pd = 0x1e, .d = 0x0e },
{ .lomax = 607000, .pd = 0x1d, .d = 0x0d },
{ .lomax = 658000, .pd = 0x1c, .d = 0x0c },
{ .lomax = 718000, .pd = 0x1b, .d = 0x0b },
{ .lomax = 790000, .pd = 0x1a, .d = 0x0a },
{ .lomax = 877000, .pd = 0x19, .d = 0x09 },
{ .lomax = 987000, .pd = 0x18, .d = 0x08 },
{ .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
};
static struct tda18271_pll_map tda18271c2_main_pll[] = {
{ .lomax = 33125, .pd = 0x57, .d = 0xf0 },
{ .lomax = 35500, .pd = 0x56, .d = 0xe0 },
{ .lomax = 38188, .pd = 0x55, .d = 0xd0 },
{ .lomax = 41375, .pd = 0x54, .d = 0xc0 },
{ .lomax = 45125, .pd = 0x53, .d = 0xb0 },
{ .lomax = 49688, .pd = 0x52, .d = 0xa0 },
{ .lomax = 55188, .pd = 0x51, .d = 0x90 },
{ .lomax = 62125, .pd = 0x50, .d = 0x80 },
{ .lomax = 66250, .pd = 0x47, .d = 0x78 },
{ .lomax = 71000, .pd = 0x46, .d = 0x70 },
{ .lomax = 76375, .pd = 0x45, .d = 0x68 },
{ .lomax = 82750, .pd = 0x44, .d = 0x60 },
{ .lomax = 90250, .pd = 0x43, .d = 0x58 },
{ .lomax = 99375, .pd = 0x42, .d = 0x50 },
{ .lomax = 110375, .pd = 0x41, .d = 0x48 },
{ .lomax = 124250, .pd = 0x40, .d = 0x40 },
{ .lomax = 132500, .pd = 0x37, .d = 0x3c },
{ .lomax = 142000, .pd = 0x36, .d = 0x38 },
{ .lomax = 152750, .pd = 0x35, .d = 0x34 },
{ .lomax = 165500, .pd = 0x34, .d = 0x30 },
{ .lomax = 180500, .pd = 0x33, .d = 0x2c },
{ .lomax = 198750, .pd = 0x32, .d = 0x28 },
{ .lomax = 220750, .pd = 0x31, .d = 0x24 },
{ .lomax = 248500, .pd = 0x30, .d = 0x20 },
{ .lomax = 265000, .pd = 0x27, .d = 0x1e },
{ .lomax = 284000, .pd = 0x26, .d = 0x1c },
{ .lomax = 305500, .pd = 0x25, .d = 0x1a },
{ .lomax = 331000, .pd = 0x24, .d = 0x18 },
{ .lomax = 361000, .pd = 0x23, .d = 0x16 },
{ .lomax = 397500, .pd = 0x22, .d = 0x14 },
{ .lomax = 441500, .pd = 0x21, .d = 0x12 },
{ .lomax = 497000, .pd = 0x20, .d = 0x10 },
{ .lomax = 530000, .pd = 0x17, .d = 0x0f },
{ .lomax = 568000, .pd = 0x16, .d = 0x0e },
{ .lomax = 611000, .pd = 0x15, .d = 0x0d },
{ .lomax = 662000, .pd = 0x14, .d = 0x0c },
{ .lomax = 722000, .pd = 0x13, .d = 0x0b },
{ .lomax = 795000, .pd = 0x12, .d = 0x0a },
{ .lomax = 883000, .pd = 0x11, .d = 0x09 },
{ .lomax = 994000, .pd = 0x10, .d = 0x08 },
{ .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
};
static struct tda18271_pll_map tda18271c1_cal_pll[] = {
{ .lomax = 33000, .pd = 0xdd, .d = 0xd0 },
{ .lomax = 36000, .pd = 0xdc, .d = 0xc0 },
{ .lomax = 40000, .pd = 0xdb, .d = 0xb0 },
{ .lomax = 44000, .pd = 0xda, .d = 0xa0 },
{ .lomax = 49000, .pd = 0xd9, .d = 0x90 },
{ .lomax = 55000, .pd = 0xd8, .d = 0x80 },
{ .lomax = 63000, .pd = 0xd3, .d = 0x70 },
{ .lomax = 67000, .pd = 0xcd, .d = 0x68 },
{ .lomax = 73000, .pd = 0xcc, .d = 0x60 },
{ .lomax = 80000, .pd = 0xcb, .d = 0x58 },
{ .lomax = 88000, .pd = 0xca, .d = 0x50 },
{ .lomax = 98000, .pd = 0xc9, .d = 0x48 },
{ .lomax = 110000, .pd = 0xc8, .d = 0x40 },
{ .lomax = 126000, .pd = 0xc3, .d = 0x38 },
{ .lomax = 135000, .pd = 0xbd, .d = 0x34 },
{ .lomax = 147000, .pd = 0xbc, .d = 0x30 },
{ .lomax = 160000, .pd = 0xbb, .d = 0x2c },
{ .lomax = 176000, .pd = 0xba, .d = 0x28 },
{ .lomax = 196000, .pd = 0xb9, .d = 0x24 },
{ .lomax = 220000, .pd = 0xb8, .d = 0x20 },
{ .lomax = 252000, .pd = 0xb3, .d = 0x1c },
{ .lomax = 271000, .pd = 0xad, .d = 0x1a },
{ .lomax = 294000, .pd = 0xac, .d = 0x18 },
{ .lomax = 321000, .pd = 0xab, .d = 0x16 },
{ .lomax = 353000, .pd = 0xaa, .d = 0x14 },
{ .lomax = 392000, .pd = 0xa9, .d = 0x12 },
{ .lomax = 441000, .pd = 0xa8, .d = 0x10 },
{ .lomax = 505000, .pd = 0xa3, .d = 0x0e },
{ .lomax = 543000, .pd = 0x9d, .d = 0x0d },
{ .lomax = 589000, .pd = 0x9c, .d = 0x0c },
{ .lomax = 642000, .pd = 0x9b, .d = 0x0b },
{ .lomax = 707000, .pd = 0x9a, .d = 0x0a },
{ .lomax = 785000, .pd = 0x99, .d = 0x09 },
{ .lomax = 883000, .pd = 0x98, .d = 0x08 },
{ .lomax = 1010000, .pd = 0x93, .d = 0x07 },
{ .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
};
static struct tda18271_pll_map tda18271c2_cal_pll[] = {
{ .lomax = 33813, .pd = 0xdd, .d = 0xd0 },
{ .lomax = 36625, .pd = 0xdc, .d = 0xc0 },
{ .lomax = 39938, .pd = 0xdb, .d = 0xb0 },
{ .lomax = 43938, .pd = 0xda, .d = 0xa0 },
{ .lomax = 48813, .pd = 0xd9, .d = 0x90 },
{ .lomax = 54938, .pd = 0xd8, .d = 0x80 },
{ .lomax = 62813, .pd = 0xd3, .d = 0x70 },
{ .lomax = 67625, .pd = 0xcd, .d = 0x68 },
{ .lomax = 73250, .pd = 0xcc, .d = 0x60 },
{ .lomax = 79875, .pd = 0xcb, .d = 0x58 },
{ .lomax = 87875, .pd = 0xca, .d = 0x50 },
{ .lomax = 97625, .pd = 0xc9, .d = 0x48 },
{ .lomax = 109875, .pd = 0xc8, .d = 0x40 },
{ .lomax = 125625, .pd = 0xc3, .d = 0x38 },
{ .lomax = 135250, .pd = 0xbd, .d = 0x34 },
{ .lomax = 146500, .pd = 0xbc, .d = 0x30 },
{ .lomax = 159750, .pd = 0xbb, .d = 0x2c },
{ .lomax = 175750, .pd = 0xba, .d = 0x28 },
{ .lomax = 195250, .pd = 0xb9, .d = 0x24 },
{ .lomax = 219750, .pd = 0xb8, .d = 0x20 },
{ .lomax = 251250, .pd = 0xb3, .d = 0x1c },
{ .lomax = 270500, .pd = 0xad, .d = 0x1a },
{ .lomax = 293000, .pd = 0xac, .d = 0x18 },
{ .lomax = 319500, .pd = 0xab, .d = 0x16 },
{ .lomax = 351500, .pd = 0xaa, .d = 0x14 },
{ .lomax = 390500, .pd = 0xa9, .d = 0x12 },
{ .lomax = 439500, .pd = 0xa8, .d = 0x10 },
{ .lomax = 502500, .pd = 0xa3, .d = 0x0e },
{ .lomax = 541000, .pd = 0x9d, .d = 0x0d },
{ .lomax = 586000, .pd = 0x9c, .d = 0x0c },
{ .lomax = 639000, .pd = 0x9b, .d = 0x0b },
{ .lomax = 703000, .pd = 0x9a, .d = 0x0a },
{ .lomax = 781000, .pd = 0x99, .d = 0x09 },
{ .lomax = 879000, .pd = 0x98, .d = 0x08 },
{ .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
};
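/*
* Hedged sketch (the driver's real lookup helpers are defined later in
* this file; the function name below is hypothetical): the maps in
* this file are sorted ascending and terminated by a zero entry, so a
* lookup scans for the first entry whose limit covers the requested
* frequency.
*/
static inline int tda18271_map_lookup_example(struct tda18271_map *map,
u32 freq, u8 *val)
{
int i = 0;
while (map[i].rfmax && map[i].rfmax < freq)
i++;
if (!map[i].rfmax)
return -EINVAL; /* freq is above the last table entry */
*val = map[i].val;
return 0;
}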
static struct tda18271_map tda18271_bp_filter[] = {
{ .rfmax = 62000, .val = 0x00 },
{ .rfmax = 84000, .val = 0x01 },
{ .rfmax = 100000, .val = 0x02 },
{ .rfmax = 140000, .val = 0x03 },
{ .rfmax = 170000, .val = 0x04 },
{ .rfmax = 180000, .val = 0x05 },
{ .rfmax = 865000, .val = 0x06 },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271c1_km[] = {
{ .rfmax = 61100, .val = 0x74 },
{ .rfmax = 350000, .val = 0x40 },
{ .rfmax = 720000, .val = 0x30 },
{ .rfmax = 865000, .val = 0x40 },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271c2_km[] = {
{ .rfmax = 47900, .val = 0x38 },
{ .rfmax = 61100, .val = 0x44 },
{ .rfmax = 350000, .val = 0x30 },
{ .rfmax = 720000, .val = 0x24 },
{ .rfmax = 865000, .val = 0x3c },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271_rf_band[] = {
{ .rfmax = 47900, .val = 0x00 },
{ .rfmax = 61100, .val = 0x01 },
{ .rfmax = 152600, .val = 0x02 },
{ .rfmax = 164700, .val = 0x03 },
{ .rfmax = 203500, .val = 0x04 },
{ .rfmax = 457800, .val = 0x05 },
{ .rfmax = 865000, .val = 0x06 },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271_gain_taper[] = {
{ .rfmax = 45400, .val = 0x1f },
{ .rfmax = 45800, .val = 0x1e },
{ .rfmax = 46200, .val = 0x1d },
{ .rfmax = 46700, .val = 0x1c },
{ .rfmax = 47100, .val = 0x1b },
{ .rfmax = 47500, .val = 0x1a },
{ .rfmax = 47900, .val = 0x19 },
{ .rfmax = 49600, .val = 0x17 },
{ .rfmax = 51200, .val = 0x16 },
{ .rfmax = 52900, .val = 0x15 },
{ .rfmax = 54500, .val = 0x14 },
{ .rfmax = 56200, .val = 0x13 },
{ .rfmax = 57800, .val = 0x12 },
{ .rfmax = 59500, .val = 0x11 },
{ .rfmax = 61100, .val = 0x10 },
{ .rfmax = 67600, .val = 0x0d },
{ .rfmax = 74200, .val = 0x0c },
{ .rfmax = 80700, .val = 0x0b },
{ .rfmax = 87200, .val = 0x0a },
{ .rfmax = 93800, .val = 0x09 },
{ .rfmax = 100300, .val = 0x08 },
{ .rfmax = 106900, .val = 0x07 },
{ .rfmax = 113400, .val = 0x06 },
{ .rfmax = 119900, .val = 0x05 },
{ .rfmax = 126500, .val = 0x04 },
{ .rfmax = 133000, .val = 0x03 },
{ .rfmax = 139500, .val = 0x02 },
{ .rfmax = 146100, .val = 0x01 },
{ .rfmax = 152600, .val = 0x00 },
{ .rfmax = 154300, .val = 0x1f },
{ .rfmax = 156100, .val = 0x1e },
{ .rfmax = 157800, .val = 0x1d },
{ .rfmax = 159500, .val = 0x1c },
{ .rfmax = 161200, .val = 0x1b },
{ .rfmax = 163000, .val = 0x1a },
{ .rfmax = 164700, .val = 0x19 },
{ .rfmax = 170200, .val = 0x17 },
{ .rfmax = 175800, .val = 0x16 },
{ .rfmax = 181300, .val = 0x15 },
{ .rfmax = 186900, .val = 0x14 },
{ .rfmax = 192400, .val = 0x13 },
{ .rfmax = 198000, .val = 0x12 },
{ .rfmax = 203500, .val = 0x11 },
{ .rfmax = 216200, .val = 0x14 },
{ .rfmax = 228900, .val = 0x13 },
{ .rfmax = 241600, .val = 0x12 },
{ .rfmax = 254400, .val = 0x11 },
{ .rfmax = 267100, .val = 0x10 },
{ .rfmax = 279800, .val = 0x0f },
{ .rfmax = 292500, .val = 0x0e },
{ .rfmax = 305200, .val = 0x0d },
{ .rfmax = 317900, .val = 0x0c },
{ .rfmax = 330700, .val = 0x0b },
{ .rfmax = 343400, .val = 0x0a },
{ .rfmax = 356100, .val = 0x09 },
{ .rfmax = 368800, .val = 0x08 },
{ .rfmax = 381500, .val = 0x07 },
{ .rfmax = 394200, .val = 0x06 },
{ .rfmax = 406900, .val = 0x05 },
{ .rfmax = 419700, .val = 0x04 },
{ .rfmax = 432400, .val = 0x03 },
{ .rfmax = 445100, .val = 0x02 },
{ .rfmax = 457800, .val = 0x01 },
{ .rfmax = 476300, .val = 0x19 },
{ .rfmax = 494800, .val = 0x18 },
{ .rfmax = 513300, .val = 0x17 },
{ .rfmax = 531800, .val = 0x16 },
{ .rfmax = 550300, .val = 0x15 },
{ .rfmax = 568900, .val = 0x14 },
{ .rfmax = 587400, .val = 0x13 },
{ .rfmax = 605900, .val = 0x12 },
{ .rfmax = 624400, .val = 0x11 },
{ .rfmax = 642900, .val = 0x10 },
{ .rfmax = 661400, .val = 0x0f },
{ .rfmax = 679900, .val = 0x0e },
{ .rfmax = 698400, .val = 0x0d },
{ .rfmax = 716900, .val = 0x0c },
{ .rfmax = 735400, .val = 0x0b },
{ .rfmax = 753900, .val = 0x0a },
{ .rfmax = 772500, .val = 0x09 },
{ .rfmax = 791000, .val = 0x08 },
{ .rfmax = 809500, .val = 0x07 },
{ .rfmax = 828000, .val = 0x06 },
{ .rfmax = 846500, .val = 0x05 },
{ .rfmax = 865000, .val = 0x04 },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271c1_rf_cal[] = {
{ .rfmax = 41000, .val = 0x1e },
{ .rfmax = 43000, .val = 0x30 },
{ .rfmax = 45000, .val = 0x43 },
{ .rfmax = 46000, .val = 0x4d },
{ .rfmax = 47000, .val = 0x54 },
{ .rfmax = 47900, .val = 0x64 },
{ .rfmax = 49100, .val = 0x20 },
{ .rfmax = 50000, .val = 0x22 },
{ .rfmax = 51000, .val = 0x2a },
{ .rfmax = 53000, .val = 0x32 },
{ .rfmax = 55000, .val = 0x35 },
{ .rfmax = 56000, .val = 0x3c },
{ .rfmax = 57000, .val = 0x3f },
{ .rfmax = 58000, .val = 0x48 },
{ .rfmax = 59000, .val = 0x4d },
{ .rfmax = 60000, .val = 0x58 },
{ .rfmax = 61100, .val = 0x5f },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271c2_rf_cal[] = {
{ .rfmax = 41000, .val = 0x0f },
{ .rfmax = 43000, .val = 0x1c },
{ .rfmax = 45000, .val = 0x2f },
{ .rfmax = 46000, .val = 0x39 },
{ .rfmax = 47000, .val = 0x40 },
{ .rfmax = 47900, .val = 0x50 },
{ .rfmax = 49100, .val = 0x16 },
{ .rfmax = 50000, .val = 0x18 },
{ .rfmax = 51000, .val = 0x20 },
{ .rfmax = 53000, .val = 0x28 },
{ .rfmax = 55000, .val = 0x2b },
{ .rfmax = 56000, .val = 0x32 },
{ .rfmax = 57000, .val = 0x35 },
{ .rfmax = 58000, .val = 0x3e },
{ .rfmax = 59000, .val = 0x43 },
{ .rfmax = 60000, .val = 0x4e },
{ .rfmax = 61100, .val = 0x55 },
{ .rfmax = 63000, .val = 0x0f },
{ .rfmax = 64000, .val = 0x11 },
{ .rfmax = 65000, .val = 0x12 },
{ .rfmax = 66000, .val = 0x15 },
{ .rfmax = 67000, .val = 0x16 },
{ .rfmax = 68000, .val = 0x17 },
{ .rfmax = 70000, .val = 0x19 },
{ .rfmax = 71000, .val = 0x1c },
{ .rfmax = 72000, .val = 0x1d },
{ .rfmax = 73000, .val = 0x1f },
{ .rfmax = 74000, .val = 0x20 },
{ .rfmax = 75000, .val = 0x21 },
{ .rfmax = 76000, .val = 0x24 },
{ .rfmax = 77000, .val = 0x25 },
{ .rfmax = 78000, .val = 0x27 },
{ .rfmax = 80000, .val = 0x28 },
{ .rfmax = 81000, .val = 0x29 },
{ .rfmax = 82000, .val = 0x2d },
{ .rfmax = 83000, .val = 0x2e },
{ .rfmax = 84000, .val = 0x2f },
{ .rfmax = 85000, .val = 0x31 },
{ .rfmax = 86000, .val = 0x33 },
{ .rfmax = 87000, .val = 0x34 },
{ .rfmax = 88000, .val = 0x35 },
{ .rfmax = 89000, .val = 0x37 },
{ .rfmax = 90000, .val = 0x38 },
{ .rfmax = 91000, .val = 0x39 },
{ .rfmax = 93000, .val = 0x3c },
{ .rfmax = 94000, .val = 0x3e },
{ .rfmax = 95000, .val = 0x3f },
{ .rfmax = 96000, .val = 0x40 },
{ .rfmax = 97000, .val = 0x42 },
{ .rfmax = 99000, .val = 0x45 },
{ .rfmax = 100000, .val = 0x46 },
{ .rfmax = 102000, .val = 0x48 },
{ .rfmax = 103000, .val = 0x4a },
{ .rfmax = 105000, .val = 0x4d },
{ .rfmax = 106000, .val = 0x4e },
{ .rfmax = 107000, .val = 0x50 },
{ .rfmax = 108000, .val = 0x51 },
{ .rfmax = 110000, .val = 0x54 },
{ .rfmax = 111000, .val = 0x56 },
{ .rfmax = 112000, .val = 0x57 },
{ .rfmax = 113000, .val = 0x58 },
{ .rfmax = 114000, .val = 0x59 },
{ .rfmax = 115000, .val = 0x5c },
{ .rfmax = 116000, .val = 0x5d },
{ .rfmax = 117000, .val = 0x5f },
{ .rfmax = 119000, .val = 0x60 },
{ .rfmax = 120000, .val = 0x64 },
{ .rfmax = 121000, .val = 0x65 },
{ .rfmax = 122000, .val = 0x66 },
{ .rfmax = 123000, .val = 0x68 },
{ .rfmax = 124000, .val = 0x69 },
{ .rfmax = 125000, .val = 0x6c },
{ .rfmax = 126000, .val = 0x6d },
{ .rfmax = 127000, .val = 0x6e },
{ .rfmax = 128000, .val = 0x70 },
{ .rfmax = 129000, .val = 0x71 },
{ .rfmax = 130000, .val = 0x75 },
{ .rfmax = 131000, .val = 0x77 },
{ .rfmax = 132000, .val = 0x78 },
{ .rfmax = 133000, .val = 0x7b },
{ .rfmax = 134000, .val = 0x7e },
{ .rfmax = 135000, .val = 0x81 },
{ .rfmax = 136000, .val = 0x82 },
{ .rfmax = 137000, .val = 0x87 },
{ .rfmax = 138000, .val = 0x88 },
{ .rfmax = 139000, .val = 0x8d },
{ .rfmax = 140000, .val = 0x8e },
{ .rfmax = 141000, .val = 0x91 },
{ .rfmax = 142000, .val = 0x95 },
{ .rfmax = 143000, .val = 0x9a },
{ .rfmax = 144000, .val = 0x9d },
{ .rfmax = 145000, .val = 0xa1 },
{ .rfmax = 146000, .val = 0xa2 },
{ .rfmax = 147000, .val = 0xa4 },
{ .rfmax = 148000, .val = 0xa9 },
{ .rfmax = 149000, .val = 0xae },
{ .rfmax = 150000, .val = 0xb0 },
{ .rfmax = 151000, .val = 0xb1 },
{ .rfmax = 152000, .val = 0xb7 },
{ .rfmax = 152600, .val = 0xbd },
{ .rfmax = 154000, .val = 0x20 },
{ .rfmax = 155000, .val = 0x22 },
{ .rfmax = 156000, .val = 0x24 },
{ .rfmax = 157000, .val = 0x25 },
{ .rfmax = 158000, .val = 0x27 },
{ .rfmax = 159000, .val = 0x29 },
{ .rfmax = 160000, .val = 0x2c },
{ .rfmax = 161000, .val = 0x2d },
{ .rfmax = 163000, .val = 0x2e },
{ .rfmax = 164000, .val = 0x2f },
{ .rfmax = 164700, .val = 0x30 },
{ .rfmax = 166000, .val = 0x11 },
{ .rfmax = 167000, .val = 0x12 },
{ .rfmax = 168000, .val = 0x13 },
{ .rfmax = 169000, .val = 0x14 },
{ .rfmax = 170000, .val = 0x15 },
{ .rfmax = 172000, .val = 0x16 },
{ .rfmax = 173000, .val = 0x17 },
{ .rfmax = 174000, .val = 0x18 },
{ .rfmax = 175000, .val = 0x1a },
{ .rfmax = 176000, .val = 0x1b },
{ .rfmax = 178000, .val = 0x1d },
{ .rfmax = 179000, .val = 0x1e },
{ .rfmax = 180000, .val = 0x1f },
{ .rfmax = 181000, .val = 0x20 },
{ .rfmax = 182000, .val = 0x21 },
{ .rfmax = 183000, .val = 0x22 },
{ .rfmax = 184000, .val = 0x24 },
{ .rfmax = 185000, .val = 0x25 },
{ .rfmax = 186000, .val = 0x26 },
{ .rfmax = 187000, .val = 0x27 },
{ .rfmax = 188000, .val = 0x29 },
{ .rfmax = 189000, .val = 0x2a },
{ .rfmax = 190000, .val = 0x2c },
{ .rfmax = 191000, .val = 0x2d },
{ .rfmax = 192000, .val = 0x2e },
{ .rfmax = 193000, .val = 0x2f },
{ .rfmax = 194000, .val = 0x30 },
{ .rfmax = 195000, .val = 0x33 },
{ .rfmax = 196000, .val = 0x35 },
{ .rfmax = 198000, .val = 0x36 },
{ .rfmax = 200000, .val = 0x38 },
{ .rfmax = 201000, .val = 0x3c },
{ .rfmax = 202000, .val = 0x3d },
{ .rfmax = 203500, .val = 0x3e },
{ .rfmax = 206000, .val = 0x0e },
{ .rfmax = 208000, .val = 0x0f },
{ .rfmax = 212000, .val = 0x10 },
{ .rfmax = 216000, .val = 0x11 },
{ .rfmax = 217000, .val = 0x12 },
{ .rfmax = 218000, .val = 0x13 },
{ .rfmax = 220000, .val = 0x14 },
{ .rfmax = 222000, .val = 0x15 },
{ .rfmax = 225000, .val = 0x16 },
{ .rfmax = 228000, .val = 0x17 },
{ .rfmax = 231000, .val = 0x18 },
{ .rfmax = 234000, .val = 0x19 },
{ .rfmax = 235000, .val = 0x1a },
{ .rfmax = 236000, .val = 0x1b },
{ .rfmax = 237000, .val = 0x1c },
{ .rfmax = 240000, .val = 0x1d },
{ .rfmax = 242000, .val = 0x1e },
{ .rfmax = 244000, .val = 0x1f },
{ .rfmax = 247000, .val = 0x20 },
{ .rfmax = 249000, .val = 0x21 },
{ .rfmax = 252000, .val = 0x22 },
{ .rfmax = 253000, .val = 0x23 },
{ .rfmax = 254000, .val = 0x24 },
{ .rfmax = 256000, .val = 0x25 },
{ .rfmax = 259000, .val = 0x26 },
{ .rfmax = 262000, .val = 0x27 },
{ .rfmax = 264000, .val = 0x28 },
{ .rfmax = 267000, .val = 0x29 },
{ .rfmax = 269000, .val = 0x2a },
{ .rfmax = 271000, .val = 0x2b },
{ .rfmax = 273000, .val = 0x2c },
{ .rfmax = 275000, .val = 0x2d },
{ .rfmax = 277000, .val = 0x2e },
{ .rfmax = 279000, .val = 0x2f },
{ .rfmax = 282000, .val = 0x30 },
{ .rfmax = 284000, .val = 0x31 },
{ .rfmax = 286000, .val = 0x32 },
{ .rfmax = 287000, .val = 0x33 },
{ .rfmax = 290000, .val = 0x34 },
{ .rfmax = 293000, .val = 0x35 },
{ .rfmax = 295000, .val = 0x36 },
{ .rfmax = 297000, .val = 0x37 },
{ .rfmax = 300000, .val = 0x38 },
{ .rfmax = 303000, .val = 0x39 },
{ .rfmax = 305000, .val = 0x3a },
{ .rfmax = 306000, .val = 0x3b },
{ .rfmax = 307000, .val = 0x3c },
{ .rfmax = 310000, .val = 0x3d },
{ .rfmax = 312000, .val = 0x3e },
{ .rfmax = 315000, .val = 0x3f },
{ .rfmax = 318000, .val = 0x40 },
{ .rfmax = 320000, .val = 0x41 },
{ .rfmax = 323000, .val = 0x42 },
{ .rfmax = 324000, .val = 0x43 },
{ .rfmax = 325000, .val = 0x44 },
{ .rfmax = 327000, .val = 0x45 },
{ .rfmax = 331000, .val = 0x46 },
{ .rfmax = 334000, .val = 0x47 },
{ .rfmax = 337000, .val = 0x48 },
{ .rfmax = 339000, .val = 0x49 },
{ .rfmax = 340000, .val = 0x4a },
{ .rfmax = 341000, .val = 0x4b },
{ .rfmax = 343000, .val = 0x4c },
{ .rfmax = 345000, .val = 0x4d },
{ .rfmax = 349000, .val = 0x4e },
{ .rfmax = 352000, .val = 0x4f },
{ .rfmax = 353000, .val = 0x50 },
{ .rfmax = 355000, .val = 0x51 },
{ .rfmax = 357000, .val = 0x52 },
{ .rfmax = 359000, .val = 0x53 },
{ .rfmax = 361000, .val = 0x54 },
{ .rfmax = 362000, .val = 0x55 },
{ .rfmax = 364000, .val = 0x56 },
{ .rfmax = 368000, .val = 0x57 },
{ .rfmax = 370000, .val = 0x58 },
{ .rfmax = 372000, .val = 0x59 },
{ .rfmax = 375000, .val = 0x5a },
{ .rfmax = 376000, .val = 0x5b },
{ .rfmax = 377000, .val = 0x5c },
{ .rfmax = 379000, .val = 0x5d },
{ .rfmax = 382000, .val = 0x5e },
{ .rfmax = 384000, .val = 0x5f },
{ .rfmax = 385000, .val = 0x60 },
{ .rfmax = 386000, .val = 0x61 },
{ .rfmax = 388000, .val = 0x62 },
{ .rfmax = 390000, .val = 0x63 },
{ .rfmax = 393000, .val = 0x64 },
{ .rfmax = 394000, .val = 0x65 },
{ .rfmax = 396000, .val = 0x66 },
{ .rfmax = 397000, .val = 0x67 },
{ .rfmax = 398000, .val = 0x68 },
{ .rfmax = 400000, .val = 0x69 },
{ .rfmax = 402000, .val = 0x6a },
{ .rfmax = 403000, .val = 0x6b },
{ .rfmax = 407000, .val = 0x6c },
{ .rfmax = 408000, .val = 0x6d },
{ .rfmax = 409000, .val = 0x6e },
{ .rfmax = 410000, .val = 0x6f },
{ .rfmax = 411000, .val = 0x70 },
{ .rfmax = 412000, .val = 0x71 },
{ .rfmax = 413000, .val = 0x72 },
{ .rfmax = 414000, .val = 0x73 },
{ .rfmax = 417000, .val = 0x74 },
{ .rfmax = 418000, .val = 0x75 },
{ .rfmax = 420000, .val = 0x76 },
{ .rfmax = 422000, .val = 0x77 },
{ .rfmax = 423000, .val = 0x78 },
{ .rfmax = 424000, .val = 0x79 },
{ .rfmax = 427000, .val = 0x7a },
{ .rfmax = 428000, .val = 0x7b },
{ .rfmax = 429000, .val = 0x7d },
{ .rfmax = 432000, .val = 0x7f },
{ .rfmax = 434000, .val = 0x80 },
{ .rfmax = 435000, .val = 0x81 },
{ .rfmax = 436000, .val = 0x83 },
{ .rfmax = 437000, .val = 0x84 },
{ .rfmax = 438000, .val = 0x85 },
{ .rfmax = 439000, .val = 0x86 },
{ .rfmax = 440000, .val = 0x87 },
{ .rfmax = 441000, .val = 0x88 },
{ .rfmax = 442000, .val = 0x89 },
{ .rfmax = 445000, .val = 0x8a },
{ .rfmax = 446000, .val = 0x8b },
{ .rfmax = 447000, .val = 0x8c },
{ .rfmax = 448000, .val = 0x8e },
{ .rfmax = 449000, .val = 0x8f },
{ .rfmax = 450000, .val = 0x90 },
{ .rfmax = 452000, .val = 0x91 },
{ .rfmax = 453000, .val = 0x93 },
{ .rfmax = 454000, .val = 0x94 },
{ .rfmax = 456000, .val = 0x96 },
{ .rfmax = 457800, .val = 0x98 },
{ .rfmax = 461000, .val = 0x11 },
{ .rfmax = 468000, .val = 0x12 },
{ .rfmax = 472000, .val = 0x13 },
{ .rfmax = 473000, .val = 0x14 },
{ .rfmax = 474000, .val = 0x15 },
{ .rfmax = 481000, .val = 0x16 },
{ .rfmax = 486000, .val = 0x17 },
{ .rfmax = 491000, .val = 0x18 },
{ .rfmax = 498000, .val = 0x19 },
{ .rfmax = 499000, .val = 0x1a },
{ .rfmax = 501000, .val = 0x1b },
{ .rfmax = 506000, .val = 0x1c },
{ .rfmax = 511000, .val = 0x1d },
{ .rfmax = 516000, .val = 0x1e },
{ .rfmax = 520000, .val = 0x1f },
{ .rfmax = 521000, .val = 0x20 },
{ .rfmax = 525000, .val = 0x21 },
{ .rfmax = 529000, .val = 0x22 },
{ .rfmax = 533000, .val = 0x23 },
{ .rfmax = 539000, .val = 0x24 },
{ .rfmax = 541000, .val = 0x25 },
{ .rfmax = 547000, .val = 0x26 },
{ .rfmax = 549000, .val = 0x27 },
{ .rfmax = 551000, .val = 0x28 },
{ .rfmax = 556000, .val = 0x29 },
{ .rfmax = 561000, .val = 0x2a },
{ .rfmax = 563000, .val = 0x2b },
{ .rfmax = 565000, .val = 0x2c },
{ .rfmax = 569000, .val = 0x2d },
{ .rfmax = 571000, .val = 0x2e },
{ .rfmax = 577000, .val = 0x2f },
{ .rfmax = 580000, .val = 0x30 },
{ .rfmax = 582000, .val = 0x31 },
{ .rfmax = 584000, .val = 0x32 },
{ .rfmax = 588000, .val = 0x33 },
{ .rfmax = 591000, .val = 0x34 },
{ .rfmax = 596000, .val = 0x35 },
{ .rfmax = 598000, .val = 0x36 },
{ .rfmax = 603000, .val = 0x37 },
{ .rfmax = 604000, .val = 0x38 },
{ .rfmax = 606000, .val = 0x39 },
{ .rfmax = 612000, .val = 0x3a },
{ .rfmax = 615000, .val = 0x3b },
{ .rfmax = 617000, .val = 0x3c },
{ .rfmax = 621000, .val = 0x3d },
{ .rfmax = 622000, .val = 0x3e },
{ .rfmax = 625000, .val = 0x3f },
{ .rfmax = 632000, .val = 0x40 },
{ .rfmax = 633000, .val = 0x41 },
{ .rfmax = 634000, .val = 0x42 },
{ .rfmax = 642000, .val = 0x43 },
{ .rfmax = 643000, .val = 0x44 },
{ .rfmax = 647000, .val = 0x45 },
{ .rfmax = 650000, .val = 0x46 },
{ .rfmax = 652000, .val = 0x47 },
{ .rfmax = 657000, .val = 0x48 },
{ .rfmax = 661000, .val = 0x49 },
{ .rfmax = 662000, .val = 0x4a },
{ .rfmax = 665000, .val = 0x4b },
{ .rfmax = 667000, .val = 0x4c },
{ .rfmax = 670000, .val = 0x4d },
{ .rfmax = 673000, .val = 0x4e },
{ .rfmax = 676000, .val = 0x4f },
{ .rfmax = 677000, .val = 0x50 },
{ .rfmax = 681000, .val = 0x51 },
{ .rfmax = 683000, .val = 0x52 },
{ .rfmax = 686000, .val = 0x53 },
{ .rfmax = 688000, .val = 0x54 },
{ .rfmax = 689000, .val = 0x55 },
{ .rfmax = 691000, .val = 0x56 },
{ .rfmax = 695000, .val = 0x57 },
{ .rfmax = 698000, .val = 0x58 },
{ .rfmax = 703000, .val = 0x59 },
{ .rfmax = 704000, .val = 0x5a },
{ .rfmax = 705000, .val = 0x5b },
{ .rfmax = 707000, .val = 0x5c },
{ .rfmax = 710000, .val = 0x5d },
{ .rfmax = 712000, .val = 0x5e },
{ .rfmax = 717000, .val = 0x5f },
{ .rfmax = 718000, .val = 0x60 },
{ .rfmax = 721000, .val = 0x61 },
{ .rfmax = 722000, .val = 0x62 },
{ .rfmax = 723000, .val = 0x63 },
{ .rfmax = 725000, .val = 0x64 },
{ .rfmax = 727000, .val = 0x65 },
{ .rfmax = 730000, .val = 0x66 },
{ .rfmax = 732000, .val = 0x67 },
{ .rfmax = 735000, .val = 0x68 },
{ .rfmax = 740000, .val = 0x69 },
{ .rfmax = 741000, .val = 0x6a },
{ .rfmax = 742000, .val = 0x6b },
{ .rfmax = 743000, .val = 0x6c },
{ .rfmax = 745000, .val = 0x6d },
{ .rfmax = 747000, .val = 0x6e },
{ .rfmax = 748000, .val = 0x6f },
{ .rfmax = 750000, .val = 0x70 },
{ .rfmax = 752000, .val = 0x71 },
{ .rfmax = 754000, .val = 0x72 },
{ .rfmax = 757000, .val = 0x73 },
{ .rfmax = 758000, .val = 0x74 },
{ .rfmax = 760000, .val = 0x75 },
{ .rfmax = 763000, .val = 0x76 },
{ .rfmax = 764000, .val = 0x77 },
{ .rfmax = 766000, .val = 0x78 },
{ .rfmax = 767000, .val = 0x79 },
{ .rfmax = 768000, .val = 0x7a },
{ .rfmax = 773000, .val = 0x7b },
{ .rfmax = 774000, .val = 0x7c },
{ .rfmax = 776000, .val = 0x7d },
{ .rfmax = 777000, .val = 0x7e },
{ .rfmax = 778000, .val = 0x7f },
{ .rfmax = 779000, .val = 0x80 },
{ .rfmax = 781000, .val = 0x81 },
{ .rfmax = 783000, .val = 0x82 },
{ .rfmax = 784000, .val = 0x83 },
{ .rfmax = 785000, .val = 0x84 },
{ .rfmax = 786000, .val = 0x85 },
{ .rfmax = 793000, .val = 0x86 },
{ .rfmax = 794000, .val = 0x87 },
{ .rfmax = 795000, .val = 0x88 },
{ .rfmax = 797000, .val = 0x89 },
{ .rfmax = 799000, .val = 0x8a },
{ .rfmax = 801000, .val = 0x8b },
{ .rfmax = 802000, .val = 0x8c },
{ .rfmax = 803000, .val = 0x8d },
{ .rfmax = 804000, .val = 0x8e },
{ .rfmax = 810000, .val = 0x90 },
{ .rfmax = 811000, .val = 0x91 },
{ .rfmax = 812000, .val = 0x92 },
{ .rfmax = 814000, .val = 0x93 },
{ .rfmax = 816000, .val = 0x94 },
{ .rfmax = 817000, .val = 0x96 },
{ .rfmax = 818000, .val = 0x97 },
{ .rfmax = 820000, .val = 0x98 },
{ .rfmax = 821000, .val = 0x99 },
{ .rfmax = 822000, .val = 0x9a },
{ .rfmax = 828000, .val = 0x9b },
{ .rfmax = 829000, .val = 0x9d },
{ .rfmax = 830000, .val = 0x9f },
{ .rfmax = 831000, .val = 0xa0 },
{ .rfmax = 833000, .val = 0xa1 },
{ .rfmax = 835000, .val = 0xa2 },
{ .rfmax = 836000, .val = 0xa3 },
{ .rfmax = 837000, .val = 0xa4 },
{ .rfmax = 838000, .val = 0xa6 },
{ .rfmax = 840000, .val = 0xa8 },
{ .rfmax = 842000, .val = 0xa9 },
{ .rfmax = 845000, .val = 0xaa },
{ .rfmax = 846000, .val = 0xab },
{ .rfmax = 847000, .val = 0xad },
{ .rfmax = 848000, .val = 0xae },
{ .rfmax = 852000, .val = 0xaf },
{ .rfmax = 853000, .val = 0xb0 },
{ .rfmax = 858000, .val = 0xb1 },
{ .rfmax = 860000, .val = 0xb2 },
{ .rfmax = 861000, .val = 0xb3 },
{ .rfmax = 862000, .val = 0xb4 },
{ .rfmax = 863000, .val = 0xb6 },
{ .rfmax = 864000, .val = 0xb8 },
{ .rfmax = 865000, .val = 0xb9 },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
static struct tda18271_map tda18271_ir_measure[] = {
{ .rfmax = 30000, .val = 4 },
{ .rfmax = 200000, .val = 5 },
{ .rfmax = 600000, .val = 6 },
{ .rfmax = 865000, .val = 7 },
{ .rfmax = 0, .val = 0 }, /* end */
};
static struct tda18271_map tda18271_rf_cal_dc_over_dt[] = {
{ .rfmax = 47900, .val = 0x00 },
{ .rfmax = 55000, .val = 0x00 },
{ .rfmax = 61100, .val = 0x0a },
{ .rfmax = 64000, .val = 0x0a },
{ .rfmax = 82000, .val = 0x14 },
{ .rfmax = 84000, .val = 0x19 },
{ .rfmax = 119000, .val = 0x1c },
{ .rfmax = 124000, .val = 0x20 },
{ .rfmax = 129000, .val = 0x2a },
{ .rfmax = 134000, .val = 0x32 },
{ .rfmax = 139000, .val = 0x39 },
{ .rfmax = 144000, .val = 0x3e },
{ .rfmax = 149000, .val = 0x3f },
{ .rfmax = 152600, .val = 0x40 },
{ .rfmax = 154000, .val = 0x40 },
{ .rfmax = 164700, .val = 0x41 },
{ .rfmax = 203500, .val = 0x32 },
{ .rfmax = 353000, .val = 0x19 },
{ .rfmax = 356000, .val = 0x1a },
{ .rfmax = 359000, .val = 0x1b },
{ .rfmax = 363000, .val = 0x1c },
{ .rfmax = 366000, .val = 0x1d },
{ .rfmax = 369000, .val = 0x1e },
{ .rfmax = 373000, .val = 0x1f },
{ .rfmax = 376000, .val = 0x20 },
{ .rfmax = 379000, .val = 0x21 },
{ .rfmax = 383000, .val = 0x22 },
{ .rfmax = 386000, .val = 0x23 },
{ .rfmax = 389000, .val = 0x24 },
{ .rfmax = 393000, .val = 0x25 },
{ .rfmax = 396000, .val = 0x26 },
{ .rfmax = 399000, .val = 0x27 },
{ .rfmax = 402000, .val = 0x28 },
{ .rfmax = 404000, .val = 0x29 },
{ .rfmax = 407000, .val = 0x2a },
{ .rfmax = 409000, .val = 0x2b },
{ .rfmax = 412000, .val = 0x2c },
{ .rfmax = 414000, .val = 0x2d },
{ .rfmax = 417000, .val = 0x2e },
{ .rfmax = 419000, .val = 0x2f },
{ .rfmax = 422000, .val = 0x30 },
{ .rfmax = 424000, .val = 0x31 },
{ .rfmax = 427000, .val = 0x32 },
{ .rfmax = 429000, .val = 0x33 },
{ .rfmax = 432000, .val = 0x34 },
{ .rfmax = 434000, .val = 0x35 },
{ .rfmax = 437000, .val = 0x36 },
{ .rfmax = 439000, .val = 0x37 },
{ .rfmax = 442000, .val = 0x38 },
{ .rfmax = 444000, .val = 0x39 },
{ .rfmax = 447000, .val = 0x3a },
{ .rfmax = 449000, .val = 0x3b },
{ .rfmax = 457800, .val = 0x3c },
{ .rfmax = 465000, .val = 0x0f },
{ .rfmax = 477000, .val = 0x12 },
{ .rfmax = 483000, .val = 0x14 },
{ .rfmax = 502000, .val = 0x19 },
{ .rfmax = 508000, .val = 0x1b },
{ .rfmax = 519000, .val = 0x1c },
{ .rfmax = 522000, .val = 0x1d },
{ .rfmax = 524000, .val = 0x1e },
{ .rfmax = 534000, .val = 0x1f },
{ .rfmax = 549000, .val = 0x20 },
{ .rfmax = 554000, .val = 0x22 },
{ .rfmax = 584000, .val = 0x24 },
{ .rfmax = 589000, .val = 0x26 },
{ .rfmax = 658000, .val = 0x27 },
{ .rfmax = 664000, .val = 0x2c },
{ .rfmax = 669000, .val = 0x2d },
{ .rfmax = 699000, .val = 0x2e },
{ .rfmax = 704000, .val = 0x30 },
{ .rfmax = 709000, .val = 0x31 },
{ .rfmax = 714000, .val = 0x32 },
{ .rfmax = 724000, .val = 0x33 },
{ .rfmax = 729000, .val = 0x36 },
{ .rfmax = 739000, .val = 0x38 },
{ .rfmax = 744000, .val = 0x39 },
{ .rfmax = 749000, .val = 0x3b },
{ .rfmax = 754000, .val = 0x3c },
{ .rfmax = 759000, .val = 0x3d },
{ .rfmax = 764000, .val = 0x3e },
{ .rfmax = 769000, .val = 0x3f },
{ .rfmax = 774000, .val = 0x40 },
{ .rfmax = 779000, .val = 0x41 },
{ .rfmax = 784000, .val = 0x43 },
{ .rfmax = 789000, .val = 0x46 },
{ .rfmax = 794000, .val = 0x48 },
{ .rfmax = 799000, .val = 0x4b },
{ .rfmax = 804000, .val = 0x4f },
{ .rfmax = 809000, .val = 0x54 },
{ .rfmax = 814000, .val = 0x59 },
{ .rfmax = 819000, .val = 0x5d },
{ .rfmax = 824000, .val = 0x61 },
{ .rfmax = 829000, .val = 0x68 },
{ .rfmax = 834000, .val = 0x6e },
{ .rfmax = 839000, .val = 0x75 },
{ .rfmax = 844000, .val = 0x7e },
{ .rfmax = 849000, .val = 0x82 },
{ .rfmax = 854000, .val = 0x84 },
{ .rfmax = 859000, .val = 0x8f },
{ .rfmax = 865000, .val = 0x9a },
{ .rfmax = 0, .val = 0x00 }, /* end */
};
/*---------------------------------------------------------------------*/
struct tda18271_thermo_map {
u8 d;
u8 r0;
u8 r1;
};
static struct tda18271_thermo_map tda18271_thermometer[] = {
{ .d = 0x00, .r0 = 60, .r1 = 92 },
{ .d = 0x01, .r0 = 62, .r1 = 94 },
{ .d = 0x02, .r0 = 66, .r1 = 98 },
{ .d = 0x03, .r0 = 64, .r1 = 96 },
{ .d = 0x04, .r0 = 74, .r1 = 106 },
{ .d = 0x05, .r0 = 72, .r1 = 104 },
{ .d = 0x06, .r0 = 68, .r1 = 100 },
{ .d = 0x07, .r0 = 70, .r1 = 102 },
{ .d = 0x08, .r0 = 90, .r1 = 122 },
{ .d = 0x09, .r0 = 88, .r1 = 120 },
{ .d = 0x0a, .r0 = 84, .r1 = 116 },
{ .d = 0x0b, .r0 = 86, .r1 = 118 },
{ .d = 0x0c, .r0 = 76, .r1 = 108 },
{ .d = 0x0d, .r0 = 78, .r1 = 110 },
{ .d = 0x0e, .r0 = 82, .r1 = 114 },
{ .d = 0x0f, .r0 = 80, .r1 = 112 },
{ .d = 0x00, .r0 = 0, .r1 = 0 }, /* end */
};
int tda18271_lookup_thermometer(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
int val, i = 0;
while (tda18271_thermometer[i].d < (regs[R_TM] & 0x0f)) {
if (tda18271_thermometer[i + 1].d == 0)
break;
i++;
}
if ((regs[R_TM] & 0x20) == 0x20)
val = tda18271_thermometer[i].r1;
else
val = tda18271_thermometer[i].r0;
tda_map("(%d) tm = %d\n", i, val);
return val;
}
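/*
 * Worked example (illustrative, derived from the table above): if the
 * low nibble of regs[R_TM] reads 0x0a, the loop walks the sequential .d
 * values and stops at i = 10 (.d = 0x0a, .r0 = 84, .r1 = 116). With
 * R_TM bit 5 set the function returns 116, otherwise 84.
 */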
/*---------------------------------------------------------------------*/
struct tda18271_cid_target_map {
u32 rfmax;
u8 target;
u16 limit;
};
static struct tda18271_cid_target_map tda18271_cid_target[] = {
{ .rfmax = 46000, .target = 0x04, .limit = 1800 },
{ .rfmax = 52200, .target = 0x0a, .limit = 1500 },
{ .rfmax = 70100, .target = 0x01, .limit = 4000 },
{ .rfmax = 136800, .target = 0x18, .limit = 4000 },
{ .rfmax = 156700, .target = 0x18, .limit = 4000 },
{ .rfmax = 186250, .target = 0x0a, .limit = 4000 },
{ .rfmax = 230000, .target = 0x0a, .limit = 4000 },
{ .rfmax = 345000, .target = 0x18, .limit = 4000 },
{ .rfmax = 426000, .target = 0x0e, .limit = 4000 },
{ .rfmax = 489500, .target = 0x1e, .limit = 4000 },
{ .rfmax = 697500, .target = 0x32, .limit = 4000 },
{ .rfmax = 842000, .target = 0x3a, .limit = 4000 },
{ .rfmax = 0, .target = 0x00, .limit = 0 }, /* end */
};
int tda18271_lookup_cid_target(struct dvb_frontend *fe,
u32 *freq, u8 *cid_target, u16 *count_limit)
{
struct tda18271_priv *priv = fe->tuner_priv;
int i = 0;
while ((tda18271_cid_target[i].rfmax * 1000) < *freq) {
if (tda18271_cid_target[i + 1].rfmax == 0)
break;
i++;
}
*cid_target = tda18271_cid_target[i].target;
*count_limit = tda18271_cid_target[i].limit;
tda_map("(%d) cid_target = %02x, count_limit = %d\n", i,
tda18271_cid_target[i].target, tda18271_cid_target[i].limit);
return 0;
}
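/*
 * Worked example (illustrative): for a 474 MHz channel, *freq is
 * 474000000 Hz. The loop compares against rfmax * 1000, skips the
 * 426000 kHz entry (426 MHz < 474 MHz) and stops at the 489500 kHz
 * entry, yielding cid_target = 0x1e and count_limit = 4000.
 */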
/*---------------------------------------------------------------------*/
static struct tda18271_rf_tracking_filter_cal tda18271_rf_band_template[] = {
{ .rfmax = 47900, .rfband = 0x00,
.rf1_def = 46000, .rf2_def = 0, .rf3_def = 0 },
{ .rfmax = 61100, .rfband = 0x01,
.rf1_def = 52200, .rf2_def = 0, .rf3_def = 0 },
{ .rfmax = 152600, .rfband = 0x02,
.rf1_def = 70100, .rf2_def = 136800, .rf3_def = 0 },
{ .rfmax = 164700, .rfband = 0x03,
.rf1_def = 156700, .rf2_def = 0, .rf3_def = 0 },
{ .rfmax = 203500, .rfband = 0x04,
.rf1_def = 186250, .rf2_def = 0, .rf3_def = 0 },
{ .rfmax = 457800, .rfband = 0x05,
.rf1_def = 230000, .rf2_def = 345000, .rf3_def = 426000 },
{ .rfmax = 865000, .rfband = 0x06,
.rf1_def = 489500, .rf2_def = 697500, .rf3_def = 842000 },
{ .rfmax = 0, .rfband = 0x00,
.rf1_def = 0, .rf2_def = 0, .rf3_def = 0 }, /* end */
};
int tda18271_lookup_rf_band(struct dvb_frontend *fe, u32 *freq, u8 *rf_band)
{
struct tda18271_priv *priv = fe->tuner_priv;
struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state;
int i = 0;
while ((map[i].rfmax * 1000) < *freq) {
if (tda18271_debug & DBG_ADV)
tda_map("(%d) rfmax = %d < freq = %d, "
"rf1_def = %d, rf2_def = %d, rf3_def = %d, "
"rf1 = %d, rf2 = %d, rf3 = %d, "
"rf_a1 = %d, rf_a2 = %d, "
"rf_b1 = %d, rf_b2 = %d\n",
i, map[i].rfmax * 1000, *freq,
map[i].rf1_def, map[i].rf2_def, map[i].rf3_def,
map[i].rf1, map[i].rf2, map[i].rf3,
map[i].rf_a1, map[i].rf_a2,
map[i].rf_b1, map[i].rf_b2);
if (map[i].rfmax == 0)
return -EINVAL;
i++;
}
if (rf_band)
*rf_band = map[i].rfband;
tda_map("(%d) rf_band = %02x\n", i, map[i].rfband);
return i;
}
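/*
 * Worked example (illustrative, using the template above as the cal
 * state): for *freq = 474000000 Hz the 457800 kHz entry is still below
 * the target, so the loop stops at rfmax = 865000 and the function
 * returns band index 6 with *rf_band = 0x06.
 */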
/*---------------------------------------------------------------------*/
struct tda18271_map_layout {
struct tda18271_pll_map *main_pll;
struct tda18271_pll_map *cal_pll;
struct tda18271_map *rf_cal;
struct tda18271_map *rf_cal_kmco;
struct tda18271_map *rf_cal_dc_over_dt;
struct tda18271_map *bp_filter;
struct tda18271_map *rf_band;
struct tda18271_map *gain_taper;
struct tda18271_map *ir_measure;
};
/*---------------------------------------------------------------------*/
int tda18271_lookup_pll_map(struct dvb_frontend *fe,
enum tda18271_map_type map_type,
u32 *freq, u8 *post_div, u8 *div)
{
struct tda18271_priv *priv = fe->tuner_priv;
struct tda18271_pll_map *map = NULL;
unsigned int i = 0;
char *map_name;
int ret = 0;
BUG_ON(!priv->maps);
switch (map_type) {
case MAIN_PLL:
map = priv->maps->main_pll;
map_name = "main_pll";
break;
case CAL_PLL:
map = priv->maps->cal_pll;
map_name = "cal_pll";
break;
default:
/* we should never get here */
map_name = "undefined";
break;
}
if (!map) {
tda_warn("%s map is not set!\n", map_name);
ret = -EINVAL;
goto fail;
}
while ((map[i].lomax * 1000) < *freq) {
if (map[i + 1].lomax == 0) {
tda_map("%s: frequency (%d) out of range\n",
map_name, *freq);
ret = -ERANGE;
break;
}
i++;
}
*post_div = map[i].pd;
*div = map[i].d;
tda_map("(%d) %s: post div = 0x%02x, div = 0x%02x\n",
i, map_name, *post_div, *div);
fail:
return ret;
}
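/*
 * Illustrative caller sketch (hypothetical, not from this file) showing
 * the intended use, with freq in Hz:
 *
 *	u8 pd, d;
 *	if (tda18271_lookup_pll_map(fe, MAIN_PLL, &freq, &pd, &d) < 0)
 *		return -EIO;
 *
 * Note that *post_div and *div are still filled in from the last table
 * entry when -ERANGE is returned.
 */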
int tda18271_lookup_map(struct dvb_frontend *fe,
enum tda18271_map_type map_type,
u32 *freq, u8 *val)
{
struct tda18271_priv *priv = fe->tuner_priv;
struct tda18271_map *map = NULL;
unsigned int i = 0;
char *map_name;
int ret = 0;
BUG_ON(!priv->maps);
switch (map_type) {
case BP_FILTER:
map = priv->maps->bp_filter;
map_name = "bp_filter";
break;
case RF_CAL_KMCO:
map = priv->maps->rf_cal_kmco;
map_name = "km";
break;
case RF_BAND:
map = priv->maps->rf_band;
map_name = "rf_band";
break;
case GAIN_TAPER:
map = priv->maps->gain_taper;
map_name = "gain_taper";
break;
case RF_CAL:
map = priv->maps->rf_cal;
map_name = "rf_cal";
break;
case IR_MEASURE:
map = priv->maps->ir_measure;
map_name = "ir_measure";
break;
case RF_CAL_DC_OVER_DT:
map = priv->maps->rf_cal_dc_over_dt;
map_name = "rf_cal_dc_over_dt";
break;
default:
/* we should never get here */
map_name = "undefined";
break;
}
if (!map) {
tda_warn("%s map is not set!\n", map_name);
ret = -EINVAL;
goto fail;
}
while ((map[i].rfmax * 1000) < *freq) {
if (map[i + 1].rfmax == 0) {
tda_map("%s: frequency (%d) out of range\n",
map_name, *freq);
ret = -ERANGE;
break;
}
i++;
}
*val = map[i].val;
tda_map("(%d) %s: 0x%02x\n", i, map_name, *val);
fail:
return ret;
}
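/*
 * Illustrative caller sketch (hypothetical): fetching the gain taper
 * byte for the current frequency, freq in Hz:
 *
 *	u8 gain;
 *	ret = tda18271_lookup_map(fe, GAIN_TAPER, &freq, &gain);
 *
 * As with the PLL lookup, *val is still written from the last table
 * entry when the frequency is out of range and -ERANGE is returned.
 */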
/*---------------------------------------------------------------------*/
static struct tda18271_std_map tda18271c1_std_map = {
.fm_radio = { .if_freq = 1250, .fm_rfn = 1, .agc_mode = 3, .std = 0,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x18 */
.atv_b = { .if_freq = 6750, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_dk = { .if_freq = 7750, .fm_rfn = 0, .agc_mode = 1, .std = 7,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0f */
.atv_gh = { .if_freq = 7750, .fm_rfn = 0, .agc_mode = 1, .std = 7,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0f */
.atv_i = { .if_freq = 7750, .fm_rfn = 0, .agc_mode = 1, .std = 7,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0f */
.atv_l = { .if_freq = 7750, .fm_rfn = 0, .agc_mode = 1, .std = 7,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0f */
.atv_lc = { .if_freq = 1250, .fm_rfn = 0, .agc_mode = 1, .std = 7,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0f */
.atv_mn = { .if_freq = 5750, .fm_rfn = 0, .agc_mode = 1, .std = 5,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0d */
.atsc_6 = { .if_freq = 3250, .fm_rfn = 0, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1c */
.dvbt_6 = { .if_freq = 3300, .fm_rfn = 0, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1c */
.dvbt_7 = { .if_freq = 3800, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
.dvbt_8 = { .if_freq = 4300, .fm_rfn = 0, .agc_mode = 3, .std = 6,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_6 = { .if_freq = 4000, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
.qam_7 = { .if_freq = 4500, .fm_rfn = 0, .agc_mode = 3, .std = 6,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_8 = { .if_freq = 5000, .fm_rfn = 0, .agc_mode = 3, .std = 7,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1f */
};
static struct tda18271_std_map tda18271c2_std_map = {
.fm_radio = { .if_freq = 1250, .fm_rfn = 1, .agc_mode = 3, .std = 0,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x18 */
.atv_b = { .if_freq = 6000, .fm_rfn = 0, .agc_mode = 1, .std = 5,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0d */
.atv_dk = { .if_freq = 6900, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_gh = { .if_freq = 7100, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_i = { .if_freq = 7250, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_l = { .if_freq = 6900, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_lc = { .if_freq = 1250, .fm_rfn = 0, .agc_mode = 1, .std = 6,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0e */
.atv_mn = { .if_freq = 5400, .fm_rfn = 0, .agc_mode = 1, .std = 4,
.if_lvl = 0, .rfagc_top = 0x2c, }, /* EP3[4:0] 0x0c */
.atsc_6 = { .if_freq = 3250, .fm_rfn = 0, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1c */
.dvbt_6 = { .if_freq = 3300, .fm_rfn = 0, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1c */
.dvbt_7 = { .if_freq = 3500, .fm_rfn = 0, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1c */
.dvbt_8 = { .if_freq = 4000, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
.qam_6 = { .if_freq = 4000, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
.qam_7 = { .if_freq = 4500, .fm_rfn = 0, .agc_mode = 3, .std = 6,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_8 = { .if_freq = 5000, .fm_rfn = 0, .agc_mode = 3, .std = 7,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1f */
};
/*---------------------------------------------------------------------*/
static struct tda18271_map_layout tda18271c1_map_layout = {
.main_pll = tda18271c1_main_pll,
.cal_pll = tda18271c1_cal_pll,
.rf_cal = tda18271c1_rf_cal,
.rf_cal_kmco = tda18271c1_km,
.bp_filter = tda18271_bp_filter,
.rf_band = tda18271_rf_band,
.gain_taper = tda18271_gain_taper,
.ir_measure = tda18271_ir_measure,
};
static struct tda18271_map_layout tda18271c2_map_layout = {
.main_pll = tda18271c2_main_pll,
.cal_pll = tda18271c2_cal_pll,
.rf_cal = tda18271c2_rf_cal,
.rf_cal_kmco = tda18271c2_km,
.rf_cal_dc_over_dt = tda18271_rf_cal_dc_over_dt,
.bp_filter = tda18271_bp_filter,
.rf_band = tda18271_rf_band,
.gain_taper = tda18271_gain_taper,
.ir_measure = tda18271_ir_measure,
};
int tda18271_assign_map_layout(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
int ret = 0;
switch (priv->id) {
case TDA18271HDC1:
priv->maps = &tda18271c1_map_layout;
priv->std = tda18271c1_std_map;
break;
case TDA18271HDC2:
priv->maps = &tda18271c2_map_layout;
priv->std = tda18271c2_std_map;
break;
default:
ret = -EINVAL;
break;
}
memcpy(priv->rf_cal_state, &tda18271_rf_band_template,
sizeof(tda18271_rf_band_template));
return ret;
}
| gpl-2.0 |
mialwe/midnight-i9100 | drivers/media/dvb/mantis/mantis_uart.c | 2456 | 4363 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_reg.h"
#include "mantis_uart.h"
struct mantis_uart_params {
enum mantis_baud baud_rate;
enum mantis_parity parity;
};
static struct {
char string[7];
} rates[5] = {
{ "9600" },
{ "19200" },
{ "38400" },
{ "57600" },
{ "115200" }
};
static struct {
char string[5];
} parity[3] = {
{ "NONE" },
{ "ODD" },
{ "EVEN" }
};
#define UART_MAX_BUF 16
int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
{
struct mantis_hwconfig *config = mantis->hwconfig;
u32 stat = 0, rxd, i;
/* get data */
for (i = 0; i < (config->bytes + 1); i++) {
stat = mmread(MANTIS_UART_STAT);
if (stat & MANTIS_UART_RXFIFO_FULL) {
dprintk(MANTIS_ERROR, 1, "RX Fifo FULL");
}
/*
 * Read the raw RX word first: the framing (bit 7) and parity
 * (bit 6) error flags must be checked before the 0x3f data
 * mask discards them.
 */
rxd = mmread(MANTIS_UART_RXD);
data[i] = rxd & 0x3f;
dprintk(MANTIS_DEBUG, 1, "Reading ... <%02x>", data[i]);
if (rxd & (1 << 7)) {
dprintk(MANTIS_ERROR, 1, "UART framing error");
return -EINVAL;
}
if (rxd & (1 << 6)) {
dprintk(MANTIS_ERROR, 1, "UART parity error");
return -EINVAL;
}
}
return 0;
}
static void mantis_uart_work(struct work_struct *work)
{
struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
struct mantis_hwconfig *config = mantis->hwconfig;
u8 buf[UART_MAX_BUF];
int i;
mantis_uart_read(mantis, buf);
for (i = 0; i < (config->bytes + 1); i++)
dprintk(MANTIS_INFO, 1, "UART BUF:%d <%02x> ", i, buf[i]);
dprintk(MANTIS_DEBUG, 0, "\n");
}
static int mantis_uart_setup(struct mantis_pci *mantis,
struct mantis_uart_params *params)
{
u32 reg;
mmwrite((mmread(MANTIS_UART_CTL) | (params->parity & 0x3)), MANTIS_UART_CTL);
reg = mmread(MANTIS_UART_BAUD);
switch (params->baud_rate) {
case MANTIS_BAUD_9600:
reg |= 0xd8;
break;
case MANTIS_BAUD_19200:
reg |= 0x6c;
break;
case MANTIS_BAUD_38400:
reg |= 0x36;
break;
case MANTIS_BAUD_57600:
reg |= 0x23;
break;
case MANTIS_BAUD_115200:
reg |= 0x11;
break;
default:
return -EINVAL;
}
mmwrite(reg, MANTIS_UART_BAUD);
return 0;
}
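/*
 * Note on the magic values above (an observation, not from a datasheet):
 * 0xd8/0x6c/0x36 halve as the rate doubles and all satisfy
 * divisor * baud ~= 2.07 MHz, so they look like baud-rate divisors of a
 * fixed UART clock; the 57600/115200 values fit the same clock only
 * approximately.
 */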
int mantis_uart_init(struct mantis_pci *mantis)
{
struct mantis_hwconfig *config = mantis->hwconfig;
struct mantis_uart_params params;
/* baud rate and parity come from the hardware config */
params.baud_rate = config->baud_rate;
params.parity = config->parity;
dprintk(MANTIS_INFO, 1, "Initializing UART @ %sbps parity:%s",
rates[params.baud_rate].string,
parity[params.parity].string);
init_waitqueue_head(&mantis->uart_wq);
spin_lock_init(&mantis->uart_lock);
INIT_WORK(&mantis->uart_work, mantis_uart_work);
/* disable interrupt */
mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
mantis_uart_setup(mantis, ¶ms);
/* default 1 byte */
mmwrite((mmread(MANTIS_UART_BAUD) | (config->bytes << 8)), MANTIS_UART_BAUD);
/* flush buffer */
mmwrite((mmread(MANTIS_UART_CTL) | MANTIS_UART_RXFLUSH), MANTIS_UART_CTL);
/* enable interrupt */
mmwrite(mmread(MANTIS_INT_MASK) | 0x800, MANTIS_INT_MASK);
mmwrite(mmread(MANTIS_UART_CTL) | MANTIS_UART_RXINT, MANTIS_UART_CTL);
schedule_work(&mantis->uart_work);
dprintk(MANTIS_DEBUG, 1, "UART successfully initialized");
return 0;
}
EXPORT_SYMBOL_GPL(mantis_uart_init);
void mantis_uart_exit(struct mantis_pci *mantis)
{
/* disable interrupt */
mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
flush_work_sync(&mantis->uart_work);
}
EXPORT_SYMBOL_GPL(mantis_uart_exit);
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_mapphone_kexec | net/rds/bind.c | 2712 | 5286 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/jhash.h>
#include "rds.h"
#define BIND_HASH_SIZE 1024
static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
static DEFINE_SPINLOCK(rds_bind_lock);
static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
{
return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
(BIND_HASH_SIZE - 1));
}
static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
struct rds_sock *insert)
{
struct rds_sock *rs;
struct hlist_node *node;
struct hlist_head *head = hash_to_bucket(addr, port);
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
rcu_read_lock();
hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
if (cmp == needle) {
rcu_read_unlock();
return rs;
}
}
rcu_read_unlock();
if (insert) {
/*
* make sure our addr and port are set before
* we are added to the list, other people
* in rcu will find us as soon as the
* hlist_add_head_rcu is done
*/
insert->rs_bound_addr = addr;
insert->rs_bound_port = port;
rds_sock_addref(insert);
hlist_add_head_rcu(&insert->rs_bound_node, head);
}
return NULL;
}
/*
* Return the rds_sock bound at the given local address.
*
* The rx path can race with rds_release. We notice if rds_release() has
* marked this socket and don't return a rs ref to the rx path.
*/
struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
{
struct rds_sock *rs;
rs = rds_bind_lookup(addr, port, NULL);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
ntohs(port));
return rs;
}
/* returns -ve errno or +ve port */
static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
{
unsigned long flags;
int ret = -EADDRINUSE;
u16 rover, last;
if (*port != 0) {
rover = be16_to_cpu(*port);
last = rover;
} else {
rover = max_t(u16, net_random(), 2);
last = rover - 1;
}
spin_lock_irqsave(&rds_bind_lock, flags);
do {
if (rover == 0)
rover++;
if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
*port = rs->rs_bound_port;
ret = 0;
rdsdebug("rs %p binding to %pI4:%d\n",
rs, &addr, (int)ntohs(*port));
break;
}
} while (rover++ != last);
spin_unlock_irqrestore(&rds_bind_lock, flags);
return ret;
}
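/*
 * Example of the rover walk above (illustrative): with *port == 0 and
 * net_random() picking 60000, the loop tries ports 60000..65535, wraps
 * through 0 (bumped to 1 by the rover == 0 check), and finishes at
 * last == 59999, so every non-zero port is offered to rds_bind_lookup()
 * exactly once before -EADDRINUSE is returned.
 */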
void rds_remove_bound(struct rds_sock *rs)
{
unsigned long flags;
spin_lock_irqsave(&rds_bind_lock, flags);
if (rs->rs_bound_addr) {
rdsdebug("rs %p unbinding from %pI4:%d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
hlist_del_init_rcu(&rs->rs_bound_node);
rds_sock_put(rs);
rs->rs_bound_addr = 0;
}
spin_unlock_irqrestore(&rds_bind_lock, flags);
}
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
struct rds_sock *rs = rds_sk_to_rs(sk);
struct rds_transport *trans;
int ret = 0;
lock_sock(sk);
if (addr_len != sizeof(struct sockaddr_in) ||
sin->sin_family != AF_INET ||
rs->rs_bound_addr ||
sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
ret = -EINVAL;
goto out;
}
ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port);
if (ret)
goto out;
trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
if (printk_ratelimit())
printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
"load rds_tcp or rds_rdma?\n");
goto out;
}
rs->rs_transport = trans;
ret = 0;
out:
release_sock(sk);
/* we might have called rds_remove_bound on error */
if (ret)
synchronize_rcu();
return ret;
}
| gpl-2.0 |
KlinkOnE/android_kernel_ba2x_2.0-1 | drivers/infiniband/hw/qib/qib_sd7220.c | 3992 | 40725 | /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file contains all of the code that is specific to the SerDes
* on the QLogic_IB 7220 chip.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include "qib.h"
#include "qib_7220.h"
#define SD7220_FW_NAME "qlogic/sd7220.fw"
MODULE_FIRMWARE(SD7220_FW_NAME);
/*
* Same as in qib_iba7220.c, but just the registers needed here.
* Could move whole set to qib_7220.h, but decided better to keep
* local.
*/
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_scratch KREG_IDX(Scratch)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
/* these are used only here, not in qib_iba7220.c */
#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
/*
* The IBSerDesMappTable is a memory that holds values to be stored in
* various SerDes registers by IBC.
*/
#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
/*
* Below used for sdnum parameter, selecting one of the two sections
* used for PCIe, or the single SerDes used for IB.
*/
#define PCIE_SERDES0 0
#define PCIE_SERDES1 1
/*
* The EPB requires addressing in a particular form. EPB_LOC() is intended
* to make #definitions a little more readable.
*/
#define EPB_ADDR_SHF 8
#define EPB_LOC(chn, elt, reg) \
(((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
EPB_ADDR_SHF)
#define EPB_IB_QUAD0_CS_SHF (25)
#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
#define EPB_IB_UC_CS_SHF (26)
#define EPB_PCIE_UC_CS_SHF (27)
#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
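/*
 * Worked example of the EPB_LOC() packing (illustrative): IB_MPREG6
 * below is EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF). The inner
 * term is (0x0 | (6 << 4) | (0xF << 9)) = 0x1e60, shifted left by
 * EPB_ADDR_SHF (8) to 0x1e6000, then OR'd with the uC chip-select bit
 * (1 << 26), giving 0x41e6000.
 */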
/* Forward declarations. */
static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
u32 data, u32 mask);
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
int mask);
static int qib_sd_trimdone_poll(struct qib_devdata *dd);
static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
static int qib_sd_setvals(struct qib_devdata *dd);
static int qib_sd_early(struct qib_devdata *dd);
static int qib_sd_dactrim(struct qib_devdata *dd);
static int qib_internal_presets(struct qib_devdata *dd);
/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
static int qib_sd_trimself(struct qib_devdata *dd, int val);
static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
static int qib_sd7220_ib_load(struct qib_devdata *dd,
const struct firmware *fw);
static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
const struct firmware *fw);
/*
* Below keeps track of whether the "once per power-on" initialization has
* been done, because uC code Version 1.32.17 or higher allows the uC to
* be reset at will, and Automatic Equalization may require it. So the
* state of the reset "pin", is no longer valid. Instead, we check for the
* actual uC code having been loaded.
*/
static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
const struct firmware *fw)
{
struct qib_devdata *dd = ppd->dd;
if (!dd->cspec->serdes_first_init_done &&
qib_sd7220_ib_vfy(dd, fw) > 0)
dd->cspec->serdes_first_init_done = 1;
return dd->cspec->serdes_first_init_done;
}
/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
#define UC_PAR_CLR_D 8
#define UC_PAR_CLR_M 0xC
#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
int ret;
/* clear, then re-enable parity errs */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
UC_PAR_CLR_D, UC_PAR_CLR_M);
if (ret < 0) {
qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
goto bail;
}
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
UC_PAR_CLR_M);
qib_read_kreg32(dd, kr_scratch);
udelay(4);
qib_write_kreg(dd, kr_hwerrclear,
QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
qib_read_kreg32(dd, kr_scratch);
bail:
return;
}
/*
* After a reset or other unusual event, the epb interface may need
* to be re-synchronized, between the host and the uC.
* returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
*/
#define IBSD_RESYNC_TRIES 3
#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
static int qib_resync_ibepb(struct qib_devdata *dd)
{
int ret, pat, tries, chn;
u32 loc;
ret = -1;
chn = 0;
for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
loc = IB_PGUDP(chn);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
qib_dev_err(dd, "Failed read in resync\n");
continue;
}
if (ret != 0xF0 && ret != 0x55 && tries == 0)
qib_dev_err(dd, "unexpected pattern in resync\n");
pat = ret ^ 0xA5; /* alternate F0 and 55 */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
if (ret < 0) {
qib_dev_err(dd, "Failed write in resync\n");
continue;
}
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
qib_dev_err(dd, "Failed re-read in resync\n");
continue;
}
if (ret != pat) {
qib_dev_err(dd, "Failed compare1 in resync\n");
continue;
}
loc = IB_CMUDONE(chn);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
continue;
}
if ((ret & 0x70) != ((chn << 4) | 0x40)) {
qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
ret, chn);
continue;
}
if (++chn == 4)
break; /* Success */
}
return (ret > 0) ? 0 : ret;
}
/*
* Localize the stuff that should be done to change IB uC reset
* returns <0 for errors.
*/
static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
u64 rst_val;
int ret = 0;
unsigned long flags;
rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
if (assert_rst) {
/*
* Vendor recommends "interrupting" uC before reset, to
* minimize possible glitches.
*/
spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
epb_access(dd, IB_7220_SERDES, 1);
rst_val |= 1ULL;
/* Squelch possible parity error from _asserting_ reset */
qib_write_kreg(dd, kr_hwerrmask,
dd->cspec->hwerrmask &
~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay to ensure it took effect */
qib_read_kreg32(dd, kr_scratch);
udelay(2);
/* once it's reset, can remove interrupt */
epb_access(dd, IB_7220_SERDES, -1);
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
} else {
/*
* Before we de-assert reset, we need to deal with
* possible glitch on the Parity-error line.
* Suppress it around the reset, both in chip-level
* hwerrmask and in IB uC control reg. uC will allow
* it again during startup.
*/
u64 val;
rst_val &= ~(1ULL);
qib_write_kreg(dd, kr_hwerrmask,
dd->cspec->hwerrmask &
~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
ret = qib_resync_ibepb(dd);
if (ret < 0)
qib_dev_err(dd, "unable to re-sync IB EPB\n");
/* set uC control regs to suppress parity errs */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
if (ret < 0)
goto bail;
/* IB uC code past Version 1.32.17 allow suppression of wdog */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
0x80);
if (ret < 0) {
qib_dev_err(dd, "Failed to set WDOG disable\n");
goto bail;
}
qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay for startup */
qib_read_kreg32(dd, kr_scratch);
udelay(1);
/* clear, then re-enable parity errs */
qib_sd7220_clr_ibpar(dd);
val = qib_read_kreg64(dd, kr_hwerrstatus);
if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
qib_dev_err(dd, "IBUC Parity still set after RST\n");
dd->cspec->hwerrmask &=
~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
}
qib_write_kreg(dd, kr_hwerrmask,
dd->cspec->hwerrmask);
}
bail:
return ret;
}
static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
const char *where)
{
int ret, chn, baduns;
u64 val;
if (!where)
where = "?";
/* give time for reset to settle out in EPB */
udelay(2);
ret = qib_resync_ibepb(dd);
if (ret < 0)
qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
/* Do "sacrificial read" to get EPB in sane state after reset */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
if (ret < 0)
qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
/* Check/show "summary" Trim-done bit in IBCStatus */
val = qib_read_kreg64(dd, kr_ibcstatus);
if (!(val & (1ULL << 11)))
qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
/*
* Do "dummy read/mod/wr" to get EPB in sane state after reset
* The default value for MPREG6 is 0.
*/
udelay(2);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
if (ret < 0)
qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
udelay(10);
baduns = 0;
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
if (ret < 0)
qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
" (%s)\n", chn, where);
if (!(ret & 0x10)) {
int probe;
baduns |= (1 << chn);
qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
" (%s)\n", chn, ret, where);
probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0);
qib_dev_err(dd, "probe is %d (%02X)\n",
probe, probe);
probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
qib_dev_err(dd, "re-read: %d (%02X)\n",
probe, probe);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
qib_dev_err(dd,
"Err on TRIMDONE rewrite1\n");
}
}
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
qib_dev_err(dd,
"Reseting TRIMDONE on chn %d (%s)\n",
chn, where);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
qib_dev_err(dd, "Failed re-setting "
"TRIMDONE, chn %d (%s)\n",
chn, where);
}
}
}
/*
* Below is the portion of the IBA7220-specific bringup_serdes() that actually
* deals with registers and memory within the SerDes itself.
* Post IB uC code version 1.32.17, was_reset being 1 is not really
* informative, so we double-check.
*/
int qib_sd7220_init(struct qib_devdata *dd)
{
const struct firmware *fw;
int ret = 1; /* default to failure */
int first_reset, was_reset;
/* SERDES MPU reset recorded in D0 */
was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
if (!was_reset) {
/* entered with reset not asserted, we need to do it */
qib_ibsd_reset(dd, 1);
qib_sd_trimdone_monitor(dd, "Driver-reload");
}
ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
if (ret) {
qib_dev_err(dd, "Failed to load IB SERDES image\n");
goto done;
}
/* Substitute our deduced value for was_reset */
ret = qib_ibsd_ucode_loaded(dd->pport, fw);
if (ret < 0)
goto bail;
first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
/*
* Alter some regs per vendor latest doc, reset-defaults
* are not right for IB.
*/
ret = qib_sd_early(dd);
if (ret < 0) {
qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
goto bail;
}
/*
* Set DAC manual trim IB.
* We only do this once after chip has been reset (usually
* same as once per system boot).
*/
if (first_reset) {
ret = qib_sd_dactrim(dd);
if (ret < 0) {
qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
goto bail;
}
}
/*
* Set various registers (DDS and RXEQ) that will be
* controlled by IBC (in 1.2 mode) to reasonable preset values
* Calling the "internal" version avoids the "check for needed"
* and "trimdone monitor" that might be counter-productive.
*/
ret = qib_internal_presets(dd);
if (ret < 0) {
qib_dev_err(dd, "Failed to set IB SERDES presets\n");
goto bail;
}
ret = qib_sd_trimself(dd, 0x80);
if (ret < 0) {
qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
goto bail;
}
/* Load image, then try to verify */
ret = 0; /* Assume success */
if (first_reset) {
int vfy;
int trim_done;
ret = qib_sd7220_ib_load(dd, fw);
if (ret < 0) {
qib_dev_err(dd, "Failed to load IB SERDES image\n");
goto bail;
} else {
/* Loaded image, try to verify */
vfy = qib_sd7220_ib_vfy(dd, fw);
if (vfy != ret) {
qib_dev_err(dd, "SERDES PRAM VFY failed\n");
goto bail;
} /* end if verified */
} /* end if loaded */
/*
* Loaded and verified. Almost good...
* hold "success" in ret
*/
ret = 0;
/*
* Prev steps all worked, continue bringup
* De-assert RESET to uC, only in first reset, to allow
* trimming.
*
* Since our default setup sets START_EQ1 to
* PRESET, we need to clear that for this very first run.
*/
ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
if (ret < 0) {
qib_dev_err(dd, "Failed clearing START_EQ1\n");
goto bail;
}
qib_ibsd_reset(dd, 0);
/*
* If this is not the first reset, trimdone should be set
* already. We may need to verify this.
*/
trim_done = qib_sd_trimdone_poll(dd);
/*
* Whether or not trimdone succeeded, we need to put the
* uC back into reset to avoid a possible fight with the
* IBC state-machine.
*/
qib_ibsd_reset(dd, 1);
if (!trim_done) {
qib_dev_err(dd, "No TRIMDONE seen\n");
goto bail;
}
/*
* DEBUG: check each time we reset if trimdone bits have
* gotten cleared, and re-set them.
*/
qib_sd_trimdone_monitor(dd, "First-reset");
/* Remember so we do not re-do the load, dactrim, etc. */
dd->cspec->serdes_first_init_done = 1;
}
/*
* setup for channel training and load values for
* RxEq and DDS in tables used by IBC in IB1.2 mode
*/
ret = 0;
if (qib_sd_setvals(dd) >= 0)
goto done;
bail:
ret = 1;
done:
/* start relock timer regardless, but start at 1 second */
set_7220_relock_poll(dd, -1);
release_firmware(fw);
return ret;
}
#define EPB_ACC_REQ 1
#define EPB_ACC_GNT 0x100
#define EPB_DATA_MASK 0xFF
#define EPB_RD (1ULL << 24)
#define EPB_TRANS_RDY (1ULL << 31)
#define EPB_TRANS_ERR (1ULL << 30)
#define EPB_TRANS_TRIES 5
/*
* query, claim, release ownership of the EPB (External Parallel Bus)
* for a specified SERDES.
* the "claim" parameter is >0 to claim, <0 to release, 0 to query.
* Returns <0 for errors, >0 if we had ownership, else 0.
*/
static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
{
u16 acc;
u64 accval;
int owned = 0;
u64 oct_sel = 0;
switch (sdnum) {
case IB_7220_SERDES:
/*
* The IB SERDES "ownership" is fairly simple. A single each
* request/grant.
*/
acc = kr_ibsd_epb_access_ctrl;
break;
case PCIE_SERDES0:
case PCIE_SERDES1:
/* PCIe SERDES has two "octants", need to select which */
acc = kr_pciesd_epb_access_ctrl;
oct_sel = (2 << (sdnum - PCIE_SERDES0));
break;
default:
return 0;
}
/* Make sure any outstanding transaction was seen */
qib_read_kreg32(dd, kr_scratch);
udelay(15);
accval = qib_read_kreg32(dd, acc);
owned = !!(accval & EPB_ACC_GNT);
if (claim < 0) {
/* Need to release */
u64 pollval;
/*
* The only writeable bits are the request and CS.
* Both should be clear.
*/
u64 newval = 0;
qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
pollval = qib_read_kreg32(dd, acc);
udelay(5);
pollval = qib_read_kreg32(dd, acc);
if (pollval & EPB_ACC_GNT)
owned = -1;
} else if (claim > 0) {
/* Need to claim */
u64 pollval;
u64 newval = EPB_ACC_REQ | oct_sel;
qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
pollval = qib_read_kreg32(dd, acc);
udelay(5);
pollval = qib_read_kreg32(dd, acc);
if (!(pollval & EPB_ACC_GNT))
owned = -1;
}
return owned;
}
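/*
 * Typical usage pattern (as in qib_sd7220_reg_mod() below): claim with
 * epb_access(dd, sdnum, 1), run the EPB transactions, then release with
 * epb_access(dd, sdnum, -1); a negative return at either step means the
 * grant was not in the expected state.
 */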
/*
* Helper to deal with the write-then-read race condition on the EPB regs
*/
static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
int tries;
u64 transval;
qib_write_kreg(dd, reg, i_val);
/* Throw away first read, as RDY bit may be stale */
transval = qib_read_kreg64(dd, reg);
for (tries = EPB_TRANS_TRIES; tries; --tries) {
transval = qib_read_kreg32(dd, reg);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
if (transval & EPB_TRANS_ERR)
return -1;
if (tries > 0 && o_vp)
*o_vp = transval;
return tries;
}
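/*
 * Note (descriptive): epb_trans() returns the number of poll tries left
 * (> 0 means the transaction completed), 0 if RDY was never seen, or -1
 * on EPB_TRANS_ERR; on success, *o_vp receives the raw transaction word.
 */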
/**
* qib_sd7220_reg_mod - modify SERDES register
* @dd: the qlogic_ib device
* @sdnum: which SERDES to access
* @loc: location - channel, element, register, as packed by EPB_LOC() macro.
* @wd: Write Data - value to set in register
* @mask: ones where data should be spliced into reg.
*
 * Basic register read/modify/write, with un-needed accesses elided. That is,
 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
 * Returns current (presumed, if a write was done) contents of selected
 * register, or <0 if errors.
*/
static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
u32 wd, u32 mask)
{
u16 trans;
u64 transval;
int owned;
int tries, ret;
unsigned long flags;
switch (sdnum) {
case IB_7220_SERDES:
trans = kr_ibsd_epb_transaction_reg;
break;
case PCIE_SERDES0:
case PCIE_SERDES1:
trans = kr_pciesd_epb_transaction_reg;
break;
default:
return -1;
}
/*
* All access is locked in software (vs other host threads) and
* hardware (vs uC access).
*/
spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
if (tries > 0) {
tries = 1; /* to make read-skip work */
if (mask != 0xFF) {
/*
* Not a pure write, so need to read.
* loc encodes chip-select as well as address
*/
transval = loc | EPB_RD;
tries = epb_trans(dd, trans, transval, &transval);
}
if (tries > 0 && mask != 0) {
/*
* Not a pure read, so need to write.
*/
wd = (wd & mask) | (transval & ~mask);
transval = loc | (wd & EPB_DATA_MASK);
tries = epb_trans(dd, trans, transval, &transval);
}
}
/* else, failed to see ready, what error-handling? */
/*
* Release bus. Failure is an error.
*/
if (epb_access(dd, sdnum, -1) < 0)
ret = -1;
else
ret = transval & EPB_DATA_MASK;
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
if (tries <= 0)
ret = -1;
return ret;
}
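/*
 * Usage sketch for qib_sd7220_reg_mod() (illustrative only): a mask of 0
 * performs a pure read, a mask of 0xFF a pure write, and anything else a
 * read-modify-write splice. E.g. a pure read of a register:
 *
 *	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
 *
 * versus setting only bit 7 while preserving the rest:
 *
 *	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0x80, 0x80);
 */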
#define EPB_ROM_R (2)
#define EPB_ROM_W (1)
/*
* Below, all uC-related, use appropriate UC_CS, depending
* on which SerDes is used.
*/
#define EPB_UC_CTL EPB_LOC(6, 0, 0)
#define EPB_MADDRL EPB_LOC(6, 0, 2)
#define EPB_MADDRH EPB_LOC(6, 0, 3)
#define EPB_ROMDATA EPB_LOC(6, 0, 4)
#define EPB_RAMDATA EPB_LOC(6, 0, 5)
/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
u8 *buf, int cnt, int rd_notwr)
{
u16 trans;
u64 transval;
u64 csbit;
int owned;
int tries;
int sofar;
int addr;
int ret;
unsigned long flags;
const char *op;
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
case IB_7220_SERDES:
csbit = 1ULL << EPB_IB_UC_CS_SHF;
trans = kr_ibsd_epb_transaction_reg;
break;
case PCIE_SERDES0:
case PCIE_SERDES1:
/* PCIe SERDES has uC "chip select" in different bit, too */
csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
trans = kr_pciesd_epb_transaction_reg;
break;
default:
return -1;
}
op = rd_notwr ? "Rd" : "Wr";
spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
/*
* In future code, we may need to distinguish several address ranges,
* and select various memories based on this. For now, just trim
* "loc" (location including address and memory select) to
* "addr" (address within memory). we will only support PRAM
* The memory is 8KB.
*/
addr = loc & 0x1FFF;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
sofar = 0;
if (tries > 0) {
/*
* Every "memory" access is doubly-indirect.
* We set two bytes of address, then read/write
		 * one or more bytes of data.
*/
/* First, we set control to "Read" or "Write" */
transval = csbit | EPB_UC_CTL |
(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
tries = epb_trans(dd, trans, transval, &transval);
while (tries > 0 && sofar < cnt) {
if (!sofar) {
/* Only set address at start of chunk */
int addrbyte = (addr + sofar) >> 8;
transval = csbit | EPB_MADDRH | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
if (tries <= 0)
break;
addrbyte = (addr + sofar) & 0xFF;
transval = csbit | EPB_MADDRL | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
if (tries <= 0)
break;
}
if (rd_notwr)
transval = csbit | EPB_ROMDATA | EPB_RD;
else
transval = csbit | EPB_ROMDATA | buf[sofar];
tries = epb_trans(dd, trans, transval, &transval);
if (tries <= 0)
break;
if (rd_notwr)
buf[sofar] = transval & EPB_DATA_MASK;
++sofar;
}
/* Finally, clear control-bit for Read or Write */
transval = csbit | EPB_UC_CTL;
tries = epb_trans(dd, trans, transval, &transval);
}
ret = sofar;
/* Release bus. Failure is an error */
if (epb_access(dd, sdnum, -1) < 0)
ret = -1;
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
if (tries <= 0)
ret = -1;
return ret;
}
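/*
 * Summary of the doubly-indirect PRAM protocol implemented above
 * (descriptive only): write EPB_UC_CTL with EPB_ROM_R or EPB_ROM_W to arm
 * the direction, write EPB_MADDRH/EPB_MADDRL with the 13-bit PRAM address
 * once per chunk, then clock one byte per EPB_ROMDATA transaction (the
 * hardware apparently auto-increments the address within a chunk), and
 * finally clear EPB_UC_CTL to disarm.
 */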
#define PROG_CHUNK 64
static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
const u8 *img, int len, int offset)
{
int cnt, sofar, req;
sofar = 0;
while (sofar < len) {
req = len - sofar;
if (req > PROG_CHUNK)
req = PROG_CHUNK;
cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
(u8 *)img + sofar, req, 0);
if (cnt < req) {
sofar = -1;
break;
}
sofar += req;
}
return sofar;
}
#define VFY_CHUNK 64
#define SD_PRAM_ERROR_LIMIT 42
static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
const u8 *img, int len, int offset)
{
int cnt, sofar, req, idx, errors;
unsigned char readback[VFY_CHUNK];
errors = 0;
sofar = 0;
while (sofar < len) {
req = len - sofar;
if (req > VFY_CHUNK)
req = VFY_CHUNK;
cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
readback, req, 1);
if (cnt < req) {
/* failed in read itself */
sofar = -1;
break;
}
for (idx = 0; idx < cnt; ++idx) {
if (readback[idx] != img[idx+sofar])
++errors;
}
sofar += cnt;
}
return errors ? -errors : sofar;
}
static int
qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
{
return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
}
static int
qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
{
return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
}
/*
* IRQ not set up at this point in init, so we poll.
*/
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)
static int qib_sd_trimdone_poll(struct qib_devdata *dd)
{
int trim_tmo, ret;
uint64_t val;
/*
* Default to failure, so IBC will not start
* without IB_SERDES_TRIM_DONE.
*/
ret = 0;
for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
val = qib_read_kreg64(dd, kr_ibcstatus);
if (val & IB_SERDES_TRIM_DONE) {
ret = 1;
break;
}
msleep(10);
}
if (trim_tmo >= TRIM_TMO) {
qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
ret = 0;
}
return ret;
}
#define TX_FAST_ELT (9)
/*
* Set the "negotiation" values for SERDES. These are used by the IB1.2
 * link negotiation. The macros below are an attempt to keep the values a
* little more human-editable.
* First, values related to Drive De-emphasis Settings.
*/
#define NUM_DDS_REGS 6
#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
{ { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
(main_d << 3) | 4 | (ipre_d >> 2), \
(main_s << 3) | 4 | (ipre_s >> 2), \
((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
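/*
 * Worked example (derived from the macro above): the first 3m-cable row,
 * DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0), packs to the six register bytes
 * { 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 } -- amplitude bytes first (DDR
 * then SDR), then main-tap bytes, then post/pre-cursor bytes.
 */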
static struct dds_init {
uint8_t reg_vals[NUM_DDS_REGS];
} dds_init_vals[] = {
/* DDR(FDR) SDR(HDR) */
/* Vendor recommends below for 3m cable */
#define DDS_3M 0
DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
/* Vendor recommends below for 1m cable */
#define DDS_1M 13
DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
};
/*
* Now the RXEQ section of the table.
*/
/* Hardware packs an element number and register address thus: */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
{RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
#define RXEQ_VAL_ALL(elt, adr, val) \
{RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
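/*
 * Example of the rdesc packing (derived from the macro above):
 * RXEQ_INIT_RDESC(7, 0x27) == (7 & 0xF) | (0x27 << 4) == 0x277.
 */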
#define RXEQ_SDR_DFELTH 0
#define RXEQ_SDR_TLTH 0
#define RXEQ_SDR_G1CNT_Z1CNT 0x11
#define RXEQ_SDR_ZCNT 23
static struct rxeq_init {
u16 rdesc; /* in form used in SerDesDDSRXEQ */
u8 rdata[4];
} rxeq_init_vals[] = {
/* Set Rcv Eq. to Preset node */
RXEQ_VAL_ALL(7, 0x27, 0x10),
/* Set DFELTHFDR/HDR thresholds */
RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
	/* Set TLTHFDR/HDR thresholds */
RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
/* Set Preamp setting 2 (ZFR/ZCNT) */
RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
};
/* There are 17 values from vendor, but IBC only accesses the first 16 */
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
static int qib_sd_setvals(struct qib_devdata *dd)
{
int idx, midx;
int min_idx; /* Minimum index for this portion of table */
uint32_t dds_reg_map;
u64 __iomem *taddr, *iaddr;
uint64_t data;
uint64_t sdctl;
taddr = dd->kregbase + kr_serdes_maptable;
iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
/*
* Init the DDS section of the table.
* Each "row" of the table provokes NUM_DDS_REG writes, to the
* registers indicated in DDS_REG_MAP.
*/
sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
/*
* Iterate down table within loop for each register to store.
*/
dds_reg_map = DDS_REG_MAP;
for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
mmiowb();
qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
u64 __iomem *daddr = taddr + ((midx << 4) + idx);
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
mmiowb();
qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
/*
* Init the RXEQ section of the table.
* This runs in a different order, as the pattern of
* register references is more complex, but there are only
* four "data" values per register.
*/
min_idx = idx; /* RXEQ indices pick up where DDS left off */
taddr += 0x100; /* RXEQ data is in second half of table */
/* Iterate through RXEQ register addresses */
for (idx = 0; idx < RXEQ_ROWS; ++idx) {
int didx; /* "destination" */
int vidx;
/* didx is offset by min_idx to address RXEQ range of regs */
didx = idx + min_idx;
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
mmiowb();
qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
mmiowb();
qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
return 0;
}
#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
/*
* Repeat a "store" across all channels of the IB SerDes.
* Although nominally it inherits the "read value" of the last
* channel it modified, the only really useful return is <0 for
* failure, >= 0 for success. The parameter 'loc' is assumed to
 * be the location in some channel of the register to be modified.
 * The caller can specify use of the "gang write" option of EPB,
 * in which case we use the specified channel data for any fields
 * not explicitly written.
*/
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
int mask)
{
int ret = -1;
int chnl;
if (loc & EPB_GLOBAL_WR) {
/*
* Our caller has assured us that we can set all four
* channels at once. Trust that. If mask is not 0xFF,
* we will read the _specified_ channel for our starting
* value.
*/
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
if (mask != 0xFF) {
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
loc & ~EPB_GLOBAL_WR, 0, 0);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "pre-read failed: elt %d,"
" addr 0x%X, chnl %d\n",
(sloc & 0xF),
(sloc >> 9) & 0x3f, chnl);
return ret;
}
val = (ret & ~mask) | (val & mask);
}
loc &= ~(7 << (4+EPB_ADDR_SHF));
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "Global WR failed: elt %d,"
" addr 0x%X, val %02X\n",
(sloc & 0xF), (sloc >> 9) & 0x3f, val);
}
return ret;
}
/* Clear "channel" and set CS so we can simply iterate */
loc &= ~(7 << (4+EPB_ADDR_SHF));
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
for (chnl = 0; chnl < 4; ++chnl) {
int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "Write failed: elt %d,"
" addr 0x%X, chnl %d, val 0x%02X,"
" mask 0x%02X\n",
(sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
val & 0xFF, mask & 0xFF);
break;
}
}
return ret;
}
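/*
 * Example uses (as seen in qib_sd_early() and qib_sd_dactrim() below):
 * a gang write to all channels at once,
 *
 *	ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
 *
 * versus a per-channel read-modify-write of selected bits,
 *
 *	ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
 */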
/*
* Set the Tx values normally modified by IBC in IB1.2 mode to default
 * values, as taken from the first row of the init table.
*/
static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
{
int ret;
int idx, reg, data;
uint32_t regmap;
regmap = DDS_REG_MAP;
for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
reg = (regmap & 0xF);
regmap >>= 4;
data = ddi->reg_vals[idx];
/* Vendor says RMW not needed for these regs, use 0xFF mask */
ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
if (ret < 0)
break;
}
return ret;
}
/*
* Set the Rx values normally modified by IBC in IB1.2 mode to default
 * values, as taken from the selected column of the init table.
*/
static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
{
int ret;
int ridx;
int cnt = ARRAY_SIZE(rxeq_init_vals);
for (ridx = 0; ridx < cnt; ++ridx) {
int elt, reg, val, loc;
elt = rxeq_init_vals[ridx].rdesc & 0xF;
reg = rxeq_init_vals[ridx].rdesc >> 4;
loc = EPB_LOC(0, elt, reg);
val = rxeq_init_vals[ridx].rdata[vsel];
/* mask of 0xFF, because hardware does full-byte store. */
ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
if (ret < 0)
break;
}
return ret;
}
/*
 * Set the default values (row 0) for DDR Driver De-emphasis.
 * We do this initially and whenever we turn off IB-1.2.
*
* The "default" values for Rx equalization are also stored to
* SerDes registers. Formerly (and still default), we used set 2.
* For experimenting with cables and link-partners, we allow changing
* that via a module parameter.
*/
static unsigned qib_rxeq_set = 2;
module_param_named(rxeq_default_set, qib_rxeq_set, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxeq_default_set,
"Which set [0..3] of Rx Equalization values is default");
static int qib_internal_presets(struct qib_devdata *dd)
{
int ret = 0;
ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
if (ret < 0)
qib_dev_err(dd, "Failed to set default DDS values\n");
ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
if (ret < 0)
qib_dev_err(dd, "Failed to set default RXEQ values\n");
return ret;
}
int qib_sd7220_presets(struct qib_devdata *dd)
{
int ret = 0;
if (!dd->cspec->presets_needed)
return ret;
dd->cspec->presets_needed = 0;
/* Assert uC reset, so we don't clash with it. */
qib_ibsd_reset(dd, 1);
udelay(2);
qib_sd_trimdone_monitor(dd, "link-down");
ret = qib_internal_presets(dd);
return ret;
}
static int qib_sd_trimself(struct qib_devdata *dd, int val)
{
int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
}
static int qib_sd_early(struct qib_devdata *dd)
{
int ret;
ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
if (ret < 0)
goto bail;
ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
if (ret < 0)
goto bail;
ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
bail:
return ret;
}
#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
static int qib_sd_dactrim(struct qib_devdata *dd)
{
int ret;
ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
if (ret < 0)
goto bail;
/* more fine-tuning of what will be default */
ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
if (ret < 0)
goto bail;
ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
if (ret < 0)
goto bail;
ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
if (ret < 0)
goto bail;
ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
if (ret < 0)
goto bail;
/*
* Delay for max possible number of steps, with slop.
* Each step is about 4usec.
*/
udelay(415);
ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
bail:
return ret;
}
#define RELOCK_FIRST_MS 3
#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
void toggle_7220_rclkrls(struct qib_devdata *dd)
{
int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
int ret;
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
}
/* And again for good measure */
udelay(1);
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
}
/* Now reset xgxs and IBC to complete the recovery */
dd->f_xgxs_reset(dd->pport);
}
/*
 * Shut down the timer that polls for relock occasions, if needed.
 * This is "hooked" from qib_7220_quiet_serdes(), which is called
 * just before qib_shutdown_device() in qib_driver.c shuts down all
 * the other timers.
*/
void shutdown_7220_relock_poll(struct qib_devdata *dd)
{
if (dd->cspec->relock_timer_active)
del_timer_sync(&dd->cspec->relock_timer);
}
static unsigned qib_relock_by_timer = 1;
module_param_named(relock_by_timer, qib_relock_by_timer, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
static void qib_run_relock(unsigned long opaque)
{
struct qib_devdata *dd = (struct qib_devdata *)opaque;
struct qib_pportdata *ppd = dd->pport;
struct qib_chip_specific *cs = dd->cspec;
int timeoff;
/*
* Check link-training state for "stuck" state, when down.
	 * If found, try relock and schedule another try at
	 * exponentially growing delay, maxed at one second.
	 * If not stuck, our work is done.
*/
if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
(QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
QIBL_LINKACTIVE))) {
if (qib_relock_by_timer) {
if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
toggle_7220_rclkrls(dd);
}
/* re-set timer for next check */
timeoff = cs->relock_interval << 1;
if (timeoff > HZ)
timeoff = HZ;
cs->relock_interval = timeoff;
} else
timeoff = HZ;
mod_timer(&cs->relock_timer, jiffies + timeoff);
}
void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
{
struct qib_chip_specific *cs = dd->cspec;
if (ibup) {
/* We are now up, relax timer to 1 second interval */
if (cs->relock_timer_active) {
cs->relock_interval = HZ;
mod_timer(&cs->relock_timer, jiffies + HZ);
}
} else {
/* Transition to down, (re-)set timer to short interval. */
unsigned int timeout;
timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
if (timeout == 0)
timeout = 1;
/* If timer has not yet been started, do so. */
if (!cs->relock_timer_active) {
cs->relock_timer_active = 1;
init_timer(&cs->relock_timer);
cs->relock_timer.function = qib_run_relock;
cs->relock_timer.data = (unsigned long) dd;
cs->relock_interval = timeout;
cs->relock_timer.expires = jiffies + timeout;
add_timer(&cs->relock_timer);
} else {
cs->relock_interval = timeout;
mod_timer(&cs->relock_timer, jiffies + timeout);
}
}
}
| gpl-2.0 |
NamelessRom/android_kernel_lge_omap4 | drivers/media/dvb/frontends/stb6000.c | 4248 | 5422 | /*
Driver for ST STB6000 DVBS Silicon tuner
Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dvb/frontend.h>
#include <asm/types.h>
#include "stb6000.h"
static int debug;
#define dprintk(args...) \
do { \
if (debug) \
printk(KERN_DEBUG "stb6000: " args); \
} while (0)
struct stb6000_priv {
/* i2c details */
int i2c_address;
struct i2c_adapter *i2c;
u32 frequency;
};
static int stb6000_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
return 0;
}
static int stb6000_sleep(struct dvb_frontend *fe)
{
struct stb6000_priv *priv = fe->tuner_priv;
int ret;
u8 buf[] = { 10, 0 };
struct i2c_msg msg = {
.addr = priv->i2c_address,
.flags = 0,
.buf = buf,
.len = 2
};
dprintk("%s:\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(priv->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: i2c error\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
return (ret == 1) ? 0 : ret;
}
static int stb6000_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct stb6000_priv *priv = fe->tuner_priv;
unsigned int n, m;
int ret;
u32 freq_mhz;
int bandwidth;
u8 buf[12];
struct i2c_msg msg = {
.addr = priv->i2c_address,
.flags = 0,
.buf = buf,
.len = 12
};
dprintk("%s:\n", __func__);
freq_mhz = params->frequency / 1000;
bandwidth = params->u.qpsk.symbol_rate / 1000000;
if (bandwidth > 31)
bandwidth = 31;
if ((freq_mhz > 949) && (freq_mhz < 2151)) {
buf[0] = 0x01;
buf[1] = 0xac;
if (freq_mhz < 1950)
buf[1] = 0xaa;
if (freq_mhz < 1800)
buf[1] = 0xa8;
if (freq_mhz < 1650)
buf[1] = 0xa6;
if (freq_mhz < 1530)
buf[1] = 0xa5;
if (freq_mhz < 1470)
buf[1] = 0xa4;
if (freq_mhz < 1370)
buf[1] = 0xa2;
if (freq_mhz < 1300)
buf[1] = 0xa1;
if (freq_mhz < 1200)
buf[1] = 0xa0;
if (freq_mhz < 1075)
buf[1] = 0xbc;
if (freq_mhz < 1000)
buf[1] = 0xba;
if (freq_mhz < 1075) {
n = freq_mhz / 8; /* vco=lo*4 */
m = 2;
} else {
n = freq_mhz / 16; /* vco=lo*2 */
m = 1;
}
buf[2] = n >> 1;
buf[3] = (unsigned char)(((n & 1) << 7) |
(m * freq_mhz - n * 16) | 0x60);
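		/*
		 * Worked example (illustrative): for freq_mhz = 1200, the
		 * 1075..2150 branch above gives n = 1200 / 16 = 75, m = 1,
		 * so buf[2] = 75 >> 1 = 0x25 and
		 * buf[3] = ((75 & 1) << 7) | (1200 - 75 * 16) | 0x60 = 0xE0.
		 */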
buf[4] = 0x04;
buf[5] = 0x0e;
buf[6] = (unsigned char)(bandwidth);
buf[7] = 0xd8;
buf[8] = 0xd0;
buf[9] = 0x50;
buf[10] = 0xeb;
buf[11] = 0x4f;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(priv->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: i2c error\n", __func__);
udelay(10);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
buf[0] = 0x07;
buf[1] = 0xdf;
buf[2] = 0xd0;
buf[3] = 0x50;
buf[4] = 0xfb;
msg.len = 5;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(priv->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: i2c error\n", __func__);
udelay(10);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
priv->frequency = freq_mhz * 1000;
return (ret == 1) ? 0 : ret;
}
return -1;
}
static int stb6000_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct stb6000_priv *priv = fe->tuner_priv;
*frequency = priv->frequency;
return 0;
}
static struct dvb_tuner_ops stb6000_tuner_ops = {
.info = {
.name = "ST STB6000",
.frequency_min = 950000,
.frequency_max = 2150000
},
.release = stb6000_release,
.sleep = stb6000_sleep,
.set_params = stb6000_set_params,
.get_frequency = stb6000_get_frequency,
};
struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c)
{
struct stb6000_priv *priv = NULL;
u8 b0[] = { 0 };
u8 b1[] = { 0, 0 };
struct i2c_msg msg[2] = {
{
.addr = addr,
.flags = 0,
.buf = b0,
.len = 0
}, {
.addr = addr,
.flags = I2C_M_RD,
.buf = b1,
.len = 2
}
};
int ret;
dprintk("%s:\n", __func__);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
	/* is some i2c device here? */
ret = i2c_transfer(i2c, msg, 2);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (ret != 2)
return NULL;
priv = kzalloc(sizeof(struct stb6000_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
priv->i2c_address = addr;
priv->i2c = i2c;
memcpy(&fe->ops.tuner_ops, &stb6000_tuner_ops,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
return fe;
}
EXPORT_SYMBOL(stb6000_attach);
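/*
 * Typical attach sequence from a demod/card driver (a sketch only; the
 * I2C address 0x60 is an example and is board-specific):
 *
 *	if (!stb6000_attach(fe, 0x60, &dev->i2c_adap))
 *		dprintk("no STB6000 tuner found\n");
 */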
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("DVB STB6000 driver");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@me.by>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
IOKP-kitkat/kernel_samsung_jf | arch/mips/alchemy/devboards/db1300.c | 4504 | 19980 | /*
* DBAu1300 init and platform device setup.
*
* (c) 2009 Manuel Lauss <manuel.lauss@googlemail.com>
*/
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/init.h>
#include <linux/input.h> /* KEY_* codes */
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/ata_platform.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1200fb.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-db1x00/db1300.h>
#include <asm/mach-db1x00/bcsr.h>
#include <asm/mach-au1x00/prom.h>
#include "platform.h"
static struct i2c_board_info db1300_i2c_devs[] __initdata = {
{ I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec */
{ I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */
};
/* multifunction pins to assign to GPIO controller */
static int db1300_gpio_pins[] __initdata = {
AU1300_PIN_LCDPWM0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_WAKE1,
AU1300_PIN_WAKE2, AU1300_PIN_WAKE3, AU1300_PIN_FG3AUX,
AU1300_PIN_EXTCLK1,
-1, /* terminator */
};
/* multifunction pins to assign to device functions */
static int db1300_dev_pins[] __initdata = {
/* wake-from-str pins 0-3 */
AU1300_PIN_WAKE0,
/* external clock sources for PSC0 */
AU1300_PIN_EXTCLK0,
/* 8bit MMC interface on SD0: 6-9 */
AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6,
AU1300_PIN_SD0DAT7,
/* UART1 pins: 11-18 */
AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR,
AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR,
AU1300_PIN_U1RX, AU1300_PIN_U1TX,
/* UART0 pins: 19-24 */
AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR,
AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR,
/* UART2: 25-26 */
AU1300_PIN_U2RX, AU1300_PIN_U2TX,
/* UART3: 27-28 */
AU1300_PIN_U3RX, AU1300_PIN_U3TX,
/* LCD controller PWMs, ext pixclock: 30-31 */
AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN,
/* SD1 interface: 32-37 */
AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2,
AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK,
/* SD2 interface: 38-43 */
AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2,
AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK,
/* PSC0/1 clocks: 44-45 */
AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK,
/* PSCs: 46-49/50-53/54-57/58-61 */
AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0,
AU1300_PIN_PSC0D1,
AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
AU1300_PIN_PSC1D1,
AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
AU1300_PIN_PSC2D1,
AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
AU1300_PIN_PSC3D1,
/* PCMCIA interface: 62-70 */
AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16,
AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT,
AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW,
/* camera interface H/V sync inputs: 71-72 */
AU1300_PIN_CIMLS, AU1300_PIN_CIMFS,
/* PSC2/3 clocks: 73-74 */
AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK,
-1, /* terminator */
};
static void __init db1300_gpio_config(void)
{
int *i;
i = &db1300_dev_pins[0];
while (*i != -1)
au1300_pinfunc_to_dev(*i++);
i = &db1300_gpio_pins[0];
while (*i != -1)
au1300_gpio_direction_input(*i++);/* implies pin_to_gpio */
au1300_set_dbdma_gpio(1, AU1300_PIN_FG3AUX);
}
char *get_system_type(void)
{
return "DB1300";
}
/**********************************************************************/
static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct nand_chip *this = mtd->priv;
unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;
ioaddr &= 0xffffff00;
if (ctrl & NAND_CLE) {
ioaddr += MEM_STNAND_CMD;
} else if (ctrl & NAND_ALE) {
ioaddr += MEM_STNAND_ADDR;
} else {
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
__raw_writeb(cmd, this->IO_ADDR_W);
wmb();
}
}
static int au1300_nand_device_ready(struct mtd_info *mtd)
{
return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
}
static const char *db1300_part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition db1300_nand_parts[] = {
{
.name = "NAND FS 0",
.offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
struct platform_nand_data db1300_nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
.nr_partitions = ARRAY_SIZE(db1300_nand_parts),
.partitions = db1300_nand_parts,
.chip_delay = 20,
.part_probe_types = db1300_part_probes,
},
.ctrl = {
.dev_ready = au1300_nand_device_ready,
.cmd_ctrl = au1300_nand_cmd_ctrl,
},
};
static struct resource db1300_nand_res[] = {
[0] = {
.start = DB1300_NAND_PHYS_ADDR,
.end = DB1300_NAND_PHYS_ADDR + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device db1300_nand_dev = {
.name = "gen_nand",
.num_resources = ARRAY_SIZE(db1300_nand_res),
.resource = db1300_nand_res,
.id = -1,
.dev = {
.platform_data = &db1300_nand_platdata,
}
};
/**********************************************************************/
static struct resource db1300_eth_res[] = {
[0] = {
.start = DB1300_ETH_PHYS_ADDR,
.end = DB1300_ETH_PHYS_END,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1300_ETH_INT,
.end = DB1300_ETH_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config db1300_eth_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
.flags = SMSC911X_USE_32BIT,
};
static struct platform_device db1300_eth_dev = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(db1300_eth_res),
.resource = db1300_eth_res,
.dev = {
.platform_data = &db1300_eth_config,
},
};
/**********************************************************************/
static struct resource au1300_psc1_res[] = {
[0] = {
.start = AU1300_PSC1_PHYS_ADDR,
.end = AU1300_PSC1_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC1_INT,
.end = AU1300_PSC1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC1_TX,
.end = AU1300_DSCR_CMD0_PSC1_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC1_RX,
.end = AU1300_DSCR_CMD0_PSC1_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_ac97_dev = {
.name = "au1xpsc_ac97",
.id = 1, /* PSC ID. match with AC97 codec ID! */
.num_resources = ARRAY_SIZE(au1300_psc1_res),
.resource = au1300_psc1_res,
};
/**********************************************************************/
static struct resource au1300_psc2_res[] = {
[0] = {
.start = AU1300_PSC2_PHYS_ADDR,
.end = AU1300_PSC2_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC2_INT,
.end = AU1300_PSC2_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC2_TX,
.end = AU1300_DSCR_CMD0_PSC2_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC2_RX,
.end = AU1300_DSCR_CMD0_PSC2_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_i2s_dev = {
.name = "au1xpsc_i2s",
.id = 2, /* PSC ID */
.num_resources = ARRAY_SIZE(au1300_psc2_res),
.resource = au1300_psc2_res,
};
/**********************************************************************/
static struct resource au1300_psc3_res[] = {
[0] = {
.start = AU1300_PSC3_PHYS_ADDR,
.end = AU1300_PSC3_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC3_INT,
.end = AU1300_PSC3_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC3_TX,
.end = AU1300_DSCR_CMD0_PSC3_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC3_RX,
.end = AU1300_DSCR_CMD0_PSC3_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_i2c_dev = {
.name = "au1xpsc_smbus",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1300_psc3_res),
.resource = au1300_psc3_res,
};
/**********************************************************************/
/* Proper key assignments when facing the LCD panel. For key assignments
 * according to the schematics, swap up with down and left with right.
* I chose to use it to emulate the arrow keys of a keyboard.
*/
static struct gpio_keys_button db1300_5waysw_arrowkeys[] = {
{
.code = KEY_DOWN,
.gpio = AU1300_PIN_LCDPWM0,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-down",
},
{
.code = KEY_UP,
.gpio = AU1300_PIN_PSC2SYNC1,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-up",
},
{
.code = KEY_RIGHT,
.gpio = AU1300_PIN_WAKE3,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-right",
},
{
.code = KEY_LEFT,
.gpio = AU1300_PIN_WAKE2,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-left",
},
{
.code = KEY_ENTER,
.gpio = AU1300_PIN_WAKE1,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-push",
},
};
static struct gpio_keys_platform_data db1300_5waysw_data = {
.buttons = db1300_5waysw_arrowkeys,
.nbuttons = ARRAY_SIZE(db1300_5waysw_arrowkeys),
.rep = 1,
.name = "db1300-5wayswitch",
};
static struct platform_device db1300_5waysw_dev = {
.name = "gpio-keys",
.dev = {
.platform_data = &db1300_5waysw_data,
},
};
/**********************************************************************/
static struct pata_platform_info db1300_ide_info = {
.ioport_shift = DB1300_IDE_REG_SHIFT,
};
#define IDE_ALT_START (14 << DB1300_IDE_REG_SHIFT)
static struct resource db1300_ide_res[] = {
[0] = {
.start = DB1300_IDE_PHYS_ADDR,
.end = DB1300_IDE_PHYS_ADDR + IDE_ALT_START - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1300_IDE_PHYS_ADDR + IDE_ALT_START,
.end = DB1300_IDE_PHYS_ADDR + DB1300_IDE_PHYS_LEN - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = DB1300_IDE_INT,
.end = DB1300_IDE_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device db1300_ide_dev = {
.dev = {
.platform_data = &db1300_ide_info,
},
.name = "pata_platform",
.resource = db1300_ide_res,
.num_resources = ARRAY_SIZE(db1300_ide_res),
};
/**********************************************************************/
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
void(*mmc_cd)(struct mmc_host *, unsigned long);
/* disable the one currently screaming. No other way to shut it up */
if (irq == DB1300_SD1_INSERT_INT) {
disable_irq_nosync(DB1300_SD1_INSERT_INT);
enable_irq(DB1300_SD1_EJECT_INT);
} else {
disable_irq_nosync(DB1300_SD1_EJECT_INT);
enable_irq(DB1300_SD1_INSERT_INT);
}
/* link against CONFIG_MMC=m. We can only be called once MMC core has
* initialized the controller, so symbol_get() should always succeed.
*/
mmc_cd = symbol_get(mmc_detect_change);
mmc_cd(ptr, msecs_to_jiffies(500));
symbol_put(mmc_detect_change);
return IRQ_HANDLED;
}
static int db1300_mmc_card_readonly(void *mmc_host)
{
/* it uses SD1 interface, but the DB1200's SD0 bit in the CPLD */
return bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP;
}
static int db1300_mmc_card_inserted(void *mmc_host)
{
return bcsr_read(BCSR_SIGSTAT) & (1 << 12); /* insertion irq signal */
}
static int db1300_mmc_cd_setup(void *mmc_host, int en)
{
int ret;
if (en) {
ret = request_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, 0,
"sd_insert", mmc_host);
if (ret)
goto out;
ret = request_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, 0,
"sd_eject", mmc_host);
if (ret) {
free_irq(DB1300_SD1_INSERT_INT, mmc_host);
goto out;
}
if (db1300_mmc_card_inserted(mmc_host))
enable_irq(DB1300_SD1_EJECT_INT);
else
enable_irq(DB1300_SD1_INSERT_INT);
} else {
free_irq(DB1300_SD1_INSERT_INT, mmc_host);
free_irq(DB1300_SD1_EJECT_INT, mmc_host);
}
ret = 0;
out:
return ret;
}
static void db1300_mmcled_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}
static struct led_classdev db1300_mmc_led = {
.brightness_set = db1300_mmcled_set,
};
struct au1xmmc_platform_data db1300_sd1_platdata = {
.cd_setup = db1300_mmc_cd_setup,
.card_inserted = db1300_mmc_card_inserted,
.card_readonly = db1300_mmc_card_readonly,
.led = &db1300_mmc_led,
};
static struct resource au1300_sd1_res[] = {
[0] = {
.start = AU1300_SD1_PHYS_ADDR,
.end = AU1300_SD1_PHYS_ADDR,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_SD1_INT,
.end = AU1300_SD1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_SDMS_TX1,
.end = AU1300_DSCR_CMD0_SDMS_TX1,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_SDMS_RX1,
.end = AU1300_DSCR_CMD0_SDMS_RX1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_sd1_dev = {
.dev = {
.platform_data = &db1300_sd1_platdata,
},
.name = "au1xxx-mmc",
.id = 1,
.resource = au1300_sd1_res,
.num_resources = ARRAY_SIZE(au1300_sd1_res),
};
/**********************************************************************/
static int db1300_movinand_inserted(void *mmc_host)
{
return 0; /* disable for now, it doesn't work yet */
}
static int db1300_movinand_readonly(void *mmc_host)
{
return 0;
}
static void db1300_movinand_led_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
}
static struct led_classdev db1300_movinand_led = {
.brightness_set = db1300_movinand_led_set,
};
struct au1xmmc_platform_data db1300_sd0_platdata = {
.card_inserted = db1300_movinand_inserted,
.card_readonly = db1300_movinand_readonly,
.led = &db1300_movinand_led,
.mask_host_caps = MMC_CAP_NEEDS_POLL,
};
static struct resource au1300_sd0_res[] = {
[0] = {
.start = AU1100_SD0_PHYS_ADDR,
.end = AU1100_SD0_PHYS_ADDR,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_SD0_INT,
.end = AU1300_SD0_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_SDMS_TX0,
.end = AU1300_DSCR_CMD0_SDMS_TX0,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_SDMS_RX0,
.end = AU1300_DSCR_CMD0_SDMS_RX0,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_sd0_dev = {
.dev = {
.platform_data = &db1300_sd0_platdata,
},
.name = "au1xxx-mmc",
.id = 0,
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
/**********************************************************************/
static struct platform_device db1300_wm9715_dev = {
.name = "wm9712-codec",
.id = 1, /* ID of PSC for AC97 audio, see asoc glue! */
};
static struct platform_device db1300_ac97dma_dev = {
.name = "au1xpsc-pcm",
.id = 1, /* PSC ID */
};
static struct platform_device db1300_i2sdma_dev = {
.name = "au1xpsc-pcm",
.id = 2, /* PSC ID */
};
static struct platform_device db1300_sndac97_dev = {
.name = "db1300-ac97",
};
static struct platform_device db1300_sndi2s_dev = {
.name = "db1300-i2s",
};
/**********************************************************************/
static int db1300fb_panel_index(void)
{
return 9; /* DB1300_800x480 */
}
static int db1300fb_panel_init(void)
{
/* Apply power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD,
BCSR_BOARD_LCDBL);
return 0;
}
static int db1300fb_panel_shutdown(void)
{
/* Remove power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDBL,
BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD);
return 0;
}
static struct au1200fb_platdata db1300fb_pd = {
.panel_index = db1300fb_panel_index,
.panel_init = db1300fb_panel_init,
.panel_shutdown = db1300fb_panel_shutdown,
};
static struct resource au1300_lcd_res[] = {
[0] = {
.start = AU1200_LCD_PHYS_ADDR,
.end = AU1200_LCD_PHYS_ADDR + 0x800 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_LCD_INT,
.end = AU1300_LCD_INT,
.flags = IORESOURCE_IRQ,
}
};
static u64 au1300_lcd_dmamask = DMA_BIT_MASK(32);
static struct platform_device db1300_lcd_dev = {
.name = "au1200-lcd",
.id = 0,
.dev = {
.dma_mask = &au1300_lcd_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1300fb_pd,
},
.num_resources = ARRAY_SIZE(au1300_lcd_res),
.resource = au1300_lcd_res,
};
/**********************************************************************/
static struct platform_device *db1300_dev[] __initdata = {
&db1300_eth_dev,
&db1300_i2c_dev,
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
&db1300_sd0_dev,
&db1300_sd1_dev,
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,
&db1300_wm9715_dev,
&db1300_ac97dma_dev,
&db1300_i2sdma_dev,
&db1300_sndac97_dev,
&db1300_sndi2s_dev,
};
static int __init db1300_device_init(void)
{
int swapped, cpldirq;
/* setup CPLD IRQ muxer */
cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1);
irq_set_irq_type(cpldirq, IRQ_TYPE_LEVEL_HIGH);
bcsr_init_irq(DB1300_FIRST_INT, DB1300_LAST_INT, cpldirq);
/* insert/eject IRQs: one always triggers so don't enable them
* when doing request_irq() on them. DB1200 has this bug too.
*/
irq_set_status_flags(DB1300_SD1_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_SD1_EJECT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_CF_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_CF_EJECT_INT, IRQ_NOAUTOEN);
/*
* setup board
*/
prom_get_ethernet_addr(&db1300_eth_config.mac[0]);
i2c_register_board_info(0, db1300_i2c_devs,
ARRAY_SIZE(db1300_i2c_devs));
/* Audio PSC clock is supplied by codecs (PSC1, 2) */
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
/* I2C uses internal 48MHz EXTCLK1 */
__raw_writel(PSC_SEL_CLK_INTCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
/* enable power to USB ports */
bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_USBHPWR | BCSR_RESETS_OTGPWR);
/* although it is socket #0, it uses the CPLD bits which previous boards
* have used for socket #1.
*/
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x00400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x00400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x00010000 - 1,
DB1300_CF_INT, DB1300_CF_INSERT_INT, 0, DB1300_CF_EJECT_INT, 1);
swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
db1x_register_norflash(64 << 20, 2, swapped);
return platform_add_devices(db1300_dev, ARRAY_SIZE(db1300_dev));
}
device_initcall(db1300_device_init);
void __init board_setup(void)
{
unsigned short whoami;
db1300_gpio_config();
bcsr_init(DB1300_BCSR_PHYS_ADDR,
DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);
whoami = bcsr_read(BCSR_WHOAMI);
printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
"BoardID %d CPLD Rev %d DaughtercardID %d\n",
BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami),
BCSR_WHOAMI_DCID(whoami));
/* enable UARTs, YAMON only enables #2 */
alchemy_uart_enable(AU1300_UART0_PHYS_ADDR);
alchemy_uart_enable(AU1300_UART1_PHYS_ADDR);
alchemy_uart_enable(AU1300_UART3_PHYS_ADDR);
}
| gpl-2.0 |
DeqingSun/Glass_kernel | arch/blackfin/mach-bf537/boards/minotaur.c | 4504 | 13580 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Cambridge Signal Processing
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <linux/spi/ad7877.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "CamSig Minotaur BF537";
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
{
.start = 0x20310000, /* IO PORT */
.end = 0x20312000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20311000, /* Attribute Memory */
.end = 0x20311FFF,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, {
.start = IRQ_PF6, /* Card Detect PF6 */
.end = IRQ_PF6,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pcmcia_cf_device = {
.name = "bfin_cf_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
.resource = bfin_pcmcia_cf_resources,
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
/* Partition sizes */
#define FLASH_SIZE 0x00400000
#define PSIZE_UBOOT 0x00030000
#define PSIZE_INITRAMFS 0x00240000
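/*
 * Resulting flash map (derived from the sizes above):
 *	bootloader(spi):	0x000000..0x02FFFF	(192 KiB, masked read-only)
 *	initramfs(spi):		0x030000..0x26FFFF	(2.25 MiB)
 *	opt(spi):		0x270000..0x3FFFFF	(1600 KiB)
 */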
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = PSIZE_UBOOT,
.offset = 0x000000,
.mask_flags = MTD_CAP_ROM
}, {
.name = "initramfs(spi)",
.size = PSIZE_INITRAMFS,
.offset = PSIZE_UBOOT
}, {
.name = "opt(spi)",
.size = FLASH_SIZE - (PSIZE_UBOOT + PSIZE_INITRAMFS),
.offset = PSIZE_UBOOT + PSIZE_INITRAMFS,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p64",
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0, /* do not use dma transfer with this chip */
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
static struct platform_device *minotaur_devices[] __initdata = {
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
static int __init minotaur_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(minotaur_devices, ARRAY_SIZE(minotaur_devices));
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info,
ARRAY_SIZE(bfin_spi_board_info));
#endif
return 0;
}
arch_initcall(minotaur_init);
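/*
* Illustrative sketch (not part of this board file): a driver bound to
* one of the platform devices registered above fetches its resources
* through the standard platform API. The probe function below is
* hypothetical; only the platform_get_resource()/platform_get_irq()
* calls and the IORESOURCE_MEM/IORESOURCE_IRQ entries they consume
* come from the declarations in this file.
*/
static int example_probe(struct platform_device *pdev)
{
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENODEV;
/* mem->start..mem->end is the MMIO window, irq the first interrupt */
return 0;
}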
static struct platform_device *minotaur_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(minotaur_early_devices,
ARRAY_SIZE(minotaur_early_devices));
}
void native_machine_restart(char *cmd)
{
/* workaround reboot hang when booting from SPI */
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
| gpl-2.0 |
STR4NG3R/android_kernel_motorola_msm8226 | fs/ecryptfs/read_write.c | 5016 | 8866 | /**
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include "ecryptfs_kernel.h"
/**
* ecryptfs_write_lower
* @ecryptfs_inode: The eCryptfs inode
* @data: Data to write
* @offset: Byte offset in the lower file to which to write the data
* @size: Number of bytes from @data to write at @offset in the lower
* file
*
* Write data to the lower file.
*
* Returns bytes written on success; less than zero on error
*/
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size)
{
struct file *lower_file;
mm_segment_t fs_save;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
fs_save = get_fs();
set_fs(get_ds());
rc = vfs_write(lower_file, data, size, &offset);
set_fs(fs_save);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
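/*
* Illustrative sketch (not part of eCryptfs): a caller persisting a
* small marker at offset 0 of the lower file via the helper above. The
* function and its contents are hypothetical; only the
* ecryptfs_write_lower() call and its convention (bytes written on
* success, negative on error) come from the code above.
*/
static int example_write_marker(struct inode *ecryptfs_inode)
{
char marker[8] = "MARKER0";
int rc;
rc = ecryptfs_write_lower(ecryptfs_inode, marker, 0, sizeof(marker));
return (rc < 0) ? rc : 0;
}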
/**
* ecryptfs_write_lower_page_segment
* @ecryptfs_inode: The eCryptfs inode
* @page_for_lower: The page containing the data to be written to the
* lower file
* @offset_in_page: The offset in the @page_for_lower from which to
* start writing the data
* @size: The amount of data from @page_for_lower to write to the
* lower file
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to write
* the contents of @page_for_lower to the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
struct page *page_for_lower,
size_t offset_in_page, size_t size)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+ offset_in_page);
virt = kmap(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
kunmap(page_for_lower);
return rc;
}
/**
* ecryptfs_write
* @ecryptfs_inode: The eCryptfs file into which to write
* @data: Virtual address where data to write is located
* @offset: Offset in the eCryptfs file at which to begin writing the
* data from @data
* @size: The number of bytes to write from @data
*
* Write an arbitrary amount of data to an arbitrary location in the
* eCryptfs inode page cache. This is done on a page-by-page and then
* an extent-by-extent basis; individual extents are encrypted and
* written to the lower page cache (via VFS writes). This function
* takes care of all the address translation to locations in the lower
* filesystem; it also handles truncate events, writing out zeros
* where necessary.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
size_t size)
{
struct page *ecryptfs_page;
struct ecryptfs_crypt_stat *crypt_stat;
char *ecryptfs_page_virt;
loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
loff_t data_offset = 0;
loff_t pos;
int rc = 0;
crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
/*
* if we are writing beyond current size, then start pos
* at the current size - we'll fill in zeros from there.
*/
if (offset > ecryptfs_file_size)
pos = ecryptfs_file_size;
else
pos = offset;
while (pos < (offset + size)) {
pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
loff_t total_remaining_bytes = ((offset + size) - pos);
if (fatal_signal_pending(current)) {
rc = -EINTR;
break;
}
if (num_bytes > total_remaining_bytes)
num_bytes = total_remaining_bytes;
if (pos < offset) {
/* remaining zeros to write, up to destination offset */
loff_t total_remaining_zeros = (offset - pos);
if (num_bytes > total_remaining_zeros)
num_bytes = total_remaining_zeros;
}
ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
ecryptfs_page_idx);
if (IS_ERR(ecryptfs_page)) {
rc = PTR_ERR(ecryptfs_page);
printk(KERN_ERR "%s: Error getting page at "
"index [%ld] from eCryptfs inode "
"mapping; rc = [%d]\n", __func__,
ecryptfs_page_idx, rc);
goto out;
}
ecryptfs_page_virt = kmap_atomic(ecryptfs_page);
/*
* pos: where we're now writing, offset: where the request was
* If current pos is before request, we are filling zeros
* If we are at or beyond request, we are writing the *data*
* If we're in a fresh page beyond eof, zero it in either case
*/
if (pos < offset || !start_offset_in_page) {
/* We are extending past the previous end of the file.
* Fill in zero values to the end of the page */
memset(((char *)ecryptfs_page_virt
+ start_offset_in_page), 0,
PAGE_CACHE_SIZE - start_offset_in_page);
}
/* pos >= offset, we are now writing the data request */
if (pos >= offset) {
memcpy(((char *)ecryptfs_page_virt
+ start_offset_in_page),
(data + data_offset), num_bytes);
data_offset += num_bytes;
}
kunmap_atomic(ecryptfs_page_virt);
flush_dcache_page(ecryptfs_page);
SetPageUptodate(ecryptfs_page);
unlock_page(ecryptfs_page);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED)
rc = ecryptfs_encrypt_page(ecryptfs_page);
else
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
ecryptfs_page,
start_offset_in_page,
data_offset);
page_cache_release(ecryptfs_page);
if (rc) {
printk(KERN_ERR "%s: Error encrypting "
"page; rc = [%d]\n", __func__, rc);
goto out;
}
pos += num_bytes;
}
if (pos > ecryptfs_file_size) {
i_size_write(ecryptfs_inode, pos);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
int rc2;
rc2 = ecryptfs_write_inode_size_to_metadata(
ecryptfs_inode);
if (rc2) {
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc2);
if (!rc)
rc = rc2;
goto out;
}
}
}
out:
return rc;
}
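/*
* Illustrative sketch (not part of eCryptfs): the page/offset split
* used by the loop in ecryptfs_write() above. A byte position @pos is
* decomposed with PAGE_CACHE_SHIFT/PAGE_CACHE_MASK exactly as in the
* loop; e.g. with 4 KiB pages, pos = 5000 falls in page index 1 at
* in-page offset 904.
*/
static inline void example_split_pos(loff_t pos, pgoff_t *page_idx,
size_t *off_in_page)
{
*page_idx = pos >> PAGE_CACHE_SHIFT;
*off_in_page = pos & ~PAGE_CACHE_MASK;
}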
/**
* ecryptfs_read_lower
* @data: The read data is stored here by this function
* @offset: Byte offset in the lower file from which to read the data
* @size: Number of bytes to read from @offset of the lower file and
* store into @data
* @ecryptfs_inode: The eCryptfs inode
*
* Read @size bytes of data at byte offset @offset from the lower
* inode into memory location @data.
*
* Returns bytes read on success; 0 on EOF; less than zero on error
*/
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode)
{
struct file *lower_file;
mm_segment_t fs_save;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
fs_save = get_fs();
set_fs(get_ds());
rc = vfs_read(lower_file, data, size, &offset);
set_fs(fs_save);
return rc;
}
/**
* ecryptfs_read_lower_page_segment
* @page_for_ecryptfs: The page into which data for eCryptfs will be
* written
* @page_index: Index of the page, used to compute the byte offset in the lower file
* @offset_in_page: Offset in @page_for_ecryptfs from which to start
* writing
* @size: The number of bytes to write into @page_for_ecryptfs
* @ecryptfs_inode: The eCryptfs inode
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to read
* the contents of @page_for_ecryptfs from the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
virt = kmap(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
kunmap(page_for_ecryptfs);
flush_dcache_page(page_for_ecryptfs);
return rc;
}
| gpl-2.0 |
Pafcholini/linux-3.10.y | drivers/net/ethernet/dec/tulip/pnic2.c | 9624 | 12465 | /*
drivers/net/ethernet/dec/tulip/pnic2.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
Modified to help support PNIC_II by Kevin B. Hendricks
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
Please submit bugs to http://bugzilla.kernel.org/ .
*/
/* Understanding the PNIC_II - everything in this file is based
* on the PNIC_II.PDF datasheet, which is sorely lacking in detail
*
* As I understand things, here are the registers and bits that
* explain the masks and constants used in this file that are
* either different from the 21142/3 or important for basic operation.
*
*
* CSR 6 (mask = 0xfe3bd1fd of bits not to change)
* -----
* Bit 24 - SCR
* Bit 23 - PCS
* Bit 22 - TTM (Transmit Threshold Mode)
* Bit 18 - Port Select
* Bit 13 - Start - 1, Stop - 0 Transmissions
* Bit 11:10 - Loop Back Operation Mode
* Bit 9 - Full Duplex mode (Advertise 10baseT-FD if CSR14<7> is set)
* Bit 1 - Start - 1, Stop - 0 Receive
*
*
* CSR 14 (mask = 0xfff0ee39 of bits not to change)
* ------
* Bit 19 - PAUSE-Pause
* Bit 18 - Advertise T4
* Bit 17 - Advertise 100baseTx-FD
* Bit 16 - Advertise 100baseTx-HD
* Bit 12 - LTE - Link Test Enable
* Bit 7 - ANE - Auto Negotiate Enable
* Bit 6 - HDE - Advertise 10baseT-HD
* Bit 2 - Reset to Power down - kept as 1 for normal operation
* Bit 1 - Loop Back enable for 10baseT MCC
*
*
* CSR 12
* ------
* Bit 25 - Partner can do T4
* Bit 24 - Partner can do 100baseTx-FD
* Bit 23 - Partner can do 100baseTx-HD
* Bit 22 - Partner can do 10baseT-FD
* Bit 21 - Partner can do 10baseT-HD
* Bit 15 - LPN is 1 if all above bits are valid, otherwise 0
* Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
* Bit 3 - Autopolarity state
* Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
* Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
*
*
* Data Port Selection Info
*-------------------------
*
* CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT
* 1 0 0 (X) 0 (X) 1 NWAY
* 0 0 1 0 (X) 0 10baseT
* 0 1 0 1 1 (X) 100baseT
*
*
*/
#include "tulip.h"
#include <linux/delay.h>
void pnic2_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
if (tulip_debug > 3)
dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
ioread32(ioaddr + CSR12));
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
}
}
void pnic2_start_nway(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr14;
int csr12;
/* set up what to advertise during the negotiation */
/* load in csr14 and mask off bits not to touch
* comment at top of file explains mask value
*/
csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39);
/* bit 17 - advertise 100baseTx-FD */
if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
/* bit 16 - advertise 100baseTx-HD */
if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
/* bit 6 - advertise 10baseT-HD */
if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
/* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
* and bit 0 Don't PowerDown 10baseT
*/
csr14 |= 0x00001184;
if (tulip_debug > 1)
netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n",
csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
/* now we have to set up csr6 for NWAY state */
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
*/
tp->csr6 = tp->csr6 & 0xfe3bd1fd;
/* don't forget that bit 9 is also used for advertising */
/* advertise 10baseT-FD for the negotiation (bit 9) */
if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
/* set bit 24 for nway negotiation mode ...
* see Data Port Selection comment at top of file
* and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
*/
tp->csr6 |= 0x01000000;
iowrite32(csr14, ioaddr + CSR14);
iowrite32(tp->csr6, ioaddr + CSR6);
udelay(100);
/* all set up so now force the negotiation to begin */
/* read in current values and mask off all but the
* Autonegotiation bits 14:12. Writing a 001 to those bits
* should start the autonegotiation
*/
csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff);
csr12 |= 0x1000;
iowrite32(csr12, ioaddr + CSR12);
}
void pnic2_lnk_change(struct net_device *dev, int csr5)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr14;
/* read the status register to find out what is up */
int csr12 = ioread32(ioaddr + CSR12);
if (tulip_debug > 1)
dev_info(&dev->dev,
"PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
csr12, csr5, ioread32(ioaddr + CSR14));
/* If NWay finished and we have a negotiated partner capability,
* check bits 14:12 for bit pattern 101 - all is good
*/
if (tp->nway && !tp->nwayset) {
/* we did an auto negotiation */
if ((csr12 & 0x7000) == 0x5000) {
/* negotiation ended successfully */
/* get the link partners reply and mask out all but
* bits 24-21 which show the partners capabilities
* and match those to what we advertised
*
* then begin to interpret the results of the negotiation.
* Always go in this order : (we are ignoring T4 for now)
* 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD
*/
int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise;
tp->lpar = (csr12 >> 16);
tp->nwayset = 1;
if (negotiated & 0x0100) dev->if_port = 5;
else if (negotiated & 0x0080) dev->if_port = 3;
else if (negotiated & 0x0040) dev->if_port = 4;
else if (negotiated & 0x0020) dev->if_port = 0;
else {
if (tulip_debug > 1)
dev_info(&dev->dev,
"funny autonegotiate result csr12 %08x advertising %04x\n",
csr12, tp->sym_advertise);
tp->nwayset = 0;
/* so check if 100baseTx link state is okay */
if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
dev->if_port = 3;
}
/* now record the duplex that was negotiated */
tp->full_duplex = 0;
if ((dev->if_port == 4) || (dev->if_port == 5))
tp->full_duplex = 1;
if (tulip_debug > 1) {
if (tp->nwayset)
dev_info(&dev->dev,
"Switching to %s based on link negotiation %04x & %04x = %04x\n",
medianame[dev->if_port],
tp->sym_advertise, tp->lpar,
negotiated);
}
/* remember to turn off bit 7 - autonegotiate
* enable so we can properly end nway mode and
* set duplex (ie. use csr6<9> again)
*/
csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
iowrite32(csr14,ioaddr + CSR14);
/* now set the data port and operating mode
* (see the Data Port Selection comments at
* the top of the file
*/
/* get current csr6 and mask off bits not to touch */
/* see comment at top of file */
tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
/* so if using if_port 3 or 5 then select the 100baseT
* port else select the 10baseT port.
* See the Data Port Selection table at the top
* of the file which was taken from the PNIC_II.PDF
* datasheet
*/
if (dev->if_port & 1) tp->csr6 |= 0x01840000;
else tp->csr6 |= 0x00400000;
/* now set the full duplex bit appropriately */
if (tp->full_duplex) tp->csr6 |= 0x00000200;
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n",
tp->csr6,
ioread32(ioaddr + CSR6),
ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
*/
tulip_start_rxtx(tp);
return;
} else {
dev_info(&dev->dev,
"Autonegotiation failed, using %s, link beat status %04x\n",
medianame[dev->if_port], csr12);
/* remember to turn off bit 7 - autonegotiate
* enable so we don't forget
*/
csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
iowrite32(csr14,ioaddr + CSR14);
/* what should we do when autonegotiate fails?
* should we try again or default to baseline
* case. I just don't know.
*
* for now default to some baseline case
*/
dev->if_port = 0;
tp->nway = 0;
tp->nwayset = 1;
/* set to 10baseTx-HD - see Data Port Selection
* comment given at the top of the file
*/
tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
tp->csr6 |= 0x00400000;
tulip_restart_rxtx(tp);
return;
}
}
if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
/* Link blew? Maybe restart NWay. */
if (tulip_debug > 2)
netdev_dbg(dev, "Ugh! Link blew?\n");
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
return;
}
if (dev->if_port == 3 || dev->if_port == 5) {
/* we are at 100mb and a potential link change occurred */
if (tulip_debug > 1)
dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
medianame[dev->if_port],
(csr12 & 2) ? "failed" : "good");
/* check 100 link beat */
tp->nway = 0;
tp->nwayset = 1;
/* if failed then try doing an nway to get in sync */
if ((csr12 & 2) && ! tp->medialock) {
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
}
return;
}
if (dev->if_port == 0 || dev->if_port == 4) {
/* we are at 10mb and a potential link change occurred */
if (tulip_debug > 1)
dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
medianame[dev->if_port],
(csr12 & 4) ? "failed" : "good");
tp->nway = 0;
tp->nwayset = 1;
/* if failed, try doing an nway to get in sync */
if ((csr12 & 4) && ! tp->medialock) {
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
}
return;
}
if (tulip_debug > 1)
dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
/* if all else fails default to trying 10baseT-HD */
dev->if_port = 0;
/* make sure autonegotiate enable is off */
csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
iowrite32(csr14,ioaddr + CSR14);
/* set to 10baseTx-HD - see Data Port Selection
* comment given at the top of the file
*/
tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
tp->csr6 |= 0x00400000;
tulip_restart_rxtx(tp);
}
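/*
* Illustrative sketch (not part of the driver): how the negotiated
* capability bits decoded in pnic2_lnk_change() above map onto if_port
* values. The masks come from the CSR12 layout documented at the top
* of this file; the helper itself is hypothetical.
*/
static inline int pnic2_example_decode_port(int csr12, int sym_advertise)
{
int negotiated = ((csr12 >> 16) & 0x01E0) & sym_advertise;
if (negotiated & 0x0100) /* 100baseTx-FD */
return 5;
if (negotiated & 0x0080) /* 100baseTx-HD */
return 3;
if (negotiated & 0x0040) /* 10baseT-FD */
return 4;
return 0; /* 10baseT-HD */
}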
| gpl-2.0 |
UNGLinux/UNGKernel | drivers/staging/lustre/lnet/lnet/config.c | 153 | 26135 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/lnet/lib-lnet.h>
typedef struct { /* tmp struct for parsing routes */
struct list_head ltb_list; /* stash on lists */
int ltb_size; /* allocated size */
char ltb_text[0]; /* text buffer */
} lnet_text_buf_t;
static int lnet_tbnob = 0; /* track text buf allocation */
#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
void
lnet_syntax(char *name, char *str, int offset, int width)
{
static char dots[LNET_SINGLE_TEXTBUF_NOB];
static char dashes[LNET_SINGLE_TEXTBUF_NOB];
memset(dots, '.', sizeof(dots));
dots[sizeof(dots)-1] = 0;
memset(dashes, '-', sizeof(dashes));
dashes[sizeof(dashes)-1] = 0;
LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
(int)strlen(name), dots, offset, dots,
(width < 1) ? 0 : width - 1, dashes);
}
int
lnet_issep (char c)
{
switch (c) {
case '\n':
case '\r':
case ';':
return 1;
default:
return 0;
}
}
int
lnet_net_unique(__u32 net, struct list_head *nilist)
{
struct list_head *tmp;
lnet_ni_t *ni;
list_for_each (tmp, nilist) {
ni = list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net)
return 0;
}
return 1;
}
void
lnet_ni_free(struct lnet_ni *ni)
{
if (ni->ni_refs != NULL)
cfs_percpt_free(ni->ni_refs);
if (ni->ni_tx_queues != NULL)
cfs_percpt_free(ni->ni_tx_queues);
if (ni->ni_cpts != NULL)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
LIBCFS_FREE(ni, sizeof(*ni));
}
lnet_ni_t *
lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
{
struct lnet_tx_queue *tq;
struct lnet_ni *ni;
int rc;
int i;
if (!lnet_net_unique(net, nilist)) {
LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
libcfs_net2str(net));
return NULL;
}
LIBCFS_ALLOC(ni, sizeof(*ni));
if (ni == NULL) {
CERROR("Out of memory creating network %s\n",
libcfs_net2str(net));
return NULL;
}
spin_lock_init(&ni->ni_lock);
INIT_LIST_HEAD(&ni->ni_cptlist);
ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_refs[0]));
if (ni->ni_refs == NULL)
goto failed;
ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_tx_queues[0]));
if (ni->ni_tx_queues == NULL)
goto failed;
cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
INIT_LIST_HEAD(&tq->tq_delayed);
if (el == NULL) {
ni->ni_cpts = NULL;
ni->ni_ncpts = LNET_CPT_NUMBER;
} else {
rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts);
if (rc <= 0) {
CERROR("Failed to set CPTs for NI %s: %d\n",
libcfs_net2str(net), rc);
goto failed;
}
LASSERT(rc <= LNET_CPT_NUMBER);
if (rc == LNET_CPT_NUMBER) {
LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
ni->ni_cpts = NULL;
}
ni->ni_ncpts = rc;
}
/* LND will fill in the address part of the NID */
ni->ni_nid = LNET_MKNID(net, 0);
ni->ni_last_alive = cfs_time_current_sec();
list_add_tail(&ni->ni_list, nilist);
return ni;
failed:
lnet_ni_free(ni);
return NULL;
}
int
lnet_parse_networks(struct list_head *nilist, char *networks)
{
struct cfs_expr_list *el = NULL;
int tokensize = strlen(networks) + 1;
char *tokens;
char *str;
char *tmp;
struct lnet_ni *ni;
__u32 net;
int nnets = 0;
if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
/* _WAY_ conservative */
LCONSOLE_ERROR_MSG(0x112, "Can't parse networks: string too "
"long\n");
return -EINVAL;
}
LIBCFS_ALLOC(tokens, tokensize);
if (tokens == NULL) {
CERROR("Can't allocate net tokens\n");
return -ENOMEM;
}
the_lnet.ln_network_tokens = tokens;
the_lnet.ln_network_tokens_nob = tokensize;
memcpy (tokens, networks, tokensize);
str = tmp = tokens;
/* Add in the loopback network */
ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
if (ni == NULL)
goto failed;
while (str != NULL && *str != 0) {
char *comma = strchr(str, ',');
char *bracket = strchr(str, '(');
char *square = strchr(str, '[');
char *iface;
int niface;
int rc;
/* NB we don't check interface conflicts here; it's the LNDs
* responsibility (if it cares at all) */
if (square != NULL && (comma == NULL || square < comma)) {
/* i.e.: o2ib0(ib0)[1,2], the numbers between square
* brackets are the CPTs this NI is to be bound to */
if (bracket != NULL && bracket > square) {
tmp = square;
goto failed_syntax;
}
tmp = strchr(square, ']');
if (tmp == NULL) {
tmp = square;
goto failed_syntax;
}
rc = cfs_expr_list_parse(square, tmp - square + 1,
0, LNET_CPT_NUMBER - 1, &el);
if (rc != 0) {
tmp = square;
goto failed_syntax;
}
while (square <= tmp)
*square++ = ' ';
}
if (bracket == NULL ||
(comma != NULL && comma < bracket)) {
/* no interface list specified */
if (comma != NULL)
*comma++ = 0;
net = libcfs_str2net(cfs_trimwhite(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
LCONSOLE_ERROR_MSG(0x113, "Unrecognised network"
" type\n");
tmp = str;
goto failed_syntax;
}
if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
lnet_ni_alloc(net, el, nilist) == NULL)
goto failed;
if (el != NULL) {
cfs_expr_list_free(el);
el = NULL;
}
str = comma;
continue;
}
*bracket = 0;
net = libcfs_str2net(cfs_trimwhite(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
tmp = str;
goto failed_syntax;
}
nnets++;
ni = lnet_ni_alloc(net, el, nilist);
if (ni == NULL)
goto failed;
if (el != NULL) {
cfs_expr_list_free(el);
el = NULL;
}
niface = 0;
iface = bracket + 1;
bracket = strchr(iface, ')');
if (bracket == NULL) {
tmp = iface;
goto failed_syntax;
}
*bracket = 0;
do {
comma = strchr(iface, ',');
if (comma != NULL)
*comma++ = 0;
iface = cfs_trimwhite(iface);
if (*iface == 0) {
tmp = iface;
goto failed_syntax;
}
if (niface == LNET_MAX_INTERFACES) {
LCONSOLE_ERROR_MSG(0x115, "Too many interfaces "
"for net %s\n",
libcfs_net2str(net));
goto failed;
}
ni->ni_interfaces[niface++] = iface;
iface = comma;
} while (iface != NULL);
str = bracket + 1;
comma = strchr(bracket + 1, ',');
if (comma != NULL) {
*comma = 0;
str = cfs_trimwhite(str);
if (*str != 0) {
tmp = str;
goto failed_syntax;
}
str = comma + 1;
continue;
}
str = cfs_trimwhite(str);
if (*str != 0) {
tmp = str;
goto failed_syntax;
}
}
LASSERT(!list_empty(nilist));
return 0;
failed_syntax:
lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
failed:
while (!list_empty(nilist)) {
ni = list_entry(nilist->next, lnet_ni_t, ni_list);
list_del(&ni->ni_list);
lnet_ni_free(ni);
}
if (el != NULL)
cfs_expr_list_free(el);
LIBCFS_FREE(tokens, tokensize);
the_lnet.ln_network_tokens = NULL;
return -EINVAL;
}
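/*
* Illustrative examples (an assumption drawn from the parser above,
* not an exhaustive grammar) of "networks" strings that
* lnet_parse_networks() accepts:
* networks="tcp" - one NI, LND picks the interface
* networks="tcp0(eth0,eth1)" - one NI bound to eth0 and eth1
* networks="o2ib0(ib0)[0,1]" - one NI restricted to CPTs 0 and 1
* networks="tcp0(eth0),o2ib0(ib0)" - two NIs on different networks
* A loopback NI is always added implicitly before the list is parsed.
*/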
lnet_text_buf_t *
lnet_new_text_buf (int str_len)
{
lnet_text_buf_t *ltb;
int nob;
/* NB allocate space for the terminating 0 */
nob = offsetof(lnet_text_buf_t, ltb_text[str_len + 1]);
if (nob > LNET_SINGLE_TEXTBUF_NOB) {
/* _way_ conservative for "route net gateway..." */
CERROR("text buffer too big\n");
return NULL;
}
if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) {
CERROR("Too many text buffers\n");
return NULL;
}
LIBCFS_ALLOC(ltb, nob);
if (ltb == NULL)
return NULL;
ltb->ltb_size = nob;
ltb->ltb_text[0] = 0;
lnet_tbnob += nob;
return ltb;
}
void
lnet_free_text_buf (lnet_text_buf_t *ltb)
{
lnet_tbnob -= ltb->ltb_size;
LIBCFS_FREE(ltb, ltb->ltb_size);
}
void
lnet_free_text_bufs(struct list_head *tbs)
{
lnet_text_buf_t *ltb;
while (!list_empty(tbs)) {
ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
}
void
lnet_print_text_bufs(struct list_head *tbs)
{
struct list_head *tmp;
lnet_text_buf_t *ltb;
list_for_each (tmp, tbs) {
ltb = list_entry(tmp, lnet_text_buf_t, ltb_list);
CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
}
CDEBUG(D_WARNING, "%d allocated\n", lnet_tbnob);
}
int
lnet_str2tbs_sep (struct list_head *tbs, char *str)
{
struct list_head pending;
char *sep;
int nob;
int i;
lnet_text_buf_t *ltb;
INIT_LIST_HEAD(&pending);
/* Split 'str' into separate commands */
for (;;) {
/* skip leading whitespace */
while (cfs_iswhite(*str))
str++;
/* scan for separator or comment */
for (sep = str; *sep != 0; sep++)
if (lnet_issep(*sep) || *sep == '#')
break;
nob = (int)(sep - str);
if (nob > 0) {
ltb = lnet_new_text_buf(nob);
if (ltb == NULL) {
lnet_free_text_bufs(&pending);
return -1;
}
for (i = 0; i < nob; i++)
if (cfs_iswhite(str[i]))
ltb->ltb_text[i] = ' ';
else
ltb->ltb_text[i] = str[i];
ltb->ltb_text[nob] = 0;
list_add_tail(<b->ltb_list, &pending);
}
if (*sep == '#') {
/* scan for separator */
do {
sep++;
} while (*sep != 0 && !lnet_issep(*sep));
}
if (*sep == 0)
break;
str = sep + 1;
}
list_splice(&pending, tbs->prev);
return 0;
}
int
lnet_expand1tb (struct list_head *list,
char *str, char *sep1, char *sep2,
char *item, int itemlen)
{
int len1 = (int)(sep1 - str);
int len2 = strlen(sep2 + 1);
lnet_text_buf_t *ltb;
LASSERT (*sep1 == '[');
LASSERT (*sep2 == ']');
ltb = lnet_new_text_buf(len1 + itemlen + len2);
if (ltb == NULL)
return -ENOMEM;
memcpy(ltb->ltb_text, str, len1);
memcpy(<b->ltb_text[len1], item, itemlen);
memcpy(<b->ltb_text[len1+itemlen], sep2 + 1, len2);
ltb->ltb_text[len1 + itemlen + len2] = 0;
list_add_tail(<b->ltb_list, list);
return 0;
}
int
lnet_str2tbs_expand (struct list_head *tbs, char *str)
{
char num[16];
struct list_head pending;
char *sep;
char *sep2;
char *parsed;
char *enditem;
int lo;
int hi;
int stride;
int i;
int nob;
int scanned;
INIT_LIST_HEAD(&pending);
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
return 0;
sep2 = strchr(sep, ']');
if (sep2 == NULL)
goto failed;
for (parsed = sep; parsed < sep2; parsed = enditem) {
enditem = ++parsed;
while (enditem < sep2 && *enditem != ',')
enditem++;
if (enditem == parsed) /* no empty items */
goto failed;
if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, &stride, &scanned) < 3) {
if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
/* simple string enumeration */
if (lnet_expand1tb(&pending, str, sep, sep2,
parsed, (int)(enditem - parsed)) != 0)
goto failed;
continue;
}
stride = 1;
}
/* range expansion */
if (enditem != parsed + scanned) /* no trailing junk */
goto failed;
if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
(hi - lo) % stride != 0)
goto failed;
for (i = lo; i <= hi; i += stride) {
snprintf(num, sizeof(num), "%d", i);
nob = strlen(num);
if (nob + 1 == sizeof(num))
goto failed;
if (lnet_expand1tb(&pending, str, sep, sep2,
num, nob) != 0)
goto failed;
}
}
list_splice(&pending, tbs->prev);
return 1;
failed:
lnet_free_text_bufs(&pending);
return -1;
}
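/*
* Illustrative worked example (an assumption based on the expansion
* logic above): lnet_str2tbs_expand() turns one bracketed buffer into
* several plain ones, one per enumerated item or range member:
* "tcp[0,2]" -> "tcp0", "tcp2" (string enumeration)
* "tcp[0-4/2]" -> "tcp0", "tcp2", "tcp4" (lo-hi/stride range)
* It returns 1 if it expanded something, 0 if there was nothing to
* expand, and -1 on a syntax error.
*/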
int
lnet_parse_hops (char *str, unsigned int *hops)
{
int len = strlen(str);
int nob = len;
return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
nob == len &&
*hops > 0 && *hops < 256);
}
int
lnet_parse_route (char *str, int *im_a_router)
{
/* static scratch buffer OK (single threaded) */
static char cmd[LNET_SINGLE_TEXTBUF_NOB];
struct list_head nets;
struct list_head gateways;
struct list_head *tmp1;
struct list_head *tmp2;
__u32 net;
lnet_nid_t nid;
lnet_text_buf_t *ltb;
int rc;
char *sep;
char *token = str;
int ntokens = 0;
int myrc = -1;
unsigned int hops;
int got_hops = 0;
INIT_LIST_HEAD(&gateways);
INIT_LIST_HEAD(&nets);
/* save a copy of the string for error messages */
strncpy(cmd, str, sizeof(cmd) - 1);
cmd[sizeof(cmd) - 1] = 0;
sep = str;
for (;;) {
/* scan for token start */
while (cfs_iswhite(*sep))
sep++;
if (*sep == 0) {
if (ntokens < (got_hops ? 3 : 2))
goto token_error;
break;
}
ntokens++;
token = sep++;
/* scan for token end */
while (*sep != 0 && !cfs_iswhite(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
if (ntokens == 1) {
tmp2 = &nets; /* expanding nets */
} else if (ntokens == 2 &&
lnet_parse_hops(token, &hops)) {
got_hops = 1; /* got a hop count */
continue;
} else {
tmp2 = &gateways; /* expanding gateways */
}
ltb = lnet_new_text_buf(strlen(token));
if (ltb == NULL)
goto out;
strcpy(ltb->ltb_text, token);
tmp1 = <b->ltb_list;
list_add_tail(tmp1, tmp2);
while (tmp1 != tmp2) {
ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
if (rc < 0)
goto token_error;
tmp1 = tmp1->next;
if (rc > 0) { /* expanded! */
list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
continue;
}
if (ntokens == 1) {
net = libcfs_str2net(ltb->ltb_text);
if (net == LNET_NIDNET(LNET_NID_ANY) ||
LNET_NETTYP(net) == LOLND)
goto token_error;
} else {
nid = libcfs_str2nid(ltb->ltb_text);
if (nid == LNET_NID_ANY ||
LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
goto token_error;
}
}
}
if (!got_hops)
hops = 1;
LASSERT (!list_empty(&nets));
LASSERT (!list_empty(&gateways));
list_for_each (tmp1, &nets) {
ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
net = libcfs_str2net(ltb->ltb_text);
LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
list_for_each (tmp2, &gateways) {
ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
nid = libcfs_str2nid(ltb->ltb_text);
LASSERT (nid != LNET_NID_ANY);
if (lnet_islocalnid(nid)) {
*im_a_router = 1;
continue;
}
rc = lnet_add_route (net, hops, nid);
if (rc != 0) {
CERROR("Can't create route "
"to %s via %s\n",
libcfs_net2str(net),
libcfs_nid2str(nid));
goto out;
}
}
}
myrc = 0;
goto out;
token_error:
lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
out:
lnet_free_text_bufs(&nets);
lnet_free_text_bufs(&gateways);
return myrc;
}
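/*
* Illustrative example (an assumption based on the tokenizer above) of
* a "routes" entry accepted by lnet_parse_route(): one or more target
* nets, an optional hop count, then one or more gateway NIDs, each
* token expandable with bracket ranges:
* routes="o2ib 1 192.168.0.[1-4]@tcp"
* adds one-hop routes to net o2ib via the gateways 192.168.0.1@tcp
* through 192.168.0.4@tcp. A gateway that is a local NID marks this
* node as a router instead of adding a route.
*/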
int
lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
{
lnet_text_buf_t *ltb;
while (!list_empty(tbs)) {
ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
lnet_free_text_bufs(tbs);
return -EINVAL;
}
list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
return 0;
}
int
lnet_parse_routes (char *routes, int *im_a_router)
{
struct list_head tbs;
int rc = 0;
*im_a_router = 0;
INIT_LIST_HEAD(&tbs);
if (lnet_str2tbs_sep(&tbs, routes) < 0) {
CERROR("Error parsing routes\n");
rc = -EINVAL;
} else {
rc = lnet_parse_route_tbs(&tbs, im_a_router);
}
LASSERT (lnet_tbnob == 0);
return rc;
}
int
lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
{
LIST_HEAD (list);
int rc;
int i;
rc = cfs_ip_addr_parse(token, len, &list);
if (rc != 0)
return rc;
for (rc = i = 0; !rc && i < nip; i++)
rc = cfs_ip_addr_match(ipaddrs[i], &list);
cfs_ip_addr_free(&list);
return rc;
}
int
lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
{
static char tokens[LNET_SINGLE_TEXTBUF_NOB];
int matched = 0;
int ntokens = 0;
int len;
char *net = NULL;
char *sep;
char *token;
int rc;
LASSERT (strlen(net_entry) < sizeof(tokens));
/* work on a copy of the string */
strcpy(tokens, net_entry);
sep = tokens;
for (;;) {
/* scan for token start */
while (cfs_iswhite(*sep))
sep++;
if (*sep == 0)
break;
token = sep++;
/* scan for token end */
while (*sep != 0 && !cfs_iswhite(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
if (ntokens++ == 0) {
net = token;
continue;
}
len = strlen(token);
rc = lnet_match_network_token(token, len, ipaddrs, nip);
if (rc < 0) {
lnet_syntax("ip2nets", net_entry,
(int)(token - tokens), len);
return rc;
}
matched |= (rc != 0);
}
if (!matched)
return 0;
strcpy(net_entry, net); /* replace with matched net */
return 1;
}
__u32
lnet_netspec2net(char *netspec)
{
char *bracket = strchr(netspec, '(');
__u32 net;
if (bracket != NULL)
*bracket = 0;
net = libcfs_str2net(netspec);
if (bracket != NULL)
*bracket = '(';
return net;
}
int
lnet_splitnets(char *source, struct list_head *nets)
{
int offset = 0;
int offset2;
int len;
lnet_text_buf_t *tb;
lnet_text_buf_t *tb2;
struct list_head *t;
char *sep;
char *bracket;
__u32 net;
LASSERT (!list_empty(nets));
LASSERT (nets->next == nets->prev); /* single entry */
tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);
for (;;) {
sep = strchr(tb->ltb_text, ',');
bracket = strchr(tb->ltb_text, '(');
if (sep != NULL &&
bracket != NULL &&
bracket < sep) {
/* netspec lists interfaces... */
offset2 = offset + (int)(bracket - tb->ltb_text);
len = strlen(bracket);
bracket = strchr(bracket + 1, ')');
if (bracket == NULL ||
!(bracket[1] == ',' || bracket[1] == 0)) {
lnet_syntax("ip2nets", source, offset2, len);
return -EINVAL;
}
sep = (bracket[1] == 0) ? NULL : bracket + 1;
}
if (sep != NULL)
*sep++ = 0;
net = lnet_netspec2net(tb->ltb_text);
if (net == LNET_NIDNET(LNET_NID_ANY)) {
lnet_syntax("ip2nets", source, offset,
strlen(tb->ltb_text));
return -EINVAL;
}
list_for_each(t, nets) {
tb2 = list_entry(t, lnet_text_buf_t, ltb_list);
if (tb2 == tb)
continue;
if (net == lnet_netspec2net(tb2->ltb_text)) {
/* duplicate network */
lnet_syntax("ip2nets", source, offset,
strlen(tb->ltb_text));
return -EINVAL;
}
}
if (sep == NULL)
return 0;
offset += (int)(sep - tb->ltb_text);
tb2 = lnet_new_text_buf(strlen(sep));
if (tb2 == NULL)
return -ENOMEM;
strcpy(tb2->ltb_text, sep);
list_add_tail(&tb2->ltb_list, nets);
tb = tb2;
}
}
int
lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
{
static char networks[LNET_SINGLE_TEXTBUF_NOB];
static char source[LNET_SINGLE_TEXTBUF_NOB];
struct list_head raw_entries;
struct list_head matched_nets;
struct list_head current_nets;
struct list_head *t;
struct list_head *t2;
lnet_text_buf_t *tb;
lnet_text_buf_t *tb2;
__u32 net1;
__u32 net2;
int len;
int count;
int dup;
int rc;
INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
LASSERT (lnet_tbnob == 0);
return -EINVAL;
}
INIT_LIST_HEAD(&matched_nets);
INIT_LIST_HEAD(¤t_nets);
networks[0] = 0;
count = 0;
len = 0;
rc = 0;
while (!list_empty(&raw_entries)) {
tb = list_entry(raw_entries.next, lnet_text_buf_t,
ltb_list);
strncpy(source, tb->ltb_text, sizeof(source)-1);
source[sizeof(source)-1] = 0;
/* on a match, replace ltb_text with the matched network(s) */
rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
if (rc < 0)
break;
list_del(&tb->ltb_list);
if (rc == 0) { /* no match */
lnet_free_text_buf(tb);
continue;
}
/* split into separate networks */
INIT_LIST_HEAD(¤t_nets);
list_add(&tb->ltb_list, ¤t_nets);
rc = lnet_splitnets(source, ¤t_nets);
if (rc < 0)
break;
dup = 0;
list_for_each (t, ¤t_nets) {
tb = list_entry(t, lnet_text_buf_t, ltb_list);
net1 = lnet_netspec2net(tb->ltb_text);
LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
list_for_each(t2, &matched_nets) {
tb2 = list_entry(t2, lnet_text_buf_t,
ltb_list);
net2 = lnet_netspec2net(tb2->ltb_text);
LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
if (net1 == net2) {
dup = 1;
break;
}
}
if (dup)
break;
}
if (dup) {
lnet_free_text_bufs(¤t_nets);
continue;
}
list_for_each_safe(t, t2, ¤t_nets) {
tb = list_entry(t, lnet_text_buf_t, ltb_list);
list_del(&tb->ltb_list);
list_add_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
tb->ltb_text);
if (len >= sizeof(networks)) {
CERROR("Too many matched networks\n");
rc = -E2BIG;
goto out;
}
}
count++;
}
out:
lnet_free_text_bufs(&raw_entries);
lnet_free_text_bufs(&matched_nets);
lnet_free_text_bufs(¤t_nets);
LASSERT (lnet_tbnob == 0);
if (rc < 0)
return rc;
*networksp = networks;
return count;
}
void
lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
{
LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
}
int
lnet_ipaddr_enumerate (__u32 **ipaddrsp)
{
int up;
__u32 netmask;
__u32 *ipaddrs;
__u32 *ipaddrs2;
int nip;
char **ifnames;
int nif = libcfs_ipif_enumerate(&ifnames);
int i;
int rc;
if (nif <= 0)
return nif;
LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
if (ipaddrs == NULL) {
CERROR("Can't allocate ipaddrs[%d]\n", nif);
libcfs_ipif_free_enumeration(ifnames, nif);
return -ENOMEM;
}
for (i = nip = 0; i < nif; i++) {
if (!strcmp(ifnames[i], "lo"))
continue;
rc = libcfs_ipif_query(ifnames[i], &up,
&ipaddrs[nip], &netmask);
if (rc != 0) {
CWARN("Can't query interface %s: %d\n",
ifnames[i], rc);
continue;
}
if (!up) {
CWARN("Ignoring interface %s: it's down\n",
ifnames[i]);
continue;
}
nip++;
}
libcfs_ipif_free_enumeration(ifnames, nif);
if (nip == nif) {
*ipaddrsp = ipaddrs;
} else {
if (nip > 0) {
LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
if (ipaddrs2 == NULL) {
CERROR("Can't allocate ipaddrs[%d]\n", nip);
nip = -ENOMEM;
} else {
memcpy(ipaddrs2, ipaddrs,
nip * sizeof(*ipaddrs));
*ipaddrsp = ipaddrs2;
rc = nip;
}
}
lnet_ipaddr_free_enumeration(ipaddrs, nif);
}
return nip;
}
int
lnet_parse_ip2nets (char **networksp, char *ip2nets)
{
__u32 *ipaddrs;
int nip = lnet_ipaddr_enumerate(&ipaddrs);
int rc;
if (nip < 0) {
LCONSOLE_ERROR_MSG(0x117, "Error %d enumerating local IP "
"interfaces for ip2nets to match\n", nip);
return nip;
}
if (nip == 0) {
LCONSOLE_ERROR_MSG(0x118, "No local IP interfaces "
"for ip2nets to match\n");
return -ENOENT;
}
rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
lnet_ipaddr_free_enumeration(ipaddrs, nip);
if (rc < 0) {
LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
return rc;
}
if (rc == 0) {
LCONSOLE_ERROR_MSG(0x11a, "ip2nets does not match "
"any local IP interfaces\n");
return -ENOENT;
}
return 0;
}
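/*
* Illustrative example (an assumption based on the matching code
* above) of an "ip2nets" string: entries are separated by ';' or
* newlines, '#' starts a comment, and each entry is a net spec
* followed by IP patterns matched against the local interfaces:
* ip2nets="tcp(eth0) 192.168.0.[2-10]; o2ib 10.0.0.*"
* On a host with an address in 192.168.0.2-10 this selects
* networks="tcp(eth0)"; on a 10.0.0.x host it selects networks="o2ib".
*/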
int
lnet_set_ip_niaddr (lnet_ni_t *ni)
{
__u32 net = LNET_NIDNET(ni->ni_nid);
char **names;
int n;
__u32 ip;
__u32 netmask;
int up;
int i;
int rc;
/* Convenience for LNDs that use the IP address of a local interface as
* the local address part of their NID */
if (ni->ni_interfaces[0] != NULL) {
CLASSERT (LNET_MAX_INTERFACES > 1);
if (ni->ni_interfaces[1] != NULL) {
CERROR("Net %s doesn't support multiple interfaces\n",
libcfs_net2str(net));
return -EPERM;
}
rc = libcfs_ipif_query(ni->ni_interfaces[0],
&up, &ip, &netmask);
if (rc != 0) {
CERROR("Net %s can't query interface %s: %d\n",
libcfs_net2str(net), ni->ni_interfaces[0], rc);
return -EPERM;
}
if (!up) {
CERROR("Net %s can't use interface %s: it's down\n",
libcfs_net2str(net), ni->ni_interfaces[0]);
return -ENETDOWN;
}
ni->ni_nid = LNET_MKNID(net, ip);
return 0;
}
n = libcfs_ipif_enumerate(&names);
if (n <= 0) {
CERROR("Net %s can't enumerate interfaces: %d\n",
libcfs_net2str(net), n);
return 0;
}
for (i = 0; i < n; i++) {
if (!strcmp(names[i], "lo")) /* skip the loopback IF */
continue;
rc = libcfs_ipif_query(names[i], &up, &ip, &netmask);
if (rc != 0) {
CWARN("Net %s can't query interface %s: %d\n",
libcfs_net2str(net), names[i], rc);
continue;
}
if (!up) {
CWARN("Net %s ignoring interface %s (down)\n",
libcfs_net2str(net), names[i]);
continue;
}
libcfs_ipif_free_enumeration(names, n);
ni->ni_nid = LNET_MKNID(net, ip);
return 0;
}
CERROR("Net %s can't find any interfaces\n", libcfs_net2str(net));
libcfs_ipif_free_enumeration(names, n);
return -ENOENT;
}
EXPORT_SYMBOL(lnet_set_ip_niaddr);
| gpl-2.0 |
Mazout360/lge-kernel-star | arch/arm/mach-dove/common.c | 153 | 20876 | /*
* arch/arm/mach-dove/common.c
*
* Core functions for Marvell Dove 88AP510 System On Chip
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/serial_8250.h>
#include <linux/clk.h>
#include <linux/mbus.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
#include <linux/ata_platform.h>
#include <linux/spi/orion_spi.h>
#include <linux/gpio.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach/pci.h>
#include <mach/dove.h>
#include <mach/bridge-regs.h>
#include <asm/mach/arch.h>
#include <linux/irq.h>
#include <plat/mv_xor.h>
#include <plat/ehci-orion.h>
#include <plat/time.h>
#include "common.h"
/*****************************************************************************
* I/O Address Mapping
****************************************************************************/
static struct map_desc dove_io_desc[] __initdata = {
{
.virtual = DOVE_SB_REGS_VIRT_BASE,
.pfn = __phys_to_pfn(DOVE_SB_REGS_PHYS_BASE),
.length = DOVE_SB_REGS_SIZE,
.type = MT_DEVICE,
}, {
.virtual = DOVE_NB_REGS_VIRT_BASE,
.pfn = __phys_to_pfn(DOVE_NB_REGS_PHYS_BASE),
.length = DOVE_NB_REGS_SIZE,
.type = MT_DEVICE,
}, {
.virtual = DOVE_PCIE0_IO_VIRT_BASE,
.pfn = __phys_to_pfn(DOVE_PCIE0_IO_PHYS_BASE),
.length = DOVE_PCIE0_IO_SIZE,
.type = MT_DEVICE,
}, {
.virtual = DOVE_PCIE1_IO_VIRT_BASE,
.pfn = __phys_to_pfn(DOVE_PCIE1_IO_PHYS_BASE),
.length = DOVE_PCIE1_IO_SIZE,
.type = MT_DEVICE,
},
};
void __init dove_map_io(void)
{
iotable_init(dove_io_desc, ARRAY_SIZE(dove_io_desc));
}
/*****************************************************************************
* EHCI
****************************************************************************/
static struct orion_ehci_data dove_ehci_data = {
.dram = &dove_mbus_dram_info,
.phy_version = EHCI_PHY_NA,
};
static u64 ehci_dmamask = DMA_BIT_MASK(32);
/*****************************************************************************
* EHCI0
****************************************************************************/
static struct resource dove_ehci0_resources[] = {
{
.start = DOVE_USB0_PHYS_BASE,
.end = DOVE_USB0_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_USB0,
.end = IRQ_DOVE_USB0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_ehci0 = {
.name = "orion-ehci",
.id = 0,
.dev = {
.dma_mask = &ehci_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dove_ehci_data,
},
.resource = dove_ehci0_resources,
.num_resources = ARRAY_SIZE(dove_ehci0_resources),
};
void __init dove_ehci0_init(void)
{
platform_device_register(&dove_ehci0);
}
/*****************************************************************************
* EHCI1
****************************************************************************/
static struct resource dove_ehci1_resources[] = {
{
.start = DOVE_USB1_PHYS_BASE,
.end = DOVE_USB1_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_USB1,
.end = IRQ_DOVE_USB1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_ehci1 = {
.name = "orion-ehci",
.id = 1,
.dev = {
.dma_mask = &ehci_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dove_ehci_data,
},
.resource = dove_ehci1_resources,
.num_resources = ARRAY_SIZE(dove_ehci1_resources),
};
void __init dove_ehci1_init(void)
{
platform_device_register(&dove_ehci1);
}
/*****************************************************************************
* GE00
****************************************************************************/
struct mv643xx_eth_shared_platform_data dove_ge00_shared_data = {
.t_clk = 0,
.dram = &dove_mbus_dram_info,
};
static struct resource dove_ge00_shared_resources[] = {
{
.name = "ge00 base",
.start = DOVE_GE00_PHYS_BASE + 0x2000,
.end = DOVE_GE00_PHYS_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dove_ge00_shared = {
.name = MV643XX_ETH_SHARED_NAME,
.id = 0,
.dev = {
.platform_data = &dove_ge00_shared_data,
},
.num_resources = 1,
.resource = dove_ge00_shared_resources,
};
static struct resource dove_ge00_resources[] = {
{
.name = "ge00 irq",
.start = IRQ_DOVE_GE00_SUM,
.end = IRQ_DOVE_GE00_SUM,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_ge00 = {
.name = MV643XX_ETH_NAME,
.id = 0,
.num_resources = 1,
.resource = dove_ge00_resources,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
eth_data->shared = &dove_ge00_shared;
dove_ge00.dev.platform_data = eth_data;
platform_device_register(&dove_ge00_shared);
platform_device_register(&dove_ge00);
}
/*****************************************************************************
* SoC RTC
****************************************************************************/
static struct resource dove_rtc_resource[] = {
{
.start = DOVE_RTC_PHYS_BASE,
.end = DOVE_RTC_PHYS_BASE + 32 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_RTC,
.flags = IORESOURCE_IRQ,
}
};
void __init dove_rtc_init(void)
{
platform_device_register_simple("rtc-mv", -1, dove_rtc_resource, 2);
}
/*****************************************************************************
* SATA
****************************************************************************/
static struct resource dove_sata_resources[] = {
{
.name = "sata base",
.start = DOVE_SATA_PHYS_BASE,
.end = DOVE_SATA_PHYS_BASE + 0x5000 - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "sata irq",
.start = IRQ_DOVE_SATA,
.end = IRQ_DOVE_SATA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_sata = {
.name = "sata_mv",
.id = 0,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(dove_sata_resources),
.resource = dove_sata_resources,
};
void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
{
sata_data->dram = &dove_mbus_dram_info;
dove_sata.dev.platform_data = sata_data;
platform_device_register(&dove_sata);
}
/*****************************************************************************
* UART0
****************************************************************************/
static struct plat_serial8250_port dove_uart0_data[] = {
{
.mapbase = DOVE_UART0_PHYS_BASE,
.membase = (char *)DOVE_UART0_VIRT_BASE,
.irq = IRQ_DOVE_UART_0,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = 0,
}, {
},
};
static struct resource dove_uart0_resources[] = {
{
.start = DOVE_UART0_PHYS_BASE,
.end = DOVE_UART0_PHYS_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_UART_0,
.end = IRQ_DOVE_UART_0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_uart0 = {
.name = "serial8250",
.id = 0,
.dev = {
.platform_data = dove_uart0_data,
},
.resource = dove_uart0_resources,
.num_resources = ARRAY_SIZE(dove_uart0_resources),
};
void __init dove_uart0_init(void)
{
platform_device_register(&dove_uart0);
}
/*****************************************************************************
* UART1
****************************************************************************/
static struct plat_serial8250_port dove_uart1_data[] = {
{
.mapbase = DOVE_UART1_PHYS_BASE,
.membase = (char *)DOVE_UART1_VIRT_BASE,
.irq = IRQ_DOVE_UART_1,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = 0,
}, {
},
};
static struct resource dove_uart1_resources[] = {
{
.start = DOVE_UART1_PHYS_BASE,
.end = DOVE_UART1_PHYS_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_UART_1,
.end = IRQ_DOVE_UART_1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_uart1 = {
.name = "serial8250",
.id = 1,
.dev = {
.platform_data = dove_uart1_data,
},
.resource = dove_uart1_resources,
.num_resources = ARRAY_SIZE(dove_uart1_resources),
};
void __init dove_uart1_init(void)
{
platform_device_register(&dove_uart1);
}
/*****************************************************************************
* UART2
****************************************************************************/
static struct plat_serial8250_port dove_uart2_data[] = {
{
.mapbase = DOVE_UART2_PHYS_BASE,
.membase = (char *)DOVE_UART2_VIRT_BASE,
.irq = IRQ_DOVE_UART_2,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = 0,
}, {
},
};
static struct resource dove_uart2_resources[] = {
{
.start = DOVE_UART2_PHYS_BASE,
.end = DOVE_UART2_PHYS_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_UART_2,
.end = IRQ_DOVE_UART_2,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_uart2 = {
.name = "serial8250",
.id = 2,
.dev = {
.platform_data = dove_uart2_data,
},
.resource = dove_uart2_resources,
.num_resources = ARRAY_SIZE(dove_uart2_resources),
};
void __init dove_uart2_init(void)
{
platform_device_register(&dove_uart2);
}
/*****************************************************************************
* UART3
****************************************************************************/
static struct plat_serial8250_port dove_uart3_data[] = {
{
.mapbase = DOVE_UART3_PHYS_BASE,
.membase = (char *)DOVE_UART3_VIRT_BASE,
.irq = IRQ_DOVE_UART_3,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = 0,
}, {
},
};
static struct resource dove_uart3_resources[] = {
{
.start = DOVE_UART3_PHYS_BASE,
.end = DOVE_UART3_PHYS_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_UART_3,
.end = IRQ_DOVE_UART_3,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_uart3 = {
.name = "serial8250",
.id = 3,
.dev = {
.platform_data = dove_uart3_data,
},
.resource = dove_uart3_resources,
.num_resources = ARRAY_SIZE(dove_uart3_resources),
};
void __init dove_uart3_init(void)
{
platform_device_register(&dove_uart3);
}
/*****************************************************************************
* SPI0
****************************************************************************/
static struct orion_spi_info dove_spi0_data = {
.tclk = 0,
};
static struct resource dove_spi0_resources[] = {
{
.start = DOVE_SPI0_PHYS_BASE,
.end = DOVE_SPI0_PHYS_BASE + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_SPI0,
.end = IRQ_DOVE_SPI0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_spi0 = {
.name = "orion_spi",
.id = 0,
.resource = dove_spi0_resources,
.dev = {
.platform_data = &dove_spi0_data,
},
.num_resources = ARRAY_SIZE(dove_spi0_resources),
};
void __init dove_spi0_init(void)
{
platform_device_register(&dove_spi0);
}
/*****************************************************************************
* SPI1
****************************************************************************/
static struct orion_spi_info dove_spi1_data = {
.tclk = 0,
};
static struct resource dove_spi1_resources[] = {
{
.start = DOVE_SPI1_PHYS_BASE,
.end = DOVE_SPI1_PHYS_BASE + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_SPI1,
.end = IRQ_DOVE_SPI1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_spi1 = {
.name = "orion_spi",
.id = 1,
.resource = dove_spi1_resources,
.dev = {
.platform_data = &dove_spi1_data,
},
.num_resources = ARRAY_SIZE(dove_spi1_resources),
};
void __init dove_spi1_init(void)
{
platform_device_register(&dove_spi1);
}
/*****************************************************************************
* I2C
****************************************************************************/
static struct mv64xxx_i2c_pdata dove_i2c_data = {
.freq_m = 10, /* assumes 166 MHz TCLK gets 94.3kHz */
.freq_n = 3,
.timeout = 1000, /* Default timeout of 1 second */
};
static struct resource dove_i2c_resources[] = {
{
.name = "i2c base",
.start = DOVE_I2C_PHYS_BASE,
.end = DOVE_I2C_PHYS_BASE + 0x20 - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "i2c irq",
.start = IRQ_DOVE_I2C,
.end = IRQ_DOVE_I2C,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_i2c = {
.name = MV64XXX_I2C_CTLR_NAME,
.id = 0,
.num_resources = ARRAY_SIZE(dove_i2c_resources),
.resource = dove_i2c_resources,
.dev = {
.platform_data = &dove_i2c_data,
},
};
void __init dove_i2c_init(void)
{
platform_device_register(&dove_i2c);
}
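/*
* Illustrative sketch (an assumption: the usual mv64xxx baud divider,
* SCL = TCLK / (10 * (freq_m + 1) * 2^(freq_n + 1)), which this file
* does not spell out). With freq_m = 10, freq_n = 3 and a 166 MHz
* TCLK this gives 166666667 / (10 * 11 * 16) ~= 94.7 kHz, the ~94 kHz
* bus speed the platform data comment refers to.
*/
static inline unsigned long example_mv64xxx_i2c_rate(unsigned long tclk,
int m, int n)
{
return tclk / (10 * (m + 1) * (1 << (n + 1)));
}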
/*****************************************************************************
* Time handling
****************************************************************************/
void __init dove_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
}
static int get_tclk(void)
{
/* use DOVE_RESET_SAMPLE_HI/LO to detect tclk */
return 166666667;
}
static void dove_timer_init(void)
{
orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
IRQ_DOVE_BRIDGE, get_tclk());
}
struct sys_timer dove_timer = {
.init = dove_timer_init,
};
/*****************************************************************************
* XOR
****************************************************************************/
static struct mv_xor_platform_shared_data dove_xor_shared_data = {
.dram = &dove_mbus_dram_info,
};
/*****************************************************************************
* XOR 0
****************************************************************************/
static u64 dove_xor0_dmamask = DMA_BIT_MASK(32);
static struct resource dove_xor0_shared_resources[] = {
{
.name = "xor 0 low",
.start = DOVE_XOR0_PHYS_BASE,
.end = DOVE_XOR0_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.name = "xor 0 high",
.start = DOVE_XOR0_HIGH_PHYS_BASE,
.end = DOVE_XOR0_HIGH_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dove_xor0_shared = {
.name = MV_XOR_SHARED_NAME,
.id = 0,
.dev = {
.platform_data = &dove_xor_shared_data,
},
.num_resources = ARRAY_SIZE(dove_xor0_shared_resources),
.resource = dove_xor0_shared_resources,
};
static struct resource dove_xor00_resources[] = {
[0] = {
.start = IRQ_DOVE_XOR_00,
.end = IRQ_DOVE_XOR_00,
.flags = IORESOURCE_IRQ,
},
};
static struct mv_xor_platform_data dove_xor00_data = {
.shared = &dove_xor0_shared,
.hw_id = 0,
.pool_size = PAGE_SIZE,
};
static struct platform_device dove_xor00_channel = {
.name = MV_XOR_NAME,
.id = 0,
.num_resources = ARRAY_SIZE(dove_xor00_resources),
.resource = dove_xor00_resources,
.dev = {
.dma_mask = &dove_xor0_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = &dove_xor00_data,
},
};
static struct resource dove_xor01_resources[] = {
[0] = {
.start = IRQ_DOVE_XOR_01,
.end = IRQ_DOVE_XOR_01,
.flags = IORESOURCE_IRQ,
},
};
static struct mv_xor_platform_data dove_xor01_data = {
.shared = &dove_xor0_shared,
.hw_id = 1,
.pool_size = PAGE_SIZE,
};
static struct platform_device dove_xor01_channel = {
.name = MV_XOR_NAME,
.id = 1,
.num_resources = ARRAY_SIZE(dove_xor01_resources),
.resource = dove_xor01_resources,
.dev = {
.dma_mask = &dove_xor0_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = &dove_xor01_data,
},
};
void __init dove_xor0_init(void)
{
platform_device_register(&dove_xor0_shared);
/*
* The two engines can't do memset simultaneously; this limitation
* is satisfied by removing memset support from one of the engines.
*/
dma_cap_set(DMA_MEMCPY, dove_xor00_data.cap_mask);
dma_cap_set(DMA_XOR, dove_xor00_data.cap_mask);
platform_device_register(&dove_xor00_channel);
dma_cap_set(DMA_MEMCPY, dove_xor01_data.cap_mask);
dma_cap_set(DMA_MEMSET, dove_xor01_data.cap_mask);
dma_cap_set(DMA_XOR, dove_xor01_data.cap_mask);
platform_device_register(&dove_xor01_channel);
}
/*****************************************************************************
* XOR 1
****************************************************************************/
static u64 dove_xor1_dmamask = DMA_BIT_MASK(32);
static struct resource dove_xor1_shared_resources[] = {
{
.name = "xor 0 low",
.start = DOVE_XOR1_PHYS_BASE,
.end = DOVE_XOR1_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.name = "xor 0 high",
.start = DOVE_XOR1_HIGH_PHYS_BASE,
.end = DOVE_XOR1_HIGH_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dove_xor1_shared = {
.name = MV_XOR_SHARED_NAME,
.id = 1,
.dev = {
.platform_data = &dove_xor_shared_data,
},
.num_resources = ARRAY_SIZE(dove_xor1_shared_resources),
.resource = dove_xor1_shared_resources,
};
static struct resource dove_xor10_resources[] = {
[0] = {
.start = IRQ_DOVE_XOR_10,
.end = IRQ_DOVE_XOR_10,
.flags = IORESOURCE_IRQ,
},
};
static struct mv_xor_platform_data dove_xor10_data = {
.shared = &dove_xor1_shared,
.hw_id = 0,
.pool_size = PAGE_SIZE,
};
static struct platform_device dove_xor10_channel = {
.name = MV_XOR_NAME,
.id = 2,
.num_resources = ARRAY_SIZE(dove_xor10_resources),
.resource = dove_xor10_resources,
.dev = {
.dma_mask = &dove_xor1_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = &dove_xor10_data,
},
};
static struct resource dove_xor11_resources[] = {
[0] = {
.start = IRQ_DOVE_XOR_11,
.end = IRQ_DOVE_XOR_11,
.flags = IORESOURCE_IRQ,
},
};
static struct mv_xor_platform_data dove_xor11_data = {
.shared = &dove_xor1_shared,
.hw_id = 1,
.pool_size = PAGE_SIZE,
};
static struct platform_device dove_xor11_channel = {
.name = MV_XOR_NAME,
.id = 3,
.num_resources = ARRAY_SIZE(dove_xor11_resources),
.resource = dove_xor11_resources,
.dev = {
.dma_mask = &dove_xor1_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = &dove_xor11_data,
},
};
void __init dove_xor1_init(void)
{
platform_device_register(&dove_xor1_shared);
/*
* The two engines can't do memset simultaneously; this limitation
* is satisfied by removing memset support from one of the engines.
*/
dma_cap_set(DMA_MEMCPY, dove_xor10_data.cap_mask);
dma_cap_set(DMA_XOR, dove_xor10_data.cap_mask);
platform_device_register(&dove_xor10_channel);
dma_cap_set(DMA_MEMCPY, dove_xor11_data.cap_mask);
dma_cap_set(DMA_MEMSET, dove_xor11_data.cap_mask);
dma_cap_set(DMA_XOR, dove_xor11_data.cap_mask);
platform_device_register(&dove_xor11_channel);
}
/*****************************************************************************
* SDIO
****************************************************************************/
static u64 sdio_dmamask = DMA_BIT_MASK(32);
static struct resource dove_sdio0_resources[] = {
{
.start = DOVE_SDIO0_PHYS_BASE,
.end = DOVE_SDIO0_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_SDIO0,
.end = IRQ_DOVE_SDIO0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_sdio0 = {
.name = "sdhci-dove",
.id = 0,
.dev = {
.dma_mask = &sdio_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = dove_sdio0_resources,
.num_resources = ARRAY_SIZE(dove_sdio0_resources),
};
void __init dove_sdio0_init(void)
{
platform_device_register(&dove_sdio0);
}
static struct resource dove_sdio1_resources[] = {
{
.start = DOVE_SDIO1_PHYS_BASE,
.end = DOVE_SDIO1_PHYS_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_DOVE_SDIO1,
.end = IRQ_DOVE_SDIO1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dove_sdio1 = {
.name = "sdhci-dove",
.id = 1,
.dev = {
.dma_mask = &sdio_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = dove_sdio1_resources,
.num_resources = ARRAY_SIZE(dove_sdio1_resources),
};
void __init dove_sdio1_init(void)
{
platform_device_register(&dove_sdio1);
}
void __init dove_init(void)
{
int tclk;
tclk = get_tclk();
printk(KERN_INFO "Dove 88AP510 SoC, ");
printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000);
#ifdef CONFIG_CACHE_TAUROS2
tauros2_init();
#endif
dove_setup_cpu_mbus();
dove_ge00_shared_data.t_clk = tclk;
dove_uart0_data[0].uartclk = tclk;
dove_uart1_data[0].uartclk = tclk;
dove_uart2_data[0].uartclk = tclk;
dove_uart3_data[0].uartclk = tclk;
dove_spi0_data.tclk = tclk;
dove_spi1_data.tclk = tclk;
/* internal devices that every board has */
dove_rtc_init();
dove_xor0_init();
dove_xor1_init();
}
| gpl-2.0 |
wan-qy/linux | arch/powerpc/platforms/cell/spufs/syscalls.c | 1689 | 2186 | #include <linux/file.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "spufs.h"
/**
* sys_spu_run - run code loaded into an SPU
*
* @unpc: next program counter for the SPU
* @ustatus: status of the SPU
*
* This system call transfers control of execution from a
* user space thread to an SPU. It returns when the SPU has
* finished executing or when it hits an error condition,
* and it is interrupted if a signal needs to be delivered
* to a handler in user space.
*
* The next program counter is set to the passed value
* before the SPU starts fetching code and the user space
* pointer gets updated with the new value when returning
* from kernel space.
*
* The status value returned from spu_run reflects the
* value of the spu_status register after the SPU has stopped.
*
*/
static long do_spu_run(struct file *filp,
__u32 __user *unpc,
__u32 __user *ustatus)
{
long ret;
struct spufs_inode_info *i;
u32 npc, status;
ret = -EFAULT;
if (get_user(npc, unpc))
goto out;
/* check if this file was created by spu_create */
ret = -EINVAL;
if (filp->f_op != &spufs_context_fops)
goto out;
i = SPUFS_I(file_inode(filp));
ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
ret = -EFAULT;
if (ustatus && put_user(status, ustatus))
ret = -EFAULT;
out:
return ret;
}
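/*
 * Illustrative user-space usage (a hedged sketch, not part of this
 * file; error handling is omitted and the entry-point value is made
 * up):
 *
 *	int fd = syscall(SYS_spu_create, "/spu/ctx", 0, 0755);
 *	__u32 npc = 0, status = 0;
 *	syscall(SYS_spu_run, fd, &npc, &status);
 *	// npc now holds the updated program counter, and status the
 *	// value of the spu_status register after the SPU stopped.
 */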
static long do_spu_create(const char __user *pathname, unsigned int flags,
umode_t mode, struct file *neighbor)
{
struct path path;
struct dentry *dentry;
int ret;
dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
ret = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
ret = spufs_create(&path, dentry, flags, mode, neighbor);
done_path_create(&path, dentry);
}
return ret;
}
struct spufs_calls spufs_calls = {
.create_thread = do_spu_create,
.spu_run = do_spu_run,
.notify_spus_active = do_notify_spus_active,
.owner = THIS_MODULE,
#ifdef CONFIG_COREDUMP
.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
#endif
};
| gpl-2.0 |
parheliamm/i939u2 | drivers/acpi/acpica/exoparg1.c | 3225 | 27545 |
/******************************************************************************
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exoparg1")
/*!
* Naming convention for AML interpreter execution routines.
*
* The routines that begin execution of AML opcodes are named with a common
* convention based upon the number of arguments, the number of target operands,
* and whether or not a value is returned:
*
* AcpiExOpcode_xA_yT_zR
*
* Where:
*
* xA - ARGUMENTS: The number of arguments (input operands) that are
* required for this opcode type (0 through 6 args).
* yT - TARGETS: The number of targets (output operands) that are required
* for this opcode type (0, 1, or 2 targets).
* zR - RETURN VALUE: Indicates whether this opcode type returns a value
* as the function return (0 or 1).
*
* The AcpiExOpcode* functions are called via the Dispatcher component with
* fully resolved operands.
!*/
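/*
 * For example, acpi_ex_opcode_1A_1T_1R() below handles opcodes that
 * take one argument and one target and return one value, such as
 * ToInteger (Data, Result).
 */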
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_0A_0T_1R
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
*
* RETURN: Status
*
* DESCRIPTION: Execute operator with no operands, one return value
*
******************************************************************************/
acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object *return_desc = NULL;
ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
switch (walk_state->opcode) {
case AML_TIMER_OP: /* Timer () */
/* Create a return object of type Integer */
return_desc =
acpi_ut_create_integer_object(acpi_os_get_timer());
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
default: /* Unknown opcode */
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
}
cleanup:
/* Delete return object on error */
if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
acpi_ut_remove_reference(return_desc);
walk_state->result_obj = NULL;
} else {
/* Save the return value */
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_1A_0T_0R
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
*
* RETURN: Status
*
* DESCRIPTION: Execute Type 1 monadic operator with numeric operand on
* object stack
*
******************************************************************************/
acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
switch (walk_state->opcode) {
case AML_RELEASE_OP: /* Release (mutex_object) */
status = acpi_ex_release_mutex(operand[0], walk_state);
break;
case AML_RESET_OP: /* Reset (event_object) */
status = acpi_ex_system_reset_event(operand[0]);
break;
case AML_SIGNAL_OP: /* Signal (event_object) */
status = acpi_ex_system_signal_event(operand[0]);
break;
case AML_SLEEP_OP: /* Sleep (msec_time) */
status = acpi_ex_system_do_sleep(operand[0]->integer.value);
break;
case AML_STALL_OP: /* Stall (usec_time) */
status =
acpi_ex_system_do_stall((u32) operand[0]->integer.value);
break;
case AML_UNLOAD_OP: /* Unload (Handle) */
status = acpi_ex_unload_table(operand[0]);
break;
default: /* Unknown opcode */
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_1A_1T_0R
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with one argument, one target, and no
* return value.
*
******************************************************************************/
acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object **operand = &walk_state->operands[0];
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
switch (walk_state->opcode) {
case AML_LOAD_OP:
status = acpi_ex_load_op(operand[0], operand[1], walk_state);
break;
default: /* Unknown opcode */
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
cleanup:
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_1A_1T_1R
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with one argument, one target, and a
* return value.
*
******************************************************************************/
acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
union acpi_operand_object *return_desc2 = NULL;
u32 temp32;
u32 i;
u64 power_of_ten;
u64 digit;
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
switch (walk_state->opcode) {
case AML_BIT_NOT_OP:
case AML_FIND_SET_LEFT_BIT_OP:
case AML_FIND_SET_RIGHT_BIT_OP:
case AML_FROM_BCD_OP:
case AML_TO_BCD_OP:
case AML_COND_REF_OF_OP:
/* Create a return object of type Integer for these opcodes */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
switch (walk_state->opcode) {
case AML_BIT_NOT_OP: /* Not (Operand, Result) */
return_desc->integer.value = ~operand[0]->integer.value;
break;
case AML_FIND_SET_LEFT_BIT_OP: /* find_set_left_bit (Operand, Result) */
return_desc->integer.value = operand[0]->integer.value;
/*
* The ACPI specification describes the Integer type as a little
* endian unsigned value, so this boundary condition is valid.
*/
for (temp32 = 0; return_desc->integer.value &&
temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
return_desc->integer.value >>= 1;
}
return_desc->integer.value = temp32;
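/*
 * Worked example: for Operand = 0x10 the loop shifts five
 * times before the value reaches zero, so the one-based
 * position of the most significant set bit is 5.
 */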
break;
case AML_FIND_SET_RIGHT_BIT_OP: /* find_set_right_bit (Operand, Result) */
return_desc->integer.value = operand[0]->integer.value;
/*
* The ACPI specification describes the Integer type as a little
* endian unsigned value, so this boundary condition is valid.
*/
for (temp32 = 0; return_desc->integer.value &&
temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
return_desc->integer.value <<= 1;
}
/* Since the bit position is one-based, subtract from 33 (65) */
return_desc->integer.value =
temp32 ==
0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break;
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
/*
* The 64-bit ACPI integer can hold 16 4-bit BCD characters
* (if table is 32-bit, integer can hold 8 BCD characters)
* Convert each 4-bit BCD value
*/
power_of_ten = 1;
return_desc->integer.value = 0;
digit = operand[0]->integer.value;
/* Convert each BCD digit (each is one nybble wide) */
for (i = 0;
(i < acpi_gbl_integer_nybble_width) && (digit > 0);
i++) {
/* Get the least significant 4-bit BCD digit */
temp32 = ((u32) digit) & 0xF;
/* Check the range of the digit */
if (temp32 > 9) {
ACPI_ERROR((AE_INFO,
"BCD digit too large (not decimal): 0x%X",
temp32));
status = AE_AML_NUMERIC_OVERFLOW;
goto cleanup;
}
/* Sum the digit into the result with the current power of 10 */
return_desc->integer.value +=
(((u64) temp32) * power_of_ten);
/* Shift to next BCD digit */
digit >>= 4;
/* Next power of 10 */
power_of_ten *= 10;
}
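/*
 * Worked example: BCDValue 0x1234 converts nybble by nybble as
 * 4*1 + 3*10 + 2*100 + 1*1000 = 1234 decimal.
 */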
break;
case AML_TO_BCD_OP: /* to_bcd (Operand, Result) */
return_desc->integer.value = 0;
digit = operand[0]->integer.value;
/* Each BCD digit is one nybble wide */
for (i = 0;
(i < acpi_gbl_integer_nybble_width) && (digit > 0);
i++) {
(void)acpi_ut_short_divide(digit, 10, &digit,
&temp32);
/*
* Insert the BCD digit that resides in the
* remainder from above
*/
return_desc->integer.value |=
(((u64) temp32) << ACPI_MUL_4(i));
}
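/*
 * Worked example: 1234 decimal packs as 4 into nybble 0, 3 into
 * nybble 1, 2 into nybble 2 and 1 into nybble 3, i.e. BCD 0x1234.
 */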
/* Overflow if there is any data left in Digit */
if (digit > 0) {
ACPI_ERROR((AE_INFO,
"Integer too large to convert to BCD: 0x%8.8X%8.8X",
ACPI_FORMAT_UINT64(operand[0]->
integer.value)));
status = AE_AML_NUMERIC_OVERFLOW;
goto cleanup;
}
break;
case AML_COND_REF_OF_OP: /* cond_ref_of (source_object, Result) */
/*
* This op is a little strange because the internal return value is
* different from the return value stored in the result descriptor
* (There are really two return values)
*/
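/*
 * Example AML (DEV0 is a hypothetical device):
 * CondRefOf (\_SB.DEV0, Local0) stores the reference in Local0
 * (the Result operand) while the expression itself evaluates to
 * TRUE or FALSE (the internal return value).
 */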
if ((struct acpi_namespace_node *)operand[0] ==
acpi_gbl_root_node) {
/*
* This means that the object does not exist in the namespace,
* return FALSE
*/
return_desc->integer.value = 0;
goto cleanup;
}
/* Get the object reference, store it, and remove our reference */
status = acpi_ex_get_object_reference(operand[0],
&return_desc2,
walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
status =
acpi_ex_store(return_desc2, operand[1], walk_state);
acpi_ut_remove_reference(return_desc2);
/* The object exists in the namespace, return TRUE */
return_desc->integer.value = ACPI_UINT64_MAX;
goto cleanup;
default:
/* No other opcodes get here */
break;
}
break;
case AML_STORE_OP: /* Store (Source, Target) */
/*
* A store operand is typically a number, string, buffer or lvalue
* Be careful about deleting the source object,
* since the object itself may have been stored.
*/
status = acpi_ex_store(operand[0], operand[1], walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* It is possible that the Store already produced a return object */
if (!walk_state->result_obj) {
/*
* Normally, we would remove a reference on the Operand[0]
* parameter; But since it is being used as the internal return
* object (meaning we would normally increment it), the two
* cancel out, and we simply don't do anything.
*/
walk_state->result_obj = operand[0];
walk_state->operands[0] = NULL; /* Prevent deletion */
}
return_ACPI_STATUS(status);
/*
* ACPI 2.0 Opcodes
*/
case AML_COPY_OP: /* Copy (Source, Target) */
status =
acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
walk_state);
break;
case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_DECIMAL);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
break;
case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_HEX);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
break;
case AML_TO_BUFFER_OP: /* to_buffer (Data, Result) */
status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
break;
case AML_TO_INTEGER_OP: /* to_integer (Data, Result) */
status = acpi_ex_convert_to_integer(operand[0], &return_desc,
ACPI_ANY_BASE);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
break;
case AML_SHIFT_LEFT_BIT_OP: /* shift_left_bit (Source, bit_num) */
case AML_SHIFT_RIGHT_BIT_OP: /* shift_right_bit (Source, bit_num) */
/* These are two obsolete opcodes */
ACPI_ERROR((AE_INFO,
"%s is obsolete and not implemented",
acpi_ps_get_opcode_name(walk_state->opcode)));
status = AE_SUPPORT;
goto cleanup;
default: /* Unknown opcode */
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
if (ACPI_SUCCESS(status)) {
/* Store the return value computed above into the target object */
status = acpi_ex_store(return_desc, operand[1], walk_state);
}
cleanup:
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
/* Save return object on success */
else if (!walk_state->result_obj) {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_1A_0T_1R
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with one argument, no target, and a return value
*
******************************************************************************/
acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *temp_desc;
union acpi_operand_object *return_desc = NULL;
acpi_status status = AE_OK;
u32 type;
u64 value;
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
switch (walk_state->opcode) {
case AML_LNOT_OP: /* LNot (Operand) */
return_desc = acpi_ut_create_integer_object((u64) 0);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* Set result to ONES (TRUE) if Value == 0. Note:
* return_desc->Integer.Value is initially == 0 (FALSE) from above.
*/
if (!operand[0]->integer.value) {
return_desc->integer.value = ACPI_UINT64_MAX;
}
break;
case AML_DECREMENT_OP: /* Decrement (Operand) */
case AML_INCREMENT_OP: /* Increment (Operand) */
/*
* Create a new integer. Can't just get the base integer and
* increment it because it may be an Arg or Field.
*/
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* Since we are expecting a Reference operand, it can be either a
* NS Node or an internal object.
*/
temp_desc = operand[0];
if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
ACPI_DESC_TYPE_OPERAND) {
/* Internal reference object - prevent deletion */
acpi_ut_add_reference(temp_desc);
}
/*
* Convert the Reference operand to an Integer (This removes a
* reference on the Operand[0] object)
*
* NOTE: We use LNOT_OP here in order to force resolution of the
* reference operand to an actual integer.
*/
status =
acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc,
walk_state);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While resolving operands for [%s]",
acpi_ps_get_opcode_name(walk_state->
opcode)));
goto cleanup;
}
/*
* temp_desc is now guaranteed to be an Integer object --
* Perform the actual increment or decrement
*/
if (walk_state->opcode == AML_INCREMENT_OP) {
return_desc->integer.value =
temp_desc->integer.value + 1;
} else {
return_desc->integer.value =
temp_desc->integer.value - 1;
}
/* Finished with this Integer object */
acpi_ut_remove_reference(temp_desc);
/*
* Store the result back (indirectly) through the original
* Reference object
*/
status = acpi_ex_store(return_desc, operand[0], walk_state);
break;
case AML_TYPE_OP: /* object_type (source_object) */
/*
* Note: The operand is not resolved at this point because we want to
* get the associated object, not its value. For example, we don't
* want to resolve a field_unit to its value, we want the actual
* field_unit object.
*/
/* Get the type of the base object */
status =
acpi_ex_resolve_multiple(walk_state, operand[0], &type,
NULL);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_integer_object((u64) type);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
case AML_SIZE_OF_OP: /* size_of (source_object) */
/*
* Note: The operand is not resolved at this point because we want to
* get the associated object, not its value.
*/
/* Get the base object */
status = acpi_ex_resolve_multiple(walk_state,
operand[0], &type,
&temp_desc);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/*
* The type of the base object must be integer, buffer, string, or
* package. All others are not supported.
*
* NOTE: Integer is not specifically supported by the ACPI spec,
* but is supported implicitly via implicit operand conversion.
* Rather than bother with conversion, we just use the byte width
* global (4 or 8 bytes).
*/
switch (type) {
case ACPI_TYPE_INTEGER:
value = acpi_gbl_integer_byte_width;
break;
case ACPI_TYPE_STRING:
value = temp_desc->string.length;
break;
case ACPI_TYPE_BUFFER:
/* Buffer arguments may not be evaluated at this point */
status = acpi_ds_get_buffer_arguments(temp_desc);
value = temp_desc->buffer.length;
break;
case ACPI_TYPE_PACKAGE:
/* Package arguments may not be evaluated at this point */
status = acpi_ds_get_package_arguments(temp_desc);
value = temp_desc->package.count;
break;
default:
ACPI_ERROR((AE_INFO,
"Operand must be Buffer/Integer/String/Package - found type %s",
acpi_ut_get_type_name(type)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/*
* Now that we have the size of the object, create a result
* object to hold the value
*/
return_desc = acpi_ut_create_integer_object(value);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
case AML_REF_OF_OP: /* ref_of (source_object) */
status =
acpi_ex_get_object_reference(operand[0], &return_desc,
walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
break;
case AML_DEREF_OF_OP: /* deref_of (obj_reference | String) */
/* Check for a method local or argument, or standalone String */
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
ACPI_DESC_TYPE_NAMED) {
temp_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node *)
operand[0]);
if (temp_desc
&& ((temp_desc->common.type == ACPI_TYPE_STRING)
|| (temp_desc->common.type ==
ACPI_TYPE_LOCAL_REFERENCE))) {
operand[0] = temp_desc;
acpi_ut_add_reference(temp_desc);
} else {
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
} else {
switch ((operand[0])->common.type) {
case ACPI_TYPE_LOCAL_REFERENCE:
/*
* This is a deref_of (local_x | arg_x)
*
* Must resolve/dereference the local/arg reference first
*/
switch (operand[0]->reference.class) {
case ACPI_REFCLASS_LOCAL:
case ACPI_REFCLASS_ARG:
/* Set Operand[0] to the value of the local/arg */
status =
acpi_ds_method_data_get_value
(operand[0]->reference.class,
operand[0]->reference.value,
walk_state, &temp_desc);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/*
* Delete our reference to the input object and
* point to the object just retrieved
*/
acpi_ut_remove_reference(operand[0]);
operand[0] = temp_desc;
break;
case ACPI_REFCLASS_REFOF:
/* Get the object to which the reference refers */
temp_desc =
operand[0]->reference.object;
acpi_ut_remove_reference(operand[0]);
operand[0] = temp_desc;
break;
default:
/* Must be an Index op - handled below */
break;
}
break;
case ACPI_TYPE_STRING:
break;
default:
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
}
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
ACPI_DESC_TYPE_NAMED) {
if ((operand[0])->common.type == ACPI_TYPE_STRING) {
/*
* This is a deref_of (String). The string is a reference
* to a named ACPI object.
*
* 1) Find the owning Node
* 2) Dereference the node to an actual object. Could be a
* Field, so we need to resolve the node to a value.
*/
status =
acpi_ns_get_node(walk_state->scope_info->
scope.node,
operand[0]->string.pointer,
ACPI_NS_SEARCH_PARENT,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&return_desc));
if (ACPI_FAILURE(status)) {
goto cleanup;
}
status =
acpi_ex_resolve_node_to_value
(ACPI_CAST_INDIRECT_PTR
(struct acpi_namespace_node, &return_desc),
walk_state);
goto cleanup;
}
}
/* Operand[0] may have changed from the code above */
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
ACPI_DESC_TYPE_NAMED) {
/*
* This is a deref_of (object_reference)
* Get the actual object from the Node (This is the dereference).
* This case may only happen when a local_x or arg_x is
* dereferenced above.
*/
return_desc = acpi_ns_get_attached_object((struct
acpi_namespace_node
*)
operand[0]);
acpi_ut_add_reference(return_desc);
} else {
/*
* This must be a reference object produced by either the
* Index() or ref_of() operator
*/
switch (operand[0]->reference.class) {
case ACPI_REFCLASS_INDEX:
/*
* The target type for the Index operator must be
* either a Buffer or a Package
*/
switch (operand[0]->reference.target_type) {
case ACPI_TYPE_BUFFER_FIELD:
temp_desc =
operand[0]->reference.object;
/*
* Create a new object that contains one element of the
* buffer -- the element pointed to by the index.
*
* NOTE: index into a buffer is NOT a pointer to a
* sub-buffer of the main buffer, it is only a pointer to a
* single element (byte) of the buffer!
*
* Since we are returning the value of the buffer at the
* indexed location, we don't need to add an additional
* reference to the buffer itself.
*/
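/*
 * Example AML (BUF0 is a hypothetical buffer object):
 * DerefOf (Index (BUF0, 2)) therefore yields the single byte
 * BUF0[2] as an Integer, not a sub-buffer.
 */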
return_desc =
acpi_ut_create_integer_object((u64)
temp_desc->
buffer.
pointer
[operand
[0]->
reference.
value]);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
case ACPI_TYPE_PACKAGE:
/*
* Return the referenced element of the package. We must
* add another reference to the referenced object, however.
*/
return_desc =
*(operand[0]->reference.where);
if (return_desc) {
acpi_ut_add_reference
(return_desc);
}
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown Index TargetType 0x%X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
break;
case ACPI_REFCLASS_REFOF:
return_desc = operand[0]->reference.object;
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) {
return_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node
*)
return_desc);
}
/* Add another reference to the object! */
acpi_ut_add_reference(return_desc);
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown class in reference(%p) - 0x%2.2X",
operand[0],
operand[0]->reference.class));
status = AE_TYPE;
goto cleanup;
}
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
cleanup:
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
/* Save return object on success */
else {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
| gpl-2.0 |
nhondong/android_kernel_samsung_v2wifixx | drivers/usb/serial/oti6858.c | 3993 | 27227 | /*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*
* Copyleft (C) 2007 Kees Lemmens (adapted for kernel 2.6.20)
* Copyright (C) 2006 Tomasz Michal Lukaszewski (FIXME: add e-mail)
* Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp.
*
* Many thanks to the authors of pl2303 driver: all functions in this file
* are heavily based on pl2303 code, buffering code is a 1-to-1 copy.
*
* Warning! You use this driver at your own risk! The only official
* description of this device I have is the datasheet from the
* manufacturer, and it contains almost none of the information needed
* to write a driver. Almost all knowledge used while writing this
* driver was gathered by:
* - analyzing traffic between device and the M$ Windows 2000 driver,
* - trying different bit combinations and checking pin states
* with a voltmeter,
* - receiving malformed frames and producing buffer overflows
* to learn how errors are reported,
* So, THIS CODE CAN DESTROY OTi-6858 AND ANY OTHER DEVICES, THAT ARE
* CONNECTED TO IT!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
* TODO:
* - implement correct flushing for ioctls and oti6858_close()
* - check how errors (rx overflow, parity error, framing error) are reported
* - implement oti6858_break_ctl()
* - implement more ioctls
* - test/implement flow control
* - allow setting custom baud rates
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include "oti6858.h"
#define OTI6858_DESCRIPTION \
"Ours Technology Inc. OTi-6858 USB to serial adapter driver"
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
#define OTI6858_VERSION "0.2"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver oti6858_driver = {
.name = "oti6858",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
};
static bool debug;
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_GET_STATUS 0x01
#define OTI6858_REQ_SET_LINE (USB_DIR_OUT | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_SET_LINE 0x00
#define OTI6858_REQ_CHECK_TXBUFF (USB_DIR_IN | USB_TYPE_VENDOR | 0x01)
#define OTI6858_REQ_T_CHECK_TXBUFF 0x00
/* format of the control packet */
struct oti6858_control_pkt {
__le16 divisor; /* baud rate = 96000000 / (16 * divisor), LE */
#define OTI6858_MAX_BAUD_RATE 3000000
u8 frame_fmt;
#define FMT_STOP_BITS_MASK 0xc0
#define FMT_STOP_BITS_1 0x00
#define FMT_STOP_BITS_2 0x40 /* 1.5 stop bits if FMT_DATA_BITS_5 */
#define FMT_PARITY_MASK 0x38
#define FMT_PARITY_NONE 0x00
#define FMT_PARITY_ODD 0x08
#define FMT_PARITY_EVEN 0x18
#define FMT_PARITY_MARK 0x28
#define FMT_PARITY_SPACE 0x38
#define FMT_DATA_BITS_MASK 0x03
#define FMT_DATA_BITS_5 0x00
#define FMT_DATA_BITS_6 0x01
#define FMT_DATA_BITS_7 0x02
#define FMT_DATA_BITS_8 0x03
u8 something; /* always equals 0x43 */
u8 control; /* settings of flow control lines */
#define CONTROL_MASK 0x0c
#define CONTROL_DTR_HIGH 0x08
#define CONTROL_RTS_HIGH 0x04
u8 tx_status;
#define TX_BUFFER_EMPTIED 0x09
u8 pin_state;
#define PIN_MASK 0x3f
#define PIN_RTS 0x20 /* output pin */
#define PIN_CTS 0x10 /* input pin, active low */
#define PIN_DSR 0x08 /* input pin, active low */
#define PIN_DTR 0x04 /* output pin */
#define PIN_RI 0x02 /* input pin, active low */
#define PIN_DCD 0x01 /* input pin, active low */
u8 rx_bytes_avail; /* number of bytes in rx buffer */
};
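/*
 * Example: the power-on default divisor 0x009c (156, see
 * oti6858_open()) gives 96000000 / (16 * 156) = ~38461 bps,
 * i.e. roughly the expected 38400 bps.
 */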
#define OTI6858_CTRL_PKT_SIZE sizeof(struct oti6858_control_pkt)
#define OTI6858_CTRL_EQUALS_PENDING(a, priv) \
(((a)->divisor == (priv)->pending_setup.divisor) \
&& ((a)->control == (priv)->pending_setup.control) \
&& ((a)->frame_fmt == (priv)->pending_setup.frame_fmt))
/* function prototypes */
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port);
static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static void oti6858_init_termios(struct tty_struct *tty);
static int oti6858_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static void oti6858_read_int_callback(struct urb *urb);
static void oti6858_read_bulk_callback(struct urb *urb);
static void oti6858_write_bulk_callback(struct urb *urb);
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int oti6858_write_room(struct tty_struct *tty);
static int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int oti6858_startup(struct usb_serial *serial);
static void oti6858_release(struct usb_serial *serial);
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
.owner = THIS_MODULE,
.name = "oti6858",
},
.id_table = id_table,
.num_ports = 1,
.open = oti6858_open,
.close = oti6858_close,
.write = oti6858_write,
.ioctl = oti6858_ioctl,
.set_termios = oti6858_set_termios,
.init_termios = oti6858_init_termios,
.tiocmget = oti6858_tiocmget,
.tiocmset = oti6858_tiocmset,
.read_bulk_callback = oti6858_read_bulk_callback,
.read_int_callback = oti6858_read_int_callback,
.write_bulk_callback = oti6858_write_bulk_callback,
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
.attach = oti6858_startup,
.release = oti6858_release,
};
static struct usb_serial_driver * const serial_drivers[] = {
&oti6858_device, NULL
};
struct oti6858_private {
spinlock_t lock;
struct oti6858_control_pkt status;
struct {
u8 read_urb_in_use;
u8 write_urb_in_use;
} flags;
struct delayed_work delayed_write_work;
struct {
__le16 divisor;
u8 frame_fmt;
u8 control;
} pending_setup;
u8 transient;
u8 setup_done;
struct delayed_work delayed_setup_work;
wait_queue_head_t intr_wait;
struct usb_serial_port *port; /* USB port with which associated */
};
static void setup_line(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_setup_work.work);
struct usb_serial_port *port = priv->port;
struct oti6858_control_pkt *new_setup;
unsigned long flags;
int result;
dbg("%s(port = %d)", __func__, port->number);
new_setup = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (new_setup == NULL) {
dev_err(&port->dev, "%s(): out of memory!\n", __func__);
/* we will try again */
schedule_delayed_work(&priv->delayed_setup_work,
msecs_to_jiffies(2));
return;
}
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_GET_STATUS,
OTI6858_REQ_GET_STATUS,
0, 0,
new_setup, OTI6858_CTRL_PKT_SIZE,
100);
if (result != OTI6858_CTRL_PKT_SIZE) {
dev_err(&port->dev, "%s(): error reading status\n", __func__);
kfree(new_setup);
/* we will try again */
schedule_delayed_work(&priv->delayed_setup_work,
msecs_to_jiffies(2));
return;
}
spin_lock_irqsave(&priv->lock, flags);
if (!OTI6858_CTRL_EQUALS_PENDING(new_setup, priv)) {
new_setup->divisor = priv->pending_setup.divisor;
new_setup->control = priv->pending_setup.control;
new_setup->frame_fmt = priv->pending_setup.frame_fmt;
spin_unlock_irqrestore(&priv->lock, flags);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_SET_LINE,
OTI6858_REQ_SET_LINE,
0, 0,
new_setup, OTI6858_CTRL_PKT_SIZE,
100);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
result = 0;
}
kfree(new_setup);
spin_lock_irqsave(&priv->lock, flags);
if (result != OTI6858_CTRL_PKT_SIZE)
priv->transient = 0;
priv->setup_done = 1;
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
}
}
static void send_data(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_write_work.work);
struct usb_serial_port *port = priv->port;
int count = 0, result;
unsigned long flags;
u8 *allow;
dbg("%s(port = %d)", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
if (priv->flags.write_urb_in_use) {
spin_unlock_irqrestore(&priv->lock, flags);
schedule_delayed_work(&priv->delayed_write_work,
msecs_to_jiffies(2));
return;
}
priv->flags.write_urb_in_use = 1;
spin_unlock_irqrestore(&priv->lock, flags);
spin_lock_irqsave(&port->lock, flags);
count = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
if (count > port->bulk_out_size)
count = port->bulk_out_size;
if (count != 0) {
allow = kmalloc(1, GFP_KERNEL);
if (!allow) {
dev_err_console(port, "%s(): kmalloc failed\n",
__func__);
return;
}
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
OTI6858_REQ_CHECK_TXBUFF,
count, 0, allow, 1, 100);
if (result != 1 || *allow != 0)
count = 0;
kfree(allow);
}
if (count == 0) {
priv->flags.write_urb_in_use = 0;
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
}
return;
}
count = kfifo_out_locked(&port->write_fifo,
port->write_urb->transfer_buffer,
count, &port->lock);
port->write_urb->transfer_buffer_length = count;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
if (result != 0) {
dev_err_console(port, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
priv->flags.write_urb_in_use = 0;
}
usb_serial_port_softint(port);
}
static int oti6858_startup(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct oti6858_private *priv;
int i;
for (i = 0; i < serial->num_ports; ++i) {
priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
if (!priv)
break;
spin_lock_init(&priv->lock);
init_waitqueue_head(&priv->intr_wait);
/* INIT_WORK(&priv->setup_work, setup_line, serial->port[i]); */
/* INIT_WORK(&priv->write_work, send_data, serial->port[i]); */
priv->port = port;
INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
usb_set_serial_port_data(serial->port[i], priv);
}
if (i == serial->num_ports)
return 0;
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
}
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
dbg("%s(port = %d, count = %d)", __func__, port->number, count);
if (!count)
return count;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
return count;
}
static int oti6858_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int room = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
spin_lock_irqsave(&port->lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
return room;
}
static int oti6858_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int chars = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
spin_lock_irqsave(&port->lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
return chars;
}
static void oti6858_init_termios(struct tty_struct *tty)
{
*(tty->termios) = tty_std_termios;
tty->termios->c_cflag = B38400 | CS8 | CREAD | HUPCL | CLOCAL;
tty->termios->c_ispeed = 38400;
tty->termios->c_ospeed = 38400;
}
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag;
u8 frame_fmt, control;
__le16 divisor;
int br;
dbg("%s(port = %d)", __func__, port->number);
if (!tty) {
dbg("%s(): no tty structures", __func__);
return;
}
cflag = tty->termios->c_cflag;
spin_lock_irqsave(&priv->lock, flags);
divisor = priv->pending_setup.divisor;
frame_fmt = priv->pending_setup.frame_fmt;
control = priv->pending_setup.control;
spin_unlock_irqrestore(&priv->lock, flags);
frame_fmt &= ~FMT_DATA_BITS_MASK;
switch (cflag & CSIZE) {
case CS5:
frame_fmt |= FMT_DATA_BITS_5;
break;
case CS6:
frame_fmt |= FMT_DATA_BITS_6;
break;
case CS7:
frame_fmt |= FMT_DATA_BITS_7;
break;
default:
case CS8:
frame_fmt |= FMT_DATA_BITS_8;
break;
}
/* The manufacturer claims that this device can work with baud rates
* up to 3 Mbps; I've tested it only at 115200 bps, so I can't
* guarantee that any other baud rate will work (especially
* the higher ones)
*/
br = tty_get_baud_rate(tty);
if (br == 0) {
divisor = 0;
} else {
int real_br;
int new_divisor;
br = min(br, OTI6858_MAX_BAUD_RATE);
new_divisor = (96000000 + 8 * br) / (16 * br);
real_br = 96000000 / (16 * new_divisor);
divisor = cpu_to_le16(new_divisor);
tty_encode_baud_rate(tty, real_br, real_br);
}
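/*
 * Worked example: for br = 115200 the rounded divisor is
 * (96000000 + 8 * 115200) / (16 * 115200) = 52, and the rate
 * encoded back into the termios is 96000000 / (16 * 52) =
 * 115384 bps.
 */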
frame_fmt &= ~FMT_STOP_BITS_MASK;
if ((cflag & CSTOPB) != 0)
frame_fmt |= FMT_STOP_BITS_2;
else
frame_fmt |= FMT_STOP_BITS_1;
frame_fmt &= ~FMT_PARITY_MASK;
if ((cflag & PARENB) != 0) {
if ((cflag & PARODD) != 0)
frame_fmt |= FMT_PARITY_ODD;
else
frame_fmt |= FMT_PARITY_EVEN;
} else {
frame_fmt |= FMT_PARITY_NONE;
}
control &= ~CONTROL_MASK;
if ((cflag & CRTSCTS) != 0)
control |= (CONTROL_DTR_HIGH | CONTROL_RTS_HIGH);
/* change control lines if we are switching to or from B0 */
/* FIXME:
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
else
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
set_control_lines(serial->dev, control);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
}
*/
spin_lock_irqsave(&priv->lock, flags);
if (divisor != priv->pending_setup.divisor
|| control != priv->pending_setup.control
|| frame_fmt != priv->pending_setup.frame_fmt) {
priv->pending_setup.divisor = divisor;
priv->pending_setup.control = control;
priv->pending_setup.frame_fmt = frame_fmt;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct oti6858_control_pkt *buf;
unsigned long flags;
int result;
dbg("%s(port = %d)", __func__, port->number);
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (buf == NULL) {
dev_err(&port->dev, "%s(): out of memory!\n", __func__);
return -ENOMEM;
}
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
OTI6858_REQ_T_GET_STATUS,
OTI6858_REQ_GET_STATUS,
0, 0,
buf, OTI6858_CTRL_PKT_SIZE,
100);
if (result != OTI6858_CTRL_PKT_SIZE) {
/* assume default (after power-on reset) values */
buf->divisor = cpu_to_le16(0x009c); /* 38400 bps */
buf->frame_fmt = 0x03; /* 8N1 */
buf->something = 0x43;
buf->control = 0x4c; /* DTR, RTS */
buf->tx_status = 0x00;
buf->pin_state = 0x5b; /* RTS, CTS, DSR, DTR, RI, DCD */
buf->rx_bytes_avail = 0x00;
}
spin_lock_irqsave(&priv->lock, flags);
memcpy(&priv->status, buf, OTI6858_CTRL_PKT_SIZE);
priv->pending_setup.divisor = buf->divisor;
priv->pending_setup.frame_fmt = buf->frame_fmt;
priv->pending_setup.control = buf->control;
spin_unlock_irqrestore(&priv->lock, flags);
kfree(buf);
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
oti6858_close(port);
return result;
}
/* setup termios */
if (tty)
oti6858_set_termios(tty, port, &tmp_termios);
port->port.drain_delay = 256; /* FIXME: check the FIFO length */
return 0;
}
static void oti6858_close(struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
spin_lock_irqsave(&port->lock, flags);
/* clear out any remaining data in the buffer */
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s(): after buf_clear()", __func__);
/* cancel scheduled setup */
cancel_delayed_work_sync(&priv->delayed_setup_work);
cancel_delayed_work_sync(&priv->delayed_write_work);
/* shutdown our urbs */
dbg("%s(): shutting down urbs", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
}
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
dbg("%s(port = %d, set = 0x%08x, clear = 0x%08x)",
__func__, port->number, set, clear);
if (!usb_get_intfdata(port->serial->interface))
return -ENODEV;
/* FIXME: check if this is correct (active high/low) */
spin_lock_irqsave(&priv->lock, flags);
control = priv->pending_setup.control;
if ((set & TIOCM_RTS) != 0)
control |= CONTROL_RTS_HIGH;
if ((set & TIOCM_DTR) != 0)
control |= CONTROL_DTR_HIGH;
if ((clear & TIOCM_RTS) != 0)
control &= ~CONTROL_RTS_HIGH;
if ((clear & TIOCM_DTR) != 0)
control &= ~CONTROL_DTR_HIGH;
if (control != priv->pending_setup.control)
priv->pending_setup.control = control;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int oti6858_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned pin_state;
unsigned result = 0;
dbg("%s(port = %d)", __func__, port->number);
if (!usb_get_intfdata(port->serial->interface))
return -ENODEV;
spin_lock_irqsave(&priv->lock, flags);
pin_state = priv->status.pin_state & PIN_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
/* FIXME: check if this is correct (active high/low) */
if ((pin_state & PIN_RTS) != 0)
result |= TIOCM_RTS;
if ((pin_state & PIN_CTS) != 0)
result |= TIOCM_CTS;
if ((pin_state & PIN_DSR) != 0)
result |= TIOCM_DSR;
if ((pin_state & PIN_DTR) != 0)
result |= TIOCM_DTR;
if ((pin_state & PIN_RI) != 0)
result |= TIOCM_RI;
if ((pin_state & PIN_DCD) != 0)
result |= TIOCM_CD;
dbg("%s() = 0x%08x", __func__, result);
return result;
}
static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int prev, status;
unsigned int changed;
spin_lock_irqsave(&priv->lock, flags);
prev = priv->status.pin_state;
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
wait_event_interruptible(priv->intr_wait,
priv->status.pin_state != prev);
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irqsave(&priv->lock, flags);
status = priv->status.pin_state & PIN_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
changed = prev ^ status;
/* FIXME: check if this is correct (active high/low) */
if (((arg & TIOCM_RNG) && (changed & PIN_RI)) ||
((arg & TIOCM_DSR) && (changed & PIN_DSR)) ||
((arg & TIOCM_CD) && (changed & PIN_DCD)) ||
((arg & TIOCM_CTS) && (changed & PIN_CTS)))
return 0;
prev = status;
}
/* NOTREACHED */
return 0;
}
static int oti6858_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
dbg("%s(port = %d, cmd = 0x%04x, arg = 0x%08lx)",
__func__, port->number, cmd, arg);
switch (cmd) {
case TIOCMIWAIT:
dbg("%s(): TIOCMIWAIT", __func__);
return wait_modem_info(port, arg);
default:
dbg("%s(): 0x%04x not supported", __func__, cmd);
break;
}
return -ENOIOCTLCMD;
}
static void oti6858_release(struct usb_serial *serial)
{
int i;
dbg("%s()", __func__);
for (i = 0; i < serial->num_ports; ++i)
kfree(usb_get_serial_port_data(serial->port[i]));
}
static void oti6858_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
int transient = 0, can_recv = 0, resubmit = 1;
int status = urb->status;
dbg("%s(port = %d, status = %d)",
__func__, port->number, status);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s(): urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s(): nonzero urb status received: %d",
__func__, status);
break;
}
if (status == 0 && urb->actual_length == OTI6858_CTRL_PKT_SIZE) {
struct oti6858_control_pkt *xs = urb->transfer_buffer;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (!priv->transient) {
if (!OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
if (xs->rx_bytes_avail == 0) {
priv->transient = 4;
priv->setup_done = 0;
resubmit = 0;
dbg("%s(): scheduling setup_line()",
__func__);
schedule_delayed_work(&priv->delayed_setup_work, 0);
}
}
} else {
if (OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
priv->transient = 0;
} else if (!priv->setup_done) {
resubmit = 0;
} else if (--priv->transient == 0) {
if (xs->rx_bytes_avail == 0) {
priv->transient = 4;
priv->setup_done = 0;
resubmit = 0;
dbg("%s(): scheduling setup_line()",
__func__);
schedule_delayed_work(&priv->delayed_setup_work, 0);
}
}
}
if (!priv->transient) {
if (xs->pin_state != priv->status.pin_state)
wake_up_interruptible(&priv->intr_wait);
memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
}
if (!priv->transient && xs->rx_bytes_avail != 0) {
can_recv = xs->rx_bytes_avail;
priv->flags.read_urb_in_use = 1;
}
transient = priv->transient;
spin_unlock_irqrestore(&priv->lock, flags);
}
if (can_recv) {
int result;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result != 0) {
priv->flags.read_urb_in_use = 0;
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
} else {
resubmit = 0;
}
} else if (!transient) {
unsigned long flags;
int count;
spin_lock_irqsave(&port->lock, flags);
count = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
spin_lock_irqsave(&priv->lock, flags);
if (priv->flags.write_urb_in_use == 0 && count != 0) {
schedule_delayed_work(&priv->delayed_write_work, 0);
resubmit = 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
if (resubmit) {
int result;
/* dbg("%s(): submitting interrupt urb", __func__); */
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&urb->dev->dev,
"%s(): usb_submit_urb() failed with"
" error %d\n", __func__, result);
}
}
}
static void oti6858_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int status = urb->status;
int result;
dbg("%s(port = %d, status = %d)",
__func__, port->number, status);
spin_lock_irqsave(&priv->lock, flags);
priv->flags.read_urb_in_use = 0;
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
dbg("%s(): unable to handle the error, exiting", __func__);
return;
}
tty = tty_port_tty_get(&port->port);
if (tty != NULL && urb->actual_length > 0) {
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
/* schedule the interrupt urb */
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0 && result != -EPERM) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
}
}
static void oti6858_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
int status = urb->status;
int result;
dbg("%s(port = %d, status = %d)",
__func__, port->number, status);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s(): urb shutting down with status: %d",
__func__, status);
priv->flags.write_urb_in_use = 0;
return;
default:
/* error in the urb, so we have to resubmit it */
dbg("%s(): nonzero write bulk status received: %d",
__func__, status);
dbg("%s(): overflow in write", __func__);
port->write_urb->transfer_buffer_length = 1;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
} else {
return;
}
}
priv->flags.write_urb_in_use = 0;
/* schedule the interrupt urb if we are still open */
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&port->dev, "%s(): failed submitting int urb,"
" error %d\n", __func__, result);
}
}
module_usb_serial_driver(oti6858_driver, serial_drivers);
MODULE_DESCRIPTION(OTI6858_DESCRIPTION);
MODULE_AUTHOR(OTI6858_AUTHOR);
MODULE_VERSION(OTI6858_VERSION);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "enable debug output");
| gpl-2.0 |
imoseyon/leanKernel-d2vzw | drivers/net/ethernet/sfc/txc43128_phy.c | 7321 | 16313 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for Transwitch/Mysticom CX4 retimer
* see www.transwitch.com, part is TXC-43128
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
/* We expect these MMDs to be in the package */
#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/**************************************************************************
*
* Compile-time config
*
**************************************************************************
*/
#define TXCNAME "TXC43128"
/* Total length of time we'll wait for the PHY to come out of reset (ms) */
#define TXC_MAX_RESET_TIME 500
/* Interval between checks (ms) */
#define TXC_RESET_WAIT 10
/* How long to run BIST (us) */
#define TXC_BIST_DURATION 50
/**************************************************************************
*
* Register definitions
*
**************************************************************************
*/
/* Command register */
#define TXC_GLRGS_GLCMD 0xc004
/* Useful bits in command register */
/* Lane power-down */
#define TXC_GLCMD_L01PD_LBN 5
#define TXC_GLCMD_L23PD_LBN 6
/* Limited SW reset: preserves configuration but
* initiates a logic reset. Self-clearing */
#define TXC_GLCMD_LMTSWRST_LBN 14
/* Signal Quality Control */
#define TXC_GLRGS_GSGQLCTL 0xc01a
/* Enable bit */
#define TXC_GSGQLCT_SGQLEN_LBN 15
/* Lane selection */
#define TXC_GSGQLCT_LNSL_LBN 13
#define TXC_GSGQLCT_LNSL_WIDTH 2
/* Analog TX control */
#define TXC_ALRGS_ATXCTL 0xc040
/* Lane power-down */
#define TXC_ATXCTL_TXPD3_LBN 15
#define TXC_ATXCTL_TXPD2_LBN 14
#define TXC_ATXCTL_TXPD1_LBN 13
#define TXC_ATXCTL_TXPD0_LBN 12
/* Amplitude on lanes 0, 1 */
#define TXC_ALRGS_ATXAMP0 0xc041
/* Amplitude on lanes 2, 3 */
#define TXC_ALRGS_ATXAMP1 0xc042
/* Bit position of value for lane 0 (or 2) */
#define TXC_ATXAMP_LANE02_LBN 3
/* Bit position of value for lane 1 (or 3) */
#define TXC_ATXAMP_LANE13_LBN 11
#define TXC_ATXAMP_1280_mV 0
#define TXC_ATXAMP_1200_mV 8
#define TXC_ATXAMP_1120_mV 12
#define TXC_ATXAMP_1060_mV 14
#define TXC_ATXAMP_0820_mV 25
#define TXC_ATXAMP_0720_mV 26
#define TXC_ATXAMP_0580_mV 27
#define TXC_ATXAMP_0440_mV 28
#define TXC_ATXAMP_0820_BOTH \
((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
| (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
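/* Note (added): with TXC_ATXAMP_0820_mV == 25 this expands to
 * (25 << 3) | (25 << 11) == 0xc8c8, i.e. 820 mV selected for both
 * lanes covered by the register. */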
#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
/* Preemphasis on lanes 0, 1 */
#define TXC_ALRGS_ATXPRE0 0xc043
/* Preemphasis on lanes 2, 3 */
#define TXC_ALRGS_ATXPRE1 0xc044
#define TXC_ATXPRE_NONE 0
#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
#define TXC_ALRGS_ARXCTL 0xc045
/* Lane power-down */
#define TXC_ARXCTL_RXPD3_LBN 15
#define TXC_ARXCTL_RXPD2_LBN 14
#define TXC_ARXCTL_RXPD1_LBN 13
#define TXC_ARXCTL_RXPD0_LBN 12
/* Main control */
#define TXC_MRGS_CTL 0xc340
/* Bits in main control */
#define TXC_MCTL_RESET_LBN 15 /* Self clear */
#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
/* GPIO output */
#define TXC_GPIO_OUTPUT 0xc346
#define TXC_GPIO_DIR 0xc348
/* Vendor-specific BIST registers */
#define TXC_BIST_CTL 0xc280
#define TXC_BIST_TXFRMCNT 0xc281
#define TXC_BIST_RX0FRMCNT 0xc282
#define TXC_BIST_RX1FRMCNT 0xc283
#define TXC_BIST_RX2FRMCNT 0xc284
#define TXC_BIST_RX3FRMCNT 0xc285
#define TXC_BIST_RX0ERRCNT 0xc286
#define TXC_BIST_RX1ERRCNT 0xc287
#define TXC_BIST_RX2ERRCNT 0xc288
#define TXC_BIST_RX3ERRCNT 0xc289
/* BIST type (controls bit pattern in test) */
#define TXC_BIST_CTRL_TYPE_LBN 10
#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
/* Set this to 1 for 10 bit and 0 for 8 bit */
#define TXC_BIST_CTRL_B10EN_LBN 12
/* Enable BIST (write 0 to disable) */
#define TXC_BIST_CTRL_ENAB_LBN 13
/* Stop BIST (self-clears when stop complete) */
#define TXC_BIST_CTRL_STOP_LBN 14
/* Start BIST (cleared by writing 1 to STOP) */
#define TXC_BIST_CTRL_STRT_LBN 15
/* Mt. Diablo test configuration */
#define TXC_MTDIABLO_CTRL 0xc34f
#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
struct txc43128_data {
unsigned long bug10934_timer;
enum efx_phy_mode phy_mode;
enum efx_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
* it reports link down, we reset it every 5 seconds.
*/
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
static void txc_reset_logic(struct efx_nic *efx);
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
 * global reset (it's less clear what a reset of the other MMDs does). */
static int txc_reset_phy(struct efx_nic *efx)
{
int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
return rc;
}
/* Run a single BIST on one MMD */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
efx_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
udelay(TXC_BIST_DURATION);
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
	/* Check all the error counts are 0 and all the frame counts are
	 * non-zero */
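	/* Note (added): the per-lane BIST counters occupy consecutive
	 * MDIO addresses (0xc282..0xc285 for frame counts,
	 * 0xc286..0xc289 for error counts), which is why indexing
	 * TXC_BIST_RX0FRMCNT/TXC_BIST_RX0ERRCNT by lane number works. */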
for (lane = 0; lane < 4; lane++) {
int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
rc = -EIO;
}
}
if (rc == 0)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
static int txc_bist(struct efx_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
{
int mctrl;
/* Turn amplitude down and preemphasis off on the host side
* (PHY<->MAC) as this is believed less likely to upset Falcon
* and no adverse effects have been noted. It probably also
* saves a picowatt or two */
/* Turn off preemphasis */
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
/* Set the line side amplitude and preemphasis to the databook
* defaults as an erratum causes them to be 0 on at least some
 * PHY revisions */
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
falcon_board(efx)->type->init_phy(efx);
}
static int txc43128_phy_probe(struct efx_nic *efx)
{
struct txc43128_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TXC_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
/* Initialisation entry point for this PHY driver */
static int txc43128_phy_init(struct efx_nic *efx)
{
int rc;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
if (rc < 0)
return rc;
txc_apply_defaults(efx);
return 0;
}
/* Set the lane power down state in the global registers */
static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}
/* Set the lane power down state in the analog control registers */
static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
rxctl &= ~rxpd;
} else {
txctl |= txpd;
rxctl |= rxpd;
}
efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
static void txc_set_power(struct efx_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
efx_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
/* Global register bank is in PCS, PHY XS. These control the host
* side and line side settings respectively. */
txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
/* Analog register bank in PMA/PMD, PHY XS */
txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
{
int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (tries--) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
}
if (!tries)
netif_info(efx, hw, efx->net_dev,
TXCNAME " Logic reset timed out!\n");
}
/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
* line-side logic. */
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
txc_reset_phy(efx);
txc_apply_defaults(efx);
falcon_reset_xaui(efx);
mode_change &= ~PHY_MODE_TX_DISABLED;
}
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
/* The data sheet claims this is required after every reconfiguration
* (note at end of 7.1), but we mustn't do it when nothing changes as
* it glitches the link, and reconfigure gets called on link change,
* so we get an IRQ storm on link up. */
if (loop_change || mode_change)
txc_reset_logic(efx);
phy_data->phy_mode = efx->phy_mode;
phy_data->loopback_mode = efx->loopback_mode;
return 0;
}
static void txc43128_phy_fini(struct efx_nic *efx)
{
/* Disable link events */
efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
static void txc43128_phy_remove(struct efx_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}
/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
static bool txc43128_phy_poll(struct efx_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
efx->link_state.up = txc43128_phy_read_link(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
data->bug10934_timer = jiffies;
} else {
if (time_after_eq(jiffies, (data->bug10934_timer +
BUG10934_RESET_INTERVAL))) {
data->bug10934_timer = jiffies;
txc_reset_logic(efx);
}
}
return efx->link_state.up != was_up;
}
static const char *const txc43128_test_names[] = {
"bist"
};
static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
txc_apply_defaults(efx);
results[0] = rc ? -1 : 1;
return rc;
}
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
const struct efx_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
.poll = txc43128_phy_poll,
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};
| gpl-2.0 |
nian0114/CherryS.AOSP-smdk4412 | arch/sh/kernel/kprobes.c | 7833 | 15267 | /*
* Kernel probes (kprobes) for SuperH
*
* Copyright (C) 2007 Chris Smith <chris.smith@st.com>
* Copyright (C) 2006 Lineo Solutions, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
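/*
 * Illustrative helper (added, not used by this file): a rough test for
 * whether an opcode transfers control, built from the masks above.
 * Note the masks are not disjoint (e.g. OPCODE_RTS() only checks the
 * low nibble), so precise decoding must test them in order, as
 * prepare_singlestep() below does. For example, 0x402b ("jmp @r0")
 * matches OPCODE_JMP(), and 0xa00c (a short forward "bra") matches
 * OPCODE_BRA().
 */
static inline int __maybe_unused insn_is_control_transfer(kprobe_opcode_t op)
{
	return OPCODE_JMP(op) || OPCODE_JSR(op) || OPCODE_BRA(op) ||
	       OPCODE_BRAF(op) || OPCODE_BSR(op) || OPCODE_BSRF(op) ||
	       OPCODE_BF_S(op) || OPCODE_BT_S(op) || OPCODE_BF(op) ||
	       OPCODE_BT(op) || OPCODE_RTS(op) || OPCODE_RTE(op);
}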
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
p->opcode = opcode;
return 0;
}
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (*p->addr == BREAKPOINT_INSTRUCTION)
return 1;
return 0;
}
/**
* If an illegal slot instruction exception occurs for an address
* containing a kprobe, remove the probe.
*
* Returns 0 if the exception was handled successfully, 1 otherwise.
*/
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
if (p != NULL) {
printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
(unsigned int)pc + 2);
unregister_kprobe(p);
return 0;
}
return 1;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
if (saved->addr) {
arch_disarm_kprobe(p);
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
saved = &__get_cpu_var(saved_next_opcode2);
if (saved->addr) {
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
}
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
}
/*
* Singlestep is implemented by disabling the current kprobe and setting one
* on the next instruction, following branches. Two probes are set if the
* branch is conditional.
*/
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
__get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
if (p != NULL) {
struct kprobe *op1, *op2;
arch_disarm_kprobe(p);
op1 = &__get_cpu_var(saved_next_opcode);
op2 = &__get_cpu_var(saved_next_opcode2);
if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
unsigned long disp = (p->opcode & 0x0FFF);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 +
regs->regs[reg_nr]);
} else if (OPCODE_RTS(p->opcode)) {
op1->addr = (kprobe_opcode_t *) regs->pr;
} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 1;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 2;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else {
op1->addr = p->addr + 1;
}
op1->opcode = *(op1->addr);
arch_arm_kprobe(op1);
}
}
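/*
 * Worked example (added): for a probed "bra" with opcode 0xa00c the
 * 12-bit displacement field is 0x00c, so the target computed above is
 * regs->pc + 4 + 0x0c * 2 = regs->pc + 28. Note that disp is used
 * unsigned here, so the arithmetic covers forward displacements;
 * conditional branches (bf/bt) get two probes, one on the fall-through
 * address and one on the taken-branch target.
 */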
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
/* Replace the return addr with trampoline addr */
regs->pr = (unsigned long)kretprobe_trampoline;
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (regs->pc);
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* We here save the original kprobes variables and
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
/* Not one of ours: let kernel handle it */
if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
return 1;
ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
/*
* For function-return probes, init_kprobes() establishes a probepoint
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
static void __used kretprobe_trampoline_holder(void)
{
asm volatile (".globl kretprobe_trampoline\n"
"kretprobe_trampoline:\n\t"
"nop\n");
}
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
 * task either because multiple functions in the call path
 * have a return probe installed on them, and/or more than one
 * return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->pc = orig_ret_address;
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
return orig_ret_address;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kprobe_opcode_t *addr = NULL;
struct kprobe *p = NULL;
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
p = &__get_cpu_var(saved_next_opcode);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
addr = __get_cpu_var(saved_current_opcode).addr;
__get_cpu_var(saved_current_opcode).addr = NULL;
p = get_kprobe(addr);
arch_arm_kprobe(p);
p = &__get_cpu_var(saved_next_opcode2);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
}
}
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe, point the pc back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long)cur->addr;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->pc)) != NULL) {
regs->pc = entry->fixup;
return 1;
}
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
 * Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct kprobe *p = NULL;
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (args->regs->pc);
if (val == DIE_TRAP) {
if (!kprobe_running()) {
if (kprobe_handler(args->regs)) {
ret = NOTIFY_STOP;
} else {
/* Not a kprobe trap */
ret = NOTIFY_DONE;
}
} else {
p = get_kprobe(addr);
if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
(kcb->kprobe_status == KPROBE_REENTER)) {
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
if (kprobe_handler(args->regs)) {
ret = NOTIFY_STOP;
} else {
p = __get_cpu_var(current_kprobe);
if (p->break_handler &&
p->break_handler(p, args->regs))
ret = NOTIFY_STOP;
}
}
}
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_r15 = regs->regs[15];
addr = kcb->jprobe_saved_r15;
/*
* TBD: As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
*/
memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
MIN_STACK_SIZE(addr));
regs->pc = (unsigned long)(jp->entry);
return 1;
}
void __kprobes jprobe_return(void)
{
asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long stack_addr = kcb->jprobe_saved_r15;
u8 *addr = (u8 *)regs->pc;
if ((addr >= (u8 *)jprobe_return) &&
(addr <= (u8 *)jprobe_return_end)) {
*regs = kcb->jprobe_saved_regs;
memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
MIN_STACK_SIZE(stack_addr));
kcb->kprobe_status = KPROBE_HIT_SS;
preempt_enable_no_resched();
return 1;
}
return 0;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
| gpl-2.0 |
ShinySide/HispAsian_S5 | arch/x86/kernel/apic/ipi.c | 8089 | 3836 | #include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
unsigned long query_cpu;
unsigned long flags;
/*
* Hack. The clustered APIC addressing mode doesn't allow us to send
* to an arbitrary mask, so I do a unicast to each CPU instead.
* - mbligh
*/
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
query_cpu), vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector)
{
unsigned int this_cpu = smp_processor_id();
unsigned int query_cpu;
unsigned long flags;
/* See Hack comment above */
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu == this_cpu)
continue;
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
query_cpu), vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector)
{
unsigned long flags;
unsigned int query_cpu;
/*
* Hack. The clustered APIC addressing mode doesn't allow us to send
 * to an arbitrary mask, so I do a unicast to each CPU instead. This
* should be modified to do 1 message per cluster ID - mbligh
*/
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
__default_send_IPI_dest_field(
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, apic->dest_logical);
local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector)
{
unsigned long flags;
unsigned int query_cpu;
unsigned int this_cpu = smp_processor_id();
/* See Hack comment above */
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu == this_cpu)
continue;
__default_send_IPI_dest_field(
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, apic->dest_logical);
}
local_irq_restore(flags);
}
/*
* This is only used on smaller machines.
*/
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags;
if (WARN_ONCE(!mask, "empty IPI mask"))
return;
local_irq_save(flags);
WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
local_irq_restore(flags);
}
void default_send_IPI_allbutself(int vector)
{
/*
* if there are no other CPUs in the system then we get an APIC send
* error if we try to broadcast, thus avoid sending IPIs in this case.
*/
if (!(num_online_cpus() > 1))
return;
__default_local_send_IPI_allbutself(vector);
}
void default_send_IPI_all(int vector)
{
__default_local_send_IPI_all(vector);
}
void default_send_IPI_self(int vector)
{
__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}
/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
int i;
for_each_possible_cpu(i) {
if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
return i;
}
return -1;
}
int safe_smp_processor_id(void)
{
int apicid, cpuid;
if (!cpu_has_apic)
return 0;
apicid = hard_smp_processor_id();
if (apicid == BAD_APICID)
return 0;
cpuid = convert_apicid_to_cpu(apicid);
return cpuid >= 0 ? cpuid : 0;
}
#endif
| gpl-2.0 |
TeamLGOG/android_kernel_lge_f320k | drivers/char/pcmcia/cm4000_cs.c | 8089 | 49404 | /*
* A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000"
*
* cm4000_cs.c support.linux@omnikey.com
*
* Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files
* Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files
* Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality
 * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionality
* Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments
*
* current version: 2.4.0gm4
*
* (C) 2000,2001,2002,2003,2004 Omnikey AG
*
* (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
* - Adhere to Kernel CodingStyle
* - Port to 2.6.13 "new" style PCMCIA
* - Check for copy_{from,to}_user return values
* - Use nonseekable_open()
* - add class interface for udev device creation
*
* All rights reserved. Licensed under dual BSD/GPL license.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <linux/cm4000_cs.h>
/* #define ATR_CSUM */
#define reader_to_dev(x) (&x->p_dev->dev)
/* n (debug level) is ignored */
/* additional debug output may be enabled by re-compiling with
* CM4000_DEBUG set */
/* #define CM4000_DEBUG */
#define DEBUGP(n, rdr, x, args...) do { \
dev_dbg(reader_to_dev(rdr), "%s:" x, \
__func__ , ## args); \
} while (0)
static DEFINE_MUTEX(cmm_mutex);
#define T_1SEC (HZ)
#define T_10MSEC msecs_to_jiffies(10)
#define T_20MSEC msecs_to_jiffies(20)
#define T_40MSEC msecs_to_jiffies(40)
#define T_50MSEC msecs_to_jiffies(50)
#define T_100MSEC msecs_to_jiffies(100)
#define T_500MSEC msecs_to_jiffies(500)
static void cm4000_release(struct pcmcia_device *link);
static int major; /* major number we get from the kernel */
/* note: the first state has to have number 0 always */
#define M_FETCH_ATR 0
#define M_TIMEOUT_WAIT 1
#define M_READ_ATR_LEN 2
#define M_READ_ATR 3
#define M_ATR_PRESENT 4
#define M_BAD_CARD 5
#define M_CARDOFF 6
#define LOCK_IO 0
#define LOCK_MONITOR 1
#define IS_AUTOPPS_ACT 6
#define IS_PROCBYTE_PRESENT 7
#define IS_INVREV 8
#define IS_ANY_T0 9
#define IS_ANY_T1 10
#define IS_ATR_PRESENT 11
#define IS_ATR_VALID 12
#define IS_CMM_ABSENT 13
#define IS_BAD_LENGTH 14
#define IS_BAD_CSUM 15
#define IS_BAD_CARD 16
#define REG_FLAGS0(x) (x + 0)
#define REG_FLAGS1(x) (x + 1)
#define REG_NUM_BYTES(x) (x + 2)
#define REG_BUF_ADDR(x) (x + 3)
#define REG_BUF_DATA(x) (x + 4)
#define REG_NUM_SEND(x) (x + 5)
#define REG_BAUDRATE(x) (x + 6)
#define REG_STOPBITS(x) (x + 7)
struct cm4000_dev {
struct pcmcia_device *p_dev;
unsigned char atr[MAX_ATR];
unsigned char rbuf[512];
unsigned char sbuf[512];
	wait_queue_head_t devq;		/* when removing the cardman, this
					   must not be zeroed! */
wait_queue_head_t ioq; /* if IO is locked, wait on this Q */
wait_queue_head_t atrq; /* wait for ATR valid */
wait_queue_head_t readq; /* used by write to wake blk.read */
	/* warning: do not move these fields.
* initialising to zero depends on it - see ZERO_DEV below. */
unsigned char atr_csum;
unsigned char atr_len_retry;
unsigned short atr_len;
unsigned short rlen; /* bytes avail. after write */
unsigned short rpos; /* latest read pos. write zeroes */
unsigned char procbyte; /* T=0 procedure byte */
unsigned char mstate; /* state of card monitor */
unsigned char cwarn; /* slow down warning */
unsigned char flags0; /* cardman IO-flags 0 */
unsigned char flags1; /* cardman IO-flags 1 */
unsigned int mdelay; /* variable monitor speeds, in jiffies */
unsigned int baudv; /* baud value for speed */
unsigned char ta1;
unsigned char proto; /* T=0, T=1, ... */
unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent
access */
unsigned char pts[4];
struct timer_list timer; /* used to keep monitor running */
int monitor_running;
};
#define ZERO_DEV(dev) \
memset(&dev->atr_csum,0, \
sizeof(struct cm4000_dev) - \
offsetof(struct cm4000_dev, atr_csum))
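/*
 * Sketch (added) of the offsetof() idiom used by ZERO_DEV above: only
 * the tail of the structure, from atr_csum onwards, is cleared, so the
 * wait queues declared before it survive. Generically, for a
 * hypothetical struct:
 *
 *	struct example { wait_queue_head_t keep; int first_zeroed; };
 *	memset(&e->first_zeroed, 0,
 *	       sizeof(*e) - offsetof(struct example, first_zeroed));
 */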
static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
/* This table doesn't use spaces after the comma between fields and thus
* violates CodingStyle. However, I don't really think wrapping it around will
* make it any clearer to read -HW */
static unsigned char fi_di_table[10][14] = {
/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */
/*DI */
/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11},
/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11},
/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3},
/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4},
/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5},
/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6},
/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8},
/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
};
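/* Note (added): the table is indexed as fi_di_table[DI][FI]; see
 * set_protocol() below, which looks up
 * fi_di_table[ta1 & 0x0f][(ta1 >> 4) & 0x0f]. Entries of 0x11 mark
 * unsupported Fi/Di combinations and fall back to the 9600 baud
 * default. */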
#ifndef CM4000_DEBUG
#define xoutb outb
#define xinb inb
#else
static inline void xoutb(unsigned char val, unsigned short port)
{
pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
outb(val, port);
}
static inline unsigned char xinb(unsigned short port)
{
unsigned char val;
val = inb(port);
pr_debug("%.2x=inb(%.4x)\n", val, port);
return val;
}
#endif
static inline unsigned char invert_revert(unsigned char ch)
{
return bitrev8(~ch);
}
static void str_invert_revert(unsigned char *b, int len)
{
int i;
for (i = 0; i < len; i++)
b[i] = invert_revert(b[i]);
}
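/*
 * Worked example (added): an inverse-convention card delivers the ATR
 * TS byte as 0x03 on the wire, and bitrev8(~0x03) recovers the ISO
 * 7816 value 0x3f; the same transform maps 0x3f back to 0x03. This is
 * why monitor_card() checks atr[0] == 0x03 before calling
 * str_invert_revert() on the raw buffer, and parse_atr() then tests
 * for 0x3f to set IS_INVREV.
 */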
#define ATRLENCK(dev,pos) \
if (pos>=dev->atr_len || pos>=MAX_ATR) \
goto return_0;
static unsigned int calc_baudv(unsigned char fidi)
{
unsigned int wcrcf, wbrcf, fi_rfu, di_rfu;
fi_rfu = 372;
di_rfu = 1;
/* FI */
switch ((fidi >> 4) & 0x0F) {
case 0x00:
wcrcf = 372;
break;
case 0x01:
wcrcf = 372;
break;
case 0x02:
wcrcf = 558;
break;
case 0x03:
wcrcf = 744;
break;
case 0x04:
wcrcf = 1116;
break;
case 0x05:
wcrcf = 1488;
break;
case 0x06:
wcrcf = 1860;
break;
case 0x07:
wcrcf = fi_rfu;
break;
case 0x08:
wcrcf = fi_rfu;
break;
case 0x09:
wcrcf = 512;
break;
case 0x0A:
wcrcf = 768;
break;
case 0x0B:
wcrcf = 1024;
break;
case 0x0C:
wcrcf = 1536;
break;
case 0x0D:
wcrcf = 2048;
break;
default:
wcrcf = fi_rfu;
break;
}
/* DI */
switch (fidi & 0x0F) {
case 0x00:
wbrcf = di_rfu;
break;
case 0x01:
wbrcf = 1;
break;
case 0x02:
wbrcf = 2;
break;
case 0x03:
wbrcf = 4;
break;
case 0x04:
wbrcf = 8;
break;
case 0x05:
wbrcf = 16;
break;
case 0x06:
wbrcf = 32;
break;
case 0x07:
wbrcf = di_rfu;
break;
case 0x08:
wbrcf = 12;
break;
case 0x09:
wbrcf = 20;
break;
default:
wbrcf = di_rfu;
break;
}
return (wcrcf / wbrcf);
}
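/*
 * Worked examples (added): the default TA(1) byte 0x11 selects
 * Fi = 372 and Di = 1, so calc_baudv(0x11) returns 372 (9600 baud at
 * the standard card clock); a faster card advertising TA(1) = 0x95
 * selects Fi = 512 and Di = 16, giving calc_baudv(0x95) == 32.
 */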
static unsigned short io_read_num_rec_bytes(unsigned int iobase,
unsigned short *s)
{
unsigned short tmp;
tmp = *s = 0;
do {
*s = tmp;
tmp = inb(REG_NUM_BYTES(iobase)) |
(inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0);
} while (tmp != *s);
return *s;
}
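/* Note (added): the do/while above re-reads the 9-bit byte counter
 * (eight bits from REG_NUM_BYTES plus a ninth bit taken from bit 2 of
 * REG_FLAGS0) until two consecutive reads agree, guarding against the
 * counter changing between the two port reads. */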
static int parse_atr(struct cm4000_dev *dev)
{
unsigned char any_t1, any_t0;
unsigned char ch, ifno;
int ix, done;
DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len);
if (dev->atr_len < 3) {
DEBUGP(5, dev, "parse_atr: atr_len < 3\n");
return 0;
}
if (dev->atr[0] == 0x3f)
set_bit(IS_INVREV, &dev->flags);
else
clear_bit(IS_INVREV, &dev->flags);
ix = 1;
ifno = 1;
ch = dev->atr[1];
dev->proto = 0; /* XXX PROTO */
any_t1 = any_t0 = done = 0;
dev->ta1 = 0x11; /* defaults to 9600 baud */
do {
if (ifno == 1 && (ch & 0x10)) {
/* read first interface byte and TA1 is present */
dev->ta1 = dev->atr[2];
DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1);
ifno++;
} else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */
dev->ta1 = 0x11;
ifno++;
}
DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0);
ix += ((ch & 0x10) >> 4) /* no of int.face chars */
+((ch & 0x20) >> 5)
+ ((ch & 0x40) >> 6)
+ ((ch & 0x80) >> 7);
/* ATRLENCK(dev,ix); */
if (ch & 0x80) { /* TDi */
ch = dev->atr[ix];
if ((ch & 0x0f)) {
any_t1 = 1;
DEBUGP(5, dev, "card is capable of T=1\n");
} else {
any_t0 = 1;
DEBUGP(5, dev, "card is capable of T=0\n");
}
} else
done = 1;
} while (!done);
DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n",
ix, dev->atr[1] & 15, any_t1);
if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) {
DEBUGP(5, dev, "length error\n");
return 0;
}
if (any_t0)
set_bit(IS_ANY_T0, &dev->flags);
if (any_t1) { /* compute csum */
dev->atr_csum = 0;
#ifdef ATR_CSUM
		{
			int i;	/* added: 'i' was not declared in this scope */

			for (i = 1; i < dev->atr_len; i++)
				dev->atr_csum ^= dev->atr[i];
		}
		if (dev->atr_csum) {
			set_bit(IS_BAD_CSUM, &dev->flags);
			DEBUGP(5, dev, "bad checksum\n");
			return 0;	/* was 'goto return_0': no such label exists */
		}
#endif
if (any_t0 == 0)
dev->proto = 1; /* XXX PROTO */
set_bit(IS_ANY_T1, &dev->flags);
}
return 1;
}
struct card_fixup {
char atr[12];
u_int8_t atr_len;
u_int8_t stopbits;
};
static struct card_fixup card_fixups[] = {
{ /* ACOS */
.atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 },
.atr_len = 7,
.stopbits = 0x03,
},
{ /* Motorola */
.atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07,
0x41, 0x81, 0x81 },
.atr_len = 11,
.stopbits = 0x04,
},
};
static void set_cardparameter(struct cm4000_dev *dev)
{
int i;
unsigned int iobase = dev->p_dev->resource[0]->start;
u_int8_t stopbits = 0x02; /* ISO default */
DEBUGP(3, dev, "-> set_cardparameter\n");
dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8);
xoutb(dev->flags1, REG_FLAGS1(iobase));
DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1);
/* set baudrate */
xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase));
DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv,
((dev->baudv - 1) & 0xFF));
/* set stopbits */
for (i = 0; i < ARRAY_SIZE(card_fixups); i++) {
if (!memcmp(dev->atr, card_fixups[i].atr,
card_fixups[i].atr_len))
stopbits = card_fixups[i].stopbits;
}
xoutb(stopbits, REG_STOPBITS(iobase));
DEBUGP(3, dev, "<- set_cardparameter\n");
}
static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
{
unsigned long tmp, i;
unsigned short num_bytes_read;
unsigned char pts_reply[4];
ssize_t rc;
unsigned int iobase = dev->p_dev->resource[0]->start;
rc = 0;
DEBUGP(3, dev, "-> set_protocol\n");
DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, "
"ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, "
"ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol,
(unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2,
ptsreq->pts3);
/* Fill PTS structure */
dev->pts[0] = 0xff;
dev->pts[1] = 0x00;
tmp = ptsreq->protocol;
while ((tmp = (tmp >> 1)) > 0)
dev->pts[1]++;
dev->proto = dev->pts[1]; /* Set new protocol */
dev->pts[1] = (0x01 << 4) | (dev->pts[1]);
/* Correct Fi/Di according to CM4000 Fi/Di table */
DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1);
/* set Fi/Di according to ATR TA(1) */
dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F];
/* Calculate PCK character */
dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2];
DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n",
dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]);
/* check card convention */
if (test_bit(IS_INVREV, &dev->flags))
str_invert_revert(dev->pts, 4);
/* reset SM */
xoutb(0x80, REG_FLAGS0(iobase));
/* Enable access to the message buffer */
DEBUGP(5, dev, "Enable access to the messages buffer\n");
dev->flags1 = 0x20 /* T_Active */
| (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */
| ((dev->baudv >> 8) & 0x01); /* MSB-baud */
xoutb(dev->flags1, REG_FLAGS1(iobase));
DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n",
dev->flags1);
/* write challenge to the buffer */
DEBUGP(5, dev, "Write challenge to buffer: ");
for (i = 0; i < 4; i++) {
xoutb(i, REG_BUF_ADDR(iobase));
xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */
#ifdef CM4000_DEBUG
pr_debug("0x%.2x ", dev->pts[i]);
}
pr_debug("\n");
#else
}
#endif
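	/* Note (added): the loop's closing brace appears in both
	 * branches of the #ifdef above, so the for-loop is well formed
	 * whether or not CM4000_DEBUG is defined. */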
/* set number of bytes to write */
DEBUGP(5, dev, "Set number of bytes to write\n");
xoutb(0x04, REG_NUM_SEND(iobase));
/* Trigger CARDMAN CONTROLLER */
xoutb(0x50, REG_FLAGS0(iobase));
/* Monitor progress */
/* wait for xmit done */
DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n");
for (i = 0; i < 100; i++) {
if (inb(REG_FLAGS0(iobase)) & 0x08) {
DEBUGP(5, dev, "NumRecBytes is valid\n");
break;
}
mdelay(10);
}
if (i == 100) {
DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
"valid\n");
rc = -EIO;
goto exit_setprotocol;
}
DEBUGP(5, dev, "Reading NumRecBytes\n");
for (i = 0; i < 100; i++) {
io_read_num_rec_bytes(iobase, &num_bytes_read);
if (num_bytes_read >= 4) {
DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
break;
}
mdelay(10);
}
	/* check whether it is a short PTS reply */
if (num_bytes_read == 3)
i = 0;
if (i == 100) {
DEBUGP(5, dev, "Timeout reading num_bytes_read\n");
rc = -EIO;
goto exit_setprotocol;
}
DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n");
xoutb(0x80, REG_FLAGS0(iobase));
/* Read PPS reply */
DEBUGP(5, dev, "Read PPS reply\n");
for (i = 0; i < num_bytes_read; i++) {
xoutb(i, REG_BUF_ADDR(iobase));
pts_reply[i] = inb(REG_BUF_DATA(iobase));
}
#ifdef CM4000_DEBUG
DEBUGP(2, dev, "PTSreply: ");
for (i = 0; i < num_bytes_read; i++) {
pr_debug("0x%.2x ", pts_reply[i]);
}
pr_debug("\n");
#endif /* CM4000_DEBUG */
DEBUGP(5, dev, "Clear Tactive in Flags1\n");
xoutb(0x20, REG_FLAGS1(iobase));
/* Compare ptsreq and ptsreply */
if ((dev->pts[0] == pts_reply[0]) &&
(dev->pts[1] == pts_reply[1]) &&
(dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) {
/* setcardparameter according to PPS */
dev->baudv = calc_baudv(dev->pts[2]);
set_cardparameter(dev);
} else if ((dev->pts[0] == pts_reply[0]) &&
((dev->pts[1] & 0xef) == pts_reply[1]) &&
((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) {
/* short PTS reply, set card parameter to default values */
dev->baudv = calc_baudv(0x11);
set_cardparameter(dev);
} else
rc = -EIO;
exit_setprotocol:
DEBUGP(3, dev, "<- set_protocol\n");
return rc;
}
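/*
 * Worked example (added): requesting T=1 on a card whose TA(1) is the
 * default 0x11 produces the PPS frame ff 11 11 ff: PPSS = 0xff,
 * PPS0 = 0x10 | proto = 0x11, PPS1 = fi_di_table[1][1] = 0x11, and
 * PCK = 0xff ^ 0x11 ^ 0x11 = 0xff.
 */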
static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
{
/* note: statemachine is assumed to be reset */
if (inb(REG_FLAGS0(iobase)) & 8) {
clear_bit(IS_ATR_VALID, &dev->flags);
set_bit(IS_CMM_ABSENT, &dev->flags);
return 0; /* detect CMM = 1 -> failure */
}
/* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */
xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase));
if ((inb(REG_FLAGS0(iobase)) & 8) == 0) {
clear_bit(IS_ATR_VALID, &dev->flags);
set_bit(IS_CMM_ABSENT, &dev->flags);
return 0; /* detect CMM=0 -> failure */
}
/* clear detectCMM again by restoring original flags1 */
xoutb(dev->flags1, REG_FLAGS1(iobase));
return 1;
}
static void terminate_monitor(struct cm4000_dev *dev)
{
/* tell the monitor to stop and wait until
* it terminates.
*/
DEBUGP(3, dev, "-> terminate_monitor\n");
wait_event_interruptible(dev->devq,
test_and_set_bit(LOCK_MONITOR,
(void *)&dev->flags));
/* now, LOCK_MONITOR has been set.
* allow a last cycle in the monitor.
* the monitor will indicate that it has
* finished by clearing this bit.
*/
DEBUGP(5, dev, "Now allow last cycle of monitor!\n");
while (test_bit(LOCK_MONITOR, (void *)&dev->flags))
msleep(25);
DEBUGP(5, dev, "Delete timer\n");
del_timer_sync(&dev->timer);
#ifdef CM4000_DEBUG
dev->monitor_running = 0;
#endif
DEBUGP(3, dev, "<- terminate_monitor\n");
}
/*
* monitor the card every 50msec. as a side-effect, retrieve the
* atr once a card is inserted. another side-effect of retrieving the
* atr is that the card will be powered on, so there is no need to
 * power on the card explicitly from the application: the driver
* is already doing that for you.
*/
static void monitor_card(unsigned long p)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
int i, atrc;
DEBUGP(7, dev, "-> monitor_card\n");
/* if someone has set the lock for us: we're done! */
if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) {
DEBUGP(4, dev, "About to stop monitor\n");
/* no */
dev->rlen =
dev->rpos =
dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
dev->mstate = M_FETCH_ATR;
clear_bit(LOCK_MONITOR, &dev->flags);
/* close et al. are sleeping on devq, so wake it */
wake_up_interruptible(&dev->devq);
DEBUGP(2, dev, "<- monitor_card (we are done now)\n");
return;
}
/* try to lock io: if it is already locked, just add another timer */
if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) {
DEBUGP(4, dev, "Couldn't get IO lock\n");
goto return_with_timer;
}
/* is a card/a reader inserted at all ? */
dev->flags0 = xinb(REG_FLAGS0(iobase));
DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0);
DEBUGP(7, dev, "smartcard present: %s\n",
dev->flags0 & 1 ? "yes" : "no");
DEBUGP(7, dev, "cardman present: %s\n",
dev->flags0 == 0xff ? "no" : "yes");
if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
|| dev->flags0 == 0xff) { /* no cardman inserted */
/* no */
dev->rlen =
dev->rpos =
dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
dev->mstate = M_FETCH_ATR;
dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */
if (dev->flags0 == 0xff) {
DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n");
set_bit(IS_CMM_ABSENT, &dev->flags);
} else if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
DEBUGP(4, dev, "clear IS_CMM_ABSENT bit "
"(card is removed)\n");
clear_bit(IS_CMM_ABSENT, &dev->flags);
}
goto release_io;
} else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) {
/* cardman and card present but cardman was absent before
* (after suspend with inserted card) */
DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n");
clear_bit(IS_CMM_ABSENT, &dev->flags);
}
if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n");
goto release_io;
}
switch (dev->mstate) {
unsigned char flags0;
case M_CARDOFF:
DEBUGP(4, dev, "M_CARDOFF\n");
flags0 = inb(REG_FLAGS0(iobase));
if (flags0 & 0x02) {
			/* wait until Flags0 indicates power is off */
dev->mdelay = T_10MSEC;
} else {
/* Flags0 indicate power off and no card inserted now;
* Reset CARDMAN CONTROLLER */
xoutb(0x80, REG_FLAGS0(iobase));
/* prepare for fetching ATR again: after card off ATR
* is read again automatically */
dev->rlen =
dev->rpos =
dev->atr_csum =
dev->atr_len_retry = dev->cwarn = 0;
dev->mstate = M_FETCH_ATR;
/* minimal gap between CARDOFF and read ATR is 50msec */
dev->mdelay = T_50MSEC;
}
break;
case M_FETCH_ATR:
DEBUGP(4, dev, "M_FETCH_ATR\n");
xoutb(0x80, REG_FLAGS0(iobase));
DEBUGP(4, dev, "Reset BAUDV to 9600\n");
dev->baudv = 0x173; /* 9600 */
xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */
xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */
xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud
value */
/* warm start vs. power on: */
xoutb(dev->flags0 & 2 ? 0x46 : 0x44, REG_FLAGS0(iobase));
dev->mdelay = T_40MSEC;
dev->mstate = M_TIMEOUT_WAIT;
break;
case M_TIMEOUT_WAIT:
DEBUGP(4, dev, "M_TIMEOUT_WAIT\n");
/* numRecBytes */
io_read_num_rec_bytes(iobase, &dev->atr_len);
dev->mdelay = T_10MSEC;
dev->mstate = M_READ_ATR_LEN;
break;
case M_READ_ATR_LEN:
DEBUGP(4, dev, "M_READ_ATR_LEN\n");
/* infinite loop possible, since there is no timeout */
#define MAX_ATR_LEN_RETRY 100
if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) {
if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */
dev->mdelay = T_10MSEC;
dev->mstate = M_READ_ATR;
}
} else {
dev->atr_len = s;
dev->atr_len_retry = 0; /* set new timeout */
}
DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len);
break;
case M_READ_ATR:
DEBUGP(4, dev, "M_READ_ATR\n");
xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
for (i = 0; i < dev->atr_len; i++) {
xoutb(i, REG_BUF_ADDR(iobase));
dev->atr[i] = inb(REG_BUF_DATA(iobase));
}
/* Deactivate T_Active flags */
DEBUGP(4, dev, "Deactivate T_Active flags\n");
dev->flags1 = 0x01;
xoutb(dev->flags1, REG_FLAGS1(iobase));
/* atr is present (which doesn't mean it's valid) */
set_bit(IS_ATR_PRESENT, &dev->flags);
if (dev->atr[0] == 0x03)
str_invert_revert(dev->atr, dev->atr_len);
atrc = parse_atr(dev);
if (atrc == 0) { /* atr invalid */
dev->mdelay = 0;
dev->mstate = M_BAD_CARD;
} else {
dev->mdelay = T_50MSEC;
dev->mstate = M_ATR_PRESENT;
set_bit(IS_ATR_VALID, &dev->flags);
}
if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
DEBUGP(4, dev, "monitor_card: ATR valid\n");
/* if ta1 == 0x11, no PPS necessary (default values) */
/* do not do PPS with multi protocol cards */
if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) &&
(dev->ta1 != 0x11) &&
!(test_bit(IS_ANY_T0, &dev->flags) &&
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
ptsreq.protocol = (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
ptsreq.pts3 = 0x00;
if (set_protocol(dev, &ptsreq) == 0) {
DEBUGP(4, dev, "AUTOPPS ret SUCC\n");
clear_bit(IS_AUTOPPS_ACT, &dev->flags);
wake_up_interruptible(&dev->atrq);
} else {
DEBUGP(4, dev, "AUTOPPS failed: "
"repower using defaults\n");
/* prepare for repowering */
clear_bit(IS_ATR_PRESENT, &dev->flags);
clear_bit(IS_ATR_VALID, &dev->flags);
dev->rlen =
dev->rpos =
dev->atr_csum =
dev->atr_len_retry = dev->cwarn = 0;
dev->mstate = M_FETCH_ATR;
dev->mdelay = T_50MSEC;
}
} else {
/* for cards which use slightly different
* params (extra guard time) */
set_cardparameter(dev);
if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1)
DEBUGP(4, dev, "AUTOPPS already active "
"2nd try:use default values\n");
if (dev->ta1 == 0x11)
DEBUGP(4, dev, "No AUTOPPS necessary "
"TA(1)==0x11\n");
if (test_bit(IS_ANY_T0, &dev->flags)
&& test_bit(IS_ANY_T1, &dev->flags))
DEBUGP(4, dev, "Do NOT perform AUTOPPS "
"with multiprotocol cards\n");
clear_bit(IS_AUTOPPS_ACT, &dev->flags);
wake_up_interruptible(&dev->atrq);
}
} else {
DEBUGP(4, dev, "ATR invalid\n");
wake_up_interruptible(&dev->atrq);
}
break;
case M_BAD_CARD:
DEBUGP(4, dev, "M_BAD_CARD\n");
/* slow down warning, but prompt immediately after insertion */
if (dev->cwarn == 0 || dev->cwarn == 10) {
set_bit(IS_BAD_CARD, &dev->flags);
dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
if (test_bit(IS_BAD_CSUM, &dev->flags)) {
DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
"be zero) failed\n", dev->atr_csum);
}
#ifdef CM4000_DEBUG
else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
DEBUGP(4, dev, "ATR length error\n");
} else {
DEBUGP(4, dev, "card damaged or wrong way "
"inserted\n");
}
#endif
dev->cwarn = 0;
wake_up_interruptible(&dev->atrq); /* wake open */
}
dev->cwarn++;
dev->mdelay = T_100MSEC;
dev->mstate = M_FETCH_ATR;
break;
default:
DEBUGP(7, dev, "Unknown action\n");
break; /* nothing */
}
release_io:
DEBUGP(7, dev, "release_io\n");
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq); /* whoever needs IO */
return_with_timer:
DEBUGP(7, dev, "<- monitor_card (returns with timer)\n");
mod_timer(&dev->timer, jiffies + dev->mdelay);
clear_bit(LOCK_MONITOR, &dev->flags);
}
/* Interface to userland (file_operations) */
static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
ssize_t rc;
int i, j, k;
DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid);
if (count == 0) /* according to manpage */
return 0;
if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
test_bit(IS_CMM_ABSENT, &dev->flags))
return -ENODEV;
if (test_bit(IS_BAD_CSUM, &dev->flags))
return -EIO;
/* also see the note about this in cmm_write */
if (wait_event_interruptible
(dev->atrq,
((filp->f_flags & O_NONBLOCK)
|| (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
return -ERESTARTSYS;
}
if (test_bit(IS_ATR_VALID, &dev->flags) == 0)
return -EIO;
/* this one implements blocking IO */
if (wait_event_interruptible
(dev->readq,
((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
return -ERESTARTSYS;
}
/* lock io */
if (wait_event_interruptible
(dev->ioq,
((filp->f_flags & O_NONBLOCK)
|| (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
return -ERESTARTSYS;
}
rc = 0;
dev->flags0 = inb(REG_FLAGS0(iobase));
if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
|| dev->flags0 == 0xff) { /* no cardman inserted */
clear_bit(IS_ATR_VALID, &dev->flags);
if (dev->flags0 & 1) {
set_bit(IS_CMM_ABSENT, &dev->flags);
rc = -ENODEV;
} else {
rc = -EIO;
}
goto release_io;
}
DEBUGP(4, dev, "begin read answer\n");
j = min(count, (size_t)(dev->rlen - dev->rpos));
k = dev->rpos;
if (k + j > 255)
j = 256 - k;
DEBUGP(4, dev, "read1 j=%d\n", j);
for (i = 0; i < j; i++) {
xoutb(k++, REG_BUF_ADDR(iobase));
dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
}
j = min(count, (size_t)(dev->rlen - dev->rpos));
if (k + j > 255) {
DEBUGP(4, dev, "read2 j=%d\n", j);
dev->flags1 |= 0x10; /* MSB buf addr set */
xoutb(dev->flags1, REG_FLAGS1(iobase));
for (; i < j; i++) {
xoutb(k++, REG_BUF_ADDR(iobase));
dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
}
}
if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) {
DEBUGP(4, dev, "T=0 and count > buffer\n");
dev->rbuf[i] = dev->rbuf[i - 1];
dev->rbuf[i - 1] = dev->procbyte;
j++;
}
count = j;
dev->rpos = dev->rlen + 1;
/* Clear T1Active */
DEBUGP(4, dev, "Clear T1Active\n");
dev->flags1 &= 0xdf;
xoutb(dev->flags1, REG_FLAGS1(iobase));
xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
/* last check before exit */
if (!io_detect_cm4000(iobase, dev)) {
rc = -ENODEV;
goto release_io;
}
if (test_bit(IS_INVREV, &dev->flags) && count > 0)
str_invert_revert(dev->rbuf, count);
if (copy_to_user(buf, dev->rbuf, count))
rc = -EFAULT;
release_io:
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n",
(rc < 0 ? rc : count));
return rc < 0 ? rc : count;
}
static ssize_t cmm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
unsigned char tmp;
unsigned char infolen;
unsigned char sendT0;
unsigned short nsend;
unsigned short nr;
ssize_t rc;
int i;
DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid);
if (count == 0) /* according to manpage */
return 0;
if (dev->proto == 0 && count < 4) {
/* T0 must have at least 4 bytes */
DEBUGP(4, dev, "T0 short write\n");
return -EIO;
}
nr = count & 0x1ff; /* max bytes to write */
sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
test_bit(IS_CMM_ABSENT, &dev->flags))
return -ENODEV;
if (test_bit(IS_BAD_CSUM, &dev->flags)) {
DEBUGP(4, dev, "bad csum\n");
return -EIO;
}
/*
* wait for atr to become valid.
	 * note: it is important to lock this code. if we don't, the monitor
	 * could run between test_bit and the call to sleep on the
	 * atr-queue. if *then* the monitor detects a valid atr, it will wake
	 * up any process on the atr-queue, *but* since we have not yet gone
	 * to sleep on that queue, the wake_up is missed and the calling
	 * process would sleep forever (until interrupted). also, do *not*
	 * restore_flags before sleep_on, because this could result in the
	 * same situation!
*/
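	/*
	 * illustrative interleaving of the lost-wakeup race described above
	 * (a sketch, not a real trace):
	 *
	 *   this path                       monitor (timer callback)
	 *   ---------                       ------------------------
	 *   test_bit(IS_ATR_PRESENT) == 0
	 *                                   atr becomes valid
	 *                                   wake_up_interruptible(&dev->atrq)
	 *   goes to sleep on dev->atrq      (the wakeup is already gone)
	 *   ...sleeps until a signal...
	 *
	 * wait_event_interruptible() below closes this window by re-testing
	 * the condition after queueing itself on dev->atrq.
	 */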
if (wait_event_interruptible
(dev->atrq,
((filp->f_flags & O_NONBLOCK)
|| (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
return -ERESTARTSYS;
}
if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */
DEBUGP(4, dev, "invalid ATR\n");
return -EIO;
}
/* lock io */
if (wait_event_interruptible
(dev->ioq,
((filp->f_flags & O_NONBLOCK)
|| (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
return -ERESTARTSYS;
}
	if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count))) {
		/* don't leak LOCK_IO on the fault path */
		rc = -EFAULT;
		goto release_io;
	}
rc = 0;
dev->flags0 = inb(REG_FLAGS0(iobase));
if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
|| dev->flags0 == 0xff) { /* no cardman inserted */
clear_bit(IS_ATR_VALID, &dev->flags);
if (dev->flags0 & 1) {
set_bit(IS_CMM_ABSENT, &dev->flags);
rc = -ENODEV;
} else {
DEBUGP(4, dev, "IO error\n");
rc = -EIO;
}
goto release_io;
}
xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
if (!io_detect_cm4000(iobase, dev)) {
rc = -ENODEV;
goto release_io;
}
/* reflect T=0 send/read mode in flags1 */
dev->flags1 |= (sendT0);
set_cardparameter(dev);
/* dummy read, reset flag procedure received */
tmp = inb(REG_FLAGS1(iobase));
dev->flags1 = 0x20 /* T_Active */
| (sendT0)
| (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity */
| (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */
DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1);
xoutb(dev->flags1, REG_FLAGS1(iobase));
/* xmit data */
DEBUGP(4, dev, "Xmit data\n");
for (i = 0; i < nr; i++) {
if (i >= 256) {
dev->flags1 = 0x20 /* T_Active */
| (sendT0) /* SendT0 */
/* inverse parity: */
| (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)
| (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */
| 0x10; /* set address high */
DEBUGP(4, dev, "dev->flags = 0x%.2x - set address "
"high\n", dev->flags1);
xoutb(dev->flags1, REG_FLAGS1(iobase));
}
if (test_bit(IS_INVREV, &dev->flags)) {
DEBUGP(4, dev, "Apply inverse convention for 0x%.2x "
"-> 0x%.2x\n", (unsigned char)dev->sbuf[i],
invert_revert(dev->sbuf[i]));
xoutb(i, REG_BUF_ADDR(iobase));
xoutb(invert_revert(dev->sbuf[i]),
REG_BUF_DATA(iobase));
} else {
xoutb(i, REG_BUF_ADDR(iobase));
xoutb(dev->sbuf[i], REG_BUF_DATA(iobase));
}
}
DEBUGP(4, dev, "Xmit done\n");
if (dev->proto == 0) {
/* T=0 proto: 0 byte reply */
if (nr == 4) {
DEBUGP(4, dev, "T=0 assumes 0 byte reply\n");
xoutb(i, REG_BUF_ADDR(iobase));
if (test_bit(IS_INVREV, &dev->flags))
xoutb(0xff, REG_BUF_DATA(iobase));
else
xoutb(0x00, REG_BUF_DATA(iobase));
}
/* numSendBytes */
if (sendT0)
nsend = nr;
else {
if (nr == 4)
nsend = 5;
else {
nsend = 5 + (unsigned char)dev->sbuf[4];
if (dev->sbuf[4] == 0)
nsend += 0x100;
}
}
} else
nsend = nr;
/* T0: output procedure byte */
if (test_bit(IS_INVREV, &dev->flags)) {
DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) "
"0x%.2x\n", invert_revert(dev->sbuf[1]));
xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase));
} else {
DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]);
xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase));
}
DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n",
(unsigned char)(nsend & 0xff));
xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase));
DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n",
0x40 /* SM_Active */
| (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
|(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
|(nsend & 0x100) >> 8 /* MSB numSendBytes */ );
xoutb(0x40 /* SM_Active */
| (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
|(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
|(nsend & 0x100) >> 8, /* MSB numSendBytes */
REG_FLAGS0(iobase));
/* wait for xmit done */
if (dev->proto == 1) {
DEBUGP(4, dev, "Wait for xmit done\n");
for (i = 0; i < 1000; i++) {
if (inb(REG_FLAGS0(iobase)) & 0x08)
break;
msleep_interruptible(10);
}
if (i == 1000) {
DEBUGP(4, dev, "timeout waiting for xmit done\n");
rc = -EIO;
goto release_io;
}
}
/* T=1: wait for infoLen */
infolen = 0;
if (dev->proto) {
/* wait until infoLen is valid */
for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */
io_read_num_rec_bytes(iobase, &s);
if (s >= 3) {
infolen = inb(REG_FLAGS1(iobase));
DEBUGP(4, dev, "infolen=%d\n", infolen);
break;
}
msleep_interruptible(10);
}
if (i == 6000) {
DEBUGP(4, dev, "timeout waiting for infoLen\n");
rc = -EIO;
goto release_io;
}
} else
clear_bit(IS_PROCBYTE_PRESENT, &dev->flags);
	/* numRecBytes | bit9 of numRecBytes */
io_read_num_rec_bytes(iobase, &dev->rlen);
for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */
if (dev->proto) {
if (dev->rlen >= infolen + 4)
break;
}
msleep_interruptible(10);
		/* numRecBytes | bit9 of numRecBytes */
io_read_num_rec_bytes(iobase, &s);
if (s > dev->rlen) {
DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n");
i = 0; /* reset timeout */
dev->rlen = s;
}
/* T=0: we are done when numRecBytes doesn't
* increment any more and NoProcedureByte
* is set and numRecBytes == bytes sent + 6
* (header bytes + data + 1 for sw2)
* except when the card replies an error
* which means, no data will be sent back.
*/
else if (dev->proto == 0) {
if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) {
/* no procedure byte received since last read */
DEBUGP(1, dev, "NoProcedure byte set\n");
/* i=0; */
} else {
/* procedure byte received since last read */
DEBUGP(1, dev, "NoProcedure byte unset "
"(reset timeout)\n");
dev->procbyte = inb(REG_FLAGS1(iobase));
DEBUGP(1, dev, "Read procedure byte 0x%.2x\n",
dev->procbyte);
				i = 0; /* reset timeout */
}
if (inb(REG_FLAGS0(iobase)) & 0x08) {
DEBUGP(1, dev, "T0Done flag (read reply)\n");
break;
}
}
if (dev->proto)
infolen = inb(REG_FLAGS1(iobase));
}
if (i == 600) {
DEBUGP(1, dev, "timeout waiting for numRecBytes\n");
rc = -EIO;
goto release_io;
} else {
if (dev->proto == 0) {
DEBUGP(1, dev, "Wait for T0Done bit to be set\n");
for (i = 0; i < 1000; i++) {
if (inb(REG_FLAGS0(iobase)) & 0x08)
break;
msleep_interruptible(10);
}
if (i == 1000) {
DEBUGP(1, dev, "timeout waiting for T0Done\n");
rc = -EIO;
goto release_io;
}
dev->procbyte = inb(REG_FLAGS1(iobase));
DEBUGP(4, dev, "Read procedure byte 0x%.2x\n",
dev->procbyte);
io_read_num_rec_bytes(iobase, &dev->rlen);
DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen);
}
}
/* T=1: read offset=zero, T=0: read offset=after challenge */
dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr;
DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n",
dev->rlen, dev->rpos, nr);
release_io:
DEBUGP(4, dev, "Reset SM\n");
xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
if (rc < 0) {
DEBUGP(4, dev, "Write failed but clear T_Active\n");
dev->flags1 &= 0xdf;
xoutb(dev->flags1, REG_FLAGS1(iobase));
}
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
wake_up_interruptible(&dev->readq); /* tell read we have data */
/* ITSEC E2: clear write buffer */
memset((char *)dev->sbuf, 0, 512);
/* return error or actually written bytes */
DEBUGP(2, dev, "<- cmm_write\n");
return rc < 0 ? rc : nr;
}
static void start_monitor(struct cm4000_dev *dev)
{
DEBUGP(3, dev, "-> start_monitor\n");
if (!dev->monitor_running) {
DEBUGP(5, dev, "create, init and add timer\n");
setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
dev->monitor_running = 1;
mod_timer(&dev->timer, jiffies);
} else
DEBUGP(5, dev, "monitor already running\n");
DEBUGP(3, dev, "<- start_monitor\n");
}
static void stop_monitor(struct cm4000_dev *dev)
{
DEBUGP(3, dev, "-> stop_monitor\n");
if (dev->monitor_running) {
DEBUGP(5, dev, "stopping monitor\n");
terminate_monitor(dev);
/* reset monitor SM */
clear_bit(IS_ATR_VALID, &dev->flags);
clear_bit(IS_ATR_PRESENT, &dev->flags);
} else
DEBUGP(5, dev, "monitor already stopped\n");
DEBUGP(3, dev, "<- stop_monitor\n");
}
static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
struct inode *inode = filp->f_path.dentry->d_inode;
struct pcmcia_device *link;
int size;
int rc;
void __user *argp = (void __user *)arg;
#ifdef CM4000_DEBUG
char *ioctl_names[CM_IOC_MAXNR + 1] = {
[_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS",
[_IOC_NR(CM_IOCGATR)] "CM_IOCGATR",
[_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF",
[_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS",
[_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL",
};
DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
iminor(inode), ioctl_names[_IOC_NR(cmd)]);
#endif
mutex_lock(&cmm_mutex);
rc = -ENODEV;
link = dev_table[iminor(inode)];
if (!pcmcia_dev_present(link)) {
DEBUGP(4, dev, "DEV_OK false\n");
goto out;
}
if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
DEBUGP(4, dev, "CMM_ABSENT flag set\n");
goto out;
}
rc = -EINVAL;
if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
DEBUGP(4, dev, "ioctype mismatch\n");
goto out;
}
if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
DEBUGP(4, dev, "iocnr mismatch\n");
goto out;
}
size = _IOC_SIZE(cmd);
rc = -EFAULT;
DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
_IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
if (_IOC_DIR(cmd) & _IOC_READ) {
if (!access_ok(VERIFY_WRITE, argp, size))
goto out;
}
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (!access_ok(VERIFY_READ, argp, size))
goto out;
}
rc = 0;
switch (cmd) {
case CM_IOCGSTATUS:
DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n");
{
int status;
/* clear other bits, but leave inserted & powered as
* they are */
status = dev->flags0 & 3;
if (test_bit(IS_ATR_PRESENT, &dev->flags))
status |= CM_ATR_PRESENT;
if (test_bit(IS_ATR_VALID, &dev->flags))
status |= CM_ATR_VALID;
if (test_bit(IS_CMM_ABSENT, &dev->flags))
status |= CM_NO_READER;
if (test_bit(IS_BAD_CARD, &dev->flags))
status |= CM_BAD_CARD;
if (copy_to_user(argp, &status, sizeof(int)))
rc = -EFAULT;
}
break;
case CM_IOCGATR:
DEBUGP(4, dev, "... in CM_IOCGATR\n");
{
struct atreq __user *atreq = argp;
int tmp;
/* allow nonblocking io and being interrupted */
if (wait_event_interruptible
(dev->atrq,
((filp->f_flags & O_NONBLOCK)
|| (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
!= 0)))) {
if (filp->f_flags & O_NONBLOCK)
rc = -EAGAIN;
else
rc = -ERESTARTSYS;
break;
}
rc = -EFAULT;
if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
tmp = -1;
if (copy_to_user(&(atreq->atr_len), &tmp,
sizeof(int)))
break;
} else {
if (copy_to_user(atreq->atr, dev->atr,
dev->atr_len))
break;
tmp = dev->atr_len;
if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
break;
}
rc = 0;
break;
}
case CM_IOCARDOFF:
#ifdef CM4000_DEBUG
DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
if (dev->flags0 & 0x01) {
DEBUGP(4, dev, " Card inserted\n");
} else {
DEBUGP(2, dev, " No card inserted\n");
}
if (dev->flags0 & 0x02) {
DEBUGP(4, dev, " Card powered\n");
} else {
DEBUGP(2, dev, " Card not powered\n");
}
#endif
/* is a card inserted and powered? */
if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) {
/* get IO lock */
if (wait_event_interruptible
(dev->ioq,
((filp->f_flags & O_NONBLOCK)
|| (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
== 0)))) {
if (filp->f_flags & O_NONBLOCK)
rc = -EAGAIN;
else
rc = -ERESTARTSYS;
break;
}
/* Set Flags0 = 0x42 */
DEBUGP(4, dev, "Set Flags0=0x42 \n");
xoutb(0x42, REG_FLAGS0(iobase));
clear_bit(IS_ATR_PRESENT, &dev->flags);
clear_bit(IS_ATR_VALID, &dev->flags);
dev->mstate = M_CARDOFF;
clear_bit(LOCK_IO, &dev->flags);
if (wait_event_interruptible
(dev->atrq,
((filp->f_flags & O_NONBLOCK)
|| (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
0)))) {
if (filp->f_flags & O_NONBLOCK)
rc = -EAGAIN;
else
rc = -ERESTARTSYS;
break;
}
}
/* release lock */
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
rc = 0;
break;
case CM_IOCSPTS:
{
struct ptsreq krnptsreq;
if (copy_from_user(&krnptsreq, argp,
sizeof(struct ptsreq))) {
rc = -EFAULT;
break;
}
rc = 0;
DEBUGP(4, dev, "... in CM_IOCSPTS\n");
/* wait for ATR to get valid */
if (wait_event_interruptible
(dev->atrq,
((filp->f_flags & O_NONBLOCK)
|| (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
!= 0)))) {
if (filp->f_flags & O_NONBLOCK)
rc = -EAGAIN;
else
rc = -ERESTARTSYS;
break;
}
/* get IO lock */
if (wait_event_interruptible
(dev->ioq,
((filp->f_flags & O_NONBLOCK)
|| (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
== 0)))) {
if (filp->f_flags & O_NONBLOCK)
rc = -EAGAIN;
else
rc = -ERESTARTSYS;
break;
}
if ((rc = set_protocol(dev, &krnptsreq)) != 0) {
/* auto power_on again */
dev->mstate = M_FETCH_ATR;
clear_bit(IS_ATR_VALID, &dev->flags);
}
/* release lock */
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
}
break;
#ifdef CM4000_DEBUG
case CM_IOSDBGLVL:
rc = -ENOTTY;
break;
#endif
default:
DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
rc = -ENOTTY;
}
out:
mutex_unlock(&cmm_mutex);
return rc;
}
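/*
 * a hypothetical userspace sketch (not part of this driver) of polling the
 * reader via CM_IOCGSTATUS; the device node name and the error handling
 * are illustrative assumptions:
 *
 *	int fd = open("/dev/cmm0", O_RDWR);
 *	int status;
 *
 *	if (fd >= 0 && ioctl(fd, CM_IOCGSTATUS, &status) == 0) {
 *		if (status & CM_ATR_VALID)
 *			... card is up; exchange data via read()/write() ...
 *	}
 */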
static int cmm_open(struct inode *inode, struct file *filp)
{
struct cm4000_dev *dev;
struct pcmcia_device *link;
int minor = iminor(inode);
int ret;
if (minor >= CM4000_MAX_DEV)
return -ENODEV;
mutex_lock(&cmm_mutex);
link = dev_table[minor];
if (link == NULL || !pcmcia_dev_present(link)) {
ret = -ENODEV;
goto out;
}
if (link->open) {
ret = -EBUSY;
goto out;
}
dev = link->priv;
filp->private_data = dev;
DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n",
imajor(inode), minor, current->comm, current->pid);
	/* init device variables; they may be "polluted" after close
	 * or the device may never have been closed (i.e. open failed)
	 */
ZERO_DEV(dev);
	/* opening will always block, since the
	 * monitor is started by open and we have
	 * to wait for the ATR to become valid,
	 * i.e. block until it is valid (or a
	 * card is inserted)
	 */
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto out;
}
dev->mdelay = T_50MSEC;
/* start monitoring the cardstatus */
start_monitor(dev);
link->open = 1; /* only one open per device */
DEBUGP(2, dev, "<- cmm_open\n");
ret = nonseekable_open(inode, filp);
out:
mutex_unlock(&cmm_mutex);
return ret;
}
static int cmm_close(struct inode *inode, struct file *filp)
{
struct cm4000_dev *dev;
struct pcmcia_device *link;
int minor = iminor(inode);
if (minor >= CM4000_MAX_DEV)
return -ENODEV;
link = dev_table[minor];
if (link == NULL)
return -ENODEV;
dev = link->priv;
DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n",
imajor(inode), minor);
stop_monitor(dev);
ZERO_DEV(dev);
link->open = 0; /* only one open per device */
wake_up(&dev->devq); /* socket removed? */
DEBUGP(2, dev, "cmm_close\n");
return 0;
}
static void cmm_cm4000_release(struct pcmcia_device * link)
{
struct cm4000_dev *dev = link->priv;
	/* don't terminate the monitor; rather, rely on
	 * close doing that for us.
	 */
DEBUGP(3, dev, "-> cmm_cm4000_release\n");
while (link->open) {
printk(KERN_INFO MODULE_NAME ": delaying release until "
"process has terminated\n");
/* note: don't interrupt us:
* close the applications which own
* the devices _first_ !
*/
wait_event(dev->devq, (link->open == 0));
}
/* dev->devq=NULL; this cannot be zeroed earlier */
DEBUGP(3, dev, "<- cmm_cm4000_release\n");
return;
}
/*==== Interface to PCMCIA Layer =======================================*/
static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
return pcmcia_request_io(p_dev);
}
static int cm4000_config(struct pcmcia_device * link, int devno)
{
struct cm4000_dev *dev;
link->config_flags |= CONF_AUTO_SET_IO;
/* read the config-tuples */
if (pcmcia_loop_config(link, cm4000_config_check, NULL))
goto cs_release;
if (pcmcia_enable_device(link))
goto cs_release;
dev = link->priv;
return 0;
cs_release:
cm4000_release(link);
return -ENODEV;
}
static int cm4000_suspend(struct pcmcia_device *link)
{
struct cm4000_dev *dev;
dev = link->priv;
stop_monitor(dev);
return 0;
}
static int cm4000_resume(struct pcmcia_device *link)
{
struct cm4000_dev *dev;
dev = link->priv;
if (link->open)
start_monitor(dev);
return 0;
}
static void cm4000_release(struct pcmcia_device *link)
{
cmm_cm4000_release(link); /* delay release until device closed */
pcmcia_disable_device(link);
}
static int cm4000_probe(struct pcmcia_device *link)
{
struct cm4000_dev *dev;
int i, ret;
for (i = 0; i < CM4000_MAX_DEV; i++)
if (dev_table[i] == NULL)
break;
if (i == CM4000_MAX_DEV) {
printk(KERN_NOTICE MODULE_NAME ": all devices in use\n");
return -ENODEV;
}
/* create a new cm4000_cs device */
dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL);
if (dev == NULL)
return -ENOMEM;
dev->p_dev = link;
link->priv = dev;
dev_table[i] = link;
init_waitqueue_head(&dev->devq);
init_waitqueue_head(&dev->ioq);
init_waitqueue_head(&dev->atrq);
init_waitqueue_head(&dev->readq);
ret = cm4000_config(link, i);
if (ret) {
dev_table[i] = NULL;
kfree(dev);
return ret;
}
device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i);
return 0;
}
static void cm4000_detach(struct pcmcia_device *link)
{
struct cm4000_dev *dev = link->priv;
int devno;
/* find device */
for (devno = 0; devno < CM4000_MAX_DEV; devno++)
if (dev_table[devno] == link)
break;
if (devno == CM4000_MAX_DEV)
return;
stop_monitor(dev);
cm4000_release(link);
dev_table[devno] = NULL;
kfree(dev);
device_destroy(cmm_class, MKDEV(major, devno));
return;
}
static const struct file_operations cm4000_fops = {
.owner = THIS_MODULE,
.read = cmm_read,
.write = cmm_write,
.unlocked_ioctl = cmm_ioctl,
.open = cmm_open,
	.release = cmm_close,
.llseek = no_llseek,
};
static const struct pcmcia_device_id cm4000_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, cm4000_ids);
static struct pcmcia_driver cm4000_driver = {
.owner = THIS_MODULE,
.name = "cm4000_cs",
.probe = cm4000_probe,
.remove = cm4000_detach,
.suspend = cm4000_suspend,
.resume = cm4000_resume,
.id_table = cm4000_ids,
};
static int __init cmm_init(void)
{
int rc;
cmm_class = class_create(THIS_MODULE, "cardman_4000");
if (IS_ERR(cmm_class))
return PTR_ERR(cmm_class);
major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
if (major < 0) {
printk(KERN_WARNING MODULE_NAME
": could not get major number\n");
class_destroy(cmm_class);
return major;
}
rc = pcmcia_register_driver(&cm4000_driver);
if (rc < 0) {
unregister_chrdev(major, DEVICE_NAME);
class_destroy(cmm_class);
return rc;
}
return 0;
}
static void __exit cmm_exit(void)
{
pcmcia_unregister_driver(&cm4000_driver);
unregister_chrdev(major, DEVICE_NAME);
class_destroy(cmm_class);
}
module_init(cmm_init);
module_exit(cmm_exit);
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
bedalus/ATnT | Documentation/auxdisplay/cfag12864b-example.c | 14489 | 5954 | /*
* Filename: cfag12864b-example.c
* Version: 0.1.0
* Description: cfag12864b LCD userspace example program
* License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* ------------------------
* start of cfag12864b code
* ------------------------
*/
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#define CFAG12864B_WIDTH (128)
#define CFAG12864B_HEIGHT (64)
#define CFAG12864B_SIZE (128 * 64 / 8)
#define CFAG12864B_BPB (8)
#define CFAG12864B_ADDRESS(x, y) ((y) * CFAG12864B_WIDTH / \
CFAG12864B_BPB + (x) / CFAG12864B_BPB)
#define CFAG12864B_BIT(n) (((unsigned char) 1) << (n))
#undef CFAG12864B_DOCHECK
#ifdef CFAG12864B_DOCHECK
#define CFAG12864B_CHECK(x, y) ((x) < CFAG12864B_WIDTH && \
(y) < CFAG12864B_HEIGHT)
#else
#define CFAG12864B_CHECK(x, y) (1)
#endif
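/*
 * worked example of the addressing macros above: pixel (x = 10, y = 3)
 * lives in buffer byte CFAG12864B_ADDRESS(10, 3) = 3 * 128 / 8 + 10 / 8 = 49,
 * at bit CFAG12864B_BIT(10 % 8) = 1 << 2
 */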
int cfag12864b_fd;
unsigned char *cfag12864b_mem;
unsigned char cfag12864b_buffer[CFAG12864B_SIZE];
/*
* init a cfag12864b framebuffer device
*
* No error: return = 0
* Unable to open: return = -1
* Unable to mmap: return = -2
*/
static int cfag12864b_init(char *path)
{
cfag12864b_fd = open(path, O_RDWR);
if (cfag12864b_fd == -1)
return -1;
cfag12864b_mem = mmap(0, CFAG12864B_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, cfag12864b_fd, 0);
if (cfag12864b_mem == MAP_FAILED) {
close(cfag12864b_fd);
return -2;
}
return 0;
}
/*
* exit a cfag12864b framebuffer device
*/
static void cfag12864b_exit(void)
{
munmap(cfag12864b_mem, CFAG12864B_SIZE);
close(cfag12864b_fd);
}
/*
* set (x, y) pixel
*/
static void cfag12864b_set(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] |=
CFAG12864B_BIT(x % CFAG12864B_BPB);
}
/*
* unset (x, y) pixel
*/
static void cfag12864b_unset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &=
~CFAG12864B_BIT(x % CFAG12864B_BPB);
}
/*
* is set (x, y) pixel?
*
* Pixel off: return = 0
* Pixel on: return = 1
*/
static unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
if (cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &
CFAG12864B_BIT(x % CFAG12864B_BPB))
return 1;
return 0;
}
/*
* not (x, y) pixel
*/
static void cfag12864b_not(unsigned char x, unsigned char y)
{
if (cfag12864b_isset(x, y))
cfag12864b_unset(x, y);
else
cfag12864b_set(x, y);
}
/*
* fill (set all pixels)
*/
static void cfag12864b_fill(void)
{
unsigned short i;
for (i = 0; i < CFAG12864B_SIZE; i++)
cfag12864b_buffer[i] = 0xFF;
}
/*
* clear (unset all pixels)
*/
static void cfag12864b_clear(void)
{
unsigned short i;
for (i = 0; i < CFAG12864B_SIZE; i++)
cfag12864b_buffer[i] = 0;
}
/*
* format a [128*64] matrix
*
* Pixel off: src[i] = 0
* Pixel on: src[i] > 0
*/
static void cfag12864b_format(unsigned char *matrix)
{
unsigned char i, j, n;
for (i = 0; i < CFAG12864B_HEIGHT; i++)
for (j = 0; j < CFAG12864B_WIDTH / CFAG12864B_BPB; j++) {
cfag12864b_buffer[i * CFAG12864B_WIDTH / CFAG12864B_BPB +
j] = 0;
for (n = 0; n < CFAG12864B_BPB; n++)
if (matrix[i * CFAG12864B_WIDTH +
j * CFAG12864B_BPB + n])
cfag12864b_buffer[i * CFAG12864B_WIDTH /
CFAG12864B_BPB + j] |=
CFAG12864B_BIT(n);
}
}
/*
* blit buffer to lcd
*/
static void cfag12864b_blit(void)
{
memcpy(cfag12864b_mem, cfag12864b_buffer, CFAG12864B_SIZE);
}
/*
* ----------------------
* end of cfag12864b code
* ----------------------
*/
#include <stdio.h>
#define EXAMPLES 6
static void example(unsigned char n)
{
unsigned short i, j;
unsigned char matrix[CFAG12864B_WIDTH * CFAG12864B_HEIGHT];
if (n > EXAMPLES)
return;
printf("Example %i/%i - ", n, EXAMPLES);
switch (n) {
case 1:
printf("Draw points setting bits");
cfag12864b_clear();
for (i = 0; i < CFAG12864B_WIDTH; i += 2)
for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
cfag12864b_set(i, j);
break;
case 2:
printf("Clear the LCD");
cfag12864b_clear();
break;
case 3:
printf("Draw rows formatting a [128*64] matrix");
memset(matrix, 0, CFAG12864B_WIDTH * CFAG12864B_HEIGHT);
for (i = 0; i < CFAG12864B_WIDTH; i++)
for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
matrix[j * CFAG12864B_WIDTH + i] = 1;
cfag12864b_format(matrix);
break;
case 4:
printf("Fill the lcd");
cfag12864b_fill();
break;
case 5:
printf("Draw columns unsetting bits");
for (i = 0; i < CFAG12864B_WIDTH; i += 2)
for (j = 0; j < CFAG12864B_HEIGHT; j++)
cfag12864b_unset(i, j);
break;
case 6:
printf("Do negative not-ing all bits");
for (i = 0; i < CFAG12864B_WIDTH; i++)
for (j = 0; j < CFAG12864B_HEIGHT; j ++)
cfag12864b_not(i, j);
break;
}
puts(" - [Press Enter]");
}
int main(int argc, char *argv[])
{
unsigned char n;
if (argc != 2) {
printf(
"Sintax: %s fbdev\n"
"Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
return -1;
}
if (cfag12864b_init(argv[1])) {
printf("Can't init %s fbdev\n", argv[1]);
return -2;
}
for (n = 1; n <= EXAMPLES; n++) {
example(n);
cfag12864b_blit();
while (getchar() != '\n');
}
cfag12864b_exit();
return 0;
}
| gpl-2.0 |
abligh/qemu-upstream-4.2-testing | hw/9pfs/coxattr.c | 154 | 2524 |
/*
* Virtio 9p backend
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "fsdev/qemu-fsdev.h"
#include "qemu-thread.h"
#include "qemu-coroutine.h"
#include "virtio-9p-coth.h"
int v9fs_co_llistxattr(V9fsPDU *pdu, V9fsPath *path, void *value, size_t size)
{
int err;
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
return -EINTR;
}
v9fs_path_read_lock(s);
v9fs_co_run_in_worker(
{
err = s->ops->llistxattr(&s->ctx, path, value, size);
if (err < 0) {
err = -errno;
}
});
v9fs_path_unlock(s);
return err;
}
int v9fs_co_lgetxattr(V9fsPDU *pdu, V9fsPath *path,
V9fsString *xattr_name,
void *value, size_t size)
{
int err;
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
return -EINTR;
}
v9fs_path_read_lock(s);
v9fs_co_run_in_worker(
{
err = s->ops->lgetxattr(&s->ctx, path,
xattr_name->data,
value, size);
if (err < 0) {
err = -errno;
}
});
v9fs_path_unlock(s);
return err;
}
int v9fs_co_lsetxattr(V9fsPDU *pdu, V9fsPath *path,
V9fsString *xattr_name, void *value,
size_t size, int flags)
{
int err;
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
return -EINTR;
}
v9fs_path_read_lock(s);
v9fs_co_run_in_worker(
{
err = s->ops->lsetxattr(&s->ctx, path,
xattr_name->data, value,
size, flags);
if (err < 0) {
err = -errno;
}
});
v9fs_path_unlock(s);
return err;
}
int v9fs_co_lremovexattr(V9fsPDU *pdu, V9fsPath *path,
V9fsString *xattr_name)
{
int err;
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
return -EINTR;
}
v9fs_path_read_lock(s);
v9fs_co_run_in_worker(
{
err = s->ops->lremovexattr(&s->ctx, path, xattr_name->data);
if (err < 0) {
err = -errno;
}
});
v9fs_path_unlock(s);
return err;
}
| gpl-2.0 |
phenyl-sphinx/linux | drivers/cpufreq/freq_table.c | 154 | 7687 | /*
* linux/drivers/cpufreq/freq_table.c
*
* Copyright (C) 2002 - 2003 Dominik Brodowski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/module.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
unsigned int freq;
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
if (!cpufreq_boost_enabled()
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
max_freq = freq;
}
policy->min = policy->cpuinfo.min_freq = min_freq;
policy->max = policy->cpuinfo.max_freq = max_freq;
if (policy->min == ~0)
return -EINVAL;
else
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
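/*
 * A minimal sketch (not taken from any real driver) of the table a platform
 * driver might hand to cpufreq_frequency_table_cpuinfo(); the frequencies
 * are illustrative:
 *
 *	static struct cpufreq_frequency_table example_table[] = {
 *		{ .driver_data = 0, .frequency = 300000 },
 *		{ .driver_data = 1, .frequency = 600000 },
 *		{ .driver_data = 2, .frequency = CPUFREQ_ENTRY_INVALID },
 *		{ .driver_data = 3, .frequency = 1200000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 * cpufreq_frequency_table_cpuinfo(policy, example_table) would set
 * cpuinfo.min_freq to 300000 and cpuinfo.max_freq to 1200000 (kHz),
 * skipping the CPUFREQ_ENTRY_INVALID row.
 */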
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
unsigned int freq, next_larger = ~0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
cpufreq_verify_within_cpu_limits(policy);
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
if ((freq >= policy->min) && (freq <= policy->max)) {
found = true;
break;
}
if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
if (!found) {
policy->max = next_larger;
cpufreq_verify_within_cpu_limits(policy);
}
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
/*
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *table =
cpufreq_frequency_get_table(policy->cpu);
if (!table)
return -ENODEV;
return cpufreq_frequency_table_verify(policy, table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int target_freq,
unsigned int relation,
unsigned int *index)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
.frequency = 0,
};
struct cpufreq_frequency_table suboptimal = {
.driver_data = ~0,
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
unsigned int freq, diff, i = 0;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
switch (relation) {
case CPUFREQ_RELATION_H:
suboptimal.frequency = ~0;
break;
case CPUFREQ_RELATION_L:
case CPUFREQ_RELATION_C:
optimal.frequency = ~0;
break;
}
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
i = pos - table;
if ((freq < policy->min) || (freq > policy->max))
continue;
if (freq == target_freq) {
optimal.driver_data = i;
break;
}
switch (relation) {
case CPUFREQ_RELATION_H:
if (freq < target_freq) {
if (freq >= optimal.frequency) {
optimal.frequency = freq;
optimal.driver_data = i;
}
} else {
if (freq <= suboptimal.frequency) {
suboptimal.frequency = freq;
suboptimal.driver_data = i;
}
}
break;
case CPUFREQ_RELATION_L:
if (freq > target_freq) {
if (freq <= optimal.frequency) {
optimal.frequency = freq;
optimal.driver_data = i;
}
} else {
if (freq >= suboptimal.frequency) {
suboptimal.frequency = freq;
suboptimal.driver_data = i;
}
}
break;
case CPUFREQ_RELATION_C:
diff = abs(freq - target_freq);
if (diff < optimal.frequency ||
(diff == optimal.frequency &&
freq > table[optimal.driver_data].frequency)) {
optimal.frequency = diff;
optimal.driver_data = i;
}
break;
}
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i)
return -EINVAL;
*index = suboptimal.driver_data;
} else
*index = optimal.driver_data;
pr_debug("target index is %u, freq is:%u kHz\n", *index,
table[*index].frequency);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
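/*
 * Worked example, assuming the example_table sketched above: a request for
 * target_freq = 550000 resolves to the 600000 kHz entry under
 * CPUFREQ_RELATION_L (lowest frequency at or above the target) and to the
 * 300000 kHz entry under CPUFREQ_RELATION_H (highest frequency at or below
 * the target).
 */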
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
struct cpufreq_frequency_table *pos, *table;
table = cpufreq_frequency_get_table(policy->cpu);
if (unlikely(!table)) {
pr_debug("%s: Unable to find frequency table\n", __func__);
return -ENOENT;
}
cpufreq_for_each_valid_entry(pos, table)
if (pos->frequency == freq)
return pos - table;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
bool show_boost)
{
ssize_t count = 0;
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
if (!table)
return -ENODEV;
cpufreq_for_each_valid_entry(pos, table) {
/*
* show_boost = true and driver_data = BOOST freq
* display BOOST freqs
*
* show_boost = false and driver_data = BOOST freq
* show_boost = true and driver_data != BOOST freq
* continue - do not display anything
*
* show_boost = false and driver_data != BOOST freq
* display NON BOOST freqs
*/
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
count += sprintf(&buf[count], "%d ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
return count;
}
#define cpufreq_attr_available_freq(_name) \
struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
__ATTR_RO(_name##_frequencies)
/**
 * scaling_available_frequencies_show - show available normal frequencies for
* the specified CPU
*/
static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
char *buf)
{
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/**
 * scaling_boost_frequencies_show - show available boost frequencies for
* the specified CPU
*/
static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
char *buf)
{
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_CPU_FREQ_BOOST_SW
&cpufreq_freq_attr_scaling_boost_freqs,
#endif
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
int ret = cpufreq_frequency_table_cpuinfo(policy, table);
if (!ret)
policy->freq_table = table;
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");
| gpl-2.0 |
allanm84/linux-imx | drivers/staging/gdm724x/netlink_k.c | 410 | 3507 | /*
* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/netlink.h>
#include <asm/byteorder.h>
#include <net/sock.h>
#include "netlink_k.h"
#if defined(DEFINE_MUTEX)
static DEFINE_MUTEX(netlink_mutex);
#else
static struct semaphore netlink_mutex;
#define mutex_lock(x) down(x)
#define mutex_unlock(x) up(x)
#endif
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
#define ND_NLMSG_S_LEN(len) (len + ND_IFINDEX_LEN)
#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len - ND_IFINDEX_LEN)
#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
#define ND_MAX_MSG_LEN (1024 * 32)
static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
static void netlink_rcv_cb(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct net_device *dev;
u32 mlen;
void *msg;
int ifindex;
if (!rcv_cb) {
pr_err("nl cb - unregistered\n");
return;
}
if (skb->len < NLMSG_SPACE(0)) {
pr_err("nl cb - invalid skb length\n");
return;
}
nlh = (struct nlmsghdr *)skb->data;
if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
pr_err("nl cb - invalid length (%d,%d)\n",
skb->len, nlh->nlmsg_len);
return;
}
memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
msg = ND_NLMSG_DATA(nlh);
mlen = ND_NLMSG_R_LEN(nlh);
dev = dev_get_by_index(&init_net, ifindex);
if (dev) {
rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
dev_put(dev);
} else {
pr_err("nl cb - dev (%d) not found\n", ifindex);
}
}
static void netlink_rcv(struct sk_buff *skb)
{
mutex_lock(&netlink_mutex);
netlink_rcv_cb(skb);
mutex_unlock(&netlink_mutex);
}
struct sock *netlink_init(int unit,
void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
{
struct sock *sock;
struct netlink_kernel_cfg cfg = {
.input = netlink_rcv,
};
#if !defined(DEFINE_MUTEX)
init_MUTEX(&netlink_mutex);
#endif
sock = netlink_kernel_create(&init_net, unit, &cfg);
if (sock)
rcv_cb = cb;
return sock;
}
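/*
 * Minimal usage sketch; the callback name and the unit number are
 * illustrative assumptions, not part of this file:
 *
 *	static void example_rcv(struct net_device *dev, u16 type,
 *				void *msg, int len)
 *	{
 *		... dispatch msg for dev ...
 *	}
 *
 *	sock = netlink_init(unit, example_rcv);
 *	...
 *	netlink_exit(sock);
 */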
void netlink_exit(struct sock *sock)
{
sock_release(sock->sk_socket);
}
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
{
static u32 seq;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
int ret = 0;
if (group > ND_MAX_GROUP)
return -EINVAL;
if (!netlink_has_listeners(sock, group+1))
return -ESRCH;
skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
seq++;
nlh = nlmsg_put(skb, 0, seq, type, len, 0);
memcpy(NLMSG_DATA(nlh), msg, len);
NETLINK_CB(skb).portid = 0;
NETLINK_CB(skb).dst_group = 0;
ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
if (!ret)
return len;
if (ret != -ESRCH)
pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n",
group, type, len, ret);
else if (netlink_has_listeners(sock, group+1))
return -EAGAIN;
return ret;
}
| gpl-2.0 |
Lprigara/KernelLinuxRaspberry | drivers/mfd/pcf50633-adc.c | 410 | 6033 | /* NXP PCF50633 ADC Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <balajirrao@openmoko.org>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte, Andy Green and Werner Almesberger
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* NOTE: This driver does not yet support subtractive ADC mode, which means
* you can do only one measurement per read request.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/adc.h>
struct pcf50633_adc_request {
int mux;
int avg;
void (*callback)(struct pcf50633 *, void *, int);
void *callback_param;
};
struct pcf50633_adc_sync_request {
int result;
struct completion completion;
};
#define PCF50633_MAX_ADC_FIFO_DEPTH 8
struct pcf50633_adc {
struct pcf50633 *pcf;
/* Private stuff */
struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
int queue_head;
int queue_tail;
struct mutex queue_mutex;
};
static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
{
return platform_get_drvdata(pcf->adc_pdev);
}
static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
{
channel &= PCF50633_ADCC1_ADCMUX_MASK;
/* kill ratiometric, but enable ACCSW biasing */
pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);
/* start ADC conversion on selected channel */
pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
}
static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
{
struct pcf50633_adc *adc = __to_adc(pcf);
int head;
head = adc->queue_head;
if (!adc->queue[head])
return;
adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
}
static int
adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
{
struct pcf50633_adc *adc = __to_adc(pcf);
int head, tail;
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
tail = adc->queue_tail;
if (adc->queue[tail]) {
mutex_unlock(&adc->queue_mutex);
dev_err(pcf->dev, "ADC queue is full, dropping request\n");
return -EBUSY;
}
adc->queue[tail] = req;
if (head == tail)
trigger_next_adc_job_if_any(pcf);
adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
mutex_unlock(&adc->queue_mutex);
return 0;
}
static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
int result)
{
struct pcf50633_adc_sync_request *req = param;
req->result = result;
complete(&req->completion);
}
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
struct pcf50633_adc_sync_request req;
int ret;
init_completion(&req.completion);
ret = pcf50633_adc_async_read(pcf, mux, avg,
pcf50633_adc_sync_read_callback, &req);
if (ret)
return ret;
wait_for_completion(&req.completion);
return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
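/*
 * Usage sketch for a hypothetical caller, assuming the mux/average
 * constants from <linux/mfd/pcf50633/adc.h>:
 *
 *	int res = pcf50633_adc_sync_read(pcf, PCF50633_ADCC1_MUX_BATSNS_RES,
 *					 PCF50633_ADCC1_AVERAGE_16);
 *	if (res < 0)
 *		... the conversion failed or could not be queued ...
 *
 * The call blocks until the queued conversion completes, so it must not
 * be used from atomic context.
 */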
int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
void (*callback)(struct pcf50633 *, void *, int),
void *callback_param)
{
struct pcf50633_adc_request *req;
/* req is freed when the result is ready, in interrupt handler */
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->mux = mux;
req->avg = avg;
req->callback = callback;
req->callback_param = callback_param;
return adc_enqueue_request(pcf, req);
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
static int adc_result(struct pcf50633 *pcf)
{
u8 adcs1, adcs3;
u16 result;
adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK);
dev_dbg(pcf->dev, "adc result = %d\n", result);
return result;
}
static void pcf50633_adc_irq(int irq, void *data)
{
struct pcf50633_adc *adc = data;
struct pcf50633 *pcf = adc->pcf;
struct pcf50633_adc_request *req;
int head, res;
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
req = adc->queue[head];
if (WARN_ON(!req)) {
dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
mutex_unlock(&adc->queue_mutex);
return;
}
adc->queue[head] = NULL;
adc->queue_head = (head + 1) &
(PCF50633_MAX_ADC_FIFO_DEPTH - 1);
res = adc_result(pcf);
trigger_next_adc_job_if_any(pcf);
mutex_unlock(&adc->queue_mutex);
req->callback(pcf, req->callback_param, res);
kfree(req);
}
static int pcf50633_adc_probe(struct platform_device *pdev)
{
struct pcf50633_adc *adc;
adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
if (!adc)
return -ENOMEM;
adc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, adc);
pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
pcf50633_adc_irq, adc);
mutex_init(&adc->queue_mutex);
return 0;
}
static int pcf50633_adc_remove(struct platform_device *pdev)
{
struct pcf50633_adc *adc = platform_get_drvdata(pdev);
int i, head;
pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
if (WARN_ON(adc->queue[head]))
dev_err(adc->pcf->dev,
"adc driver removed with request pending\n");
for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
kfree(adc->queue[i]);
mutex_unlock(&adc->queue_mutex);
return 0;
}
static struct platform_driver pcf50633_adc_driver = {
.driver = {
.name = "pcf50633-adc",
},
.probe = pcf50633_adc_probe,
.remove = pcf50633_adc_remove,
};
module_platform_driver(pcf50633_adc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-adc");
| gpl-2.0 |
BigBrother1984/android_kernel_lge_mako | arch/arm/mach-msm/board-8930-camera.c | 410 | 15896 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/mach-types.h>
#include <linux/gpio.h>
#include <mach/camera.h>
#include <mach/msm_bus_board.h>
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8930.h"
#ifdef CONFIG_MSM_CAMERA
#if (defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)) && \
defined(CONFIG_I2C)
static struct i2c_board_info cam_expander_i2c_info[] = {
{
I2C_BOARD_INFO("sx1508q", 0x22),
.platform_data = &msm8930_sx150x_data[SX150X_CAM]
},
};
static struct msm_cam_expander_info cam_expander_info[] = {
{
cam_expander_i2c_info,
MSM_8930_GSBI4_QUP_I2C_BUS_ID,
},
};
#endif
static struct gpiomux_setting cam_settings[] = {
{
.func = GPIOMUX_FUNC_GPIO, /*suspend*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_DOWN,
},
{
.func = GPIOMUX_FUNC_1, /*active 1*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_NONE,
},
{
.func = GPIOMUX_FUNC_GPIO, /*active 2*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_NONE,
},
{
.func = GPIOMUX_FUNC_1, /*active 3*/
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_NONE,
},
{
.func = GPIOMUX_FUNC_5, /*active 4*/
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_UP,
},
{
.func = GPIOMUX_FUNC_6, /*active 5*/
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_UP,
},
{
.func = GPIOMUX_FUNC_2, /*active 6*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_UP,
},
{
.func = GPIOMUX_FUNC_3, /*active 7*/
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_UP,
},
{
.func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_KEEPER,
},
{
.func = GPIOMUX_FUNC_2, /*active 9*/
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_NONE,
},
};
static struct msm_gpiomux_config msm8930_cam_common_configs[] = {
{
.gpio = 2,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[2],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 3,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[1],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 4,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[9],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 5,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[1],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 76,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[2],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 107,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[2],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
{
.gpio = 54,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[2],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
};
static struct msm_gpiomux_config msm8930_cam_2d_configs[] = {
{
.gpio = 18,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[3],
[GPIOMUX_SUSPENDED] = &cam_settings[8],
},
},
{
.gpio = 19,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[3],
[GPIOMUX_SUSPENDED] = &cam_settings[8],
},
},
{
.gpio = 20,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[3],
[GPIOMUX_SUSPENDED] = &cam_settings[8],
},
},
{
.gpio = 21,
.settings = {
[GPIOMUX_ACTIVE] = &cam_settings[3],
[GPIOMUX_SUSPENDED] = &cam_settings[8],
},
},
};
#define VFE_CAMIF_TIMER1_GPIO 2
#define VFE_CAMIF_TIMER2_GPIO 3
#define VFE_CAMIF_TIMER3_GPIO_INT 4
static struct msm_camera_sensor_strobe_flash_data strobe_flash_xenon = {
.flash_trigger = VFE_CAMIF_TIMER2_GPIO,
.flash_charge = VFE_CAMIF_TIMER1_GPIO,
.flash_charge_done = VFE_CAMIF_TIMER3_GPIO_INT,
.flash_recharge_duration = 50000,
.irq = MSM_GPIO_TO_INT(VFE_CAMIF_TIMER3_GPIO_INT),
};
#ifdef CONFIG_MSM_CAMERA_FLASH
static struct msm_camera_sensor_flash_src msm_flash_src = {
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_TPS61310,
};
#endif
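/*
 * In the msm_bus vector tables below, .ab is the average (arbitrated)
 * bandwidth and .ib the instantaneous (peak) bandwidth requested on the
 * master->slave path, in bytes per second; the all-zero "init" vectors
 * reserve no bandwidth while the camera is idle.
 */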
static struct msm_bus_vectors cam_init_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors cam_preview_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 27648000,
.ib = 2656000000UL,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors cam_video_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 600000000,
.ib = 2656000000UL,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 206807040,
.ib = 488816640,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors cam_snapshot_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 600000000,
.ib = 2656000000UL,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 540000000,
.ib = 1350000000,
},
};
static struct msm_bus_vectors cam_zsl_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 600000000,
.ib = 2656000000UL,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 540000000,
.ib = 1350000000,
},
};
static struct msm_bus_vectors cam_video_ls_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 348192000,
.ib = 617103360,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 206807040,
.ib = 488816640,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 540000000,
.ib = 1350000000,
},
};
static struct msm_bus_vectors cam_dual_vectors[] = {
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 302071680,
.ib = 1208286720,
},
{
.src = MSM_BUS_MASTER_VPE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 206807040,
.ib = 488816640,
},
{
.src = MSM_BUS_MASTER_JPEG_ENC,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 540000000,
.ib = 1350000000,
},
};
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
cam_init_vectors,
},
{
ARRAY_SIZE(cam_preview_vectors),
cam_preview_vectors,
},
{
ARRAY_SIZE(cam_video_vectors),
cam_video_vectors,
},
{
ARRAY_SIZE(cam_snapshot_vectors),
cam_snapshot_vectors,
},
{
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
{
ARRAY_SIZE(cam_video_ls_vectors),
cam_video_ls_vectors,
},
{
ARRAY_SIZE(cam_dual_vectors),
cam_dual_vectors,
},
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
cam_bus_client_config,
ARRAY_SIZE(cam_bus_client_config),
.name = "msm_camera",
};
static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = {
{
.csid_core = 0,
.is_vpe = 1,
.cam_bus_scale_table = &cam_bus_client_pdata,
},
{
.csid_core = 1,
.is_vpe = 1,
.cam_bus_scale_table = &cam_bus_client_pdata,
},
};
static struct camera_vreg_t msm_8930_cam_vreg[] = {
{"cam_vdig", REG_LDO, 1200000, 1200000, 105000},
{"cam_vio", REG_VS, 0, 0, 0},
{"cam_vana", REG_LDO, 2800000, 2850000, 85600},
{"cam_vaf", REG_LDO, 2800000, 2850000, 300000},
};
static struct gpio msm8930_common_cam_gpio[] = {
{20, GPIOF_DIR_IN, "CAMIF_I2C_DATA"},
{21, GPIOF_DIR_IN, "CAMIF_I2C_CLK"},
};
static struct gpio msm8930_front_cam_gpio[] = {
{4, GPIOF_DIR_IN, "CAMIF_MCLK"},
{76, GPIOF_DIR_OUT, "CAM_RESET"},
};
static struct gpio msm8930_back_cam_gpio[] = {
{5, GPIOF_DIR_IN, "CAMIF_MCLK"},
{107, GPIOF_DIR_OUT, "CAM_RESET"},
{54, GPIOF_DIR_OUT, "CAM_STBY_N"},
};
static struct msm_gpio_set_tbl msm8930_front_cam_gpio_set_tbl[] = {
{76, GPIOF_OUT_INIT_LOW, 1000},
{76, GPIOF_OUT_INIT_HIGH, 4000},
};
static struct msm_gpio_set_tbl msm8930_back_cam_gpio_set_tbl[] = {
{54, GPIOF_OUT_INIT_LOW, 1000},
{54, GPIOF_OUT_INIT_HIGH, 4000},
{107, GPIOF_OUT_INIT_LOW, 1000},
{107, GPIOF_OUT_INIT_HIGH, 4000},
};
static struct msm_camera_gpio_conf msm_8930_front_cam_gpio_conf = {
.cam_gpiomux_conf_tbl = msm8930_cam_2d_configs,
.cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8930_cam_2d_configs),
.cam_gpio_common_tbl = msm8930_common_cam_gpio,
.cam_gpio_common_tbl_size = ARRAY_SIZE(msm8930_common_cam_gpio),
.cam_gpio_req_tbl = msm8930_front_cam_gpio,
.cam_gpio_req_tbl_size = ARRAY_SIZE(msm8930_front_cam_gpio),
.cam_gpio_set_tbl = msm8930_front_cam_gpio_set_tbl,
.cam_gpio_set_tbl_size = ARRAY_SIZE(msm8930_front_cam_gpio_set_tbl),
};
static struct msm_camera_gpio_conf msm_8930_back_cam_gpio_conf = {
.cam_gpiomux_conf_tbl = msm8930_cam_2d_configs,
.cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8930_cam_2d_configs),
.cam_gpio_common_tbl = msm8930_common_cam_gpio,
.cam_gpio_common_tbl_size = ARRAY_SIZE(msm8930_common_cam_gpio),
.cam_gpio_req_tbl = msm8930_back_cam_gpio,
.cam_gpio_req_tbl_size = ARRAY_SIZE(msm8930_back_cam_gpio),
.cam_gpio_set_tbl = msm8930_back_cam_gpio_set_tbl,
.cam_gpio_set_tbl_size = ARRAY_SIZE(msm8930_back_cam_gpio_set_tbl),
};
static struct i2c_board_info msm_act_main_cam_i2c_info = {
I2C_BOARD_INFO("msm_actuator", 0x11),
};
static struct msm_actuator_info msm_act_main_cam_0_info = {
.board_info = &msm_act_main_cam_i2c_info,
.cam_name = MSM_ACTUATOR_MAIN_CAM_0,
.bus_id = MSM_8930_GSBI4_QUP_I2C_BUS_ID,
.vcm_pwd = 0,
.vcm_enable = 0,
};
static struct msm_camera_sensor_flash_data flash_imx074 = {
.flash_type = MSM_CAMERA_FLASH_LED,
#ifdef CONFIG_MSM_CAMERA_FLASH
.flash_src = &msm_flash_src
#endif
};
static struct msm_camera_csi_lane_params imx074_csi_lane_params = {
.csi_lane_assign = 0xE4,
.csi_lane_mask = 0xF,
};
static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = {
.mount_angle = 90,
.cam_vreg = msm_8930_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
.gpio_conf = &msm_8930_back_cam_gpio_conf,
.csi_lane_params = &imx074_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = {
.sensor_name = "imx074",
.pdata = &msm_camera_csi_device_data[0],
.flash_data = &flash_imx074,
.strobe_flash_data = &strobe_flash_xenon,
.sensor_platform_info = &sensor_board_info_imx074,
.csi_if = 1,
.camera_type = BACK_CAMERA_2D,
.sensor_type = BAYER_SENSOR,
.actuator_info = &msm_act_main_cam_0_info,
};
static struct msm_camera_sensor_flash_data flash_mt9m114 = {
.flash_type = MSM_CAMERA_FLASH_NONE
};
static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = {
.csi_lane_assign = 0xE4,
.csi_lane_mask = 0x1,
};
static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = {
.mount_angle = 90,
.cam_vreg = msm_8930_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
.gpio_conf = &msm_8930_front_cam_gpio_conf,
.csi_lane_params = &mt9m114_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = {
.sensor_name = "mt9m114",
.pdata = &msm_camera_csi_device_data[1],
.flash_data = &flash_mt9m114,
.sensor_platform_info = &sensor_board_info_mt9m114,
.csi_if = 1,
.camera_type = FRONT_CAMERA_2D,
.sensor_type = YUV_SENSOR,
};
static struct msm_camera_sensor_flash_data flash_ov2720 = {
.flash_type = MSM_CAMERA_FLASH_NONE,
};
static struct msm_camera_csi_lane_params ov2720_csi_lane_params = {
.csi_lane_assign = 0xE4,
.csi_lane_mask = 0x3,
};
static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = {
.mount_angle = 0,
.cam_vreg = msm_8930_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
.gpio_conf = &msm_8930_front_cam_gpio_conf,
.csi_lane_params = &ov2720_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = {
.sensor_name = "ov2720",
.pdata = &msm_camera_csi_device_data[1],
.flash_data = &flash_ov2720,
.sensor_platform_info = &sensor_board_info_ov2720,
.csi_if = 1,
.camera_type = FRONT_CAMERA_2D,
.sensor_type = BAYER_SENSOR,
};
static struct msm_camera_sensor_flash_data flash_s5k3l1yx = {
.flash_type = MSM_CAMERA_FLASH_LED,
.flash_src = &msm_flash_src
};
static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
.csi_lane_assign = 0xE4,
.csi_lane_mask = 0xF,
};
static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = {
.mount_angle = 90,
.cam_vreg = msm_8930_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
.gpio_conf = &msm_8930_back_cam_gpio_conf,
.csi_lane_params = &s5k3l1yx_csi_lane_params,
};
static struct msm_actuator_info msm_act_main_cam_2_info = {
.board_info = &msm_act_main_cam_i2c_info,
.cam_name = MSM_ACTUATOR_MAIN_CAM_2,
.bus_id = MSM_8930_GSBI4_QUP_I2C_BUS_ID,
.vcm_pwd = 0,
.vcm_enable = 0,
};
static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = {
.sensor_name = "s5k3l1yx",
.pdata = &msm_camera_csi_device_data[0],
.flash_data = &flash_s5k3l1yx,
.sensor_platform_info = &sensor_board_info_s5k3l1yx,
.csi_if = 1,
.camera_type = BACK_CAMERA_2D,
.sensor_type = BAYER_SENSOR,
.actuator_info = &msm_act_main_cam_2_info,
};
static struct platform_device msm_camera_server = {
.name = "msm_cam_server",
.id = 0,
};
void __init msm8930_init_cam(void)
{
msm_gpiomux_install(msm8930_cam_common_configs,
ARRAY_SIZE(msm8930_cam_common_configs));
if (machine_is_msm8930_cdp()) {
struct msm_camera_sensor_info *s_info;
s_info = &msm_camera_sensor_s5k3l1yx_data;
s_info->sensor_platform_info->mount_angle = 0;
#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
defined(CONFIG_GPIO_SX150X_MODULE))
msm_flash_src._fsrc.ext_driver_src.led_en =
GPIO_CAM_GP_LED_EN1;
msm_flash_src._fsrc.ext_driver_src.led_flash_en =
GPIO_CAM_GP_LED_EN2;
msm_flash_src._fsrc.ext_driver_src.expander_info =
cam_expander_info;
#endif
}
platform_device_register(&msm_camera_server);
platform_device_register(&msm8960_device_csiphy0);
platform_device_register(&msm8960_device_csiphy1);
platform_device_register(&msm8960_device_csid0);
platform_device_register(&msm8960_device_csid1);
platform_device_register(&msm8960_device_ispif);
platform_device_register(&msm8960_device_vfe);
platform_device_register(&msm8960_device_vpe);
}
#ifdef CONFIG_I2C
struct i2c_board_info msm8930_camera_i2c_boardinfo[] = {
{
I2C_BOARD_INFO("imx074", 0x1A),
.platform_data = &msm_camera_sensor_imx074_data,
},
{
I2C_BOARD_INFO("ov2720", 0x6C),
.platform_data = &msm_camera_sensor_ov2720_data,
},
{
I2C_BOARD_INFO("mt9m114", 0x48),
.platform_data = &msm_camera_sensor_mt9m114_data,
},
{
I2C_BOARD_INFO("s5k3l1yx", 0x20),
.platform_data = &msm_camera_sensor_s5k3l1yx_data,
},
{
I2C_BOARD_INFO("tps61310", 0x66),
},
};
struct msm_camera_board_info msm8930_camera_board_info = {
.board_info = msm8930_camera_i2c_boardinfo,
.num_i2c_board_info = ARRAY_SIZE(msm8930_camera_i2c_boardinfo),
};
#endif
#endif
| gpl-2.0 |
jwhitham/ppc_linux | drivers/staging/lustre/lustre/lov/lovsub_dev.c | 410 | 5376 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
* Implementation of cl_device and cl_device_type for LOVSUB layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
*/
#define DEBUG_SUBSYSTEM S_LOV
#include "lov_cl_internal.h"
/** \addtogroup lov
* @{
*/
/*****************************************************************************
*
* Lovsub transfer operations.
*
*/
static void lovsub_req_completion(const struct lu_env *env,
const struct cl_req_slice *slice, int ioret)
{
struct lovsub_req *lsr;
lsr = cl2lovsub_req(slice);
OBD_SLAB_FREE_PTR(lsr, lovsub_req_kmem);
}
/**
* Implementation of struct cl_req_operations::cro_attr_set() for lovsub
* layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
* field, which is filled there.
*/
static void lovsub_req_attr_set(const struct lu_env *env,
const struct cl_req_slice *slice,
const struct cl_object *obj,
struct cl_req_attr *attr, obd_valid flags)
{
struct lovsub_object *subobj;
subobj = cl2lovsub(obj);
/*
* There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
* unconditionally. It never changes anyway.
*/
attr->cra_oa->o_stripe_idx = subobj->lso_index;
}
static const struct cl_req_operations lovsub_req_ops = {
.cro_attr_set = lovsub_req_attr_set,
.cro_completion = lovsub_req_completion
};
/*****************************************************************************
*
* Lov-sub device and device type functions.
*
*/
static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
{
struct lovsub_device *lsd = lu2lovsub_dev(d);
struct lu_device_type *ldt;
int rc;
next->ld_site = d->ld_site;
ldt = next->ld_type;
LASSERT(ldt != NULL);
rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
if (rc) {
next->ld_site = NULL;
return rc;
}
lu_device_get(next);
lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
lsd->acid_next = lu2cl_dev(next);
return rc;
}
static struct lu_device *lovsub_device_fini(const struct lu_env *env,
struct lu_device *d)
{
struct lu_device *next;
struct lovsub_device *lsd;
lsd = lu2lovsub_dev(d);
next = cl2lu_dev(lsd->acid_next);
lsd->acid_super = NULL;
lsd->acid_next = NULL;
return next;
}
static struct lu_device *lovsub_device_free(const struct lu_env *env,
struct lu_device *d)
{
struct lovsub_device *lsd = lu2lovsub_dev(d);
struct lu_device *next = cl2lu_dev(lsd->acid_next);
cl_device_fini(lu2cl_dev(d));
OBD_FREE_PTR(lsd);
return next;
}
static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req)
{
struct lovsub_req *lsr;
int result;
OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, __GFP_IO);
if (lsr != NULL) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
} else
result = -ENOMEM;
return result;
}
static const struct lu_device_operations lovsub_lu_ops = {
.ldo_object_alloc = lovsub_object_alloc,
.ldo_process_config = NULL,
.ldo_recovery_complete = NULL
};
static const struct cl_device_operations lovsub_cl_ops = {
.cdo_req_init = lovsub_req_init
};
static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
struct lu_device *d;
struct lovsub_device *lsd;
OBD_ALLOC_PTR(lsd);
if (lsd != NULL) {
int result;
result = cl_device_init(&lsd->acid_cl, t);
if (result == 0) {
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
lsd->acid_cl.cd_ops = &lovsub_cl_ops;
} else
d = ERR_PTR(result);
} else
d = ERR_PTR(-ENOMEM);
return d;
}
static const struct lu_device_type_operations lovsub_device_type_ops = {
.ldto_device_alloc = lovsub_device_alloc,
.ldto_device_free = lovsub_device_free,
.ldto_device_init = lovsub_device_init,
.ldto_device_fini = lovsub_device_fini
};
#define LUSTRE_LOVSUB_NAME "lovsub"
struct lu_device_type lovsub_device_type = {
.ldt_tags = LU_DEVICE_CL,
.ldt_name = LUSTRE_LOVSUB_NAME,
.ldt_ops = &lovsub_device_type_ops,
.ldt_ctx_tags = LCT_CL_THREAD
};
/** @} lov */
| gpl-2.0 |
profglavcho/mt6577-kernel-3.10.65 | kernel/events/hw_breakpoint.c | 1178 | 16509 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2007 Alan Stern
* Copyright (C) IBM Corporation, 2009
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
*
* Thanks to Ingo Molnar for his many suggestions.
*
* Authors: Alan Stern <stern@rowland.harvard.edu>
* K.Prasad <prasad@linux.vnet.ibm.com>
* Frederic Weisbecker <fweisbec@gmail.com>
*/
/*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers.
* This file contains the arch-independent routines.
*/
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/hw_breakpoint.h>
/*
* Constraints data
*/
/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
static int nr_slots[TYPE_MAX];
/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);
static int constraints_initialized;
/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
unsigned int pinned;
unsigned int flexible;
};
/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
return 1;
}
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
if (bp->attr.bp_type & HW_BREAKPOINT_RW)
return TYPE_DATA;
return TYPE_INST;
}
/*
* Report the maximum number of pinned breakpoints a task
* can have in this cpu
*/
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
int i;
unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
for (i = nr_slots[type] - 1; i >= 0; i--) {
if (tsk_pinned[i] > 0)
return i + 1;
}
return 0;
}
/*
* Count the number of breakpoints of the same type and same task.
* The given event must not be on the list.
*/
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
struct task_struct *tsk = bp->hw.bp_target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
if (iter->hw.bp_target == tsk &&
find_slot_idx(iter) == type &&
(iter->cpu < 0 || cpu == iter->cpu))
count += hw_breakpoint_weight(iter);
}
return count;
}
/*
* Report the number of pinned/un-pinned breakpoints we have in
* a given cpu (cpu > -1) or in all of them (cpu = -1).
*/
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
enum bp_type_idx type)
{
int cpu = bp->cpu;
struct task_struct *tsk = bp->hw.bp_target;
if (cpu >= 0) {
slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
if (!tsk)
slots->pinned += max_task_bp_pinned(cpu, type);
else
slots->pinned += task_bp_pinned(cpu, bp, type);
slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
return;
}
for_each_possible_cpu(cpu) {
unsigned int nr;
nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
if (!tsk)
nr += max_task_bp_pinned(cpu, type);
else
nr += task_bp_pinned(cpu, bp, type);
if (nr > slots->pinned)
slots->pinned = nr;
nr = per_cpu(nr_bp_flexible[type], cpu);
if (nr > slots->flexible)
slots->flexible = nr;
}
}
/*
* For now, continue to consider flexible as pinned, until we can
* ensure no flexible event can ever be scheduled before a pinned event
* on the same cpu.
*/
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
slots->pinned += weight;
}
/*
* Add a pinned breakpoint for the given task in our constraint table
*/
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
enum bp_type_idx type, int weight)
{
unsigned int *tsk_pinned;
int old_count = 0;
int old_idx = 0;
int idx = 0;
old_count = task_bp_pinned(cpu, bp, type);
old_idx = old_count - 1;
idx = old_idx + weight;
/* tsk_pinned[n] is the number of tasks having n breakpoints */
tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
if (enable) {
tsk_pinned[idx]++;
if (old_count > 0)
tsk_pinned[old_idx]--;
} else {
tsk_pinned[idx]--;
if (old_count > 0)
tsk_pinned[old_idx]++;
}
}
/*
* Add/remove the given breakpoint in our constraint table
*/
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
int weight)
{
int cpu = bp->cpu;
struct task_struct *tsk = bp->hw.bp_target;
/* Pinned counter cpu profiling */
if (!tsk) {
if (enable)
per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
else
per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
return;
}
/* Pinned counter task profiling */
if (!enable)
list_del(&bp->hw.bp_list);
if (cpu >= 0) {
toggle_bp_task_slot(bp, cpu, enable, type, weight);
} else {
for_each_possible_cpu(cpu)
toggle_bp_task_slot(bp, cpu, enable, type, weight);
}
if (enable)
list_add_tail(&bp->hw.bp_list, &bp_task_head);
}
/*
* Function to perform processor-specific cleanup during unregistration
*/
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
/*
* A weak stub function here for those archs that don't define
* it inside arch/.../kernel/hw_breakpoint.c
*/
}
/*
* Constraints to check before allowing this new breakpoint counter:
*
* == Non-pinned counter == (Considered as pinned for now)
*
* - If attached to a single cpu, check:
*
* (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
* + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
*
* -> If there are already non-pinned counters in this cpu, it means
* there is already a free slot for them.
* Otherwise, we check that the maximum number of per task
* breakpoints (for this cpu) plus the number of per cpu breakpoint
* (for this cpu) doesn't cover all the registers.
*
* - If attached to every cpus, check:
*
* (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
* + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
*
* -> This is roughly the same, except we check the number of per cpu
* bp for every cpu and we keep the max one. Same for the per-task
* breakpoints.
*
*
* == Pinned counter ==
*
* - If attached to a single cpu, check:
*
* ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
* + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
*
* -> Same checks as before. But now the nr_bp_flexible, if any, must keep
* one register at least (or they will never be fed).
*
* - If attached to every cpus, check:
*
* ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
* + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
*/
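/*
 * Worked example (illustrative, not part of the original source): with
 * nr_slots[type] == 4, one pinned cpu-wide counter on this cpu and a task
 * that already has two pinned breakpoints scheduled here, a new pinned
 * request of weight 1 sees
 *
 *	slots.pinned = 1 + 2 + 1 = 4	(4 + !!0 <= 4, so it fits)
 *
 * while the same request with a flexible counter present is refused,
 * since 4 + !!1 == 5 > 4 and __reserve_bp_slot() returns -ENOSPC.
 */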
static int __reserve_bp_slot(struct perf_event *bp)
{
struct bp_busy_slots slots = {0};
enum bp_type_idx type;
int weight;
/* We couldn't initialize breakpoint constraints on boot */
if (!constraints_initialized)
return -ENOMEM;
/* Basic checks */
if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
bp->attr.bp_type == HW_BREAKPOINT_INVALID)
return -EINVAL;
type = find_slot_idx(bp);
weight = hw_breakpoint_weight(bp);
fetch_bp_busy_slots(&slots, bp, type);
/*
* Simulate the addition of this breakpoint to the constraints
* and see the result.
*/
fetch_this_slot(&slots, weight);
/* Flexible counters need to keep at least one slot */
if (slots.pinned + (!!slots.flexible) > nr_slots[type])
return -ENOSPC;
toggle_bp_slot(bp, true, type, weight);
return 0;
}
int reserve_bp_slot(struct perf_event *bp)
{
int ret;
mutex_lock(&nr_bp_mutex);
ret = __reserve_bp_slot(bp);
mutex_unlock(&nr_bp_mutex);
return ret;
}
static void __release_bp_slot(struct perf_event *bp)
{
enum bp_type_idx type;
int weight;
type = find_slot_idx(bp);
weight = hw_breakpoint_weight(bp);
toggle_bp_slot(bp, false, type, weight);
}
void release_bp_slot(struct perf_event *bp)
{
mutex_lock(&nr_bp_mutex);
arch_unregister_hw_breakpoint(bp);
__release_bp_slot(bp);
mutex_unlock(&nr_bp_mutex);
}
/*
* Allow the kernel debugger to reserve breakpoint slots without
* taking a lock, using the dbg_* variants of the reserve and
* release breakpoint slot functions.
*/
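/*
 * Hypothetical caller sketch (illustration only, not usage taken from
 * this file): a debugger core that must not sleep probes the mutex and
 * bails out on contention or on a full slot table:
 *
 *	if (dbg_reserve_bp_slot(bp) < 0)
 *		return -EBUSY;
 */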
int dbg_reserve_bp_slot(struct perf_event *bp)
{
if (mutex_is_locked(&nr_bp_mutex))
return -1;
return __reserve_bp_slot(bp);
}
int dbg_release_bp_slot(struct perf_event *bp)
{
if (mutex_is_locked(&nr_bp_mutex))
return -1;
__release_bp_slot(bp);
return 0;
}
static int validate_hw_breakpoint(struct perf_event *bp)
{
int ret;
ret = arch_validate_hwbkpt_settings(bp);
if (ret)
return ret;
if (arch_check_bp_in_kernelspace(bp)) {
if (bp->attr.exclude_kernel)
return -EINVAL;
/*
* Don't let unprivileged users set a breakpoint in the trap
* path to avoid trap recursion attacks.
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
return 0;
}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
int ret;
ret = reserve_bp_slot(bp);
if (ret)
return ret;
ret = validate_hw_breakpoint(bp);
/* if arch_validate_hwbkpt_settings() fails then release bp slot */
if (ret)
release_bp_slot(bp);
return ret;
}
/**
* register_user_hw_breakpoint - register a hardware breakpoint for user space
* @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
* @context: context data passed back to the overflow handler
* @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
void *context,
struct task_struct *tsk)
{
return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
/**
* modify_user_hw_breakpoint - modify a user-space hardware breakpoint
* @bp: the breakpoint structure to modify
* @attr: new breakpoint attributes
*/
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
u64 old_addr = bp->attr.bp_addr;
u64 old_len = bp->attr.bp_len;
int old_type = bp->attr.bp_type;
int err = 0;
/*
* modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
* will not be possible to raise IPIs that invoke __perf_event_disable.
* So call the function directly after making sure we are targeting the
* current task.
*/
if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
__perf_event_disable(bp);
else
perf_event_disable(bp);
bp->attr.bp_addr = attr->bp_addr;
bp->attr.bp_type = attr->bp_type;
bp->attr.bp_len = attr->bp_len;
if (attr->disabled)
goto end;
err = validate_hw_breakpoint(bp);
if (!err)
perf_event_enable(bp);
if (err) {
bp->attr.bp_addr = old_addr;
bp->attr.bp_type = old_type;
bp->attr.bp_len = old_len;
if (!bp->attr.disabled)
perf_event_enable(bp);
return err;
}
end:
bp->attr.disabled = attr->disabled;
return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
/**
* unregister_hw_breakpoint - unregister a user-space hardware breakpoint
* @bp: the breakpoint structure to unregister
*/
void unregister_hw_breakpoint(struct perf_event *bp)
{
if (!bp)
return;
perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
* register_wide_hw_breakpoint - register a wide breakpoint in the kernel
* @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
* @context: context data passed back to the overflow handler
*
* @return a set of per_cpu pointers to perf events
*/
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
void *context)
{
struct perf_event * __percpu *cpu_events, **pevent, *bp;
long err;
int cpu;
cpu_events = alloc_percpu(typeof(*cpu_events));
if (!cpu_events)
return (void __percpu __force *)ERR_PTR(-ENOMEM);
get_online_cpus();
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
bp = perf_event_create_kernel_counter(attr, cpu, NULL,
triggered, context);
*pevent = bp;
if (IS_ERR(bp)) {
err = PTR_ERR(bp);
goto fail;
}
}
put_online_cpus();
return cpu_events;
fail:
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
if (IS_ERR(*pevent))
break;
unregister_hw_breakpoint(*pevent);
}
put_online_cpus();
free_percpu(cpu_events);
return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
/**
* unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
* @cpu_events: the per cpu set of events to unregister
*/
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
int cpu;
struct perf_event **pevent;
for_each_possible_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
unregister_hw_breakpoint(*pevent);
}
free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
.notifier_call = hw_breakpoint_exceptions_notify,
/* we need to be notified first */
.priority = 0x7fffffff
};
static void bp_perf_event_destroy(struct perf_event *event)
{
release_bp_slot(event);
}
static int hw_breakpoint_event_init(struct perf_event *bp)
{
int err;
if (bp->attr.type != PERF_TYPE_BREAKPOINT)
return -ENOENT;
/*
* no branch sampling for breakpoint events
*/
if (has_branch_stack(bp))
return -EOPNOTSUPP;
err = register_perf_hw_breakpoint(bp);
if (err)
return err;
bp->destroy = bp_perf_event_destroy;
return 0;
}
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
if (!(flags & PERF_EF_START))
bp->hw.state = PERF_HES_STOPPED;
return arch_install_hw_breakpoint(bp);
}
static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
arch_uninstall_hw_breakpoint(bp);
}
static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
bp->hw.state = 0;
}
static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
bp->hw.state = PERF_HES_STOPPED;
}
static int hw_breakpoint_event_idx(struct perf_event *bp)
{
return 0;
}
static struct pmu perf_breakpoint = {
.task_ctx_nr = perf_sw_context, /* could eventually get its own */
.event_init = hw_breakpoint_event_init,
.add = hw_breakpoint_add,
.del = hw_breakpoint_del,
.start = hw_breakpoint_start,
.stop = hw_breakpoint_stop,
.read = hw_breakpoint_pmu_read,
.event_idx = hw_breakpoint_event_idx,
};
int __init init_hw_breakpoint(void)
{
unsigned int **task_bp_pinned;
int cpu, err_cpu;
int i;
for (i = 0; i < TYPE_MAX; i++)
nr_slots[i] = hw_breakpoint_slots(i);
for_each_possible_cpu(cpu) {
for (i = 0; i < TYPE_MAX; i++) {
task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
GFP_KERNEL);
if (!*task_bp_pinned)
goto err_alloc;
}
}
constraints_initialized = 1;
perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);
return register_die_notifier(&hw_breakpoint_exceptions_nb);
err_alloc:
for_each_possible_cpu(err_cpu) {
for (i = 0; i < TYPE_MAX; i++)
kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
if (err_cpu == cpu)
break;
}
return -ENOMEM;
}
| gpl-2.0 |
up2wing/fox-kernel-comment | linux-3.10.89/tools/perf/util/scripting-engines/trace-event-python.c | 2202 | 18037 | /*
* trace-event-python. Feed trace events to an embedded Python interpreter.
*
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <Python.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "../../perf.h"
#include "../evsel.h"
#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../trace-event.h"
PyMODINIT_FUNC initperf_trace_context(void);
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
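/* i.e. 65535, the largest id that fits in the 16-bit event type field */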
struct event_format *events[FTRACE_MAX_EVENT];
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
extern struct scripting_context *scripting_context;
static char *cur_field_name;
static int zero_flag_atom;
static PyObject *main_module, *main_dict;
static void handler_call_die(const char *handler_name)
{
PyErr_Print();
Py_FatalError("problem in Python trace event handler");
}
static void define_value(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *field_value,
const char *field_str)
{
const char *handler_name = "define_flag_value";
PyObject *handler, *t, *retval;
unsigned long long value;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
handler_name = "define_symbolic_value";
t = PyTuple_New(4);
if (!t)
Py_FatalError("couldn't create Python tuple");
value = eval_flag(field_value);
PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
PyTuple_SetItem(t, n++, PyString_FromString(field_name));
PyTuple_SetItem(t, n++, PyInt_FromLong(value));
PyTuple_SetItem(t, n++, PyString_FromString(field_str));
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
}
Py_DECREF(t);
}
static void define_values(enum print_arg_type field_type,
struct print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
define_value(field_type, ev_name, field_name, field->value,
field->str);
if (field->next)
define_values(field_type, field->next, ev_name, field_name);
}
static void define_field(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *delim)
{
const char *handler_name = "define_flag_field";
PyObject *handler, *t, *retval;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
handler_name = "define_symbolic_field";
if (field_type == PRINT_FLAGS)
t = PyTuple_New(3);
else
t = PyTuple_New(2);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
PyTuple_SetItem(t, n++, PyString_FromString(field_name));
if (field_type == PRINT_FLAGS)
PyTuple_SetItem(t, n++, PyString_FromString(delim));
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
}
Py_DECREF(t);
}
static void define_event_symbols(struct event_format *event,
const char *ev_name,
struct print_arg *args)
{
switch (args->type) {
case PRINT_NULL:
break;
case PRINT_ATOM:
define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
case PRINT_FIELD:
if (cur_field_name)
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
define_field(PRINT_FLAGS, ev_name, cur_field_name,
args->flags.delim);
define_values(PRINT_FLAGS, args->flags.flags, ev_name,
cur_field_name);
break;
case PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
case PRINT_HEX:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
case PRINT_STRING:
break;
case PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
case PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
default:
/* gcc warns for these? */
case PRINT_BSTRING:
case PRINT_DYNAMIC_ARRAY:
case PRINT_FUNC:
/* we should warn... */
return;
}
if (args->next)
define_event_symbols(event, ev_name, args->next);
}
static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
struct event_format *event;
int type = evsel->attr.config;
/*
* XXX: Do we really need to cache this since now we have evsel->tp_format
* cached already? Need to re-read this "cache" routine that as well calls
* define_event_symbols() :-\
*/
if (events[type])
return events[type];
events[type] = event = evsel->tp_format;
if (!event)
return NULL;
sprintf(ev_name, "%s__%s", event->system, event->name);
define_event_symbols(event, ev_name, event->print_fmt.args);
return event;
}
static void python_process_tracepoint(union perf_event *perf_event
__maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256];
struct format_field *field;
unsigned long long val;
unsigned long s, ns;
struct event_format *event;
unsigned n = 0;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
struct thread *thread = al->thread;
char *comm = thread->comm;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
event = find_cache_event(evsel);
if (!event)
die("ug! no event found for type %d", (int)evsel->attr.config);
pid = raw_field_value(event, "common_pid", data);
sprintf(handler_name, "%s__%s", event->system, event->name);
handler = PyDict_GetItemString(main_dict, handler_name);
if (handler && !PyCallable_Check(handler))
handler = NULL;
if (!handler) {
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dict");
}
s = nsecs / NSECS_PER_SEC;
ns = nsecs - s * NSECS_PER_SEC;
scripting_context->event_data = data;
scripting_context->pevent = evsel->tp_format->pevent;
context = PyCObject_FromVoidPtr(scripting_context, NULL);
PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
PyTuple_SetItem(t, n++, context);
if (handler) {
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
PyTuple_SetItem(t, n++, PyInt_FromLong(s));
PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
PyTuple_SetItem(t, n++, PyString_FromString(comm));
} else {
PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
}
for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) {
int offset;
if (field->flags & FIELD_IS_DYNAMIC) {
offset = *(int *)(data + field->offset);
offset &= 0xffff;
} else
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
val = read_size(event, data + field->offset,
field->size);
if (field->flags & FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
obj = PyInt_FromLong(val);
else
obj = PyLong_FromLongLong(val);
} else {
if (val <= LONG_MAX)
obj = PyInt_FromLong(val);
else
obj = PyLong_FromUnsignedLongLong(val);
}
}
if (handler)
PyTuple_SetItem(t, n++, obj);
else
PyDict_SetItemString(dict, field->name, obj);
}
if (!handler)
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
if (handler) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
} else {
handler = PyDict_GetItemString(main_dict, "trace_unhandled");
if (handler && PyCallable_Check(handler)) {
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die("trace_unhandled");
}
Py_DECREF(dict);
}
Py_DECREF(t);
}
static void python_process_general_event(union perf_event *perf_event
__maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
struct addr_location *al)
{
PyObject *handler, *retval, *t, *dict;
static char handler_name[64];
unsigned n = 0;
struct thread *thread = al->thread;
/*
* Use the MAX_FIELDS to make the function expandable, though
* currently there is only one item for the tuple.
*/
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dictionary");
snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
handler = PyDict_GetItemString(main_dict, handler_name);
if (!handler || !PyCallable_Check(handler))
goto exit;
PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
(const char *)&evsel->attr, sizeof(evsel->attr)));
PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
(const char *)sample, sizeof(*sample)));
PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
(const char *)sample->raw_data, sample->raw_size));
PyDict_SetItemString(dict, "comm",
PyString_FromString(thread->comm));
if (al->map) {
PyDict_SetItemString(dict, "dso",
PyString_FromString(al->map->dso->name));
}
if (al->sym) {
PyDict_SetItemString(dict, "symbol",
PyString_FromString(al->sym->name));
}
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
retval = PyObject_CallObject(handler, t);
if (retval == NULL)
handler_call_die(handler_name);
exit:
Py_DECREF(dict);
Py_DECREF(t);
}
static void python_process_event(union perf_event *perf_event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
struct addr_location *al)
{
switch (evsel->attr.type) {
case PERF_TYPE_TRACEPOINT:
python_process_tracepoint(perf_event, sample, evsel,
machine, al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
python_process_general_event(perf_event, sample, evsel,
machine, al);
}
}
static int run_start_sub(void)
{
PyObject *handler, *retval;
int err = 0;
main_module = PyImport_AddModule("__main__");
if (main_module == NULL)
return -1;
Py_INCREF(main_module);
main_dict = PyModule_GetDict(main_module);
if (main_dict == NULL) {
err = -1;
goto error;
}
Py_INCREF(main_dict);
handler = PyDict_GetItemString(main_dict, "trace_begin");
if (handler == NULL || !PyCallable_Check(handler))
goto out;
retval = PyObject_CallObject(handler, NULL);
if (retval == NULL)
handler_call_die("trace_begin");
Py_DECREF(retval);
return err;
error:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
out:
return err;
}
/*
* Start trace script
*/
static int python_start_script(const char *script, int argc, const char **argv)
{
const char **command_line;
char buf[PATH_MAX];
int i, err = 0;
FILE *fp;
command_line = malloc((argc + 1) * sizeof(const char *));
if (!command_line)
return -ENOMEM;
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
Py_Initialize();
initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
fp = fopen(script, "r");
if (!fp) {
snprintf(buf, sizeof(buf), "Can't open python script \"%s\"", script);
perror(buf);
err = -1;
goto error;
}
err = PyRun_SimpleFile(fp, script);
if (err) {
fprintf(stderr, "Error running python script %s\n", script);
goto error;
}
err = run_start_sub();
if (err) {
fprintf(stderr, "Error starting python script %s\n", script);
goto error;
}
free(command_line);
return err;
error:
Py_Finalize();
free(command_line);
return err;
}
/*
* Stop trace script
*/
static int python_stop_script(void)
{
PyObject *handler, *retval;
int err = 0;
handler = PyDict_GetItemString(main_dict, "trace_end");
if (handler == NULL || !PyCallable_Check(handler))
goto out;
retval = PyObject_CallObject(handler, NULL);
if (retval == NULL)
handler_call_die("trace_end");
else
Py_DECREF(retval);
out:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
Py_Finalize();
return err;
}
static int python_generate_script(struct pevent *pevent, const char *outfile)
{
struct event_format *event = NULL;
struct format_field *f;
char fname[PATH_MAX];
int not_first, count;
FILE *ofp;
sprintf(fname, "%s.py", outfile);
ofp = fopen(fname, "w");
if (ofp == NULL) {
fprintf(stderr, "couldn't open %s\n", fname);
return -1;
}
fprintf(ofp, "# perf script event handlers, "
"generated by perf script -g python\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
fprintf(ofp, "# The common_* event handler fields are the most useful "
"fields common to\n");
fprintf(ofp, "# all events. They don't necessarily correspond to "
"the 'common_*' fields\n");
fprintf(ofp, "# in the format files. Those fields not available as "
"handler params can\n");
fprintf(ofp, "# be retrieved using Python functions of the form "
"common_*(context).\n");
fprintf(ofp, "# See the perf-trace-python Documentation for the list "
"of available functions.\n\n");
fprintf(ofp, "import os\n");
fprintf(ofp, "import sys\n\n");
fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
fprintf(ofp, "\nfrom perf_trace_context import *\n");
fprintf(ofp, "from Core import *\n\n\n");
fprintf(ofp, "def trace_begin():\n");
fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint \"in trace_end\"\n\n");
while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
fprintf(ofp, "common_cpu,\n");
fprintf(ofp, "\tcommon_secs, ");
fprintf(ofp, "common_nsecs, ");
fprintf(ofp, "common_pid, ");
fprintf(ofp, "common_comm,\n\t");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t");
fprintf(ofp, "%s", f->name);
}
fprintf(ofp, "):\n");
fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
"common_secs, common_nsecs,\n\t\t\t"
"common_pid, common_comm)\n\n");
fprintf(ofp, "\t\tprint \"");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (count && count % 3 == 0) {
fprintf(ofp, "\" \\\n\t\t\"");
}
count++;
fprintf(ofp, "%s=", f->name);
if (f->flags & FIELD_IS_STRING ||
f->flags & FIELD_IS_FLAG ||
f->flags & FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
else if (f->flags & FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
}
fprintf(ofp, "\\n\" %% \\\n\t\t(");
not_first = 0;
count = 0;
for (f = event->format.fields; f; f = f->next) {
if (not_first++)
fprintf(ofp, ", ");
if (++count % 5 == 0)
fprintf(ofp, "\n\t\t");
if (f->flags & FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "flag_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else if (f->flags & FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
}
fprintf(ofp, "symbol_str(\"");
fprintf(ofp, "%s__%s\", ", event->system,
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
} else
fprintf(ofp, "%s", f->name);
}
fprintf(ofp, "),\n\n");
}
fprintf(ofp, "def trace_unhandled(event_name, context, "
"event_fields_dict):\n");
fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
"for k,v in sorted(event_fields_dict.items())])\n\n");
fprintf(ofp, "def print_header("
"event_name, cpu, secs, nsecs, pid, comm):\n"
"\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
"(event_name, cpu, secs, nsecs, pid, comm),\n");
fclose(ofp);
fprintf(stderr, "generated Python script: %s\n", fname);
return 0;
}
struct scripting_ops python_scripting_ops = {
.name = "Python",
.start_script = python_start_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.generate_script = python_generate_script,
};
| gpl-2.0 |
EnJens/android-tegra-nv-2.6.39 | drivers/net/wireless/orinoco/fw.c | 2714 | 9886 | /* Firmware file reading and download helpers
*
* See copyright notice in main.c
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include "hermes.h"
#include "hermes_dld.h"
#include "orinoco.h"
#include "fw.h"
/* End markers (for Symbol firmware only) */
#define TEXT_END 0x1A /* End of text header */
struct fw_info {
char *pri_fw;
char *sta_fw;
char *ap_fw;
u32 pda_addr;
u16 pda_size;
};
static const struct fw_info orinoco_fw[] = {
{ NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
{ NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
{ "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
};
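/*
 * Rows are indexed by priv->firmware_type (Agere, Intersil, Symbol in
 * that order) -- see orinoco_download() and orinoco_cache_fw() below.
 */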
MODULE_FIRMWARE("agere_sta_fw.bin");
MODULE_FIRMWARE("agere_ap_fw.bin");
MODULE_FIRMWARE("prism_sta_fw.bin");
MODULE_FIRMWARE("prism_ap_fw.bin");
MODULE_FIRMWARE("symbol_sp24t_prim_fw");
MODULE_FIRMWARE("symbol_sp24t_sec_fw");
/* Structure used to access fields in FW
* Make sure LE decoding macros are used
*/
struct orinoco_fw_header {
char hdr_vers[6]; /* ASCII string for header version */
__le16 headersize; /* Total length of header */
__le32 entry_point; /* NIC entry point */
__le32 blocks; /* Number of blocks to program */
__le32 block_offset; /* Offset of block data from eof header */
__le32 pdr_offset; /* Offset to PDR data from eof header */
__le32 pri_offset; /* Offset to primary plug data */
__le32 compat_offset; /* Offset to compatibility data*/
char signature[0]; /* FW signature length headersize-20 */
} __packed;
/* Check the range of various header entries. Return a pointer to a
* description of the problem, or NULL if everything checks out. */
static const char *validate_fw(const struct orinoco_fw_header *hdr, size_t len)
{
u16 hdrsize;
if (len < sizeof(*hdr))
return "image too small";
if (memcmp(hdr->hdr_vers, "HFW", 3) != 0)
return "format not recognised";
hdrsize = le16_to_cpu(hdr->headersize);
if (hdrsize > len)
return "bad headersize";
if ((hdrsize + le32_to_cpu(hdr->block_offset)) > len)
return "bad block offset";
if ((hdrsize + le32_to_cpu(hdr->pdr_offset)) > len)
return "bad PDR offset";
if ((hdrsize + le32_to_cpu(hdr->pri_offset)) > len)
return "bad PRI offset";
if ((hdrsize + le32_to_cpu(hdr->compat_offset)) > len)
return "bad compat offset";
/* TODO: consider adding a checksum or CRC to the firmware format */
return NULL;
}
#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
static inline const struct firmware *
orinoco_cached_fw_get(struct orinoco_private *priv, bool primary)
{
if (primary)
return priv->cached_pri_fw;
else
return priv->cached_fw;
}
#else
#define orinoco_cached_fw_get(priv, primary) (NULL)
#endif
/* Download either STA or AP firmware into the card. */
static int
orinoco_dl_firmware(struct orinoco_private *priv,
const struct fw_info *fw,
int ap)
{
/* Plug Data Area (PDA) */
__le16 *pda;
hermes_t *hw = &priv->hw;
const struct firmware *fw_entry;
const struct orinoco_fw_header *hdr;
const unsigned char *first_block;
const void *end;
const char *firmware;
const char *fw_err;
struct device *dev = priv->dev;
int err = 0;
pda = kzalloc(fw->pda_size, GFP_KERNEL);
if (!pda)
return -ENOMEM;
if (ap)
firmware = fw->ap_fw;
else
firmware = fw->sta_fw;
dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
/* Read current plug data */
err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
dev_dbg(dev, "Read PDA returned %d\n", err);
if (err)
goto free;
if (!orinoco_cached_fw_get(priv, false)) {
err = request_firmware(&fw_entry, firmware, priv->dev);
if (err) {
dev_err(dev, "Cannot find firmware %s\n", firmware);
err = -ENOENT;
goto free;
}
} else
fw_entry = orinoco_cached_fw_get(priv, false);
hdr = (const struct orinoco_fw_header *) fw_entry->data;
fw_err = validate_fw(hdr, fw_entry->size);
if (fw_err) {
dev_warn(dev, "Invalid firmware image detected (%s). "
"Aborting download\n", fw_err);
err = -EINVAL;
goto abort;
}
/* Enable aux port to allow programming */
err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
dev_dbg(dev, "Program init returned %d\n", err);
if (err != 0)
goto abort;
/* Program data */
first_block = (fw_entry->data +
le16_to_cpu(hdr->headersize) +
le32_to_cpu(hdr->block_offset));
end = fw_entry->data + fw_entry->size;
err = hermes_program(hw, first_block, end);
dev_dbg(dev, "Program returned %d\n", err);
if (err != 0)
goto abort;
/* Update production data */
first_block = (fw_entry->data +
le16_to_cpu(hdr->headersize) +
le32_to_cpu(hdr->pdr_offset));
err = hermes_apply_pda_with_defaults(hw, first_block, end, pda,
&pda[fw->pda_size / sizeof(*pda)]);
dev_dbg(dev, "Apply PDA returned %d\n", err);
if (err)
goto abort;
/* Tell card we've finished */
err = hw->ops->program_end(hw);
dev_dbg(dev, "Program end returned %d\n", err);
if (err != 0)
goto abort;
/* Check if we're running */
dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw));
abort:
/* If we requested the firmware, release it. */
if (!orinoco_cached_fw_get(priv, false))
release_firmware(fw_entry);
free:
kfree(pda);
return err;
}
/*
* Process a firmware image - stop the card, load the firmware, reset
* the card and make sure it responds. For the secondary firmware take
* care of the PDA - read it and then write it on top of the firmware.
*/
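/*
 * Illustrative summary of the sequence implemented below:
 *
 *	read_pda()         - snapshot the plug data from EEPROM (secondary)
 *	stop_fw(priv, 1)   - halt the running image so it can be rewritten
 *	hermes_program()   - write the new firmware blocks
 *	hermes_apply_pda() - replay the saved PDA on top of the image
 *	stop_fw(priv, 0)   - restart; init() and hermes_present() verify
 */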
static int
symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
const unsigned char *image, const void *end,
int secondary)
{
hermes_t *hw = &priv->hw;
int ret = 0;
const unsigned char *ptr;
const unsigned char *first_block;
/* Plug Data Area (PDA) */
__le16 *pda = NULL;
/* Binary block begins after the 0x1A marker */
ptr = image;
while (*ptr++ != TEXT_END);
first_block = ptr;
/* Read the PDA from EEPROM */
if (secondary) {
pda = kzalloc(fw->pda_size, GFP_KERNEL);
if (!pda)
return -ENOMEM;
ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
if (ret)
goto free;
}
/* Stop the firmware, so that it can be safely rewritten */
if (priv->stop_fw) {
ret = priv->stop_fw(priv, 1);
if (ret)
goto free;
}
/* Program the adapter with new firmware */
ret = hermes_program(hw, first_block, end);
if (ret)
goto free;
/* Write the PDA to the adapter */
if (secondary) {
size_t len = hermes_blocks_length(first_block, end);
ptr = first_block + len;
ret = hermes_apply_pda(hw, ptr, end, pda,
&pda[fw->pda_size / sizeof(*pda)]);
kfree(pda);
if (ret)
return ret;
}
/* Run the firmware */
if (priv->stop_fw) {
ret = priv->stop_fw(priv, 0);
if (ret)
return ret;
}
/* Reset hermes chip and make sure it responds */
ret = hw->ops->init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
if (secondary && ret != 0)
return -ENODEV;
/* And this should work with any firmware */
if (!hermes_present(hw))
return -ENODEV;
return 0;
free:
kfree(pda);
return ret;
}
/*
* Download the firmware into the card, this also does a PCMCIA soft
* reset on the card, to make sure it's in a sane state.
*/
static int
symbol_dl_firmware(struct orinoco_private *priv,
const struct fw_info *fw)
{
struct device *dev = priv->dev;
int ret;
const struct firmware *fw_entry;
if (!orinoco_cached_fw_get(priv, true)) {
if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
dev_err(dev, "Cannot find firmware: %s\n", fw->pri_fw);
return -ENOENT;
}
} else
fw_entry = orinoco_cached_fw_get(priv, true);
/* Load primary firmware */
ret = symbol_dl_image(priv, fw, fw_entry->data,
fw_entry->data + fw_entry->size, 0);
if (!orinoco_cached_fw_get(priv, true))
release_firmware(fw_entry);
if (ret) {
dev_err(dev, "Primary firmware download failed\n");
return ret;
}
if (!orinoco_cached_fw_get(priv, false)) {
if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
dev_err(dev, "Cannot find firmware: %s\n", fw->sta_fw);
return -ENOENT;
}
} else
fw_entry = orinoco_cached_fw_get(priv, false);
/* Load secondary firmware */
ret = symbol_dl_image(priv, fw, fw_entry->data,
fw_entry->data + fw_entry->size, 1);
if (!orinoco_cached_fw_get(priv, false))
release_firmware(fw_entry);
if (ret) {
dev_err(dev, "Secondary firmware download failed\n");
}
return ret;
}
int orinoco_download(struct orinoco_private *priv)
{
int err = 0;
/* Reload firmware */
switch (priv->firmware_type) {
case FIRMWARE_TYPE_AGERE:
/* case FIRMWARE_TYPE_INTERSIL: */
err = orinoco_dl_firmware(priv,
&orinoco_fw[priv->firmware_type], 0);
break;
case FIRMWARE_TYPE_SYMBOL:
err = symbol_dl_firmware(priv,
&orinoco_fw[priv->firmware_type]);
break;
case FIRMWARE_TYPE_INTERSIL:
break;
}
/* TODO: if we fail we probably need to reinitialise
* the driver */
return err;
}
#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
void orinoco_cache_fw(struct orinoco_private *priv, int ap)
{
const struct firmware *fw_entry = NULL;
const char *pri_fw;
const char *fw;
pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
if (ap)
fw = orinoco_fw[priv->firmware_type].ap_fw;
else
fw = orinoco_fw[priv->firmware_type].sta_fw;
if (pri_fw) {
if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
priv->cached_pri_fw = fw_entry;
}
if (fw) {
if (request_firmware(&fw_entry, fw, priv->dev) == 0)
priv->cached_fw = fw_entry;
}
}
void orinoco_uncache_fw(struct orinoco_private *priv)
{
if (priv->cached_pri_fw)
release_firmware(priv->cached_pri_fw);
if (priv->cached_fw)
release_firmware(priv->cached_fw);
priv->cached_pri_fw = NULL;
priv->cached_fw = NULL;
}
#endif
| gpl-2.0 |
rezvorck/android_kernel_s450m_4g_mm | arch/alpha/kernel/srmcons.c | 3226 | 6191 | /*
* linux/arch/alpha/kernel/srmcons.c
*
* Callback based driver for SRM Console console device.
* (TTY driver and console driver)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <asm/console.h>
#include <asm/uaccess.h>
static DEFINE_SPINLOCK(srmcons_callback_lock);
static int srm_is_registered_console = 0;
/*
* The TTY driver
*/
#define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */
struct srmcons_private {
struct tty_port port;
struct timer_list timer;
} srmcons_singleton;
typedef union _srmcons_result {
struct {
unsigned long c :61;
unsigned long status :3;
} bits;
long as_long;
} srmcons_result;
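/*
 * Example decode (illustrative; consume_char() is a hypothetical helper):
 * the SRM callbacks pack the character into the low 61 bits and a status
 * code into the top 3. status < 2 means a byte was read, and status bit 0
 * set means more input is pending:
 *
 *	srmcons_result r;
 *	r.as_long = callback_getc(0);
 *	if (r.bits.status < 2)
 *		consume_char((char)r.bits.c);
 */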
/* called with callback_lock held */
static int
srmcons_do_receive_chars(struct tty_port *port)
{
srmcons_result result;
int count = 0, loops = 0;
do {
result.as_long = callback_getc(0);
if (result.bits.status < 2) {
tty_insert_flip_char(port, (char)result.bits.c, 0);
count++;
}
} while((result.bits.status & 1) && (++loops < 10));
if (count)
tty_schedule_flip(port);
return count;
}
static void
srmcons_receive_chars(unsigned long data)
{
struct srmcons_private *srmconsp = (struct srmcons_private *)data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
int incr = 10;
local_irq_save(flags);
if (spin_trylock(&srmcons_callback_lock)) {
if (!srmcons_do_receive_chars(port))
incr = 100;
spin_unlock(&srmcons_callback_lock);
}
spin_lock(&port->lock);
if (port->tty)
mod_timer(&srmconsp->timer, jiffies + incr);
spin_unlock(&port->lock);
local_irq_restore(flags);
}
/* called with callback_lock held */
static int
srmcons_do_write(struct tty_port *port, const char *buf, int count)
{
static char str_cr[1] = "\r";
long c, remaining = count;
srmcons_result result;
char *cur;
int need_cr;
for (cur = (char *)buf; remaining > 0; ) {
need_cr = 0;
/*
* Break it up into reasonable size chunks to allow a chance
* for input to get in
*/
for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
if (cur[c] == '\n')
need_cr = 1;
while (c > 0) {
result.as_long = callback_puts(0, cur, c);
c -= result.bits.c;
remaining -= result.bits.c;
cur += result.bits.c;
/*
* Check for pending input iff a tty port was provided
*/
if (port)
srmcons_do_receive_chars(port);
}
while (need_cr) {
result.as_long = callback_puts(0, str_cr, 1);
if (result.bits.c > 0)
need_cr = 0;
}
}
return count;
}
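/*
 * Worked example for srmcons_do_write() above (illustrative): writing
 * "ab\ncd" scans up to and including the '\n' (c == 3), pushes "ab\n"
 * through callback_puts(), then loops emitting a single '\r' until the
 * console accepts it, and finally continues with the remaining "cd".
 */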
static int
srmcons_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(tty->port, (const char *) buf, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
return count;
}
static int
srmcons_write_room(struct tty_struct *tty)
{
return 512;
}
static int
srmcons_chars_in_buffer(struct tty_struct *tty)
{
return 0;
}
static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = &srmcons_singleton;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (!port->tty) {
tty->driver_data = srmconsp;
tty->port = port;
port->tty = tty; /* XXX proper refcounting */
mod_timer(&srmconsp->timer, jiffies + 10);
}
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void
srmcons_close(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = tty->driver_data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (tty->count == 1) {
port->tty = NULL;
del_timer(&srmconsp->timer);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static struct tty_driver *srmcons_driver;
static const struct tty_operations srmcons_ops = {
.open = srmcons_open,
.close = srmcons_close,
.write = srmcons_write,
.write_room = srmcons_write_room,
.chars_in_buffer= srmcons_chars_in_buffer,
};
static int __init
srmcons_init(void)
{
setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
(unsigned long)&srmcons_singleton);
if (srm_is_registered_console) {
struct tty_driver *driver;
int err;
driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES);
if (!driver)
return -ENOMEM;
tty_port_init(&srmcons_singleton.port);
driver->driver_name = "srm";
driver->name = "srm";
driver->major = 0; /* dynamic */
driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_SYSCONS;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &srmcons_ops);
tty_port_link_device(&srmcons_singleton.port, driver, 0);
err = tty_register_driver(driver);
if (err) {
put_tty_driver(driver);
tty_port_destroy(&srmcons_singleton.port);
return err;
}
srmcons_driver = driver;
return 0;
}
return -ENODEV;
}
module_init(srmcons_init);
/*
* The console driver
*/
static void
srm_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(NULL, s, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
}
static struct tty_driver *
srm_console_device(struct console *co, int *index)
{
*index = co->index;
return srmcons_driver;
}
static int
srm_console_setup(struct console *co, char *options)
{
return 0;
}
static struct console srmcons = {
.name = "srm",
.write = srm_console_write,
.device = srm_console_device,
.setup = srm_console_setup,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
void __init
register_srm_console(void)
{
if (!srm_is_registered_console) {
callback_open_console();
register_console(&srmcons);
srm_is_registered_console = 1;
}
}
void __init
unregister_srm_console(void)
{
if (srm_is_registered_console) {
callback_close_console();
unregister_console(&srmcons);
srm_is_registered_console = 0;
}
}
| gpl-2.0 |
davidmueller13/TW_Kernel_LP | net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4762 | 21170 | /*
* Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the BSD-type
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Network Appliance, Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Author: Tom Tucker <tom@opengridcomputing.com>
*/
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
/* Encode an XDR as an array of IB SGE
*
* Assumptions:
* - head[0] is physically contiguous.
* - tail[0] is physically contiguous.
* - pages[] is not physically or virtually contiguous and consists of
* PAGE_SIZE elements.
*
* Output:
* SGE[0] reserved for RPCRDMA header
* SGE[1] data from xdr->head[]
* SGE[2..sge_count-2] data from xdr->pages[]
* SGE[sge_count-1] data from xdr->tail.
*
* The max SGE we need is the length of the XDR / pagesize + one for
* head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
* reserves a page for both the request and the reply header, and this
* array is only concerned with the reply, we are assured that we have
* one extra page for the RPCRDMA header.
*/
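/*
 * Illustrative sketch, not part of the original file: the SGE bound
 * described above written out as code. For a 32KB reply with 4KB pages
 * this evaluates to 32768/4096 + 3 = 11 SGEs.
 */
static inline int xdr_max_sge_needed(struct xdr_buf *xdr)
{
/* one SGE per page of payload, plus head, tail and RPCRDMA header */
return (xdr->len / PAGE_SIZE) + 3;
}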
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
struct xdr_buf *xdr,
struct svc_rdma_req_map *vec)
{
int sge_no;
u32 sge_bytes;
u32 page_bytes;
u32 page_off;
int page_no = 0;
u8 *frva;
struct svc_rdma_fastreg_mr *frmr;
frmr = svc_rdma_get_frmr(xprt);
if (IS_ERR(frmr))
return -ENOMEM;
vec->frmr = frmr;
/* Skip the RPCRDMA header */
sge_no = 1;
/* Map the head. */
frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
vec->count = 2;
sge_no++;
/* Map the XDR head */
frmr->kva = frva;
frmr->direction = DMA_TO_DEVICE;
frmr->access_flags = 0;
frmr->map_len = PAGE_SIZE;
frmr->page_list_len = 1;
page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
frmr->page_list->page_list[page_no] =
ib_dma_map_page(xprt->sc_cm_id->device,
virt_to_page(xdr->head[0].iov_base),
page_off,
PAGE_SIZE - page_off,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
atomic_inc(&xprt->sc_dma_used);
/* Map the XDR page list */
page_off = xdr->page_base;
page_bytes = xdr->page_len + page_off;
if (!page_bytes)
goto encode_tail;
/* Map the pages */
vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
vec->sge[sge_no].iov_len = page_bytes;
sge_no++;
while (page_bytes) {
struct page *page;
page = xdr->pages[page_no++];
sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
page_bytes -= sge_bytes;
frmr->page_list->page_list[page_no] =
ib_dma_map_page(xprt->sc_cm_id->device,
page, page_off,
sge_bytes, DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
atomic_inc(&xprt->sc_dma_used);
page_off = 0; /* reset for next time through loop */
frmr->map_len += PAGE_SIZE;
frmr->page_list_len++;
}
vec->count++;
encode_tail:
/* Map tail */
if (0 == xdr->tail[0].iov_len)
goto done;
vec->count++;
vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
/*
* If head and tail use the same page, we don't need
* to map it again.
*/
vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
} else {
void *va;
/* Map another page for the tail */
page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
frmr->page_list->page_list[page_no] =
ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
page_off,
PAGE_SIZE,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
atomic_inc(&xprt->sc_dma_used);
frmr->map_len += PAGE_SIZE;
frmr->page_list_len++;
}
done:
if (svc_rdma_fastreg(xprt, frmr))
goto fatal_err;
return 0;
fatal_err:
printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
vec->frmr = NULL;
svc_rdma_put_frmr(xprt, frmr);
return -EIO;
}
static int map_xdr(struct svcxprt_rdma *xprt,
struct xdr_buf *xdr,
struct svc_rdma_req_map *vec)
{
int sge_no;
u32 sge_bytes;
u32 page_bytes;
u32 page_off;
int page_no;
BUG_ON(xdr->len !=
(xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));
if (xprt->sc_frmr_pg_list_len)
return fast_reg_xdr(xprt, xdr, vec);
/* Skip the first sge, this is for the RPCRDMA header */
sge_no = 1;
/* Head SGE */
vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
sge_no++;
/* pages SGE */
page_no = 0;
page_bytes = xdr->page_len;
page_off = xdr->page_base;
while (page_bytes) {
vec->sge[sge_no].iov_base =
page_address(xdr->pages[page_no]) + page_off;
sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
page_bytes -= sge_bytes;
vec->sge[sge_no].iov_len = sge_bytes;
sge_no++;
page_no++;
page_off = 0; /* reset for next time through loop */
}
/* Tail SGE */
if (xdr->tail[0].iov_len) {
vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
sge_no++;
}
dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
"page_base %u page_len %u head_len %zu tail_len %zu\n",
sge_no, page_no, xdr->page_base, xdr->page_len,
xdr->head[0].iov_len, xdr->tail[0].iov_len);
vec->count = sge_no;
return 0;
}
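/*
 * Worked example for map_xdr() above (illustrative, hypothetical sizes):
 * a reply with head_len = 100, page_len = 8192 and tail_len = 4 on a 4KB
 * page maps to sge[1] = head, sge[2] = pages[0] (4096 bytes),
 * sge[3] = pages[1] (4096 bytes), sge[4] = tail, and vec->count = 5.
 * sge[0] stays reserved for the RPCRDMA header.
 */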
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
struct xdr_buf *xdr,
u32 xdr_off, size_t len, int dir)
{
struct page *page;
dma_addr_t dma_addr;
if (xdr_off < xdr->head[0].iov_len) {
/* This offset is in the head */
xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
page = virt_to_page(xdr->head[0].iov_base);
} else {
xdr_off -= xdr->head[0].iov_len;
if (xdr_off < xdr->page_len) {
/* This offset is in the page list */
page = xdr->pages[xdr_off >> PAGE_SHIFT];
xdr_off &= ~PAGE_MASK;
} else {
/* This offset is in the tail */
xdr_off -= xdr->page_len;
xdr_off += (unsigned long)
xdr->tail[0].iov_base & ~PAGE_MASK;
page = virt_to_page(xdr->tail[0].iov_base);
}
}
dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
min_t(size_t, PAGE_SIZE, len), dir);
return dma_addr;
}
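/*
 * Worked example for dma_map_xdr() above (illustrative, hypothetical
 * sizes): with head_len = 100 and page_len = 8192 on a 4KB page,
 * xdr_off = 50 maps into the head page, xdr_off = 100 maps to byte 0 of
 * pages[0], xdr_off = 5000 maps to byte 804 of pages[1], and
 * xdr_off = 8292 falls through to the tail.
 */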
/* Assumptions:
* - We are using FRMR
* - or -
* - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
*/
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
u32 rmr, u64 to,
u32 xdr_off, int write_len,
struct svc_rdma_req_map *vec)
{
struct ib_send_wr write_wr;
struct ib_sge *sge;
int xdr_sge_no;
int sge_no;
int sge_bytes;
int sge_off;
int bc;
struct svc_rdma_op_ctxt *ctxt;
BUG_ON(vec->count > RPCSVC_MAXPAGES);
dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
"write_len=%d, vec->sge=%p, vec->count=%lu\n",
rmr, (unsigned long long)to, xdr_off,
write_len, vec->sge, vec->count);
ctxt = svc_rdma_get_context(xprt);
ctxt->direction = DMA_TO_DEVICE;
sge = ctxt->sge;
/* Find the SGE associated with xdr_off */
for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
xdr_sge_no++) {
if (vec->sge[xdr_sge_no].iov_len > bc)
break;
bc -= vec->sge[xdr_sge_no].iov_len;
}
sge_off = bc;
bc = write_len;
sge_no = 0;
/* Copy the remaining SGE */
while (bc != 0) {
sge_bytes = min_t(size_t,
bc, vec->sge[xdr_sge_no].iov_len-sge_off);
sge[sge_no].length = sge_bytes;
if (!vec->frmr) {
sge[sge_no].addr =
dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
sge_bytes, DMA_TO_DEVICE);
xdr_off += sge_bytes;
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
sge[sge_no].addr))
goto err;
atomic_inc(&xprt->sc_dma_used);
sge[sge_no].lkey = xprt->sc_dma_lkey;
} else {
sge[sge_no].addr = (unsigned long)
vec->sge[xdr_sge_no].iov_base + sge_off;
sge[sge_no].lkey = vec->frmr->mr->lkey;
}
ctxt->count++;
ctxt->frmr = vec->frmr;
sge_off = 0;
sge_no++;
xdr_sge_no++;
BUG_ON(xdr_sge_no > vec->count);
bc -= sge_bytes;
}
/* Prepare WRITE WR */
memset(&write_wr, 0, sizeof write_wr);
ctxt->wr_op = IB_WR_RDMA_WRITE;
write_wr.wr_id = (unsigned long)ctxt;
write_wr.sg_list = &sge[0];
write_wr.num_sge = sge_no;
write_wr.opcode = IB_WR_RDMA_WRITE;
write_wr.send_flags = IB_SEND_SIGNALED;
write_wr.wr.rdma.rkey = rmr;
write_wr.wr.rdma.remote_addr = to;
/* Post It */
atomic_inc(&rdma_stat_write);
if (svc_rdma_send(xprt, &write_wr))
goto err;
return 0;
err:
svc_rdma_unmap_dma(ctxt);
svc_rdma_put_frmr(xprt, vec->frmr);
svc_rdma_put_context(ctxt, 0);
/* Fatal error, close transport */
return -EIO;
}
static int send_write_chunks(struct svcxprt_rdma *xprt,
struct rpcrdma_msg *rdma_argp,
struct rpcrdma_msg *rdma_resp,
struct svc_rqst *rqstp,
struct svc_rdma_req_map *vec)
{
u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
int write_len;
int max_write;
u32 xdr_off;
int chunk_off;
int chunk_no;
struct rpcrdma_write_array *arg_ary;
struct rpcrdma_write_array *res_ary;
int ret;
arg_ary = svc_rdma_get_write_array(rdma_argp);
if (!arg_ary)
return 0;
res_ary = (struct rpcrdma_write_array *)
&rdma_resp->rm_body.rm_chunks[1];
if (vec->frmr)
max_write = vec->frmr->map_len;
else
max_write = xprt->sc_max_sge * PAGE_SIZE;
/* Write chunks start at the pagelist */
for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
xfer_len && chunk_no < ntohl(arg_ary->wc_nchunks);
chunk_no++) {
struct rpcrdma_segment *arg_ch;
u64 rs_offset;
arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
write_len = min(xfer_len, ntohl(arg_ch->rs_length));
/* Prepare the response chunk given the length actually
* written */
xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
arg_ch->rs_handle,
arg_ch->rs_offset,
write_len);
chunk_off = 0;
while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp,
ntohl(arg_ch->rs_handle),
rs_offset + chunk_off,
xdr_off,
this_write,
vec);
if (ret) {
dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
ret);
return -EIO;
}
chunk_off += this_write;
xdr_off += this_write;
xfer_len -= this_write;
write_len -= this_write;
}
}
/* Update the req with the number of chunks actually used */
svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);
return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}
static int send_reply_chunks(struct svcxprt_rdma *xprt,
struct rpcrdma_msg *rdma_argp,
struct rpcrdma_msg *rdma_resp,
struct svc_rqst *rqstp,
struct svc_rdma_req_map *vec)
{
u32 xfer_len = rqstp->rq_res.len;
int write_len;
int max_write;
u32 xdr_off;
int chunk_no;
int chunk_off;
int nchunks;
struct rpcrdma_segment *ch;
struct rpcrdma_write_array *arg_ary;
struct rpcrdma_write_array *res_ary;
int ret;
arg_ary = svc_rdma_get_reply_array(rdma_argp);
if (!arg_ary)
return 0;
/* XXX: need to fix when reply lists occur with read-list and/or
* write-list */
res_ary = (struct rpcrdma_write_array *)
&rdma_resp->rm_body.rm_chunks[2];
if (vec->frmr)
max_write = vec->frmr->map_len;
else
max_write = xprt->sc_max_sge * PAGE_SIZE;
/* xdr offset starts at RPC message */
nchunks = ntohl(arg_ary->wc_nchunks);
for (xdr_off = 0, chunk_no = 0;
xfer_len && chunk_no < nchunks;
chunk_no++) {
u64 rs_offset;
ch = &arg_ary->wc_array[chunk_no].wc_target;
write_len = min(xfer_len, ntohl(ch->rs_length));
/* Prepare the reply chunk given the length actually
* written */
xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
ch->rs_handle, ch->rs_offset,
write_len);
chunk_off = 0;
while (write_len) {
int this_write;
this_write = min(write_len, max_write);
ret = send_write(xprt, rqstp,
ntohl(ch->rs_handle),
rs_offset + chunk_off,
xdr_off,
this_write,
vec);
if (ret) {
dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
ret);
return -EIO;
}
chunk_off += this_write;
xdr_off += this_write;
xfer_len -= this_write;
write_len -= this_write;
}
}
/* Update the req with the number of chunks actually used */
svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);
return rqstp->rq_res.len;
}
/* This function prepares the portion of the RPCRDMA message to be
* sent in the RDMA_SEND. This function is called after data sent via
* RDMA has already been transmitted. There are three cases:
* - The RPCRDMA header, RPC header, and payload are all sent in a
* single RDMA_SEND. This is the "inline" case.
* - The RPCRDMA header and some portion of the RPC header and data
* are sent via this RDMA_SEND and another portion of the data is
* sent via RDMA.
* - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
* header and data are all transmitted via RDMA.
* In all three cases, this function prepares the RPCRDMA header in
* sge[0], the 'type' parameter indicates the type to place in the
* RPCRDMA header, and the 'byte_count' field indicates how much of
* the XDR to include in this RDMA_SEND. NB: The offset of the payload
* to send is zero in the XDR.
*/
static int send_reply(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
struct page *page,
struct rpcrdma_msg *rdma_resp,
struct svc_rdma_op_ctxt *ctxt,
struct svc_rdma_req_map *vec,
int byte_count)
{
struct ib_send_wr send_wr;
struct ib_send_wr inv_wr;
int sge_no;
int sge_bytes;
int page_no;
int ret;
/* Post a recv buffer to handle another request. */
ret = svc_rdma_post_recv(rdma);
if (ret) {
printk(KERN_INFO
"svcrdma: could not post a receive buffer, err=%d."
"Closing transport %p.\n", ret, rdma);
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_rdma_put_frmr(rdma, vec->frmr);
svc_rdma_put_context(ctxt, 0);
return -ENOTCONN;
}
/* Prepare the context */
ctxt->pages[0] = page;
ctxt->count = 1;
ctxt->frmr = vec->frmr;
if (vec->frmr)
set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
else
clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
/* Prepare the SGE for the RPCRDMA Header */
ctxt->sge[0].lkey = rdma->sc_dma_lkey;
ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
ctxt->sge[0].addr =
ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
ctxt->sge[0].length, DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->direction = DMA_TO_DEVICE;
/* Map the payload indicated by 'byte_count' */
for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
int xdr_off = 0;
sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
byte_count -= sge_bytes;
if (!vec->frmr) {
ctxt->sge[sge_no].addr =
dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
sge_bytes, DMA_TO_DEVICE);
xdr_off += sge_bytes;
if (ib_dma_mapping_error(rdma->sc_cm_id->device,
ctxt->sge[sge_no].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
} else {
ctxt->sge[sge_no].addr = (unsigned long)
vec->sge[sge_no].iov_base;
ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
}
ctxt->sge[sge_no].length = sge_bytes;
}
BUG_ON(byte_count != 0);
/* Save all respages in the ctxt and remove them from the
* respages array. They are our pages until the I/O
* completes.
*/
for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
ctxt->count++;
rqstp->rq_respages[page_no] = NULL;
/*
* If there are more pages than SGE, terminate SGE
* list so that svc_rdma_unmap_dma doesn't attempt to
* unmap garbage.
*/
if (page_no+1 >= sge_no)
ctxt->sge[page_no+1].length = 0;
}
BUG_ON(sge_no > rdma->sc_max_sge);
memset(&send_wr, 0, sizeof send_wr);
ctxt->wr_op = IB_WR_SEND;
send_wr.wr_id = (unsigned long)ctxt;
send_wr.sg_list = ctxt->sge;
send_wr.num_sge = sge_no;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
if (vec->frmr) {
/* Prepare INVALIDATE WR */
memset(&inv_wr, 0, sizeof inv_wr);
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.send_flags = IB_SEND_SIGNALED;
inv_wr.ex.invalidate_rkey =
vec->frmr->mr->lkey;
send_wr.next = &inv_wr;
}
ret = svc_rdma_send(rdma, &send_wr);
if (ret)
goto err;
return 0;
err:
svc_rdma_unmap_dma(ctxt);
svc_rdma_put_frmr(rdma, vec->frmr);
svc_rdma_put_context(ctxt, 1);
return -EIO;
}
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
/*
* Return the start of an xdr buffer.
*/
static void *xdr_start(struct xdr_buf *xdr)
{
return xdr->head[0].iov_base -
(xdr->len -
xdr->page_len -
xdr->tail[0].iov_len -
xdr->head[0].iov_len);
}
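/*
 * Illustrative note, not in the original file: rq_arg.head[0].iov_base
 * was advanced past the RPCRDMA header during receive processing while
 * rq_arg.len still counts it, so len - page_len - tail_len - head_len
 * is exactly the number of header bytes to step back over. E.g. with
 * hypothetical values len = 1124, head_len = 100, page_len = 1000 and
 * tail_len = 0, xdr_start() returns head[0].iov_base - 24.
 */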
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
struct svc_xprt *xprt = rqstp->rq_xprt;
struct svcxprt_rdma *rdma =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
struct rpcrdma_msg *rdma_argp;
struct rpcrdma_msg *rdma_resp;
struct rpcrdma_write_array *reply_ary;
enum rpcrdma_proc reply_type;
int ret;
int inline_bytes;
struct page *res_page;
struct svc_rdma_op_ctxt *ctxt;
struct svc_rdma_req_map *vec;
dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
/* Get the RDMA request header. */
rdma_argp = xdr_start(&rqstp->rq_arg);
/* Build an req vec for the XDR */
ctxt = svc_rdma_get_context(rdma);
ctxt->direction = DMA_TO_DEVICE;
vec = svc_rdma_get_req_map();
ret = map_xdr(rdma, &rqstp->rq_res, vec);
if (ret)
goto err0;
inline_bytes = rqstp->rq_res.len;
/* Create the RDMA response header */
res_page = svc_rdma_get_page();
rdma_resp = page_address(res_page);
reply_ary = svc_rdma_get_reply_array(rdma_argp);
if (reply_ary)
reply_type = RDMA_NOMSG;
else
reply_type = RDMA_MSG;
svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
rdma_resp, reply_type);
/* Send any write-chunk data and build resp write-list */
ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
rqstp, vec);
if (ret < 0) {
printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
ret);
goto err1;
}
inline_bytes -= ret;
/* Send any reply-list data and update resp reply-list */
ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
rqstp, vec);
if (ret < 0) {
printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
ret);
goto err1;
}
inline_bytes -= ret;
ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
inline_bytes);
svc_rdma_put_req_map(vec);
dprintk("svcrdma: send_reply returns %d\n", ret);
return ret;
err1:
put_page(res_page);
err0:
svc_rdma_put_req_map(vec);
svc_rdma_put_context(ctxt, 0);
return ret;
}
| gpl-2.0 |
boa19861105/BOA_Eye_M6.0_Kernel | fs/ocfs2/cluster/tcp.c | 5018 | 59577 | /* -*- mode: c; c-basic-offset: 8; -*-
*
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* Copyright (C) 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* ----
*
* Callers for this were originally written against a very simple synchronous
* API. This implementation reflects those simple callers. Some day I'm sure
* we'll need to move to a more robust posting/callback mechanism.
*
* Transmit calls pass in kernel virtual addresses and block copying this into
* the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
* for a failed socket to time out. TX callers can also pass in a pointer to an
* 'int' which gets filled with an errno off the wire in response to the
* message they send.
*
* Handlers for unsolicited messages are registered. Each socket has a page
* that incoming data is copied into. First the header, then the data.
* Handlers are called from only one thread with a reference to this per-socket
* page. This page is destroyed after the handler call, so it can't be
* referenced beyond the call. Handlers may block but are discouraged from
* doing so.
*
* Any framing errors (bad magic, large payload lengths) close a connection.
*
* Our sock_container holds the state we associate with a socket. Its current
* framing state is held there as well as the refcounting we do around when it
* is safe to tear down the socket. The socket is only finally torn down from
* the container when the container loses all of its references -- so as long
* as you hold a ref on the container you can trust that the socket is valid
* for use with kernel socket APIs.
*
* Connections are initiated between a pair of nodes when the node with the
* higher node number gets a heartbeat callback which indicates that the lower
* numbered node has started heartbeating. The lower numbered node is passive
* and only accepts the connection if the higher numbered node is heartbeating.
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/net.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <asm/uaccess.h>
#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_TCP
#include "masklog.h"
#include "quorum.h"
#include "tcp_internal.h"
#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
&sc->sc_node->nd_ipv4_address, \
ntohs(sc->sc_node->nd_ipv4_port)
/*
* In the following two log macros, the whitespace after the ',' just
* before ##args is intentional. Otherwise, gcc 2.95 will eat the
* previous token if args expands to nothing.
*/
#define msglog(hdr, fmt, args...) do { \
typeof(hdr) __hdr = (hdr); \
mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
"key %08x num %u] " fmt, \
be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
be32_to_cpu(__hdr->msg_num) , ##args); \
} while (0)
#define sclog(sc, fmt, args...) do { \
typeof(sc) __sc = (sc); \
mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
"pg_off %zu] " fmt, __sc, \
atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
__sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
##args); \
} while (0)
static DEFINE_RWLOCK(o2net_handler_lock);
static struct rb_root o2net_handler_tree = RB_ROOT;
static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
/* XXX someday we'll need better accounting */
static struct socket *o2net_listen_sock = NULL;
/*
* listen work is only queued by the listening socket callbacks on the
* o2net_wq. teardown detaches the callbacks before destroying the workqueue.
* quorum work is queued as sock containers are shut down. stop_listening
* tears down all the node's sock containers, preventing future shutdowns
* and queued quorum work, before canceling delayed quorum work and
* destroying the work queue.
*/
static struct workqueue_struct *o2net_wq;
static struct work_struct o2net_listen_work;
static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
#define O2NET_HB_PRI 0x1
static struct o2net_handshake *o2net_hand;
static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
static int o2net_sys_err_translations[O2NET_ERR_MAX] =
{[O2NET_ERR_NONE] = 0,
[O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
[O2NET_ERR_OVERFLOW] = -EOVERFLOW,
[O2NET_ERR_DIED] = -EHOSTDOWN,};
/* can't quite avoid *all* internal declarations :/ */
static void o2net_sc_connect_completed(struct work_struct *work);
static void o2net_rx_until_empty(struct work_struct *work);
static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk, int bytes);
static void o2net_sc_send_keep_req(struct work_struct *work);
static void o2net_idle_timer(unsigned long data);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
#ifdef CONFIG_DEBUG_FS
static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
u32 msgkey, struct task_struct *task, u8 node)
{
INIT_LIST_HEAD(&nst->st_net_debug_item);
nst->st_task = task;
nst->st_msg_type = msgtype;
nst->st_msg_key = msgkey;
nst->st_node = node;
}
static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
{
nst->st_sock_time = ktime_get();
}
static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
{
nst->st_send_time = ktime_get();
}
static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
{
nst->st_status_time = ktime_get();
}
static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
nst->st_sc = sc;
}
static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
u32 msg_id)
{
nst->st_id = msg_id;
}
static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
{
sc->sc_tv_timer = ktime_get();
}
static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
{
sc->sc_tv_data_ready = ktime_get();
}
static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
{
sc->sc_tv_advance_start = ktime_get();
}
static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
{
sc->sc_tv_advance_stop = ktime_get();
}
static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
{
sc->sc_tv_func_start = ktime_get();
}
static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
{
sc->sc_tv_func_stop = ktime_get();
}
#else /* CONFIG_DEBUG_FS */
# define o2net_init_nst(a, b, c, d, e)
# define o2net_set_nst_sock_time(a)
# define o2net_set_nst_send_time(a)
# define o2net_set_nst_status_time(a)
# define o2net_set_nst_sock_container(a, b)
# define o2net_set_nst_msg_id(a, b)
# define o2net_set_sock_timer(a)
# define o2net_set_data_ready_time(a)
# define o2net_set_advance_start_time(a)
# define o2net_set_advance_stop_time(a)
# define o2net_set_func_start_time(a)
# define o2net_set_func_stop_time(a)
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_OCFS2_FS_STATS
static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
{
return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
}
static void o2net_update_send_stats(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
ktime_sub(ktime_get(),
nst->st_status_time));
sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
ktime_sub(nst->st_status_time,
nst->st_send_time));
sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
ktime_sub(nst->st_send_time,
nst->st_sock_time));
sc->sc_send_count++;
}
static void o2net_update_recv_stats(struct o2net_sock_container *sc)
{
sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
o2net_get_func_run_time(sc));
sc->sc_recv_count++;
}
#else
# define o2net_update_send_stats(a, b)
# define o2net_update_recv_stats(sc)
#endif /* CONFIG_OCFS2_FS_STATS */
static inline int o2net_reconnect_delay(void)
{
return o2nm_single_cluster->cl_reconnect_delay_ms;
}
static inline int o2net_keepalive_delay(void)
{
return o2nm_single_cluster->cl_keepalive_delay_ms;
}
static inline int o2net_idle_timeout(void)
{
return o2nm_single_cluster->cl_idle_timeout_ms;
}
static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
{
int trans;
BUG_ON(err >= O2NET_ERR_MAX);
trans = o2net_sys_err_translations[err];
/* Just in case we mess up the translation table above */
BUG_ON(err != O2NET_ERR_NONE && trans == 0);
return trans;
}
static struct o2net_node * o2net_nn_from_num(u8 node_num)
{
BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes));
return &o2net_nodes[node_num];
}
static u8 o2net_num_from_nn(struct o2net_node *nn)
{
BUG_ON(nn == NULL);
return nn - o2net_nodes;
}
/* ------------------------------------------------------------ */
static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
{
int ret = 0;
do {
if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
ret = -EAGAIN;
break;
}
spin_lock(&nn->nn_lock);
ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
if (ret == 0)
list_add_tail(&nsw->ns_node_item,
&nn->nn_status_list);
spin_unlock(&nn->nn_lock);
} while (ret == -EAGAIN);
if (ret == 0) {
init_waitqueue_head(&nsw->ns_wq);
nsw->ns_sys_status = O2NET_ERR_NONE;
nsw->ns_status = 0;
}
return ret;
}
static void o2net_complete_nsw_locked(struct o2net_node *nn,
struct o2net_status_wait *nsw,
enum o2net_system_error sys_status,
s32 status)
{
assert_spin_locked(&nn->nn_lock);
if (!list_empty(&nsw->ns_node_item)) {
list_del_init(&nsw->ns_node_item);
nsw->ns_sys_status = sys_status;
nsw->ns_status = status;
idr_remove(&nn->nn_status_idr, nsw->ns_id);
wake_up(&nsw->ns_wq);
}
}
static void o2net_complete_nsw(struct o2net_node *nn,
struct o2net_status_wait *nsw,
u64 id, enum o2net_system_error sys_status,
s32 status)
{
spin_lock(&nn->nn_lock);
if (nsw == NULL) {
if (id > INT_MAX)
goto out;
nsw = idr_find(&nn->nn_status_idr, id);
if (nsw == NULL)
goto out;
}
o2net_complete_nsw_locked(nn, nsw, sys_status, status);
out:
spin_unlock(&nn->nn_lock);
return;
}
static void o2net_complete_nodes_nsw(struct o2net_node *nn)
{
struct o2net_status_wait *nsw, *tmp;
unsigned int num_kills = 0;
assert_spin_locked(&nn->nn_lock);
list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
num_kills++;
}
mlog(0, "completed %d messages for node %u\n", num_kills,
o2net_num_from_nn(nn));
}
static int o2net_nsw_completed(struct o2net_node *nn,
struct o2net_status_wait *nsw)
{
int completed;
spin_lock(&nn->nn_lock);
completed = list_empty(&nsw->ns_node_item);
spin_unlock(&nn->nn_lock);
return completed;
}
/* ------------------------------------------------------------ */
static void sc_kref_release(struct kref *kref)
{
struct o2net_sock_container *sc = container_of(kref,
struct o2net_sock_container, sc_kref);
BUG_ON(timer_pending(&sc->sc_idle_timeout));
sclog(sc, "releasing\n");
if (sc->sc_sock) {
sock_release(sc->sc_sock);
sc->sc_sock = NULL;
}
o2nm_undepend_item(&sc->sc_node->nd_item);
o2nm_node_put(sc->sc_node);
sc->sc_node = NULL;
o2net_debug_del_sc(sc);
kfree(sc);
}
static void sc_put(struct o2net_sock_container *sc)
{
sclog(sc, "put\n");
kref_put(&sc->sc_kref, sc_kref_release);
}
static void sc_get(struct o2net_sock_container *sc)
{
sclog(sc, "get\n");
kref_get(&sc->sc_kref);
}
static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
{
struct o2net_sock_container *sc, *ret = NULL;
struct page *page = NULL;
int status = 0;
page = alloc_page(GFP_NOFS);
sc = kzalloc(sizeof(*sc), GFP_NOFS);
if (sc == NULL || page == NULL)
goto out;
kref_init(&sc->sc_kref);
o2nm_node_get(node);
sc->sc_node = node;
/* pin the node item of the remote node */
status = o2nm_depend_item(&node->nd_item);
if (status) {
mlog_errno(status);
o2nm_node_put(node);
goto out;
}
INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
init_timer(&sc->sc_idle_timeout);
sc->sc_idle_timeout.function = o2net_idle_timer;
sc->sc_idle_timeout.data = (unsigned long)sc;
sclog(sc, "alloced\n");
ret = sc;
sc->sc_page = page;
o2net_debug_add_sc(sc);
sc = NULL;
page = NULL;
out:
if (page)
__free_page(page);
kfree(sc);
return ret;
}
/* ------------------------------------------------------------ */
static void o2net_sc_queue_work(struct o2net_sock_container *sc,
struct work_struct *work)
{
sc_get(sc);
if (!queue_work(o2net_wq, work))
sc_put(sc);
}
static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
struct delayed_work *work,
int delay)
{
sc_get(sc);
if (!queue_delayed_work(o2net_wq, work, delay))
sc_put(sc);
}
static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
struct delayed_work *work)
{
if (cancel_delayed_work(work))
sc_put(sc);
}
static atomic_t o2net_connected_peers = ATOMIC_INIT(0);
int o2net_num_connected_peers(void)
{
return atomic_read(&o2net_connected_peers);
}
static void o2net_set_nn_state(struct o2net_node *nn,
struct o2net_sock_container *sc,
unsigned valid, int err)
{
int was_valid = nn->nn_sc_valid;
int was_err = nn->nn_persistent_error;
struct o2net_sock_container *old_sc = nn->nn_sc;
assert_spin_locked(&nn->nn_lock);
if (old_sc && !sc)
atomic_dec(&o2net_connected_peers);
else if (!old_sc && sc)
atomic_inc(&o2net_connected_peers);
/* the node num comparison and single connect/accept path should stop
* a non-null sc from being overwritten with another */
BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
if (was_valid && !valid && err == 0)
err = -ENOTCONN;
mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
nn->nn_persistent_error, err);
nn->nn_sc = sc;
nn->nn_sc_valid = valid ? 1 : 0;
nn->nn_persistent_error = err;
/* mirrors o2net_tx_can_proceed() */
if (nn->nn_persistent_error || nn->nn_sc_valid)
wake_up(&nn->nn_sc_wq);
if (!was_err && nn->nn_persistent_error) {
o2quo_conn_err(o2net_num_from_nn(nn));
queue_delayed_work(o2net_wq, &nn->nn_still_up,
msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
}
if (was_valid && !valid) {
printk(KERN_NOTICE "o2net: No longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
if (!was_valid && valid) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_connect_expired);
printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
"Connected to" : "Accepted connection from",
SC_NODEF_ARGS(sc));
}
/* trigger the connecting worker func as long as we're not valid,
* it will back off if it shouldn't connect. This can be called
* from node config teardown and so needs to be careful about
* the work queue actually being up. */
if (!valid && o2net_wq) {
unsigned long delay;
/* delay if we're within a RECONNECT_DELAY of the
* last attempt */
delay = (nn->nn_last_connect_attempt +
msecs_to_jiffies(o2net_reconnect_delay()))
- jiffies;
if (delay > msecs_to_jiffies(o2net_reconnect_delay()))
delay = 0;
mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
/*
* Delay the expired work after idle timeout.
*
* We might have lots of failed connection attempts that run
* through here but we only cancel the connect_expired work when
* a connection attempt succeeds. So only the first enqueue of
* the connect_expired work will do anything. The rest will see
* that it's already queued and do nothing.
*/
delay += msecs_to_jiffies(o2net_idle_timeout());
queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay);
}
/* keep track of the nn's sc ref for the caller */
if ((old_sc == NULL) && sc)
sc_get(sc);
if (old_sc && (old_sc != sc)) {
o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
sc_put(old_sc);
}
}
/* see o2net_register_callbacks() */
static void o2net_data_ready(struct sock *sk, int bytes)
{
void (*ready)(struct sock *sk, int bytes);
read_lock(&sk->sk_callback_lock);
if (sk->sk_user_data) {
struct o2net_sock_container *sc = sk->sk_user_data;
sclog(sc, "data_ready hit\n");
o2net_set_data_ready_time(sc);
o2net_sc_queue_work(sc, &sc->sc_rx_work);
ready = sc->sc_data_ready;
} else {
ready = sk->sk_data_ready;
}
read_unlock(&sk->sk_callback_lock);
ready(sk, bytes);
}
/* see o2net_register_callbacks() */
static void o2net_state_change(struct sock *sk)
{
void (*state_change)(struct sock *sk);
struct o2net_sock_container *sc;
read_lock(&sk->sk_callback_lock);
sc = sk->sk_user_data;
if (sc == NULL) {
state_change = sk->sk_state_change;
goto out;
}
sclog(sc, "state_change to %d\n", sk->sk_state);
state_change = sc->sc_state_change;
switch(sk->sk_state) {
/* ignore connecting sockets as they make progress */
case TCP_SYN_SENT:
case TCP_SYN_RECV:
break;
case TCP_ESTABLISHED:
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
" shutdown, state %d\n",
SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
break;
}
out:
read_unlock(&sk->sk_callback_lock);
state_change(sk);
}
/*
* we register callbacks so we can queue work on events before calling
* the original callbacks. our callbacks are careful to test user_data
* to discover when they've raced with o2net_unregister_callbacks().
*/
static void o2net_register_callbacks(struct sock *sk,
struct o2net_sock_container *sc)
{
write_lock_bh(&sk->sk_callback_lock);
/* accepted sockets inherit the old listen socket data ready */
if (sk->sk_data_ready == o2net_listen_data_ready) {
sk->sk_data_ready = sk->sk_user_data;
sk->sk_user_data = NULL;
}
BUG_ON(sk->sk_user_data != NULL);
sk->sk_user_data = sc;
sc_get(sc);
sc->sc_data_ready = sk->sk_data_ready;
sc->sc_state_change = sk->sk_state_change;
sk->sk_data_ready = o2net_data_ready;
sk->sk_state_change = o2net_state_change;
mutex_init(&sc->sc_send_lock);
write_unlock_bh(&sk->sk_callback_lock);
}
static int o2net_unregister_callbacks(struct sock *sk,
struct o2net_sock_container *sc)
{
int ret = 0;
write_lock_bh(&sk->sk_callback_lock);
if (sk->sk_user_data == sc) {
ret = 1;
sk->sk_user_data = NULL;
sk->sk_data_ready = sc->sc_data_ready;
sk->sk_state_change = sc->sc_state_change;
}
write_unlock_bh(&sk->sk_callback_lock);
return ret;
}
/*
* this is a little helper that is called by callers who have seen a problem
* with an sc and want to detach it from the nn if someone already hasn't beat
* them to it. if an error is given then the shutdown will be persistent
* and pending transmits will be canceled.
*/
static void o2net_ensure_shutdown(struct o2net_node *nn,
struct o2net_sock_container *sc,
int err)
{
spin_lock(&nn->nn_lock);
if (nn->nn_sc == sc)
o2net_set_nn_state(nn, NULL, 0, err);
spin_unlock(&nn->nn_lock);
}
/*
* This work queue function performs the blocking parts of socket shutdown. A
* few paths lead here. set_nn_state will trigger this callback if it sees an
* sc detached from the nn. state_change will also trigger this callback
* directly when it sees errors. In that case we need to call set_nn_state
* ourselves as state_change couldn't get the nn_lock and call set_nn_state
* itself.
*/
static void o2net_shutdown_sc(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_shutdown_work);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "shutting down\n");
/* drop the callbacks ref and call shutdown only once */
if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
/* we shouldn't flush as we're in the thread, the
* races with pending sc work structs are harmless */
del_timer_sync(&sc->sc_idle_timeout);
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
sc_put(sc);
kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
}
/* not fatal so failed connects before the other guy has our
* heartbeat can be retried */
o2net_ensure_shutdown(nn, sc, 0);
sc_put(sc);
}
/* ------------------------------------------------------------ */
static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type,
u32 key)
{
int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
if (ret == 0)
ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
return ret;
}
static struct o2net_msg_handler *
o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
struct rb_node **ret_parent)
{
struct rb_node **p = &o2net_handler_tree.rb_node;
struct rb_node *parent = NULL;
struct o2net_msg_handler *nmh, *ret = NULL;
int cmp;
while (*p) {
parent = *p;
nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
cmp = o2net_handler_cmp(nmh, msg_type, key);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
p = &(*p)->rb_right;
else {
ret = nmh;
break;
}
}
if (ret_p != NULL)
*ret_p = p;
if (ret_parent != NULL)
*ret_parent = parent;
return ret;
}
static void o2net_handler_kref_release(struct kref *kref)
{
struct o2net_msg_handler *nmh;
nmh = container_of(kref, struct o2net_msg_handler, nh_kref);
kfree(nmh);
}
static void o2net_handler_put(struct o2net_msg_handler *nmh)
{
kref_put(&nmh->nh_kref, o2net_handler_kref_release);
}
/* max_len is protection for the handler func. incoming messages won't
* be given to the handler if their payload is longer than the max. */
int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
o2net_msg_handler_func *func, void *data,
o2net_post_msg_handler_func *post_func,
struct list_head *unreg_list)
{
struct o2net_msg_handler *nmh = NULL;
struct rb_node **p, *parent;
int ret = 0;
if (max_len > O2NET_MAX_PAYLOAD_BYTES) {
mlog(0, "max_len for message handler out of range: %u\n",
max_len);
ret = -EINVAL;
goto out;
}
if (!msg_type) {
mlog(0, "no message type provided: %u, %p\n", msg_type, func);
ret = -EINVAL;
goto out;
}
if (!func) {
mlog(0, "no message handler provided: %u, %p\n",
msg_type, func);
ret = -EINVAL;
goto out;
}
nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
if (nmh == NULL) {
ret = -ENOMEM;
goto out;
}
nmh->nh_func = func;
nmh->nh_func_data = data;
nmh->nh_post_func = post_func;
nmh->nh_msg_type = msg_type;
nmh->nh_max_len = max_len;
nmh->nh_key = key;
/* the tree and list get this ref.. they're both removed in
* unregister when this ref is dropped */
kref_init(&nmh->nh_kref);
INIT_LIST_HEAD(&nmh->nh_unregister_item);
write_lock(&o2net_handler_lock);
if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
ret = -EEXIST;
else {
rb_link_node(&nmh->nh_node, parent, p);
rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
list_add_tail(&nmh->nh_unregister_item, unreg_list);
mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
func, msg_type, key);
/* we've had some trouble with handlers seemingly vanishing. */
mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
&parent) == NULL,
"couldn't find handler we *just* registerd "
"for type %u key %08x\n", msg_type, key);
}
write_unlock(&o2net_handler_lock);
out:
if (ret)
kfree(nmh);
return ret;
}
EXPORT_SYMBOL_GPL(o2net_register_handler);
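/*
 * Illustrative usage sketch, not part of the original file; the message
 * type, key and handler below are hypothetical. Real callers pass their
 * subsystem's constants and keep the unregister list around for a later
 * o2net_unregister_handler_list() call.
 */
#if 0
static int example_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
/* the payload follows the header; the return value is sent back
* to the sender as the message status */
return 0;
}
static LIST_HEAD(example_unreg_list);
static int example_register(void)
{
return o2net_register_handler(0x1234 /* msg_type */, 0x5678 /* key */,
64 /* max payload */, example_handler,
NULL, NULL, &example_unreg_list);
}
#endif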
void o2net_unregister_handler_list(struct list_head *list)
{
struct o2net_msg_handler *nmh, *n;
write_lock(&o2net_handler_lock);
list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
rb_erase(&nmh->nh_node, &o2net_handler_tree);
list_del_init(&nmh->nh_unregister_item);
kref_put(&nmh->nh_kref, o2net_handler_kref_release);
}
write_unlock(&o2net_handler_lock);
}
EXPORT_SYMBOL_GPL(o2net_unregister_handler_list);
static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
{
struct o2net_msg_handler *nmh;
read_lock(&o2net_handler_lock);
nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL);
if (nmh)
kref_get(&nmh->nh_kref);
read_unlock(&o2net_handler_lock);
return nmh;
}
/* ------------------------------------------------------------ */
static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
{
int ret;
mm_segment_t oldfs;
struct kvec vec = {
.iov_len = len,
.iov_base = data,
};
struct msghdr msg = {
.msg_iovlen = 1,
.msg_iov = (struct iovec *)&vec,
.msg_flags = MSG_DONTWAIT,
};
oldfs = get_fs();
set_fs(get_ds());
ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
set_fs(oldfs);
return ret;
}
static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
size_t veclen, size_t total)
{
int ret;
mm_segment_t oldfs;
struct msghdr msg = {
.msg_iov = (struct iovec *)vec,
.msg_iovlen = veclen,
};
if (sock == NULL) {
ret = -EINVAL;
goto out;
}
oldfs = get_fs();
set_fs(get_ds());
ret = sock_sendmsg(sock, &msg, total);
set_fs(oldfs);
if (ret != total) {
mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
total);
if (ret >= 0)
ret = -EPIPE; /* should be smarter, I bet */
goto out;
}
ret = 0;
out:
if (ret < 0)
mlog(0, "returning error: %d\n", ret);
return ret;
}
static void o2net_sendpage(struct o2net_sock_container *sc,
void *kmalloced_virt,
size_t size)
{
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
ssize_t ret;
while (1) {
mutex_lock(&sc->sc_send_lock);
ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
virt_to_page(kmalloced_virt),
(long)kmalloced_virt & ~PAGE_MASK,
size, MSG_DONTWAIT);
mutex_unlock(&sc->sc_send_lock);
if (ret == size)
break;
if (ret == (ssize_t)-EAGAIN) {
mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
" returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
cond_resched();
continue;
}
mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
break;
}
}
static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
{
memset(msg, 0, sizeof(struct o2net_msg));
msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
msg->data_len = cpu_to_be16(data_len);
msg->msg_type = cpu_to_be16(msg_type);
msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
msg->status = 0;
msg->key = cpu_to_be32(key);
}
static int o2net_tx_can_proceed(struct o2net_node *nn,
struct o2net_sock_container **sc_ret,
int *error)
{
int ret = 0;
spin_lock(&nn->nn_lock);
if (nn->nn_persistent_error) {
ret = 1;
*sc_ret = NULL;
*error = nn->nn_persistent_error;
} else if (nn->nn_sc_valid) {
kref_get(&nn->nn_sc->sc_kref);
ret = 1;
*sc_ret = nn->nn_sc;
*error = 0;
}
spin_unlock(&nn->nn_lock);
return ret;
}
/* Get a map of all nodes to which this node is currently connected */
void o2net_fill_node_map(unsigned long *map, unsigned bytes)
{
struct o2net_sock_container *sc;
int node, ret;
BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
memset(map, 0, bytes);
for (node = 0; node < O2NM_MAX_NODES; ++node) {
/* ret is only set when o2net_tx_can_proceed() returns nonzero */
if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
continue;
if (!ret) {
set_bit(node, map);
sc_put(sc);
}
}
}
EXPORT_SYMBOL_GPL(o2net_fill_node_map);
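/*
 * Illustrative usage sketch, not part of the original file: walking the
 * connected-node map filled in above.
 */
#if 0
static void example_dump_peers(void)
{
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
int node;
o2net_fill_node_map(map, sizeof(map));
for_each_set_bit(node, map, O2NM_MAX_NODES)
printk(KERN_INFO "o2net: connected to node %d\n", node);
}
#endif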
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
int ret = 0;
struct o2net_msg *msg = NULL;
size_t veclen, caller_bytes = 0;
struct kvec *vec = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn = o2net_nn_from_num(target_node);
struct o2net_status_wait nsw = {
.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
};
struct o2net_send_tracking nst;
o2net_init_nst(&nst, msg_type, key, current, target_node);
if (o2net_wq == NULL) {
mlog(0, "attempt to tx without o2netd running\n");
ret = -ESRCH;
goto out;
}
if (caller_veclen == 0) {
mlog(0, "bad kvec array length\n");
ret = -EINVAL;
goto out;
}
caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
mlog(0, "total payload len %zu too large\n", caller_bytes);
ret = -EINVAL;
goto out;
}
if (target_node == o2nm_this_node()) {
ret = -ELOOP;
goto out;
}
o2net_debug_add_nst(&nst);
o2net_set_nst_sock_time(&nst);
wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret));
if (ret)
goto out;
o2net_set_nst_sock_container(&nst, sc);
veclen = caller_veclen + 1;
vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
if (vec == NULL) {
mlog(0, "failed to %zu element kvec!\n", veclen);
ret = -ENOMEM;
goto out;
}
msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
if (!msg) {
mlog(0, "failed to allocate a o2net_msg!\n");
ret = -ENOMEM;
goto out;
}
o2net_init_msg(msg, caller_bytes, msg_type, key);
vec[0].iov_len = sizeof(struct o2net_msg);
vec[0].iov_base = msg;
memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
ret = o2net_prep_nsw(nn, &nsw);
if (ret)
goto out;
msg->msg_num = cpu_to_be32(nsw.ns_id);
o2net_set_nst_msg_id(&nst, nsw.ns_id);
o2net_set_nst_send_time(&nst);
/* finally, convert the message header to network byte-order
* and send */
mutex_lock(&sc->sc_send_lock);
ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
sizeof(struct o2net_msg) + caller_bytes);
mutex_unlock(&sc->sc_send_lock);
msglog(msg, "sending returned %d\n", ret);
if (ret < 0) {
mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
goto out;
}
/* wait on other node's handler */
o2net_set_nst_status_time(&nst);
wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
o2net_update_send_stats(&nst, sc);
/* Note that we avoid overwriting the callers status return
* variable if a system error was reported on the other
* side. Callers beware. */
ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
if (status && !ret)
*status = nsw.ns_status;
mlog(0, "woken, returning system status %d, user status %d\n",
ret, nsw.ns_status);
out:
o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
if (sc)
sc_put(sc);
kfree(vec);
kfree(msg);
o2net_complete_nsw(nn, &nsw, 0, 0, 0);
return ret;
}
EXPORT_SYMBOL_GPL(o2net_send_message_vec);
int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
u8 target_node, int *status)
{
struct kvec vec = {
.iov_base = data,
.iov_len = len,
};
return o2net_send_message_vec(msg_type, key, &vec, 1,
target_node, status);
}
EXPORT_SYMBOL_GPL(o2net_send_message);
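/*
 * Illustrative usage sketch, not part of the original file: sending a
 * small message to another node. The message type, key and payload are
 * hypothetical; 'status' receives the remote handler's return value.
 */
#if 0
static int example_send(u8 target_node)
{
__be32 payload = cpu_to_be32(42); /* hypothetical payload */
int status = 0;
int ret;
ret = o2net_send_message(0x1234 /* msg_type */, 0x5678 /* key */,
&payload, sizeof(payload), target_node, &status);
/* ret < 0 is a local/system error; otherwise inspect 'status' */
return ret ? ret : status;
}
#endif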
static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr,
enum o2net_system_error syserr, int err)
{
struct kvec vec = {
.iov_base = hdr,
.iov_len = sizeof(struct o2net_msg),
};
BUG_ON(syserr >= O2NET_ERR_MAX);
/* leave other fields intact from the incoming message, msg_num
* in particular */
hdr->sys_status = cpu_to_be32(syserr);
hdr->status = cpu_to_be32(err);
hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic
hdr->data_len = 0;
msglog(hdr, "about to send status magic %d\n", err);
/* hdr has been in host byteorder this whole time */
return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
}
/* this returns -errno if the header was unknown or too large, etc.
* after this is called the buffer is reused for the next message */
static int o2net_process_message(struct o2net_sock_container *sc,
struct o2net_msg *hdr)
{
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
int ret = 0, handler_status;
enum o2net_system_error syserr;
struct o2net_msg_handler *nmh = NULL;
void *ret_data = NULL;
msglog(hdr, "processing message\n");
o2net_sc_postpone_idle(sc);
switch(be16_to_cpu(hdr->magic)) {
case O2NET_MSG_STATUS_MAGIC:
/* special type for returning message status */
o2net_complete_nsw(nn, NULL,
be32_to_cpu(hdr->msg_num),
be32_to_cpu(hdr->sys_status),
be32_to_cpu(hdr->status));
goto out;
case O2NET_MSG_KEEP_REQ_MAGIC:
o2net_sendpage(sc, o2net_keep_resp,
sizeof(*o2net_keep_resp));
goto out;
case O2NET_MSG_KEEP_RESP_MAGIC:
goto out;
case O2NET_MSG_MAGIC:
break;
default:
msglog(hdr, "bad magic\n");
ret = -EINVAL;
goto out;
}
/* find a handler for it */
handler_status = 0;
nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
be32_to_cpu(hdr->key));
if (!nmh) {
mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
syserr = O2NET_ERR_NO_HNDLR;
goto out_respond;
}
syserr = O2NET_ERR_NONE;
if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
syserr = O2NET_ERR_OVERFLOW;
if (syserr != O2NET_ERR_NONE)
goto out_respond;
o2net_set_func_start_time(sc);
sc->sc_msg_key = be32_to_cpu(hdr->key);
sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
be16_to_cpu(hdr->data_len),
nmh->nh_func_data, &ret_data);
o2net_set_func_stop_time(sc);
o2net_update_recv_stats(sc);
out_respond:
/* this destroys the hdr, so don't use it after this */
mutex_lock(&sc->sc_send_lock);
ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
handler_status);
mutex_unlock(&sc->sc_send_lock);
hdr = NULL;
mlog(0, "sending handler status %d, syserr %d returned %d\n",
handler_status, syserr, ret);
if (nmh) {
BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
if (nmh->nh_post_func)
(nmh->nh_post_func)(handler_status, nmh->nh_func_data,
ret_data);
}
out:
if (nmh)
o2net_handler_put(nmh);
return ret;
}
static int o2net_check_handshake(struct o2net_sock_container *sc)
{
struct o2net_handshake *hand = page_address(sc->sc_page);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
"protocol version %llu but %llu is required. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
(unsigned long long)be64_to_cpu(hand->protocol_version),
O2NET_PROTOCOL_VERSION);
		/* don't bother reconnecting if it's the wrong version. */
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
/*
* Ensure timeouts are consistent with other nodes, otherwise
* we can end up with one node thinking that the other must be down,
* but isn't. This can ultimately cause corruption.
*/
if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
o2net_idle_timeout()) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
"idle timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2net_idle_timeout_ms),
o2net_idle_timeout());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
o2net_keepalive_delay()) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
"delay of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2net_keepalive_delay_ms),
o2net_keepalive_delay());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
O2HB_MAX_WRITE_TIMEOUT_MS) {
printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
"timeout of %u ms, but we use %u ms locally. "
"Disconnecting.\n", SC_NODEF_ARGS(sc),
be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
sc->sc_handshake_ok = 1;
spin_lock(&nn->nn_lock);
/* set valid and queue the idle timers only if it hasn't been
* shut down already */
if (nn->nn_sc == sc) {
o2net_sc_reset_idle_timer(sc);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, sc, 1, 0);
}
spin_unlock(&nn->nn_lock);
/* shift everything up as though it wasn't there */
sc->sc_page_off -= sizeof(struct o2net_handshake);
if (sc->sc_page_off)
memmove(hand, hand + 1, sc->sc_page_off);
return 0;
}
/* this demuxes the queued rx bytes into header or payload bits and calls
* handlers as each full message is read off the socket. it returns -error,
* == 0 eof, or > 0 for progress made.*/
static int o2net_advance_rx(struct o2net_sock_container *sc)
{
struct o2net_msg *hdr;
int ret = 0;
void *data;
size_t datalen;
sclog(sc, "receiving\n");
o2net_set_advance_start_time(sc);
if (unlikely(sc->sc_handshake_ok == 0)) {
		if (sc->sc_page_off < sizeof(struct o2net_handshake)) {
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = sizeof(struct o2net_handshake) - sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0)
sc->sc_page_off += ret;
}
if (sc->sc_page_off == sizeof(struct o2net_handshake)) {
o2net_check_handshake(sc);
if (unlikely(sc->sc_handshake_ok == 0))
ret = -EPROTO;
}
goto out;
}
/* do we need more header? */
if (sc->sc_page_off < sizeof(struct o2net_msg)) {
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = sizeof(struct o2net_msg) - sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0) {
sc->sc_page_off += ret;
			/* validate the header length here.. we can
			 * only get here once as we cross from being
			 * under to over a full header */
if (sc->sc_page_off == sizeof(struct o2net_msg)) {
hdr = page_address(sc->sc_page);
if (be16_to_cpu(hdr->data_len) >
O2NET_MAX_PAYLOAD_BYTES)
ret = -EOVERFLOW;
}
}
if (ret <= 0)
goto out;
}
if (sc->sc_page_off < sizeof(struct o2net_msg)) {
/* oof, still don't have a header */
goto out;
}
	/* the header length was validated above when we first read it */
hdr = page_address(sc->sc_page);
msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
/* do we need more payload? */
if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) {
/* need more payload */
data = page_address(sc->sc_page) + sc->sc_page_off;
datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) -
sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0)
sc->sc_page_off += ret;
if (ret <= 0)
goto out;
}
if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) {
/* we can only get here once, the first time we read
* the payload.. so set ret to progress if the handler
* works out. after calling this the message is toast */
ret = o2net_process_message(sc, hdr);
if (ret == 0)
ret = 1;
sc->sc_page_off = 0;
}
out:
sclog(sc, "ret = %d\n", ret);
o2net_set_advance_stop_time(sc);
return ret;
}
/* this work func is triggered by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
static void o2net_rx_until_empty(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container, sc_rx_work);
int ret;
do {
ret = o2net_advance_rx(sc);
} while (ret > 0);
if (ret <= 0 && ret != -EAGAIN) {
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "saw error %d, closing\n", ret);
/* not permanent so read failed handshake can retry */
o2net_ensure_shutdown(nn, sc, 0);
}
sc_put(sc);
}
static int o2net_set_nodelay(struct socket *sock)
{
int ret, val = 1;
mm_segment_t oldfs;
oldfs = get_fs();
set_fs(KERNEL_DS);
/*
* Dear unsuspecting programmer,
*
* Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
* argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
* silently turn into SO_DEBUG.
*
* Yours,
* Keeper of hilariously fragile interfaces.
*/
ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
(char __user *)&val, sizeof(val));
set_fs(oldfs);
return ret;
}
static void o2net_initialize_handshake(void)
{
o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout());
o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32(
o2net_keepalive_delay());
o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32(
o2net_reconnect_delay());
}
/* ------------------------------------------------------------ */
/* called when a connect completes and after a sock is accepted. the
* rx path will see the response and mark the sc valid */
static void o2net_sc_connect_completed(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_connect_work);
mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
(unsigned long long)O2NET_PROTOCOL_VERSION,
(unsigned long long)be64_to_cpu(o2net_hand->connector_id));
o2net_initialize_handshake();
o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
sc_put(sc);
}
/* this is called as a work_struct func. */
static void o2net_sc_send_keep_req(struct work_struct *work)
{
struct o2net_sock_container *sc =
container_of(work, struct o2net_sock_container,
sc_keepalive_work.work);
o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
sc_put(sc);
}
/* socket shutdown does a del_timer_sync against this as it tears down.
* we can't start this timer until we've got to the point in sc buildup
* where shutdown is going to be involved */
static void o2net_idle_timer(unsigned long data)
{
struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
ktime_to_ms(sc->sc_tv_timer);
#else
unsigned long msecs = o2net_idle_timeout();
#endif
printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
"idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
msecs / 1000, msecs % 1000);
/*
* Initialize the nn_timeout so that the next connection attempt
* will continue in o2net_start_connect.
*/
atomic_set(&nn->nn_timeout, 1);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
}
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
{
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
msecs_to_jiffies(o2net_keepalive_delay()));
o2net_set_sock_timer(sc);
mod_timer(&sc->sc_idle_timeout,
jiffies + msecs_to_jiffies(o2net_idle_timeout()));
}
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
{
/* Only push out an existing timer */
if (timer_pending(&sc->sc_idle_timeout))
o2net_sc_reset_idle_timer(sc);
}
/* this work func is kicked whenever a path sets the nn state which doesn't
* have valid set. This includes seeing hb come up, losing a connection,
* having a connect attempt fail, etc. This centralizes the logic which decides
* if a connect attempt should be made or if we should give up and all future
* transmit attempts should fail */
static void o2net_start_connect(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_connect_work.work);
struct o2net_sock_container *sc = NULL;
struct o2nm_node *node = NULL, *mynode = NULL;
struct socket *sock = NULL;
struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
int ret = 0, stop;
unsigned int timeout;
/* if we're greater we initiate tx, otherwise we accept */
if (o2nm_this_node() <= o2net_num_from_nn(nn))
goto out;
/* watch for racing with tearing a node down */
node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
if (node == NULL) {
ret = 0;
goto out;
}
mynode = o2nm_get_node_by_num(o2nm_this_node());
if (mynode == NULL) {
ret = 0;
goto out;
}
spin_lock(&nn->nn_lock);
/*
* see if we already have one pending or have given up.
* For nn_timeout, it is set when we close the connection
* because of the idle time out. So it means that we have
* at least connected to that node successfully once,
* now try to connect to it again.
*/
timeout = atomic_read(&nn->nn_timeout);
stop = (nn->nn_sc ||
(nn->nn_persistent_error &&
(nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
spin_unlock(&nn->nn_lock);
if (stop)
goto out;
nn->nn_last_connect_attempt = jiffies;
sc = sc_alloc(node);
if (sc == NULL) {
mlog(0, "couldn't allocate sc\n");
ret = -ENOMEM;
goto out;
}
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
mlog(0, "can't create socket: %d\n", ret);
goto out;
}
sc->sc_sock = sock; /* freed by sc_kref_release */
sock->sk->sk_allocation = GFP_ATOMIC;
myaddr.sin_family = AF_INET;
myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
myaddr.sin_port = htons(0); /* any port */
ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
sizeof(myaddr));
if (ret) {
mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
ret, &mynode->nd_ipv4_address);
goto out;
}
ret = o2net_set_nodelay(sc->sc_sock);
if (ret) {
mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
goto out;
}
o2net_register_callbacks(sc->sc_sock->sk, sc);
spin_lock(&nn->nn_lock);
/* handshake completion will set nn->nn_sc_valid */
o2net_set_nn_state(nn, sc, 0, 0);
spin_unlock(&nn->nn_lock);
remoteaddr.sin_family = AF_INET;
remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
remoteaddr.sin_port = node->nd_ipv4_port;
ret = sc->sc_sock->ops->connect(sc->sc_sock,
(struct sockaddr *)&remoteaddr,
sizeof(remoteaddr),
O_NONBLOCK);
if (ret == -EINPROGRESS)
ret = 0;
out:
if (ret) {
printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
" failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
if (sc)
o2net_ensure_shutdown(nn, sc, 0);
}
if (sc)
sc_put(sc);
if (node)
o2nm_node_put(node);
if (mynode)
o2nm_node_put(mynode);
return;
}
static void o2net_connect_expired(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_connect_expired.work);
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
printk(KERN_NOTICE "o2net: No connection established with "
"node %u after %u.%u seconds, giving up.\n",
o2net_num_from_nn(nn),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
}
spin_unlock(&nn->nn_lock);
}
static void o2net_still_up(struct work_struct *work)
{
struct o2net_node *nn =
container_of(work, struct o2net_node, nn_still_up.work);
o2quo_hb_still_up(o2net_num_from_nn(nn));
}
/* ------------------------------------------------------------ */
void o2net_disconnect_node(struct o2nm_node *node)
{
struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
/* don't reconnect until it's heartbeating again */
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
spin_unlock(&nn->nn_lock);
if (o2net_wq) {
cancel_delayed_work(&nn->nn_connect_expired);
cancel_delayed_work(&nn->nn_connect_work);
cancel_delayed_work(&nn->nn_still_up);
flush_workqueue(o2net_wq);
}
}
static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
void *data)
{
o2quo_hb_down(node_num);
if (!node)
return;
if (node_num != o2nm_this_node())
o2net_disconnect_node(node);
BUG_ON(atomic_read(&o2net_connected_peers) < 0);
}
static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
void *data)
{
struct o2net_node *nn = o2net_nn_from_num(node_num);
o2quo_hb_up(node_num);
BUG_ON(!node);
/* ensure an immediate connect attempt */
nn->nn_last_connect_attempt = jiffies -
(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
if (node_num != o2nm_this_node()) {
		/* believe it or not, accept and node heartbeating testing
		 * can succeed for this node before we get here.. so
* only use set_nn_state to clear the persistent error
* if that hasn't already happened */
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
if (nn->nn_persistent_error)
o2net_set_nn_state(nn, NULL, 0, 0);
spin_unlock(&nn->nn_lock);
}
}
void o2net_unregister_hb_callbacks(void)
{
o2hb_unregister_callback(NULL, &o2net_hb_up);
o2hb_unregister_callback(NULL, &o2net_hb_down);
}
int o2net_register_hb_callbacks(void)
{
int ret;
o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB,
o2net_hb_node_down_cb, NULL, O2NET_HB_PRI);
o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB,
o2net_hb_node_up_cb, NULL, O2NET_HB_PRI);
ret = o2hb_register_callback(NULL, &o2net_hb_up);
if (ret == 0)
ret = o2hb_register_callback(NULL, &o2net_hb_down);
if (ret)
o2net_unregister_hb_callbacks();
return ret;
}
/* ------------------------------------------------------------ */
static int o2net_accept_one(struct socket *sock)
{
int ret, slen;
struct sockaddr_in sin;
struct socket *new_sock = NULL;
struct o2nm_node *node = NULL;
struct o2nm_node *local_node = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn;
BUG_ON(sock == NULL);
ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sock->sk->sk_protocol, &new_sock);
if (ret)
goto out;
new_sock->type = sock->type;
new_sock->ops = sock->ops;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
if (ret < 0)
goto out;
new_sock->sk->sk_allocation = GFP_ATOMIC;
ret = o2net_set_nodelay(new_sock);
if (ret) {
mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
goto out;
}
slen = sizeof(sin);
ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
&slen, 1);
if (ret < 0)
goto out;
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
"node at %pI4:%d\n", &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
"at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
"%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
&(local_node->nd_ipv4_address),
ntohs(local_node->nd_ipv4_port), node->nd_name,
node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
/* this happens all the time when the other node sees our heartbeat
* and tries to connect before we see their heartbeat */
if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
mlog(ML_CONN, "attempt to connect from node '%s' at "
"%pI4:%d but it isn't heartbeating\n",
node->nd_name, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
nn = o2net_nn_from_num(node->nd_num);
spin_lock(&nn->nn_lock);
if (nn->nn_sc)
ret = -EBUSY;
else
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
"at %pI4:%d but it already has an open connection\n",
node->nd_name, &sin.sin_addr.s_addr,
ntohs(sin.sin_port));
goto out;
}
sc = sc_alloc(node);
if (sc == NULL) {
ret = -ENOMEM;
goto out;
}
sc->sc_sock = new_sock;
new_sock = NULL;
spin_lock(&nn->nn_lock);
atomic_set(&nn->nn_timeout, 0);
o2net_set_nn_state(nn, sc, 0, 0);
spin_unlock(&nn->nn_lock);
o2net_register_callbacks(sc->sc_sock->sk, sc);
o2net_sc_queue_work(sc, &sc->sc_rx_work);
o2net_initialize_handshake();
o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
out:
if (new_sock)
sock_release(new_sock);
if (node)
o2nm_node_put(node);
if (local_node)
o2nm_node_put(local_node);
if (sc)
sc_put(sc);
return ret;
}
static void o2net_accept_many(struct work_struct *work)
{
struct socket *sock = o2net_listen_sock;
while (o2net_accept_one(sock) == 0)
cond_resched();
}
static void o2net_listen_data_ready(struct sock *sk, int bytes)
{
void (*ready)(struct sock *sk, int bytes);
read_lock(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (ready == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
/* ->sk_data_ready is also called for a newly established child socket
* before it has been accepted and the acceptor has set up their
* data_ready.. we only want to queue listen work for our listening
* socket */
if (sk->sk_state == TCP_LISTEN) {
mlog(ML_TCP, "bytes: %d\n", bytes);
queue_work(o2net_wq, &o2net_listen_work);
}
out:
read_unlock(&sk->sk_callback_lock);
ready(sk, bytes);
}
static int o2net_open_listening_sock(__be32 addr, __be16 port)
{
struct socket *sock = NULL;
int ret;
struct sockaddr_in sin = {
		.sin_family = AF_INET,
.sin_addr = { .s_addr = addr },
.sin_port = port,
};
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
goto out;
}
sock->sk->sk_allocation = GFP_ATOMIC;
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = sock->sk->sk_data_ready;
sock->sk->sk_data_ready = o2net_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
o2net_listen_sock = sock;
INIT_WORK(&o2net_listen_work, o2net_accept_many);
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
printk(KERN_ERR "o2net: Error %d while binding socket at "
"%pI4:%u\n", ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
if (ret < 0)
printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
ret, &addr, ntohs(port));
out:
if (ret) {
o2net_listen_sock = NULL;
if (sock)
sock_release(sock);
}
return ret;
}
/*
* called from node manager when we should bring up our network listening
* socket. node manager handles all the serialization to only call this
* once and to match it with o2net_stop_listening(). note,
* o2nm_this_node() doesn't work yet as we're being called while it
* is being set up.
*/
int o2net_start_listening(struct o2nm_node *node)
{
int ret = 0;
BUG_ON(o2net_wq != NULL);
BUG_ON(o2net_listen_sock != NULL);
mlog(ML_KTHREAD, "starting o2net thread...\n");
o2net_wq = create_singlethread_workqueue("o2net");
if (o2net_wq == NULL) {
mlog(ML_ERROR, "unable to launch o2net thread\n");
return -ENOMEM; /* ? */
}
ret = o2net_open_listening_sock(node->nd_ipv4_address,
node->nd_ipv4_port);
if (ret) {
destroy_workqueue(o2net_wq);
o2net_wq = NULL;
} else
o2quo_conn_up(node->nd_num);
return ret;
}
/* again, o2nm_this_node() doesn't work here as we're involved in
* tearing it down */
void o2net_stop_listening(struct o2nm_node *node)
{
struct socket *sock = o2net_listen_sock;
size_t i;
BUG_ON(o2net_wq == NULL);
BUG_ON(o2net_listen_sock == NULL);
/* stop the listening socket from generating work */
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_data_ready = sock->sk->sk_user_data;
sock->sk->sk_user_data = NULL;
write_unlock_bh(&sock->sk->sk_callback_lock);
for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
struct o2nm_node *node = o2nm_get_node_by_num(i);
if (node) {
o2net_disconnect_node(node);
o2nm_node_put(node);
}
}
/* finish all work and tear down the work queue */
mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
destroy_workqueue(o2net_wq);
o2net_wq = NULL;
sock_release(o2net_listen_sock);
o2net_listen_sock = NULL;
o2quo_conn_err(node->nd_num);
}
/* ------------------------------------------------------------ */
int o2net_init(void)
{
unsigned long i;
o2quo_init();
if (o2net_debugfs_init())
return -ENOMEM;
o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) {
kfree(o2net_hand);
kfree(o2net_keep_req);
kfree(o2net_keep_resp);
return -ENOMEM;
}
o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
o2net_hand->connector_id = cpu_to_be64(1);
o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
struct o2net_node *nn = o2net_nn_from_num(i);
atomic_set(&nn->nn_timeout, 0);
spin_lock_init(&nn->nn_lock);
INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
INIT_DELAYED_WORK(&nn->nn_connect_expired,
o2net_connect_expired);
INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
		/* until we see hb from a node we'll return -ENOTCONN */
nn->nn_persistent_error = -ENOTCONN;
init_waitqueue_head(&nn->nn_sc_wq);
idr_init(&nn->nn_status_idr);
INIT_LIST_HEAD(&nn->nn_status_list);
}
return 0;
}
void o2net_exit(void)
{
o2quo_exit();
kfree(o2net_hand);
kfree(o2net_keep_req);
kfree(o2net_keep_resp);
o2net_debugfs_exit();
}
| gpl-2.0 |
binkybear/kernel_msm | drivers/platform/x86/hp-wmi.c | 7834 | 22622 | /*
* HP WMI hotkeys
*
* Copyright (C) 2008 Red Hat <mjg@redhat.com>
* Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
* Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
#define HPWMI_DISPLAY_QUERY 0x1
#define HPWMI_HDDTEMP_QUERY 0x2
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
HPWMI_WWAN = 2,
};
enum hp_wmi_event_ids {
HPWMI_DOCK_EVENT = 1,
HPWMI_PARK_HDD = 2,
HPWMI_SMART_ADAPTER = 3,
HPWMI_BEZEL_BUTTON = 4,
HPWMI_WIRELESS = 5,
HPWMI_CPU_BATTERY_THROTTLE = 6,
HPWMI_LOCK_SWITCH = 7,
};
static int __devinit hp_wmi_bios_setup(struct platform_device *device);
static int __exit hp_wmi_bios_remove(struct platform_device *device);
static int hp_wmi_resume_handler(struct device *device);
struct bios_args {
u32 signature;
u32 command;
u32 commandtype;
u32 datasize;
u32 data;
};
struct bios_return {
u32 sigpass;
u32 return_code;
};
enum hp_return_value {
HPWMI_RET_WRONG_SIGNATURE = 0x02,
HPWMI_RET_UNKNOWN_COMMAND = 0x03,
HPWMI_RET_UNKNOWN_CMDTYPE = 0x04,
HPWMI_RET_INVALID_PARAMETERS = 0x05,
};
enum hp_wireless2_bits {
HPWMI_POWER_STATE = 0x01,
HPWMI_POWER_SOFT = 0x02,
HPWMI_POWER_BIOS = 0x04,
HPWMI_POWER_HARD = 0x08,
};
#define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \
!= (HPWMI_POWER_BIOS | HPWMI_POWER_HARD))
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
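/*
 * Worked examples of the power-bit decoding above (values are
 * hypothetical, not from real firmware):
 *
 *	power = 0x0e (SOFT|BIOS|HARD) -> not sw- or hw-blocked (radio on)
 *	power = 0x0c (BIOS|HARD)      -> IS_SWBLOCKED (soft-killed)
 *	power = 0x0a (SOFT|HARD)      -> IS_HWBLOCKED (BIOS bit missing)
 */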
struct bios_rfkill2_device_state {
u8 radio_type;
u8 bus_type;
u16 vendor_id;
u16 product_id;
u16 subsys_vendor_id;
u16 subsys_product_id;
u8 rfkill_id;
u8 power;
u8 unknown[4];
};
/* 7 devices fit into the 128 byte buffer */
#define HPWMI_MAX_RFKILL2_DEVICES 7
struct bios_rfkill2_state {
u8 unknown[7];
u8 count;
u8 pad[8];
struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
};
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
static struct input_dev *hp_wmi_input_dev;
static struct platform_device *hp_wmi_platform_dev;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
struct rfkill2_device {
u8 id;
int num;
struct rfkill *rfkill;
};
static int rfkill2_count;
static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
static const struct dev_pm_ops hp_wmi_pm_ops = {
.resume = hp_wmi_resume_handler,
.restore = hp_wmi_resume_handler,
};
static struct platform_driver hp_wmi_driver = {
.driver = {
.name = "hp-wmi",
.owner = THIS_MODULE,
.pm = &hp_wmi_pm_ops,
},
.probe = hp_wmi_bios_setup,
.remove = hp_wmi_bios_remove,
};
/*
* hp_wmi_perform_query
*
* query: The commandtype -> What should be queried
* write: The command -> 0 read, 1 write, 3 ODM specific
* buffer: Buffer used as input and/or output
* insize: Size of input buffer
* outsize: Size of output buffer
*
* returns zero on success
* an HP WMI query specific error code (which is positive)
* -EINVAL if the query was not successful at all
* -EINVAL if the output buffer size exceeds buffersize
*
* Note: The buffersize must at least be the maximum of the input and output
* size. E.g. Battery info query (0x7) is defined to have 1 byte input
* and 128 byte output. The caller would do:
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128)
*/
static int hp_wmi_perform_query(int query, int write, void *buffer,
int insize, int outsize)
{
struct bios_return *bios_return;
int actual_outsize;
union acpi_object *obj;
struct bios_args args = {
.signature = 0x55434553,
.command = write ? 0x2 : 0x1,
.commandtype = query,
.datasize = insize,
.data = 0,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
u32 rc;
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
memcpy(&args.data, buffer, insize);
wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
obj = output.pointer;
if (!obj)
return -EINVAL;
else if (obj->type != ACPI_TYPE_BUFFER) {
kfree(obj);
return -EINVAL;
}
bios_return = (struct bios_return *)obj->buffer.pointer;
rc = bios_return->return_code;
if (rc) {
if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, rc);
kfree(obj);
return rc;
}
if (!outsize) {
/* ignore output data */
kfree(obj);
return 0;
}
actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
memset(buffer + actual_outsize, 0, outsize - actual_outsize);
kfree(obj);
return 0;
}
static int hp_wmi_display_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return state;
}
static int hp_wmi_hddtemp_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return state;
}
static int hp_wmi_als_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return state;
}
static int hp_wmi_dock_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return -EINVAL;
return state & 0x1;
}
static int hp_wmi_tablet_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return ret;
return (state & 0x4) ? 1 : 0;
}
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
int query = BIT(r + 8) | ((!blocked) << r);
int ret;
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
&query, sizeof(query), 0);
if (ret)
return -EINVAL;
return 0;
}
static const struct rfkill_ops hp_wmi_rfkill_ops = {
.set_block = hp_wmi_set_block,
};
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
&wireless, sizeof(wireless),
sizeof(wireless));
/* TBD: Pass error */
mask = 0x200 << (r * 8);
if (wireless & mask)
return false;
else
return true;
}
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
&wireless, sizeof(wireless),
sizeof(wireless));
/* TBD: Pass error */
mask = 0x800 << (r * 8);
if (wireless & mask)
return false;
else
return true;
}
static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
{
int rfkill_id = (int)(long)data;
char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
if (hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1,
buffer, sizeof(buffer), 0))
return -EINVAL;
return 0;
}
static const struct rfkill_ops hp_wmi_rfkill2_ops = {
.set_block = hp_wmi_rfkill2_set_block,
};
static int hp_wmi_rfkill2_refresh(void)
{
int err, i;
struct bios_rfkill2_state state;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
0, sizeof(state));
if (err)
return err;
for (i = 0; i < rfkill2_count; i++) {
int num = rfkill2[i].num;
struct bios_rfkill2_device_state *devstate;
devstate = &state.device[num];
if (num >= state.count ||
devstate->rfkill_id != rfkill2[i].id) {
pr_warn("power configuration of the wireless devices unexpectedly changed\n");
continue;
}
rfkill_set_states(rfkill2[i].rfkill,
IS_SWBLOCKED(devstate->power),
IS_HWBLOCKED(devstate->power));
}
return 0;
}
static ssize_t show_display(struct device *dev, struct device_attribute *attr,
char *buf)
{
int value = hp_wmi_display_state();
if (value < 0)
return -EINVAL;
return sprintf(buf, "%d\n", value);
}
static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
char *buf)
{
int value = hp_wmi_hddtemp_state();
if (value < 0)
return -EINVAL;
return sprintf(buf, "%d\n", value);
}
static ssize_t show_als(struct device *dev, struct device_attribute *attr,
char *buf)
{
int value = hp_wmi_als_state();
if (value < 0)
return -EINVAL;
return sprintf(buf, "%d\n", value);
}
static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
char *buf)
{
int value = hp_wmi_dock_state();
if (value < 0)
return -EINVAL;
return sprintf(buf, "%d\n", value);
}
static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
char *buf)
{
int value = hp_wmi_tablet_state();
if (value < 0)
return -EINVAL;
return sprintf(buf, "%d\n", value);
}
static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 tmp = simple_strtoul(buf, NULL, 10);
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
return -EINVAL;
return count;
}
static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
static void hp_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
u32 event_id, event_data;
int key_code = 0, ret;
u32 *location;
acpi_status status;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
pr_info("bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
pr_info("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
/*
* Depending on ACPI version the concatenation of id and event data
 * inside the _WED function will result in an 8 or 16 byte buffer.
*/
location = (u32 *)obj->buffer.pointer;
if (obj->buffer.length == 8) {
event_id = *location;
event_data = *(location + 1);
} else if (obj->buffer.length == 16) {
event_id = *location;
event_data = *(location + 2);
} else {
pr_info("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
kfree(obj);
switch (event_id) {
case HPWMI_DOCK_EVENT:
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
break;
case HPWMI_SMART_ADAPTER:
break;
case HPWMI_BEZEL_BUTTON:
ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
&key_code,
sizeof(key_code),
sizeof(key_code));
if (ret)
break;
if (!sparse_keymap_report_event(hp_wmi_input_dev,
key_code, 1, true))
pr_info("Unknown key code - 0x%x\n", key_code);
break;
case HPWMI_WIRELESS:
if (rfkill2_count) {
hp_wmi_rfkill2_refresh();
break;
}
if (wifi_rfkill)
rfkill_set_states(wifi_rfkill,
hp_wmi_get_sw_state(HPWMI_WIFI),
hp_wmi_get_hw_state(HPWMI_WIFI));
if (bluetooth_rfkill)
rfkill_set_states(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
if (wwan_rfkill)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
break;
case HPWMI_LOCK_SWITCH:
break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
}
}
static int __init hp_wmi_input_setup(void)
{
acpi_status status;
int err;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
return -ENOMEM;
hp_wmi_input_dev->name = "HP WMI hotkeys";
hp_wmi_input_dev->phys = "wmi/input0";
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
goto err_free_keymap;
}
err = input_register_device(hp_wmi_input_dev);
if (err)
goto err_uninstall_notifier;
return 0;
err_uninstall_notifier:
wmi_remove_notify_handler(HPWMI_EVENT_GUID);
err_free_keymap:
sparse_keymap_free(hp_wmi_input_dev);
err_free_dev:
input_free_device(hp_wmi_input_dev);
return err;
}
static void hp_wmi_input_destroy(void)
{
wmi_remove_notify_handler(HPWMI_EVENT_GUID);
sparse_keymap_free(hp_wmi_input_dev);
input_unregister_device(hp_wmi_input_dev);
}
static void cleanup_sysfs(struct platform_device *device)
{
device_remove_file(&device->dev, &dev_attr_display);
device_remove_file(&device->dev, &dev_attr_hddtemp);
device_remove_file(&device->dev, &dev_attr_als);
device_remove_file(&device->dev, &dev_attr_dock);
device_remove_file(&device->dev, &dev_attr_tablet);
}
static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
{
int err;
int wireless = 0;
err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
sizeof(wireless), sizeof(wireless));
if (err)
return err;
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WIFI);
rfkill_init_sw_state(wifi_rfkill,
hp_wmi_get_sw_state(HPWMI_WIFI));
rfkill_set_hw_state(wifi_rfkill,
hp_wmi_get_hw_state(HPWMI_WIFI));
err = rfkill_register(wifi_rfkill);
if (err)
goto register_wifi_error;
}
if (wireless & 0x2) {
bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
RFKILL_TYPE_BLUETOOTH,
&hp_wmi_rfkill_ops,
(void *) HPWMI_BLUETOOTH);
rfkill_init_sw_state(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
rfkill_set_hw_state(bluetooth_rfkill,
hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
err = rfkill_register(bluetooth_rfkill);
if (err)
goto register_bluetooth_error;
}
if (wireless & 0x4) {
wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
RFKILL_TYPE_WWAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WWAN);
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
rfkill_set_hw_state(wwan_rfkill,
hp_wmi_get_hw_state(HPWMI_WWAN));
err = rfkill_register(wwan_rfkill);
if (err)
goto register_wwan_err;
}
return 0;
register_wwan_err:
rfkill_destroy(wwan_rfkill);
wwan_rfkill = NULL;
if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
rfkill_destroy(bluetooth_rfkill);
bluetooth_rfkill = NULL;
if (wifi_rfkill)
rfkill_unregister(wifi_rfkill);
register_wifi_error:
rfkill_destroy(wifi_rfkill);
wifi_rfkill = NULL;
return err;
}
static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
{
int err, i;
struct bios_rfkill2_state state;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
0, sizeof(state));
if (err)
return err;
if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
pr_warn("unable to parse 0x1b query output\n");
return -EINVAL;
}
for (i = 0; i < state.count; i++) {
struct rfkill *rfkill;
enum rfkill_type type;
char *name;
switch (state.device[i].radio_type) {
case HPWMI_WIFI:
type = RFKILL_TYPE_WLAN;
name = "hp-wifi";
break;
case HPWMI_BLUETOOTH:
type = RFKILL_TYPE_BLUETOOTH;
name = "hp-bluetooth";
break;
case HPWMI_WWAN:
type = RFKILL_TYPE_WWAN;
name = "hp-wwan";
break;
default:
pr_warn("unknown device type 0x%x\n",
state.device[i].radio_type);
continue;
}
if (!state.device[i].vendor_id) {
pr_warn("zero device %d while %d reported\n",
i, state.count);
continue;
}
rfkill = rfkill_alloc(name, &device->dev, type,
&hp_wmi_rfkill2_ops, (void *)(long)i);
if (!rfkill) {
err = -ENOMEM;
goto fail;
}
rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
rfkill2[rfkill2_count].num = i;
rfkill2[rfkill2_count].rfkill = rfkill;
rfkill_init_sw_state(rfkill,
IS_SWBLOCKED(state.device[i].power));
rfkill_set_hw_state(rfkill,
IS_HWBLOCKED(state.device[i].power));
if (!(state.device[i].power & HPWMI_POWER_BIOS))
pr_info("device %s blocked by BIOS\n", name);
err = rfkill_register(rfkill);
if (err) {
rfkill_destroy(rfkill);
goto fail;
}
rfkill2_count++;
}
return 0;
fail:
for (; rfkill2_count > 0; rfkill2_count--) {
rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
}
return err;
}
static int __devinit hp_wmi_bios_setup(struct platform_device *device)
{
int err;
/* clear detected rfkill devices */
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
wwan_rfkill = NULL;
rfkill2_count = 0;
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
err = device_create_file(&device->dev, &dev_attr_display);
if (err)
goto add_sysfs_error;
err = device_create_file(&device->dev, &dev_attr_hddtemp);
if (err)
goto add_sysfs_error;
err = device_create_file(&device->dev, &dev_attr_als);
if (err)
goto add_sysfs_error;
err = device_create_file(&device->dev, &dev_attr_dock);
if (err)
goto add_sysfs_error;
err = device_create_file(&device->dev, &dev_attr_tablet);
if (err)
goto add_sysfs_error;
return 0;
add_sysfs_error:
cleanup_sysfs(device);
return err;
}
static int __exit hp_wmi_bios_remove(struct platform_device *device)
{
int i;
cleanup_sysfs(device);
for (i = 0; i < rfkill2_count; i++) {
rfkill_unregister(rfkill2[i].rfkill);
rfkill_destroy(rfkill2[i].rfkill);
}
if (wifi_rfkill) {
rfkill_unregister(wifi_rfkill);
rfkill_destroy(wifi_rfkill);
}
if (bluetooth_rfkill) {
rfkill_unregister(bluetooth_rfkill);
rfkill_destroy(bluetooth_rfkill);
}
if (wwan_rfkill) {
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
return 0;
}
static int hp_wmi_resume_handler(struct device *device)
{
/*
* Hardware state may have changed while suspended, so trigger
* input events for the current state. As this is a switch,
* the input layer will only actually pass it on if the state
* changed.
*/
if (hp_wmi_input_dev) {
input_report_switch(hp_wmi_input_dev, SW_DOCK,
hp_wmi_dock_state());
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
}
if (rfkill2_count)
hp_wmi_rfkill2_refresh();
if (wifi_rfkill)
rfkill_set_states(wifi_rfkill,
hp_wmi_get_sw_state(HPWMI_WIFI),
hp_wmi_get_hw_state(HPWMI_WIFI));
if (bluetooth_rfkill)
rfkill_set_states(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
if (wwan_rfkill)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
return 0;
}
static int __init hp_wmi_init(void)
{
int err;
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
if (event_capable) {
err = hp_wmi_input_setup();
if (err)
return err;
}
if (bios_capable) {
err = platform_driver_register(&hp_wmi_driver);
if (err)
goto err_driver_reg;
hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
if (!hp_wmi_platform_dev) {
err = -ENOMEM;
goto err_device_alloc;
}
err = platform_device_add(hp_wmi_platform_dev);
if (err)
goto err_device_add;
}
if (!bios_capable && !event_capable)
return -ENODEV;
return 0;
err_device_add:
platform_device_put(hp_wmi_platform_dev);
err_device_alloc:
platform_driver_unregister(&hp_wmi_driver);
err_driver_reg:
if (event_capable)
hp_wmi_input_destroy();
return err;
}
static void __exit hp_wmi_exit(void)
{
if (wmi_has_guid(HPWMI_EVENT_GUID))
hp_wmi_input_destroy();
if (hp_wmi_platform_dev) {
platform_device_unregister(hp_wmi_platform_dev);
platform_driver_unregister(&hp_wmi_driver);
}
}
module_init(hp_wmi_init);
module_exit(hp_wmi_exit);
| gpl-2.0 |
Trinityhaxxor/Trinity_Kernel_msm8660_XperiaS | arch/cris/arch-v32/mach-a3/vcs_hook.c | 9370 | 1689 | /*
* Simulator hook mechanism
*/
#include "vcs_hook.h"
#include <asm/io.h>
#include <stdarg.h>
#define HOOK_TRIG_ADDR 0xb7000000
#define HOOK_MEM_BASE_ADDR 0xce000000
static volatile unsigned *hook_base;
#define HOOK_DATA(offset) hook_base[offset]
#define VHOOK_DATA(offset) hook_base[offset]
#define HOOK_TRIG(funcid) \
do { \
*((unsigned *) HOOK_TRIG_ADDR) = funcid; \
} while (0)
#define HOOK_DATA_BYTE(offset) ((unsigned char *)hook_base)[offset]
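/*
 * Illustrative calling convention, reconstructed from the helpers below
 * (a sketch, not normative documentation): parameters are staged in the
 * shared memory window and the simulator is kicked by writing the
 * function id to HOOK_TRIG_ADDR.
 *
 *	HOOK_DATA(0) = id;            stage the function id
 *	HOOK_DATA(1) = arg0;          parameters at offsets 1..pcnt
 *	HOOK_TRIG(id);                trigger the simulator
 *	while (VHOOK_DATA(0) > 0) ;   wait for the call to finish
 *	ret = VHOOK_DATA(1);          collect the return value
 */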
static void hook_init(void)
{
static int first = 1;
if (first) {
first = 0;
hook_base = ioremap(HOOK_MEM_BASE_ADDR, 8192);
}
}
static unsigned hook_trig(unsigned id)
{
unsigned ret;
/* preempt_disable(); */
/* Dummy read from mem to make sure data has propagated to memory
	 * before triggering */
ret = *hook_base;
/* trigger hook */
HOOK_TRIG(id);
/* wait for call to finish */
while (VHOOK_DATA(0) > 0) ;
/* extract return value */
ret = VHOOK_DATA(1);
return ret;
}
int hook_call(unsigned id, unsigned pcnt, ...)
{
va_list ap;
int i;
unsigned ret;
hook_init();
HOOK_DATA(0) = id;
va_start(ap, pcnt);
for (i = 1; i <= pcnt; i++)
HOOK_DATA(i) = va_arg(ap, unsigned);
va_end(ap);
ret = hook_trig(id);
return ret;
}
int hook_call_str(unsigned id, unsigned size, const char *str)
{
int i;
unsigned ret;
hook_init();
HOOK_DATA(0) = id;
HOOK_DATA(1) = size;
for (i = 0; i < size; i++)
HOOK_DATA_BYTE(8 + i) = str[i];
HOOK_DATA_BYTE(8 + i) = 0;
ret = hook_trig(id);
return ret;
}
void print_str(const char *str)
{
int i;
/* find null at end of string */
	for (i = 0; str[i]; i++) ;
hook_call(hook_print_str, i, str);
}
void CPU_WATCHDOG_TIMEOUT(unsigned t)
{
}
| gpl-2.0 |
unknownlamer/xbmc | xbmc/platform/android/jni/WifiConfiguration.cpp | 155 | 5081 | /*
* Copyright (C) 2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "WifiConfiguration.h"
#include "jutils/jutils-details.hpp"
using namespace jni;
CJNIWifiConfiguration::CJNIWifiConfiguration() : CJNIBase("android/net/wifi/WifiConfiguration")
{
m_object = new_object(GetClassName());
m_object.setGlobal();
}
int CJNIWifiConfiguration::getnetworkId() const
{
return get_field<jint>(m_object, "networkId");
}
int CJNIWifiConfiguration::getstatus() const
{
return get_field<jint>(m_object, "status");
}
std::string CJNIWifiConfiguration::getSSID() const
{
return jcast<std::string>(get_field<jhstring>(m_object, "SSID"));
}
std::string CJNIWifiConfiguration::getBSSID() const
{
return jcast<std::string>(get_field<jhstring>(m_object, "BSSID"));
}
std::string CJNIWifiConfiguration::getpreSharedKey() const
{
return jcast<std::string>(get_field<jhstring>(m_object, "preSharedKey"));
}
std::vector<std::string> CJNIWifiConfiguration::getwepKeys() const
{
return jcast<std::vector<std::string>>(get_field<jhobjectArray>(m_object,"wepKeys", "[Ljava/lang/String;"));
}
int CJNIWifiConfiguration::getwepTxKeyIndex() const
{
return get_field<jint>(m_object, "wepTxKeyIndex");
}
int CJNIWifiConfiguration::getpriority() const
{
return get_field<jint>(m_object, "priority");
}
bool CJNIWifiConfiguration::gethiddenSSID() const
{
return get_field<jboolean>(m_object, "hiddenSSID");
}
CJNIBitSet CJNIWifiConfiguration::getallowedKeyManagement() const
{
return get_field<jhobject>(m_object, "allowedKeyManagement", "Ljava/util/BitSet;");
}
CJNIBitSet CJNIWifiConfiguration::getallowedProtocols() const
{
return get_field<jhobject>(m_object, "allowedProtocols", "Ljava/util/BitSet;");
}
CJNIBitSet CJNIWifiConfiguration::getallowedPairwiseCiphers() const
{
return get_field<jhobject>(m_object, "allowedPairwiseCiphers", "Ljava/util/BitSet;");
}
CJNIBitSet CJNIWifiConfiguration::getallowedGroupCiphers() const
{
return get_field<jhobject>(m_object, "allowedGroupCiphers", "Ljava/util/BitSet;");
}
void CJNIWifiConfiguration::setnetworkId(int networkId)
{
set_field(m_object,"networkId", networkId);
}
void CJNIWifiConfiguration::setstatus(int status)
{
  set_field(m_object, "status", status);
}
void CJNIWifiConfiguration::setSSID(const std::string &SSID)
{
set_field(m_object, "SSID", jcast<jhstring>(SSID));
}
void CJNIWifiConfiguration::setBSSID(const std::string &BSSID)
{
set_field(m_object, "BSSID", jcast<jhstring>(BSSID));
}
void CJNIWifiConfiguration::setpreSharedKey(const std::string &preSharedKey)
{
set_field(m_object,"preSharedKey", jcast<jhstring>(preSharedKey));
}
void CJNIWifiConfiguration::setwepKeys(const std::vector<std::string> &wepKeys)
{
set_field(m_object, "wepKeys", "[Ljava/lang/String;", jcast<jhobjectArray>(wepKeys));
}
void CJNIWifiConfiguration::setwepTxKeyIndex(int wepTxKeyIndex)
{
set_field(m_object, "wepTxKeyIndex", wepTxKeyIndex);
}
void CJNIWifiConfiguration::setpriority(int priority)
{
set_field(m_object, "priority", priority);
}
void CJNIWifiConfiguration::sethiddenSSID(bool hiddenSSID)
{
set_field(m_object, "hiddenSSID", (jboolean)hiddenSSID);
}
void CJNIWifiConfiguration::setallowedKeyManagement(const CJNIBitSet& allowedKeyManagement)
{
set_field(m_object, "allowedKeyManagement", "Ljava/util/BitSet;", allowedKeyManagement.get_raw());
}
void CJNIWifiConfiguration::setallowedProtocols(const CJNIBitSet& allowedProtocols)
{
set_field(m_object, "allowedProtocols", "Ljava/util/BitSet;", allowedProtocols.get_raw());
}
void CJNIWifiConfiguration::setallowedAuthAlgorithms(const CJNIBitSet& allowedAuthAlgorithms)
{
set_field(m_object, "allowedAuthAlgorithms", "Ljava/util/BitSet;", allowedAuthAlgorithms.get_raw());
}
void CJNIWifiConfiguration::setallowedPairwiseCiphers(const CJNIBitSet& allowedPairwiseCiphers)
{
set_field(m_object, "allowedPairwiseCiphers", "Ljava/util/BitSet;", allowedPairwiseCiphers.get_raw());
}
void CJNIWifiConfiguration::setallowedGroupCiphers(const CJNIBitSet& allowedGroupCiphers)
{
set_field(m_object, "allowedGroupCiphers", "Ljava/util/BitSet;", allowedGroupCiphers.get_raw());
}
std::string CJNIWifiConfiguration::toString()
{
return jcast<std::string>(call_method<jhstring>(m_object,
"toString", "()Ljava/lang/String;"));
}
int CJNIWifiConfiguration::describeContents()
{
return call_method<jint>(m_object,
"describeContents", "()I");
}
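// Illustrative usage sketch (hypothetical, not part of the original file):
// building a WPA-PSK configuration with the setters above. Note that, as
// far as we know, the underlying android.net.wifi.WifiConfiguration API
// expects SSID and preSharedKey values wrapped in double quotes.
//
//   CJNIWifiConfiguration cfg;
//   cfg.setSSID("\"MyNetwork\"");
//   cfg.setpreSharedKey("\"a strong passphrase\"");
//   cfg.sethiddenSSID(false);
//   cfg.setpriority(1);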
| gpl-2.0 |
tbalden/htc-kernel-endeavoru-stable | drivers/staging/gma500/psb_buffer.c | 155 | 12546 | /**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
*/
#include "ttm/ttm_placement.h"
#include "ttm/ttm_execbuf_util.h"
#include "psb_ttm_fence_api.h"
#include <drm/drmP.h>
#include "psb_drv.h"
#define DRM_MEM_TTM 26
struct drm_psb_ttm_backend {
struct ttm_backend base;
struct page **pages;
unsigned int desired_tile_stride;
unsigned int hw_tile_stride;
int mem_type;
unsigned long offset;
unsigned long num_pages;
};
/*
* MSVDX/TOPAZ GPU virtual space looks like this
* (We currently use only one MMU context).
* PSB_MEM_MMU_START: from 0x00000000~0xe000000, for generic buffers
* TTM_PL_CI: from 0xe0000000+half GTT space, for camear/video buffer sharing
* TTM_PL_RAR: from TTM_PL_CI+CI size, for RAR/video buffer sharing
* TTM_PL_TT: from TTM_PL_RAR+RAR size, for buffers need to mapping into GTT
*/
static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct drm_psb_private *dev_priv =
container_of(bdev, struct drm_psb_private, bdev);
struct psb_gtt *pg = dev_priv->pg;
switch (type) {
case TTM_PL_SYSTEM:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case DRM_PSB_MEM_MMU:
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->gpu_offset = PSB_MEM_MMU_START;
man->available_caching = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_CI:
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_FIXED;
man->gpu_offset = pg->mmu_gatt_start + (pg->ci_start);
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
case TTM_PL_RAR: /* Unmappable RAR memory */
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_FIXED;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
man->gpu_offset = pg->mmu_gatt_start + (pg->rar_start);
break;
case TTM_PL_TT: /* Mappable GATT memory */
man->func = &ttm_bo_manager_func;
#ifdef PSB_WORKING_HOST_MMU_ACCESS
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
#else
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
#endif
man->available_caching = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
man->gpu_offset = pg->mmu_gatt_start +
(pg->rar_start + dev_priv->rar_region_size);
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
return -EINVAL;
}
return 0;
}
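/*
 * Worked illustration of the offsets computed above (numbers are
 * hypothetical, not taken from real hardware): with
 * mmu_gatt_start = 0xe0000000, ci_start = 0x10000000,
 * rar_start = 0x18000000 and a 64MB RAR region, the managers would
 * hand out GPU offsets at:
 *
 *	DRM_PSB_MEM_MMU: PSB_MEM_MMU_START (0x00000000)
 *	TTM_PL_CI:       0xe0000000 + 0x10000000 = 0xf0000000
 *	TTM_PL_RAR:      0xe0000000 + 0x18000000 = 0xf8000000
 *	TTM_PL_TT:       0xf8000000 + 0x04000000 = 0xfc000000
 */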
static void psb_evict_mask(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
static uint32_t cur_placement;
cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEM;
cur_placement |= TTM_PL_FLAG_SYSTEM;
placement->fpfn = 0;
placement->lpfn = 0;
placement->num_placement = 1;
placement->placement = &cur_placement;
placement->num_busy_placement = 0;
placement->busy_placement = NULL;
/* all buffers evicted to system memory */
/* return cur_placement | TTM_PL_FLAG_SYSTEM; */
}
static int psb_invalidate_caches(struct ttm_bo_device *bdev,
uint32_t placement)
{
return 0;
}
static int psb_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait,
struct ttm_mem_reg *new_mem)
{
BUG();
return 0;
}
/*
* Flip destination ttm into GATT,
* then blit and subsequently move out again.
*/
static int psb_move_flip(struct ttm_buffer_object *bo,
bool evict, bool interruptible, bool no_wait,
struct ttm_mem_reg *new_mem)
{
/*struct ttm_bo_device *bdev = bo->bdev;*/
struct ttm_mem_reg tmp_mem;
int ret;
struct ttm_placement placement;
uint32_t flags = TTM_PL_FLAG_TT;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &flags;
placement.num_busy_placement = 0; /* FIXME */
placement.busy_placement = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible,
false, no_wait);
if (ret)
return ret;
ret = ttm_tt_bind(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
if (ret)
goto out_cleanup;
ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
}
return ret;
}
static int psb_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible, bool no_wait_reserve,
bool no_wait, struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
if ((old_mem->mem_type == TTM_PL_RAR) ||
(new_mem->mem_type == TTM_PL_RAR)) {
if (old_mem->mm_node) {
spin_lock(&bo->glob->lru_lock);
drm_mm_put_block(old_mem->mm_node);
spin_unlock(&bo->glob->lru_lock);
}
old_mem->mm_node = NULL;
*old_mem = *new_mem;
} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
return ttm_bo_move_memcpy(bo, evict, false, no_wait, new_mem);
} else if (new_mem->mem_type == TTM_PL_SYSTEM) {
int ret = psb_move_flip(bo, evict, interruptible,
no_wait, new_mem);
if (unlikely(ret != 0)) {
if (ret == -ERESTART)
return ret;
else
return ttm_bo_move_memcpy(bo, evict, false,
no_wait, new_mem);
}
} else {
if (psb_move_blit(bo, evict, no_wait, new_mem))
return ttm_bo_move_memcpy(bo, evict, false, no_wait,
new_mem);
}
return 0;
}
static int drm_psb_tbe_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
struct page *dummy_read_page,
dma_addr_t *dma_addrs)
{
struct drm_psb_ttm_backend *psb_be =
container_of(backend, struct drm_psb_ttm_backend, base);
psb_be->pages = pages;
return 0;
}
static int drm_psb_tbe_unbind(struct ttm_backend *backend)
{
struct ttm_bo_device *bdev = backend->bdev;
struct drm_psb_private *dev_priv =
container_of(bdev, struct drm_psb_private, bdev);
struct drm_psb_ttm_backend *psb_be =
container_of(backend, struct drm_psb_ttm_backend, base);
struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
/* struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type]; */
if (psb_be->mem_type == TTM_PL_TT) {
uint32_t gatt_p_offset =
(psb_be->offset - dev_priv->pg->mmu_gatt_start)
>> PAGE_SHIFT;
(void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
psb_be->num_pages,
psb_be->desired_tile_stride,
psb_be->hw_tile_stride, 0);
}
psb_mmu_remove_pages(pd, psb_be->offset,
psb_be->num_pages,
psb_be->desired_tile_stride,
psb_be->hw_tile_stride);
return 0;
}
static int drm_psb_tbe_bind(struct ttm_backend *backend,
struct ttm_mem_reg *bo_mem)
{
struct ttm_bo_device *bdev = backend->bdev;
struct drm_psb_private *dev_priv =
container_of(bdev, struct drm_psb_private, bdev);
struct drm_psb_ttm_backend *psb_be =
container_of(backend, struct drm_psb_ttm_backend, base);
struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
struct drm_mm_node *mm_node = bo_mem->mm_node;
int type;
int ret = 0;
psb_be->mem_type = bo_mem->mem_type;
psb_be->num_pages = bo_mem->num_pages;
psb_be->desired_tile_stride = 0;
psb_be->hw_tile_stride = 0;
psb_be->offset = (mm_node->start << PAGE_SHIFT) +
man->gpu_offset;
type = (bo_mem->placement & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
if (psb_be->mem_type == TTM_PL_TT) {
uint32_t gatt_p_offset =
(psb_be->offset - dev_priv->pg->mmu_gatt_start)
>> PAGE_SHIFT;
ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
gatt_p_offset,
psb_be->num_pages,
psb_be->desired_tile_stride,
psb_be->hw_tile_stride, type);
}
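/*
 * Note that an error from psb_gtt_insert_pages() above is immediately
 * overwritten by the psb_mmu_insert_pages() return value below and is
 * therefore never reported to the caller.
 */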
ret = psb_mmu_insert_pages(pd, psb_be->pages,
psb_be->offset, psb_be->num_pages,
psb_be->desired_tile_stride,
psb_be->hw_tile_stride, type);
if (ret)
goto out_err;
return 0;
out_err:
drm_psb_tbe_unbind(backend);
return ret;
}
static void drm_psb_tbe_clear(struct ttm_backend *backend)
{
struct drm_psb_ttm_backend *psb_be =
container_of(backend, struct drm_psb_ttm_backend, base);
psb_be->pages = NULL;
return;
}
static void drm_psb_tbe_destroy(struct ttm_backend *backend)
{
struct drm_psb_ttm_backend *psb_be;
if (!backend)
return;
psb_be = container_of(backend, struct drm_psb_ttm_backend, base);
kfree(psb_be);
}
static struct ttm_backend_func psb_ttm_backend = {
.populate = drm_psb_tbe_populate,
.clear = drm_psb_tbe_clear,
.bind = drm_psb_tbe_bind,
.unbind = drm_psb_tbe_unbind,
.destroy = drm_psb_tbe_destroy,
};
static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
{
struct drm_psb_ttm_backend *psb_be;
psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
if (!psb_be)
return NULL;
psb_be->pages = NULL;
psb_be->base.func = &psb_ttm_backend;
psb_be->base.bdev = bdev;
return &psb_be->base;
}
static int psb_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_psb_private *dev_priv =
container_of(bdev, struct drm_psb_private, bdev);
struct psb_gtt *pg = dev_priv->pg;
struct drm_mm_node *mm_node = mem->mm_node;
mem->bus.addr = NULL;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
mem->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_TT:
mem->bus.offset = mm_node->start << PAGE_SHIFT;
mem->bus.base = pg->gatt_start;
mem->bus.is_iomem = false;
/* We don't know whether this is I/O memory; the
flag is used in the vm_fault handler */
break;
case DRM_PSB_MEM_MMU:
mem->bus.offset = mm_node->start << PAGE_SHIFT;
mem->bus.base = 0x00000000;
break;
case TTM_PL_CI:
mem->bus.offset = mm_node->start << PAGE_SHIFT;
mem->bus.base = dev_priv->ci_region_start;
mem->bus.is_iomem = true;
break;
case TTM_PL_RAR:
mem->bus.offset = mm_node->start << PAGE_SHIFT;
mem->bus.base = dev_priv->rar_region_start;
mem->bus.is_iomem = true;
break;
default:
return -EINVAL;
}
return 0;
}
static void psb_ttm_io_mem_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
}
/*
* Use this memory type priority if no eviction is needed.
*/
/*
static uint32_t psb_mem_prios[] = {
TTM_PL_CI,
TTM_PL_RAR,
TTM_PL_TT,
DRM_PSB_MEM_MMU,
TTM_PL_SYSTEM
};
*/
/*
* Use this memory type priority if need to evict.
*/
/*
static uint32_t psb_busy_prios[] = {
TTM_PL_TT,
TTM_PL_CI,
TTM_PL_RAR,
DRM_PSB_MEM_MMU,
TTM_PL_SYSTEM
};
*/
struct ttm_bo_driver psb_ttm_bo_driver = {
/*
.mem_type_prio = psb_mem_prios,
.mem_busy_prio = psb_busy_prios,
.num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
.num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
*/
.create_ttm_backend_entry = &drm_psb_tbe_init,
.invalidate_caches = &psb_invalidate_caches,
.init_mem_type = &psb_init_mem_type,
.evict_flags = &psb_evict_mask,
.move = &psb_move,
.verify_access = &psb_verify_access,
.sync_obj_signaled = &ttm_fence_sync_obj_signaled,
.sync_obj_wait = &ttm_fence_sync_obj_wait,
.sync_obj_flush = &ttm_fence_sync_obj_flush,
.sync_obj_unref = &ttm_fence_sync_obj_unref,
.sync_obj_ref = &ttm_fence_sync_obj_ref,
.io_mem_reserve = &psb_ttm_io_mem_reserve,
.io_mem_free = &psb_ttm_io_mem_free
};
| gpl-2.0 |
mixtile/loftq-linux | drivers/net/wireless/rtl8188eu/core/rtw_rf.c | 411 | 2388 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTW_RF_C_
#include <drv_types.h>
struct ch_freq {
u32 channel;
u32 frequency;
};
struct ch_freq ch_freq_map[] = {
{1, 2412},{2, 2417},{3, 2422},{4, 2427},{5, 2432},
{6, 2437},{7, 2442},{8, 2447},{9, 2452},{10, 2457},
{11, 2462},{12, 2467},{13, 2472},{14, 2484},
/* UNII */
{36, 5180},{40, 5200},{44, 5220},{48, 5240},{52, 5260},
{56, 5280},{60, 5300},{64, 5320},{149, 5745},{153, 5765},
{157, 5785},{161, 5805},{165, 5825},{167, 5835},{169, 5845},
{171, 5855},{173, 5865},
/* HiperLAN2 */
{100, 5500},{104, 5520},{108, 5540},{112, 5560},{116, 5580},
{120, 5600},{124, 5620},{128, 5640},{132, 5660},{136, 5680},
{140, 5700},
/* Japan MMAC */
{34, 5170},{38, 5190},{42, 5210},{46, 5230},
/* Japan */
{184, 4920},{188, 4940},{192, 4960},{196, 4980},
{208, 5040},/* Japan, means J08 */
{212, 5060},/* Japan, means J12 */
{216, 5080},/* Japan, means J16 */
};
int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
u32 rtw_ch2freq(u32 channel)
{
u8 i;
u32 freq = 0;
for (i = 0; i < ch_freq_map_num; i++)
{
if (channel == ch_freq_map[i].channel)
{
freq = ch_freq_map[i].frequency;
break;
}
}
if (i == ch_freq_map_num)
freq = 2412;
return freq;
}
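/*
 * Example (illustrative): rtw_ch2freq(6) returns 2437; a channel not
 * present in ch_freq_map falls back to 2412 (channel 1), and
 * rtw_freq2ch() below likewise falls back to channel 1 for an unknown
 * frequency.
 */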
u32 rtw_freq2ch(u32 freq)
{
u8 i;
u32 ch = 0;
for (i = 0; i < ch_freq_map_num; i++)
{
if (freq == ch_freq_map[i].frequency)
{
ch = ch_freq_map[i].channel;
break;
}
}
if (i == ch_freq_map_num)
ch = 1;
return ch;
}
| gpl-2.0 |
AaronNGray/Freescale-kernel | drivers/watchdog/sbc7240_wdt.c | 667 | 7472 | /*
* NANO7240 SBC Watchdog device driver
*
* Based on w83877f.c by Scott Jennings,
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* (c) Copyright 2007 Gilles GIGAN <gilles.gigan@jcu.edu.au>
*
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/system.h>
#define SBC7240_PREFIX "sbc7240_wdt: "
#define SBC7240_ENABLE_PORT 0x443
#define SBC7240_DISABLE_PORT 0x043
#define SBC7240_SET_TIMEOUT_PORT SBC7240_ENABLE_PORT
#define SBC7240_MAGIC_CHAR 'V'
#define SBC7240_TIMEOUT 30
#define SBC7240_MAX_TIMEOUT 255
static int timeout = SBC7240_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<="
__MODULE_STRING(SBC7240_MAX_TIMEOUT) ", default="
__MODULE_STRING(SBC7240_TIMEOUT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog when closing device file");
#define SBC7240_OPEN_STATUS_BIT 0
#define SBC7240_ENABLED_STATUS_BIT 1
#define SBC7240_EXPECT_CLOSE_STATUS_BIT 2
static unsigned long wdt_status;
/*
* Utility routines
*/
static void wdt_disable(void)
{
/* disable the watchdog */
if (test_and_clear_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_DISABLE_PORT);
printk(KERN_INFO SBC7240_PREFIX
"Watchdog timer is now disabled.\n");
}
}
static void wdt_enable(void)
{
/* enable the watchdog */
if (!test_and_set_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_ENABLE_PORT);
printk(KERN_INFO SBC7240_PREFIX
"Watchdog timer is now enabled.\n");
}
}
static int wdt_set_timeout(int t)
{
if (t < 1 || t > SBC7240_MAX_TIMEOUT) {
printk(KERN_ERR SBC7240_PREFIX
"timeout value must be 1<=x<=%d\n",
SBC7240_MAX_TIMEOUT);
return -1;
}
/* set the timeout */
outb_p((unsigned)t, SBC7240_SET_TIMEOUT_PORT);
timeout = t;
printk(KERN_INFO SBC7240_PREFIX "timeout set to %d seconds\n", t);
return 0;
}
/* Whack the dog */
static inline void wdt_keepalive(void)
{
if (test_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status))
inb_p(SBC7240_ENABLE_PORT);
}
/*
* /dev/watchdog handling
*/
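/*
 * Userspace sketch (illustrative, not part of this driver): a daemon
 * pings the watchdog by writing any byte and, when nowayout is off,
 * writes the magic character 'V' right before close so that
 * fop_close() disables the timer instead of leaving it armed:
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	write(fd, "\0", 1);	(keepalive, handled by fop_write)
 *	write(fd, "V", 1);	(SBC7240_MAGIC_CHAR, expect close)
 *	close(fd);		(wdt_disable() runs in fop_close)
 */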
static ssize_t fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
size_t i;
char c;
if (count) {
if (!nowayout) {
clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
/* is there a magic char ? */
for (i = 0; i != count; i++) {
if (get_user(c, buf + i))
return -EFAULT;
if (c == SBC7240_MAGIC_CHAR) {
set_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
break;
}
}
}
wdt_keepalive();
}
return count;
}
static int fop_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status))
return -EBUSY;
wdt_enable();
return nonseekable_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
{
if (test_and_clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT, &wdt_status)
|| !nowayout) {
wdt_disable();
} else {
printk(KERN_CRIT SBC7240_PREFIX
"Unexpected close, not stopping watchdog!\n");
wdt_keepalive();
}
clear_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status);
return 0;
}
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING|
WDIOF_SETTIMEOUT|
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SBC7240",
};
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user((void __user *)arg, &ident, sizeof(ident))
? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, (int __user *)arg);
case WDIOC_SETOPTIONS:
{
int options;
int retval = -EINVAL;
if (get_user(options, (int __user *)arg))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
wdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
wdt_enable();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_timeout;
if (get_user(new_timeout, (int __user *)arg))
return -EFAULT;
if (wdt_set_timeout(new_timeout))
return -EINVAL;
/* Fall through */
}
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
return -ENOTTY;
}
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_disable();
return NOTIFY_DONE;
}
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static void __exit sbc7240_wdt_unload(void)
{
printk(KERN_INFO SBC7240_PREFIX "Removing watchdog\n");
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(SBC7240_ENABLE_PORT, 1);
}
static int __init sbc7240_wdt_init(void)
{
int rc = -EBUSY;
if (!request_region(SBC7240_ENABLE_PORT, 1, "SBC7240 WDT")) {
printk(KERN_ERR SBC7240_PREFIX
"I/O address 0x%04x already in use\n",
SBC7240_ENABLE_PORT);
rc = -EIO;
goto err_out;
}
/* The IO port 0x043 used to disable the watchdog
* is already claimed by the system timer, so we
* can't request_region() it ...*/
if (timeout < 1 || timeout > SBC7240_MAX_TIMEOUT) {
timeout = SBC7240_TIMEOUT;
printk(KERN_INFO SBC7240_PREFIX
"timeout value must be 1<=x<=%d, using %d\n",
SBC7240_MAX_TIMEOUT, timeout);
}
wdt_set_timeout(timeout);
wdt_disable();
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
printk(KERN_ERR SBC7240_PREFIX
"cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
printk(KERN_ERR SBC7240_PREFIX
"cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot_notifier;
}
printk(KERN_INFO SBC7240_PREFIX
"Watchdog driver for SBC7240 initialised (nowayout=%d)\n",
nowayout);
return 0;
err_out_reboot_notifier:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
release_region(SBC7240_ENABLE_PORT, 1);
err_out:
return rc;
}
module_init(sbc7240_wdt_init);
module_exit(sbc7240_wdt_unload);
MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
" computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
ikarosdev/kernel_wx435 | fs/binfmt_misc.c | 667 | 15436 | /*
* binfmt_misc.c
*
* Copyright (C) 1997 Richard Günther
*
* binfmt_misc detects binaries via a magic or filename extension and invokes
* a specified wrapper. This should obsolete binfmt_java, binfmt_em86 and
* binfmt_mz.
*
* 1997-04-25 first version
* [...]
* 1997-05-19 cleanup
* 1997-06-26 hpa: pass the real filename rather than argv[0]
* 1997-06-30 minor cleanup
* 1997-08-09 removed extension stripping, locking cleanup
* 2001-02-28 AV: rewritten into something that resembles C. Original didn't.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/binfmts.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
enum {
VERBOSE_STATUS = 1 /* make it zero to save 400 bytes kernel memory */
};
static LIST_HEAD(entries);
static int enabled = 1;
enum {Enabled, Magic};
#define MISC_FMT_PRESERVE_ARGV0 (1<<31)
#define MISC_FMT_OPEN_BINARY (1<<30)
#define MISC_FMT_CREDENTIALS (1<<29)
typedef struct {
struct list_head list;
unsigned long flags; /* type, status, etc. */
int offset; /* offset of magic */
int size; /* size of magic/mask */
char *magic; /* magic or filename extension */
char *mask; /* mask, NULL for exact match */
char *interpreter; /* filename of interpreter */
char *name;
struct dentry *dentry;
} Node;
static DEFINE_RWLOCK(entries_lock);
static struct file_system_type bm_fs_type;
static struct vfsmount *bm_mnt;
static int entry_count;
/*
* Check if we support the binfmt
* if we do, return the node, else NULL
* locking is done in load_misc_binary
*/
static Node *check_file(struct linux_binprm *bprm)
{
char *p = strrchr(bprm->interp, '.');
struct list_head *l;
list_for_each(l, &entries) {
Node *e = list_entry(l, Node, list);
char *s;
int j;
if (!test_bit(Enabled, &e->flags))
continue;
if (!test_bit(Magic, &e->flags)) {
if (p && !strcmp(e->magic, p + 1))
return e;
continue;
}
s = bprm->buf + e->offset;
if (e->mask) {
for (j = 0; j < e->size; j++)
if ((*s++ ^ e->magic[j]) & e->mask[j])
break;
} else {
for (j = 0; j < e->size; j++)
if ((*s++ ^ e->magic[j]))
break;
}
if (j == e->size)
return e;
}
return NULL;
}
/*
* the loader itself
*/
static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
Node *fmt;
struct file * interp_file = NULL;
char iname[BINPRM_BUF_SIZE];
char *iname_addr = iname;
int retval;
int fd_binary = -1;
retval = -ENOEXEC;
if (!enabled)
goto _ret;
retval = -ENOEXEC;
if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
goto _ret;
/* to keep locking time low, we copy the interpreter string */
read_lock(&entries_lock);
fmt = check_file(bprm);
if (fmt)
strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE);
read_unlock(&entries_lock);
if (!fmt)
goto _ret;
if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
retval = remove_arg_zero(bprm);
if (retval)
goto _ret;
}
if (fmt->flags & MISC_FMT_OPEN_BINARY) {
/* if the binary should be opened on behalf of the
 * interpreter, then keep it open and assign a
 * descriptor to it */
fd_binary = get_unused_fd();
if (fd_binary < 0) {
retval = fd_binary;
goto _ret;
}
fd_install(fd_binary, bprm->file);
/* if the binary is not readable, then enforce mm->dumpable=0
regardless of the interpreter's permissions */
if (file_permission(bprm->file, MAY_READ))
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
allow_write_access(bprm->file);
bprm->file = NULL;
/* mark the bprm that fd should be passed to interp */
bprm->interp_flags |= BINPRM_FLAGS_EXECFD;
bprm->interp_data = fd_binary;
} else {
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
}
/* make argv[1] be the path to the binary */
retval = copy_strings_kernel (1, &bprm->interp, bprm);
if (retval < 0)
goto _error;
bprm->argc++;
/* add the interp as argv[0] */
retval = copy_strings_kernel (1, &iname_addr, bprm);
if (retval < 0)
goto _error;
bprm->argc ++;
bprm->interp = iname; /* for binfmt_script */
interp_file = open_exec (iname);
retval = PTR_ERR (interp_file);
if (IS_ERR (interp_file))
goto _error;
bprm->file = interp_file;
if (fmt->flags & MISC_FMT_CREDENTIALS) {
/*
* No need to call prepare_binprm(), it's already been
* done. bprm->buf is stale, update from interp_file.
*/
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
} else
retval = prepare_binprm (bprm);
if (retval < 0)
goto _error;
bprm->recursion_depth++;
retval = search_binary_handler (bprm, regs);
if (retval < 0)
goto _error;
_ret:
return retval;
_error:
if (fd_binary > 0)
sys_close(fd_binary);
bprm->interp_flags = 0;
bprm->interp_data = 0;
goto _ret;
}
/* Command parsers */
/*
 * scans one argument enclosed in del, recognising the \x special.
 * returns a pointer just past the closing delimiter, or NULL if a
 * \x escape is not followed by two hex digits.
 */
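/*
 * For example (illustrative), a magic of 0x7f 'E' 'L' 'F' is written
 * as the characters "\x7fELF"; scanarg() only validates that each \x
 * is followed by two hex digits, and unquote() below packs "\x7f"
 * into the single byte 0x7f.
 */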
static char *scanarg(char *s, char del)
{
char c;
while ((c = *s++) != del) {
if (c == '\\' && *s == 'x') {
s++;
if (!isxdigit(*s++))
return NULL;
if (!isxdigit(*s++))
return NULL;
}
}
return s;
}
static int unquote(char *from)
{
char c = 0, *s = from, *p = from;
while ((c = *s++) != '\0') {
if (c == '\\' && *s == 'x') {
s++;
c = toupper(*s++);
*p = (c - (isdigit(c) ? '0' : 'A' - 10)) << 4;
c = toupper(*s++);
*p++ |= c - (isdigit(c) ? '0' : 'A' - 10);
continue;
}
*p++ = c;
}
return p - from;
}
static char * check_special_flags (char * sfs, Node * e)
{
char * p = sfs;
int cont = 1;
/* special flags */
while (cont) {
switch (*p) {
case 'P':
p++;
e->flags |= MISC_FMT_PRESERVE_ARGV0;
break;
case 'O':
p++;
e->flags |= MISC_FMT_OPEN_BINARY;
break;
case 'C':
p++;
/* this flag also implies the
open-binary flag */
e->flags |= (MISC_FMT_CREDENTIALS |
MISC_FMT_OPEN_BINARY);
break;
default:
cont = 0;
}
}
return p;
}
/*
* This registers a new binary format, it recognises the syntax
* ':name:type:offset:magic:mask:interpreter:flags'
* where the ':' is the IFS, that can be chosen with the first char
*/
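/*
 * For example (illustrative, the canonical case from
 * Documentation/binfmt_misc.txt):
 *
 *	echo ':DOSWin:M::MZ::/usr/bin/wine:' > \
 *		/proc/sys/fs/binfmt_misc/register
 *
 * registers a magic-based entry named "DOSWin" that matches "MZ" at
 * offset 0 and hands matching binaries to /usr/bin/wine.
 */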
static Node *create_entry(const char __user *buffer, size_t count)
{
Node *e;
int memsize, err;
char *buf, *p;
char del;
/* some sanity checks */
err = -EINVAL;
if ((count < 11) || (count > 256))
goto out;
err = -ENOMEM;
memsize = sizeof(Node) + count + 8;
e = kmalloc(memsize, GFP_USER);
if (!e)
goto out;
p = buf = (char *)e + sizeof(Node);
memset(e, 0, sizeof(Node));
if (copy_from_user(buf, buffer, count))
goto Efault;
del = *p++; /* delimiter */
memset(buf+count, del, 8);
e->name = p;
p = strchr(p, del);
if (!p)
goto Einval;
*p++ = '\0';
if (!e->name[0] ||
!strcmp(e->name, ".") ||
!strcmp(e->name, "..") ||
strchr(e->name, '/'))
goto Einval;
switch (*p++) {
case 'E': e->flags = 1<<Enabled; break;
case 'M': e->flags = (1<<Enabled) | (1<<Magic); break;
default: goto Einval;
}
if (*p++ != del)
goto Einval;
if (test_bit(Magic, &e->flags)) {
char *s = strchr(p, del);
if (!s)
goto Einval;
*s++ = '\0';
e->offset = simple_strtoul(p, &p, 10);
if (*p++)
goto Einval;
e->magic = p;
p = scanarg(p, del);
if (!p)
goto Einval;
p[-1] = '\0';
if (!e->magic[0])
goto Einval;
e->mask = p;
p = scanarg(p, del);
if (!p)
goto Einval;
p[-1] = '\0';
if (!e->mask[0])
e->mask = NULL;
e->size = unquote(e->magic);
if (e->mask && unquote(e->mask) != e->size)
goto Einval;
if (e->size + e->offset > BINPRM_BUF_SIZE)
goto Einval;
} else {
p = strchr(p, del);
if (!p)
goto Einval;
*p++ = '\0';
e->magic = p;
p = strchr(p, del);
if (!p)
goto Einval;
*p++ = '\0';
if (!e->magic[0] || strchr(e->magic, '/'))
goto Einval;
p = strchr(p, del);
if (!p)
goto Einval;
*p++ = '\0';
}
e->interpreter = p;
p = strchr(p, del);
if (!p)
goto Einval;
*p++ = '\0';
if (!e->interpreter[0])
goto Einval;
p = check_special_flags (p, e);
if (*p == '\n')
p++;
if (p != buf + count)
goto Einval;
return e;
out:
return ERR_PTR(err);
Efault:
kfree(e);
return ERR_PTR(-EFAULT);
Einval:
kfree(e);
return ERR_PTR(-EINVAL);
}
/*
* Set status of entry/binfmt_misc:
* '1' enables, '0' disables and '-1' clears entry/binfmt_misc
*/
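/*
 * E.g. (illustrative): "echo 0 > status" disables binfmt_misc,
 * "echo 1 > status" re-enables it, and "echo -1" written to the
 * status file clears all entries; written to a single entry's file,
 * it removes just that entry (see bm_entry_write/bm_status_write).
 */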
static int parse_command(const char __user *buffer, size_t count)
{
char s[4];
if (!count)
return 0;
if (count > 3)
return -EINVAL;
if (copy_from_user(s, buffer, count))
return -EFAULT;
if (s[count-1] == '\n')
count--;
if (count == 1 && s[0] == '0')
return 1;
if (count == 1 && s[0] == '1')
return 2;
if (count == 2 && s[0] == '-' && s[1] == '1')
return 3;
return -EINVAL;
}
/* generic stuff */
static void entry_status(Node *e, char *page)
{
char *dp;
char *status = "disabled";
const char * flags = "flags: ";
if (test_bit(Enabled, &e->flags))
status = "enabled";
if (!VERBOSE_STATUS) {
sprintf(page, "%s\n", status);
return;
}
sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter);
dp = page + strlen(page);
/* print the special flags */
sprintf (dp, "%s", flags);
dp += strlen (flags);
if (e->flags & MISC_FMT_PRESERVE_ARGV0) {
*dp ++ = 'P';
}
if (e->flags & MISC_FMT_OPEN_BINARY) {
*dp ++ = 'O';
}
if (e->flags & MISC_FMT_CREDENTIALS) {
*dp ++ = 'C';
}
*dp ++ = '\n';
if (!test_bit(Magic, &e->flags)) {
sprintf(dp, "extension .%s\n", e->magic);
} else {
int i;
sprintf(dp, "offset %i\nmagic ", e->offset);
dp = page + strlen(page);
for (i = 0; i < e->size; i++) {
sprintf(dp, "%02x", 0xff & (int) (e->magic[i]));
dp += 2;
}
if (e->mask) {
sprintf(dp, "\nmask ");
dp += 6;
for (i = 0; i < e->size; i++) {
sprintf(dp, "%02x", 0xff & (int) (e->mask[i]));
dp += 2;
}
}
*dp++ = '\n';
*dp = '\0';
}
}
static struct inode *bm_get_inode(struct super_block *sb, int mode)
{
struct inode * inode = new_inode(sb);
if (inode) {
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime =
current_fs_time(inode->i_sb);
}
return inode;
}
static void bm_clear_inode(struct inode *inode)
{
kfree(inode->i_private);
}
static void kill_node(Node *e)
{
struct dentry *dentry;
write_lock(&entries_lock);
dentry = e->dentry;
if (dentry) {
list_del_init(&e->list);
e->dentry = NULL;
}
write_unlock(&entries_lock);
if (dentry) {
dentry->d_inode->i_nlink--;
d_drop(dentry);
dput(dentry);
simple_release_fs(&bm_mnt, &entry_count);
}
}
/* /<entry> */
static ssize_t
bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
{
Node *e = file->f_path.dentry->d_inode->i_private;
ssize_t res;
char *page;
if (!(page = (char*) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
entry_status(e, page);
res = simple_read_from_buffer(buf, nbytes, ppos, page, strlen(page));
free_page((unsigned long) page);
return res;
}
static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct dentry *root;
Node *e = file->f_path.dentry->d_inode->i_private;
int res = parse_command(buffer, count);
switch (res) {
case 1: clear_bit(Enabled, &e->flags);
break;
case 2: set_bit(Enabled, &e->flags);
break;
case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
kill_node(e);
mutex_unlock(&root->d_inode->i_mutex);
dput(root);
break;
default: return res;
}
return count;
}
static const struct file_operations bm_entry_operations = {
.read = bm_entry_read,
.write = bm_entry_write,
};
/* /register */
static ssize_t bm_register_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
Node *e;
struct inode *inode;
struct dentry *root, *dentry;
struct super_block *sb = file->f_path.mnt->mnt_sb;
int err = 0;
e = create_entry(buffer, count);
if (IS_ERR(e))
return PTR_ERR(e);
root = dget(sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out;
err = -EEXIST;
if (dentry->d_inode)
goto out2;
inode = bm_get_inode(sb, S_IFREG | 0644);
err = -ENOMEM;
if (!inode)
goto out2;
err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count);
if (err) {
iput(inode);
inode = NULL;
goto out2;
}
e->dentry = dget(dentry);
inode->i_private = e;
inode->i_fop = &bm_entry_operations;
d_instantiate(dentry, inode);
write_lock(&entries_lock);
list_add(&e->list, &entries);
write_unlock(&entries_lock);
err = 0;
out2:
dput(dentry);
out:
mutex_unlock(&root->d_inode->i_mutex);
dput(root);
if (err) {
kfree(e);
return -EINVAL;
}
return count;
}
static const struct file_operations bm_register_operations = {
.write = bm_register_write,
};
/* /status */
static ssize_t
bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
char *s = enabled ? "enabled\n" : "disabled\n";
return simple_read_from_buffer(buf, nbytes, ppos, s, strlen(s));
}
static ssize_t bm_status_write(struct file * file, const char __user * buffer,
size_t count, loff_t *ppos)
{
int res = parse_command(buffer, count);
struct dentry *root;
switch (res) {
case 1: enabled = 0; break;
case 2: enabled = 1; break;
case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
while (!list_empty(&entries))
kill_node(list_entry(entries.next, Node, list));
mutex_unlock(&root->d_inode->i_mutex);
dput(root);
break;
default: return res;
}
return count;
}
static const struct file_operations bm_status_operations = {
.read = bm_status_read,
.write = bm_status_write,
};
/* Superblock handling */
static const struct super_operations s_ops = {
.statfs = simple_statfs,
.clear_inode = bm_clear_inode,
};
static int bm_fill_super(struct super_block * sb, void * data, int silent)
{
static struct tree_descr bm_files[] = {
[2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
[3] = {"register", &bm_register_operations, S_IWUSR},
/* last one */ {""}
};
int err = simple_fill_super(sb, 0x42494e4d, bm_files);
if (!err)
sb->s_op = &s_ops;
return err;
}
static int bm_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
return get_sb_single(fs_type, flags, data, bm_fill_super, mnt);
}
static struct linux_binfmt misc_format = {
.module = THIS_MODULE,
.load_binary = load_misc_binary,
};
static struct file_system_type bm_fs_type = {
.owner = THIS_MODULE,
.name = "binfmt_misc",
.get_sb = bm_get_sb,
.kill_sb = kill_litter_super,
};
static int __init init_misc_binfmt(void)
{
int err = register_filesystem(&bm_fs_type);
if (!err) {
err = register_binfmt(&misc_format);
if (err)
unregister_filesystem(&bm_fs_type);
}
return err;
}
static void __exit exit_misc_binfmt(void)
{
unregister_binfmt(&misc_format);
unregister_filesystem(&bm_fs_type);
}
core_initcall(init_misc_binfmt);
module_exit(exit_misc_binfmt);
MODULE_LICENSE("GPL");
| gpl-2.0 |
gromaudio/linux-imx6-31053 | ipc/shm.c | 1179 | 32665 | /*
* linux/ipc/shm.c
* Copyright (C) 1992, 1993 Krishna Balasubramanian
* Many improvements/fixes by Bruno Haible.
* Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
* Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
*
* /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
* BIGMEM support, Andrea Arcangeli <andrea@suse.de>
* SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
* HIGHMEM support, Ingo Molnar <mingo@redhat.com>
* Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
* Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
* Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* Better ipc lock (kern_ipc_perm.lock) handling
* Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
*/
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <asm/uaccess.h>
#include "util.h"
struct shm_file_data {
int id;
struct ipc_namespace *ns;
struct file *file;
const struct vm_operations_struct *vm_ops;
};
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;
#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
#define shm_unlock(shp) \
ipc_unlock(&(shp)->shm_perm)
static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
ns->shm_ctlmax = SHMMAX;
ns->shm_ctlall = SHMALL;
ns->shm_ctlmni = SHMMNI;
ns->shm_rmid_forced = 0;
ns->shm_tot = 0;
ipc_init_ids(&shm_ids(ns));
}
/*
* Called with shm_ids.rwsem (writer) and the shp structure locked.
* Only shm_ids.rwsem remains locked on exit.
*/
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_nattch){
shp->shm_perm.mode |= SHM_DEST;
/* Do not find it any more */
shp->shm_perm.key = IPC_PRIVATE;
shm_unlock(shp);
} else
shm_destroy(ns, shp);
}
#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif
static int __init ipc_ns_init(void)
{
shm_init_ns(&init_ipc_ns);
return 0;
}
pure_initcall(ipc_ns_init);
void __init shm_init (void)
{
ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
/*
* shm_lock_(check_) routines are called in the paths where the rwsem
* is not necessarily held.
*/
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return (struct shmid_kernel *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
rcu_read_lock();
ipc_lock_object(&ipcp->shm_perm);
}
static void shm_rcu_free(struct rcu_head *head)
{
struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
struct shmid_kernel *shp = ipc_rcu_to_struct(p);
security_shm_free(shp);
ipc_rcu_free(head);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
BUG_ON(IS_ERR(shp));
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
}
/*
* shm_destroy - free the struct shmid_kernel
*
* @ns: namespace
* @shp: struct to free
*
* It has to be called with shp and shm_ids.rwsem (writer) locked,
* but returns with shp unlocked and freed.
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
struct file *shm_file;
shm_file = shp->shm_file;
shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(ns, shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
fput(shm_file);
ipc_rcu_putref(shp, shm_rcu_free);
}
/*
* shm_may_destroy - identifies whether shm segment should be destroyed now
*
* Returns true if and only if there are no active users of the segment and
* one of the following is true:
*
* 1) shmctl(id, IPC_RMID, NULL) was called for this shp
*
* 2) sysctl kernel.shm_rmid_forced is set to 1.
*/
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
return (shp->shm_nattch == 0) &&
(ns->shm_rmid_forced ||
(shp->shm_perm.mode & SHM_DEST));
}
/*
* remove the attach descriptor vma.
* free memory for segment if it is marked destroyed.
* The descriptor has already been removed from the current->mm->mmap list
* and will later be kfree()d.
*/
static void shm_close(struct vm_area_struct *vma)
{
struct file * file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns;
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
BUG_ON(IS_ERR(shp));
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
struct kern_ipc_perm *ipcp = p;
struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_creator != current)
return 0;
/*
* Mark it as orphaned to destroy the segment when
* kernel.shm_rmid_forced is changed.
* It is noop if the following shm_may_destroy() returns true.
*/
shp->shm_creator = NULL;
/*
* Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
* is not set, it shouldn't be deleted here.
*/
if (!ns->shm_rmid_forced)
return 0;
if (shm_may_destroy(ns, shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
return 0;
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
struct kern_ipc_perm *ipcp = p;
struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
/*
* We want to destroy segments without users and with already
* exit'ed originating process.
*
* As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
if (shp->shm_creator != NULL)
return 0;
if (shm_may_destroy(ns, shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
return 0;
}
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
up_write(&shm_ids(ns).rwsem);
}
void exit_shm(struct task_struct *task)
{
struct ipc_namespace *ns = task->nsproxy->ipc_ns;
if (shm_ids(ns).in_use == 0)
return;
/* Destroy all already created segments, but not mapped yet */
down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
return sfd->vm_ops->fault(vma, vmf);
}
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
int err = 0;
if (sfd->vm_ops->set_policy)
err = sfd->vm_ops->set_policy(vma, new);
return err;
}
static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct mempolicy *pol = NULL;
if (sfd->vm_ops->get_policy)
pol = sfd->vm_ops->get_policy(vma, addr);
else if (vma->vm_policy)
pol = vma->vm_policy;
return pol;
}
#endif
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
struct shm_file_data *sfd = shm_file_data(file);
int ret;
ret = sfd->file->f_op->mmap(sfd->file, vma);
if (ret != 0)
return ret;
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
BUG_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
shm_open(vma);
return ret;
}
static int shm_release(struct inode *ino, struct file *file)
{
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
}
static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fsync)
return -EINVAL;
return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}
static long shm_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fallocate)
return -EOPNOTSUPP;
return sfd->file->f_op->fallocate(file, mode, offset, len);
}
static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
pgoff, flags);
}
static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
#ifndef CONFIG_MMU
.get_unmapped_area = shm_get_unmapped_area,
#endif
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};
static const struct file_operations shm_file_operations_huge = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};
int is_file_shm_hugepages(struct file *file)
{
return file->f_op == &shm_file_operations_huge;
}
static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
#endif
};
/**
* newseg - Create a new shared memory segment
* @ns: namespace
* @params: ptr to the structure that contains key, size and shmflg
*
* Called with shm_ids.rwsem held as a writer.
*/
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
key_t key = params->key;
int shmflg = params->flg;
size_t size = params->u.size;
int error;
struct shmid_kernel *shp;
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file * file;
char name[13];
int id;
vm_flags_t acctflag = 0;
if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;
if (ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
shp = ipc_rcu_alloc(sizeof(*shp));
if (!shp)
return -ENOMEM;
shp->shm_perm.key = key;
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
ipc_rcu_putref(shp, ipc_rcu_free);
return error;
}
sprintf (name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
& SHM_HUGE_MASK);
size_t hugesize;
if (!hs) {
error = -EINVAL;
goto no_file;
}
hugesize = ALIGN(size, huge_page_size(hs));
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for.
*/
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
file = shmem_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
goto no_file;
id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
if (id < 0) {
error = id;
goto no_id;
}
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_lprid = 0;
shp->shm_atim = shp->shm_dtim = 0;
shp->shm_ctim = get_seconds();
shp->shm_segsz = size;
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
*/
file_inode(file)->i_ino = shp->shm_perm.id;
ns->shm_tot += numpages;
error = shp->shm_perm.id;
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
return error;
no_id:
if (is_file_hugepages(file) && shp->mlock_user)
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
ipc_rcu_putref(shp, shm_rcu_free);
return error;
}
/*
* Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
return security_shm_associate(shp, shmflg);
}
/*
* Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_segsz < params->u.size)
return -EINVAL;
return 0;
}
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
struct ipc_namespace *ns;
struct ipc_ops shm_ops;
struct ipc_params shm_params;
ns = current->nsproxy->ipc_ns;
shm_ops.getnew = newseg;
shm_ops.associate = shm_security;
shm_ops.more_checks = shm_more_checks;
shm_params.key = key;
shm_params.flg = shmflg;
shm_params.u.size = size;
return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct shmid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
out.shm_segsz = in->shm_segsz;
out.shm_atime = in->shm_atime;
out.shm_dtime = in->shm_dtime;
out.shm_ctime = in->shm_ctime;
out.shm_cpid = in->shm_cpid;
out.shm_lpid = in->shm_lpid;
out.shm_nattch = in->shm_nattch;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct shmid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->shm_perm.uid = tbuf_old.shm_perm.uid;
out->shm_perm.gid = tbuf_old.shm_perm.gid;
out->shm_perm.mode = tbuf_old.shm_perm.mode;
return 0;
}
default:
return -EINVAL;
}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct shminfo out;
if(in->shmmax > INT_MAX)
out.shmmax = INT_MAX;
else
out.shmmax = (int)in->shmmax;
out.shmmin = in->shmmin;
out.shmmni = in->shmmni;
out.shmseg = in->shmseg;
out.shmall = in->shmall;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
/*
* Calculate and add used RSS and swap pages of a shm.
* Called with shm_ids.rwsem held as a reader
*/
static void shm_add_rss_swap(struct shmid_kernel *shp,
unsigned long *rss_add, unsigned long *swp_add)
{
struct inode *inode;
inode = file_inode(shp->shm_file);
if (is_file_hugepages(shp->shm_file)) {
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_file(shp->shm_file);
*rss_add += pages_per_huge_page(h) * mapping->nrpages;
} else {
#ifdef CONFIG_SHMEM
struct shmem_inode_info *info = SHMEM_I(inode);
spin_lock(&info->lock);
*rss_add += inode->i_mapping->nrpages;
*swp_add += info->swapped;
spin_unlock(&info->lock);
#else
*rss_add += inode->i_mapping->nrpages;
#endif
}
}
/*
* Called with shm_ids.rwsem held as a reader
*/
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
unsigned long *swp)
{
int next_id;
int total, in_use;
*rss = 0;
*swp = 0;
in_use = shm_ids(ns).in_use;
for (total = 0, next_id = 0; total < in_use; next_id++) {
struct kern_ipc_perm *ipc;
struct shmid_kernel *shp;
ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
if (ipc == NULL)
continue;
shp = container_of(ipc, struct shmid_kernel, shm_perm);
shm_add_rss_swap(shp, rss, swp);
total++;
}
}
/*
* This function handles some shmctl commands which require the rwsem
* to be held in write mode.
* NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
struct shmid_ds __user *buf, int version)
{
struct kern_ipc_perm *ipcp;
struct shmid64_ds shmid64;
struct shmid_kernel *shp;
int err;
if (cmd == IPC_SET) {
if (copy_shmid_from_user(&shmid64, buf, version))
return -EFAULT;
}
down_write(&shm_ids(ns).rwsem);
rcu_read_lock();
ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
&shmid64.shm_perm, 0);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_unlock1;
}
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
err = security_shm_shmctl(shp, cmd);
if (err)
goto out_unlock1;
switch (cmd) {
case IPC_RMID:
ipc_lock_object(&shp->shm_perm);
/* do_shm_rmid unlocks the ipc object and rcu */
do_shm_rmid(ns, ipcp);
goto out_up;
case IPC_SET:
ipc_lock_object(&shp->shm_perm);
err = ipc_update_perm(&shmid64.shm_perm, ipcp);
if (err)
goto out_unlock0;
shp->shm_ctim = get_seconds();
break;
default:
err = -EINVAL;
goto out_unlock1;
}
out_unlock0:
ipc_unlock_object(&shp->shm_perm);
out_unlock1:
rcu_read_unlock();
out_up:
up_write(&shm_ids(ns).rwsem);
return err;
}
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
int cmd, int version, void __user *buf)
{
int err;
struct shmid_kernel *shp;
/* preliminary security checks for *_INFO */
if (cmd == IPC_INFO || cmd == SHM_INFO) {
err = security_shm_shmctl(NULL, cmd);
if (err)
return err;
}
switch (cmd) {
case IPC_INFO:
{
struct shminfo64 shminfo;
memset(&shminfo, 0, sizeof(shminfo));
shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
shminfo.shmmax = ns->shm_ctlmax;
shminfo.shmall = ns->shm_ctlall;
shminfo.shmmin = SHMMIN;
if(copy_shminfo_to_user (buf, &shminfo, version))
return -EFAULT;
down_read(&shm_ids(ns).rwsem);
err = ipc_get_maxid(&shm_ids(ns));
up_read(&shm_ids(ns).rwsem);
if(err<0)
err = 0;
goto out;
}
case SHM_INFO:
{
struct shm_info shm_info;
memset(&shm_info, 0, sizeof(shm_info));
down_read(&shm_ids(ns).rwsem);
shm_info.used_ids = shm_ids(ns).in_use;
shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
shm_info.shm_tot = ns->shm_tot;
shm_info.swap_attempts = 0;
shm_info.swap_successes = 0;
err = ipc_get_maxid(&shm_ids(ns));
up_read(&shm_ids(ns).rwsem);
if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
err = -EFAULT;
goto out;
}
err = err < 0 ? 0 : err;
goto out;
}
case SHM_STAT:
case IPC_STAT:
{
struct shmid64_ds tbuf;
int result;
rcu_read_lock();
if (cmd == SHM_STAT) {
shp = shm_obtain_object(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
goto out_unlock;
}
result = shp->shm_perm.id;
} else {
shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
goto out_unlock;
}
result = 0;
}
err = -EACCES;
if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
goto out_unlock;
err = security_shm_shmctl(shp, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
tbuf.shm_segsz = shp->shm_segsz;
tbuf.shm_atime = shp->shm_atim;
tbuf.shm_dtime = shp->shm_dtim;
tbuf.shm_ctime = shp->shm_ctim;
tbuf.shm_cpid = shp->shm_cprid;
tbuf.shm_lpid = shp->shm_lprid;
tbuf.shm_nattch = shp->shm_nattch;
rcu_read_unlock();
if (copy_shmid_to_user(buf, &tbuf, version))
err = -EFAULT;
else
err = result;
goto out;
}
default:
return -EINVAL;
}
out_unlock:
rcu_read_unlock();
out:
return err;
}
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
struct shmid_kernel *shp;
int err, version;
struct ipc_namespace *ns;
if (cmd < 0 || shmid < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case SHM_INFO:
case SHM_STAT:
case IPC_STAT:
return shmctl_nolock(ns, shmid, cmd, version, buf);
case IPC_RMID:
case IPC_SET:
return shmctl_down(ns, shmid, cmd, buf, version);
case SHM_LOCK:
case SHM_UNLOCK:
{
struct file *shm_file;
rcu_read_lock();
shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
goto out_unlock1;
}
audit_ipc_obj(&(shp->shm_perm));
err = security_shm_shmctl(shp, cmd);
if (err)
goto out_unlock1;
ipc_lock_object(&shp->shm_perm);
if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
kuid_t euid = current_euid();
if (!uid_eq(euid, shp->shm_perm.uid) &&
!uid_eq(euid, shp->shm_perm.cuid)) {
err = -EPERM;
goto out_unlock0;
}
if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
err = -EPERM;
goto out_unlock0;
}
}
shm_file = shp->shm_file;
/* check if shm_destroy() is tearing down shp */
if (shm_file == NULL) {
err = -EIDRM;
goto out_unlock0;
}
if (is_file_hugepages(shm_file))
goto out_unlock0;
if (cmd == SHM_LOCK) {
struct user_struct *user = current_user();
err = shmem_lock(shm_file, 1, user);
if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
shp->shm_perm.mode |= SHM_LOCKED;
shp->mlock_user = user;
}
goto out_unlock0;
}
/* SHM_UNLOCK */
if (!(shp->shm_perm.mode & SHM_LOCKED))
goto out_unlock0;
shmem_lock(shm_file, 0, shp->mlock_user);
shp->shm_perm.mode &= ~SHM_LOCKED;
shp->mlock_user = NULL;
get_file(shm_file);
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
shmem_unlock_mapping(shm_file->f_mapping);
fput(shm_file);
return err;
}
default:
return -EINVAL;
}
out_unlock0:
ipc_unlock_object(&shp->shm_perm);
out_unlock1:
rcu_read_unlock();
return err;
}
/*
* Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
*
* NOTE! Despite the name, this is NOT a direct system call entrypoint. The
* "raddr" thing points to kernel space, and there has to be a wrapper around
* this.
*/
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
unsigned long shmlba)
{
struct shmid_kernel *shp;
unsigned long addr;
unsigned long size;
struct file * file;
int err;
unsigned long flags;
unsigned long prot;
int acc_mode;
struct ipc_namespace *ns;
struct shm_file_data *sfd;
struct path path;
fmode_t f_mode;
unsigned long populate = 0;
err = -EINVAL;
if (shmid < 0)
goto out;
else if ((addr = (ulong)shmaddr)) {
if (addr & (shmlba - 1)) {
if (shmflg & SHM_RND)
addr &= ~(shmlba - 1); /* round down */
else
#ifndef __ARCH_FORCE_SHMLBA
if (addr & ~PAGE_MASK)
#endif
goto out;
}
flags = MAP_SHARED | MAP_FIXED;
} else {
if ((shmflg & SHM_REMAP))
goto out;
flags = MAP_SHARED;
}
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
acc_mode = S_IRUGO;
f_mode = FMODE_READ;
} else {
prot = PROT_READ | PROT_WRITE;
acc_mode = S_IRUGO | S_IWUGO;
f_mode = FMODE_READ | FMODE_WRITE;
}
if (shmflg & SHM_EXEC) {
prot |= PROT_EXEC;
acc_mode |= S_IXUGO;
}
/*
* We cannot rely on the fs check since SYSV IPC does have an
* additional creator id...
*/
ns = current->nsproxy->ipc_ns;
rcu_read_lock();
shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
goto out_unlock;
}
err = -EACCES;
if (ipcperms(ns, &shp->shm_perm, acc_mode))
goto out_unlock;
err = security_shm_shmat(shp, shmaddr, shmflg);
if (err)
goto out_unlock;
ipc_lock_object(&shp->shm_perm);
/* check if shm_destroy() is tearing down shp */
if (shp->shm_file == NULL) {
ipc_unlock_object(&shp->shm_perm);
err = -EIDRM;
goto out_unlock;
}
path = shp->shm_file->f_path;
path_get(&path);
shp->shm_nattch++;
size = i_size_read(path.dentry->d_inode);
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
err = -ENOMEM;
sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
if (!sfd) {
path_put(&path);
goto out_nattch;
}
file = alloc_file(&path, f_mode,
is_file_hugepages(shp->shm_file) ?
&shm_file_operations_huge :
&shm_file_operations);
err = PTR_ERR(file);
if (IS_ERR(file)) {
kfree(sfd);
path_put(&path);
goto out_nattch;
}
file->private_data = sfd;
file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
sfd->file = shp->shm_file;
sfd->vm_ops = NULL;
err = security_mmap_file(file, prot, flags);
if (err)
goto out_fput;
down_write(&current->mm->mmap_sem);
if (addr && !(shmflg & SHM_REMAP)) {
err = -EINVAL;
if (find_vma_intersection(current->mm, addr, addr + size))
goto invalid;
/*
* If shm segment goes below stack, make sure there is some
* space left for the stack to grow (at least 4 pages).
*/
if (addr < current->mm->start_stack &&
addr > current->mm->start_stack - size - PAGE_SIZE * 5)
goto invalid;
}
addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
*raddr = addr;
err = 0;
if (IS_ERR_VALUE(addr))
err = (long)addr;
invalid:
up_write(&current->mm->mmap_sem);
if (populate)
mm_populate(addr, populate);
out_fput:
fput(file);
out_nattch:
down_write(&shm_ids(ns).rwsem);
shp = shm_lock(ns, shmid);
BUG_ON(IS_ERR(shp));
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
up_write(&shm_ids(ns).rwsem);
return err;
out_unlock:
rcu_read_unlock();
out:
return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
unsigned long ret;
long err;
err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
if (err)
return err;
force_successful_syscall_return();
return (long)ret;
}
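/*
 * Userspace view of the SysV shm calls implemented in this file
 * (illustrative only):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */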
/*
* detach and kill segment if marked destroyed.
* The work is done in shm_close.
*/
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = (unsigned long)shmaddr;
int retval = -EINVAL;
#ifdef CONFIG_MMU
loff_t size = 0;
struct vm_area_struct *next;
#endif
if (addr & ~PAGE_MASK)
return retval;
down_write(&mm->mmap_sem);
/*
* This function tries to be smart and unmap shm segments that
* were modified by partial mlock or munmap calls:
* - It first determines the size of the shm segment that should be
* unmapped: It searches for a vma that is backed by shm and that
* started at address shmaddr. It records its size and then unmaps
* it.
* - Then it unmaps all shm vmas that started at shmaddr and that
* are within the initially determined size.
* Errors from do_munmap are ignored: the function only fails if
* it's called with invalid parameters or if it's called to unmap
* a part of a vma. Both calls in this function are for full vmas,
* the parameters are directly copied from the vma itself and always
* valid - therefore do_munmap cannot fail. (famous last words?)
*/
/*
* If it had been mremap()'d, the starting address would not
* match the usual checks anyway. So assume all vma's are
* above the starting address given.
*/
vma = find_vma(mm, addr);
#ifdef CONFIG_MMU
while (vma) {
next = vma->vm_next;
/*
* Check if the starting address would match, i.e. it's
* a fragment created by mprotect() and/or munmap(), or it
* otherwise starts at this address with no hassles.
*/
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
size = file_inode(vma->vm_file)->i_size;
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
* loop that uses the size information to stop
* searching for matching vma's.
*/
retval = 0;
vma = next;
break;
}
vma = next;
}
/*
* We need look no further than the maximum address a fragment
* could possibly have landed at. Also cast things to loff_t to
* prevent overflows and make comparisons vs. equal-width types.
*/
size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
next = vma->vm_next;
/* finding a matching vma now does not alter retval */
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
vma = next;
}
#else /* CONFIG_MMU */
/* under NOMMU conditions, the exact address to be destroyed must be
* given */
if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
retval = 0;
}
#endif
up_write(&mm->mmap_sem);
return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct shmid_kernel *shp = it;
unsigned long rss = 0, swp = 0;
shm_add_rss_swap(shp, &rss, &swp);
#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif
return seq_printf(s,
"%10d %10d %4o " SIZE_SPEC " %5u %5u "
"%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
SIZE_SPEC " " SIZE_SPEC "\n",
shp->shm_perm.key,
shp->shm_perm.id,
shp->shm_perm.mode,
shp->shm_segsz,
shp->shm_cprid,
shp->shm_lprid,
shp->shm_nattch,
from_kuid_munged(user_ns, shp->shm_perm.uid),
from_kgid_munged(user_ns, shp->shm_perm.gid),
from_kuid_munged(user_ns, shp->shm_perm.cuid),
from_kgid_munged(user_ns, shp->shm_perm.cgid),
shp->shm_atim,
shp->shm_dtim,
shp->shm_ctim,
rss * PAGE_SIZE,
swp * PAGE_SIZE);
}
#endif
| gpl-2.0 |
ngiordano/chimera_kernel | drivers/media/dvb/frontends/dib9000.c | 2203 | 71458 | /*
* Linux-DVB Driver for DiBcom's DiB9000 demodulator family.
*
* Copyright (C) 2005-10 DiBcom (http://www.dibcom.fr/)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
#include "dib9000.h"
#include "dibx000_common.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
#define MAX_NUMBER_OF_FRONTENDS 6
struct i2c_device {
struct i2c_adapter *i2c_adap;
u8 i2c_addr;
u8 *i2c_read_buffer;
u8 *i2c_write_buffer;
};
/* lock */
#define DIB_LOCK struct mutex
#define DibAcquireLock(lock) do { if (mutex_lock_interruptible(lock) < 0) dprintk("could not get the lock"); } while (0)
#define DibReleaseLock(lock) mutex_unlock(lock)
#define DibInitLock(lock) mutex_init(lock)
#define DibFreeLock(lock)
struct dib9000_pid_ctrl {
#define DIB9000_PID_FILTER_CTRL 0
#define DIB9000_PID_FILTER 1
u8 cmd;
u8 id;
u16 pid;
u8 onoff;
};
struct dib9000_state {
struct i2c_device i2c;
struct dibx000_i2c_master i2c_master;
struct i2c_adapter tuner_adap;
struct i2c_adapter component_bus;
u16 revision;
u8 reg_offs;
enum frontend_tune_state tune_state;
u32 status;
struct dvb_frontend_parametersContext channel_status;
u8 fe_id;
#define DIB9000_GPIO_DEFAULT_DIRECTIONS 0xffff
u16 gpio_dir;
#define DIB9000_GPIO_DEFAULT_VALUES 0x0000
u16 gpio_val;
#define DIB9000_GPIO_DEFAULT_PWM_POS 0xffff
u16 gpio_pwm_pos;
union { /* common for all chips */
struct {
u8 mobile_mode:1;
} host;
struct {
struct dib9000_fe_memory_map {
u16 addr;
u16 size;
} fe_mm[18];
u8 memcmd;
DIB_LOCK mbx_if_lock; /* to protect read/write operations */
DIB_LOCK mbx_lock; /* to protect the whole mailbox handling */
DIB_LOCK mem_lock; /* to protect the memory accesses */
DIB_LOCK mem_mbx_lock; /* to protect the memory-based mailbox */
#define MBX_MAX_WORDS (256 - 200 - 2)
#define DIB9000_MSG_CACHE_SIZE 2
u16 message_cache[DIB9000_MSG_CACHE_SIZE][MBX_MAX_WORDS];
u8 fw_is_running;
} risc;
} platform;
union { /* common for all platforms */
struct {
struct dib9000_config cfg;
} d9;
} chip;
struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
u16 component_bus_speed;
/* for the I2C transfer */
struct i2c_msg msg[2];
u8 i2c_write_buffer[255];
u8 i2c_read_buffer[255];
DIB_LOCK demod_lock;
u8 get_frontend_internal;
struct dib9000_pid_ctrl pid_ctrl[10];
s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
};
static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
enum dib9000_power_mode {
DIB9000_POWER_ALL = 0,
DIB9000_POWER_NO,
DIB9000_POWER_INTERF_ANALOG_AGC,
DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD,
DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD,
DIB9000_POWER_INTERFACE_ONLY,
};
enum dib9000_out_messages {
OUT_MSG_HBM_ACK,
OUT_MSG_HOST_BUF_FAIL,
OUT_MSG_REQ_VERSION,
OUT_MSG_BRIDGE_I2C_W,
OUT_MSG_BRIDGE_I2C_R,
OUT_MSG_BRIDGE_APB_W,
OUT_MSG_BRIDGE_APB_R,
OUT_MSG_SCAN_CHANNEL,
OUT_MSG_MONIT_DEMOD,
OUT_MSG_CONF_GPIO,
OUT_MSG_DEBUG_HELP,
OUT_MSG_SUBBAND_SEL,
OUT_MSG_ENABLE_TIME_SLICE,
OUT_MSG_FE_FW_DL,
OUT_MSG_FE_CHANNEL_SEARCH,
OUT_MSG_FE_CHANNEL_TUNE,
OUT_MSG_FE_SLEEP,
OUT_MSG_FE_SYNC,
OUT_MSG_CTL_MONIT,
OUT_MSG_CONF_SVC,
OUT_MSG_SET_HBM,
OUT_MSG_INIT_DEMOD,
OUT_MSG_ENABLE_DIVERSITY,
OUT_MSG_SET_OUTPUT_MODE,
OUT_MSG_SET_PRIORITARY_CHANNEL,
OUT_MSG_ACK_FRG,
OUT_MSG_INIT_PMU,
};
enum dib9000_in_messages {
IN_MSG_DATA,
IN_MSG_FRAME_INFO,
IN_MSG_CTL_MONIT,
IN_MSG_ACK_FREE_ITEM,
IN_MSG_DEBUG_BUF,
IN_MSG_MPE_MONITOR,
IN_MSG_RAWTS_MONITOR,
IN_MSG_END_BRIDGE_I2C_RW,
IN_MSG_END_BRIDGE_APB_RW,
IN_MSG_VERSION,
IN_MSG_END_OF_SCAN,
IN_MSG_MONIT_DEMOD,
IN_MSG_ERROR,
IN_MSG_FE_FW_DL_DONE,
IN_MSG_EVENT,
IN_MSG_ACK_CHANGE_SVC,
IN_MSG_HBM_PROF,
};
/* memory_access requests */
#define FE_MM_W_CHANNEL 0
#define FE_MM_W_FE_INFO 1
#define FE_MM_RW_SYNC 2
#define FE_SYNC_CHANNEL 1
#define FE_SYNC_W_GENERIC_MONIT 2
#define FE_SYNC_COMPONENT_ACCESS 3
#define FE_MM_R_CHANNEL_SEARCH_STATE 3
#define FE_MM_R_CHANNEL_UNION_CONTEXT 4
#define FE_MM_R_FE_INFO 5
#define FE_MM_R_FE_MONITOR 6
#define FE_MM_W_CHANNEL_HEAD 7
#define FE_MM_W_CHANNEL_UNION 8
#define FE_MM_W_CHANNEL_CONTEXT 9
#define FE_MM_R_CHANNEL_UNION 10
#define FE_MM_R_CHANNEL_CONTEXT 11
#define FE_MM_R_CHANNEL_TUNE_STATE 12
#define FE_MM_R_GENERIC_MONITORING_SIZE 13
#define FE_MM_W_GENERIC_MONITORING 14
#define FE_MM_R_GENERIC_MONITORING 15
#define FE_MM_W_COMPONENT_ACCESS 16
#define FE_MM_RW_COMPONENT_ACCESS_BUFFER 17
static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len);
static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len);
static u16 to_fw_output_mode(u16 mode)
{
switch (mode) {
case OUTMODE_HIGH_Z:
return 0;
case OUTMODE_MPEG2_PAR_GATED_CLK:
return 4;
case OUTMODE_MPEG2_PAR_CONT_CLK:
return 8;
case OUTMODE_MPEG2_SERIAL:
return 16;
case OUTMODE_DIVERSITY:
return 128;
case OUTMODE_MPEG2_FIFO:
return 2;
case OUTMODE_ANALOG_ADC:
return 1;
default:
return 0;
}
}
static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
int ret;
if (state->platform.risc.fw_is_running && (reg < 1024))
return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len);
memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c.i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 2;
state->msg[1].addr = state->i2c.i2c_addr >> 1;
state->msg[1].flags = I2C_M_RD;
state->msg[1].buf = b;
state->msg[1].len = len;
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
state->i2c_write_buffer[0] |= (1 << 5);
if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
state->i2c_write_buffer[0] |= (1 << 4);
do {
l = len < chunk_size ? len : chunk_size;
state->msg[1].len = l;
state->msg[1].buf = b;
ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
if (ret != 0) {
dprintk("i2c read error on %d", reg);
return -EREMOTEIO;
}
b += l;
len -= l;
if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
reg += l / 2;
} while ((ret == 0) && len);
return 0;
}
static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
{
struct i2c_msg msg[2] = {
{.addr = i2c->i2c_addr >> 1, .flags = 0,
.buf = i2c->i2c_write_buffer, .len = 2},
{.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD,
.buf = i2c->i2c_read_buffer, .len = 2},
};
i2c->i2c_write_buffer[0] = reg >> 8;
i2c->i2c_write_buffer[1] = reg & 0xff;
if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
dprintk("read register %x error", reg);
return 0;
}
return (i2c->i2c_read_buffer[0] << 8) | i2c->i2c_read_buffer[1];
}
static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg)
{
if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, 0) != 0)
return 0;
return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
}
static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute)
{
if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2,
attribute) != 0)
return 0;
return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
}
#define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
static int dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
int ret;
if (state->platform.risc.fw_is_running && (reg < 1024)) {
if (dib9000_risc_apb_access_write
(state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0)
return -EINVAL;
return 0;
}
memset(&state->msg[0], 0, sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c.i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = len + 2;
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = (reg) & 0xff;
if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
state->i2c_write_buffer[0] |= (1 << 5);
if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
state->i2c_write_buffer[0] |= (1 << 4);
do {
l = len < chunk_size ? len : chunk_size;
state->msg[0].len = l + 2;
memcpy(&state->i2c_write_buffer[2], buf, l);
ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
buf += l;
len -= l;
if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
reg += l / 2;
} while ((ret == 0) && len);
return ret;
}
static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
{
struct i2c_msg msg = {
.addr = i2c->i2c_addr >> 1, .flags = 0,
.buf = i2c->i2c_write_buffer, .len = 4
};
i2c->i2c_write_buffer[0] = (reg >> 8) & 0xff;
i2c->i2c_write_buffer[1] = reg & 0xff;
i2c->i2c_write_buffer[2] = (val >> 8) & 0xff;
i2c->i2c_write_buffer[3] = val & 0xff;
return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}
static inline int dib9000_write_word(struct dib9000_state *state, u16 reg, u16 val)
{
u8 b[2] = { val >> 8, val & 0xff };
return dib9000_write16_attr(state, reg, b, 2, 0);
}
static inline int dib9000_write_word_attr(struct dib9000_state *state, u16 reg, u16 val, u16 attribute)
{
u8 b[2] = { val >> 8, val & 0xff };
return dib9000_write16_attr(state, reg, b, 2, attribute);
}
#define dib9000_write(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, 0)
#define dib9000_write16_noinc(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
#define dib9000_write16_noinc_attr(state, reg, buf, len, attribute) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | (attribute))
#define dib9000_mbx_send(state, id, data, len) dib9000_mbx_send_attr(state, id, data, len, 0)
#define dib9000_mbx_get_message(state, id, msg, len) dib9000_mbx_get_message_attr(state, id, msg, len, 0)
#define MAC_IRQ (1 << 1)
#define IRQ_POL_MSK (1 << 4)
#define dib9000_risc_mem_read_chunks(state, b, len) dib9000_read16_attr(state, 1063, b, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
#define dib9000_risc_mem_write_chunks(state, buf, len) dib9000_write16_attr(state, 1063, buf, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
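/*
 * Layout of the 14-byte command block written to register 1056, as inferred
 * from the code below: b[4]/b[5] and b[12]/b[13] carry the start address
 * (big-endian), b[8]/b[9] the end address (start + len), and b[1] = 1 arms
 * the controller. For reads, an extra word with bit 15 set is written
 * afterwards to start the transfer.
 */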
static void dib9000_risc_mem_setup_cmd(struct dib9000_state *state, u32 addr, u32 len, u8 reading)
{
u8 b[14] = { 0 };
/* dprintk("%d memcmd: %d %d %d\n", state->fe_id, addr, addr+len, len); */
/* b[0] = 0 << 7; */
b[1] = 1;
/* b[2] = 0; */
/* b[3] = 0; */
b[4] = (u8) (addr >> 8);
b[5] = (u8) (addr & 0xff);
/* b[10] = 0; */
/* b[11] = 0; */
b[12] = (u8) (addr >> 8);
b[13] = (u8) (addr & 0xff);
addr += len;
/* b[6] = 0; */
/* b[7] = 0; */
b[8] = (u8) (addr >> 8);
b[9] = (u8) (addr & 0xff);
dib9000_write(state, 1056, b, 14);
if (reading)
dib9000_write_word(state, 1056, (1 << 15) | 1);
state->platform.risc.memcmd = -1; /* if it was called directly reset it - to force a future setup-call to set it */
}
static void dib9000_risc_mem_setup(struct dib9000_state *state, u8 cmd)
{
struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd & 0x7f];
/* decide whether we need to "refresh" the memory controller */
if (state->platform.risc.memcmd == cmd && /* same command */
!(cmd & 0x80 && m->size < 67)) /* and we do not want to read something with less than 67 bytes looping - working around a bug in the memory controller */
return;
dib9000_risc_mem_setup_cmd(state, m->addr, m->size, cmd & 0x80);
state->platform.risc.memcmd = cmd;
}
static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u16 len)
{
if (!state->platform.risc.fw_is_running)
return -EIO;
DibAcquireLock(&state->platform.risc.mem_lock);
dib9000_risc_mem_setup(state, cmd | 0x80);
dib9000_risc_mem_read_chunks(state, b, len);
DibReleaseLock(&state->platform.risc.mem_lock);
return 0;
}
static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8 * b)
{
struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd];
if (!state->platform.risc.fw_is_running)
return -EIO;
DibAcquireLock(&state->platform.risc.mem_lock);
dib9000_risc_mem_setup(state, cmd);
dib9000_risc_mem_write_chunks(state, b, m->size);
DibReleaseLock(&state->platform.risc.mem_lock);
return 0;
}
static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u16 key, const u8 * code, u32 len)
{
u16 offs;
if (risc_id == 1)
offs = 16;
else
offs = 0;
/* config crtl reg */
dib9000_write_word(state, 1024 + offs, 0x000f);
dib9000_write_word(state, 1025 + offs, 0);
dib9000_write_word(state, 1031 + offs, key);
dprintk("going to download %dB of microcode", len);
if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
return -EIO;
}
dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
return 0;
}
static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
{
u16 mbox_offs;
u16 reset_reg;
u16 tries = 1000;
if (risc_id == 1)
mbox_offs = 16;
else
mbox_offs = 0;
/* Reset mailbox */
dib9000_write_word(state, 1027 + mbox_offs, 0x8000);
/* Read reset status */
do {
reset_reg = dib9000_read_word(state, 1027 + mbox_offs);
msleep(100);
} while ((reset_reg & 0x8000) && --tries);
if (reset_reg & 0x8000) {
dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
return -EIO;
}
dprintk("MBX: initialized");
return 0;
}
#define MAX_MAILBOX_TRY 100
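/*
 * Host -> RISC mailbox send: poll register 1043 until the outgoing mailbox
 * has room for the whole message (header word + payload), push the
 * id/length header and the byte-swapped payload through the
 * non-incrementing window at 1045, then bump nb_mes_in_RX via 1043.
 */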
static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data, u8 len, u16 attr)
{
u8 *d, b[2];
u16 tmp;
u16 size;
u32 i;
int ret = 0;
if (!state->platform.risc.fw_is_running)
return -EINVAL;
DibAcquireLock(&state->platform.risc.mbx_if_lock);
tmp = MAX_MAILBOX_TRY;
do {
size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
dprintk("MBX: RISC mbx full, retrying");
msleep(100);
} else
break;
} while (1);
/*dprintk( "MBX: size: %d", size); */
if (tmp == 0) {
ret = -EINVAL;
goto out;
}
#ifdef DUMP_MSG
dprintk("--> %02x %d ", id, len + 1);
for (i = 0; i < len; i++)
dprintk("%04x ", data[i]);
dprintk("\n");
#endif
/* byte-order conversion - works on big (where it is not necessary) or little endian */
d = (u8 *) data;
for (i = 0; i < len; i++) {
tmp = data[i];
*d++ = tmp >> 8;
*d++ = tmp & 0xff;
}
/* write msg */
b[0] = id;
b[1] = len + 1;
if (dib9000_write16_noinc_attr(state, 1045, b, 2, attr) != 0 || dib9000_write16_noinc_attr(state, 1045, (u8 *) data, len * 2, attr) != 0) {
ret = -EIO;
goto out;
}
/* update register nb_mes_in_RX */
ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr);
out:
DibReleaseLock(&state->platform.risc.mbx_if_lock);
return ret;
}
static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id, u16 attr)
{
#ifdef DUMP_MSG
u16 *d = data;
#endif
u16 tmp, i;
u8 size;
u8 mc_base;
if (!state->platform.risc.fw_is_running)
return 0;
DibAcquireLock(&state->platform.risc.mbx_if_lock);
if (risc_id == 1)
mc_base = 16;
else
mc_base = 0;
/* Length and type in the first word */
*data = dib9000_read_word_attr(state, 1029 + mc_base, attr);
size = *data & 0xff;
if (size <= MBX_MAX_WORDS) {
data++;
size--; /* Initial word already read */
dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, size * 2, attr);
/* to word conversion */
for (i = 0; i < size; i++) {
tmp = *data;
*data = (tmp >> 8) | (tmp << 8);
data++;
}
#ifdef DUMP_MSG
dprintk("<-- ");
for (i = 0; i < size + 1; i++)
dprintk("%04x ", d[i]);
dprintk("\n");
#endif
} else {
dprintk("MBX: message is too big for message cache (%d), flushing message", size);
size--; /* Initial word already read */
while (size--)
dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
}
/* Update register nb_mes_in_TX */
dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr);
DibReleaseLock(&state->platform.risc.mbx_if_lock);
return size + 1;
}
static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 size)
{
u32 ts = data[1] << 16 | data[0];
char *b = (char *)&data[2];
b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
if (*b == '~') {
b++;
dprintk(b);
} else
dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>");
return 1;
}
static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
{
int i;
u8 size;
u16 *block;
/* find a free slot */
for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
block = state->platform.risc.message_cache[i];
if (*block == 0) {
size = dib9000_mbx_read(state, block, 1, attr);
/* dprintk( "MBX: fetched %04x message to cache", *block); */
switch (*block >> 8) {
case IN_MSG_DEBUG_BUF:
dib9000_risc_debug_buf(state, block + 1, size); /* debug-messages are going to be printed right away */
*block = 0; /* free the block */
break;
#if 0
case IN_MSG_DATA: /* FE-TRACE */
dib9000_risc_data_process(state, block + 1, size);
*block = 0;
break;
#endif
default:
break;
}
return 1;
}
}
dprintk("MBX: no free cache-slot found for new message...");
return -1;
}
static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr)
{
if (risc_id == 0)
return (u8) (dib9000_read_word_attr(state, 1028, attr) >> 10) & 0x1f; /* 5 bit field */
else
return (u8) (dib9000_read_word_attr(state, 1044, attr) >> 8) & 0x7f; /* 7 bit field */
}
static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
{
int ret = 0;
u16 tmp;
if (!state->platform.risc.fw_is_running)
return -1;
DibAcquireLock(&state->platform.risc.mbx_lock);
if (dib9000_mbx_count(state, 1, attr)) /* 1=RiscB */
ret = dib9000_mbx_fetch_to_cache(state, attr);
tmp = dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
/* if (tmp) */
/* dprintk( "cleared IRQ: %x", tmp); */
DibReleaseLock(&state->platform.risc.mbx_lock);
return ret;
}
static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16 * msg, u8 * size, u16 attr)
{
u8 i;
u16 *block;
u16 timeout = 30;
*msg = 0;
do {
/* dib9000_mbx_get_from_cache(); */
for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
block = state->platform.risc.message_cache[i];
if ((*block >> 8) == id) {
*size = (*block & 0xff) - 1;
memcpy(msg, block + 1, (*size) * 2);
*block = 0; /* free the block */
i = 0; /* signal that we found a message */
break;
}
}
if (i == 0)
break;
if (dib9000_mbx_process(state, attr) == -1) /* try to fetch one message - if any */
return -1;
} while (--timeout);
if (timeout == 0) {
dprintk("waiting for message %d timed out", id);
return -1;
}
return i == 0;
}
static int dib9000_risc_check_version(struct dib9000_state *state)
{
u8 r[4];
u8 size;
u16 fw_version = 0;
if (dib9000_mbx_send(state, OUT_MSG_REQ_VERSION, &fw_version, 1) != 0)
return -EIO;
if (dib9000_mbx_get_message(state, IN_MSG_VERSION, (u16 *) r, &size) < 0)
return -EIO;
fw_version = (r[0] << 8) | r[1];
dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
if ((fw_version >> 10) != 7)
return -EINVAL;
switch (fw_version & 0x3ff) {
case 11:
case 12:
case 14:
case 15:
case 16:
case 17:
break;
default:
dprintk("RISC: invalid firmware version");
return -EINVAL;
}
dprintk("RISC: valid firmware version");
return 0;
}
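/*
 * Boot sequence: reconfigure the RAM pools (1225/1226), route the APB
 * interface to the host (1542), hold the RISC(s) in reset, download the
 * microcode with key 0x1234, release the reset and wait for each mailbox
 * to come up before checking the reported firmware version.
 */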
static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB)
{
/* Reconfig pool mac ram */
dib9000_write_word(state, 1225, 0x02); /* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */
dib9000_write_word(state, 1226, 0x05);
/* Toggles IP crypto to Host APB interface. */
dib9000_write_word(state, 1542, 1);
/* Set jump and no jump in the dma box */
dib9000_write_word(state, 1074, 0);
dib9000_write_word(state, 1075, 0);
/* Set MAC as APB Master. */
dib9000_write_word(state, 1237, 0);
/* Reset the RISCs */
if (codeA != NULL)
dib9000_write_word(state, 1024, 2);
else
dib9000_write_word(state, 1024, 15);
if (codeB != NULL)
dib9000_write_word(state, 1040, 2);
if (codeA != NULL)
dib9000_firmware_download(state, 0, 0x1234, codeA, lenA);
if (codeB != NULL)
dib9000_firmware_download(state, 1, 0x1234, codeB, lenB);
/* Run the RISCs */
if (codeA != NULL)
dib9000_write_word(state, 1024, 0);
if (codeB != NULL)
dib9000_write_word(state, 1040, 0);
if (codeA != NULL)
if (dib9000_mbx_host_init(state, 0) != 0)
return -EIO;
if (codeB != NULL)
if (dib9000_mbx_host_init(state, 1) != 0)
return -EIO;
msleep(100);
state->platform.risc.fw_is_running = 1;
if (dib9000_risc_check_version(state) != 0)
return -EINVAL;
state->platform.risc.memcmd = 0xff;
return 0;
}
static u16 dib9000_identify(struct i2c_device *client)
{
u16 value;
value = dib9000_i2c_read16(client, 896);
if (value != 0x01b3) {
dprintk("wrong Vendor ID (0x%x)", value);
return 0;
}
value = dib9000_i2c_read16(client, 897);
if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) {
dprintk("wrong Device ID (0x%x)", value);
return 0;
}
/* prevent this driver from being used with a DiB7000PC */
if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
dprintk("this driver does not work with DiB7000PC");
return 0;
}
switch (value) {
case 0x4000:
dprintk("found DiB7000MA/PA/MB/PB");
break;
case 0x4001:
dprintk("found DiB7000HC");
break;
case 0x4002:
dprintk("found DiB7000MC");
break;
case 0x4003:
dprintk("found DiB9000A");
break;
case 0x4004:
dprintk("found DiB9000H");
break;
case 0x4005:
dprintk("found DiB9000M");
break;
}
return value;
}
static void dib9000_set_power_mode(struct dib9000_state *state, enum dib9000_power_mode mode)
{
/* by default everything is going to be powered off */
u16 reg_903 = 0x3fff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906;
u8 offset;
if (state->revision == 0x4003 || state->revision == 0x4004 || state->revision == 0x4005)
offset = 1;
else
offset = 0;
reg_906 = dib9000_read_word(state, 906 + offset) | 0x3; /* keep settings for RISC */
/* now, depending on the requested mode, we power on */
switch (mode) {
/* power up everything in the demod */
case DIB9000_POWER_ALL:
reg_903 = 0x0000;
reg_904 = 0x0000;
reg_905 = 0x0000;
reg_906 = 0x0000;
break;
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
case DIB9000_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */
reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
break;
case DIB9000_POWER_INTERF_ANALOG_AGC:
reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
reg_906 &= ~((1 << 0));
break;
case DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
reg_903 = 0x0000;
reg_904 = 0x801f;
reg_905 = 0x0000;
reg_906 &= ~((1 << 0));
break;
case DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD:
reg_903 = 0x0000;
reg_904 = 0x8000;
reg_905 = 0x010b;
reg_906 &= ~((1 << 0));
break;
default:
case DIB9000_POWER_NO:
break;
}
/* always power down unused parts */
if (!state->platform.host.mobile_mode)
reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);
/* P_sdio_select_clk = 0 on MC and after */
if (state->revision != 0x4000)
reg_906 <<= 1;
dib9000_write_word(state, 903 + offset, reg_903);
dib9000_write_word(state, 904 + offset, reg_904);
dib9000_write_word(state, 905 + offset, reg_905);
dib9000_write_word(state, 906 + offset, reg_906);
}
static int dib9000_fw_reset(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
dib9000_write_word(state, 1817, 0x0003);
dib9000_write_word(state, 1227, 1);
dib9000_write_word(state, 1227, 0);
switch ((state->revision = dib9000_identify(&state->i2c))) {
case 0x4003:
case 0x4004:
case 0x4005:
state->reg_offs = 1;
break;
default:
return -EINVAL;
}
/* reset the i2c-master to use the host interface */
dibx000_reset_i2c_master(&state->i2c_master);
dib9000_set_power_mode(state, DIB9000_POWER_ALL);
/* unforce divstr regardless of whether i2c enumeration was done or not */
dib9000_write_word(state, 1794, dib9000_read_word(state, 1794) & ~(1 << 1));
dib9000_write_word(state, 1796, 0);
dib9000_write_word(state, 1805, 0x805);
/* restart all parts */
dib9000_write_word(state, 898, 0xffff);
dib9000_write_word(state, 899, 0xffff);
dib9000_write_word(state, 900, 0x0001);
dib9000_write_word(state, 901, 0xff19);
dib9000_write_word(state, 902, 0x003c);
dib9000_write_word(state, 898, 0);
dib9000_write_word(state, 899, 0);
dib9000_write_word(state, 900, 0);
dib9000_write_word(state, 901, 0);
dib9000_write_word(state, 902, 0);
dib9000_write_word(state, 911, state->chip.d9.cfg.if_drives);
dib9000_set_power_mode(state, DIB9000_POWER_INTERFACE_ONLY);
return 0;
}
static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len)
{
u16 mb[10];
u8 i, s;
if (address >= 1024 || !state->platform.risc.fw_is_running)
return -EINVAL;
/* dprintk( "APB access thru rd fw %d %x", address, attribute); */
mb[0] = (u16) address;
mb[1] = len / 2;
dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_R, mb, 2, attribute);
switch (dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute)) {
case 1:
s--;
for (i = 0; i < s; i++) {
b[i * 2] = (mb[i + 1] >> 8) & 0xff;
b[i * 2 + 1] = (mb[i + 1]) & 0xff;
}
return 0;
default:
return -EIO;
}
return -EIO;
}
static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len)
{
u16 mb[10];
u8 s, i;
if (address >= 1024 || !state->platform.risc.fw_is_running)
return -EINVAL;
/* dprintk( "APB access thru wr fw %d %x", address, attribute); */
mb[0] = (unsigned short)address;
for (i = 0; i < len && i < 20; i += 2)
mb[1 + (i / 2)] = (b[i] << 8 | b[i + 1]);
dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_W, mb, 1 + len / 2, attribute);
return dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute) == 1 ? 0 : -EINVAL;
}
static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i)
{
u8 index_loop = 10;
if (!state->platform.risc.fw_is_running)
return 0;
dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i);
do {
dib9000_risc_mem_read(state, FE_MM_RW_SYNC, state->i2c_read_buffer, 1);
} while (state->i2c_read_buffer[0] && index_loop--);
/* the sync word is still set after all retries: the firmware never acknowledged */
if (state->i2c_read_buffer[0])
return -EIO;
return 0;
}
static int dib9000_fw_init(struct dib9000_state *state)
{
struct dibGPIOFunction *f;
u16 b[40] = { 0 };
u8 i;
u8 size;
if (dib9000_fw_boot(state, NULL, 0, state->chip.d9.cfg.microcode_B_fe_buffer, state->chip.d9.cfg.microcode_B_fe_size) != 0)
return -EIO;
/* initialize the firmware */
for (i = 0; i < ARRAY_SIZE(state->chip.d9.cfg.gpio_function); i++) {
f = &state->chip.d9.cfg.gpio_function[i];
if (f->mask) {
switch (f->function) {
case BOARD_GPIO_FUNCTION_COMPONENT_ON:
b[0] = (u16) f->mask;
b[1] = (u16) f->direction;
b[2] = (u16) f->value;
break;
case BOARD_GPIO_FUNCTION_COMPONENT_OFF:
b[3] = (u16) f->mask;
b[4] = (u16) f->direction;
b[5] = (u16) f->value;
break;
}
}
}
if (dib9000_mbx_send(state, OUT_MSG_CONF_GPIO, b, 15) != 0)
return -EIO;
/* subband */
b[0] = state->chip.d9.cfg.subband.size; /* type == 0 -> GPIO - PWM not yet supported */
for (i = 0; i < state->chip.d9.cfg.subband.size; i++) {
b[1 + i * 4] = state->chip.d9.cfg.subband.subband[i].f_mhz;
b[2 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.mask;
b[3 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.direction;
b[4 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.value;
}
b[1 + i * 4] = 0; /* fe_id */
if (dib9000_mbx_send(state, OUT_MSG_SUBBAND_SEL, b, 2 + 4 * i) != 0)
return -EIO;
/* 0 - id, 1 - no_of_frontends */
b[0] = (0 << 8) | 1;
/* 0 = i2c-address demod, 0 = tuner */
b[1] = (0 << 8) | (0);
b[2] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000) >> 16) & 0xffff);
b[3] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000)) & 0xffff);
b[4] = (u16) ((state->chip.d9.cfg.vcxo_timer >> 16) & 0xffff);
b[5] = (u16) ((state->chip.d9.cfg.vcxo_timer) & 0xffff);
b[6] = (u16) ((state->chip.d9.cfg.timing_frequency >> 16) & 0xffff);
b[7] = (u16) ((state->chip.d9.cfg.timing_frequency) & 0xffff);
b[29] = state->chip.d9.cfg.if_drives;
if (dib9000_mbx_send(state, OUT_MSG_INIT_DEMOD, b, ARRAY_SIZE(b)) != 0)
return -EIO;
if (dib9000_mbx_send(state, OUT_MSG_FE_FW_DL, NULL, 0) != 0)
return -EIO;
if (dib9000_mbx_get_message(state, IN_MSG_FE_FW_DL_DONE, b, &size) < 0)
return -EIO;
if (size > ARRAY_SIZE(b)) {
dprintk("error : firmware returned %dbytes needed but the used buffer has only %dbytes\n Firmware init ABORTED", size,
(int)ARRAY_SIZE(b));
return -EINVAL;
}
for (i = 0; i < size; i += 2) {
state->platform.risc.fe_mm[i / 2].addr = b[i + 0];
state->platform.risc.fe_mm[i / 2].size = b[i + 1];
}
return 0;
}
static void dib9000_fw_set_channel_head(struct dib9000_state *state, struct dvb_frontend_parameters *ch)
{
u8 b[9];
u32 freq = state->fe[0]->dtv_property_cache.frequency / 1000;
if (state->fe_id % 2)
freq += 101;
b[0] = (u8) ((freq >> 0) & 0xff);
b[1] = (u8) ((freq >> 8) & 0xff);
b[2] = (u8) ((freq >> 16) & 0xff);
b[3] = (u8) ((freq >> 24) & 0xff);
b[4] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 0) & 0xff);
b[5] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 8) & 0xff);
b[6] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 16) & 0xff);
b[7] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 24) & 0xff);
b[8] = 0x80; /* do not wait for CELL ID when doing autosearch */
if (state->fe[0]->dtv_property_cache.delivery_system == SYS_DVBT)
b[8] |= 1;
dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_HEAD, b);
}
static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
{
struct dib9000_state *state = fe->demodulator_priv;
struct dibDVBTChannel {
s8 spectrum_inversion;
s8 nfft;
s8 guard;
s8 constellation;
s8 hrch;
s8 alpha;
s8 code_rate_hp;
s8 code_rate_lp;
s8 select_hp;
s8 intlv_native;
};
struct dibDVBTChannel *ch;
int ret = 0;
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION,
state->i2c_read_buffer, sizeof(struct dibDVBTChannel));
ch = (struct dibDVBTChannel *)state->i2c_read_buffer;
switch (ch->spectrum_inversion & 0x7) {
case 1:
state->fe[0]->dtv_property_cache.inversion = INVERSION_ON;
break;
case 0:
state->fe[0]->dtv_property_cache.inversion = INVERSION_OFF;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO;
break;
}
switch (ch->nfft) {
case 0:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
break;
case 2:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K;
break;
case 1:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
break;
}
switch (ch->guard) {
case 0:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
break;
}
switch (ch->constellation) {
case 2:
state->fe[0]->dtv_property_cache.modulation = QAM_64;
break;
case 1:
state->fe[0]->dtv_property_cache.modulation = QAM_16;
break;
case 0:
state->fe[0]->dtv_property_cache.modulation = QPSK;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.modulation = QAM_AUTO;
break;
}
switch (ch->hrch) {
case 0:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_NONE;
break;
case 1:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_1;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
break;
}
switch (ch->code_rate_hp) {
case 1:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2;
break;
case 2:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_2_3;
break;
case 3:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_3_4;
break;
case 5:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_5_6;
break;
case 7:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_7_8;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO;
break;
}
switch (ch->code_rate_lp) {
case 1:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2;
break;
case 2:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_2_3;
break;
case 3:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_3_4;
break;
case 5:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_5_6;
break;
case 7:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_7_8;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_AUTO;
break;
}
error:
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
return ret;
}
static int dib9000_fw_set_channel_union(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
{
struct dib9000_state *state = fe->demodulator_priv;
struct dibDVBTChannel {
s8 spectrum_inversion;
s8 nfft;
s8 guard;
s8 constellation;
s8 hrch;
s8 alpha;
s8 code_rate_hp;
s8 code_rate_lp;
s8 select_hp;
s8 intlv_native;
};
struct dibDVBTChannel ch;
switch (state->fe[0]->dtv_property_cache.inversion) {
case INVERSION_ON:
ch.spectrum_inversion = 1;
break;
case INVERSION_OFF:
ch.spectrum_inversion = 0;
break;
default:
case INVERSION_AUTO:
ch.spectrum_inversion = -1;
break;
}
switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
ch.nfft = 0;
break;
case TRANSMISSION_MODE_4K:
ch.nfft = 2;
break;
case TRANSMISSION_MODE_8K:
ch.nfft = 1;
break;
default:
case TRANSMISSION_MODE_AUTO:
ch.nfft = 1;
break;
}
switch (state->fe[0]->dtv_property_cache.guard_interval) {
case GUARD_INTERVAL_1_32:
ch.guard = 0;
break;
case GUARD_INTERVAL_1_16:
ch.guard = 1;
break;
case GUARD_INTERVAL_1_8:
ch.guard = 2;
break;
case GUARD_INTERVAL_1_4:
ch.guard = 3;
break;
default:
case GUARD_INTERVAL_AUTO:
ch.guard = -1;
break;
}
switch (state->fe[0]->dtv_property_cache.modulation) {
case QAM_64:
ch.constellation = 2;
break;
case QAM_16:
ch.constellation = 1;
break;
case QPSK:
ch.constellation = 0;
break;
default:
case QAM_AUTO:
ch.constellation = -1;
break;
}
switch (state->fe[0]->dtv_property_cache.hierarchy) {
case HIERARCHY_NONE:
ch.hrch = 0;
break;
case HIERARCHY_1:
case HIERARCHY_2:
case HIERARCHY_4:
ch.hrch = 1;
break;
default:
case HIERARCHY_AUTO:
ch.hrch = -1;
break;
}
ch.alpha = 1;
switch (state->fe[0]->dtv_property_cache.code_rate_HP) {
case FEC_1_2:
ch.code_rate_hp = 1;
break;
case FEC_2_3:
ch.code_rate_hp = 2;
break;
case FEC_3_4:
ch.code_rate_hp = 3;
break;
case FEC_5_6:
ch.code_rate_hp = 5;
break;
case FEC_7_8:
ch.code_rate_hp = 7;
break;
default:
case FEC_AUTO:
ch.code_rate_hp = -1;
break;
}
switch (state->fe[0]->dtv_property_cache.code_rate_LP) {
case FEC_1_2:
ch.code_rate_lp = 1;
break;
case FEC_2_3:
ch.code_rate_lp = 2;
break;
case FEC_3_4:
ch.code_rate_lp = 3;
break;
case FEC_5_6:
ch.code_rate_lp = 5;
break;
case FEC_7_8:
ch.code_rate_lp = 7;
break;
default:
case FEC_AUTO:
ch.code_rate_lp = -1;
break;
}
ch.select_hp = 1;
ch.intlv_native = 1;
dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_UNION, (u8 *) &ch);
return 0;
}
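/*
 * Firmware tune state machine: CT_DEMOD_START writes the channel head and
 * the (zeroed) channel context to the firmware memory and sends either a
 * SEARCH or a TUNE message, depending on whether the channel parameters
 * are known; CT_DEMOD_STEP_1 then polls the search/tune state, where -2
 * signals a lock and any other non-zero value a failure.
 */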
static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
{
struct dib9000_state *state = fe->demodulator_priv;
int ret = 10, search = state->channel_status.status == CHANNEL_STATUS_PARAMETERS_UNKNOWN;
s8 i;
switch (state->tune_state) {
case CT_DEMOD_START:
dib9000_fw_set_channel_head(state, ch);
/* write the channel context - a channel is initialized to 0, so it is OK */
dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_CONTEXT, (u8 *) fe_info);
dib9000_risc_mem_write(state, FE_MM_W_FE_INFO, (u8 *) fe_info);
if (search)
dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_SEARCH, NULL, 0);
else {
dib9000_fw_set_channel_union(fe, ch);
dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_TUNE, NULL, 0);
}
state->tune_state = CT_DEMOD_STEP_1;
break;
case CT_DEMOD_STEP_1:
if (search)
dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, state->i2c_read_buffer, 1);
else
dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, state->i2c_read_buffer, 1);
i = (s8)state->i2c_read_buffer[0];
switch (i) { /* something happened */
case 0:
break;
case -2: /* tps locks are "slower" than MPEG locks -> even in autosearch data is OK here */
if (search)
state->status = FE_STATUS_DEMOD_SUCCESS;
else {
state->tune_state = CT_DEMOD_STOP;
state->status = FE_STATUS_LOCKED;
}
break;
default:
state->status = FE_STATUS_TUNE_FAILED;
state->tune_state = CT_DEMOD_STOP;
break;
}
break;
default:
ret = FE_CALLBACK_TIME_NEVER;
break;
}
return ret;
}
static int dib9000_fw_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 mode = (u16) onoff;
return dib9000_mbx_send(state, OUT_MSG_ENABLE_DIVERSITY, &mode, 1);
}
static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 outreg, smo_mode;
dprintk("setting output mode for demod %p to %d", fe, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
outreg = (1 << 10); /* 0x0400 */
break;
case OUTMODE_MPEG2_PAR_CONT_CLK:
outreg = (1 << 10) | (1 << 6); /* 0x0440 */
break;
case OUTMODE_MPEG2_SERIAL:
outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */
break;
case OUTMODE_DIVERSITY:
outreg = (1 << 10) | (4 << 6); /* 0x0500 */
break;
case OUTMODE_MPEG2_FIFO:
outreg = (1 << 10) | (5 << 6);
break;
case OUTMODE_HIGH_Z:
outreg = 0;
break;
default:
dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
return -EINVAL;
}
dib9000_write_word(state, 1795, outreg);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
case OUTMODE_MPEG2_PAR_CONT_CLK:
case OUTMODE_MPEG2_SERIAL:
case OUTMODE_MPEG2_FIFO:
smo_mode = (dib9000_read_word(state, 295) & 0x0010) | (1 << 1);
if (state->chip.d9.cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
dib9000_write_word(state, 295, smo_mode);
break;
}
outreg = to_fw_output_mode(mode);
return dib9000_mbx_send(state, OUT_MSG_SET_OUTPUT_MODE, &outreg, 1);
}
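/*
 * Tuner I2C transfers are bridged through demod registers: 784 holds the
 * target address, 787 the word count minus one, 786 starts the transfer
 * (1 = read, 0 = write), 785 is the data FIFO, and 789/790/791 report the
 * busy/fill status polled below.
 */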
static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
u16 i, len, t, index_msg;
for (index_msg = 0; index_msg < num; index_msg++) {
if (msg[index_msg].flags & I2C_M_RD) { /* read */
len = msg[index_msg].len;
if (len > 16)
len = 16;
if (dib9000_read_word(state, 790) != 0)
dprintk("TunerITF: read busy");
dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
dib9000_write_word(state, 787, (len / 2) - 1);
dib9000_write_word(state, 786, 1); /* start read */
i = 1000;
while (dib9000_read_word(state, 790) != (len / 2) && i)
i--;
if (i == 0)
dprintk("TunerITF: read failed");
for (i = 0; i < len; i += 2) {
t = dib9000_read_word(state, 785);
msg[index_msg].buf[i] = (t >> 8) & 0xff;
msg[index_msg].buf[i + 1] = (t) & 0xff;
}
if (dib9000_read_word(state, 790) != 0)
dprintk("TunerITF: read more data than expected");
} else {
i = 1000;
while (dib9000_read_word(state, 789) && i)
i--;
if (i == 0)
dprintk("TunerITF: write busy");
len = msg[index_msg].len;
if (len > 16)
len = 16;
for (i = 0; i < len; i += 2)
dib9000_write_word(state, 785, (msg[index_msg].buf[i] << 8) | msg[index_msg].buf[i + 1]);
dib9000_write_word(state, 784, (u16) msg[index_msg].addr);
dib9000_write_word(state, 787, (len / 2) - 1);
dib9000_write_word(state, 786, 0); /* start write */
i = 1000;
while (dib9000_read_word(state, 791) > 0 && i)
i--;
if (i == 0)
dprintk("TunerITF: write failed");
}
}
return num;
}
int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed)
{
struct dib9000_state *state = fe->demodulator_priv;
state->component_bus_speed = speed;
return 0;
}
EXPORT_SYMBOL(dib9000_fw_set_component_bus_speed);
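/*
 * Component-bus transfers go through the firmware: a 13-byte parameter
 * block (bus type, port, slave address, SCL speed, write/read lengths) is
 * written via FE_MM_W_COMPONENT_ACCESS, the write payload is streamed into
 * the shared buffer, and FE_SYNC_COMPONENT_ACCESS triggers the actual
 * transaction before any read-back.
 */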
static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
u8 type = 0; /* I2C */
u8 port = DIBX000_I2C_INTERFACE_GPIO_3_4;
u16 scl = state->component_bus_speed; /* SCL frequency */
struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[FE_MM_RW_COMPONENT_ACCESS_BUFFER];
u8 p[13] = { 0 };
p[0] = type;
p[1] = port;
p[2] = msg[0].addr << 1;
p[3] = (u8) scl & 0xff; /* scl */
p[4] = (u8) (scl >> 8);
p[7] = 0;
p[8] = 0;
p[9] = (u8) (msg[0].len);
p[10] = (u8) (msg[0].len >> 8);
if ((num > 1) && (msg[1].flags & I2C_M_RD)) {
p[11] = (u8) (msg[1].len);
p[12] = (u8) (msg[1].len >> 8);
} else {
p[11] = 0;
p[12] = 0;
}
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
dib9000_risc_mem_write(state, FE_MM_W_COMPONENT_ACCESS, p);
{ /* write-part */
dib9000_risc_mem_setup_cmd(state, m->addr, msg[0].len, 0);
dib9000_risc_mem_write_chunks(state, msg[0].buf, msg[0].len);
}
/* do the transaction */
if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) {
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
return 0;
}
/* read back any possible result */
if ((num > 1) && (msg[1].flags & I2C_M_RD))
dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
return num;
}
static u32 dib9000_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm dib9000_tuner_algo = {
.master_xfer = dib9000_tuner_xfer,
.functionality = dib9000_i2c_func,
};
static struct i2c_algorithm dib9000_component_bus_algo = {
.master_xfer = dib9000_fw_component_bus_xfer,
.functionality = dib9000_i2c_func,
};
struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe)
{
struct dib9000_state *st = fe->demodulator_priv;
return &st->tuner_adap;
}
EXPORT_SYMBOL(dib9000_get_tuner_interface);
struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe)
{
struct dib9000_state *st = fe->demodulator_priv;
return &st->component_bus;
}
EXPORT_SYMBOL(dib9000_get_component_bus_interface);
struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating)
{
struct dib9000_state *st = fe->demodulator_priv;
return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
}
EXPORT_SYMBOL(dib9000_get_i2c_master);
int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
struct dib9000_state *st = fe->demodulator_priv;
st->i2c.i2c_adap = i2c;
return 0;
}
EXPORT_SYMBOL(dib9000_set_i2c_adapter);
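/* GPIO control: register 773 holds the direction bits, 774 the values. */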
static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
{
st->gpio_dir = dib9000_read_word(st, 773);
st->gpio_dir &= ~(1 << num); /* reset the direction bit */
st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */
dib9000_write_word(st, 773, st->gpio_dir);
st->gpio_val = dib9000_read_word(st, 774);
st->gpio_val &= ~(1 << num); /* reset the direction bit */
st->gpio_val |= (val & 0x01) << num; /* set the new value */
dib9000_write_word(st, 774, st->gpio_val);
dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);
return 0;
}
int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
{
struct dib9000_state *state = fe->demodulator_priv;
return dib9000_cfg_gpio(state, num, dir, val);
}
EXPORT_SYMBOL(dib9000_set_gpio);
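/*
 * PID-filter commands arriving while a tune is in progress are queued in
 * pid_ctrl[]: pid_ctrl_index >= 0 is the index of the last queued entry,
 * -1 marks an empty queue, and -2 means commands are applied immediately
 * (see dib9000_set_frontend(), which replays the queue once the tune is
 * done).
 */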
int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 val;
int ret;
if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
/* postpone the pid filtering cmd */
dprintk("pid filter cmd postpone");
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
return 0;
}
DibAcquireLock(&state->demod_lock);
val = dib9000_read_word(state, 294 + 1) & 0xffef;
val |= (onoff & 0x1) << 4;
dprintk("PID filter enabled %d", onoff);
ret = dib9000_write_word(state, 294 + 1, val);
DibReleaseLock(&state->demod_lock);
return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib9000_state *state = fe->demodulator_priv;
int ret;
if (state->pid_ctrl_index != -2) {
/* postpone the pid filtering cmd */
dprintk("pid filter postpone");
if (state->pid_ctrl_index < 9) {
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
state->pid_ctrl[state->pid_ctrl_index].id = id;
state->pid_ctrl[state->pid_ctrl_index].pid = pid;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
} else
dprintk("can not add any more pid ctrl cmd");
return 0;
}
DibAcquireLock(&state->demod_lock);
dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
ret = dib9000_write_word(state, 300 + 1 + id,
onoff ? (1 << 13) | pid : 0);
DibReleaseLock(&state->demod_lock);
return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter);
int dib9000_firmware_post_pll_init(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
return dib9000_fw_init(state);
}
EXPORT_SYMBOL(dib9000_firmware_post_pll_init);
static void dib9000_release(struct dvb_frontend *demod)
{
struct dib9000_state *st = demod->demodulator_priv;
u8 index_frontend;
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
dvb_frontend_detach(st->fe[index_frontend]);
DibFreeLock(&st->platform.risc.mbx_if_lock);
DibFreeLock(&st->platform.risc.mbx_lock);
DibFreeLock(&st->platform.risc.mem_lock);
DibFreeLock(&st->platform.risc.mem_mbx_lock);
DibFreeLock(&st->demod_lock);
dibx000_exit_i2c_master(&st->i2c_master);
i2c_del_adapter(&st->tuner_adap);
i2c_del_adapter(&st->component_bus);
kfree(st->fe[0]);
kfree(st);
}
static int dib9000_wakeup(struct dvb_frontend *fe)
{
return 0;
}
static int dib9000_sleep(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend;
int ret = 0;
DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
if (ret < 0)
goto error;
}
ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
error:
DibReleaseLock(&state->demod_lock);
return ret;
}
static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
}
static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend, sub_index_frontend;
fe_status_t stat;
int ret = 0;
if (state->get_frontend_internal == 0)
DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat & FE_HAS_SYNC) {
dprintk("TPS lock on the slave%i", index_frontend);
/* synchronize the cache with the other frontends */
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL);
sub_index_frontend++) {
if (sub_index_frontend != index_frontend) {
state->fe[sub_index_frontend]->dtv_property_cache.modulation =
state->fe[index_frontend]->dtv_property_cache.modulation;
state->fe[sub_index_frontend]->dtv_property_cache.inversion =
state->fe[index_frontend]->dtv_property_cache.inversion;
state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode =
state->fe[index_frontend]->dtv_property_cache.transmission_mode;
state->fe[sub_index_frontend]->dtv_property_cache.guard_interval =
state->fe[index_frontend]->dtv_property_cache.guard_interval;
state->fe[sub_index_frontend]->dtv_property_cache.hierarchy =
state->fe[index_frontend]->dtv_property_cache.hierarchy;
state->fe[sub_index_frontend]->dtv_property_cache.code_rate_HP =
state->fe[index_frontend]->dtv_property_cache.code_rate_HP;
state->fe[sub_index_frontend]->dtv_property_cache.code_rate_LP =
state->fe[index_frontend]->dtv_property_cache.code_rate_LP;
state->fe[sub_index_frontend]->dtv_property_cache.rolloff =
state->fe[index_frontend]->dtv_property_cache.rolloff;
}
}
ret = 0;
goto return_value;
}
}
/* get the channel from master chip */
ret = dib9000_fw_get_channel(fe, fep);
if (ret != 0)
goto return_value;
/* synchronize the cache with the other frontends */
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation;
state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy;
state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP;
state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
}
ret = 0;
return_value:
if (state->get_frontend_internal == 0)
DibReleaseLock(&state->demod_lock);
return ret;
}
static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
{
struct dib9000_state *state = fe->demodulator_priv;
state->tune_state = tune_state;
if (tune_state == CT_DEMOD_START)
state->status = FE_STATUS_TUNE_PENDING;
return 0;
}
static u32 dib9000_get_status(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
return state->status;
}
static int dib9000_set_channel_status(struct dvb_frontend *fe, struct dvb_frontend_parametersContext *channel_status)
{
struct dib9000_state *state = fe->demodulator_priv;
memcpy(&state->channel_status, channel_status, sizeof(struct dvb_frontend_parametersContext));
return 0;
}
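/*
 * Tune orchestration: if any OFDM parameter is AUTO, all frontends start
 * in autosearch; the first one to succeed provides the channel parameters,
 * the caches are synchronized and the remaining frontends are retuned with
 * the now-known channel. Only then are the output modes programmed and any
 * postponed PID-filter commands replayed.
 */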
static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib9000_state *state = fe->demodulator_priv;
int sleep_time, sleep_time_slave;
u32 frontend_status;
u8 nbr_pending, exit_condition, index_frontend, index_frontend_success;
struct dvb_frontend_parametersContext channel_status;
/* check that the correct parameters are set */
if (state->fe[0]->dtv_property_cache.frequency == 0) {
dprintk("dib9000: must specify frequency ");
return 0;
}
if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
dprintk("dib9000: must specify bandwidth ");
return 0;
}
state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
DibAcquireLock(&state->demod_lock);
fe->dtv_property_cache.delivery_system = SYS_DVBT;
/* set the master status */
if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
/* no channel specified, autosearch the channel */
state->channel_status.status = CHANNEL_STATUS_PARAMETERS_UNKNOWN;
} else
state->channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
/* set mode and status for the different frontends */
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
dib9000_fw_set_diversity_in(state->fe[index_frontend], 1);
/* synchronization of the cache */
memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties));
state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_DVBT;
dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z);
dib9000_set_channel_status(state->fe[index_frontend], &state->channel_status);
dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
}
/* actual tune */
exit_condition = 0; /* 0: tune pending; 1: tune failed; 2: tune success */
index_frontend_success = 0;
do {
sleep_time = dib9000_fw_tune(state->fe[0], NULL);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
if (sleep_time == FE_CALLBACK_TIME_NEVER)
sleep_time = sleep_time_slave;
else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
sleep_time = sleep_time_slave;
}
if (sleep_time != FE_CALLBACK_TIME_NEVER)
msleep(sleep_time / 10);
else
break;
nbr_pending = 0;
exit_condition = 0;
index_frontend_success = 0;
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
frontend_status = -dib9000_get_status(state->fe[index_frontend]);
if (frontend_status > -FE_STATUS_TUNE_PENDING) {
exit_condition = 2; /* tune success */
index_frontend_success = index_frontend;
break;
}
if (frontend_status == -FE_STATUS_TUNE_PENDING)
nbr_pending++; /* some frontends are still tuning */
}
if ((exit_condition != 2) && (nbr_pending == 0))
exit_condition = 1; /* if all tune are done and no success, exit: tune failed */
} while (exit_condition == 0);
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
dprintk("tune failed");
DibReleaseLock(&state->demod_lock);
/* tune failed; put all the pid filtering cmd to junk */
state->pid_ctrl_index = -1;
return 0;
}
dprintk("tune success on frontend%i", index_frontend_success);
/* synchronize all the channel cache */
state->get_frontend_internal = 1;
dib9000_get_frontend(state->fe[0], fep);
state->get_frontend_internal = 0;
/* retune the other frontends with the found channel */
channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
/* only retune the frontends that were not tuned successfully */
if (index_frontend != index_frontend_success) {
dib9000_set_channel_status(state->fe[index_frontend], &channel_status);
dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
}
}
do {
sleep_time = FE_CALLBACK_TIME_NEVER;
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
if (index_frontend != index_frontend_success) {
sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
if (sleep_time == FE_CALLBACK_TIME_NEVER)
sleep_time = sleep_time_slave;
else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
sleep_time = sleep_time_slave;
}
}
if (sleep_time != FE_CALLBACK_TIME_NEVER)
msleep(sleep_time / 10);
else
break;
nbr_pending = 0;
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
if (index_frontend != index_frontend_success) {
frontend_status = -dib9000_get_status(state->fe[index_frontend]);
if (frontend_status == -FE_STATUS_TUNE_PENDING)
nbr_pending++; /* some frontends are still tuning */
}
}
} while (nbr_pending != 0);
/* set the output mode */
dib9000_fw_set_output_mode(state->fe[0], state->chip.d9.cfg.output_mode);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY);
/* turn off the diversity for the last frontend */
dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
DibReleaseLock(&state->demod_lock);
if (state->pid_ctrl_index >= 0) {
u8 index_pid_filter_cmd;
u8 pid_ctrl_index = state->pid_ctrl_index;
state->pid_ctrl_index = -2;
for (index_pid_filter_cmd = 0;
index_pid_filter_cmd <= pid_ctrl_index;
index_pid_filter_cmd++) {
if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL)
dib9000_fw_pid_filter_ctrl(state->fe[0],
state->pid_ctrl[index_pid_filter_cmd].onoff);
else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER)
dib9000_fw_pid_filter(state->fe[0],
state->pid_ctrl[index_pid_filter_cmd].id,
state->pid_ctrl[index_pid_filter_cmd].pid,
state->pid_ctrl[index_pid_filter_cmd].onoff);
}
}
/* do not postpone any more the pid filtering */
state->pid_ctrl_index = -2;
return 0;
}
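/* Read the demodulator lock status word (register 535) */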
static u16 dib9000_read_lock(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
return dib9000_read_word(state, 535);
}
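/*
* Combine the FE_HAS_* status bits of the master and all slave
* frontends: a bit is set if any frontend reports it.
*/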
static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend;
u16 lock = 0, lock_slave = 0;
DibAcquireLock(&state->demod_lock);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
lock_slave |= dib9000_read_lock(state->fe[index_frontend]);
lock = dib9000_read_word(state, 535);
*stat = 0;
if ((lock & 0x8000) || (lock_slave & 0x8000))
*stat |= FE_HAS_SIGNAL;
if ((lock & 0x3000) || (lock_slave & 0x3000))
*stat |= FE_HAS_CARRIER;
if ((lock & 0x0100) || (lock_slave & 0x0100))
*stat |= FE_HAS_VITERBI;
if (((lock & 0x0038) == 0x38) || ((lock_slave & 0x0038) == 0x38))
*stat |= FE_HAS_SYNC;
if ((lock & 0x0008) || (lock_slave & 0x0008))
*stat |= FE_HAS_LOCK;
DibReleaseLock(&state->demod_lock);
return 0;
}
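/* The BER is read from the firmware monitor block over the RISC mailbox */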
static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 *c;
int ret = 0;
DibAcquireLock(&state->demod_lock);
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
state->i2c_read_buffer, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
c = (u16 *)state->i2c_read_buffer;
*ber = c[10] << 16 | c[11];
error:
DibReleaseLock(&state->demod_lock);
return ret;
}
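/*
* Sum the signal strength reported by the master and all slave
* frontends, saturating the result at 65535.
*/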
static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend;
u16 *c = (u16 *)state->i2c_read_buffer;
u16 val;
int ret = 0;
DibAcquireLock(&state->demod_lock);
*strength = 0;
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
if (val > 65535 - *strength)
*strength = 65535;
else
*strength += val;
}
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
val = 65535 - c[4];
if (val > 65535 - *strength)
*strength = 65535;
else
*strength += val;
error:
DibReleaseLock(&state->demod_lock);
return ret;
}
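/*
* The firmware reports signal (s) and noise (n) as mantissa/exponent
* pairs packed into the monitor words; the 6-bit exponents are in
* two's complement and are sign-extended before shifting. Returns the
* ratio s/n as a 16.16 fixed-point value, or 0xffffffff when n == 0.
*/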
static u32 dib9000_get_snr(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 *c = (u16 *)state->i2c_read_buffer;
u32 n, s, exp;
u16 val;
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
return -EIO;
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
val = c[7];
n = (val >> 4) & 0xff;
exp = ((val & 0xf) << 2);
val = c[8];
exp += ((val >> 14) & 0x3);
if ((exp & 0x20) != 0)
exp -= 0x40;
n <<= exp + 16;
s = (val >> 6) & 0xFF;
exp = (val & 0x3F);
if ((exp & 0x20) != 0)
exp -= 0x40;
s <<= exp + 16;
if (n > 0) {
u32 t = (s / n) << 16;
return t + ((s << 16) - n * t) / n;
}
return 0xffffffff;
}
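/*
* Sum the linear SNR of all frontends and convert it to a dB-scaled
* value with intlog10().
*/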
static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend;
u32 snr_master;
DibAcquireLock(&state->demod_lock);
snr_master = dib9000_get_snr(fe);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
snr_master += dib9000_get_snr(state->fe[index_frontend]);
if ((snr_master >> 16) != 0) {
snr_master = 10 * intlog10(snr_master >> 16);
*snr = snr_master / ((1 << 24) / 10);
} else
*snr = 0;
DibReleaseLock(&state->demod_lock);
return 0;
}
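/* The uncorrected packet count is word 12 of the firmware monitor block */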
static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
struct dib9000_state *state = fe->demodulator_priv;
u16 *c = (u16 *)state->i2c_read_buffer;
int ret = 0;
DibAcquireLock(&state->demod_lock);
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
*unc = c[12];
error:
DibReleaseLock(&state->demod_lock);
return ret;
}
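/*
* Enumerate the DiB9000 demodulators on the bus: wake each chip up at
* the default i2c address and program its designated address, walking
* from the last chip to the first so that already-programmed chips are
* left undisturbed.
*/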
int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
{
int k = 0, ret = 0;
u8 new_addr = 0;
struct i2c_device client = {.i2c_adap = i2c };
client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_write_buffer) {
dprintk("%s: not enough memory", __func__);
return -ENOMEM;
}
client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_read_buffer) {
dprintk("%s: not enough memory", __func__);
ret = -ENOMEM;
goto error_memory;
}
client.i2c_addr = default_addr + 16;
dib9000_i2c_write16(&client, 1796, 0x0);
for (k = no_of_demods - 1; k >= 0; k--) {
/* designated i2c address */
new_addr = first_addr + (k << 1);
client.i2c_addr = default_addr;
dib9000_i2c_write16(&client, 1817, 3);
dib9000_i2c_write16(&client, 1796, 0);
dib9000_i2c_write16(&client, 1227, 1);
dib9000_i2c_write16(&client, 1227, 0);
client.i2c_addr = new_addr;
dib9000_i2c_write16(&client, 1817, 3);
dib9000_i2c_write16(&client, 1796, 0);
dib9000_i2c_write16(&client, 1227, 1);
dib9000_i2c_write16(&client, 1227, 0);
if (dib9000_identify(&client) == 0) {
client.i2c_addr = default_addr;
if (dib9000_identify(&client) == 0) {
dprintk("DiB9000 #%d: not identified", k);
ret = -EIO;
goto error;
}
}
dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
new_addr = first_addr | (k << 1);
client.i2c_addr = new_addr;
dib9000_i2c_write16(&client, 1794, (new_addr << 2));
dib9000_i2c_write16(&client, 1795, 0);
}
error:
kfree(client.i2c_read_buffer);
error_memory:
kfree(client.i2c_write_buffer);
return ret;
}
EXPORT_SYMBOL(dib9000_i2c_enumeration);
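/* Register a slave frontend in the first free state->fe[] slot */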
int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend = 1;
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
state->fe[index_frontend] = fe_slave;
return 0;
}
dprintk("too many slave frontend");
return -ENOMEM;
}
EXPORT_SYMBOL(dib9000_set_slave_frontend);
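/* Detach the most recently registered slave frontend, if any */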
int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend = 1;
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend != 1) {
dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
state->fe[index_frontend] = NULL;
return 0;
}
dprintk("no frontend to be removed");
return -ENODEV;
}
EXPORT_SYMBOL(dib9000_remove_slave_frontend);
struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
{
struct dib9000_state *state = fe->demodulator_priv;
if (slave_index >= MAX_NUMBER_OF_FRONTENDS)
return NULL;
return state->fe[slave_index];
}
EXPORT_SYMBOL(dib9000_get_slave_frontend);
static struct dvb_frontend_ops dib9000_ops;
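/*
* Allocate the demod state, identify the chip and register the tuner
* and component bus i2c adapters. Returns NULL on failure.
*/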
struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
{
struct dvb_frontend *fe;
struct dib9000_state *st;
st = kzalloc(sizeof(struct dib9000_state), GFP_KERNEL);
if (st == NULL)
return NULL;
fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL);
if (fe == NULL) {
kfree(st);
return NULL;
}
memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config));
st->i2c.i2c_adap = i2c_adap;
st->i2c.i2c_addr = i2c_addr;
st->i2c.i2c_write_buffer = st->i2c_write_buffer;
st->i2c.i2c_read_buffer = st->i2c_read_buffer;
st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS;
st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS;
DibInitLock(&st->platform.risc.mbx_if_lock);
DibInitLock(&st->platform.risc.mbx_lock);
DibInitLock(&st->platform.risc.mem_lock);
DibInitLock(&st->platform.risc.mem_mbx_lock);
DibInitLock(&st->demod_lock);
st->get_frontend_internal = 0;
st->pid_ctrl_index = -2;
st->fe[0] = fe;
fe->demodulator_priv = st;
memcpy(&st->fe[0]->ops, &dib9000_ops, sizeof(struct dvb_frontend_ops));
/* Ensure the output mode remains at the previous default if it's
* not specifically set by the caller.
*/
if ((st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
st->chip.d9.cfg.output_mode = OUTMODE_MPEG2_FIFO;
if (dib9000_identify(&st->i2c) == 0)
goto error;
dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c.i2c_adap, st->i2c.i2c_addr);
st->tuner_adap.dev.parent = i2c_adap->dev.parent;
strncpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS", sizeof(st->tuner_adap.name));
st->tuner_adap.algo = &dib9000_tuner_algo;
st->tuner_adap.algo_data = NULL;
i2c_set_adapdata(&st->tuner_adap, st);
if (i2c_add_adapter(&st->tuner_adap) < 0)
goto error;
st->component_bus.dev.parent = i2c_adap->dev.parent;
strncpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS", sizeof(st->component_bus.name));
st->component_bus.algo = &dib9000_component_bus_algo;
st->component_bus.algo_data = NULL;
st->component_bus_speed = 340;
i2c_set_adapdata(&st->component_bus, st);
if (i2c_add_adapter(&st->component_bus) < 0)
goto component_bus_add_error;
dib9000_fw_reset(fe);
return fe;
component_bus_add_error:
i2c_del_adapter(&st->tuner_adap);
error:
kfree(st);
return NULL;
}
EXPORT_SYMBOL(dib9000_attach);
static struct dvb_frontend_ops dib9000_ops = {
.info = {
.name = "DiBcom 9000",
.type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO,
},
.release = dib9000_release,
.init = dib9000_wakeup,
.sleep = dib9000_sleep,
.set_frontend = dib9000_set_frontend,
.get_tune_settings = dib9000_fe_get_tune_settings,
.get_frontend = dib9000_get_frontend,
.read_status = dib9000_read_status,
.read_ber = dib9000_read_ber,
.read_signal_strength = dib9000_read_signal_strength,
.read_snr = dib9000_read_snr,
.read_ucblocks = dib9000_read_unc_blocks,
};
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ciwrl/android_kernel_huawei_msm8939 | drivers/scsi/qla2xxx/qla_mid.c | 2203 | 21694 | /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
if (vha->vp_idx && vha->timer_active) {
del_timer_sync(&vha->timer);
vha->timer_active = 0;
}
}
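/*
* Allocate a vp_id from the vp_idx_map bitmap and add the vport to the
* adapter's vp_list. A return value above ha->max_npiv_vports means no
* id was available.
*/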
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
uint32_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
if (vp_id > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa000,
"vp_id %d is bigger than max-supported %d.\n",
vp_id, ha->max_npiv_vports);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
qlt_update_vp_map(vha, SET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
mutex_lock(&ha->vport_lock);
/*
* Wait for all pending activities to finish before removing vport from
* the list.
* Lock needs to be held for safe removal from the list (it
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
spin_lock_irqsave(&ha->vport_slock, flags);
while (atomic_read(&vha->vref_count)) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
msleep(500);
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
ha->num_vhosts--;
clear_bit(vp_id, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
}
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
scsi_qla_host_t *vha;
struct scsi_qla_host *tvha;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
/* Locate matching device in database. */
list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
}
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
return NULL;
}
/*
* qla2x00_mark_vp_devices_dead
* Updates fcport state when device goes offline.
*
* Input:
* ha = adapter block pointer.
* fcport = port structure pointer.
*
* Return:
* None.
*
* Context:
*/
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
/*
* !!! NOTE !!!
* If this function is called from a context other than vport create,
* disable or delete, make sure it is synchronized with the
* delete thread.
*/
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
}
}
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
unsigned long flags;
int ret;
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
if (ret == QLA_SUCCESS) {
fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
} else {
fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
return -1;
}
return 0;
}
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Check if physical ha port is Up */
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
!(ha->current_topology & ISP_CFG_F)) {
vha->vp_err_state = VP_ERR_PORTDWN;
fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
goto enable_failed;
}
/* Initialize the new vport unless it is a persistent port */
mutex_lock(&ha->vport_lock);
ret = qla24xx_modify_vp_config(vha);
mutex_unlock(&ha->vport_lock);
if (ret != QLA_SUCCESS) {
fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
goto enable_failed;
}
ql_dbg(ql_dbg_taskm, vha, 0x801a,
"Virtual port with id: %d - Enabled.\n", vha->vp_idx);
return 0;
enable_failed:
ql_dbg(ql_dbg_taskm, vha, 0x801b,
"Virtual port with id: %d - Disabled.\n", vha->vp_idx);
return 1;
}
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
struct fc_vport *fc_vport;
int ret;
fc_vport = vha->fc_vport;
ql_dbg(ql_dbg_vport, vha, 0xa002,
"%s: change request #3.\n", __func__);
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
"receiving of RSCN requests: 0x%x.\n", ret);
return;
} else {
/* Corresponds to SCR enabled */
clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
}
vha->flags.online = 1;
if (qla24xx_configure_vhba(vha))
return;
atomic_set(&vha->vp_state, VP_ACTIVE);
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
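/*
* Fan an asynchronous event out to every vport, holding a reference on
* each vport so it cannot be deleted while the event is processed.
*/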
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
scsi_qla_host_t *vha;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
atomic_inc(&vha->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
case MBA_LOOP_DOWN:
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
}
i++;
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
/*
* Physical port will do most of the abort and recovery work. We can
* just treat it as a loop down
*/
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
}
/*
* To exclusively reset vport, we need to log it out first. Note: this
* control_vp can fail if ISP reset is already issued, this is
* expected, as the vp would be already logged out due to ISP reset.
*/
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
ql_dbg(ql_dbg_taskm, vha, 0x801d,
"Scheduling enable of Vport %d.\n", vha->vp_idx);
return qla24xx_enable_vp(vha);
}
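/*
* Per-vport DPC work: complete deferred port configuration and handle
* the fcport-update, relogin and loop-resync requests flagged in
* dpc_flags.
*/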
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
ql_dbg(ql_dbg_dpc, vha, 0x4014,
"Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4015,
"Configure VP end.\n");
return 0;
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
qla2x00_update_fcports(vha);
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x4017,
"FCPort update end.\n");
}
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
ql_dbg(ql_dbg_dpc, vha, 0x4018,
"Relogin needed scheduled.\n");
qla2x00_relogin(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4019,
"Relogin needed end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
clear_bit(RESET_ACTIVE, &vha->dpc_flags);
}
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
ql_dbg(ql_dbg_dpc, vha, 0x401a,
"Loop resync scheduled.\n");
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x401b,
"Loop resync end.\n");
}
}
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags = 0;
if (vha->vp_idx)
return;
if (list_empty(&ha->vp_list))
return;
clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
if (!(ha->current_topology & ISP_CFG_F))
return;
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
ret = qla2x00_do_dpc_vp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
}
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
uint8_t port_name[WWN_SIZE];
if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
return VPCERR_UNSUPPORTED;
/* Check that the F/W and H/W support NPIV */
if (!ha->flags.npiv_supported)
return VPCERR_UNSUPPORTED;
/* Check whether an NPIV-capable switch is present */
if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
return VPCERR_NO_FABRIC_SUPP;
/* Check for a unique WWPN */
u64_to_wwn(fc_vport->port_name, port_name);
if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
return VPCERR_BAD_WWN;
vha = qla24xx_find_vhost_by_name(ha, port_name);
if (vha)
return VPCERR_BAD_WWN;
/* Check against the max NPIV vport limit */
if (ha->num_vhosts > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa004,
"num_vhosts %ud is bigger "
"than max_npiv_vports %ud.\n",
ha->num_vhosts, ha->max_npiv_vports);
return VPCERR_UNSUPPORTED;
}
return 0;
}
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
struct scsi_host_template *sht = &qla2xxx_driver_template;
struct Scsi_Host *host;
vha = qla2x00_create_host(sht, ha);
if (!vha) {
ql_log(ql_log_warn, vha, 0xa005,
"scsi_host_alloc() failed for vport.\n");
return NULL;
}
host = vha->host;
fc_vport->dd_data = vha;
/* New host info */
u64_to_wwn(fc_vport->node_name, vha->node_name);
u64_to_wwn(fc_vport->port_name, vha->port_name);
vha->fc_vport = fc_vport;
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa006,
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
vha->dpc_flags = 0L;
/*
* To fix the issue of processing a parent's RSCN for the vport before
* its SCR is complete.
*/
set_bit(VP_SCR_NEEDED, &vha->vp_flags);
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
vha->req = base_vha->req;
host->can_queue = base_vha->req->length + 128;
host->cmd_per_lun = 3;
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = ql2xmaxlun;
host->unique_id = host->host_no;
host->max_id = ha->max_fibre_devices;
host->transportt = qla2xxx_transport_vport_template;
ql_dbg(ql_dbg_vport, vha, 0xa007,
"Detect vport hba %ld at address = %p.\n",
vha->host_no, vha);
vha->flags.init_done = 1;
mutex_lock(&ha->vport_lock);
set_bit(vha->vp_idx, ha->vp_idx_map);
ha->cur_vport_count++;
mutex_unlock(&ha->vport_lock);
return vha;
create_vhost_failed:
return NULL;
}
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
uint16_t que_id = req->id;
dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
sizeof(request_t), req->ring, req->dma);
req->ring = NULL;
req->dma = 0;
if (que_id) {
ha->req_q_map[que_id] = NULL;
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
free_irq(rsp->msix->vector, rsp);
rsp->msix->have_irq = 0;
rsp->msix->rsp = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
rsp->ring = NULL;
rsp->dma = 0;
if (que_id) {
ha->rsp_q_map[que_id] = NULL;
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
rsp = NULL;
}
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
int ret = -1;
if (req) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
}
if (ret == QLA_SUCCESS)
qla25xx_free_req_que(vha, req);
return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
if (rsp) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
}
if (ret == QLA_SUCCESS)
qla25xx_free_rsp_que(vha, rsp);
return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
int cnt, ret = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
/* Delete request queues */
for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
if (req) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00ea,
"Couldn't delete req que %d.\n",
req->id);
return ret;
}
}
}
/* Delete response queues */
for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
rsp = ha->rsp_q_map[cnt];
if (rsp) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00eb,
"Couldn't delete rsp que %d.\n",
rsp->id);
return ret;
}
}
}
return ret;
}
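/*
* Allocate, map and initialize an additional request queue. Returns
* the new queue id, or 0 on failure.
*/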
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
int ret = 0;
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t __iomem *reg;
uint32_t cnt;
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
ql_log(ql_log_fatal, base_vha, 0x00d9,
"Failed to allocate memory for request queue.\n");
goto failed;
}
req->length = REQUEST_ENTRY_CNT_24XX;
req->ring = dma_alloc_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
ql_log(ql_log_fatal, base_vha, 0x00da,
"Failed to allocate memory for request_ring.\n");
goto que_failed;
}
ret = qla2x00_alloc_outstanding_cmds(ha, req);
if (ret != QLA_SUCCESS)
goto que_failed;
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->req_qid_map);
ha->req_q_map[que_id] = req;
req->rid = rid;
req->vp_idx = vp_idx;
req->qos = qos;
ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
ql_dbg(ql_dbg_init, base_vha, 0x00dc,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
if (rsp_que < 0)
req->rsp = NULL;
else
req->rsp = ha->rsp_q_map[rsp_que];
/* Use alternate PCI bus number */
if (MSB(req->rid))
options |= BIT_4;
/* Use alternate PCI devfn */
if (LSB(req->rid))
options |= BIT_5;
req->options = options;
ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
req->ring_ptr = req->ring;
req->ring_index = 0;
req->cnt = req->length;
req->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index,
req->cnt, req->id, req->max_q_depth);
ql_dbg(ql_dbg_init, base_vha, 0x00de,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index, req->cnt,
req->id, req->max_q_depth);
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
goto que_failed;
}
return req->id;
que_failed:
qla25xx_free_req_que(base_vha, req);
failed:
return 0;
}
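/* Deferred response queue processing, run from the adapter workqueue */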
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
struct qla_hw_data *ha = rsp->hw;
spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
uint8_t vp_idx, uint16_t rid, int req)
{
int ret = 0;
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t __iomem *reg;
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
ql_log(ql_log_warn, base_vha, 0x0066,
"Failed to allocate memory for response queue.\n");
goto failed;
}
rsp->length = RESPONSE_ENTRY_CNT_MQ;
rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
if (rsp->ring == NULL) {
ql_log(ql_log_warn, base_vha, 0x00e1,
"Failed to allocate memory for response ring.\n");
goto que_failed;
}
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
if (ha->flags.msix_enabled)
rsp->msix = &ha->msix_entries[que_id + 1];
else
ql_log(ql_log_warn, base_vha, 0x00e3,
"MSIX not enalbled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
"queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
options |= BIT_4;
/* Use alternate PCI devfn */
if (LSB(rsp->rid))
options |= BIT_5;
/* Enable MSI-X handshake mode for adapters lacking NACK capability */
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = ®->isp25mq.rsp_q_in;
rsp->rsp_q_out = ®->isp25mq.rsp_q_out;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ret = qla25xx_request_irq(rsp);
if (ret)
goto que_failed;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
goto que_failed;
}
if (req >= 0)
rsp->req = ha->req_q_map[req];
else
rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
if (rsp->hw->wq)
INIT_WORK(&rsp->q_work, qla_do_work);
return rsp->id;
que_failed:
qla25xx_free_rsp_que(base_vha, rsp);
failed:
return 0;
}
| gpl-2.0 |
TangCheng/hisi351x_linux-3.0.y | arch/arm/mach-omap1/clock_data.c | 2971 | 27173 | /*
* linux/arch/arm/mach-omap1/clock_data.c
*
* Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
* Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* To do:
* - Clocks that are only available on some chips should be marked with the
* chips that they are present on.
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach-types.h> /* for machine_is_* */
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/clkdev_omap.h>
#include <plat/usb.h> /* for OTG_BASE */
#include "clock.h"
/* Some ARM_IDLECT1 bit shifts - used in struct arm_idlect1_clk */
#define IDL_CLKOUT_ARM_SHIFT 12
#define IDLTIM_ARM_SHIFT 9
#define IDLAPI_ARM_SHIFT 8
#define IDLIF_ARM_SHIFT 6
#define IDLLB_ARM_SHIFT 4 /* undocumented? */
#define OMAP1510_IDLLCD_ARM_SHIFT 3 /* undocumented? */
#define IDLPER_ARM_SHIFT 2
#define IDLXORP_ARM_SHIFT 1
#define IDLWDT_ARM_SHIFT 0
/* Some MOD_CONF_CTRL_0 bit shifts - used in struct clk.enable_bit */
#define CONF_MOD_UART3_CLK_MODE_R 31
#define CONF_MOD_UART2_CLK_MODE_R 30
#define CONF_MOD_UART1_CLK_MODE_R 29
#define CONF_MOD_MMC_SD_CLK_REQ_R 23
#define CONF_MOD_MCBSP3_AUXON 20
/* Some MOD_CONF_CTRL_1 bit shifts - used in struct clk.enable_bit */
#define CONF_MOD_SOSSI_CLK_EN_R 16
/* Some OTG_SYSCON_2-specific bit fields */
#define OTG_SYSCON_2_UHOST_EN_SHIFT 8
/* Some SOFT_REQ_REG bit fields - used in struct clk.enable_bit */
#define SOFT_MMC2_DPLL_REQ_SHIFT 13
#define SOFT_MMC_DPLL_REQ_SHIFT 12
#define SOFT_UART3_DPLL_REQ_SHIFT 11
#define SOFT_UART2_DPLL_REQ_SHIFT 10
#define SOFT_UART1_DPLL_REQ_SHIFT 9
#define SOFT_USB_OTG_DPLL_REQ_SHIFT 8
#define SOFT_CAM_DPLL_REQ_SHIFT 7
#define SOFT_COM_MCKO_REQ_SHIFT 6
#define SOFT_PERIPH_REQ_SHIFT 5 /* sys_ck gate for UART2 ? */
#define USB_REQ_EN_SHIFT 4
#define SOFT_USB_REQ_SHIFT 3 /* sys_ck gate for USB host? */
#define SOFT_SDW_REQ_SHIFT 2 /* sys_ck gate for Bluetooth? */
#define SOFT_COM_REQ_SHIFT 1 /* sys_ck gate for com proc? */
#define SOFT_DPLL_REQ_SHIFT 0
/*
* Omap1 clocks
*/
static struct clk ck_ref = {
.name = "ck_ref",
.ops = &clkops_null,
.rate = 12000000,
};
static struct clk ck_dpll1 = {
.name = "ck_dpll1",
.ops = &clkops_null,
.parent = &ck_ref,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ FIX: SoSSI, SSR ]
*/
static struct arm_idlect1_clk ck_dpll1out = {
.clk = {
.name = "ck_dpll1out",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT |
ENABLE_ON_INIT,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_CKOUT_ARM,
.recalc = &followparent_recalc,
},
.idlect_shift = IDL_CLKOUT_ARM_SHIFT,
};
static struct clk sossi_ck = {
.name = "ck_sossi",
.ops = &clkops_generic,
.parent = &ck_dpll1out.clk,
.flags = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
.enable_bit = CONF_MOD_SOSSI_CLK_EN_R,
.recalc = &omap1_sossi_recalc,
.set_rate = &omap1_set_sossi_rate,
};
static struct clk arm_ck = {
.name = "arm_ck",
.ops = &clkops_null,
.parent = &ck_dpll1,
.rate_offset = CKCTL_ARMDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct arm_idlect1_clk armper_ck = {
.clk = {
.name = "armper_ck",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = IDLPER_ARM_SHIFT,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ GPIO code for 1510 ]
*/
static struct clk arm_gpio_ck = {
.name = "ick",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.flags = ENABLE_ON_INIT,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_GPIOCK,
.recalc = &followparent_recalc,
};
static struct arm_idlect1_clk armxor_ck = {
.clk = {
.name = "armxor_ck",
.ops = &clkops_generic,
.parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_XORPCK,
.recalc = &followparent_recalc,
},
.idlect_shift = IDLXORP_ARM_SHIFT,
};
static struct arm_idlect1_clk armtim_ck = {
.clk = {
.name = "armtim_ck",
.ops = &clkops_generic,
.parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_TIMCK,
.recalc = &followparent_recalc,
},
.idlect_shift = IDLTIM_ARM_SHIFT,
};
static struct arm_idlect1_clk armwdt_ck = {
.clk = {
.name = "armwdt_ck",
.ops = &clkops_generic,
.parent = &ck_ref,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_WDTCK,
.fixed_div = 14,
.recalc = &omap_fixed_divisor_recalc,
},
.idlect_shift = IDLWDT_ARM_SHIFT,
};
static struct clk arminth_ck16xx = {
.name = "arminth_ck",
.ops = &clkops_null,
.parent = &arm_ck,
.recalc = &followparent_recalc,
/* Note: On 16xx the frequency can be divided by 2 by programming
* ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
*
* 1510 version is in TC clocks.
*/
};
static struct clk dsp_ck = {
.name = "dsp_ck",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.enable_reg = OMAP1_IO_ADDRESS(ARM_CKCTL),
.enable_bit = EN_DSPCK,
.rate_offset = CKCTL_DSPDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct clk dspmmu_ck = {
.name = "dspmmu_ck",
.ops = &clkops_null,
.parent = &ck_dpll1,
.rate_offset = CKCTL_DSPMMUDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct clk dspper_ck = {
.name = "dspper_ck",
.ops = &clkops_dspck,
.parent = &ck_dpll1,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &omap1_ckctl_recalc_dsp_domain,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = &omap1_clk_set_rate_dsp_domain,
};
static struct clk dspxor_ck = {
.name = "dspxor_ck",
.ops = &clkops_dspck,
.parent = &ck_ref,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_XORPCK,
.recalc = &followparent_recalc,
};
static struct clk dsptim_ck = {
.name = "dsptim_ck",
.ops = &clkops_dspck,
.parent = &ck_ref,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_DSPTIMCK,
.recalc = &followparent_recalc,
};
static struct arm_idlect1_clk tc_ck = {
.clk = {
.name = "tc_ck",
.ops = &clkops_null,
.parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL,
.rate_offset = CKCTL_TCDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = IDLIF_ARM_SHIFT,
};
static struct clk arminth_ck1510 = {
.name = "arminth_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
/* Note: On 1510 the frequency follows TC_CK
*
* 16xx version is in MPU clocks.
*/
};
static struct clk tipb_ck = {
/* No-idle controlled by "tc_ck" */
.name = "tipb_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
};
static struct clk l3_ocpi_ck = {
/* No-idle controlled by "tc_ck" */
.name = "l3_ocpi_ck",
.ops = &clkops_generic,
.parent = &tc_ck.clk,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_OCPI_CK,
.recalc = &followparent_recalc,
};
static struct clk tc1_ck = {
.name = "tc1_ck",
.ops = &clkops_generic,
.parent = &tc_ck.clk,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC1_CK,
.recalc = &followparent_recalc,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ pm.c (SRAM), CCP, Camera ]
*/
static struct clk tc2_ck = {
.name = "tc2_ck",
.ops = &clkops_generic,
.parent = &tc_ck.clk,
.flags = ENABLE_ON_INIT,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC2_CK,
.recalc = &followparent_recalc,
};
static struct clk dma_ck = {
/* No-idle controlled by "tc_ck" */
.name = "dma_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
};
static struct clk dma_lcdfree_ck = {
.name = "dma_lcdfree_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
};
static struct arm_idlect1_clk api_ck = {
.clk = {
.name = "api_ck",
.ops = &clkops_generic,
.parent = &tc_ck.clk,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_APICK,
.recalc = &followparent_recalc,
},
.idlect_shift = IDLAPI_ARM_SHIFT,
};
static struct arm_idlect1_clk lb_ck = {
.clk = {
.name = "lb_ck",
.ops = &clkops_generic,
.parent = &tc_ck.clk,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LBCK,
.recalc = &followparent_recalc,
},
.idlect_shift = IDLLB_ARM_SHIFT,
};
static struct clk rhea1_ck = {
.name = "rhea1_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
};
static struct clk rhea2_ck = {
.name = "rhea2_ck",
.ops = &clkops_null,
.parent = &tc_ck.clk,
.recalc = &followparent_recalc,
};
static struct clk lcd_ck_16xx = {
.name = "lcd_ck",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct arm_idlect1_clk lcd_ck_1510 = {
.clk = {
.name = "lcd_ck",
.ops = &clkops_generic,
.parent = &ck_dpll1,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = OMAP1510_IDLLCD_ARM_SHIFT,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clksel.
*
* XXX does this need SYSC register handling?
*/
static struct clk uart1_1510 = {
.name = "uart1_ck",
.ops = &clkops_null,
/* Direct from ULPD, no real parent */
.parent = &armper_ck.clk,
.rate = 12000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART1_CLK_MODE_R,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clksel.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart1_16xx = {
.clk = {
.name = "uart1_ck",
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
.parent = &armper_ck.clk,
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART1_CLK_MODE_R,
},
.sysc_addr = 0xfffb0054,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clksel.
*
* XXX does this need SYSC register handling?
*/
static struct clk uart2_ck = {
.name = "uart2_ck",
.ops = &clkops_null,
/* Direct from ULPD, no real parent */
.parent = &armper_ck.clk,
.rate = 12000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART2_CLK_MODE_R,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clksel.
*
* XXX does this need SYSC register handling?
*/
static struct clk uart3_1510 = {
.name = "uart3_ck",
.ops = &clkops_null,
/* Direct from ULPD, no real parent */
.parent = &armper_ck.clk,
.rate = 12000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART3_CLK_MODE_R,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clksel.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart3_16xx = {
.clk = {
.name = "uart3_ck",
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
.parent = &armper_ck.clk,
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART3_CLK_MODE_R,
},
.sysc_addr = 0xfffb9854,
};
static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
.name = "usb_clko",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 6000000,
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
.enable_bit = USB_MCLK_EN_BIT,
};
static struct clk usb_hhc_ck1510 = {
.name = "usb_hhc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = USB_HOST_HHC_UHOST_EN,
};
static struct clk usb_hhc_ck16xx = {
.name = "usb_hhc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 48000000,
/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(OTG_BASE + 0x08), /* OTG_SYSCON_2 */
.enable_bit = OTG_SYSCON_2_UHOST_EN_SHIFT
};
static struct clk usb_dc_ck = {
.name = "usb_dc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 48000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = USB_REQ_EN_SHIFT,
};
static struct clk usb_dc_ck7xx = {
.name = "usb_dc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 48000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
};
static struct clk uart1_7xx = {
.name = "uart1_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 9,
};
static struct clk uart2_7xx = {
.name = "uart2_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 11,
};
static struct clk mclk_1510 = {
.name = "mclk",
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_COM_MCKO_REQ_SHIFT,
};
static struct clk mclk_16xx = {
.name = "mclk",
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.enable_reg = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
.enable_bit = COM_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
.round_rate = &omap1_round_ext_clk_rate,
.init = &omap1_init_ext_clk,
};
static struct clk bclk_1510 = {
.name = "bclk",
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.rate = 12000000,
};
static struct clk bclk_16xx = {
.name = "bclk",
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.enable_reg = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
.enable_bit = SWD_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
.round_rate = &omap1_round_ext_clk_rate,
.init = &omap1_init_ext_clk,
};
static struct clk mmc1_ck = {
.name = "mmc1_ck",
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.parent = &armper_ck.clk,
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_MMC_SD_CLK_REQ_R,
};
/*
* XXX MOD_CONF_CTRL_0 bit 20 is defined in the 1510 TRM as
* CONF_MOD_MCBSP3_AUXON ??
*/
static struct clk mmc2_ck = {
.name = "mmc2_ck",
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.parent = &armper_ck.clk,
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = 20,
};
static struct clk mmc3_ck = {
.name = "mmc3_ck",
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.parent = &armper_ck.clk,
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_MMC_DPLL_REQ_SHIFT,
};
static struct clk virtual_ck_mpu = {
.name = "mpu",
.ops = &clkops_null,
.parent = &arm_ck, /* Is smarter alias for */
.recalc = &followparent_recalc,
.set_rate = &omap1_select_table_rate,
.round_rate = &omap1_round_to_table_rate,
};
/*
* Virtual functional clock domain for I2C. Just for making sure that
* ARMXOR_CK remains active during MPU idle whenever this is enabled.
*/
static struct clk i2c_fck = {
.name = "i2c_fck",
.ops = &clkops_null,
.flags = CLOCK_NO_IDLE_PARENT,
.parent = &armxor_ck.clk,
.recalc = &followparent_recalc,
};
static struct clk i2c_ick = {
.name = "i2c_ick",
.ops = &clkops_null,
.flags = CLOCK_NO_IDLE_PARENT,
.parent = &armper_ck.clk,
.recalc = &followparent_recalc,
};
/*
* clkdev integration
*/
static struct omap_clk omap_clks[] = {
/* non-ULPD clocks */
CLK(NULL, "ck_ref", &ck_ref, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310 | CK_7XX),
/* CK_GEN1 clocks */
CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk, CK_16XX),
CLK(NULL, "ck_sossi", &sossi_ck, CK_16XX),
CLK(NULL, "arm_ck", &arm_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "armper_ck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap_gpio.0", "ick", &arm_gpio_ck, CK_1510 | CK_310),
CLK(NULL, "armxor_ck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "armtim_ck", &armtim_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap_wdt", "fck", &armwdt_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap_wdt", "ick", &armper_ck.clk, CK_16XX),
CLK("omap_wdt", "ick", &dummy_ck, CK_1510 | CK_310),
CLK(NULL, "arminth_ck", &arminth_ck1510, CK_1510 | CK_310),
CLK(NULL, "arminth_ck", &arminth_ck16xx, CK_16XX),
/* CK_GEN2 clocks */
CLK(NULL, "dsp_ck", &dsp_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspmmu_ck", &dspmmu_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspper_ck", &dspper_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspxor_ck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dsptim_ck", &dsptim_ck, CK_16XX | CK_1510 | CK_310),
/* CK_GEN3 clocks */
CLK(NULL, "tc_ck", &tc_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "tipb_ck", &tipb_ck, CK_1510 | CK_310),
CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck, CK_16XX | CK_7XX),
CLK(NULL, "tc1_ck", &tc1_ck, CK_16XX),
CLK(NULL, "tc2_ck", &tc2_ck, CK_16XX),
CLK(NULL, "dma_ck", &dma_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "lb_ck", &lb_ck.clk, CK_1510 | CK_310),
CLK(NULL, "rhea1_ck", &rhea1_ck, CK_16XX),
CLK(NULL, "rhea2_ck", &rhea2_ck, CK_16XX),
CLK(NULL, "lcd_ck", &lcd_ck_16xx, CK_16XX | CK_7XX),
CLK(NULL, "lcd_ck", &lcd_ck_1510.clk, CK_1510 | CK_310),
/* ULPD clocks */
CLK(NULL, "uart1_ck", &uart1_1510, CK_1510 | CK_310),
CLK(NULL, "uart1_ck", &uart1_16xx.clk, CK_16XX),
CLK(NULL, "uart1_ck", &uart1_7xx, CK_7XX),
CLK(NULL, "uart2_ck", &uart2_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "uart2_ck", &uart2_7xx, CK_7XX),
CLK(NULL, "uart3_ck", &uart3_1510, CK_1510 | CK_310),
CLK(NULL, "uart3_ck", &uart3_16xx.clk, CK_16XX),
CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
CLK(NULL, "bclk", &bclk_16xx, CK_16XX),
CLK("mmci-omap.0", "fck", &mmc1_ck, CK_16XX | CK_1510 | CK_310),
CLK("mmci-omap.0", "fck", &mmc3_ck, CK_7XX),
CLK("mmci-omap.0", "ick", &armper_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK("mmci-omap.1", "fck", &mmc2_ck, CK_16XX),
CLK("mmci-omap.1", "ick", &armper_ck.clk, CK_16XX),
/* Virtual clocks */
CLK(NULL, "mpu", &virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
CLK("omap_i2c.1", "fck", &i2c_fck, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK("omap_i2c.1", "ick", &i2c_ick, CK_16XX),
CLK("omap_i2c.1", "ick", &dummy_ck, CK_1510 | CK_310 | CK_7XX),
CLK("omap1_spi100k.1", "fck", &dummy_ck, CK_7XX),
CLK("omap1_spi100k.1", "ick", &dummy_ck, CK_7XX),
CLK("omap1_spi100k.2", "fck", &dummy_ck, CK_7XX),
CLK("omap1_spi100k.2", "ick", &dummy_ck, CK_7XX),
CLK("omap_uwire", "fck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.1", "ick", &dspper_ck, CK_16XX),
CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_1510 | CK_310),
CLK("omap-mcbsp.2", "ick", &armper_ck.clk, CK_16XX),
CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_1510 | CK_310),
CLK("omap-mcbsp.3", "ick", &dspper_ck, CK_16XX),
CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_1510 | CK_310),
CLK("omap-mcbsp.1", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.2", "fck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.3", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
};
/*
* init
*/
static struct clk_functions omap1_clk_functions = {
.clk_enable = omap1_clk_enable,
.clk_disable = omap1_clk_disable,
.clk_round_rate = omap1_clk_round_rate,
.clk_set_rate = omap1_clk_set_rate,
.clk_disable_unused = omap1_clk_disable_unused,
};
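/*
* omap1_clk_init - register the OMAP1 clocks, determine the DPLL1 rate
* and enable only the clocks needed this early during boot
*/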
int __init omap1_clk_init(void)
{
struct omap_clk *c;
const struct omap_clock_config *info;
int crystal_type = 0; /* Default 12 MHz */
u32 reg, cpu_mask;
#ifdef CONFIG_DEBUG_LL
/*
* Resets some clocks that may be left on from bootloader,
* but leaves serial clocks on.
*/
omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
#endif
/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
omap_writew(reg, SOFT_REQ_REG);
if (!cpu_is_omap15xx())
omap_writew(0, SOFT_REQ_REG2);
clk_init(&omap1_clk_functions);
/* By default all idlect1 clocks are allowed to idle */
arm_idlect1_mask = ~0;
for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
clk_preinit(c->lk.clk);
cpu_mask = 0;
if (cpu_is_omap16xx())
cpu_mask |= CK_16XX;
if (cpu_is_omap1510())
cpu_mask |= CK_1510;
if (cpu_is_omap7xx())
cpu_mask |= CK_7XX;
if (cpu_is_omap310())
cpu_mask |= CK_310;
for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
if (c->cpu & cpu_mask) {
clkdev_add(&c->lk);
clk_register(c->lk.clk);
}
/* Pointers to these clocks are needed by code in clock.c */
api_ck_p = clk_get(NULL, "api_ck");
ck_dpll1_p = clk_get(NULL, "ck_dpll1");
ck_ref_p = clk_get(NULL, "ck_ref");
info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
if (info != NULL) {
if (!cpu_is_omap15xx())
crystal_type = info->system_clock_type;
}
if (cpu_is_omap7xx())
ck_ref.rate = 13000000;
if (cpu_is_omap16xx() && crystal_type == 2)
ck_ref.rate = 19200000;
pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: "
"0x%04x\n", omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
omap_readw(ARM_CKCTL));
/* We want to be in synchronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
/* Use values set by the bootloader. Determine the PLL rate and
 * recalculate dependent clocks as if the kernel had changed the PLL
 * or divisors.
 */
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
if (pll_ctl_val & 0x10) {
/* PLL enabled, apply multiplier and divisor */
if (pll_ctl_val & 0xf80)
ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
} else {
/* PLL disabled, apply bypass divisor */
switch (pll_ctl_val & 0xc) {
case 0:
break;
case 0x4:
ck_dpll1.rate /= 2;
break;
default:
ck_dpll1.rate /= 4;
break;
}
}
}
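/*
 * Worked example (illustration only, not from the original source):
 * with a 12 MHz reference and DPLL_CTL = 0x2290, bit 4 is set (PLL
 * enabled), the multiplier field (bits 7-11) reads 5 and the divisor
 * field (bits 5-6) reads 0, so ck_dpll1.rate becomes
 * 12 MHz * 5 / 1 = 60 MHz -- the same 0x2290/60 MHz pairing used by
 * the fallback path below.
 */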
#else
/* Find the highest supported frequency and enable it */
if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
printk(KERN_ERR "System frequencies not set. Check your config.\n");
/* Guess sane values (60MHz) */
omap_writew(0x2290, DPLL_CTL);
omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
ck_dpll1.rate = 60000000;
}
#endif
propagate_rate(&ck_dpll1);
/* Cache rates for clocks connected to ck_ref (not dpll1) */
propagate_rate(&ck_ref);
printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
"%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
OMAP7XX_PCC_UPLD_CTRL);
}
/* Amstrad Delta wants BCLK high when inactive */
if (machine_is_ams_delta())
omap_writel(omap_readl(ULPD_CLOCK_CTRL) |
(1 << SDW_MCLK_INV_BIT),
ULPD_CLOCK_CTRL);
/* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
/* (on 730, bit 13 must not be cleared) */
if (cpu_is_omap7xx())
omap_writew(omap_readw(ARM_CKCTL) & 0x2fff, ARM_CKCTL);
else
omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
/* Put DSP/MPUI into reset until needed */
omap_writew(0, ARM_RSTCT1);
omap_writew(1, ARM_RSTCT2);
omap_writew(0x400, ARM_IDLECT1);
/*
* According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
* of the ARM_IDLECT2 register must be set to zero. The power-on
* default value of this bit is one.
*/
omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
/*
* Only enable those clocks we will need, let the drivers
* enable other clocks as necessary
*/
clk_enable(&armper_ck.clk);
clk_enable(&armxor_ck.clk);
clk_enable(&armtim_ck.clk); /* This should be done by timer code */
if (cpu_is_omap15xx())
clk_enable(&arm_gpio_ck);
return 0;
}
| gpl-2.0 |
thicklizard/Komodo1 | drivers/acpi/acpica/rscalc.c | 3227 | 18229 | /*******************************************************************************
*
* Module Name: rscalc - Calculate stream and list lengths
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rscalc")
/* Local prototypes */
static u8 acpi_rs_count_set_bits(u16 bit_field);
static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);
static u32
acpi_rs_stream_option_length(u32 resource_length, u32 minimum_aml_resource_length);
/*******************************************************************************
*
* FUNCTION: acpi_rs_count_set_bits
*
* PARAMETERS: bit_field - Field in which to count bits
*
* RETURN: Number of bits set within the field
*
* DESCRIPTION: Count the number of bits set in a resource field. Used for
* (Short descriptor) interrupt and DMA lists.
*
******************************************************************************/
static u8 acpi_rs_count_set_bits(u16 bit_field)
{
u8 bits_set;
ACPI_FUNCTION_ENTRY();
for (bits_set = 0; bit_field; bits_set++) {
/* Zero the least significant bit that is set */
bit_field &= (u16) (bit_field - 1);
}
return bits_set;
}
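/*
 * The loop above is Kernighan's population count: each iteration of
 * "bit_field &= bit_field - 1" clears exactly the lowest set bit, so
 * the loop body runs once per set bit. A minimal standalone sketch
 * (illustration only, compiled out; the helper name is hypothetical):
 */
#if 0
static unsigned int count_set_bits_example(unsigned short mask)
{
	unsigned int count = 0;

	while (mask) {
		mask &= (unsigned short)(mask - 1); /* clear lowest set bit */
		count++;
	}
	return count; /* e.g. count_set_bits_example(0x00A3) == 4 */
}
#endif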
/*******************************************************************************
*
* FUNCTION: acpi_rs_struct_option_length
*
* PARAMETERS: resource_source - Pointer to optional descriptor field
*
* RETURN: Length of the optional resource_source (0 if not present)
*
* DESCRIPTION: Common code to handle optional resource_source_index and
* resource_source fields in some Large descriptors. Used during
* list-to-stream conversion
*
******************************************************************************/
static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
{
ACPI_FUNCTION_ENTRY();
/*
* If the resource_source string is valid, return the size of the string
* (string_length includes the NULL terminator) plus the size of the
* resource_source_index (1).
*/
if (resource_source->string_ptr) {
return ((acpi_rs_length) (resource_source->string_length + 1));
}
return (0);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_stream_option_length
*
* PARAMETERS: resource_length - Length from the resource header
* minimum_aml_resource_length - Minimum length of this resource,
* before any optional fields. Includes header size
*
* RETURN: Length of optional string (0 if no string present)
*
* DESCRIPTION: Common code to handle optional resource_source_index and
* resource_source fields in some Large descriptors. Used during
* stream-to-list conversion
*
******************************************************************************/
static u32
acpi_rs_stream_option_length(u32 resource_length,
u32 minimum_aml_resource_length)
{
u32 string_length = 0;
ACPI_FUNCTION_ENTRY();
/*
* The resource_source_index and resource_source are optional elements of some
* Large-type resource descriptors.
*/
/*
* If the length of the actual resource descriptor is greater than the ACPI
* spec-defined minimum length, it means that a resource_source_index exists
* and is followed by a (required) null terminated string. The string length
* (including the null terminator) is the resource length minus the minimum
* length, minus one byte for the resource_source_index itself.
*/
if (resource_length > minimum_aml_resource_length) {
/* Compute the length of the optional string */
string_length =
resource_length - minimum_aml_resource_length - 1;
}
/*
* Round the length up to a multiple of the native word in order to
* guarantee that the entire resource descriptor is native word aligned
*/
return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
}
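/*
 * Worked example (illustration only, hypothetical numbers): if a
 * descriptor's resource_length is 20 bytes and the spec-defined
 * minimum is 15, the optional part holds one resource_source_index
 * byte plus a 20 - 15 - 1 = 4 byte string (NUL included); the 4 is
 * then rounded up to the native word size before being returned.
 */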
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_aml_length
*
* PARAMETERS: Resource - Pointer to the resource linked list
* size_needed - Where the required size is returned
*
* RETURN: Status
*
* DESCRIPTION: Takes a linked list of internal resource descriptors and
* calculates the size buffer needed to hold the corresponding
* external resource byte stream.
*
******************************************************************************/
acpi_status
acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
{
acpi_size aml_size_needed = 0;
acpi_rs_length total_size;
ACPI_FUNCTION_TRACE(rs_get_aml_length);
/* Traverse entire list of internal resource descriptors */
while (resource) {
/* Validate the descriptor type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
/* Get the base size of the (external stream) resource descriptor */
total_size = acpi_gbl_aml_resource_sizes[resource->type];
/*
* Augment the base size for descriptors with optional and/or
* variable-length fields
*/
switch (resource->type) {
case ACPI_RESOURCE_TYPE_IRQ:
/* Length can be 3 or 2 */
if (resource->data.irq.descriptor_length == 2) {
total_size--;
}
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
/* Length can be 1 or 0 */
if (resource->data.irq.descriptor_length == 0) {
total_size--;
}
break;
case ACPI_RESOURCE_TYPE_VENDOR:
/*
* Vendor Defined Resource:
* For a Vendor Specific resource, if the Length is between 1 and 7
* it will be created as a Small Resource data type, otherwise it
* is a Large Resource data type.
*/
if (resource->data.vendor.byte_length > 7) {
/* Base size of a Large resource descriptor */
total_size =
sizeof(struct aml_resource_large_header);
}
/* Add the size of the vendor-specific data */
total_size = (acpi_rs_length)
(total_size + resource->data.vendor.byte_length);
break;
case ACPI_RESOURCE_TYPE_END_TAG:
/*
* End Tag:
* We are done -- return the accumulated total size.
*/
*size_needed = aml_size_needed + total_size;
/* Normal exit */
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_TYPE_ADDRESS16:
/*
* 16-Bit Address Resource:
* Add the size of the optional resource_source info
*/
total_size = (acpi_rs_length)
(total_size +
acpi_rs_struct_option_length(&resource->data.
address16.
resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
/*
* 32-Bit Address Resource:
* Add the size of the optional resource_source info
*/
total_size = (acpi_rs_length)
(total_size +
acpi_rs_struct_option_length(&resource->data.
address32.
resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
/*
* 64-Bit Address Resource:
* Add the size of the optional resource_source info
*/
total_size = (acpi_rs_length)
(total_size +
acpi_rs_struct_option_length(&resource->data.
address64.
resource_source));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
/*
* Extended IRQ Resource:
* Add the size of each additional optional interrupt beyond the
* required 1 (4 bytes for each u32 interrupt number)
*/
total_size = (acpi_rs_length)
(total_size +
((resource->data.extended_irq.interrupt_count -
1) * 4) +
/* Add the size of the optional resource_source info */
acpi_rs_struct_option_length(&resource->data.
extended_irq.
resource_source));
break;
default:
break;
}
/* Update the total */
aml_size_needed += total_size;
/* Point to the next object */
resource =
ACPI_ADD_PTR(struct acpi_resource, resource,
resource->length);
}
/* Did not find an end_tag resource descriptor */
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
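/*
 * Sizing example (illustration only): an Extended IRQ resource with
 * interrupt_count == 3 adds (3 - 1) * 4 = 8 bytes to the base size,
 * since the first u32 interrupt number is already included in the
 * minimum descriptor, plus the length of any optional
 * resource_source info.
 */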
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_list_length
*
* PARAMETERS: aml_buffer - Pointer to the resource byte stream
* aml_buffer_length - Size of aml_buffer
* size_needed - Where the size needed is returned
*
* RETURN: Status
*
* DESCRIPTION: Takes an external resource byte stream and calculates the size
* buffer needed to hold the corresponding internal resource
* descriptor linked list.
*
******************************************************************************/
acpi_status
acpi_rs_get_list_length(u8 * aml_buffer,
u32 aml_buffer_length, acpi_size * size_needed)
{
acpi_status status;
u8 *end_aml;
u8 *buffer;
u32 buffer_size;
u16 temp16;
u16 resource_length;
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
ACPI_FUNCTION_TRACE(rs_get_list_length);
*size_needed = 0;
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
while (aml_buffer < end_aml) {
/* Validate the Resource Type and Resource Length */
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the resource length and base (minimum) AML size */
resource_length = acpi_ut_get_resource_length(aml_buffer);
minimum_aml_resource_length =
acpi_gbl_resource_aml_sizes[resource_index];
/*
* Augment the size for descriptors with optional
* and/or variable length fields
*/
extra_struct_bytes = 0;
buffer =
aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);
switch (acpi_ut_get_resource_type(aml_buffer)) {
case ACPI_RESOURCE_NAME_IRQ:
/*
* IRQ Resource:
* Get the number of bits set in the 16-bit IRQ mask
*/
ACPI_MOVE_16_TO_16(&temp16, buffer);
extra_struct_bytes = acpi_rs_count_set_bits(temp16);
break;
case ACPI_RESOURCE_NAME_DMA:
/*
* DMA Resource:
* Get the number of bits set in the 8-bit DMA mask
*/
extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
break;
case ACPI_RESOURCE_NAME_VENDOR_SMALL:
case ACPI_RESOURCE_NAME_VENDOR_LARGE:
/*
* Vendor Resource:
* Get the number of vendor data bytes
*/
extra_struct_bytes = resource_length;
break;
case ACPI_RESOURCE_NAME_END_TAG:
/*
* End Tag:
* This is the normal exit, add size of end_tag
*/
*size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_NAME_ADDRESS32:
case ACPI_RESOURCE_NAME_ADDRESS16:
case ACPI_RESOURCE_NAME_ADDRESS64:
/*
* Address Resource:
* Add the size of the optional resource_source
*/
extra_struct_bytes =
acpi_rs_stream_option_length(resource_length,
minimum_aml_resource_length);
break;
case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
/*
* Extended IRQ Resource:
* Using the interrupt_table_length, add 4 bytes for each additional
* interrupt. Note: at least one interrupt is required and is
* included in the minimum descriptor size (reason for the -1)
*/
extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
/* Add the size of the optional resource_source */
extra_struct_bytes +=
acpi_rs_stream_option_length(resource_length -
extra_struct_bytes,
minimum_aml_resource_length);
break;
default:
break;
}
/*
* Update the required buffer size for the internal descriptor structs
*
* Important: Round the size up for the appropriate alignment. This
* is a requirement on IA64.
*/
buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
extra_struct_bytes;
buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
*size_needed += buffer_size;
ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
"Type %.2X, AmlLength %.2X InternalLength %.2X\n",
acpi_ut_get_resource_type(aml_buffer),
acpi_ut_get_descriptor_length(aml_buffer),
buffer_size));
/*
* Point to the next resource within the AML stream using the length
* contained in the resource descriptor header
*/
aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
}
/* Did not find an end_tag resource descriptor */
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_pci_routing_table_length
*
* PARAMETERS: package_object - Pointer to the package object
* buffer_size_needed - Where the size of the buffer needed
* to properly return the parsed data
* is returned
*
* RETURN: Status
*
* DESCRIPTION: Given a package representing a PCI routing table, this
* calculates the size of the corresponding linked list of
* descriptions.
*
******************************************************************************/
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
acpi_size * buffer_size_needed)
{
u32 number_of_elements;
acpi_size temp_size_needed = 0;
union acpi_operand_object **top_object_list;
u32 index;
union acpi_operand_object *package_element;
union acpi_operand_object **sub_object_list;
u8 name_found;
u32 table_index;
ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
number_of_elements = package_object->package.count;
/*
* Calculate the size of the return buffer.
* The base size is the number of elements * the sizes of the
* structures. Additional space for the strings is added below.
* The minus four subtracts the size of the char Source[4]
* member, because space for the actual string is added below.
*
* But each PRT_ENTRY structure has a pointer to a string and
* the size of that string must be found.
*/
top_object_list = package_object->package.elements;
for (index = 0; index < number_of_elements; index++) {
/* Dereference the sub-package */
package_element = *top_object_list;
/* We must have a valid Package object */
if (!package_element ||
(package_element->common.type != ACPI_TYPE_PACKAGE)) {
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/*
* The sub_object_list will now point to an array of the
* four IRQ elements: Address, Pin, Source and source_index
*/
sub_object_list = package_element->package.elements;
/* Scan the irq_table_elements for the Source Name String */
name_found = FALSE;
for (table_index = 0; table_index < 4 && !name_found;
table_index++) {
if (*sub_object_list && /* Null object allowed */
((ACPI_TYPE_STRING ==
(*sub_object_list)->common.type) ||
((ACPI_TYPE_LOCAL_REFERENCE ==
(*sub_object_list)->common.type) &&
((*sub_object_list)->reference.class ==
ACPI_REFCLASS_NAME)))) {
name_found = TRUE;
} else {
/* Look at the next element */
sub_object_list++;
}
}
temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);
/* Was a String type found? */
if (name_found) {
if ((*sub_object_list)->common.type == ACPI_TYPE_STRING) {
/*
* The String.Length field does not include the
* terminating NULL, so add 1
*/
temp_size_needed += ((acpi_size)
(*sub_object_list)->string.
length + 1);
} else {
temp_size_needed +=
acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
}
} else {
/*
* If no name was found, then this is a NULL, which is
* translated as a u32 zero.
*/
temp_size_needed += sizeof(u32);
}
/* Round up the size since each element must be aligned */
temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
/* Point to the next union acpi_operand_object */
top_object_list++;
}
/*
* Add an extra element to the end of the list, essentially a
* NULL terminator
*/
*buffer_size_needed =
temp_size_needed + sizeof(struct acpi_pci_routing_table);
return_ACPI_STATUS(AE_OK);
}
| gpl-2.0 |
ya-mouse/linux-moxa-np6610 | drivers/acpi/acpica/psopcode.c | 3227 | 32635 | /******************************************************************************
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acopcode.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psopcode")
static const u8 acpi_gbl_argument_count[] =
{ 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
/*******************************************************************************
*
* NAME: acpi_gbl_aml_op_info
*
* DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>
* The name is a simple ascii string, the operand specifier is an
* ascii string with one letter per operand. The letter specifies
* the operand type.
*
******************************************************************************/
/*
* Summary of opcode types/flags
*
Opcodes that have associated namespace objects (AML_NSOBJECT flag)
AML_SCOPE_OP
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
AML_POWER_RES_OP
AML_PROCESSOR_OP
AML_FIELD_OP
AML_INDEX_FIELD_OP
AML_BANK_FIELD_OP
AML_NAME_OP
AML_ALIAS_OP
AML_MUTEX_OP
AML_EVENT_OP
AML_REGION_OP
AML_CREATE_FIELD_OP
AML_CREATE_BIT_FIELD_OP
AML_CREATE_BYTE_FIELD_OP
AML_CREATE_WORD_FIELD_OP
AML_CREATE_DWORD_FIELD_OP
AML_CREATE_QWORD_FIELD_OP
AML_INT_NAMEDFIELD_OP
AML_INT_METHODCALL_OP
AML_INT_NAMEPATH_OP
Opcodes that are "namespace" opcodes (AML_NSOPCODE flag)
AML_SCOPE_OP
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
AML_POWER_RES_OP
AML_PROCESSOR_OP
AML_FIELD_OP
AML_INDEX_FIELD_OP
AML_BANK_FIELD_OP
AML_NAME_OP
AML_ALIAS_OP
AML_MUTEX_OP
AML_EVENT_OP
AML_REGION_OP
AML_INT_NAMEDFIELD_OP
Opcodes that have an associated namespace node (AML_NSNODE flag)
AML_SCOPE_OP
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
AML_POWER_RES_OP
AML_PROCESSOR_OP
AML_NAME_OP
AML_ALIAS_OP
AML_MUTEX_OP
AML_EVENT_OP
AML_REGION_OP
AML_CREATE_FIELD_OP
AML_CREATE_BIT_FIELD_OP
AML_CREATE_BYTE_FIELD_OP
AML_CREATE_WORD_FIELD_OP
AML_CREATE_DWORD_FIELD_OP
AML_CREATE_QWORD_FIELD_OP
AML_INT_NAMEDFIELD_OP
AML_INT_METHODCALL_OP
AML_INT_NAMEPATH_OP
Opcodes that define named ACPI objects (AML_NAMED flag)
AML_SCOPE_OP
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
AML_POWER_RES_OP
AML_PROCESSOR_OP
AML_NAME_OP
AML_ALIAS_OP
AML_MUTEX_OP
AML_EVENT_OP
AML_REGION_OP
AML_INT_NAMEDFIELD_OP
Opcodes that contain executable AML as part of the definition that
must be deferred until needed
AML_METHOD_OP
AML_VAR_PACKAGE_OP
AML_CREATE_FIELD_OP
AML_CREATE_BIT_FIELD_OP
AML_CREATE_BYTE_FIELD_OP
AML_CREATE_WORD_FIELD_OP
AML_CREATE_DWORD_FIELD_OP
AML_CREATE_QWORD_FIELD_OP
AML_REGION_OP
AML_BUFFER_OP
Field opcodes
AML_CREATE_FIELD_OP
AML_FIELD_OP
AML_INDEX_FIELD_OP
AML_BANK_FIELD_OP
Field "Create" opcodes
AML_CREATE_FIELD_OP
AML_CREATE_BIT_FIELD_OP
AML_CREATE_BYTE_FIELD_OP
AML_CREATE_WORD_FIELD_OP
AML_CREATE_DWORD_FIELD_OP
AML_CREATE_QWORD_FIELD_OP
******************************************************************************/
/*
* Master Opcode information table. A summary of everything we know about each
* opcode, all in one place.
*/
const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/*! [Begin] no source code translation */
/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */
/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_SIMPLE,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_CONSTANT),
/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_CONSTANT),
/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_CONSTANT),
/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_CONSTANT),
/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
AML_TYPE_CREATE_OBJECT,
AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
AML_TYPE_CREATE_OBJECT,
AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_COMPLEX,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED | AML_DEFER),
/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LOCAL_VARIABLE, 0),
/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_METHOD_ARGUMENT, 0),
/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R),
/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R),
/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_2T_1R,
AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R),
/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
ARGI_CREATE_DWORD_FIELD_OP,
ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_CREATE),
/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
ARGI_CREATE_WORD_FIELD_OP,
ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_CREATE),
/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
ARGI_CREATE_BYTE_FIELD_OP,
ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_CREATE),
/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
ARGI_CREATE_BIT_FIELD_OP,
ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_CREATE),
/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
ACPI_TYPE_ANY, AML_CLASS_CONTROL,
AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_FIELD | AML_CREATE),
/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
AML_FLAGS_EXEC_1A_1T_0R),
/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
AML_FLAGS_EXEC_1A_0T_0R),
/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
AML_FLAGS_EXEC_1A_0T_0R),
/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
AML_FLAGS_EXEC_2A_0T_1R),
/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
AML_FLAGS_EXEC_1A_0T_0R),
/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
AML_TYPE_CONSTANT, 0),
/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_CONSTANT, 0),
/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
AML_FLAGS_EXEC_3A_0T_0R),
/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_COMPLEX,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED | AML_DEFER),
/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_SIMPLE,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_SIMPLE,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED),
/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
AML_DEFER),
/* Internal opcodes that map to invalid AML opcodes */
/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
AML_HAS_ARGS | AML_CONSTANT),
/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
AML_TYPE_METHOD_CALL,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, 0),
/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
AML_TYPE_BOGUS,
AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
AML_HAS_ARGS | AML_HAS_RETVAL),
/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* ACPI 2.0 opcodes */
/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
AML_TYPE_LITERAL, AML_CONSTANT),
/* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
AML_HAS_ARGS | AML_DEFER),
/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
ARGI_CREATE_QWORD_FIELD_OP,
ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
AML_TYPE_CREATE_FIELD,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
AML_DEFER | AML_CREATE),
/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R,
AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_2A_1T_1R,
AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
AML_NSNODE | AML_NAMED | AML_DEFER),
/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
/* ACPI 3.0 opcodes */
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
AML_FLAGS_EXEC_0A_0T_1R)
/*! [End] no source code translation !*/
};
/*
* This table is directly indexed by the opcodes, and returns an
* index into the table above
*/
static const u8 acpi_gbl_short_op_index[256] = {
/* 0 1 2 3 4 5 6 7 */
/* 8 9 A B C D E F */
/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
};
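/*
 * Example (illustration only): the one-byte AML opcode 0x70 (Store)
 * indexes this table at [0x70] and yields 0x1b, the Store entry in
 * acpi_gbl_aml_op_info above.
 */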
/*
* This table is indexed by the second opcode of the extended opcode
* pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
*/
static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
/* 0 1 2 3 4 5 6 7 */
/* 8 9 A B C D E F */
/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
/* 0x88 */ 0x7C,
};
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_opcode_info
*
* PARAMETERS: Opcode - The AML opcode
*
* RETURN: A pointer to the info about the opcode.
*
* DESCRIPTION: Find AML opcode description based on the opcode.
* NOTE: This procedure must ALWAYS return a valid pointer!
*
******************************************************************************/
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
* Detect normal 8-bit opcode or extended 16-bit opcode
*/
if (!(opcode & 0xFF00)) {
/* Simple (8-bit) opcode: 0-255, can't index beyond table */
return (&acpi_gbl_aml_op_info
[acpi_gbl_short_op_index[(u8) opcode]]);
}
if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
(((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
/* Valid extended (16-bit) opcode */
return (&acpi_gbl_aml_op_info
[acpi_gbl_long_op_index[(u8) opcode]]);
}
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Unknown AML opcode [%4.4X]\n", opcode));
return (&acpi_gbl_aml_op_info[_UNK]);
}
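/*
 * Example lookup (illustration only): for the extended opcode 0x5B21
 * (Stall), the high byte matches AML_EXTENDED_OPCODE, so the low
 * byte 0x21 indexes acpi_gbl_long_op_index and yields 0x4b -- the
 * Stall entry in acpi_gbl_aml_op_info.
 */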
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_opcode_name
*
* PARAMETERS: Opcode - The AML opcode
*
* RETURN: A pointer to the name of the opcode (ASCII String)
* Note: Never returns NULL.
*
* DESCRIPTION: Translate an opcode into a human-readable string
*
******************************************************************************/
char *acpi_ps_get_opcode_name(u16 opcode)
{
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
const struct acpi_opcode_info *op;
op = acpi_ps_get_opcode_info(opcode);
/* Always guaranteed to return a valid pointer */
return (op->name);
#else
return ("OpcodeName unavailable");
#endif
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_argument_count
*
* PARAMETERS: op_type - Type associated with the AML opcode
*
* RETURN: Argument count
*
* DESCRIPTION: Obtain the number of expected arguments for an AML opcode
*
******************************************************************************/
u8 acpi_ps_get_argument_count(u32 op_type)
{
if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
return (acpi_gbl_argument_count[op_type]);
}
return (0);
}
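/*
 * Example (illustration only, assuming the AML_TYPE_* constants index
 * acpi_gbl_argument_count in declaration order): Match and LoadTable
 * are declared above as AML_TYPE_EXEC_6A_0T_1R, the last table entry,
 * so this function returns 6 for them; any out-of-range op_type
 * safely yields 0.
 */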
| gpl-2.0 |
lambchops468/omap443x-overclock-lge-p769 | drivers/rtc/rtc-isl12022.c | 3227 | 7725 | /*
* An I2C driver for the Intersil ISL 12022
*
* Author: Roman Fietze <roman.fietze@telemotive.de>
*
* Based on the Philips PCF8563 RTC
* by Alessandro Zummo <a.zummo@towertech.it>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#define DRV_VERSION "0.1"
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
#define ISL12022_REG_MN 0x01
#define ISL12022_REG_HR 0x02
#define ISL12022_REG_DT 0x03
#define ISL12022_REG_MO 0x04
#define ISL12022_REG_YR 0x05
#define ISL12022_REG_DW 0x06
#define ISL12022_REG_SR 0x07
#define ISL12022_REG_INT 0x08
/* ISL register bits */
#define ISL12022_HR_MIL (1 << 7) /* military or 24 hour time */
#define ISL12022_SR_LBAT85 (1 << 2)
#define ISL12022_SR_LBAT75 (1 << 1)
#define ISL12022_INT_WRTC (1 << 6)
static struct i2c_driver isl12022_driver;
struct isl12022 {
struct rtc_device *rtc;
bool write_enabled; /* true if write enable is set */
};
static int isl12022_read_regs(struct i2c_client *client, uint8_t reg,
uint8_t *data, size_t n)
{
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = 1,
.buf = data
}, /* setup read ptr */
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = n,
.buf = data
}
};
int ret;
data[0] = reg;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&client->dev, "%s: read error, ret=%d\n",
__func__, ret);
return -EIO;
}
return 0;
}
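/*
 * Usage note (illustration only): the two-message transfer above
 * first writes the register pointer, then reads n consecutive
 * registers, so a caller can fetch the whole clock bank in one
 * transaction, e.g. isl12022_read_regs(client, ISL12022_REG_SC, buf,
 * ISL12022_REG_INT + 1) as isl12022_get_datetime() does below.
 */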
static int isl12022_write_reg(struct i2c_client *client,
uint8_t reg, uint8_t val)
{
uint8_t data[2] = { reg, val };
int err;
err = i2c_master_send(client, data, sizeof(data));
if (err != sizeof(data)) {
dev_err(&client->dev,
"%s: err=%d addr=%02x, data=%02x\n",
__func__, err, data[0], data[1]);
return -EIO;
}
return 0;
}
/*
* In the routines that deal directly with the isl12022 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int isl12022_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
uint8_t buf[ISL12022_REG_INT + 1];
int ret;
ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf));
if (ret)
return ret;
if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
dev_warn(&client->dev,
"voltage dropped below %u%%, "
"date and time is not reliable.\n",
buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
}
dev_dbg(&client->dev,
"%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, mon=%02x, year=%02x, wday=%02x, "
"sr=%02x, int=%02x",
__func__,
buf[ISL12022_REG_SC],
buf[ISL12022_REG_MN],
buf[ISL12022_REG_HR],
buf[ISL12022_REG_DT],
buf[ISL12022_REG_MO],
buf[ISL12022_REG_YR],
buf[ISL12022_REG_DW],
buf[ISL12022_REG_SR],
buf[ISL12022_REG_INT]);
tm->tm_sec = bcd2bin(buf[ISL12022_REG_SC] & 0x7F);
tm->tm_min = bcd2bin(buf[ISL12022_REG_MN] & 0x7F);
tm->tm_hour = bcd2bin(buf[ISL12022_REG_HR] & 0x3F);
tm->tm_mday = bcd2bin(buf[ISL12022_REG_DT] & 0x3F);
tm->tm_wday = buf[ISL12022_REG_DW] & 0x07;
tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1;
tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100;
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
/* The clock can give out an invalid date/time, but we cannot return
 * -EINVAL; otherwise hwclock will refuse to set the time on bootup. */
if (rtc_valid_tm(tm) < 0)
dev_err(&client->dev, "retrieved date and time is invalid.\n");
return 0;
}
static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
struct isl12022 *isl12022 = i2c_get_clientdata(client);
size_t i;
int ret;
uint8_t buf[ISL12022_REG_DW + 1];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
if (!isl12022->write_enabled) {
ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1);
if (ret)
return ret;
/* Check if WRTC (write rtc enable) is set; the factory
 * default is 0 (not set) */
if (!(buf[0] & ISL12022_INT_WRTC)) {
dev_info(&client->dev,
"init write enable and 24 hour format\n");
/* Set the write enable bit. */
ret = isl12022_write_reg(client,
ISL12022_REG_INT,
buf[0] | ISL12022_INT_WRTC);
if (ret)
return ret;
/* Write to any RTC register to start RTC, we use the
* HR register, setting the MIL bit to use the 24 hour
* format. */
ret = isl12022_read_regs(client, ISL12022_REG_HR,
buf, 1);
if (ret)
return ret;
ret = isl12022_write_reg(client,
ISL12022_REG_HR,
buf[0] | ISL12022_HR_MIL);
if (ret)
return ret;
}
isl12022->write_enabled = 1;
}
/* hours, minutes and seconds */
buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec);
buf[ISL12022_REG_MN] = bin2bcd(tm->tm_min);
buf[ISL12022_REG_HR] = bin2bcd(tm->tm_hour) | ISL12022_HR_MIL;
buf[ISL12022_REG_DT] = bin2bcd(tm->tm_mday);
/* month, 1 - 12 */
buf[ISL12022_REG_MO] = bin2bcd(tm->tm_mon + 1);
/* year and century */
buf[ISL12022_REG_YR] = bin2bcd(tm->tm_year % 100);
buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;
/* write register's data */
for (i = 0; i < ARRAY_SIZE(buf); i++) {
ret = isl12022_write_reg(client, ISL12022_REG_SC + i,
buf[ISL12022_REG_SC + i]);
if (ret)
return -EIO;
}
return 0;
}
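/*
 * Worked example (illustration only): to set 23:05:59 on 2011-03-07
 * (tm_year == 111, tm_mon == 2), the code above writes
 * bin2bcd(59) == 0x59 to SC, bin2bcd(3) == 0x03 to MO and
 * bin2bcd(111 % 100) == 0x11 to YR, with the MIL bit forcing
 * 24-hour mode in HR.
 */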
static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return isl12022_get_datetime(to_i2c_client(dev), tm);
}
static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return isl12022_set_datetime(to_i2c_client(dev), tm);
}
static const struct rtc_class_ops isl12022_rtc_ops = {
.read_time = isl12022_rtc_read_time,
.set_time = isl12022_rtc_set_time,
};
static int isl12022_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct isl12022 *isl12022;
int ret = 0;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
isl12022 = kzalloc(sizeof(struct isl12022), GFP_KERNEL);
if (!isl12022)
return -ENOMEM;
dev_dbg(&client->dev, "chip found, driver version " DRV_VERSION "\n");
i2c_set_clientdata(client, isl12022);
isl12022->rtc = rtc_device_register(isl12022_driver.driver.name,
&client->dev,
&isl12022_rtc_ops,
THIS_MODULE);
if (IS_ERR(isl12022->rtc)) {
ret = PTR_ERR(isl12022->rtc);
goto exit_kfree;
}
return 0;
exit_kfree:
kfree(isl12022);
return ret;
}
static int isl12022_remove(struct i2c_client *client)
{
struct isl12022 *isl12022 = i2c_get_clientdata(client);
rtc_device_unregister(isl12022->rtc);
kfree(isl12022);
return 0;
}
static const struct i2c_device_id isl12022_id[] = {
{ "isl12022", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl12022_id);
static struct i2c_driver isl12022_driver = {
.driver = {
.name = "rtc-isl12022",
},
.probe = isl12022_probe,
.remove = isl12022_remove,
.id_table = isl12022_id,
};
static int __init isl12022_init(void)
{
return i2c_add_driver(&isl12022_driver);
}
static void __exit isl12022_exit(void)
{
i2c_del_driver(&isl12022_driver);
}
module_init(isl12022_init);
module_exit(isl12022_exit);
MODULE_AUTHOR("Roman Fietze <roman.fietze@telemotive.de>");
MODULE_DESCRIPTION("ISL 12022 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
maniacx/android_kernel_htcleo-3.0_old | drivers/acpi/acpica/psscope.c | 3227 | 8353 | /******************************************************************************
*
* Module Name: psscope - Parser scope stack management routines
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psscope")
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_parent_scope
*
* PARAMETERS: parser_state - Current parser state object
*
* RETURN: Pointer to an Op object
*
* DESCRIPTION: Get parent of current op being parsed
*
******************************************************************************/
union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state
*parser_state)
{
return (parser_state->scope->parse_scope.op);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_has_completed_scope
*
* PARAMETERS: parser_state - Current parser state object
*
* RETURN: Boolean, TRUE = scope completed.
*
* DESCRIPTION: Is parsing of current argument complete? Determined by
* 1) AML pointer is at or beyond the end of the scope
* 2) The scope argument count has reached zero.
*
******************************************************************************/
u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
{
return ((u8)
((parser_state->aml >= parser_state->scope->parse_scope.arg_end
|| !parser_state->scope->parse_scope.arg_count)));
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_init_scope
*
* PARAMETERS: parser_state - Current parser state object
* root_op - The root Op of this new scope
*
* RETURN: Status
*
* DESCRIPTION: Allocate and init a new scope object
*
******************************************************************************/
acpi_status
acpi_ps_init_scope(struct acpi_parse_state * parser_state,
union acpi_parse_object * root_op)
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
scope->parse_scope.op = root_op;
scope->parse_scope.arg_count = ACPI_VAR_ARGS;
scope->parse_scope.arg_end = parser_state->aml_end;
scope->parse_scope.pkg_end = parser_state->aml_end;
parser_state->scope = scope;
parser_state->start_op = root_op;
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_push_scope
*
* PARAMETERS: parser_state - Current parser state object
* Op - Current op to be pushed
* remaining_args - List of args remaining
* arg_count - Fixed or variable number of args
*
* RETURN: Status
*
* DESCRIPTION: Push current op to begin parsing its argument
*
******************************************************************************/
acpi_status
acpi_ps_push_scope(struct acpi_parse_state *parser_state,
union acpi_parse_object *op,
u32 remaining_args, u32 arg_count)
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
scope->parse_scope.op = op;
scope->parse_scope.arg_list = remaining_args;
scope->parse_scope.arg_count = arg_count;
scope->parse_scope.pkg_end = parser_state->pkg_end;
/* Push onto scope stack */
acpi_ut_push_generic_state(&parser_state->scope, scope);
if (arg_count == ACPI_VAR_ARGS) {
/* Multiple arguments */
scope->parse_scope.arg_end = parser_state->pkg_end;
} else {
/* Single argument */
scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR);
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_pop_scope
*
* PARAMETERS: parser_state - Current parser state object
* Op - Where the popped op is returned
* arg_list - Where the popped "next argument" is
* returned
* arg_count - Count of objects in arg_list
*
* RETURN: Status
*
* DESCRIPTION: Return to parsing a previous op
*
******************************************************************************/
void
acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
union acpi_parse_object **op, u32 * arg_list, u32 * arg_count)
{
union acpi_generic_state *scope = parser_state->scope;
ACPI_FUNCTION_TRACE(ps_pop_scope);
/* Only pop the scope if there is in fact a next scope */
if (scope->common.next) {
scope = acpi_ut_pop_generic_state(&parser_state->scope);
/* Return to parsing previous op */
*op = scope->parse_scope.op;
*arg_list = scope->parse_scope.arg_list;
*arg_count = scope->parse_scope.arg_count;
parser_state->pkg_end = scope->parse_scope.pkg_end;
/* All done with this scope state structure */
acpi_ut_delete_generic_state(scope);
} else {
/* Empty parse stack, prepare to fetch next opcode */
*op = NULL;
*arg_list = 0;
*arg_count = 0;
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Popped Op %p Args %X\n", *op, *arg_count));
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_cleanup_scope
*
* PARAMETERS: parser_state - Current parser state object
*
* RETURN: None
*
* DESCRIPTION: Destroy available list, remaining stack levels, and return
* root scope
*
******************************************************************************/
void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
if (!parser_state) {
return_VOID;
}
/* Delete anything on the scope stack */
while (parser_state->scope) {
scope = acpi_ut_pop_generic_state(&parser_state->scope);
acpi_ut_delete_generic_state(scope);
}
return_VOID;
}
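/*
 * A minimal usage sketch of this scope stack (hypothetical caller; the
 * real parser loop in psloop.c is more involved, and "parsing" below is
 * a placeholder condition):
 *
 * acpi_ps_init_scope(parser_state, root_op);
 * while (parsing) {
 * acpi_ps_push_scope(parser_state, op, remaining_args, arg_count);
 * ... parse args until acpi_ps_has_completed_scope() returns TRUE ...
 * acpi_ps_pop_scope(parser_state, &op, &arg_list, &arg_count);
 * }
 * acpi_ps_cleanup_scope(parser_state);
 */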
| gpl-2.0 |
fbli41/android_kernel_samsung_cs02 | drivers/usb/gadget/goku_udc.c | 4763 | 47813 | /*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
* Copyright (C) 2000-2002 Lineo
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
* This device has ep0 and three semi-configurable bulk/interrupt endpoints.
*
* - Endpoint numbering is fixed: ep{1,2,3}-bulk
* - Gadget drivers can choose ep maxpacket (8/16/32/64)
* - Gadget drivers can choose direction (IN, OUT)
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include "goku_udc.h"
#define DRIVER_DESC "TC86C001 USB Device Controller"
#define DRIVER_VERSION "30-Oct 2003"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;
MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* IN dma behaves ok under testing, though the IN-dma abort paths don't
* seem to behave quite as expected. Used by default.
*
* OUT dma is documented to have design problems handling the common
* "short packet" transfer termination policy; it couldn't be enabled
* by default, even if the OUT-dma abort problems had a resolution.
*/
static unsigned use_dma = 1;
#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
* 0 to disable dma
* 1 to use IN dma only (normal operation)
* 2 to use IN and OUT dma
*/
module_param(use_dma, uint, S_IRUGO);
#endif
/*-------------------------------------------------------------------------*/
static void nuke(struct goku_ep *, int status);
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
writel(COMMAND_EP(epnum) | command, &regs->Command);
udelay(300);
}
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct goku_udc *dev;
struct goku_ep *ep;
u32 mode;
u16 max;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !desc || ep->desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dev = ep->dev;
if (ep == &dev->ep[0])
return -EINVAL;
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
if (ep->num != usb_endpoint_num(desc))
return -EINVAL;
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
break;
default:
return -EINVAL;
}
if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
!= EPxSTATUS_EP_INVALID)
return -EBUSY;
/* enabling the no-toggle interrupt mode would need an api hook */
mode = 0;
max = get_unaligned_le16(&desc->wMaxPacketSize);
switch (max) {
case 64: mode++;
case 32: mode++;
case 16: mode++;
case 8: mode <<= 3;
break;
default:
return -EINVAL;
}
mode |= 2 << 1; /* bulk, or intr-with-toggle */
/* ep1/ep2 dma direction is chosen early; it works in the other
* direction, with pio. be cautious with out-dma.
*/
ep->is_in = usb_endpoint_dir_in(desc);
if (ep->is_in) {
mode |= 1;
ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
} else {
ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
if (ep->dma)
DBG(dev, "%s out-dma hides short packets\n",
ep->ep.name);
}
spin_lock_irqsave(&ep->dev->lock, flags);
/* ep1 and ep2 can do double buffering and/or dma */
if (ep->num < 3) {
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 tmp;
/* double buffer except (for now) with pio in */
tmp = ((ep->dma || !ep->is_in)
? 0x10 /* double buffered */
: 0x11 /* single buffer */
) << ep->num;
tmp |= readl(&regs->EPxSingle);
writel(tmp, &regs->EPxSingle);
tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
tmp |= readl(&regs->EPxBCS);
writel(tmp, &regs->EPxBCS);
}
writel(mode, ep->reg_mode);
command(ep->dev->regs, COMMAND_RESET, ep->num);
ep->ep.maxpacket = max;
ep->stopped = 0;
ep->desc = desc;
spin_unlock_irqrestore(&ep->dev->lock, flags);
DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
ep->is_in ? "IN" : "OUT",
ep->dma ? "dma" : "pio",
max);
return 0;
}
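/*
 * Worked example of the mode encoding in goku_ep_enable(): for a bulk
 * IN endpoint with maxpacket 64, the fall-through switch counts up to
 * mode = 3 and the "case 8" shift yields 3 << 3 = 0x18; OR-ing the
 * bulk/intr-with-toggle type (2 << 1) and the IN direction bit gives
 * mode = 0x1d. A maxpacket-8 OUT bulk endpoint encodes as plain 0x04.
 */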
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
struct goku_udc *dev = ep->dev;
if (regs) {
command(regs, COMMAND_INVALID, ep->num);
if (ep->num) {
if (ep->num == UDC_MSTWR_ENDPOINT)
dev->int_enable &= ~(INT_MSTWREND
|INT_MSTWRTMOUT);
else if (ep->num == UDC_MSTRD_ENDPOINT)
dev->int_enable &= ~INT_MSTRDEND;
dev->int_enable &= ~INT_EPxDATASET (ep->num);
} else
dev->int_enable &= ~INT_EP0;
writel(dev->int_enable, &regs->int_enable);
readl(&regs->int_enable);
if (ep->num < 3) {
struct goku_udc_regs __iomem *r = ep->dev->regs;
u32 tmp;
tmp = readl(&r->EPxSingle);
tmp &= ~(0x11 << ep->num);
writel(tmp, &r->EPxSingle);
tmp = readl(&r->EPxBCS);
tmp &= ~(0x11 << ep->num);
writel(tmp, &r->EPxBCS);
}
/* reset dma in case we're still using it */
if (ep->dma) {
u32 master;
master = readl(&regs->dma_master) & MST_RW_BITS;
if (ep->num == UDC_MSTWR_ENDPOINT) {
master &= ~MST_W_BITS;
master |= MST_WR_RESET;
} else {
master &= ~MST_R_BITS;
master |= MST_RD_RESET;
}
writel(master, &regs->dma_master);
}
}
ep->ep.maxpacket = MAX_FIFO_SIZE;
ep->desc = NULL;
ep->ep.desc = NULL;
ep->stopped = 1;
ep->irqs = 0;
ep->dma = 0;
}
static int goku_ep_disable(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !ep->desc)
return -ENODEV;
dev = ep->dev;
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "disable %s\n", _ep->name);
spin_lock_irqsave(&dev->lock, flags);
nuke(ep, -ESHUTDOWN);
ep_reset(dev->regs, ep);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
struct goku_request *req;
if (!_ep)
return NULL;
req = kzalloc(sizeof *req, gfp_flags);
if (!req)
return NULL;
req->req.dma = DMA_ADDR_INVALID;
INIT_LIST_HEAD(&req->queue);
return &req->req;
}
static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
if (!_ep || !_req)
return;
req = container_of(_req, struct goku_request, req);
WARN_ON(!list_empty(&req->queue));
kfree(req);
}
/*-------------------------------------------------------------------------*/
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
struct goku_udc *dev;
unsigned stopped = ep->stopped;
list_del_init(&req->queue);
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
else
status = req->req.status;
dev = ep->dev;
if (ep->dma)
usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
#ifndef USB_TRACE
if (status && status != -ESHUTDOWN)
#endif
VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
ep->ep.name, &req->req, status,
req->req.actual, req->req.length);
/* don't modify queue heads during completion callback */
ep->stopped = 1;
spin_unlock(&dev->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&dev->lock);
ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
unsigned length, count;
length = min(req->req.length - req->req.actual, max);
req->req.actual += length;
count = length;
while (likely(count--))
writel(*buf++, fifo);
return length;
}
// return: 0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc *dev = ep->dev;
u32 tmp;
u8 *buf;
unsigned count;
int is_last;
tmp = readl(&dev->regs->DataSet);
buf = req->req.buf + req->req.actual;
prefetch(buf);
dev = ep->dev;
if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
return -EL2HLT;
/* NOTE: just single-buffered PIO-IN for now. */
if (unlikely((tmp & DATASET_A(ep->num)) != 0))
return 0;
/* clear our "packet available" irq */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
/* last packet often short (sometimes a zlp, especially on ep0) */
if (unlikely(count != ep->ep.maxpacket)) {
writel(~(1<<ep->num), &dev->regs->EOP);
if (ep->num == 0) {
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
}
is_last = 1;
} else {
if (likely(req->req.length != req->req.actual)
|| req->req.zero)
is_last = 0;
else
is_last = 1;
}
#if 0 /* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
ep->ep.name, count, is_last ? "/last" : "",
req->req.length - req->req.actual, req);
#endif
/* requests complete when all IN data is in the FIFO,
* or sometimes later, if a zlp was needed.
*/
if (is_last) {
done(ep, req, 0);
return 1;
}
return 0;
}
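/*
 * Example of the is_last logic above, assuming maxpacket 64 and a
 * 100-byte request: the first write_fifo() call loads a full 64-byte
 * packet (count == maxpacket, not last); the second loads the
 * remaining 36 bytes, the short packet triggers the EOP write, and
 * the request completes via done().
 */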
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc_regs __iomem *regs;
u32 size, set;
u8 *buf;
unsigned bufferspace, is_short, dbuff;
regs = ep->dev->regs;
top:
buf = req->req.buf + req->req.actual;
prefetchw(buf);
if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
return -EL2HLT;
dbuff = (ep->num == 1 || ep->num == 2);
do {
/* ack dataset irq matching the status we'll handle */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &regs->int_status);
set = readl(&regs->DataSet) & DATASET_AB(ep->num);
size = readl(&regs->EPxSizeLA[ep->num]);
bufferspace = req->req.length - req->req.actual;
/* usually do nothing without an OUT packet */
if (likely(ep->num != 0 || bufferspace != 0)) {
if (unlikely(set == 0))
break;
/* use ep1/ep2 double-buffering for OUT */
if (!(size & PACKET_ACTIVE))
size = readl(&regs->EPxSizeLB[ep->num]);
if (!(size & PACKET_ACTIVE)) /* "can't happen" */
break;
size &= DATASIZE; /* EPxSizeH == 0 */
/* ep0out no-out-data case for set_config, etc */
} else
size = 0;
/* read all bytes from this packet */
req->req.actual += size;
is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
ep->ep.name, size, is_short ? "/S" : "",
req, req->req.actual, req->req.length);
#endif
while (likely(size-- != 0)) {
u8 byte = (u8) readl(ep->reg_fifo);
if (unlikely(bufferspace == 0)) {
/* this happens when the driver's buffer
* is smaller than what the host sent.
* discard the extra data in this packet.
*/
if (req->req.status != -EOVERFLOW)
DBG(ep->dev, "%s overflow %u\n",
ep->ep.name, size);
req->req.status = -EOVERFLOW;
} else {
*buf++ = byte;
bufferspace--;
}
}
/* completion */
if (unlikely(is_short || req->req.actual == req->req.length)) {
if (unlikely(ep->num == 0)) {
/* non-control endpoints now usable? */
if (ep->dev->req_config)
writel(ep->dev->configured
? USBSTATE_CONFIGURED
: 0,
&regs->UsbState);
/* ep0out status stage */
writel(~(1<<0), &regs->EOP);
ep->stopped = 1;
ep->dev->ep0state = EP0_STATUS;
}
done(ep, req, 0);
/* empty the second buffer asap */
if (dbuff && !list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct goku_request, queue);
goto top;
}
return 1;
}
} while (dbuff);
return 0;
}
static inline void
pio_irq_enable(struct goku_udc *dev,
struct goku_udc_regs __iomem *regs, int epnum)
{
dev->int_enable |= INT_EPxDATASET (epnum);
writel(dev->int_enable, &regs->int_enable);
/* write may still be posted */
}
static inline void
pio_irq_disable(struct goku_udc *dev,
struct goku_udc_regs __iomem *regs, int epnum)
{
dev->int_enable &= ~INT_EPxDATASET (epnum);
writel(dev->int_enable, &regs->int_enable);
/* write may still be posted */
}
static inline void
pio_advance(struct goku_ep *ep)
{
struct goku_request *req;
if (unlikely(list_empty (&ep->queue)))
return;
req = list_entry(ep->queue.next, struct goku_request, queue);
(ep->is_in ? write_fifo : read_fifo)(ep, req);
}
/*-------------------------------------------------------------------------*/
// return: 0 = q running, 1 = q stopped, negative = errno
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 master;
u32 start = req->req.dma;
u32 end = start + req->req.length - 1;
master = readl(&regs->dma_master) & MST_RW_BITS;
/* re-init the bits affecting IN dma; careful with zlps */
if (likely(ep->is_in)) {
if (unlikely(master & MST_RD_ENA)) {
DBG (ep->dev, "start, IN active dma %03x!!\n",
master);
// return -EL2HLT;
}
writel(end, &regs->in_dma_end);
writel(start, &regs->in_dma_start);
master &= ~MST_R_BITS;
if (unlikely(req->req.length == 0))
master = MST_RD_ENA | MST_RD_EOPB;
else if ((req->req.length % ep->ep.maxpacket) != 0
|| req->req.zero)
master = MST_RD_ENA | MST_EOPB_ENA;
else
master = MST_RD_ENA | MST_EOPB_DIS;
ep->dev->int_enable |= INT_MSTRDEND;
/* Goku DMA-OUT merges short packets, which plays poorly with
* protocols where short packets mark the transfer boundaries.
* The chip supports a nonstandard policy with INT_MSTWRTMOUT,
* ending transfers after 3 SOFs; we don't turn it on.
*/
} else {
if (unlikely(master & MST_WR_ENA)) {
DBG (ep->dev, "start, OUT active dma %03x!!\n",
master);
// return -EL2HLT;
}
writel(end, &regs->out_dma_end);
writel(start, &regs->out_dma_start);
master &= ~MST_W_BITS;
master |= MST_WR_ENA | MST_TIMEOUT_DIS;
ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
}
writel(master, &regs->dma_master);
writel(ep->dev->int_enable, &regs->int_enable);
return 0;
}
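/*
 * Illustration of the IN-dma EOPB policy above (assumed request sizes,
 * maxpacket 64): a 512-byte request is a whole number of packets, so
 * EOPB is disabled (MST_EOPB_DIS) and no short packet ends the
 * transfer; a 100-byte request (100 % 64 != 0), or one with req.zero
 * set, enables EOPB so the final short or zero-length packet is
 * generated.
 */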
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
struct goku_request *req;
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 master;
master = readl(&regs->dma_master);
if (unlikely(list_empty(&ep->queue))) {
stop:
if (ep->is_in)
dev->int_enable &= ~INT_MSTRDEND;
else
dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
writel(dev->int_enable, &regs->int_enable);
return;
}
req = list_entry(ep->queue.next, struct goku_request, queue);
/* normal hw dma completion (not abort) */
if (likely(ep->is_in)) {
if (unlikely(master & MST_RD_ENA))
return;
req->req.actual = readl(&regs->in_dma_current);
} else {
if (unlikely(master & MST_WR_ENA))
return;
/* hardware merges short packets, and also hides packet
* overruns. a partial packet MAY be in the fifo here.
*/
req->req.actual = readl(&regs->out_dma_current);
}
req->req.actual -= req->req.dma;
req->req.actual++;
#ifdef USB_TRACE
VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
ep->ep.name, ep->is_in ? "IN" : "OUT",
req->req.actual, req->req.length, req);
#endif
done(ep, req, 0);
if (list_empty(&ep->queue))
goto stop;
req = list_entry(ep->queue.next, struct goku_request, queue);
(void) start_dma(ep, req);
}
static void abort_dma(struct goku_ep *ep, int status)
{
struct goku_udc_regs __iomem *regs = ep->dev->regs;
struct goku_request *req;
u32 curr, master;
/* NAK future host requests, hoping the implicit delay lets the
* dma engine finish reading (or writing) its latest packet and
* empty the dma buffer (up to 16 bytes).
*
* This avoids needing to clean up a partial packet in the fifo;
* we can't do that for IN without side effects to HALT and TOGGLE.
*/
command(regs, COMMAND_FIFO_DISABLE, ep->num);
req = list_entry(ep->queue.next, struct goku_request, queue);
master = readl(&regs->dma_master) & MST_RW_BITS;
/* FIXME using these resets isn't usably documented. this may
* not work unless it's followed by disabling the endpoint.
*
* FIXME the OUT reset path doesn't even behave consistently.
*/
if (ep->is_in) {
if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
goto finished;
curr = readl(&regs->in_dma_current);
writel(curr, &regs->in_dma_end);
writel(curr, &regs->in_dma_start);
master &= ~MST_R_BITS;
master |= MST_RD_RESET;
writel(master, &regs->dma_master);
if (readl(&regs->dma_master) & MST_RD_ENA)
DBG(ep->dev, "IN dma active after reset!\n");
} else {
if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
goto finished;
curr = readl(&regs->out_dma_current);
writel(curr, &regs->out_dma_end);
writel(curr, &regs->out_dma_start);
master &= ~MST_W_BITS;
master |= MST_WR_RESET;
writel(master, &regs->dma_master);
if (readl(&regs->dma_master) & MST_WR_ENA)
DBG(ep->dev, "OUT dma active after reset!\n");
DBG(ep->dev, "OUT dma active after reset!\n");
}
req->req.actual = (curr - req->req.dma) + 1;
req->req.status = status;
VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
ep->is_in ? "IN" : "OUT",
req->req.actual, req->req.length);
command(regs, COMMAND_FIFO_ENABLE, ep->num);
return;
finished:
/* dma already completed; no abort needed */
command(regs, COMMAND_FIFO_ENABLE, ep->num);
req->req.actual = req->req.length;
req->req.status = 0;
}
/*-------------------------------------------------------------------------*/
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct goku_request *req;
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
int status;
/* always require a cpu-view buffer so pio works */
req = container_of(_req, struct goku_request, req);
if (unlikely(!_req || !_req->complete
|| !_req->buf || !list_empty(&req->queue)))
return -EINVAL;
ep = container_of(_ep, struct goku_ep, ep);
if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
return -EINVAL;
dev = ep->dev;
if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
return -ESHUTDOWN;
/* can't touch registers when suspended */
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
/* set up dma mapping in case the caller didn't */
if (ep->dma) {
status = usb_gadget_map_request(&dev->gadget, &req->req,
ep->is_in);
if (status)
return status;
}
#ifdef USB_TRACE
VDBG(dev, "%s queue req %p, len %u buf %p\n",
_ep->name, _req, _req->length, _req->buf);
#endif
spin_lock_irqsave(&dev->lock, flags);
_req->status = -EINPROGRESS;
_req->actual = 0;
/* for ep0 IN without premature status, zlp is required and
* writing EOP starts the status stage (OUT).
*/
if (unlikely(ep->num == 0 && ep->is_in))
_req->zero = 1;
/* kickstart this i/o queue? */
status = 0;
if (list_empty(&ep->queue) && likely(!ep->stopped)) {
/* dma: done after dma completion IRQ (or error)
* pio: done after last fifo operation
*/
if (ep->dma)
status = start_dma(ep, req);
else
status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
if (unlikely(status != 0)) {
if (status > 0)
status = 0;
req = NULL;
}
} /* else pio or dma irq handler advances the queue. */
if (likely(req != 0))
list_add_tail(&req->queue, &ep->queue);
if (likely(!list_empty(&ep->queue))
&& likely(ep->num != 0)
&& !ep->dma
&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
pio_irq_enable(dev, dev->regs, ep->num);
spin_unlock_irqrestore(&dev->lock, flags);
/* pci writes may still be posted */
return status;
}
/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
struct goku_request *req;
ep->stopped = 1;
if (list_empty(&ep->queue))
return;
if (ep->dma)
abort_dma(ep, status);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct goku_request, queue);
done(ep, req, status);
}
}
/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !_req || (!ep->desc && ep->num != 0))
return -EINVAL;
dev = ep->dev;
if (!dev->driver)
return -ESHUTDOWN;
/* we can't touch (dma) registers when suspended */
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
ep->is_in ? "IN" : "OUT",
ep->dma ? "dma" : "pio",
_req);
spin_lock_irqsave(&dev->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry (req, &ep->queue, queue) {
if (&req->req == _req)
break;
}
if (&req->req != _req) {
spin_unlock_irqrestore (&dev->lock, flags);
return -EINVAL;
}
if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
abort_dma(ep, -ECONNRESET);
done(ep, req, -ECONNRESET);
dma_advance(dev, ep);
} else if (!list_empty(&req->queue))
done(ep, req, -ECONNRESET);
else
req = NULL;
spin_unlock_irqrestore(&dev->lock, flags);
return req ? 0 : -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
static void goku_clear_halt(struct goku_ep *ep)
{
// assert (ep->num !=0)
VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
if (ep->stopped) {
ep->stopped = 0;
if (ep->dma) {
struct goku_request *req;
if (list_empty(&ep->queue))
return;
req = list_entry(ep->queue.next, struct goku_request,
queue);
(void) start_dma(ep, req);
} else
pio_advance(ep);
}
}
static int goku_set_halt(struct usb_ep *_ep, int value)
{
struct goku_ep *ep;
unsigned long flags;
int retval = 0;
if (!_ep)
return -ENODEV;
ep = container_of (_ep, struct goku_ep, ep);
if (ep->num == 0) {
if (value) {
ep->dev->ep0state = EP0_STALL;
ep->dev->ep[0].stopped = 1;
} else
return -EINVAL;
/* don't change EPxSTATUS_EP_INVALID to READY */
} else if (!ep->desc) {
DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
return -EINVAL;
}
spin_lock_irqsave(&ep->dev->lock, flags);
if (!list_empty(&ep->queue))
retval = -EAGAIN;
else if (ep->is_in && value
/* data in (either) packet buffer? */
&& (readl(&ep->dev->regs->DataSet)
& DATASET_AB(ep->num)))
retval = -EAGAIN;
else if (!value)
goku_clear_halt(ep);
else {
ep->stopped = 1;
VDBG(ep->dev, "%s set halt\n", ep->ep.name);
command(ep->dev->regs, COMMAND_STALL, ep->num);
readl(ep->reg_status);
}
spin_unlock_irqrestore(&ep->dev->lock, flags);
return retval;
}
static int goku_fifo_status(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc_regs __iomem *regs;
u32 size;
if (!_ep)
return -ENODEV;
ep = container_of(_ep, struct goku_ep, ep);
/* size is only reported sanely for OUT */
if (ep->is_in)
return -EOPNOTSUPP;
/* ignores 16-byte dma buffer; SizeH == 0 */
regs = ep->dev->regs;
size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
return size;
}
static void goku_fifo_flush(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc_regs __iomem *regs;
u32 size;
if (!_ep)
return;
ep = container_of(_ep, struct goku_ep, ep);
VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);
/* don't change EPxSTATUS_EP_INVALID to READY */
if (!ep->desc && ep->num != 0) {
DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
return;
}
regs = ep->dev->regs;
size = readl(&regs->EPxSizeLA[ep->num]);
size &= DATASIZE;
/* Non-desirable behavior: FIFO_CLEAR also clears the
* endpoint halt feature. For OUT, we _could_ just read
* the bytes out (PIO, if !ep->dma); for in, no choice.
*/
if (size)
command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
static struct usb_ep_ops goku_ep_ops = {
.enable = goku_ep_enable,
.disable = goku_ep_disable,
.alloc_request = goku_alloc_request,
.free_request = goku_free_request,
.queue = goku_queue,
.dequeue = goku_dequeue,
.set_halt = goku_set_halt,
.fifo_status = goku_fifo_status,
.fifo_flush = goku_fifo_flush,
};
/*-------------------------------------------------------------------------*/
static int goku_get_frame(struct usb_gadget *_gadget)
{
return -EOPNOTSUPP;
}
static int goku_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *));
static int goku_stop(struct usb_gadget_driver *driver);
static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
.start = goku_start,
.stop = goku_stop,
// no remote wakeup
// not selfpowered
};
/*-------------------------------------------------------------------------*/
static inline char *dmastr(void)
{
if (use_dma == 0)
return "(dma disabled)";
else if (use_dma == 2)
return "(dma IN and OUT)";
else
return "(dma IN)";
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
int t;
/* int_status is the same format ... */
t = scnprintf(*next, *size,
"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
label, mask,
(mask & INT_PWRDETECT) ? " power" : "",
(mask & INT_SYSERROR) ? " sys" : "",
(mask & INT_MSTRDEND) ? " in-dma" : "",
(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
(mask & INT_MSTWREND) ? " out-dma" : "",
(mask & INT_MSTWRSET) ? " wrset" : "",
(mask & INT_ERR) ? " err" : "",
(mask & INT_SOF) ? " sof" : "",
(mask & INT_EP3NAK) ? " ep3nak" : "",
(mask & INT_EP2NAK) ? " ep2nak" : "",
(mask & INT_EP1NAK) ? " ep1nak" : "",
(mask & INT_EP3DATASET) ? " ep3" : "",
(mask & INT_EP2DATASET) ? " ep2" : "",
(mask & INT_EP1DATASET) ? " ep1" : "",
(mask & INT_STATUSNAK) ? " ep0snak" : "",
(mask & INT_STATUS) ? " ep0status" : "",
(mask & INT_SETUP) ? " setup" : "",
(mask & INT_ENDPOINT0) ? " ep0" : "",
(mask & INT_USBRESET) ? " reset" : "",
(mask & INT_SUSPEND) ? " suspend" : "");
*size -= t;
*next += t;
}
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
int *eof, void *_dev)
{
char *buf = buffer;
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
char *next = buf;
unsigned size = count;
unsigned long flags;
int i, t, is_usb_connected;
u32 tmp;
if (off != 0)
return 0;
local_irq_save(flags);
/* basic device status */
tmp = readl(&regs->power_detect);
is_usb_connected = tmp & PW_DETECT;
t = scnprintf(next, size,
"%s - %s\n"
"%s version: %s %s\n"
"Gadget driver: %s\n"
"Host %s, %s\n"
"\n",
pci_name(dev->pdev), driver_desc,
driver_name, DRIVER_VERSION, dmastr(),
dev->driver ? dev->driver->driver.name : "(none)",
is_usb_connected
? ((tmp & PW_PULLUP) ? "full speed" : "powered")
: "disconnected",
({char *state;
switch(dev->ep0state){
case EP0_DISCONNECT: state = "ep0_disconnect"; break;
case EP0_IDLE: state = "ep0_idle"; break;
case EP0_IN: state = "ep0_in"; break;
case EP0_OUT: state = "ep0_out"; break;
case EP0_STATUS: state = "ep0_status"; break;
case EP0_STALL: state = "ep0_stall"; break;
case EP0_SUSPEND: state = "ep0_suspend"; break;
default: state = "ep0_?"; break;
} state; })
);
size -= t;
next += t;
dump_intmask("int_status", readl(®s->int_status), &next, &size);
dump_intmask("int_enable", readl(®s->int_enable), &next, &size);
if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
goto done;
/* registers for (active) device and ep0 */
t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
"single.bcs %02x.%02x state %x addr %u\n",
dev->irqs, readl(®s->DataSet),
readl(®s->EPxSingle), readl(®s->EPxBCS),
readl(®s->UsbState),
readl(®s->address));
size -= t;
next += t;
tmp = readl(®s->dma_master);
t = scnprintf(next, size,
"dma %03X =" EIGHTBITS "%s %s\n", tmp,
(tmp & MST_EOPB_DIS) ? " eopb-" : "",
(tmp & MST_EOPB_ENA) ? " eopb+" : "",
(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
(tmp & MST_RD_EOPB) ? " eopb" : "",
(tmp & MST_RD_RESET) ? " in_reset" : "",
(tmp & MST_WR_RESET) ? " out_reset" : "",
(tmp & MST_RD_ENA) ? " IN" : "",
(tmp & MST_WR_ENA) ? " OUT" : "",
(tmp & MST_CONNECTION)
? "ep1in/ep2out"
: "ep1out/ep2in");
size -= t;
next += t;
/* dump endpoint queues */
for (i = 0; i < 4; i++) {
struct goku_ep *ep = &dev->ep [i];
struct goku_request *req;
if (i && !ep->desc)
continue;
tmp = readl(ep->reg_status);
t = scnprintf(next, size,
"%s %s max %u %s, irqs %lu, "
"status %02x (%s) " FOURBITS "\n",
ep->ep.name,
ep->is_in ? "in" : "out",
ep->ep.maxpacket,
ep->dma ? "dma" : "pio",
ep->irqs,
tmp, ({ char *s;
switch (tmp & EPxSTATUS_EP_MASK) {
case EPxSTATUS_EP_READY:
s = "ready"; break;
case EPxSTATUS_EP_DATAIN:
s = "packet"; break;
case EPxSTATUS_EP_FULL:
s = "full"; break;
case EPxSTATUS_EP_TX_ERR: // host will retry
s = "tx_err"; break;
case EPxSTATUS_EP_RX_ERR:
s = "rx_err"; break;
case EPxSTATUS_EP_BUSY: /* ep0 only */
s = "busy"; break;
case EPxSTATUS_EP_STALL:
s = "stall"; break;
case EPxSTATUS_EP_INVALID: // these "can't happen"
s = "invalid"; break;
default:
s = "?"; break;
}; s; }),
(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
);
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
if (list_empty(&ep->queue)) {
t = scnprintf(next, size, "\t(nothing queued)\n");
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
continue;
}
list_for_each_entry(req, &ep->queue, queue) {
if (ep->dma && req->queue.prev == &ep->queue) {
if (i == UDC_MSTRD_ENDPOINT)
tmp = readl(&regs->in_dma_current);
else
tmp = readl(&regs->out_dma_current);
tmp -= req->req.dma;
tmp++;
} else
tmp = req->req.actual;
t = scnprintf(next, size,
"\treq %p len %u/%u buf %p\n",
&req->req, tmp, req->req.length,
req->req.buf);
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
}
}
done:
local_irq_restore(flags);
*eof = 1;
return count - size;
}
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
static void udc_reinit (struct goku_udc *dev)
{
static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
unsigned i;
INIT_LIST_HEAD (&dev->gadget.ep_list);
dev->gadget.ep0 = &dev->ep [0].ep;
dev->gadget.speed = USB_SPEED_UNKNOWN;
dev->ep0state = EP0_DISCONNECT;
dev->irqs = 0;
for (i = 0; i < 4; i++) {
struct goku_ep *ep = &dev->ep[i];
ep->num = i;
ep->ep.name = names[i];
ep->reg_fifo = &dev->regs->ep_fifo [i];
ep->reg_status = &dev->regs->ep_status [i];
ep->reg_mode = &dev->regs->ep_mode[i];
ep->ep.ops = &goku_ep_ops;
list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
ep->dev = dev;
INIT_LIST_HEAD (&ep->queue);
ep_reset(NULL, ep);
}
dev->ep[0].reg_mode = NULL;
dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
list_del_init (&dev->ep[0].ep.ep_list);
}
static void udc_reset(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
writel(0, &regs->power_detect);
writel(0, &regs->int_enable);
readl(&regs->int_enable);
dev->int_enable = 0;
/* deassert reset, leave USB D+ at hi-Z (no pullup)
* don't let INT_PWRDETECT sequence begin
*/
udelay(250);
writel(PW_RESETB, &regs->power_detect);
readl(&regs->int_enable);
}
static void ep0_start(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
unsigned i;
VDBG(dev, "%s\n", __func__);
udc_reset(dev);
udc_reinit (dev);
//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, ®s->dma_master);
/* hw handles set_address, set_feature, get_status; maybe more */
writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
| G_REQMODE_GET_DESC
| G_REQMODE_CLEAR_FEAT
, &regs->reqmode);
for (i = 0; i < 4; i++)
dev->ep[i].irqs = 0;
/* can't modify descriptors after writing UsbReady */
for (i = 0; i < DESC_LEN; i++)
writel(0, &regs->descriptors[i]);
writel(0, &regs->UsbReady);
/* expect ep0 requests when the host drops reset */
writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
dev->int_enable = INT_DEVWIDE | INT_EP0;
writel(dev->int_enable, &dev->regs->int_enable);
readl(&regs->int_enable);
dev->gadget.speed = USB_SPEED_FULL;
dev->ep0state = EP0_IDLE;
}
static void udc_enable(struct goku_udc *dev)
{
/* start enumeration now, or after power detect irq */
if (readl(&dev->regs->power_detect) & PW_DETECT)
ep0_start(dev);
else {
DBG(dev, "%s\n", __func__);
dev->int_enable = INT_PWRDETECT;
writel(dev->int_enable, &dev->regs->int_enable);
}
}
/*-------------------------------------------------------------------------*/
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*/
static struct goku_udc *the_controller;
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
static int goku_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct goku_udc *dev = the_controller;
int retval;
if (!driver
|| driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
return -EINVAL;
if (!dev)
return -ENODEV;
if (dev->driver)
return -EBUSY;
/* hook up the driver */
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
retval = bind(&dev->gadget);
if (retval) {
DBG(dev, "bind to driver %s --> error %d\n",
driver->driver.name, retval);
dev->driver = NULL;
dev->gadget.dev.driver = NULL;
return retval;
}
/* then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
udc_enable(dev);
DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
return 0;
}
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
unsigned i;
DBG (dev, "%s\n", __func__);
if (dev->gadget.speed == USB_SPEED_UNKNOWN)
driver = NULL;
/* disconnect gadget driver after quiesceing hw and the driver */
udc_reset (dev);
for (i = 0; i < 4; i++)
nuke(&dev->ep [i], -ESHUTDOWN);
if (driver) {
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
}
if (dev->driver)
udc_enable(dev);
}
static int goku_stop(struct usb_gadget_driver *driver)
{
struct goku_udc *dev = the_controller;
unsigned long flags;
if (!dev)
return -ENODEV;
if (!driver || driver != dev->driver || !driver->unbind)
return -EINVAL;
spin_lock_irqsave(&dev->lock, flags);
dev->driver = NULL;
stop_activity(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
/*-------------------------------------------------------------------------*/
static void ep0_setup(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
struct usb_ctrlrequest ctrl;
int tmp;
/* read SETUP packet and enter DATA stage */
ctrl.bRequestType = readl(&regs->bRequestType);
ctrl.bRequest = readl(&regs->bRequest);
ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
| readl(&regs->wValueL));
ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
| readl(&regs->wIndexL));
ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
| readl(&regs->wLengthL));
writel(0, &regs->SetupRecv);
nuke(&dev->ep[0], 0);
dev->ep[0].stopped = 0;
if (likely(ctrl.bRequestType & USB_DIR_IN)) {
dev->ep[0].is_in = 1;
dev->ep0state = EP0_IN;
/* detect early status stages */
writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
} else {
dev->ep[0].is_in = 0;
dev->ep0state = EP0_OUT;
/* NOTE: CLEAR_FEATURE is done in software so that we can
* synchronize transfer restarts after bulk IN stalls. data
* won't even enter the fifo until the halt is cleared.
*/
switch (ctrl.bRequest) {
case USB_REQ_CLEAR_FEATURE:
switch (ctrl.bRequestType) {
case USB_RECIP_ENDPOINT:
tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
/* active endpoint */
if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
goto stall;
if (ctrl.wIndex & cpu_to_le16(
USB_DIR_IN)) {
if (!dev->ep[tmp].is_in)
goto stall;
} else {
if (dev->ep[tmp].is_in)
goto stall;
}
if (ctrl.wValue != cpu_to_le16(
USB_ENDPOINT_HALT))
goto stall;
if (tmp)
goku_clear_halt(&dev->ep[tmp]);
succeed:
/* start ep0out status stage */
writel(~(1<<0), &regs->EOP);
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
return;
case USB_RECIP_DEVICE:
/* device remote wakeup: always clear */
if (ctrl.wValue != cpu_to_le16(1))
goto stall;
VDBG(dev, "clear dev remote wakeup\n");
goto succeed;
case USB_RECIP_INTERFACE:
goto stall;
default: /* pass to gadget driver */
break;
}
break;
default:
break;
}
}
#ifdef USB_TRACE
VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
ctrl.bRequestType, ctrl.bRequest,
le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
le16_to_cpu(ctrl.wLength));
#endif
/* hw wants to know when we're configured (or not) */
dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
&& ctrl.bRequestType == USB_RECIP_DEVICE);
if (unlikely(dev->req_config))
dev->configured = (ctrl.wValue != cpu_to_le16(0));
/* delegate everything to the gadget driver.
* it may respond after this irq handler returns.
*/
spin_unlock (&dev->lock);
tmp = dev->driver->setup(&dev->gadget, &ctrl);
spin_lock (&dev->lock);
if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
command(regs, COMMAND_STALL, 0);
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STALL;
}
/* expect at least one data or status stage irq */
}
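/*
 * Example of the SETUP reassembly above, for a hypothetical
 * GET_DESCRIPTOR(DEVICE) request: the byte-wide registers would read
 * back bRequestType = 0x80, bRequest = 0x06, wValueH/wValueL =
 * 0x01/0x00 and wLengthH/wLengthL = 0x00/0x12, giving ctrl.wValue =
 * cpu_to_le16(0x0100) and ctrl.wLength = cpu_to_le16(0x0012), i.e. an
 * 18-byte device descriptor request that is delegated to the gadget
 * driver's setup() callback.
 */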
#define ACK(irqbit) { \
stat &= ~irqbit; \
writel(~irqbit, &regs->int_status); \
handled = 1; \
}
static irqreturn_t goku_irq(int irq, void *_dev)
{
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
struct goku_ep *ep;
u32 stat, handled = 0;
unsigned i, rescans = 5;
spin_lock(&dev->lock);
rescan:
stat = readl(&regs->int_status) & dev->int_enable;
if (!stat)
goto done;
dev->irqs++;
/* device-wide irqs */
if (unlikely(stat & INT_DEVWIDE)) {
if (stat & INT_SYSERROR) {
ERROR(dev, "system error\n");
stop_activity(dev, dev->driver);
stat = 0;
handled = 1;
// FIXME have a neater way to prevent re-enumeration
dev->driver = NULL;
goto done;
}
if (stat & INT_PWRDETECT) {
writel(~stat, &regs->int_status);
if (readl(&dev->regs->power_detect) & PW_DETECT) {
VDBG(dev, "connect\n");
ep0_start(dev);
} else {
DBG(dev, "disconnect\n");
if (dev->gadget.speed == USB_SPEED_FULL)
stop_activity(dev, dev->driver);
dev->ep0state = EP0_DISCONNECT;
dev->int_enable = INT_DEVWIDE;
writel(dev->int_enable, &dev->regs->int_enable);
}
stat = 0;
handled = 1;
goto done;
}
if (stat & INT_SUSPEND) {
ACK(INT_SUSPEND);
if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
switch (dev->ep0state) {
case EP0_DISCONNECT:
case EP0_SUSPEND:
goto pm_next;
default:
break;
}
DBG(dev, "USB suspend\n");
dev->ep0state = EP0_SUSPEND;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->suspend) {
spin_unlock(&dev->lock);
dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
}
} else {
if (dev->ep0state != EP0_SUSPEND) {
DBG(dev, "bogus USB resume %d\n",
dev->ep0state);
goto pm_next;
}
DBG(dev, "USB resume\n");
dev->ep0state = EP0_IDLE;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->resume) {
spin_unlock(&dev->lock);
dev->driver->resume(&dev->gadget);
spin_lock(&dev->lock);
}
}
}
pm_next:
if (stat & INT_USBRESET) { /* hub reset done */
ACK(INT_USBRESET);
INFO(dev, "USB reset done, gadget %s\n",
dev->driver->driver.name);
}
// and INT_ERR on some endpoint's crc/bitstuff/... problem
}
/* progress ep0 setup, data, or status stages.
* no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
*/
if (stat & INT_SETUP) {
ACK(INT_SETUP);
dev->ep[0].irqs++;
ep0_setup(dev);
}
if (stat & INT_STATUSNAK) {
ACK(INT_STATUSNAK|INT_ENDPOINT0);
if (dev->ep0state == EP0_IN) {
ep = &dev->ep[0];
ep->irqs++;
nuke(ep, 0);
writel(~(1<<0), &regs->EOP);
dev->ep0state = EP0_STATUS;
}
}
if (stat & INT_ENDPOINT0) {
ACK(INT_ENDPOINT0);
ep = &dev->ep[0];
ep->irqs++;
pio_advance(ep);
}
/* dma completion */
if (stat & INT_MSTRDEND) { /* IN */
ACK(INT_MSTRDEND);
ep = &dev->ep[UDC_MSTRD_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWREND) { /* OUT */
ACK(INT_MSTWREND);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWRTMOUT) { /* OUT */
ACK(INT_MSTWRTMOUT);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
ERROR(dev, "%s write timeout ?\n", ep->ep.name);
// reset dma? then dma_advance()
}
/* pio */
for (i = 1; i < 4; i++) {
u32 tmp = INT_EPxDATASET(i);
if (!(stat & tmp))
continue;
ep = &dev->ep[i];
pio_advance(ep);
if (list_empty (&ep->queue))
pio_irq_disable(dev, regs, i);
stat &= ~tmp;
handled = 1;
ep->irqs++;
}
if (rescans--)
goto rescan;
done:
(void)readl(&regs->int_enable);
spin_unlock(&dev->lock);
if (stat)
DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
readl(&regs->int_status), dev->int_enable);
return IRQ_RETVAL(handled);
}
#undef ACK
/*-------------------------------------------------------------------------*/
static void gadget_release(struct device *_dev)
{
struct goku_udc *dev = dev_get_drvdata(_dev);
kfree(dev);
}
/* tear down the binding between this driver and the pci device */
static void goku_remove(struct pci_dev *pdev)
{
struct goku_udc *dev = pci_get_drvdata(pdev);
DBG(dev, "%s\n", __func__);
usb_del_gadget_udc(&dev->gadget);
BUG_ON(dev->driver);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
remove_proc_entry(proc_node_name, NULL);
#endif
if (dev->regs)
udc_reset(dev);
if (dev->got_irq)
free_irq(pdev->irq, dev);
if (dev->regs)
iounmap(dev->regs);
if (dev->got_region)
release_mem_region(pci_resource_start (pdev, 0),
pci_resource_len (pdev, 0));
if (dev->enabled)
pci_disable_device(pdev);
if (dev->registered)
device_unregister(&dev->gadget.dev);
pci_set_drvdata(pdev, NULL);
dev->regs = NULL;
the_controller = NULL;
INFO(dev, "unbind\n");
}
/* wrap this driver around the specified pci device, but
* don't respond over USB until a gadget driver binds to us.
*/
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct goku_udc *dev = NULL;
unsigned long resource, len;
void __iomem *base = NULL;
int retval;
/* if you want to support more than one controller in a system,
* usb_gadget_driver_{register,unregister}() must change.
*/
if (the_controller) {
pr_warning("ignoring %s\n", pci_name(pdev));
return -EBUSY;
}
if (!pdev->irq) {
printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
retval = -ENODEV;
goto err;
}
/* alloc, and start init */
dev = kzalloc (sizeof *dev, GFP_KERNEL);
if (dev == NULL) {
pr_debug("enomem %s\n", pci_name(pdev));
retval = -ENOMEM;
goto err;
}
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
dev->gadget.max_speed = USB_SPEED_FULL;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.parent = &pdev->dev;
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = driver_name;
/* now all the pci goodies ... */
retval = pci_enable_device(pdev);
if (retval < 0) {
DBG(dev, "can't enable, %d\n", retval);
goto err;
}
dev->enabled = 1;
resource = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
if (!request_mem_region(resource, len, driver_name)) {
DBG(dev, "controller already in use\n");
retval = -EBUSY;
goto err;
}
dev->got_region = 1;
base = ioremap_nocache(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
goto err;
}
dev->regs = (struct goku_udc_regs __iomem *) base;
pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
/* init to known state, then setup irqs */
udc_reset(dev);
udc_reinit (dev);
if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
driver_name, dev) != 0) {
DBG(dev, "request interrupt %d failed\n", pdev->irq);
retval = -EBUSY;
goto err;
}
dev->got_irq = 1;
if (use_dma)
pci_set_master(pdev);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif
the_controller = dev;
retval = device_register(&dev->gadget.dev);
if (retval) {
put_device(&dev->gadget.dev);
goto err;
}
dev->registered = 1;
retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
if (retval)
goto err;
return 0;
err:
if (dev)
goku_remove (pdev);
return retval;
}
/*-------------------------------------------------------------------------*/
static const struct pci_device_id pci_ids[] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
.device = 0x0107, /* this UDC */
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver goku_pci_driver = {
.name = (char *) driver_name,
.id_table = pci_ids,
.probe = goku_probe,
.remove = goku_remove,
/* FIXME add power management support */
};
static int __init init (void)
{
return pci_register_driver (&goku_pci_driver);
}
module_init (init);
static void __exit cleanup (void)
{
pci_unregister_driver (&goku_pci_driver);
}
module_exit (cleanup);
| gpl-2.0 |
gdetal/kernel_msm_mptcp | arch/avr32/kernel/setup.c | 7323 | 15307 | /*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/sysreg.h>
#include <mach/board.h>
#include <mach/init.h>
extern int root_mountflags;
/*
* Initialize loops_per_jiffy as 5000000 (500MIPS).
* Better make it too large than too small...
*/
struct avr32_cpuinfo boot_cpu_data = {
.loops_per_jiffy = 5000000
};
EXPORT_SYMBOL(boot_cpu_data);
static char __initdata command_line[COMMAND_LINE_SIZE];
/*
* Standard memory resources
*/
static struct resource __initdata kernel_data = {
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
};
static struct resource __initdata kernel_code = {
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
.sibling = &kernel_data,
};
/*
* Available system RAM and reserved regions as singly linked
* lists. These lists are traversed using the sibling pointer in
* struct resource and are kept sorted at all times.
*/
static struct resource *__initdata system_ram;
static struct resource *__initdata reserved = &kernel_code;
/*
* We need to allocate these before the bootmem allocator is up and
* running, so we need this "cache". 32 entries are probably enough
* for all but the most insanely complex systems.
*/
static struct resource __initdata res_cache[32];
static unsigned int __initdata res_cache_next_free;
static void __init resource_init(void)
{
struct resource *mem, *res;
struct resource *new;
kernel_code.start = __pa(init_mm.start_code);
for (mem = system_ram; mem; mem = mem->sibling) {
new = alloc_bootmem_low(sizeof(struct resource));
memcpy(new, mem, sizeof(struct resource));
new->sibling = NULL;
if (request_resource(&iomem_resource, new))
printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
mem->start, mem->end);
}
for (res = reserved; res; res = res->sibling) {
new = alloc_bootmem_low(sizeof(struct resource));
memcpy(new, res, sizeof(struct resource));
new->sibling = NULL;
if (insert_resource(&iomem_resource, new))
printk(KERN_WARNING
"Bad reserved resource %s (%08x-%08x)\n",
res->name, res->start, res->end);
}
}
static void __init
add_physical_memory(resource_size_t start, resource_size_t end)
{
struct resource *new, *next, **pprev;
for (pprev = &system_ram, next = system_ram; next;
pprev = &next->sibling, next = next->sibling) {
if (end < next->start)
break;
if (start <= next->end) {
printk(KERN_WARNING
"Warning: Physical memory map is broken\n");
printk(KERN_WARNING
"Warning: %08x-%08x overlaps %08x-%08x\n",
start, end, next->start, next->end);
return;
}
}
if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
printk(KERN_WARNING
"Warning: Failed to add physical memory %08x-%08x\n",
start, end);
return;
}
new = &res_cache[res_cache_next_free++];
new->start = start;
new->end = end;
new->name = "System RAM";
new->flags = IORESOURCE_MEM;
*pprev = new;
}
static int __init
add_reserved_region(resource_size_t start, resource_size_t end,
const char *name)
{
struct resource *new, *next, **pprev;
if (end < start)
return -EINVAL;
if (res_cache_next_free >= ARRAY_SIZE(res_cache))
return -ENOMEM;
for (pprev = &reserved, next = reserved; next;
pprev = &next->sibling, next = next->sibling) {
if (end < next->start)
break;
if (start <= next->end)
return -EBUSY;
}
new = &res_cache[res_cache_next_free++];
new->start = start;
new->end = end;
new->name = name;
new->sibling = next;
new->flags = IORESOURCE_MEM;
*pprev = new;
return 0;
}
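/*
 * Scan one bank of system RAM for the lowest @align-aligned address at
 * which @size bytes fit without touching any reserved region. Returns
 * mem->end + 1 as a sentinel when no such gap exists, so callers must
 * check the result against the end of the bank.
 */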
static unsigned long __init
find_free_region(const struct resource *mem, resource_size_t size,
resource_size_t align)
{
struct resource *res;
unsigned long target;
target = ALIGN(mem->start, align);
for (res = reserved; res; res = res->sibling) {
if ((target + size) <= res->start)
break;
if (target <= res->end)
target = ALIGN(res->end + 1, align);
}
if ((target + size) > (mem->end + 1))
return mem->end + 1;
return target;
}
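/*
 * Reserve @size bytes of RAM, aligned to @align, from the first bank
 * that has room. On success the chosen physical address is returned
 * through @start.
 */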
static int __init
alloc_reserved_region(resource_size_t *start, resource_size_t size,
resource_size_t align, const char *name)
{
struct resource *mem;
resource_size_t target;
int ret;
for (mem = system_ram; mem; mem = mem->sibling) {
target = find_free_region(mem, size, align);
if (target <= mem->end) {
ret = add_reserved_region(target, target + size - 1,
name);
if (!ret)
*start = target;
return ret;
}
}
return -ENOMEM;
}
/*
* Early framebuffer allocation. Works as follows:
* - If fbmem_size is zero, nothing will be allocated or reserved.
* - If fbmem_start is zero when setup_bootmem() is called,
* a block of fbmem_size bytes will be reserved before bootmem
* initialization. It will be aligned to the largest page size
* that fbmem_size is a multiple of.
* - If fbmem_start is nonzero, an area of size fbmem_size will be
* reserved at the physical address fbmem_start if possible. If
* it collides with other reserved memory, a different block of
 * the same size will be allocated, just as if fbmem_start were zero.
*
* Board-specific code may use these variables to set up platform data
* for the framebuffer driver if fbmem_size is nonzero.
*/
resource_size_t __initdata fbmem_start;
resource_size_t __initdata fbmem_size;
/*
* "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
* use as framebuffer.
*
* "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
* starting at yyy to be reserved for use as framebuffer.
*
* The kernel won't verify that the memory region starting at yyy
* actually contains usable RAM.
*/
static int __init early_parse_fbmem(char *p)
{
int ret;
unsigned long align;
fbmem_size = memparse(p, &p);
if (*p == '@') {
fbmem_start = memparse(p + 1, &p);
ret = add_reserved_region(fbmem_start,
fbmem_start + fbmem_size - 1,
"Framebuffer");
if (ret) {
printk(KERN_WARNING
"Failed to reserve framebuffer memory\n");
fbmem_start = 0;
}
}
if (!fbmem_start) {
if ((fbmem_size & 0x000fffffUL) == 0)
align = 0x100000; /* 1 MiB */
else if ((fbmem_size & 0x0000ffffUL) == 0)
align = 0x10000; /* 64 KiB */
else
align = 0x1000; /* 4 KiB */
ret = alloc_reserved_region(&fbmem_start, fbmem_size,
align, "Framebuffer");
if (ret) {
printk(KERN_WARNING
"Failed to allocate framebuffer memory\n");
fbmem_size = 0;
} else {
memset(__va(fbmem_start), 0, fbmem_size);
}
}
return 0;
}
early_param("fbmem", early_parse_fbmem);
/*
* Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMmGg]".
*/
static int __init early_mem(char *p)
{
resource_size_t size, start;
start = system_ram->start;
size = memparse(p, &p);
if (*p == '@')
start = memparse(p + 1, &p);
system_ram->start = start;
system_ram->end = system_ram->start + size - 1;
return 0;
}
early_param("mem", early_mem);
static int __init parse_tag_core(struct tag *tag)
{
if (tag->hdr.size > 2) {
if ((tag->u.core.flags & 1) == 0)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
}
return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem(struct tag *tag)
{
unsigned long start, end;
/*
* Ignore zero-sized entries. If we're running standalone, the
* SDRAM code may emit such entries if something goes
* wrong...
*/
if (tag->u.mem_range.size == 0)
return 0;
start = tag->u.mem_range.addr;
end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
add_physical_memory(start, end);
return 0;
}
__tagtable(ATAG_MEM, parse_tag_mem);
static int __init parse_tag_rdimg(struct tag *tag)
{
#ifdef CONFIG_BLK_DEV_INITRD
struct tag_mem_range *mem = &tag->u.mem_range;
int ret;
if (initrd_start) {
printk(KERN_WARNING
"Warning: Only the first initrd image will be used\n");
return 0;
}
ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
"initrd");
if (ret) {
printk(KERN_WARNING
"Warning: Failed to reserve initrd memory\n");
return ret;
}
initrd_start = (unsigned long)__va(mem->addr);
initrd_end = initrd_start + mem->size;
#else
printk(KERN_WARNING "RAM disk image present, but "
"no initrd support in kernel, ignoring\n");
#endif
return 0;
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
struct tag_mem_range *mem = &tag->u.mem_range;
return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
"Reserved");
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
static int __init parse_tag_cmdline(struct tag *tag)
{
strlcpy(boot_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
static int __init parse_tag_clock(struct tag *tag)
{
/*
* We'll figure out the clocks by peeking at the system
* manager regs directly.
*/
return 0;
}
__tagtable(ATAG_CLOCK, parse_tag_clock);
/*
 * The board_number corresponds to bd->bi_board_number in U-Boot. This
 * parameter is only available during initialisation and can be used for
 * board identification.
*/
u32 __initdata board_number;
static int __init parse_tag_boardinfo(struct tag *tag)
{
board_number = tag->u.boardinfo.board_number;
return 0;
}
__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
* declarations.
*/
static int __init parse_tag(struct tag *tag)
{
extern struct tagtable __tagtable_begin, __tagtable_end;
struct tagtable *t;
for (t = &__tagtable_begin; t < &__tagtable_end; t++)
if (tag->hdr.tag == t->tag) {
t->parse(tag);
break;
}
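	/* true iff the loop broke early, i.e. the tag was recognised */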
return t < &__tagtable_end;
}
/*
* Parse all tags in the list we got from the boot loader
*/
static void __init parse_tags(struct tag *t)
{
for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
if (!parse_tag(t))
printk(KERN_WARNING
"Ignoring unrecognised tag 0x%08x\n",
t->hdr.tag);
}
/*
* Find a free memory region large enough for storing the
* bootmem bitmap.
*/
static unsigned long __init
find_bootmap_pfn(const struct resource *mem)
{
unsigned long bootmap_pages, bootmap_len;
unsigned long node_pages = PFN_UP(resource_size(mem));
unsigned long bootmap_start;
bootmap_pages = bootmem_bootmap_pages(node_pages);
bootmap_len = bootmap_pages << PAGE_SHIFT;
/*
* Find a large enough region without reserved pages for
* storing the bootmem bitmap. We can take advantage of the
* fact that all lists have been sorted.
*
* We have to check that we don't collide with any reserved
* regions, which includes the kernel image and any RAMDISK
* images.
*/
bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
return bootmap_start >> PAGE_SHIFT;
}
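/*
 * Lowmem ends at HIGHMEM_START, the limit of what the P1/P2 segments
 * can map directly.
 */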
#define MAX_LOWMEM HIGHMEM_START
#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
static void __init setup_bootmem(void)
{
unsigned bootmap_size;
unsigned long first_pfn, bootmap_pfn, pages;
unsigned long max_pfn, max_low_pfn;
unsigned node = 0;
struct resource *res;
printk(KERN_INFO "Physical memory:\n");
for (res = system_ram; res; res = res->sibling)
printk(" %08x-%08x\n", res->start, res->end);
printk(KERN_INFO "Reserved memory:\n");
for (res = reserved; res; res = res->sibling)
printk(" %08x-%08x: %s\n",
res->start, res->end, res->name);
nodes_clear(node_online_map);
if (system_ram->sibling)
printk(KERN_WARNING "Only using first memory bank\n");
for (res = system_ram; res; res = NULL) {
first_pfn = PFN_UP(res->start);
max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
bootmap_pfn = find_bootmap_pfn(res);
if (bootmap_pfn > max_pfn)
panic("No space for bootmem bitmap!\n");
if (max_low_pfn > MAX_LOWMEM_PFN) {
max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
/*
* Lowmem is memory that can be addressed
* directly through P1/P2
*/
printk(KERN_WARNING
"Node %u: Only %ld MiB of memory will be used.\n",
node, MAX_LOWMEM >> 20);
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
}
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
first_pfn, max_low_pfn);
/*
* Register fully available RAM pages with the bootmem
* allocator.
*/
pages = max_low_pfn - first_pfn;
free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
PFN_PHYS(pages));
/* Reserve space for the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
bootmap_size,
BOOTMEM_DEFAULT);
/* ...and any other reserved regions. */
for (res = reserved; res; res = res->sibling) {
if (res->start > PFN_PHYS(max_pfn))
break;
/*
* resource_init will complain about partial
* overlaps, so we'll just ignore such
* resources for now.
*/
if (res->start >= PFN_PHYS(first_pfn)
&& res->end < PFN_PHYS(max_pfn))
reserve_bootmem_node(NODE_DATA(node),
res->start,
resource_size(res),
BOOTMEM_DEFAULT);
}
node_set_online(node);
}
}
void __init setup_arch (char **cmdline_p)
{
struct clk *cpu_clk;
init_mm.start_code = (unsigned long)_text;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
/*
* Include .init section to make allocations easier. It will
* be removed before the resource is actually requested.
*/
kernel_code.start = __pa(__init_begin);
kernel_code.end = __pa(init_mm.end_code - 1);
kernel_data.start = __pa(init_mm.end_code);
kernel_data.end = __pa(init_mm.brk - 1);
parse_tags(bootloader_tags);
setup_processor();
setup_platform();
setup_board();
cpu_clk = clk_get(NULL, "cpu");
if (IS_ERR(cpu_clk)) {
printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
} else {
unsigned long cpu_hz = clk_get_rate(cpu_clk);
/*
* Well, duh, but it's probably a good idea to
* increment the use count.
*/
clk_enable(cpu_clk);
boot_cpu_data.clk = cpu_clk;
boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
printk("CPU: Running at %lu.%03lu MHz\n",
((cpu_hz + 500) / 1000) / 1000,
((cpu_hz + 500) / 1000) % 1000);
}
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
setup_bootmem();
#ifdef CONFIG_VT
conswitchp = &dummy_con;
#endif
paging_init();
resource_init();
}
| gpl-2.0 |
civato/CivZ-KatTurbine-SM-N9005-900T | arch/x86/um/elfcore.c | 11419 | 1947 | #include <linux/elf.h>
#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/elf.h>
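/*
 * vsyscall_ehdr is the address of the vsyscall/vDSO ELF image that UML
 * maps into every process (zero when none is present); the extra
 * core-dump phdrs and data below describe that image.
 */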
Elf32_Half elf_core_extra_phdrs(void)
{
return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
}
int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
unsigned long limit)
{
	if (vsyscall_ehdr) {
const struct elfhdr *const ehdrp =
(struct elfhdr *) vsyscall_ehdr;
const struct elf_phdr *const phdrp =
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
int i;
Elf32_Off ofs = 0;
for (i = 0; i < ehdrp->e_phnum; ++i) {
struct elf_phdr phdr = phdrp[i];
if (phdr.p_type == PT_LOAD) {
ofs = phdr.p_offset = offset;
offset += phdr.p_filesz;
} else {
phdr.p_offset += ofs;
}
phdr.p_paddr = 0; /* match other core phdrs */
*size += sizeof(phdr);
if (*size > limit
|| !dump_write(file, &phdr, sizeof(phdr)))
return 0;
}
}
return 1;
}
int elf_core_write_extra_data(struct file *file, size_t *size,
unsigned long limit)
{
	if (vsyscall_ehdr) {
const struct elfhdr *const ehdrp =
(struct elfhdr *) vsyscall_ehdr;
const struct elf_phdr *const phdrp =
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
int i;
for (i = 0; i < ehdrp->e_phnum; ++i) {
if (phdrp[i].p_type == PT_LOAD) {
void *addr = (void *) phdrp[i].p_vaddr;
size_t filesz = phdrp[i].p_filesz;
*size += filesz;
if (*size > limit
|| !dump_write(file, addr, filesz))
return 0;
}
}
}
return 1;
}
size_t elf_core_extra_data_size(void)
{
	if (vsyscall_ehdr) {
const struct elfhdr *const ehdrp =
(struct elfhdr *)vsyscall_ehdr;
const struct elf_phdr *const phdrp =
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
int i;
for (i = 0; i < ehdrp->e_phnum; ++i)
if (phdrp[i].p_type == PT_LOAD)
return (size_t) phdrp[i].p_filesz;
}
return 0;
}
| gpl-2.0 |
wan-qy/linux | arch/parisc/lib/io.c | 13979 | 9956 | /*
* arch/parisc/lib/io.c
*
* Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
* Copyright (c) Randolph Chung 2001 <tausq@debian.org>
*
* IO accessing functions which shouldn't be inlined because they're too big
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/io.h>
/* Copies a block of memory to a device in an efficient manner.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
goto bytecopy;
while ((unsigned long)dst & 3) {
writeb(*(char *)src, dst++);
src++;
count--;
}
while (count > 3) {
__raw_writel(*(u32 *)src, dst);
src += 4;
dst += 4;
count -= 4;
}
bytecopy:
while (count--) {
writeb(*(char *)src, dst++);
src++;
}
}
/*
** Copies a block of memory from a device in an efficient manner.
** Assumes the device can cope with 32-bit transfers. If it can't,
** don't use this function.
**
** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
** 27341/64 = 427 cyc per int
** 61311/128 = 478 cyc per short
** 122637/256 = 479 cyc per byte
** Ergo bus latencies dominant (not transfer size).
** Minimize total number of transfers at cost of CPU cycles.
** TODO: only look at src alignment and adjust the stores to dest.
*/
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
/* first compare alignment of src/dst */
if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
goto bytecopy;
if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
goto shortcopy;
/* Then check for misaligned start address */
if ((unsigned long)src & 1) {
*(u8 *)dst = readb(src);
src++;
dst++;
count--;
if (count < 2) goto bytecopy;
}
if ((unsigned long)src & 2) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
while (count > 3) {
*(u32 *)dst = __raw_readl(src);
dst += 4;
src += 4;
count -= 4;
}
shortcopy:
while (count > 1) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
bytecopy:
while (count--) {
*(char *)dst = readb(src);
src++;
dst++;
}
}
/* Sets a block of memory on a device to a given value.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
while ((unsigned long)addr & 3) {
writeb(val, addr++);
count--;
}
while (count > 3) {
__raw_writel(val32, addr);
addr += 4;
count -= 4;
}
while (count--) {
writeb(val, addr++);
}
}
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
*/
void insb (unsigned long port, void *dst, unsigned long count)
{
unsigned char *p;
p = (unsigned char *)dst;
while (((unsigned long)p) & 0x3) {
if (!count)
return;
count--;
*p = inb(port);
p++;
}
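	/*
	 * parisc is big-endian, so packing four successive port bytes
	 * MSB-first lets us store them with a single word write while
	 * preserving their order in memory.
	 */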
while (count >= 4) {
unsigned int w;
count -= 4;
w = inb(port) << 24;
w |= inb(port) << 16;
w |= inb(port) << 8;
w |= inb(port);
*(unsigned int *) p = w;
p += 4;
}
while (count) {
--count;
*p = inb(port);
p++;
}
}
/*
* Read COUNT 16-bit words from port PORT into memory starting at
* SRC. SRC must be at least short aligned. This is used by the
* IDE driver to read disk sectors. Performance is important, but
* the interfaces seems to be slow: just using the inlined version
* of the inw() breaks things.
*/
void insw (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x02: /* Buffer 16-bit aligned */
*(unsigned short *)p = cpu_to_le16(inw(port));
p += 2;
count--;
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x01: /* Buffer 8-bit aligned */
case 0x03:
		/* I don't bother with 32-bit transfers
		 * in this case; 16-bit will have to do -- DE */
--count;
l = cpu_to_le16(inw(port));
*p = l >> 8;
p++;
while (count--)
{
l2 = cpu_to_le16(inw(port));
*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
p += 2;
l = l2;
}
*p = l & 0xff;
break;
}
}
/*
* Read COUNT 32-bit words from port PORT into memory starting at
* SRC. Now works with any alignment in SRC. Performance is important,
 * but the interfaces seem to be slow: just using the inlined version
 * of inl() breaks things.
*/
void insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
*(unsigned int *)p = cpu_to_le32(inl(port));
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned short *)p = l >> 16;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
p += 4;
l = l2;
}
*(unsigned short *)p = l & 0xffff;
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned char *)p = l >> 24;
p++;
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
p += 4;
l = l2;
}
*p = l & 0xff;
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*p = l >> 24;
p++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
p += 4;
l = l2;
}
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
*p = l & 0xff;
break;
}
}
/*
* Like insb but in the opposite direction.
* Don't worry as much about doing aligned memory transfers:
* doing byte reads the "slow" way isn't nearly as slow as
* doing byte writes the slow way (no r-m-w cycle).
*/
void outsb(unsigned long port, const void * src, unsigned long count)
{
const unsigned char *p;
p = (const unsigned char *)src;
while (count) {
count--;
outb(*p, port);
p++;
}
}
/*
* Like insw but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Performance is important, but the
 * interfaces seem to be slow: just using the inlined version of
 * outw() breaks things.
*/
void outsw (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short*)p), port);
}
break;
case 0x02: /* Buffer 16-bit aligned */
outw(le16_to_cpu(*(unsigned short*)p), port);
p += 2;
count--;
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short *)p), port);
}
break;
case 0x01: /* Buffer 8-bit aligned */
		/* I don't bother with 32-bit transfers
		 * in this case; 16-bit will have to do -- DE */
l = *p << 8;
p++;
count--;
while (count)
{
count--;
l2 = *(unsigned short *)p;
p += 2;
outw(le16_to_cpu(l | l2 >> 8), port);
l = l2 << 8;
}
l2 = *(unsigned char *)p;
outw (le16_to_cpu(l | l2>>8), port);
break;
}
}
/*
* Like insl but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Works with any alignment in SRC.
 * Performance is important, but the interfaces seem to be slow:
 * just using the inlined version of outl() breaks things.
*/
void outsl (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
outl(le32_to_cpu(*(unsigned int *)p), port);
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = *(unsigned short *)p;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l << 16 | l2 >> 16), port);
l = l2;
}
l2 = *(unsigned short *)p;
outl (le32_to_cpu(l << 16 | l2), port);
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
l |= *(unsigned short *)p << 8;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 24), port);
l = l2 << 8;
}
l2 = *p;
outl (le32_to_cpu(l | l2), port);
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 8), port);
l = l2 << 24;
}
l2 = *(unsigned short *)p << 16;
p += 2;
l2 |= *p;
outl (le32_to_cpu(l | l2), port);
break;
}
}
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
| gpl-2.0 |
JonnyXDA/DarkSense-M7 | drivers/video/msm/mipi_orise_video_720p_pt.c | 156 | 2744 | /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_orise.h"
static struct msm_panel_info pinfo;
static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
{0x03, 0x0a, 0x04, 0x00, 0x20},
{0x83, 0x31, 0x13, 0x00, 0x42, 0x4d, 0x18, 0x35,
0x21, 0x03, 0x04, 0xa0},
{0x5f, 0x00, 0x00, 0x10},
{0xff, 0x00, 0x06, 0x00},
{0x0, 0x0e, 0x30, 0xc0, 0x00, 0x40, 0x03, 0x62,
0x40, 0x07, 0x07,
0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01 },
};
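/*
 * The five brace groups above initialise the regulator, timing, ctrl,
 * strength and PLL register blocks, assuming the usual msm_fb layout
 * of struct mipi_dsi_phy_ctrl.
 */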
static int __init mipi_video_orise_720p_pt_init(void)
{
int ret;
if (msm_fb_detect_client("mipi_video_orise_720p"))
return 0;
pinfo.xres = 720;
pinfo.yres = 1280;
pinfo.lcdc.xres_pad = 0;
pinfo.lcdc.yres_pad = 0;
pinfo.type = MIPI_VIDEO_PANEL;
pinfo.pdest = DISPLAY_1;
pinfo.wait_cycle = 0;
pinfo.bpp = 24;
pinfo.lcdc.h_back_porch = 160;
pinfo.lcdc.h_front_porch = 160;
pinfo.lcdc.h_pulse_width = 8;
pinfo.lcdc.v_back_porch = 32;
pinfo.lcdc.v_front_porch = 32;
pinfo.lcdc.v_pulse_width = 1;
pinfo.lcdc.border_clr = 0;
pinfo.lcdc.underflow_clr = 0xff;
pinfo.lcdc.hsync_skew = 0;
pinfo.bl_max = 200;
pinfo.bl_min = 1;
pinfo.fb_num = 2;
pinfo.mipi.mode = DSI_VIDEO_MODE;
pinfo.mipi.pulse_mode_hsa_he = TRUE;
pinfo.mipi.hfp_power_stop = TRUE;
pinfo.mipi.hbp_power_stop = TRUE;
pinfo.mipi.hsa_power_stop = FALSE;
pinfo.mipi.eof_bllp_power_stop = TRUE;
pinfo.mipi.bllp_power_stop = TRUE;
pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
pinfo.mipi.vc = 0;
pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
pinfo.mipi.data_lane0 = TRUE;
pinfo.mipi.data_lane1 = TRUE;
pinfo.mipi.data_lane2 = TRUE;
pinfo.mipi.data_lane3 = TRUE;
pinfo.mipi.t_clk_post = 0x04;
pinfo.mipi.t_clk_pre = 0x1c;
pinfo.mipi.stream = 0;
pinfo.mipi.mdp_trigger = 0;
pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
pinfo.mipi.frame_rate = 55;
pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
pinfo.mipi.tx_eot_append = TRUE;
pinfo.mipi.esc_byte_ratio = 4;
ret = mipi_orise_device_register(&pinfo, MIPI_DSI_PRIM,
MIPI_DSI_PANEL_720P_PT);
if (ret)
printk(KERN_ERR "%s: failed to register device!\n", __func__);
return ret;
}
module_init(mipi_video_orise_720p_pt_init);
| gpl-2.0 |
dseifert/linux-odroid | drivers/rtc/rtc-pcap.c | 156 | 5557 | /*
* pcap rtc code for Motorola EZX phones
*
* Copyright (c) 2008 guiming zhuo <gmzhuo@gmail.com>
* Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com>
*
* Based on Motorola's rtc.c Copyright (c) 2003-2005 Motorola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
struct pcap_rtc {
struct pcap_chip *pcap;
struct rtc_device *rtc;
};
static irqreturn_t pcap_rtc_irq(int irq, void *_pcap_rtc)
{
struct pcap_rtc *pcap_rtc = _pcap_rtc;
unsigned long rtc_events;
if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ))
rtc_events = RTC_IRQF | RTC_UF;
else if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA))
rtc_events = RTC_IRQF | RTC_AF;
else
rtc_events = 0;
rtc_update_irq(pcap_rtc->rtc, 1, rtc_events);
return IRQ_HANDLED;
}
static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct platform_device *pdev = to_platform_device(dev);
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 tod; /* time of day, seconds since midnight */
u32 days; /* days since 1/1/1970 */
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TODA, &tod);
secs = tod & PCAP_RTC_TOD_MASK;
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, &days);
secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
rtc_time_to_tm(secs, tm);
return 0;
}
static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct platform_device *pdev = to_platform_device(dev);
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 tod, days;
rtc_tm_to_time(tm, &secs);
tod = secs % SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TODA, tod);
days = secs / SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, days);
return 0;
}
static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct platform_device *pdev = to_platform_device(dev);
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
unsigned long secs;
u32 tod, days;
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TOD, &tod);
secs = tod & PCAP_RTC_TOD_MASK;
ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAY, &days);
secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
rtc_time_to_tm(secs, tm);
return rtc_valid_tm(tm);
}
static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
{
struct platform_device *pdev = to_platform_device(dev);
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
u32 tod, days;
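	/* PCAP splits wall time into days since 1/1/1970 (DAY) and seconds since midnight (TOD) */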
tod = secs % SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TOD, tod);
days = secs / SEC_PER_DAY;
ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAY, days);
return 0;
}
static int pcap_rtc_irq_enable(struct device *dev, int pirq, unsigned int en)
{
struct platform_device *pdev = to_platform_device(dev);
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
if (en)
enable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
else
disable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
return 0;
}
static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
{
return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
}
static const struct rtc_class_ops pcap_rtc_ops = {
.read_time = pcap_rtc_read_time,
.read_alarm = pcap_rtc_read_alarm,
.set_alarm = pcap_rtc_set_alarm,
.set_mmss = pcap_rtc_set_mmss,
.alarm_irq_enable = pcap_rtc_alarm_irq_enable,
};
static int pcap_rtc_probe(struct platform_device *pdev)
{
struct pcap_rtc *pcap_rtc;
int timer_irq, alarm_irq;
int err = -ENOMEM;
pcap_rtc = kmalloc(sizeof(struct pcap_rtc), GFP_KERNEL);
if (!pcap_rtc)
return err;
pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, pcap_rtc);
pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev,
&pcap_rtc_ops, THIS_MODULE);
if (IS_ERR(pcap_rtc->rtc)) {
err = PTR_ERR(pcap_rtc->rtc);
goto fail_rtc;
}
timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
err = request_irq(timer_irq, pcap_rtc_irq, 0, "RTC Timer", pcap_rtc);
if (err)
goto fail_timer;
err = request_irq(alarm_irq, pcap_rtc_irq, 0, "RTC Alarm", pcap_rtc);
if (err)
goto fail_alarm;
return 0;
fail_alarm:
free_irq(timer_irq, pcap_rtc);
fail_timer:
rtc_device_unregister(pcap_rtc->rtc);
fail_rtc:
platform_set_drvdata(pdev, NULL);
kfree(pcap_rtc);
return err;
}
static int pcap_rtc_remove(struct platform_device *pdev)
{
struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
free_irq(pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ), pcap_rtc);
free_irq(pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA), pcap_rtc);
rtc_device_unregister(pcap_rtc->rtc);
kfree(pcap_rtc);
return 0;
}
static struct platform_driver pcap_rtc_driver = {
.remove = pcap_rtc_remove,
.driver = {
.name = "pcap-rtc",
.owner = THIS_MODULE,
},
};
static int __init rtc_pcap_init(void)
{
return platform_driver_probe(&pcap_rtc_driver, pcap_rtc_probe);
}
static void __exit rtc_pcap_exit(void)
{
platform_driver_unregister(&pcap_rtc_driver);
}
module_init(rtc_pcap_init);
module_exit(rtc_pcap_exit);
MODULE_DESCRIPTION("Motorola pcap rtc driver");
MODULE_AUTHOR("guiming zhuo <gmzhuo@gmail.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
371816210/kk44 | drivers/hid/hid-core.c | 412 | 73499 | /*
* HID support for Linux
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2010 Jiri Kosina
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include "hid-ids.h"
/*
* Version Information
*/
#define DRIVER_DESC "HID core driver"
#define DRIVER_LICENSE "GPL"
int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
MODULE_PARM_DESC(debug, "toggle HID debugging messages");
EXPORT_SYMBOL_GPL(hid_debug);
/*
* Register a new report for a device.
*/
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
{
struct hid_report_enum *report_enum = device->report_enum + type;
struct hid_report *report;
if (report_enum->report_id_hash[id])
return report_enum->report_id_hash[id];
report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
if (!report)
return NULL;
if (id != 0)
report_enum->numbered = 1;
report->id = id;
report->type = type;
report->size = 0;
report->device = device;
report_enum->report_id_hash[id] = report;
list_add_tail(&report->list, &report_enum->report_list);
return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);
/*
* Register a new field for this report.
*/
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
{
struct hid_field *field;
if (report->maxfield == HID_MAX_FIELDS) {
dbg_hid("too many fields in report\n");
return NULL;
}
field = kzalloc((sizeof(struct hid_field) +
usages * sizeof(struct hid_usage) +
values * sizeof(unsigned)), GFP_KERNEL);
if (!field)
return NULL;
field->index = report->maxfield++;
report->field[field->index] = field;
field->usage = (struct hid_usage *)(field + 1);
field->value = (s32 *)(field->usage + usages);
field->report = report;
return field;
}
/*
* Open a collection. The type/usage is pushed on the stack.
*/
static int open_collection(struct hid_parser *parser, unsigned type)
{
struct hid_collection *collection;
unsigned usage;
usage = parser->local.usage[0];
if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
dbg_hid("collection stack overflow\n");
return -1;
}
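	/* grow the per-device collection array by doubling it when full */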
if (parser->device->maxcollection == parser->device->collection_size) {
collection = kmalloc(sizeof(struct hid_collection) *
parser->device->collection_size * 2, GFP_KERNEL);
if (collection == NULL) {
dbg_hid("failed to reallocate collection array\n");
return -1;
}
memcpy(collection, parser->device->collection,
sizeof(struct hid_collection) *
parser->device->collection_size);
memset(collection + parser->device->collection_size, 0,
sizeof(struct hid_collection) *
parser->device->collection_size);
kfree(parser->device->collection);
parser->device->collection = collection;
parser->device->collection_size *= 2;
}
parser->collection_stack[parser->collection_stack_ptr++] =
parser->device->maxcollection;
collection = parser->device->collection +
parser->device->maxcollection++;
collection->type = type;
collection->usage = usage;
collection->level = parser->collection_stack_ptr - 1;
if (type == HID_COLLECTION_APPLICATION)
parser->device->maxapplication++;
return 0;
}
/*
* Close a collection.
*/
static int close_collection(struct hid_parser *parser)
{
if (!parser->collection_stack_ptr) {
dbg_hid("collection stack underflow\n");
return -1;
}
parser->collection_stack_ptr--;
return 0;
}
/*
* Climb up the stack, search for the specified collection type
* and return the usage.
*/
static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
struct hid_collection *collection = parser->device->collection;
int n;
for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
unsigned index = parser->collection_stack[n];
if (collection[index].type == type)
return collection[index].usage;
}
return 0; /* we know nothing about this usage type */
}
/*
* Add a usage to the temporary parser table.
*/
static int hid_add_usage(struct hid_parser *parser, unsigned usage)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
dbg_hid("usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
parser->local.usage_index++;
return 0;
}
/*
* Register a new field for this report.
*/
static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
struct hid_report *report;
struct hid_field *field;
int usages;
unsigned offset;
int i;
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
dbg_hid("hid_register_report failed\n");
return -1;
}
if (parser->global.logical_maximum < parser->global.logical_minimum) {
dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
return -1;
}
offset = report->size;
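	/* report->size and field offsets are accounted in bits, not bytes */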
report->size += parser->global.report_size * parser->global.report_count;
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
usages = max_t(int, parser->local.usage_index, parser->global.report_count);
field = hid_register_field(report, usages, parser->global.report_count);
if (!field)
return 0;
field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
for (i = 0; i < usages; i++) {
int j = i;
/* Duplicate the last usage we parsed if we have excess values */
if (i >= parser->local.usage_index)
j = parser->local.usage_index - 1;
field->usage[i].hid = parser->local.usage[j];
field->usage[i].collection_index =
parser->local.collection_index[j];
}
field->maxusage = usages;
field->flags = flags;
field->report_offset = offset;
field->report_type = report_type;
field->report_size = parser->global.report_size;
field->report_count = parser->global.report_count;
field->logical_minimum = parser->global.logical_minimum;
field->logical_maximum = parser->global.logical_maximum;
field->physical_minimum = parser->global.physical_minimum;
field->physical_maximum = parser->global.physical_maximum;
field->unit_exponent = parser->global.unit_exponent;
field->unit = parser->global.unit;
return 0;
}
/*
* Read data value from item.
*/
static u32 item_udata(struct hid_item *item)
{
switch (item->size) {
case 1: return item->data.u8;
case 2: return item->data.u16;
case 4: return item->data.u32;
}
return 0;
}
static s32 item_sdata(struct hid_item *item)
{
switch (item->size) {
case 1: return item->data.s8;
case 2: return item->data.s16;
case 4: return item->data.s32;
}
return 0;
}
/*
* Process a global item.
*/
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
switch (item->tag) {
case HID_GLOBAL_ITEM_TAG_PUSH:
if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
dbg_hid("global environment stack overflow\n");
return -1;
}
memcpy(parser->global_stack + parser->global_stack_ptr++,
&parser->global, sizeof(struct hid_global));
return 0;
case HID_GLOBAL_ITEM_TAG_POP:
if (!parser->global_stack_ptr) {
dbg_hid("global environment stack underflow\n");
return -1;
}
memcpy(&parser->global, parser->global_stack +
--parser->global_stack_ptr, sizeof(struct hid_global));
return 0;
case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
parser->global.usage_page = item_udata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
parser->global.logical_minimum = item_sdata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
if (parser->global.logical_minimum < 0)
parser->global.logical_maximum = item_sdata(item);
else
parser->global.logical_maximum = item_udata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
parser->global.physical_minimum = item_sdata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
if (parser->global.physical_minimum < 0)
parser->global.physical_maximum = item_sdata(item);
else
parser->global.physical_maximum = item_udata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
parser->global.unit_exponent = item_sdata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT:
parser->global.unit = item_udata(item);
return 0;
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
if (parser->global.report_size > 96) {
dbg_hid("invalid report_size %d\n",
parser->global.report_size);
return -1;
}
return 0;
case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
parser->global.report_count = item_udata(item);
if (parser->global.report_count > HID_MAX_USAGES) {
dbg_hid("invalid report_count %d\n",
parser->global.report_count);
return -1;
}
return 0;
case HID_GLOBAL_ITEM_TAG_REPORT_ID:
parser->global.report_id = item_udata(item);
if (parser->global.report_id == 0) {
dbg_hid("report_id 0 is invalid\n");
return -1;
}
return 0;
default:
dbg_hid("unknown global tag 0x%x\n", item->tag);
return -1;
}
}
/*
* Process a local item.
*/
static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
__u32 data;
unsigned n;
data = item_udata(item);
switch (item->tag) {
case HID_LOCAL_ITEM_TAG_DELIMITER:
if (data) {
/*
* We treat items before the first delimiter
* as global to all usage sets (branch 0).
 * At the moment we process only these global
* items and the first delimiter set.
*/
if (parser->local.delimiter_depth != 0) {
dbg_hid("nested delimiters\n");
return -1;
}
parser->local.delimiter_depth++;
parser->local.delimiter_branch++;
} else {
if (parser->local.delimiter_depth < 1) {
dbg_hid("bogus close delimiter\n");
return -1;
}
parser->local.delimiter_depth--;
}
return 1;
case HID_LOCAL_ITEM_TAG_USAGE:
if (parser->local.delimiter_branch > 1) {
dbg_hid("alternative usage ignored\n");
return 0;
}
if (item->size <= 2)
data = (parser->global.usage_page << 16) + data;
return hid_add_usage(parser, data);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
if (parser->local.delimiter_branch > 1) {
dbg_hid("alternative usage ignored\n");
return 0;
}
if (item->size <= 2)
data = (parser->global.usage_page << 16) + data;
parser->local.usage_minimum = data;
return 0;
case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
if (parser->local.delimiter_branch > 1) {
dbg_hid("alternative usage ignored\n");
return 0;
}
if (item->size <= 2)
data = (parser->global.usage_page << 16) + data;
for (n = parser->local.usage_minimum; n <= data; n++)
if (hid_add_usage(parser, n)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
return 0;
default:
dbg_hid("unknown local item tag 0x%x\n", item->tag);
return 0;
}
return 0;
}
/*
* Process a main item.
*/
static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
__u32 data;
int ret;
data = item_udata(item);
switch (item->tag) {
case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
ret = open_collection(parser, data & 0xff);
break;
case HID_MAIN_ITEM_TAG_END_COLLECTION:
ret = close_collection(parser);
break;
case HID_MAIN_ITEM_TAG_INPUT:
ret = hid_add_field(parser, HID_INPUT_REPORT, data);
break;
case HID_MAIN_ITEM_TAG_OUTPUT:
ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
break;
case HID_MAIN_ITEM_TAG_FEATURE:
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
dbg_hid("unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
return ret;
}
/*
* Process a reserved item.
*/
static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
dbg_hid("reserved item type, tag 0x%x\n", item->tag);
return 0;
}
/*
* Free a report and all registered fields. The field->usage and
* field->value tables are allocated behind the field, so we only
* need to free the field itself.
*/
static void hid_free_report(struct hid_report *report)
{
unsigned n;
for (n = 0; n < report->maxfield; n++)
kfree(report->field[n]);
kfree(report);
}
/*
* Free a device structure, all reports, and all fields.
*/
static void hid_device_release(struct device *dev)
{
struct hid_device *device = container_of(dev, struct hid_device, dev);
unsigned i, j;
for (i = 0; i < HID_REPORT_TYPES; i++) {
struct hid_report_enum *report_enum = device->report_enum + i;
for (j = 0; j < 256; j++) {
struct hid_report *report = report_enum->report_id_hash[j];
if (report)
hid_free_report(report);
}
}
kfree(device->rdesc);
kfree(device->collection);
kfree(device);
}
/*
* Fetch a report description item from the data stream. We support long
* items, though they are not used yet.
*/
static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
{
u8 b;
if ((end - start) <= 0)
return NULL;
b = *start++;
item->type = (b >> 2) & 3;
item->tag = (b >> 4) & 15;
if (item->tag == HID_ITEM_TAG_LONG) {
item->format = HID_ITEM_FORMAT_LONG;
if ((end - start) < 2)
return NULL;
item->size = *start++;
item->tag = *start++;
if ((end - start) < item->size)
return NULL;
item->data.longdata = start;
start += item->size;
return start;
}
item->format = HID_ITEM_FORMAT_SHORT;
item->size = b & 3;
switch (item->size) {
case 0:
return start;
case 1:
if ((end - start) < 1)
return NULL;
item->data.u8 = *start++;
return start;
case 2:
if ((end - start) < 2)
return NULL;
item->data.u16 = get_unaligned_le16(start);
start = (__u8 *)((__le16 *)start + 1);
return start;
case 3:
item->size++;
if ((end - start) < 4)
return NULL;
item->data.u32 = get_unaligned_le32(start);
start = (__u8 *)((__le32 *)start + 1);
return start;
}
return NULL;
}
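/*
* Example decode of a short item (illustrative bytes, not from a real
* descriptor): the prefix byte 0x85 splits into size = 0x85 & 3 = 1,
* type = (0x85 >> 2) & 3 = 1 (global) and tag = (0x85 >> 4) & 15 = 8,
* so the two-byte sequence 0x85 0x01 encodes "Report ID 1".
*/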
/**
* hid_parse_report - parse device report
*
* @device: hid device
* @start: report start
* @size: report size
*
* Parse a report description into a hid_device structure. Reports are
* enumerated, fields are attached to these reports.
* Returns 0 on success, otherwise a nonzero error value.
*/
int hid_parse_report(struct hid_device *device, __u8 *start,
unsigned size)
{
struct hid_parser *parser;
struct hid_item item;
__u8 *end;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
hid_parser_global,
hid_parser_local,
hid_parser_reserved
};
if (device->driver->report_fixup)
start = device->driver->report_fixup(device, start, &size);
device->rdesc = kmemdup(start, size, GFP_KERNEL);
if (device->rdesc == NULL)
return -ENOMEM;
device->rsize = size;
parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
goto err;
}
parser->device = device;
end = start + size;
ret = -EINVAL;
while ((start = fetch_item(start, end, &item)) != NULL) {
if (item.format != HID_ITEM_FORMAT_SHORT) {
dbg_hid("unexpected long global item\n");
goto err;
}
if (dispatch_type[item.type](parser, &item)) {
dbg_hid("item %u %u %u %u parsing failed\n",
item.format, (unsigned)item.size,
(unsigned)item.type, (unsigned)item.tag);
goto err;
}
if (start == end) {
if (parser->collection_stack_ptr) {
dbg_hid("unbalanced collection at end of report description\n");
goto err;
}
if (parser->local.delimiter_depth) {
dbg_hid("unbalanced delimiter at end of report description\n");
goto err;
}
vfree(parser);
return 0;
}
}
dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
err:
vfree(parser);
return ret;
}
EXPORT_SYMBOL_GPL(hid_parse_report);
/*
* Convert a signed n-bit integer to a signed 32-bit integer. The common
* widths are handled by the compiler; the odd ones have to be done by
* hand.
*/
static s32 snto32(__u32 value, unsigned n)
{
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
case 32: return ((__s32)value);
}
return value & (1 << (n - 1)) ? value | (-1 << n) : value;
}
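/*
* Worked examples for the hand-made path above (inputs assumed):
* snto32(0x0f, 5) leaves bit 4 clear, so the result stays 15;
* snto32(0x1f, 5) finds bit 4 set, so 0x1f | (-1 << 5) yields -1.
*/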
/*
* Convert a signed 32-bit integer to a signed n-bit integer.
*/
static u32 s32ton(__s32 value, unsigned n)
{
s32 a = value >> (n - 1);
if (a && a != -1)
return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
return value & ((1 << n) - 1);
}
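/*
* s32ton() saturates values that do not fit into n bits, e.g. (assumed
* inputs): s32ton(100, 5) computes a = 6, so the positive value clamps
* to (1 << 4) - 1 = 15, while s32ton(-1, 5) fits and returns 0x1f.
*/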
/*
* Extract/implement a data field from/to a little endian report (bit array).
*
* Code sort-of follows HID spec:
* http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*
* While the USB HID spec allows unlimited length bit fields in "report
* descriptors", most devices never use more than 16 bits.
* One model of UPS is claimed to report "LINEV" as a 32-bit field.
* Search linux-kernel and linux-usb-devel archives for "hid-core extract".
*/
static __u32 extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n)
{
u64 x;
if (n > 32)
hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
report += offset >> 3; /* adjust byte index */
offset &= 7; /* now only need bit offset into one byte */
x = get_unaligned_le64(report);
x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
return (u32) x;
}
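/*
* Illustration with made-up report bytes 0xab 0xcd ...: extract(hid,
* report, 4, 8) loads the little endian window 0x...cdab, shifts it
* right by the bit offset 4 and masks to 8 bits, returning 0xda - an
* 8-bit field straddling the first two report bytes.
*/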
/*
* "implement" : set bits in a little endian bit stream.
* Same concepts as "extract" (see comments above).
* The data mangled in the bit stream remains in little endian
* order the whole time. It makes more sense to talk about the
* endianness of register values by considering a register
* a "cached" copy of the little endian bit stream.
*/
static void implement(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n, __u32 value)
{
u64 x;
u64 m = (1ULL << n) - 1;
if (n > 32)
hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
__func__, n, current->comm);
if (value > m)
hid_warn(hid, "%s() called with too large value %d! (%s)\n",
__func__, value, current->comm);
WARN_ON(value > m);
value &= m;
report += offset >> 3;
offset &= 7;
x = get_unaligned_le64(report);
x &= ~(m << offset);
x |= ((u64)value) << offset;
put_unaligned_le64(x, report);
}
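/*
* Illustration (assumed values): implement(hid, report, 2, 3, 0x5)
* builds the mask m = 7, clears bits 2..4 of the little endian window
* and ORs in 0x5 << 2 = 0x14, leaving neighbouring fields untouched.
*/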
/*
* Search an array for a value.
*/
static int search(__s32 *array, __s32 value, unsigned n)
{
while (n--) {
if (*array++ == value)
return 0;
}
return -1;
}
/**
* hid_match_report - check if driver's raw_event should be called
*
* @hid: hid device
* @report: report whose type is matched against the driver's report_table
*
* compare hid->driver->report_table->report_type to report->type
*/
static int hid_match_report(struct hid_device *hid, struct hid_report *report)
{
const struct hid_report_id *id = hid->driver->report_table;
if (!id) /* NULL means all */
return 1;
for (; id->report_type != HID_TERMINATOR; id++)
if (id->report_type == HID_ANY_ID ||
id->report_type == report->type)
return 1;
return 0;
}
/**
* hid_match_usage - check if driver's event should be called
*
* @hid: hid device
* @usage: usage to match against
*
* compare hid->driver->usage_table->usage_{type,code} to
* usage->usage_{type,code}
*/
static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
{
const struct hid_usage_id *id = hid->driver->usage_table;
if (!id) /* NULL means all */
return 1;
for (; id->usage_type != HID_ANY_ID - 1; id++)
if ((id->usage_hid == HID_ANY_ID ||
id->usage_hid == usage->hid) &&
(id->usage_type == HID_ANY_ID ||
id->usage_type == usage->type) &&
(id->usage_code == HID_ANY_ID ||
id->usage_code == usage->code))
return 1;
return 0;
}
static void hid_process_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value, int interrupt)
{
struct hid_driver *hdrv = hid->driver;
int ret;
hid_dump_input(hid, usage, value);
if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
ret = hdrv->event(hid, field, usage, value);
if (ret != 0) {
if (ret < 0)
dbg_hid("%s's event failed with %d\n",
hdrv->name, ret);
return;
}
}
if (hid->claimed & HID_CLAIMED_INPUT)
hidinput_hid_event(hid, field, usage, value);
if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
hid->hiddev_hid_event(hid, field, usage, value);
}
/*
* Analyse a received field, and fetch the data from it. The field
* content is stored for next report processing (we do differential
* reporting to the layer).
*/
static void hid_input_field(struct hid_device *hid, struct hid_field *field,
__u8 *data, int interrupt)
{
unsigned n;
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
__s32 min = field->logical_minimum;
__s32 max = field->logical_maximum;
__s32 *value;
value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
if (!value)
return;
for (n = 0; n < count; n++) {
value[n] = min < 0 ?
snto32(extract(hid, data, offset + n * size, size),
size) :
extract(hid, data, offset + n * size, size);
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
for (n = 0; n < count; n++) {
if (HID_MAIN_ITEM_VARIABLE & field->flags) {
hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
continue;
}
if (field->value[n] >= min && field->value[n] <= max
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
if (value[n] >= min && value[n] <= max
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
}
memcpy(field->value, value, count * sizeof(__s32));
exit:
kfree(value);
}
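/*
* Differential-reporting example for the two search() passes above
* (keys assumed): old array report {KEY_A, KEY_B}, new report
* {KEY_B, KEY_C} - KEY_A is missing from the new values, so a release
* (value 0) is emitted; KEY_C is missing from the old values, so a
* press (value 1) is emitted; KEY_B produces no event at all.
*/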
/*
* Output the field into the report.
*/
static void hid_output_field(const struct hid_device *hid,
struct hid_field *field, __u8 *data)
{
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
unsigned n;
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
implement(hid, data, offset + n * size, size,
s32ton(field->value[n], size));
else /* unsigned values */
implement(hid, data, offset + n * size, size,
field->value[n]);
}
}
/*
* Create a report.
*/
void hid_output_report(struct hid_report *report, __u8 *data)
{
unsigned n;
if (report->id > 0)
*data++ = report->id;
memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);
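/*
* Wire layout sketch for a hypothetical numbered report with id 3 and
* size 16 bits: data[0] = 0x03 is followed by a zeroed two-byte payload
* that the fields are implement()ed into, so the caller must supply a
* buffer of at least 1 + ((size - 1) >> 3) + 1 bytes.
*/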
/*
* Set a field value. The report this field belongs to has to be
* created and transferred to the device, to set this value in the
* device.
*/
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
unsigned size = field->report_size;
hid_dump_input(field->report->device, field->usage + offset, value);
if (offset >= field->report_count) {
dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
return -1;
}
if (field->logical_minimum < 0) {
if (value != snto32(s32ton(value, size), size)) {
dbg_hid("value %d is out of range\n", value);
return -1;
}
}
field->value[offset] = value;
return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);
static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
const u8 *data)
{
struct hid_report *report;
unsigned int n = 0; /* Normally report number is 0 */
/* Device uses numbered reports, data[0] is report number */
if (report_enum->numbered)
n = *data;
report = report_enum->report_id_hash[n];
if (report == NULL)
dbg_hid("undefined report_id %u received\n", n);
return report;
}
void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
unsigned int a;
int rsize, csize = size;
u8 *cdata = data;
report = hid_get_report(report_enum, data);
if (!report)
return;
if (report_enum->numbered) {
cdata++;
csize--;
}
rsize = ((report->size - 1) >> 3) + 1;
if (rsize > HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
csize, rsize);
memset(cdata + csize, 0, rsize - csize);
}
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
hid->hiddev_report_event(hid, report);
if (hid->claimed & HID_CLAIMED_HIDRAW)
hidraw_report_event(hid, data, size);
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
if (hid->claimed & HID_CLAIMED_INPUT)
hidinput_report_event(hid, report);
}
EXPORT_SYMBOL_GPL(hid_report_raw_event);
/**
* hid_input_report - report data from lower layer (usb, bt...)
*
* @hid: hid device
* @type: HID report type (HID_*_REPORT)
* @data: report contents
* @size: size of data parameter
* @interrupt: distinguish between interrupt and control transfers
*
* This is data entry for lower layers.
*/
int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
struct hid_report *report;
char *buf;
unsigned int i;
int ret;
if (!hid || !hid->driver)
return -ENODEV;
report_enum = hid->report_enum + type;
hdrv = hid->driver;
if (!size) {
dbg_hid("empty report\n");
return -1;
}
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
goto nomem;
/* dump the report */
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
"\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
hid_debug_event(hid, buf);
for (i = 0; i < size; i++) {
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
" %02x", data[i]);
hid_debug_event(hid, buf);
}
hid_debug_event(hid, "\n");
kfree(buf);
nomem:
report = hid_get_report(report_enum, data);
if (!report)
return -1;
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
return ret < 0 ? ret : 0;
}
hid_report_raw_event(hid, type, data, size, interrupt);
return 0;
}
EXPORT_SYMBOL_GPL(hid_input_report);
static bool hid_match_one_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
return id->bus == hdev->bus &&
(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
(id->product == HID_ANY_ID || id->product == hdev->product);
}
static const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
if (hid_match_one_id(hdev, id))
return id;
return NULL;
}
static const struct hid_device_id hid_hiddev_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
{ }
};
static bool hid_hiddev(struct hid_device *hdev)
{
return !!hid_match_id(hdev, hid_hiddev_list);
}
static ssize_t
read_report_descriptor(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
if (off >= hdev->rsize)
return 0;
if (off + count > hdev->rsize)
count = hdev->rsize - off;
memcpy(buf, hdev->rdesc + off, count);
return count;
}
static struct bin_attribute dev_bin_attr_report_desc = {
.attr = { .name = "report_descriptor", .mode = 0444 },
.read = read_report_descriptor,
.size = HID_MAX_DESCRIPTOR_SIZE,
};
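/*
* From userspace the attribute above (registered in hid_connect() below)
* can be dumped with e.g. (the device name is a made-up example):
* hexdump -C /sys/bus/hid/devices/0003:046D:C52B.0001/report_descriptor
*/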
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
"Joystick", "Gamepad", "Keyboard", "Keypad",
"Multi-Axis Controller"
};
const char *type, *bus;
char buf[64];
unsigned int i;
int len;
int ret;
if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
if (hdev->bus != BUS_USB)
connect_mask &= ~HID_CONNECT_HIDDEV;
if (hid_hiddev(hdev))
connect_mask |= HID_CONNECT_HIDDEV_FORCE;
if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
connect_mask & HID_CONNECT_HIDINPUT_FORCE))
hdev->claimed |= HID_CLAIMED_INPUT;
if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
!hdev->hiddev_connect(hdev,
connect_mask & HID_CONNECT_HIDDEV_FORCE))
hdev->claimed |= HID_CLAIMED_HIDDEV;
if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
hdev->claimed |= HID_CLAIMED_HIDRAW;
if (!hdev->claimed) {
hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
return -ENODEV;
}
if ((hdev->claimed & HID_CLAIMED_INPUT) &&
(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
hdev->ff_init(hdev);
len = 0;
if (hdev->claimed & HID_CLAIMED_INPUT)
len += sprintf(buf + len, "input");
if (hdev->claimed & HID_CLAIMED_HIDDEV)
len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
hdev->minor);
if (hdev->claimed & HID_CLAIMED_HIDRAW)
len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
((struct hidraw *)hdev->hidraw)->minor);
type = "Device";
for (i = 0; i < hdev->maxcollection; i++) {
struct hid_collection *col = &hdev->collection[i];
if (col->type == HID_COLLECTION_APPLICATION &&
(col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(col->usage & 0xffff) < ARRAY_SIZE(types)) {
type = types[col->usage & 0xffff];
break;
}
}
switch (hdev->bus) {
case BUS_USB:
bus = "USB";
break;
case BUS_BLUETOOTH:
bus = "BLUETOOTH";
break;
default:
bus = "<UNKNOWN>";
}
ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
if (ret)
hid_warn(hdev,
"can't create sysfs report descriptor attribute err: %d\n", ret);
hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
buf, bus, hdev->version >> 8, hdev->version & 0xff,
type, hdev->name, hdev->phys);
return 0;
}
EXPORT_SYMBOL_GPL(hid_connect);
void hid_disconnect(struct hid_device *hdev)
{
device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
if (hdev->claimed & HID_CLAIMED_INPUT)
hidinput_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDDEV)
hdev->hiddev_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDRAW)
hidraw_disconnect(hdev);
}
EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR, USB_DEVICE_ID_ACTIONSTAR_1011) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH, USB_DEVICE_ID_GOODTOUCH_000f) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, USB_DEVICE_ID_MTP_SITRONIX) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL, USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ }
};
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
};
/**
* store_new_id - add a new HID device ID to this driver and re-probe devices
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
*
* Adds a new dynamic hid device ID to this driver,
* and causes the driver to probe for all devices again.
*/
static ssize_t store_new_id(struct device_driver *drv, const char *buf,
size_t count)
{
struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
struct hid_dynid *dynid;
__u32 bus, vendor, product;
unsigned long driver_data = 0;
int ret;
ret = sscanf(buf, "%x %x %x %lx",
&bus, &vendor, &product, &driver_data);
if (ret < 3)
return -EINVAL;
dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
if (!dynid)
return -ENOMEM;
dynid->id.bus = bus;
dynid->id.vendor = vendor;
dynid->id.product = product;
dynid->id.driver_data = driver_data;
spin_lock(&hdrv->dyn_lock);
list_add_tail(&dynid->list, &hdrv->dyn_list);
spin_unlock(&hdrv->dyn_lock);
ret = 0;
if (get_driver(&hdrv->driver)) {
ret = driver_attach(&hdrv->driver);
put_driver(&hdrv->driver);
}
return ret ? : count;
}
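/*
* Userspace usage sketch for the attribute below (ids are illustrative
* only): echo "0003 046d c52b" > /sys/bus/hid/drivers/<driver>/new_id
* binds an unlisted device to an already loaded driver; bus, vendor and
* product are parsed as hex, the trailing driver_data is optional.
*/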
static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
static void hid_free_dynids(struct hid_driver *hdrv)
{
struct hid_dynid *dynid, *n;
spin_lock(&hdrv->dyn_lock);
list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
list_del(&dynid->list);
kfree(dynid);
}
spin_unlock(&hdrv->dyn_lock);
}
static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
struct hid_driver *hdrv)
{
struct hid_dynid *dynid;
spin_lock(&hdrv->dyn_lock);
list_for_each_entry(dynid, &hdrv->dyn_list, list) {
if (hid_match_one_id(hdev, &dynid->id)) {
spin_unlock(&hdrv->dyn_lock);
return &dynid->id;
}
}
spin_unlock(&hdrv->dyn_lock);
return hid_match_id(hdev, hdrv->id_table);
}
static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
if (!hid_match_device(hdev, hdrv))
return 0;
/* the generic driver wants all devices that have no specialized driver */
if (!strncmp(hdrv->name, "generic-", 8))
return !hid_match_id(hdev, hid_have_special_driver);
return 1;
}
static int hid_device_probe(struct device *dev)
{
struct hid_driver *hdrv = container_of(dev->driver,
struct hid_driver, driver);
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
const struct hid_device_id *id;
int ret = 0;
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL)
return -ENODEV;
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
} else { /* default probe */
ret = hid_parse(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
if (ret)
hdev->driver = NULL;
}
return ret;
}
static int hid_device_remove(struct device *dev)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct hid_driver *hdrv = hdev->driver;
if (hdrv) {
if (hdrv->remove)
hdrv->remove(hdev);
else /* default remove */
hid_hw_stop(hdev);
hdev->driver = NULL;
}
return 0;
}
static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
hdev->bus, hdev->vendor, hdev->product))
return -ENOMEM;
if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
return -ENOMEM;
if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
return -ENOMEM;
if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
return -ENOMEM;
if (add_uevent_var(env, "MODALIAS=hid:b%04Xv%08Xp%08X",
hdev->bus, hdev->vendor, hdev->product))
return -ENOMEM;
return 0;
}
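/*
* Resulting uevent environment for an assumed USB device with vendor
* 0x046d and product 0xc52b:
* HID_ID=0003:0000046D:0000C52B
* MODALIAS=hid:b0003v0000046Dp0000C52B
* which is what udev and modprobe match hid driver aliases against.
*/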
static struct bus_type hid_bus_type = {
.name = "hid",
.match = hid_bus_match,
.probe = hid_device_probe,
.remove = hid_device_remove,
.uevent = hid_uevent,
};
/* a list of devices that shouldn't be handled by HID core at all */
static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_YUREX) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ }
};
/**
* hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
*
* There are composite devices for which we want to ignore only a certain
* interface. This is a list of devices for which only the mouse interface will
* be ignored. This allows a dedicated driver to take care of the interface.
*/
static const struct hid_device_id hid_mouse_ignore_list[] = {
/* appletouch driver */
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
};
static bool hid_ignore(struct hid_device *hdev)
{
switch (hdev->vendor) {
case USB_VENDOR_ID_CODEMERCS:
/* ignore all Code Mercenaries IOWarrior devices */
if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
return true;
break;
case USB_VENDOR_ID_LOGITECH:
if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
return true;
break;
case USB_VENDOR_ID_SOUNDGRAPH:
if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
return true;
break;
case USB_VENDOR_ID_HANWANG:
if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
return true;
break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
hid_match_id(hdev, hid_mouse_ignore_list))
return true;
return !!hid_match_id(hdev, hid_ignore_list);
}
int hid_add_device(struct hid_device *hdev)
{
static atomic_t id = ATOMIC_INIT(0);
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
return -EBUSY;
/* we need to kill them here, otherwise they would stay allocated
* waiting for a matching driver to come along */
if (!(hdev->quirks & HID_QUIRK_NO_IGNORE)
&& (hid_ignore(hdev) || (hdev->quirks & HID_QUIRK_IGNORE)))
return -ENODEV;
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
hdev->status |= HID_STAT_ADDED;
else
hid_debug_unregister(hdev);
return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);
/**
* hid_allocate_device - allocate new hid device descriptor
*
* Allocate and initialize a hid device, so that hid_destroy_device can
* later be used to free it.
*
* New hid_device pointer is returned on success, otherwise ERR_PTR encoded
* error value.
*/
struct hid_device *hid_allocate_device(void)
{
struct hid_device *hdev;
unsigned int i;
int ret = -ENOMEM;
hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
if (hdev == NULL)
return ERR_PTR(ret);
device_initialize(&hdev->dev);
hdev->dev.release = hid_device_release;
hdev->dev.bus = &hid_bus_type;
hdev->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
sizeof(struct hid_collection), GFP_KERNEL);
if (hdev->collection == NULL)
goto err;
hdev->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
for (i = 0; i < HID_REPORT_TYPES; i++)
INIT_LIST_HEAD(&hdev->report_enum[i].report_list);
init_waitqueue_head(&hdev->debug_wait);
INIT_LIST_HEAD(&hdev->debug_list);
return hdev;
err:
put_device(&hdev->dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
static void hid_remove_device(struct hid_device *hdev)
{
if (hdev->status & HID_STAT_ADDED) {
device_del(&hdev->dev);
hid_debug_unregister(hdev);
hdev->status &= ~HID_STAT_ADDED;
}
}
/**
* hid_destroy_device - free previously allocated device
*
* @hdev: hid device
*
* If you allocated the hid_device through hid_allocate_device, you must
* only free it with this function.
*/
void hid_destroy_device(struct hid_device *hdev)
{
hid_remove_device(hdev);
put_device(&hdev->dev);
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
const char *mod_name)
{
int ret;
hdrv->driver.name = hdrv->name;
hdrv->driver.bus = &hid_bus_type;
hdrv->driver.owner = owner;
hdrv->driver.mod_name = mod_name;
INIT_LIST_HEAD(&hdrv->dyn_list);
spin_lock_init(&hdrv->dyn_lock);
ret = driver_register(&hdrv->driver);
if (ret)
return ret;
ret = driver_create_file(&hdrv->driver, &driver_attr_new_id);
if (ret)
driver_unregister(&hdrv->driver);
return ret;
}
EXPORT_SYMBOL_GPL(__hid_register_driver);
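/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * __hid_register_driver() through the hid_register_driver() macro, which
 * supplies THIS_MODULE and KBUILD_MODNAME for them:
 *
 *	static struct hid_driver my_hid_driver = {
 *		.name = "my-hid",
 *		.id_table = my_id_table,
 *	};
 *	ret = hid_register_driver(&my_hid_driver);
 *
 * "my_hid_driver", "my-hid" and "my_id_table" are placeholder names.
 */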
void hid_unregister_driver(struct hid_driver *hdrv)
{
driver_remove_file(&hdrv->driver, &driver_attr_new_id);
driver_unregister(&hdrv->driver);
hid_free_dynids(hdrv);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
int hid_check_keys_pressed(struct hid_device *hid)
{
struct hid_input *hidinput;
int i;
if (!(hid->claimed & HID_CLAIMED_INPUT))
return 0;
list_for_each_entry(hidinput, &hid->inputs, list) {
for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
if (hidinput->input->key[i])
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
static int __init hid_init(void)
{
int ret;
if (hid_debug)
pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
"debugfs is now used for inspecting the device (report descriptor, reports)\n");
ret = bus_register(&hid_bus_type);
if (ret) {
pr_err("can't register hid bus\n");
goto err;
}
ret = hidraw_init();
if (ret)
goto err_bus;
hid_debug_init();
return 0;
err_bus:
bus_unregister(&hid_bus_type);
err:
return ret;
}
static void __exit hid_exit(void)
{
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
}
module_init(hid_init);
module_exit(hid_exit);
MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE(DRIVER_LICENSE);
| gpl-2.0 |
DarkenedSky94/android_kernel_samsung_smdk4412 | arch/arm/plat-s5p/dev-fimc-s5p.c | 412 | 5099 | /* linux/arch/arm/plat-s5p/dev-fimc-s5p.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Device definition for FIMC device
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <mach/map.h>
#include <asm/irq.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fimc.h>
#ifdef CONFIG_USE_FIMC_CMA
#include <linux/dma-mapping.h>
static u64 s3c_fimc_dmamask = DMA_BIT_MASK(32);
#endif
static struct resource s3c_fimc0_resource[] = {
[0] = {
.start = S5P_PA_FIMC0,
.end = S5P_PA_FIMC0 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_FIMC0,
.end = IRQ_FIMC0,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device s3c_device_fimc0 = {
.name = "s3c-fimc",
.id = 0,
.num_resources = ARRAY_SIZE(s3c_fimc0_resource),
.resource = s3c_fimc0_resource,
};
static struct s3c_platform_fimc default_fimc0_data __initdata = {
.default_cam = CAMERA_PAR_A,
.hw_ver = 0x51,
};
void __init s3c_fimc0_set_platdata(struct s3c_platform_fimc *pd)
{
struct s3c_platform_fimc *npd;
if (!pd)
pd = &default_fimc0_data;
npd = kmemdup(pd, sizeof(struct s3c_platform_fimc), GFP_KERNEL);
if (!npd)
printk(KERN_ERR "%s: no memory for platform data\n", __func__);
else {
if (!npd->cfg_gpio)
npd->cfg_gpio = s3c_fimc0_cfg_gpio;
if (!npd->clk_on)
npd->clk_on = s3c_fimc_clk_on;
if (!npd->clk_off)
npd->clk_off = s3c_fimc_clk_off;
npd->hw_ver = 0x51;
npd->use_cam = true;
s3c_device_fimc0.dev.platform_data = npd;
}
}
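/*
 * Illustrative sketch (not part of this file): board setup code is
 * expected to pass either its own s3c_platform_fimc or NULL to accept
 * the defaults above, and then register the device, e.g.:
 *
 *	s3c_fimc0_set_platdata(NULL);
 *	platform_device_register(&s3c_device_fimc0);
 *
 * Whether a board registers the device directly or via a platform
 * device list is board-specific; this is only the general pattern.
 */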
static struct resource s3c_fimc1_resource[] = {
[0] = {
.start = S5P_PA_FIMC1,
.end = S5P_PA_FIMC1 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_FIMC1,
.end = IRQ_FIMC1,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device s3c_device_fimc1 = {
.name = "s3c-fimc",
.id = 1,
#ifdef CONFIG_USE_FIMC_CMA
.dev = {
.dma_mask = &s3c_fimc_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
#endif
.num_resources = ARRAY_SIZE(s3c_fimc1_resource),
.resource = s3c_fimc1_resource,
};
static struct s3c_platform_fimc default_fimc1_data __initdata = {
.default_cam = CAMERA_PAR_A,
.hw_ver = 0x51,
};
void __init s3c_fimc1_set_platdata(struct s3c_platform_fimc *pd)
{
struct s3c_platform_fimc *npd;
if (!pd)
pd = &default_fimc1_data;
npd = kmemdup(pd, sizeof(struct s3c_platform_fimc), GFP_KERNEL);
if (!npd)
printk(KERN_ERR "%s: no memory for platform data\n", __func__);
else {
if (!npd->cfg_gpio)
npd->cfg_gpio = s3c_fimc1_cfg_gpio;
if (!npd->clk_on)
npd->clk_on = s3c_fimc_clk_on;
if (!npd->clk_off)
npd->clk_off = s3c_fimc_clk_off;
npd->hw_ver = 0x51;
npd->use_cam = false;
s3c_device_fimc1.dev.platform_data = npd;
}
}
static struct resource s3c_fimc2_resource[] = {
[0] = {
.start = S5P_PA_FIMC2,
.end = S5P_PA_FIMC2 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_FIMC2,
.end = IRQ_FIMC2,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device s3c_device_fimc2 = {
.name = "s3c-fimc",
.id = 2,
.num_resources = ARRAY_SIZE(s3c_fimc2_resource),
.resource = s3c_fimc2_resource,
};
static struct s3c_platform_fimc default_fimc2_data __initdata = {
.default_cam = CAMERA_PAR_A,
.hw_ver = 0x51,
};
void __init s3c_fimc2_set_platdata(struct s3c_platform_fimc *pd)
{
struct s3c_platform_fimc *npd;
if (!pd)
pd = &default_fimc2_data;
npd = kmemdup(pd, sizeof(struct s3c_platform_fimc), GFP_KERNEL);
if (!npd)
printk(KERN_ERR "%s: no memory for platform data\n", __func__);
else {
if (!npd->cfg_gpio)
npd->cfg_gpio = s3c_fimc2_cfg_gpio;
if (!npd->clk_on)
npd->clk_on = s3c_fimc_clk_on;
if (!npd->clk_off)
npd->clk_off = s3c_fimc_clk_off;
npd->hw_ver = 0x51;
npd->use_cam = false;
s3c_device_fimc2.dev.platform_data = npd;
}
}
static struct resource s3c_fimc3_resource[] = {
[0] = {
.start = S5P_PA_FIMC3,
.end = S5P_PA_FIMC3 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_FIMC3,
.end = IRQ_FIMC3,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device s3c_device_fimc3 = {
.name = "s3c-fimc",
.id = 3,
.num_resources = ARRAY_SIZE(s3c_fimc3_resource),
.resource = s3c_fimc3_resource,
};
static struct s3c_platform_fimc default_fimc3_data __initdata = {
.default_cam = CAMERA_PAR_A,
.hw_ver = 0x51,
};
void __init s3c_fimc3_set_platdata(struct s3c_platform_fimc *pd)
{
struct s3c_platform_fimc *npd;
if (!pd)
pd = &default_fimc3_data;
npd = kmemdup(pd, sizeof(struct s3c_platform_fimc), GFP_KERNEL);
if (!npd)
printk(KERN_ERR "%s: no memory for platform data\n", __func__);
else {
if (!npd->cfg_gpio)
npd->cfg_gpio = s3c_fimc3_cfg_gpio;
if (!npd->clk_on)
npd->clk_on = s3c_fimc_clk_on;
if (!npd->clk_off)
npd->clk_off = s3c_fimc_clk_off;
npd->hw_ver = 0x51;
npd->use_cam = false;
s3c_device_fimc3.dev.platform_data = npd;
}
}
| gpl-2.0 |
ZoliN/ChuwiHBKernel | drivers/staging/lustre/lustre/llite/symlink.c | 412 | 5480 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/stat.h>
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_lite.h>
#include "llite_internal.h"
static int ll_readlink_internal(struct inode *inode,
struct ptlrpc_request **request, char **symname)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
int rc, symlen = i_size_read(inode) + 1;
struct mdt_body *body;
struct md_op_data *op_data;
*request = NULL;
if (lli->lli_symlink_name) {
int print_limit = min_t(int, PAGE_SIZE - 128, symlen);
*symname = lli->lli_symlink_name;
/* If the total CDEBUG() size is larger than a page, it
* will print a warning to the console, avoid this by
* printing just the last part of the symlink. */
CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
print_limit < symlen ? "..." : "", print_limit,
(*symname) + symlen - print_limit, symlen);
return 0;
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
op_data->op_valid = OBD_MD_LINKNAME;
rc = md_getattr(sbi->ll_md_exp, op_data, request);
ll_finish_md_op_data(op_data);
if (rc) {
if (rc != -ENOENT)
CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
		GOTO(failed, rc);
}
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL);
if ((body->valid & OBD_MD_LINKNAME) == 0) {
CERROR("OBD_MD_LINKNAME not set on reply\n");
GOTO(failed, rc = -EPROTO);
}
LASSERT(symlen != 0);
if (body->eadatasize != symlen) {
CERROR("inode %lu: symlink length %d not expected %d\n",
inode->i_ino, body->eadatasize - 1, symlen - 1);
GOTO(failed, rc = -EPROTO);
}
*symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
if (*symname == NULL ||
strnlen(*symname, symlen) != symlen - 1) {
/* not full/NULL terminated */
CERROR("inode %lu: symlink not NULL terminated string"
"of length %d\n", inode->i_ino, symlen - 1);
GOTO(failed, rc = -EPROTO);
}
OBD_ALLOC(lli->lli_symlink_name, symlen);
/* do not return an error if we cannot cache the symlink locally */
if (lli->lli_symlink_name) {
memcpy(lli->lli_symlink_name, *symname, symlen);
*symname = lli->lli_symlink_name;
}
return 0;
failed:
return rc;
}
static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
{
struct inode *inode = dentry->d_inode;
struct ptlrpc_request *request;
char *symname;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op\n");
ll_inode_size_lock(inode);
rc = ll_readlink_internal(inode, &request, &symname);
if (rc)
GOTO(out, rc);
rc = vfs_readlink(dentry, buffer, buflen, symname);
out:
ptlrpc_req_finished(request);
ll_inode_size_unlock(inode);
return rc;
}
static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
struct ptlrpc_request *request = NULL;
int rc;
char *symname;
CDEBUG(D_VFSTRACE, "VFS Op\n");
/* Limit the recursive symlink depth to 5 instead of default
* 8 links when kernel has 4k stack to prevent stack overflow.
* For 8k stacks we need to limit it to 7 for local servers. */
if (THREAD_SIZE < 8192 && current->link_count >= 6) {
rc = -ELOOP;
} else if (THREAD_SIZE == 8192 && current->link_count >= 8) {
rc = -ELOOP;
} else {
ll_inode_size_lock(inode);
rc = ll_readlink_internal(inode, &request, &symname);
ll_inode_size_unlock(inode);
}
if (rc) {
ptlrpc_req_finished(request);
request = NULL;
symname = ERR_PTR(rc);
}
nd_set_link(nd, symname);
/* symname may contain a pointer to the request message buffer,
* we delay request releasing until ll_put_link then.
*/
return request;
}
static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
ptlrpc_req_finished(cookie);
}
struct inode_operations ll_fast_symlink_inode_operations = {
.readlink = ll_readlink,
.setattr = ll_setattr,
.follow_link = ll_follow_link,
.put_link = ll_put_link,
.getattr = ll_getattr,
.permission = ll_inode_permission,
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,
.listxattr = ll_listxattr,
.removexattr = ll_removexattr,
};
| gpl-2.0 |
taudac/linux | arch/arm/mach-rockchip/rockchip.c | 412 | 1184 | /*
* Device Tree support for Rockchip SoCs
*
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>
#include "core.h"
static const char * const rockchip_board_dt_compat[] = {
"rockchip,rk2928",
"rockchip,rk3066a",
"rockchip,rk3066b",
"rockchip,rk3188",
"rockchip,rk3288",
NULL,
};
DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.dt_compat = rockchip_board_dt_compat,
MACHINE_END
| gpl-2.0 |
CyanogenMod/android_kernel_huawei_angler | arch/parisc/mm/fault.c | 924 | 7603 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
* Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
* Copyright 1999 Hewlett Packard Co.
*
*/
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
/* dumped to the console via printk) */
/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
#define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
/* extended opcode is 0x6a */
#define BITSSET 0x1c0 /* for identifying LDCW */
DEFINE_PER_CPU(struct exception_data, exception_data);
/*
* parisc_acctyp(unsigned int inst) --
* Given a PA-RISC memory access instruction, determine if the
* the instruction would perform a memory read or memory write
* operation.
*
* This function assumes that the given instruction is a memory access
* instruction (i.e. you should really only call it if you know that
* the instruction has generated some sort of a memory access fault).
*
* Returns:
* VM_READ if read operation
* VM_WRITE if write operation
* VM_EXEC if execute operation
*/
static unsigned long
parisc_acctyp(unsigned long code, unsigned int inst)
{
if (code == 6 || code == 16)
return VM_EXEC;
switch (inst & 0xf0000000) {
case 0x40000000: /* load */
case 0x50000000: /* new load */
return VM_READ;
case 0x60000000: /* store */
case 0x70000000: /* new store */
return VM_WRITE;
case 0x20000000: /* coproc */
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
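		/* fall through */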
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
/*
* Check for the 'Graphics Flush Read' instruction.
* It resembles an FDC instruction, except for bits
* 20 and 21. Any combination other than zero will
* utilize the block mover functionality on some
* older PA-RISC platforms. The case where a block
* move is performed from VM to graphics IO space
* should be treated as a READ.
*
* The significance of bits 20,21 in the FDC
* instruction is:
*
* 00 Flush data cache (normal instruction behavior)
* 01 Graphics flush write (IO space -> VM)
* 10 Graphics flush read (VM -> IO space)
* 11 Graphics flush read/write (VM <-> IO space)
*/
if (isGraphicsFlushRead(inst))
return VM_READ;
return VM_WRITE;
} else {
/*
* Check for LDCWX and LDCWS (semaphore instructions).
* If bits 23 through 25 are all 1's it is one of
* the above two instructions and is a write.
*
* Note: With the limited bits we are looking at,
* this will also catch PROBEW and PROBEWI. However,
* these should never get in here because they don't
* generate exceptions of the type:
* Data TLB miss fault/data page fault
* Data memory protection trap
*/
if (bits23_25set(inst) == BITSSET)
return VM_WRITE;
}
return VM_READ; /* Default */
}
return VM_READ; /* Default */
}
#undef bit22set
#undef bits23_25set
#undef isGraphicsFlushRead
#undef BITSSET
#if 0
/* This is the treewalk to find a vma which is the highest that has
* a start < addr. We're using find_vma_prev instead right now, but
* we might want to use this at some point in the future. Probably
* not, but I want it committed to CVS so I don't lose it :-)
*/
while (tree != vm_avl_empty) {
if (tree->vm_start > addr) {
tree = tree->vm_avl_left;
} else {
prev = tree;
if (prev->vm_next == NULL)
break;
if (prev->vm_next->vm_start > addr)
break;
tree = tree->vm_avl_right;
}
}
#endif
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
d = &__get_cpu_var(exception_data);
d->fault_ip = regs->iaoq[0];
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
regs->iaoq[0] = ((fix->fixup) & ~3);
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
* don't want to take the branch, so we don't
* increment iaoq[1], instead we set it to be
* iaoq[0]+4, and clear the B bit in the PSW
*/
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
return 1;
}
return 0;
}
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
struct vm_area_struct *vma, *prev_vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
unsigned long acc_type;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (in_atomic() || !mm)
goto no_context;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
	acc_type = parisc_acctyp(code, regs->iir);
	if (acc_type & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start)
goto check_expansion;
/*
* Ok, we have a good vm_area for this memory access. We still need to
* check the access permissions.
*/
good_area:
	if ((vma->vm_flags & acc_type) != acc_type)
		goto bad_area;
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
* other thing happened to us that made us unable to
* handle the page fault gracefully.
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
/*
* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
}
up_read(&mm->mmap_sem);
return;
check_expansion:
vma = prev_vma;
if (vma && (expand_stack(vma, address) == 0))
goto good_area;
/*
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
struct siginfo si;
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "\n");
printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
task_pid_nr(tsk), tsk->comm, code, address);
if (vma) {
printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
vma->vm_start, vma->vm_end);
}
show_regs(regs);
#endif
/* FIXME: actually we need to get the signo and code correct */
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &si, current);
return;
}
no_context:
if (!user_mode(regs) && fixup_exception(regs)) {
return;
}
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
}
| gpl-2.0 |
sdugit/linux-3.0_7025 | arch/arm/mach-iop13xx/iq81340sc.c | 2972 | 2593 | /*
* iq81340sc board support
* Copyright (c) 2005-2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <linux/pci.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/pci.h>
#include <asm/mach/time.h>
#include <mach/time.h>
extern int init_atu;
static int __init
iq81340sc_atux_map_irq(struct pci_dev *dev, u8 idsel, u8 pin)
{
WARN_ON(idsel < 1 || idsel > 2);
switch (idsel) {
case 1:
switch (pin) {
case 1: return ATUX_INTB;
case 2: return ATUX_INTC;
case 3: return ATUX_INTD;
case 4: return ATUX_INTA;
default: return -1;
}
case 2:
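		/* every pin on idsel 2 routes to ATUX_INTC */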
switch (pin) {
case 1: return ATUX_INTC;
case 2: return ATUX_INTC;
case 3: return ATUX_INTC;
case 4: return ATUX_INTC;
default: return -1;
}
default: return -1;
}
}
static struct hw_pci iq81340sc_pci __initdata = {
.swizzle = pci_std_swizzle,
.nr_controllers = 0,
.setup = iop13xx_pci_setup,
.scan = iop13xx_scan_bus,
.map_irq = iq81340sc_atux_map_irq,
.preinit = iop13xx_pci_init
};
static int __init iq81340sc_pci_init(void)
{
iop13xx_atu_select(&iq81340sc_pci);
pci_common_init(&iq81340sc_pci);
iop13xx_map_pci_memory();
return 0;
}
static void __init iq81340sc_init(void)
{
iop13xx_platform_init();
iq81340sc_pci_init();
iop13xx_add_tpmi_devices();
}
static void __init iq81340sc_timer_init(void)
{
unsigned long bus_freq = iop13xx_core_freq() / iop13xx_xsi_bus_ratio();
printk(KERN_DEBUG "%s: bus frequency: %lu\n", __func__, bus_freq);
iop_init_time(bus_freq);
}
static struct sys_timer iq81340sc_timer = {
.init = iq81340sc_timer_init,
};
MACHINE_START(IQ81340SC, "Intel IQ81340SC")
/* Maintainer: Dan Williams <dan.j.williams@intel.com> */
.boot_params = 0x00000100,
.map_io = iop13xx_map_io,
.init_irq = iop13xx_init_irq,
.timer = &iq81340sc_timer,
.init_machine = iq81340sc_init,
MACHINE_END
| gpl-2.0 |
SlimRoms/kernel_oneplus_msm8974 | drivers/net/wireless/iwmc3200wifi/cfg80211.c | 5020 | 22230 | /*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/cfg80211.h>
#include "iwm.h"
#include "commands.h"
#include "cfg80211.h"
#include "debug.h"
#define RATETAB_ENT(_rate, _rateid, _flags) \
{ \
.bitrate = (_rate), \
.hw_value = (_rateid), \
.flags = (_flags), \
}
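/* .bitrate is in 100 kb/s units, so RATETAB_ENT(10, ...) is 1 Mb/s */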
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel, _flags) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
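/* e.g. CHAN5G(36, 0) describes a channel centered at 5180 MHz */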
static struct ieee80211_rate iwm_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
RATETAB_ENT(55, 0x4, 0),
RATETAB_ENT(110, 0x8, 0),
RATETAB_ENT(60, 0x10, 0),
RATETAB_ENT(90, 0x20, 0),
RATETAB_ENT(120, 0x40, 0),
RATETAB_ENT(180, 0x80, 0),
RATETAB_ENT(240, 0x100, 0),
RATETAB_ENT(360, 0x200, 0),
RATETAB_ENT(480, 0x400, 0),
RATETAB_ENT(540, 0x800, 0),
};
#define iwm_a_rates (iwm_rates + 4)
#define iwm_a_rates_size 8
#define iwm_g_rates (iwm_rates + 0)
#define iwm_g_rates_size 12
static struct ieee80211_channel iwm_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static struct ieee80211_channel iwm_5ghz_a_channels[] = {
CHAN5G(34, 0), CHAN5G(36, 0),
CHAN5G(38, 0), CHAN5G(40, 0),
CHAN5G(42, 0), CHAN5G(44, 0),
CHAN5G(46, 0), CHAN5G(48, 0),
CHAN5G(52, 0), CHAN5G(56, 0),
CHAN5G(60, 0), CHAN5G(64, 0),
CHAN5G(100, 0), CHAN5G(104, 0),
CHAN5G(108, 0), CHAN5G(112, 0),
CHAN5G(116, 0), CHAN5G(120, 0),
CHAN5G(124, 0), CHAN5G(128, 0),
CHAN5G(132, 0), CHAN5G(136, 0),
CHAN5G(140, 0), CHAN5G(149, 0),
CHAN5G(153, 0), CHAN5G(157, 0),
CHAN5G(161, 0), CHAN5G(165, 0),
CHAN5G(184, 0), CHAN5G(188, 0),
CHAN5G(192, 0), CHAN5G(196, 0),
CHAN5G(200, 0), CHAN5G(204, 0),
CHAN5G(208, 0), CHAN5G(212, 0),
CHAN5G(216, 0),
};
static struct ieee80211_supported_band iwm_band_2ghz = {
.channels = iwm_2ghz_channels,
.n_channels = ARRAY_SIZE(iwm_2ghz_channels),
.bitrates = iwm_g_rates,
.n_bitrates = iwm_g_rates_size,
};
static struct ieee80211_supported_band iwm_band_5ghz = {
.channels = iwm_5ghz_a_channels,
.n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
.bitrates = iwm_a_rates,
.n_bitrates = iwm_a_rates_size,
};
static int iwm_key_init(struct iwm_key *key, u8 key_index,
const u8 *mac_addr, struct key_params *params)
{
key->hdr.key_idx = key_index;
if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
key->hdr.multicast = 1;
memset(key->hdr.mac, 0xff, ETH_ALEN);
} else {
key->hdr.multicast = 0;
memcpy(key->hdr.mac, mac_addr, ETH_ALEN);
}
if (params) {
if (params->key_len > WLAN_MAX_KEY_LEN ||
params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
return -EINVAL;
key->cipher = params->cipher;
key->key_len = params->key_len;
key->seq_len = params->seq_len;
memcpy(key->key, params->key, key->key_len);
memcpy(key->seq, params->seq, key->seq_len);
}
return 0;
}
static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
int ret;
IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
key = &iwm->keys[key_index];
memset(key, 0, sizeof(struct iwm_key));
ret = iwm_key_init(key, key_index, mac_addr, params);
if (ret < 0) {
IWM_ERR(iwm, "Invalid key_params\n");
return ret;
}
return iwm_set_key(iwm, 0, key);
}
static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr,
void *cookie,
void (*callback)(void *cookie,
struct key_params*))
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
struct key_params params;
IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
memset(¶ms, 0, sizeof(params));
key = &iwm->keys[key_index];
params.cipher = key->cipher;
params.key_len = key->key_len;
params.seq_len = key->seq_len;
params.seq = key->seq;
params.key = key->key;
callback(cookie, ¶ms);
return key->key_len ? 0 : -ENOENT;
}
static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
key = &iwm->keys[key_index];
if (!iwm->keys[key_index].key_len) {
IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
return 0;
}
if (key_index == iwm->default_key)
iwm->default_key = -1;
return iwm_set_key(iwm, 1, key);
}
static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool unicast,
bool multicast)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
if (!iwm->keys[key_index].key_len) {
IWM_ERR(iwm, "Key %d not used\n", key_index);
return -EINVAL;
}
iwm->default_key = key_index;
return iwm_set_tx_key(iwm, key_index);
}
static int iwm_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
u8 *mac, struct station_info *sinfo)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
if (memcmp(mac, iwm->bssid, ETH_ALEN))
return -ENOENT;
sinfo->filled |= STATION_INFO_TX_BITRATE;
sinfo->txrate.legacy = iwm->rate * 10;
if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = iwm->wstats.qual.level;
}
return 0;
}
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
{
struct wiphy *wiphy = iwm_to_wiphy(iwm);
struct iwm_bss_info *bss;
struct iwm_umac_notif_bss_info *umac_bss;
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
s32 signal;
int freq;
list_for_each_entry(bss, &iwm->bss_list, node) {
umac_bss = bss->bss;
mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
if (umac_bss->band == UMAC_BAND_2GHZ)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
else if (umac_bss->band == UMAC_BAND_5GHZ)
band = wiphy->bands[IEEE80211_BAND_5GHZ];
else {
IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
return -EINVAL;
}
freq = ieee80211_channel_to_frequency(umac_bss->channel,
band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
le16_to_cpu(umac_bss->frame_len),
signal, GFP_KERNEL))
return -EINVAL;
}
return 0;
}
static int iwm_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wireless_dev *wdev;
struct iwm_priv *iwm;
u32 old_mode;
wdev = ndev->ieee80211_ptr;
iwm = ndev_to_iwm(ndev);
old_mode = iwm->conf.mode;
switch (type) {
case NL80211_IFTYPE_STATION:
iwm->conf.mode = UMAC_MODE_BSS;
break;
case NL80211_IFTYPE_ADHOC:
iwm->conf.mode = UMAC_MODE_IBSS;
break;
default:
return -EOPNOTSUPP;
}
wdev->iftype = type;
if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
return 0;
iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
if (iwm->umac_profile_active)
iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
int ret;
if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
IWM_ERR(iwm, "Scan while device is not ready\n");
return -EIO;
}
if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
IWM_ERR(iwm, "Scanning already\n");
return -EAGAIN;
}
if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
IWM_ERR(iwm, "Scanning being aborted\n");
return -EAGAIN;
}
set_bit(IWM_STATUS_SCANNING, &iwm->status);
ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
if (ret) {
clear_bit(IWM_STATUS_SCANNING, &iwm->status);
return ret;
}
iwm->scan_request = request;
return 0;
}
static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
(iwm->conf.rts_threshold != wiphy->rts_threshold)) {
int ret;
iwm->conf.rts_threshold = wiphy->rts_threshold;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_RTS_THRESHOLD,
iwm->conf.rts_threshold);
if (ret < 0)
return ret;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
(iwm->conf.frag_threshold != wiphy->frag_threshold)) {
int ret;
iwm->conf.frag_threshold = wiphy->frag_threshold;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
CFG_FRAG_THRESHOLD,
iwm->conf.frag_threshold);
if (ret < 0)
return ret;
}
return 0;
}
static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct ieee80211_channel *chan = params->channel;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return -EIO;
/* UMAC doesn't support creating or joining an IBSS network
* with specified bssid. */
if (params->bssid)
return -EOPNOTSUPP;
iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
iwm->umac_profile->ibss.band = chan->band;
iwm->umac_profile->ibss.channel = iwm->channel;
iwm->umac_profile->ssid.ssid_len = params->ssid_len;
memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
return iwm_send_mlme_profile(iwm);
}
static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
if (iwm->umac_profile_active)
return iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_set_auth_type(struct iwm_priv *iwm,
enum nl80211_auth_type sme_auth_type)
{
u8 *auth_type = &iwm->umac_profile->sec.auth_type;
switch (sme_auth_type) {
case NL80211_AUTHTYPE_AUTOMATIC:
case NL80211_AUTHTYPE_OPEN_SYSTEM:
IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n");
*auth_type = UMAC_AUTH_TYPE_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
if (iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n");
*auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
} else {
IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n");
*auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
}
break;
default:
IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type);
return -ENOTSUPP;
}
return 0;
}
static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
{
IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version);
if (!wpa_version) {
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
return 0;
}
if (wpa_version & NL80211_WPA_VERSION_1)
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
if (wpa_version & NL80211_WPA_VERSION_2)
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
return 0;
}
static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast)
{
u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
&iwm->umac_profile->sec.mcast_cipher;
if (!cipher) {
*profile_cipher = UMAC_CIPHER_TYPE_NONE;
return 0;
}
IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 'u' : 'm',
cipher);
switch (cipher) {
case IW_AUTH_CIPHER_NONE:
*profile_cipher = UMAC_CIPHER_TYPE_NONE;
break;
case WLAN_CIPHER_SUITE_WEP40:
*profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
break;
case WLAN_CIPHER_SUITE_WEP104:
*profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
break;
case WLAN_CIPHER_SUITE_TKIP:
*profile_cipher = UMAC_CIPHER_TYPE_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
*profile_cipher = UMAC_CIPHER_TYPE_CCMP;
break;
default:
IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
return -ENOTSUPP;
}
return 0;
}
static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt)
{
u8 *auth_type = &iwm->umac_profile->sec.auth_type;
IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt);
if (key_mgt == WLAN_AKM_SUITE_8021X)
*auth_type = UMAC_AUTH_TYPE_8021X;
else if (key_mgt == WLAN_AKM_SUITE_PSK) {
if (iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
*auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
else
*auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
} else {
IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
return -EINVAL;
}
return 0;
}
static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct ieee80211_channel *chan = sme->channel;
struct key_params key_param;
int ret;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return -EIO;
if (!sme->ssid)
return -EINVAL;
if (iwm->umac_profile_active) {
ret = iwm_invalidate_mlme_profile(iwm);
if (ret) {
IWM_ERR(iwm, "Couldn't invalidate profile\n");
return ret;
}
}
if (chan)
iwm->channel =
ieee80211_frequency_to_channel(chan->center_freq);
iwm->umac_profile->ssid.ssid_len = sme->ssid_len;
memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len);
if (sme->bssid) {
IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid);
memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN);
iwm->umac_profile->bss_num = 1;
} else {
memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
iwm->umac_profile->bss_num = 0;
}
ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions);
if (ret < 0)
return ret;
ret = iwm_set_auth_type(iwm, sme->auth_type);
if (ret < 0)
return ret;
if (sme->crypto.n_ciphers_pairwise) {
ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0],
true);
if (ret < 0)
return ret;
}
ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false);
if (ret < 0)
return ret;
if (sme->crypto.n_akm_suites) {
ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]);
if (ret < 0)
return ret;
}
	/*
	 * We save the WEP key in case we want to do shared authentication.
	 * We have to do it this way because the UMAC will assert whenever
	 * it gets a key before a profile.
	 */
if (sme->key) {
key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL);
if (key_param.key == NULL)
return -ENOMEM;
key_param.key_len = sme->key_len;
key_param.seq_len = 0;
key_param.cipher = sme->crypto.ciphers_pairwise[0];
ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx,
NULL, &key_param);
kfree(key_param.key);
if (ret < 0) {
IWM_ERR(iwm, "Invalid key_params\n");
return ret;
}
iwm->default_key = sme->key_idx;
}
/* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
if ((iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
}
ret = iwm_send_mlme_profile(iwm);
if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
sme->key == NULL)
return ret;
/*
* We want to do shared auth.
* We need to actually set the key we previously cached,
* and then tell the UMAC it's the default one.
* That will trigger the auth+assoc UMAC machinery, and again,
* this must be done after setting the profile.
*/
ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]);
if (ret < 0)
return ret;
return iwm_set_tx_key(iwm, iwm->default_key);
}
static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active);
if (iwm->umac_profile_active)
iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, int mbm)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
int ret;
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
return 0;
case NL80211_TX_POWER_FIXED:
if (mbm < 0 || (mbm % 100))
return -EOPNOTSUPP;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return 0;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_TX_PWR_LIMIT_USR,
MBM_TO_DBM(mbm) * 2);
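		/* the firmware tracks tx power in half-dBm units, hence
		 * the * 2 here and the >> 1 in iwm_cfg80211_get_txpower() */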
if (ret < 0)
return ret;
return iwm_tx_power_trigger(iwm);
default:
IWM_ERR(iwm, "Unsupported power type: %d\n", type);
return -EOPNOTSUPP;
}
return 0;
}
static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
*dbm = iwm->txpower >> 1;
return 0;
}
static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev,
bool enabled, int timeout)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
u32 power_index;
if (enabled)
power_index = IWM_POWER_INDEX_DEFAULT;
else
power_index = IWM_POWER_INDEX_MIN;
if (power_index == iwm->conf.power_index)
return 0;
iwm->conf.power_index = power_index;
return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_POWER_INDEX, iwm->conf.power_index);
}
static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
}
static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
}
static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *netdev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct cfg80211_pmksa pmksa;
memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
}
static struct cfg80211_ops iwm_cfg80211_ops = {
.change_virtual_intf = iwm_cfg80211_change_iface,
.add_key = iwm_cfg80211_add_key,
.get_key = iwm_cfg80211_get_key,
.del_key = iwm_cfg80211_del_key,
.set_default_key = iwm_cfg80211_set_default_key,
.get_station = iwm_cfg80211_get_station,
.scan = iwm_cfg80211_scan,
.set_wiphy_params = iwm_cfg80211_set_wiphy_params,
.connect = iwm_cfg80211_connect,
.disconnect = iwm_cfg80211_disconnect,
.join_ibss = iwm_cfg80211_join_ibss,
.leave_ibss = iwm_cfg80211_leave_ibss,
.set_tx_power = iwm_cfg80211_set_txpower,
.get_tx_power = iwm_cfg80211_get_txpower,
.set_power_mgmt = iwm_cfg80211_set_power_mgmt,
.set_pmksa = iwm_cfg80211_set_pmksa,
.del_pmksa = iwm_cfg80211_del_pmksa,
.flush_pmksa = iwm_cfg80211_flush_pmksa,
};
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
};
struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
{
int ret = 0;
struct wireless_dev *wdev;
/*
* We're trying to have the following memory
* layout:
*
* +-------------------------+
* | struct wiphy |
* +-------------------------+
* | struct iwm_priv |
* +-------------------------+
* | bus private data |
* | (e.g. iwm_priv_sdio) |
* +-------------------------+
*
*/
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev) {
dev_err(dev, "Couldn't allocate wireless device\n");
return ERR_PTR(-ENOMEM);
}
wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
sizeof(struct iwm_priv) + sizeof_bus);
if (!wdev->wiphy) {
dev_err(dev, "Couldn't allocate wiphy device\n");
ret = -ENOMEM;
goto out_err_new;
}
set_wiphy_dev(wdev->wiphy, dev);
wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wdev->wiphy->cipher_suites = cipher_suites;
wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
ret = wiphy_register(wdev->wiphy);
if (ret < 0) {
dev_err(dev, "Couldn't register wiphy device\n");
goto out_err_register;
}
return wdev;
out_err_register:
wiphy_free(wdev->wiphy);
out_err_new:
kfree(wdev);
return ERR_PTR(ret);
}
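/*
 * Illustrative sketch (not part of this file): a bus glue layer calls
 * iwm_wdev_alloc() with the size of its private struct and later
 * recovers the per-bus area from the wiphy private data, e.g.:
 *
 *	wdev = iwm_wdev_alloc(sizeof(struct iwm_priv_sdio), dev);
 *	iwm = wiphy_priv(wdev->wiphy);
 *	priv_sdio = (struct iwm_priv_sdio *)(iwm + 1);
 *
 * "struct iwm_priv_sdio" and the pointer names are assumptions made
 * for illustration; wiphy_priv() is the standard cfg80211 accessor.
 */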
void iwm_wdev_free(struct iwm_priv *iwm)
{
struct wireless_dev *wdev = iwm_to_wdev(iwm);
if (!wdev)
return;
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
| gpl-2.0 |
Bdaman80/BD-Ace | drivers/misc/ibmasm/dot_command.c | 5020 | 4062 | /*
* IBM ASM Service Processor Device Driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <amax@us.ibm.com>
*
*/
#include "ibmasm.h"
#include "dot_command.h"
/**
* Dispatch an incoming message to the specific handler for the message.
* Called from interrupt context.
*/
void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size)
{
u32 size;
struct dot_command_header *header = (struct dot_command_header *)message;
if (message_size == 0)
return;
size = get_dot_command_size(message);
if (size == 0)
return;
if (size > message_size)
size = message_size;
switch (header->type) {
case sp_event:
ibmasm_receive_event(sp, message, size);
break;
case sp_command_response:
ibmasm_receive_command_response(sp, message, size);
break;
case sp_heartbeat:
ibmasm_receive_heartbeat(sp, message, size);
break;
default:
dev_err(sp->dev, "Received unknown message from service processor\n");
}
}
#define INIT_BUFFER_SIZE 32
/**
* send the 4.3.5.10 dot command (driver VPD) to the service processor
*/
int ibmasm_send_driver_vpd(struct service_processor *sp)
{
struct command *command;
struct dot_command_header *header;
u8 *vpd_command;
u8 *vpd_data;
int result = 0;
command = ibmasm_new_command(sp, INIT_BUFFER_SIZE);
if (command == NULL)
return -ENOMEM;
header = (struct dot_command_header *)command->buffer;
header->type = sp_write;
header->command_size = 4;
header->data_size = 16;
header->status = 0;
header->reserved = 0;
vpd_command = command->buffer + sizeof(struct dot_command_header);
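	/* the four bytes below spell out dot command 4.3.5.10 */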
vpd_command[0] = 0x4;
vpd_command[1] = 0x3;
vpd_command[2] = 0x5;
vpd_command[3] = 0xa;
vpd_data = vpd_command + header->command_size;
vpd_data[0] = 0;
strcat(vpd_data, IBMASM_DRIVER_VPD);
vpd_data[10] = 0;
vpd_data[15] = 0;
ibmasm_exec_command(sp, command);
ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
if (command->status != IBMASM_CMD_COMPLETE)
result = -ENODEV;
command_put(command);
return result;
}
struct os_state_command {
struct dot_command_header header;
unsigned char command[3];
unsigned char data;
};
/**
* send the 4.3.6 dot command (os state) to the service processor
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
* driver.
* During driver exit the function is called with os state "down",
* causing the service processor to stop the heartbeats.
*/
int ibmasm_send_os_state(struct service_processor *sp, int os_state)
{
struct command *cmd;
struct os_state_command *os_state_cmd;
int result = 0;
cmd = ibmasm_new_command(sp, sizeof(struct os_state_command));
if (cmd == NULL)
return -ENOMEM;
os_state_cmd = (struct os_state_command *)cmd->buffer;
os_state_cmd->header.type = sp_write;
os_state_cmd->header.command_size = 3;
os_state_cmd->header.data_size = 1;
os_state_cmd->header.status = 0;
os_state_cmd->command[0] = 4;
os_state_cmd->command[1] = 3;
os_state_cmd->command[2] = 6;
os_state_cmd->data = os_state;
ibmasm_exec_command(sp, cmd);
ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
if (cmd->status != IBMASM_CMD_COMPLETE)
result = -ENODEV;
command_put(cmd);
return result;
}
| gpl-2.0 |
Klozz/XPerience_Kernel_msm8226_mmi_Falcon | drivers/net/ethernet/sfc/mcdi_mac.c | 7324 | 3523 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 reject, fcntl;
u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
efx->net_dev->dev_addr, ETH_ALEN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* The MCDI command provides for controlling accept/reject
* of broadcast packets too, but the driver doesn't currently
* expose this. */
reject = (efx->promiscuous) ? 0 :
(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:
fcntl = MC_CMD_FCNTL_BIDIR;
break;
case EFX_FC_RX:
fcntl = MC_CMD_FCNTL_RESPOND;
break;
default:
fcntl = MC_CMD_FCNTL_OFF;
break;
}
if (efx->wanted_fc & EFX_FC_AUTO)
fcntl = MC_CMD_FCNTL_AUTO;
if (efx->fc_disable)
fcntl = MC_CMD_FCNTL_OFF;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
NULL, 0, NULL);
}
bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
size_t outlength;
int rc;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc) {
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
__func__, rc);
return true;
}
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u32 dma_len, int enable, int clear)
{
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
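	/* split the 64-bit DMA address into the two 32-bit MCDI fields */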
addr_lo = ((u64)dma_addr) >> 0;
addr_hi = ((u64)dma_addr) >> 32;
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
EFX_POPULATE_DWORD_7(*cmd_ptr,
MC_CMD_MAC_STATS_IN_DMA, !!enable,
MC_CMD_MAC_STATS_IN_CLEAR, clear,
MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
__func__, enable ? "enable" : "disable", rc);
return rc;
}
int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
{
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
rc = efx_mcdi_set_mac(efx);
if (rc != 0)
return rc;
return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
efx->multicast_hash.byte,
sizeof(efx->multicast_hash),
NULL, 0, NULL);
}
| gpl-2.0 |
Hadramos/android_sony_xperiaz_kernel_sources | drivers/input/joystick/maplecontrol.c | 9884 | 4963 | /*
* SEGA Dreamcast controller driver
* Based on drivers/usb/iforce.c
*
* Copyright Yaegashi Takeshi, 2001
* Adrian McMenamin, 2008 - 2009
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/maple.h>
MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("SEGA Dreamcast controller driver");
MODULE_LICENSE("GPL");
struct dc_pad {
struct input_dev *dev;
struct maple_device *mdev;
};
static void dc_pad_callback(struct mapleq *mq)
{
unsigned short buttons;
struct maple_device *mapledev = mq->dev;
struct dc_pad *pad = maple_get_drvdata(mapledev);
struct input_dev *dev = pad->dev;
unsigned char *res = mq->recvbuf->buf;
buttons = ~le16_to_cpup((__le16 *)(res + 8));
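	/* the controller reports buttons active-low, hence the negation */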
input_report_abs(dev, ABS_HAT0Y,
(buttons & 0x0010 ? -1 : 0) + (buttons & 0x0020 ? 1 : 0));
input_report_abs(dev, ABS_HAT0X,
(buttons & 0x0040 ? -1 : 0) + (buttons & 0x0080 ? 1 : 0));
input_report_abs(dev, ABS_HAT1Y,
(buttons & 0x1000 ? -1 : 0) + (buttons & 0x2000 ? 1 : 0));
input_report_abs(dev, ABS_HAT1X,
(buttons & 0x4000 ? -1 : 0) + (buttons & 0x8000 ? 1 : 0));
input_report_key(dev, BTN_C, buttons & 0x0001);
input_report_key(dev, BTN_B, buttons & 0x0002);
input_report_key(dev, BTN_A, buttons & 0x0004);
input_report_key(dev, BTN_START, buttons & 0x0008);
input_report_key(dev, BTN_Z, buttons & 0x0100);
input_report_key(dev, BTN_Y, buttons & 0x0200);
input_report_key(dev, BTN_X, buttons & 0x0400);
input_report_key(dev, BTN_SELECT, buttons & 0x0800);
input_report_abs(dev, ABS_GAS, res[10]);
input_report_abs(dev, ABS_BRAKE, res[11]);
input_report_abs(dev, ABS_X, res[12]);
input_report_abs(dev, ABS_Y, res[13]);
input_report_abs(dev, ABS_RX, res[14]);
input_report_abs(dev, ABS_RY, res[15]);
}
static int dc_pad_open(struct input_dev *dev)
{
struct dc_pad *pad = dev->dev.platform_data;
maple_getcond_callback(pad->mdev, dc_pad_callback, HZ/20,
MAPLE_FUNC_CONTROLLER);
return 0;
}
static void dc_pad_close(struct input_dev *dev)
{
struct dc_pad *pad = dev->dev.platform_data;
maple_getcond_callback(pad->mdev, dc_pad_callback, 0,
MAPLE_FUNC_CONTROLLER);
}
/* allow the controller to be used */
static int __devinit probe_maple_controller(struct device *dev)
{
static const short btn_bit[32] = {
BTN_C, BTN_B, BTN_A, BTN_START, -1, -1, -1, -1,
BTN_Z, BTN_Y, BTN_X, BTN_SELECT, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
};
static const short abs_bit[32] = {
-1, -1, -1, -1, ABS_HAT0Y, ABS_HAT0Y, ABS_HAT0X, ABS_HAT0X,
-1, -1, -1, -1, ABS_HAT1Y, ABS_HAT1Y, ABS_HAT1X, ABS_HAT1X,
ABS_GAS, ABS_BRAKE, ABS_X, ABS_Y, ABS_RX, ABS_RY, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
};
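	/* each set bit in the device's 32-bit function data selects the
	 * matching entry in the two lookup tables above */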
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
int i, error;
struct dc_pad *pad;
struct input_dev *idev;
unsigned long data = be32_to_cpu(mdev->devinfo.function_data[0]);
pad = kzalloc(sizeof(struct dc_pad), GFP_KERNEL);
idev = input_allocate_device();
if (!pad || !idev) {
error = -ENOMEM;
goto fail;
}
pad->dev = idev;
pad->mdev = mdev;
idev->open = dc_pad_open;
idev->close = dc_pad_close;
for (i = 0; i < 32; i++) {
if (data & (1 << i)) {
if (btn_bit[i] >= 0)
__set_bit(btn_bit[i], idev->keybit);
else if (abs_bit[i] >= 0)
__set_bit(abs_bit[i], idev->absbit);
}
}
if (idev->keybit[BIT_WORD(BTN_JOYSTICK)])
idev->evbit[0] |= BIT_MASK(EV_KEY);
if (idev->absbit[0])
idev->evbit[0] |= BIT_MASK(EV_ABS);
for (i = ABS_X; i <= ABS_BRAKE; i++)
input_set_abs_params(idev, i, 0, 255, 0, 0);
for (i = ABS_HAT0X; i <= ABS_HAT3Y; i++)
		input_set_abs_params(idev, i, -1, 1, 0, 0);
idev->dev.platform_data = pad;
idev->dev.parent = &mdev->dev;
idev->name = mdev->product_name;
idev->id.bustype = BUS_HOST;
input_set_drvdata(idev, pad);
error = input_register_device(idev);
if (error)
goto fail;
mdev->driver = mdrv;
maple_set_drvdata(mdev, pad);
return 0;
fail:
input_free_device(idev);
kfree(pad);
maple_set_drvdata(mdev, NULL);
return error;
}
static int __devexit remove_maple_controller(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_pad *pad = maple_get_drvdata(mdev);
mdev->callback = NULL;
input_unregister_device(pad->dev);
maple_set_drvdata(mdev, NULL);
kfree(pad);
return 0;
}
static struct maple_driver dc_pad_driver = {
.function = MAPLE_FUNC_CONTROLLER,
.drv = {
.name = "Dreamcast_controller",
.probe = probe_maple_controller,
.remove = __devexit_p(remove_maple_controller),
},
};
static int __init dc_pad_init(void)
{
return maple_driver_register(&dc_pad_driver);
}
static void __exit dc_pad_exit(void)
{
maple_driver_unregister(&dc_pad_driver);
}
module_init(dc_pad_init);
module_exit(dc_pad_exit);
| gpl-2.0 |