repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
deanz93/android_kernel_sony_apq8064 | drivers/mtd/onenand/onenand_bbt.c | 7510 | 6918 | /*
* linux/drivers/mtd/onenand/onenand_bbt.c
*
* Bad Block Table support for the OneNAND driver
*
* Copyright(c) 2005 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* Derived from nand_bbt.c
*
* TODO:
* Split BBT core and chip specific BBT.
*/
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/export.h>
/**
 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
 * @param buf	the buffer to search
 * @param len	the length of buffer to search
 * @param paglen	the pagelength
 * @param td	search pattern descriptor
 *
 * Compare td->len bytes at the start of @buf against the bad-block
 * signature in @td. @len and @paglen are part of the generic callback
 * signature and are unused here.
 *
 * Returns 0 when every byte matches, -1 on the first mismatch.
 */
static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
	const uint8_t *expect = td->pattern;
	int pos;

	for (pos = 0; pos < td->len; pos++)
		if (buf[pos] != expect[pos])
			return -1;
	return 0;
}
/**
 * create_bbt - [GENERIC] Create a bad block table by scanning the device
 * @param mtd	MTD device structure
 * @param buf	temporary buffer used for the per-page OOB reads
 * @param bd	descriptor for the good/bad block search pattern
 * @param chip	create the table for a specific chip, -1 read all chips.
 *		Applies only if NAND_BBT_PERCHIP option is set
 *
 * Create a bad block table by scanning the device for the given
 * good/bad block identify pattern.  Each eraseblock is represented by
 * two bits in bbm->bbt; bad blocks are marked 0b11.
 */
static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int i, j, numblocks, len, scanlen;
	int startblock;
	loff_t from;
	size_t readlen, ooblen;
	struct mtd_oob_ops ops;
	int rgn;

	printk(KERN_INFO "Scanning device for bad blocks\n");

	/* Check the first two pages of every eraseblock */
	len = 2;

	/* We need only read few bytes from the OOB area */
	scanlen = ooblen = 0;
	readlen = bd->len;

	/* chip == -1 case only */
	/* Note that numblocks is 2 * (real numblocks) here;
	 * see i += 2 below as it makes shifting and masking less painful
	 */
	numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
	startblock = 0;
	from = 0;

	/* Reused for every read: only bd->len OOB bytes are fetched */
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooblen = readlen;
	ops.oobbuf = buf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;

	for (i = startblock; i < numblocks; ) {
		int ret;

		for (j = 0; j < len; j++) {
			/* No need to read pages fully,
			 * just read required OOB bytes */
			ret = onenand_bbt_read_oob(mtd,
					from + j * this->writesize + bd->offs, &ops);

			/* If it is a initial bad block, just ignore it */
			if (ret == ONENAND_BBT_READ_FATAL_ERROR)
				return -EIO;

			if (ret || check_short_pattern(&buf[j * scanlen],
					scanlen, this->writesize, bd)) {
				/* Set both table bits (0b11) for this block */
				bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
				printk(KERN_INFO "OneNAND eraseblock %d is an "
					"initial bad block\n", i >> 1);
				mtd->ecc_stats.badblocks++;
				break;
			}
		}
		i += 2;

		/* Advance one eraseblock; Flex-OneNAND regions may have
		 * different erase sizes, so look the region up each time */
		if (FLEXONENAND(this)) {
			rgn = flexonenand_region(mtd, from);
			from += mtd->eraseregions[rgn].erasesize;
		} else
			from += (1 << bbm->bbt_erase_shift);
	}
	return 0;
}
/**
 * onenand_memory_bbt - [GENERIC] create a memory based bad block table
 * @param mtd	MTD device structure
 * @param bd	descriptor for the good/bad block search pattern
 *
 * The function creates a memory based bbt by scanning the device
 * for manufacturer / software marked good / bad blocks
 */
static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct onenand_chip *this = mtd->priv;

	/* Empty-page checking is not wanted for a plain memory scan */
	bd->options &= ~NAND_BBT_SCANEMPTY;
	return create_bbt(mtd, this->page_buf, bd, -1);
}
/**
 * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
 * @param mtd		MTD device structure
 * @param offs		offset in the device
 * @param allowbbt	allow access to bad block table region
 *
 * Looks up the two-bit entry for the block containing @offs in the
 * in-memory bad block table and returns 0 for a usable block, 1 for a
 * bad (or reserved, unless @allowbbt) block.
 */
static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int entry;
	uint8_t mark;

	/* Two table bits per block: compute block number * 2 */
	entry = (int) (onenand_block(this, offs) << 1);
	mark = (bbm->bbt[entry >> 3] >> (entry & 0x06)) & 0x03;

	pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
		 (unsigned int) offs, entry >> 1, mark);

	if (mark == 0x00)
		return 0;			/* good block */
	if (mark == 0x02)
		return allowbbt ? 0 : 1;	/* reserved region */
	return 1;				/* bad block */
}
/**
 * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
 * @param mtd	MTD device structure
 * @param bd	descriptor for the good/bad block search pattern
 *
 * Allocates the in-memory bad block table (two bits per eraseblock) and
 * populates it by scanning the device for factory bad-block marks.
 *
 * The bad block table memory is allocated here. It is freed
 * by the onenand_release function.
 */
int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int table_len;
	int ret;

	/* Two bits of state per block: chipsize / blocksize / 4 bytes */
	table_len = this->chipsize >> (this->erase_shift + 2);
	bbm->bbt = kzalloc(table_len, GFP_KERNEL);
	if (!bbm->bbt)
		return -ENOMEM;

	/* Set the bad block position */
	bbm->badblockpos = ONENAND_BADBLOCK_POS;
	/* Set erase shift */
	bbm->bbt_erase_shift = this->erase_shift;

	if (!bbm->isbad_bbt)
		bbm->isbad_bbt = onenand_isbad_bbt;

	/* Scan the device to build a memory based bad block table */
	ret = onenand_memory_bbt(mtd, bd);
	if (ret) {
		printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
		kfree(bbm->bbt);
		bbm->bbt = NULL;
	}
	return ret;
}
/*
 * Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks.
 */
/* A good block carries 0xff in both marker bytes */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Large-page descriptor: match two bytes at OOB offset 0 */
static struct nand_bbt_descr largepage_memorybased = {
	.options = 0,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
/**
 * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
 * @param mtd	MTD device structure
 *
 * Allocates the bad block management state, picks the default scan
 * pattern and hands off to onenand_scan_bbt().
 */
int onenand_default_bbt(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm;

	bbm = kzalloc(sizeof(*bbm), GFP_KERNEL);
	if (!bbm)
		return -ENOMEM;
	this->bbm = bbm;

	/* 1KB page has same configuration as 2KB page */
	if (!bbm->badblock_pattern)
		bbm->badblock_pattern = &largepage_memorybased;

	return onenand_scan_bbt(mtd, bbm->badblock_pattern);
}
EXPORT_SYMBOL(onenand_scan_bbt);
EXPORT_SYMBOL(onenand_default_bbt);
| gpl-2.0 |
Art-Chen/android_kernel_samsung_galaxys2plus-common | arch/sparc/kernel/sys_sparc_32.c | 7510 | 7574 | /* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/sparc
* platform.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
/* #define DEBUG_UNIMP_SYSCALL */
/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary. Every Sparc executable calls this very early on.
 */
asmlinkage unsigned long sys_getpagesize(void)
{
	/* Possibly older binaries want 8192 on sun4's? */
	return PAGE_SIZE;
}
#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
/*
 * Find an unmapped region of at least @len bytes, honouring sparc's
 * virtual cache aliasing rules (shared mappings must be SHMLBA-aligned)
 * and the sun4c 512MB user-space limit.  Returns an address on success
 * or -EINVAL / -ENOMEM.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE)
		return -ENOMEM;
	/* sun4c user mappings live below 0x20000000 */
	if (ARCH_SUN4C && len > 0x20000000)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	/* Shared mappings are aligned to the cache alias boundary */
	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr);
	else
		addr = PAGE_ALIGN(addr);

	/* First-fit walk of the VMA list starting at addr */
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) {
			/* Skip the sun4c hole and restart at PAGE_OFFSET */
			addr = PAGE_OFFSET;
			vmm = find_vma(current->mm, PAGE_OFFSET);
		}
		if (TASK_SIZE - PAGE_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr);
	}
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sparc_pipe(struct pt_regs *regs)
{
	int fd[2];
	int err = do_pipe_flags(fd, 0);

	if (err)
		return err;

	/* Second descriptor is returned via the %o1 register slot */
	regs->u_regs[UREG_I1] = fd[1];
	return fd[0];
}
/*
 * Validate an mmap request's address/length against the sparc address
 * space limits.  Returns 0 if acceptable, -EINVAL otherwise.
 */
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (ARCH_SUN4C) {
		/* sun4c user space is capped at 512MB */
		if (len > 0x20000000)
			return -EINVAL;
		if (addr < 0xe0000000 && addr + len > 0x20000000)
			return -EINVAL;
	}

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE)
		return -EINVAL;
	if (addr + len > TASK_SIZE - PAGE_SIZE)
		return -EINVAL;

	return 0;
}
/* Linux version of mmap */
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* mmap2 offsets arrive in fixed 4096-byte units; rescale to
	 * PAGE_SIZE units regardless of what PAGE_SIZE the kernel uses. */
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	/* Byte offset -> page offset conversion */
	unsigned long pgoff = off >> PAGE_SHIFT;

	/* no alignment check? */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
long sparc_remap_file_pages(unsigned long start, unsigned long size,
			    unsigned long prot, unsigned long pgoff,
			    unsigned long flags)
{
	/* This works on an existing mmap so we don't need to validate
	 * the range as that was done at the original mmap call.
	 */
	/* pgoff arrives in 4096-byte units (mmap2 convention); rescale
	 * to PAGE_SIZE units for the generic syscall. */
	return sys_remap_file_pages(start, size, prot,
				    (pgoff >> (PAGE_SHIFT - 12)), flags);
}
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
{
	static int count;	/* zero-initialized; rate-limits the log */

	/* Only complain about the first few unimplemented calls */
	if (count++ > 5)
		return -ENOSYS;

	printk ("%s[%d]: Unimplemented SPARC system call %d\n",
		current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif
	return -ENOSYS;
}
/* #define DEBUG_SPARC_BREAKPOINT */
/*
 * Handler for the user breakpoint trap: deliver SIGTRAP/TRAP_BRKPT to
 * the current task with the trapping PC as the reported address.
 */
asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
	siginfo_t info;

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->pc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
}
/*
 * Old-style sigaction.  NOTE(review): the entry path apparently passes
 * the signal number negated (hence the WARN and the negation below) —
 * confirm against the sparc syscall stubs.
 */
asmlinkage int
sparc_sigaction (int sig, const struct old_sigaction __user *act,
		 struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	WARN_ON_ONCE(sig >= 0);
	sig = -sig;

	if (act) {
		unsigned long mask;

		/* The single access_ok() covers the following unchecked
		 * __get_user() calls into the same structure. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
		new_ka.ka_restorer = NULL;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* In the clone() case we could copy half consistent
		 * state to the user, however this could sleep and
		 * deadlock us if we held the signal lock on SMP. So for
		 * now I take the easy way out and do no locking.
		 */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}
	return ret;
}
/*
 * New-style sigaction with an explicit restorer pointer and sigset size.
 */
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 void __user *restorer,
		 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int err;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
		new_ka.ka_restorer = restorer;
	}

	err = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (err || !oact)
		return err;

	if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
		return -EFAULT;
	return err;
}
/*
 * Return the NIS domain name to userspace.
 *
 * copy_to_user() may fault and sleep in the page-fault path, so the
 * user access must not happen while uts_sem is held (the fault path can
 * itself need uts_sem, deadlocking us).  Snapshot the name into a stack
 * buffer under the semaphore, drop it, then copy out.
 */
asmlinkage int sys_getdomainname(char __user *name, int len)
{
	int nlen, err;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out_unlock;
	memcpy(tmp, utsname()->domainname, nlen);

	up_read(&uts_sem);

	if (copy_to_user(name, tmp, nlen))
		return -EFAULT;
	return 0;

out_unlock:
	up_read(&uts_sem);
	return err;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	/* sparc syscall ABI: number in %g1, args in %o0-%o2, result in
	 * %o0; the carry flag is set by the kernel on error. */
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x10\n\t"		/* trap into the kernel */
		      "bcc 1f\n\t"		/* carry clear => success */
		      "mov %%o0, %0\n\t"	/* delay slot: copy result */
		      "sub %%g0, %%o0, %0\n\t"	/* error: negate errno */
		      "1:\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
| gpl-2.0 |
cannondalev2000/kernel_lge_msm8974 | drivers/mtd/onenand/onenand_bbt.c | 7510 | 6918 | /*
* linux/drivers/mtd/onenand/onenand_bbt.c
*
* Bad Block Table support for the OneNAND driver
*
* Copyright(c) 2005 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* Derived from nand_bbt.c
*
* TODO:
* Split BBT core and chip specific BBT.
*/
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/export.h>
/**
 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
 * @param buf	the buffer to search
 * @param len	the length of buffer to search
 * @param paglen	the pagelength
 * @param td	search pattern descriptor
 *
 * Compare td->len bytes at the start of @buf against the bad-block
 * signature in @td. @len and @paglen are part of the generic callback
 * signature and are unused here.
 *
 * Returns 0 when every byte matches, -1 on the first mismatch.
 */
static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
	const uint8_t *expect = td->pattern;
	int pos;

	for (pos = 0; pos < td->len; pos++)
		if (buf[pos] != expect[pos])
			return -1;
	return 0;
}
/**
 * create_bbt - [GENERIC] Create a bad block table by scanning the device
 * @param mtd	MTD device structure
 * @param buf	temporary buffer used for the per-page OOB reads
 * @param bd	descriptor for the good/bad block search pattern
 * @param chip	create the table for a specific chip, -1 read all chips.
 *		Applies only if NAND_BBT_PERCHIP option is set
 *
 * Create a bad block table by scanning the device for the given
 * good/bad block identify pattern.  Each eraseblock is represented by
 * two bits in bbm->bbt; bad blocks are marked 0b11.
 */
static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int i, j, numblocks, len, scanlen;
	int startblock;
	loff_t from;
	size_t readlen, ooblen;
	struct mtd_oob_ops ops;
	int rgn;

	printk(KERN_INFO "Scanning device for bad blocks\n");

	/* Check the first two pages of every eraseblock */
	len = 2;

	/* We need only read few bytes from the OOB area */
	scanlen = ooblen = 0;
	readlen = bd->len;

	/* chip == -1 case only */
	/* Note that numblocks is 2 * (real numblocks) here;
	 * see i += 2 below as it makes shifting and masking less painful
	 */
	numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
	startblock = 0;
	from = 0;

	/* Reused for every read: only bd->len OOB bytes are fetched */
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooblen = readlen;
	ops.oobbuf = buf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;

	for (i = startblock; i < numblocks; ) {
		int ret;

		for (j = 0; j < len; j++) {
			/* No need to read pages fully,
			 * just read required OOB bytes */
			ret = onenand_bbt_read_oob(mtd,
					from + j * this->writesize + bd->offs, &ops);

			/* If it is a initial bad block, just ignore it */
			if (ret == ONENAND_BBT_READ_FATAL_ERROR)
				return -EIO;

			if (ret || check_short_pattern(&buf[j * scanlen],
					scanlen, this->writesize, bd)) {
				/* Set both table bits (0b11) for this block */
				bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
				printk(KERN_INFO "OneNAND eraseblock %d is an "
					"initial bad block\n", i >> 1);
				mtd->ecc_stats.badblocks++;
				break;
			}
		}
		i += 2;

		/* Advance one eraseblock; Flex-OneNAND regions may have
		 * different erase sizes, so look the region up each time */
		if (FLEXONENAND(this)) {
			rgn = flexonenand_region(mtd, from);
			from += mtd->eraseregions[rgn].erasesize;
		} else
			from += (1 << bbm->bbt_erase_shift);
	}
	return 0;
}
/**
 * onenand_memory_bbt - [GENERIC] create a memory based bad block table
 * @param mtd	MTD device structure
 * @param bd	descriptor for the good/bad block search pattern
 *
 * The function creates a memory based bbt by scanning the device
 * for manufacturer / software marked good / bad blocks
 */
static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct onenand_chip *this = mtd->priv;

	/* Empty-page checking is not wanted for a plain memory scan */
	bd->options &= ~NAND_BBT_SCANEMPTY;
	return create_bbt(mtd, this->page_buf, bd, -1);
}
/**
 * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
 * @param mtd		MTD device structure
 * @param offs		offset in the device
 * @param allowbbt	allow access to bad block table region
 *
 * Looks up the two-bit entry for the block containing @offs in the
 * in-memory bad block table and returns 0 for a usable block, 1 for a
 * bad (or reserved, unless @allowbbt) block.
 */
static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int entry;
	uint8_t mark;

	/* Two table bits per block: compute block number * 2 */
	entry = (int) (onenand_block(this, offs) << 1);
	mark = (bbm->bbt[entry >> 3] >> (entry & 0x06)) & 0x03;

	pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
		 (unsigned int) offs, entry >> 1, mark);

	if (mark == 0x00)
		return 0;			/* good block */
	if (mark == 0x02)
		return allowbbt ? 0 : 1;	/* reserved region */
	return 1;				/* bad block */
}
/**
 * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
 * @param mtd	MTD device structure
 * @param bd	descriptor for the good/bad block search pattern
 *
 * Allocates the in-memory bad block table (two bits per eraseblock) and
 * populates it by scanning the device for factory bad-block marks.
 *
 * The bad block table memory is allocated here. It is freed
 * by the onenand_release function.
 */
int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm = this->bbm;
	int table_len;
	int ret;

	/* Two bits of state per block: chipsize / blocksize / 4 bytes */
	table_len = this->chipsize >> (this->erase_shift + 2);
	bbm->bbt = kzalloc(table_len, GFP_KERNEL);
	if (!bbm->bbt)
		return -ENOMEM;

	/* Set the bad block position */
	bbm->badblockpos = ONENAND_BADBLOCK_POS;
	/* Set erase shift */
	bbm->bbt_erase_shift = this->erase_shift;

	if (!bbm->isbad_bbt)
		bbm->isbad_bbt = onenand_isbad_bbt;

	/* Scan the device to build a memory based bad block table */
	ret = onenand_memory_bbt(mtd, bd);
	if (ret) {
		printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
		kfree(bbm->bbt);
		bbm->bbt = NULL;
	}
	return ret;
}
/*
 * Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks.
 */
/* A good block carries 0xff in both marker bytes */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Large-page descriptor: match two bytes at OOB offset 0 */
static struct nand_bbt_descr largepage_memorybased = {
	.options = 0,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
/**
 * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
 * @param mtd	MTD device structure
 *
 * Allocates the bad block management state, picks the default scan
 * pattern and hands off to onenand_scan_bbt().
 */
int onenand_default_bbt(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	struct bbm_info *bbm;

	bbm = kzalloc(sizeof(*bbm), GFP_KERNEL);
	if (!bbm)
		return -ENOMEM;
	this->bbm = bbm;

	/* 1KB page has same configuration as 2KB page */
	if (!bbm->badblock_pattern)
		bbm->badblock_pattern = &largepage_memorybased;

	return onenand_scan_bbt(mtd, bbm->badblock_pattern);
}
EXPORT_SYMBOL(onenand_scan_bbt);
EXPORT_SYMBOL(onenand_default_bbt);
| gpl-2.0 |
crpalmer/android_kernel_samsung_mondrianwifi | drivers/macintosh/windfarm_cpufreq_clamp.c | 9046 | 2417 | #include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cpufreq.h>
#include <asm/prom.h>
#include "windfarm.h"
#define VERSION "0.3"
static int clamped;
static struct wf_control *clamp_control;
/*
 * cpufreq policy notifier: when the clamp is engaged, cap the policy's
 * maximum frequency at the hardware minimum; otherwise allow the full
 * range.
 */
static int clamp_notifier_call(struct notifier_block *self,
			       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long ceiling;

	if (event != CPUFREQ_ADJUST)
		return 0;

	if (clamped)
		ceiling = policy->cpuinfo.min_freq;
	else
		ceiling = policy->cpuinfo.max_freq;
	cpufreq_verify_within_limits(policy, 0, ceiling);

	return 0;
}
static struct notifier_block clamp_notifier = {
.notifier_call = clamp_notifier_call,
};
/* windfarm set_value hook: record the clamp state and re-evaluate the
 * cpufreq policy so the notifier applies the new limit. */
static int clamp_set(struct wf_control *ct, s32 value)
{
	if (value)
		printk(KERN_INFO
		       "windfarm: Clamping CPU frequency to minimum !\n");
	else
		printk(KERN_INFO "windfarm: CPU frequency unclamped !\n");

	clamped = value;
	cpufreq_update_policy(0);
	return 0;
}
/* windfarm get_value hook: report the current clamp setting */
static int clamp_get(struct wf_control *ct, s32 *value)
{
	*value = clamped;
	return 0;
}
/* Smallest settable value: the clamp is effectively a boolean */
static s32 clamp_min(struct wf_control *ct)
{
	return 0;
}
/* Largest settable value: the clamp is effectively a boolean */
static s32 clamp_max(struct wf_control *ct)
{
	return 1;
}
/* windfarm control interface: exposes the clamp as a 0/1 control */
static struct wf_control_ops clamp_ops = {
	.set_value = clamp_set,
	.get_value = clamp_get,
	.get_min = clamp_min,
	.get_max = clamp_max,
	.owner = THIS_MODULE,
};
/*
 * Allocate and register the "cpufreq-clamp" windfarm control and hook
 * the cpufreq policy notifier.
 *
 * Fix: on the wf_register_control() failure path the cpufreq notifier
 * was left registered even though the module init fails, leaving a
 * stale callback installed.  Unregister it before bailing out.
 */
static int __init wf_cpufreq_clamp_init(void)
{
	struct wf_control *clamp;

	/* Don't register on old machines that use therm_pm72 for now */
	if (of_machine_is_compatible("PowerMac7,2") ||
	    of_machine_is_compatible("PowerMac7,3") ||
	    of_machine_is_compatible("RackMac3,1"))
		return -ENODEV;

	clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
	if (clamp == NULL)
		return -ENOMEM;

	cpufreq_register_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);

	clamp->ops = &clamp_ops;
	clamp->name = "cpufreq-clamp";
	if (wf_register_control(clamp))
		goto fail;
	clamp_control = clamp;
	return 0;
 fail:
	/* Undo the notifier registration so no stale callback remains */
	cpufreq_unregister_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);
	kfree(clamp);
	return -ENODEV;
}
/*
 * Module teardown.
 *
 * Fix: the cpufreq notifier was never unregistered on unload, leaving
 * the cpufreq core with a callback pointer into freed module text.
 */
static void __exit wf_cpufreq_clamp_exit(void)
{
	if (clamp_control)
		wf_unregister_control(clamp_control);
	cpufreq_unregister_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);
}
module_init(wf_cpufreq_clamp_init);
module_exit(wf_cpufreq_clamp_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("CPU frequency clamp for PowerMacs thermal control");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Tof37/Es209ra-3.x | arch/mips/sni/eisa.c | 9302 | 1176 | /*
* Virtual EISA root driver.
* Acts as a placeholder if we don't have a proper EISA bridge.
*
* (C) 2003 Marc Zyngier <maz@wild-wind.fr.eu.org>
* modified for SNI usage by Thomas Bogendoerfer
*
* This code is released under the GPL version 2.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/eisa.h>
#include <linux/init.h>
/* The default EISA device parent (virtual root device).
 * Now use a platform device, since that's the obvious choice. */
static struct platform_device eisa_root_dev = {
	.name = "eisa",
	.id = 0,
};

/* Virtual EISA root: all slots, whole I/O port resource, 32-bit DMA */
static struct eisa_root_device eisa_bus_root = {
	.dev = &eisa_root_dev.dev,
	.bus_base_addr = 0,
	.res = &ioport_resource,
	.slots = EISA_MAX_SLOTS,
	.dma_mask = 0xffffffff,
	.force_probe = 1,
};
/*
 * Register the virtual EISA root bus.
 *
 * Fix: the original tested `if (!r) return r;`, i.e. it returned
 * *success* immediately when platform_device_register() succeeded
 * (so the EISA root was never registered) and carried on when it
 * failed.  The check must bail out on a non-zero error code.
 */
int __init sni_eisa_root_init(void)
{
	int r;

	r = platform_device_register(&eisa_root_dev);
	if (r)
		return r;

	dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);

	if (eisa_root_register(&eisa_bus_root)) {
		/* A real bridge may have been registered before
		 * us. So quietly unregister. */
		platform_device_unregister(&eisa_root_dev);
		return -1;
	}
	return 0;
}
| gpl-2.0 |
n1kolaa/nv-3.1-grouper | arch/cris/arch-v32/drivers/pci/dma.c | 12630 | 1156 | /*
* Dynamic DMA mapping support.
*
* On cris there is no hardware dynamic DMA address translation,
* so consistent alloc/free are merely page allocation/freeing.
* The rest of the dynamic DMA mapping interface is implemented
* in asm/pci.h.
*
* Borrowed from i386.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <asm/io.h>
/*
 * Allocate a DMA-coherent buffer.  cris has no hardware DMA address
 * translation, so this is plain page allocation; the bus handle is the
 * physical address of the zeroed buffer.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	int order = get_order(size);

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* A device-private coherent pool takes precedence, if present */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* No device, or device can't address all of memory: use ZONE_DMA */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, order);

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
/* Release a buffer obtained from dma_alloc_coherent() */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	/* Only hit the page allocator when the buffer did not come from
	 * a per-device coherent pool */
	if (dma_release_from_coherent(dev, order, vaddr))
		return;
	free_pages((unsigned long)vaddr, order);
}
| gpl-2.0 |
InES-HPMM/linux-l4t | drivers/net/wireless/sd8897/mlan/mlan_scan.c | 87 | 166774 | /** @file mlan_scan.c
*
* @brief Functions implementing wlan scan IOCTL and firmware command APIs
*
* IOCTL handlers as well as command preparation and response routines
* for sending scan commands to the firmware.
*
* Copyright (C) 2008-2013, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
/******************************************************
Change log:
10/28/2008: initial version
******************************************************/
#include "mlan.h"
#include "mlan_join.h"
#include "mlan_util.h"
#include "mlan_fw.h"
#include "mlan_main.h"
#include "mlan_11n.h"
#include "mlan_11ac.h"
#include "mlan_11h.h"
/********************************************************
Local Constants
********************************************************/
/** The maximum number of channels the firmware can scan per command */
#define MRVDRV_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
/**
* Number of channels to scan per firmware scan command issuance.
*
* Number restricted to prevent hitting the limit on the amount of scan data
* returned in a single firmware scan command.
*/
#define MRVDRV_CHANNELS_PER_SCAN_CMD 4
/** Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(MrvlIEtypesHeader_t) \
+ (MRVDRV_MAX_CHANNELS_PER_SPECIFIC_SCAN \
* sizeof(ChanScanParamSet_t)))
/** Memory needed to store supported rate */
#define RATE_TLV_MAX_SIZE (sizeof(MrvlIEtypes_RatesParamSet_t) + HOSTCMD_SUPPORTED_RATES)
/** Memory needed to store a max number/size WildCard SSID TLV for a firmware scan */
#define WILDCARD_SSID_TLV_MAX_SIZE \
(MRVDRV_MAX_SSID_LIST_LENGTH * \
(sizeof(MrvlIEtypes_WildCardSsIdParamSet_t) + \
MRVDRV_MAX_SSID_LENGTH))
/** WPS TLV MAX size is MAX IE size plus 2 bytes for t_u16 MRVL TLV extension */
#define WPS_TLV_MAX_SIZE (sizeof(IEEEtypes_VendorSpecific_t) + 2)
/** Maximum memory needed for a wlan_scan_cmd_config with all TLVs at max */
#define MAX_SCAN_CFG_ALLOC (sizeof(wlan_scan_cmd_config) \
+ sizeof(MrvlIEtypes_NumProbes_t) \
+ sizeof(MrvlIETypes_HTCap_t) \
+ CHAN_TLV_MAX_SIZE \
+ RATE_TLV_MAX_SIZE \
+ WILDCARD_SSID_TLV_MAX_SIZE \
+ WPS_TLV_MAX_SIZE)
/********************************************************
Local Variables
********************************************************/
/**
* Interally used to send a configured scan cmd between driver routines
*/
typedef union {
/** Scan configuration (variable length) */
wlan_scan_cmd_config config;
/** Max allocated block */
t_u8 config_alloc_buf[MAX_SCAN_CFG_ALLOC];
} wlan_scan_cmd_config_tlv;
/********************************************************
Global Variables
********************************************************/
/********************************************************
Local Functions
********************************************************/
/** Cipher suite definition */
enum cipher_suite {
	CIPHER_SUITE_WEP40,
	CIPHER_SUITE_TKIP,
	CIPHER_SUITE_CCMP,
	CIPHER_SUITE_WEP104,
	CIPHER_SUITE_MAX
};

/* WPA (vendor IE) OUI + suite-type bytes, indexed by enum cipher_suite */
static t_u8 wpa_oui[CIPHER_SUITE_MAX][4] = {
	{0x00, 0x50, 0xf2, 0x01},	/* WEP40 */
	{0x00, 0x50, 0xf2, 0x02},	/* TKIP */
	{0x00, 0x50, 0xf2, 0x04},	/* AES */
	{0x00, 0x50, 0xf2, 0x05},	/* WEP104 */
};

/* RSN IE OUI + suite-type bytes, same indexing */
static t_u8 rsn_oui[CIPHER_SUITE_MAX][4] = {
	{0x00, 0x0f, 0xac, 0x01},	/* WEP40 */
	{0x00, 0x0f, 0xac, 0x02},	/* TKIP */
	{0x00, 0x0f, 0xac, 0x04},	/* AES */
	{0x00, 0x0f, 0xac, 0x05},	/* WEP104 */
};
/**
 * @brief This function will update the channel statistics from scan result
 *
 * @param pmpriv	 A pointer to mlan_private structure
 * @param pchanstats_tlv A pointer to MrvlIEtypes_ChannelStats_t tlv
 *
 * @return NA
 */
void
wlan_update_chan_statistics(mlan_private * pmpriv,
			    MrvlIEtypes_ChannelStats_t * pchanstats_tlv)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	t_u8 i, j;
	/* Entries follow directly after the TLV header */
	ChanStatistics_t *pchan_stats =
		(ChanStatistics_t *) ((t_u8 *) pchanstats_tlv +
				      sizeof(MrvlIEtypesHeader_t));
	t_u8 num_chan =
		wlan_le16_to_cpu(pchanstats_tlv->header.len) /
		sizeof(ChanStatistics_t);

	ENTER();
	/* Merge each reported channel into the adapter's table,
	 * matching entries by channel number */
	for (j = 0; j < num_chan; j++) {
		for (i = 0; i < pmadapter->num_in_chan_stats; i++) {
			if (pmadapter->pchan_stats[i].chan_num ==
			    pchan_stats->chan_num) {
				/* Firmware fields are little-endian:
				 * convert in place before copying */
				pchan_stats->total_networks =
					wlan_le16_to_cpu(pchan_stats->
							 total_networks);
				pchan_stats->cca_scan_duration =
					wlan_le16_to_cpu(pchan_stats->
							 cca_scan_duration);
				pchan_stats->cca_busy_duration =
					wlan_le16_to_cpu(pchan_stats->
							 cca_busy_duration);
				PRINTM(MCMND,
				       "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
				       pchan_stats->chan_num,
				       pchan_stats->noise,
				       pchan_stats->total_networks,
				       pchan_stats->cca_scan_duration,
				       pchan_stats->cca_busy_duration);
				memcpy(pmadapter, &pmadapter->pchan_stats[i],
				       pchan_stats, sizeof(ChanStatistics_t));
				break;
			}
		}
		pchan_stats++;
	}
	LEAVE();
	return;
}
/**
 * @brief Search the PTK cipher-suite list of a WPA/RSN IE body for an OUI
 *
 * There may be several pairwise cipher suites advertised; each entry is
 * sizeof(ie_body->PtkBody) bytes long and the count precedes the list.
 *
 * @param pmadapter	A pointer to mlan_adapter structure
 * @param ie_body	IE body, positioned at the PTK suite count
 * @param oui		The OUI + suite-type to look for
 *
 * @return MLAN_OUI_PRESENT if found, MLAN_OUI_NOT_PRESENT otherwise
 */
static t_u8
search_oui_in_ie(mlan_adapter * pmadapter, IEBody * ie_body, t_u8 * oui)
{
	t_u8 remaining = ie_body->PtkCnt[0];

	ENTER();

	/* Walk every advertised suite; succeed on the first match */
	for (; remaining; --remaining) {
		if (!memcmp(pmadapter, ie_body->PtkBody, oui,
			    sizeof(ie_body->PtkBody))) {
			LEAVE();
			return MLAN_OUI_PRESENT;
		}
		ie_body = (IEBody *) ((t_u8 *) ie_body +
				      sizeof(ie_body->PtkBody));
	}

	PRINTM(MINFO, "The OUI %x:%x:%x:%x is not found in PTK\n", oui[0],
	       oui[1], oui[2], oui[3]);
	LEAVE();
	return MLAN_OUI_NOT_PRESENT;
}
/**
* @brief This function will pass the correct ie and oui to search_oui_in_ie
*
* Check the pbss_desc for appropriate IE and then check if RSN IE has AES
* OUI in it. If RSN IE does not have AES in PTK then return 0;
*
* @param pbss_desc A pointer to current BSS descriptor
* @return 0 on failure to find AES OUI, 1 on success.
*/
static t_u8
is_rsn_oui_present(mlan_adapter * pmadapter, BSSDescriptor_t * pbss_desc,
		   t_u32 cipher_suite)
{
	t_u8 result = MLAN_OUI_NOT_PRESENT;
	IEBody *pie_body = MNULL;
	ENTER();
	/* Only inspect the descriptor when it actually carries an RSN IE */
	if (pbss_desc->prsn_ie &&
	    (pbss_desc->prsn_ie->ieee_hdr.element_id == RSN_IE)) {
		/* Skip past the group-cipher OUI to reach the PTK list */
		pie_body =
			(IEBody *) (((t_u8 *) pbss_desc->prsn_ie->data) +
				    RSN_GTK_OUI_OFFSET);
		result = search_oui_in_ie(pmadapter, pie_body,
					  &rsn_oui[cipher_suite][0]);
	}
	LEAVE();
	return result;
}
/**
 *  @brief This function will pass the correct ie and oui to search_oui_in_ie
 *
 *  Check the pbss_desc for a WPA IE, then check whether the WPA IE carries
 *  the requested cipher-suite OUI in its PTK list. If the WPA IE does not
 *  have that OUI in PTK then return 0.
 *
 *  @param pmadapter    A pointer to mlan_adapter structure
 *  @param pbss_desc    A pointer to current BSS descriptor
 *  @param cipher_suite Index into the WPA OUI table of the suite to find
 *  @return             0 on failure to find the OUI, 1 on success.
 */
static t_u8
is_wpa_oui_present(mlan_adapter * pmadapter, BSSDescriptor_t * pbss_desc,
		   t_u32 cipher_suite)
{
	t_u8 result = MLAN_OUI_NOT_PRESENT;
	IEBody *pie_body = MNULL;
	ENTER();
	/* Only inspect the descriptor when it actually carries a WPA IE */
	if (pbss_desc->pwpa_ie &&
	    (pbss_desc->pwpa_ie->vend_hdr.element_id == WPA_IE)) {
		pie_body = (IEBody *) pbss_desc->pwpa_ie->data;
		result = search_oui_in_ie(pmadapter, pie_body,
					  &wpa_oui[cipher_suite][0]);
	}
	LEAVE();
	return result;
}
/**
 *  @brief compare config band and a band from the scan result,
 *  which is defined by function radio_type_to_band(t_u8 radio_type) below
 *
 *  @param cfg_band  band configured
 *  @param scan_band band from scan result
 *
 *  @return matched: non-zero. unmatched: 0
 *
 */
static t_u8
wlan_is_band_compatible(t_u8 cfg_band, t_u8 scan_band)
{
	t_u8 allowed;
	/* Expand the scanned band into the full set of config bands that
	   can operate on it; anything that is not 5 GHz is treated as the
	   2.4 GHz family. */
	if (scan_band == BAND_A)
		allowed = BAND_A | BAND_AN | BAND_AAC;
	else
		allowed = BAND_B | BAND_G | BAND_GN | BAND_GAC;
	return cfg_band & allowed;
}
/**
* @brief Convert radio type scan parameter to a band config used in join cmd
*
* @param radio_type Scan parameter indicating the radio used for a channel
* in a scan command.
*
* @return Band type conversion of scanBand used in join/assoc cmds
*
*/
static t_u8
radio_type_to_band(t_u8 radio_type)
{
	/* Only the A-band radio maps to BAND_A; every other radio type
	   (including HostCmd_SCAN_RADIO_TYPE_BG) falls back to BAND_G. */
	return (radio_type == HostCmd_SCAN_RADIO_TYPE_A) ? BAND_A : BAND_G;
}
/**
* @brief This function finds the best SSID in the Scan List
*
* Search the scan table for the best SSID that also matches the current
* adapter network preference (infrastructure or adhoc)
*
* @param pmpriv A pointer to mlan_private structure
* @return index in BSSID list
*/
static t_s32
wlan_find_best_network_in_list(IN mlan_private * pmpriv)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	t_u32 bss_mode = pmpriv->bss_mode;
	t_s32 best_idx = -1;
	t_s32 best_rssi = 0;
	t_u32 idx;
	t_u8 candidate;
	ENTER();
	PRINTM(MINFO, "Num of BSSIDs = %d\n", pmadapter->num_in_scan_table);
	for (idx = 0; idx < pmadapter->num_in_scan_table; idx++) {
		/* In infra/IBSS mode only compatible networks are eligible;
		   in auto mode every scan entry competes on RSSI alone. */
		candidate = MTRUE;
		if (bss_mode == MLAN_BSS_MODE_INFRA ||
		    bss_mode == MLAN_BSS_MODE_IBSS) {
			if (wlan_is_network_compatible(pmpriv, idx, bss_mode)
			    < 0)
				candidate = MFALSE;
		}
		if (candidate &&
		    (SCAN_RSSI(pmadapter->pscan_table[idx].rssi) >
		     best_rssi)) {
			best_rssi = SCAN_RSSI(pmadapter->pscan_table[idx].
					      rssi);
			best_idx = idx;
		}
	}
	LEAVE();
	return best_idx;
}
/**
* @brief Create a channel list for the driver to scan based on region info
*
* Use the driver region/band information to construct a comprehensive list
* of channels to scan. This routine is used for any scan that is not
* provided a specific channel list to scan.
*
* @param pmpriv A pointer to mlan_private structure
* @param puser_scan_in MNULL or pointer to scan configuration parameters
* @param pscan_chan_list Output parameter: Resulting channel list to scan
* @param filtered_scan Flag indicating whether or not a BSSID or SSID filter
* is being sent in the command to firmware. Used to
* increase the number of channels sent in a scan
* command and to disable the firmware channel scan
* filter.
*
* @return N/A
*/
static t_void
wlan_scan_create_channel_list(IN mlan_private * pmpriv,
			      IN const wlan_user_scan_cfg * puser_scan_in,
			      OUT ChanScanParamSet_t * pscan_chan_list,
			      IN t_u8 filtered_scan)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	region_chan_t *pscan_region;
	chan_freq_power_t *cfp;
	t_u32 region_idx;
	t_u32 chan_idx = 0;	/* running index into pscan_chan_list */
	t_u32 next_chan;
	t_u8 scan_type;
	t_u8 radio_type;
	ENTER();
	/* Walk every region/band table entry and append its channels */
	for (region_idx = 0;
	     region_idx < NELEMENTS(pmadapter->region_channel); region_idx++) {
		if (wlan_11d_is_enabled(pmpriv) &&
		    pmpriv->media_connected != MTRUE) {
			/* Scan all the supported chan for the first scan */
			if (!pmadapter->universal_channel[region_idx].valid)
				continue;
			pscan_region =
				&pmadapter->universal_channel[region_idx];
		} else {
			if (!pmadapter->region_channel[region_idx].valid)
				continue;
			pscan_region = &pmadapter->region_channel[region_idx];
		}
		/* If the user requested a specific band (no channel list),
		   skip regions that do not belong to that band */
		if (puser_scan_in && !puser_scan_in->chan_list[0].chan_number &&
		    puser_scan_in->chan_list[0].radio_type & BAND_SPECIFIED) {
			radio_type =
				puser_scan_in->chan_list[0].
				radio_type & ~BAND_SPECIFIED;
			if (!radio_type && (pscan_region->band != BAND_B) &&
			    (pscan_region->band != BAND_G))
				continue;
			if (radio_type && (pscan_region->band != BAND_A))
				continue;
		}
		/* Skip regions whose band the current config cannot use */
		if (!wlan_is_band_compatible
		    (pmpriv->config_bands | pmadapter->adhoc_start_band,
		     pscan_region->band))
			continue;
		for (next_chan = 0;
		     next_chan < pscan_region->num_cfp;
		     next_chan++, chan_idx++) {
			/* Set the default scan type to the user specified
			   type, will later be changed to passive on a per
			   channel basis if restricted by regulatory
			   requirements (11d or 11h) */
			scan_type = pmadapter->scan_type;
			cfp = pscan_region->pcfp + next_chan;
			if (scan_type == MLAN_SCAN_TYPE_ACTIVE
			    && wlan_11d_is_enabled(pmpriv)) {
				scan_type = wlan_11d_get_scan_type(pmadapter,
								   pscan_region->
								   band,
								   (t_u8) cfp->
								   channel,
								   &pmadapter->
								   parsed_region_chan);
			}
			switch (pscan_region->band) {
			case BAND_A:
				pscan_chan_list[chan_idx].radio_type =
					HostCmd_SCAN_RADIO_TYPE_A;
				if (!wlan_11d_is_enabled(pmpriv)) {
					/* 11D not available... play it safe on
					   DFS channels */
					if (wlan_11h_radar_detect_required
					    (pmpriv, (t_u8) cfp->channel))
						scan_type =
							MLAN_SCAN_TYPE_PASSIVE;
				}
				break;
			case BAND_B:
			case BAND_G:
				if (!wlan_11d_is_enabled(pmpriv))
					if (wlan_bg_scan_type_is_passive
					    (pmpriv, (t_u8) cfp->channel)) {
						scan_type =
							MLAN_SCAN_TYPE_PASSIVE;
					}
				pscan_chan_list[chan_idx].radio_type =
					HostCmd_SCAN_RADIO_TYPE_BG;
				break;
			default:
				pscan_chan_list[chan_idx].radio_type =
					HostCmd_SCAN_RADIO_TYPE_BG;
				break;
			}
			/* Dwell time: user override, else pick by scan type */
			if (puser_scan_in &&
			    puser_scan_in->chan_list[0].scan_time) {
				pscan_chan_list[chan_idx].max_scan_time =
					wlan_cpu_to_le16((t_u16) puser_scan_in->
							 chan_list[0].
							 scan_time);
			} else if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				pscan_chan_list[chan_idx].max_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 passive_scan_time);
			} else if (filtered_scan) {
				pscan_chan_list[chan_idx].max_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 specific_scan_time);
			} else {
				pscan_chan_list[chan_idx].max_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 active_scan_time);
			}
			if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				pscan_chan_list[chan_idx].chan_scan_mode.
					passive_scan = MTRUE;
			} else {
				pscan_chan_list[chan_idx].chan_scan_mode.
					passive_scan = MFALSE;
			}
			pscan_chan_list[chan_idx].chan_number =
				(t_u8) cfp->channel;
			/* A filtered scan disables the firmware's own
			   channel filter so all requested channels report */
			if (filtered_scan) {
				pscan_chan_list[chan_idx].chan_scan_mode.
					disable_chan_filt = MTRUE;
			}
		}
	}
	LEAVE();
}
/**
* @brief Add WPS IE to probe request frame
*
* @param pmpriv A pointer to mlan_private structure
* @param pptlv_out A pointer to TLV to fill in
*
* @return N/A
*/
static void
wlan_add_wps_probe_request_ie(IN mlan_private * pmpriv, OUT t_u8 ** pptlv_out)
{
	MrvlIEtypesHeader_t *ptlv_hdr;
	ENTER();
	/* Nothing to append when no WPS IE has been configured */
	if (!pmpriv->wps.wps_ie.vend_hdr.len) {
		LEAVE();
		return;
	}
	/* Emit the TLV header for the vendor-specific (WPS) IE */
	ptlv_hdr = (MrvlIEtypesHeader_t *) * pptlv_out;
	ptlv_hdr->type = wlan_cpu_to_le16(VENDOR_SPECIFIC_221);
	ptlv_hdr->len = wlan_cpu_to_le16(pmpriv->wps.wps_ie.vend_hdr.len);
	*pptlv_out += sizeof(MrvlIEtypesHeader_t);
	/* Copy the IE payload (starting at the OUI) right after the header */
	memcpy(pmpriv->adapter, *pptlv_out,
	       pmpriv->wps.wps_ie.vend_hdr.oui,
	       pmpriv->wps.wps_ie.vend_hdr.len);
	/* Advance past this IE plus the header of the NEXT TLV slot,
	   matching the original pointer arithmetic */
	*pptlv_out += (pmpriv->wps.wps_ie.vend_hdr.len
		       + sizeof(MrvlIEtypesHeader_t));
	LEAVE();
}
/**
* @brief Construct and send multiple scan config commands to the firmware
*
* Previous routines have created a wlan_scan_cmd_config with any requested
* TLVs. This function splits the channel TLV into max_chan_per_scan lists
* and sends the portion of the channel TLV along with the other TLVs
* to the wlan_cmd routines for execution in the firmware.
*
* @param pmpriv A pointer to mlan_private structure
* @param pioctl_buf A pointer to MLAN IOCTL Request buffer
* @param max_chan_per_scan Maximum number channels to be included in each
* scan command sent to firmware
* @param filtered_scan Flag indicating whether or not a BSSID or SSID
* filter is being used for the firmware command
* scan command sent to firmware
* @param pscan_cfg_out Scan configuration used for this scan.
* @param pchan_tlv_out Pointer in the pscan_cfg_out where the channel TLV
* should start. This is past any other TLVs that
* must be sent down in each firmware command.
* @param pscan_chan_list List of channels to scan in max_chan_per_scan segments
*
* @return MLAN_STATUS_SUCCESS or error return otherwise
*/
static mlan_status
wlan_scan_channel_list(IN mlan_private * pmpriv,
		       IN t_void * pioctl_buf,
		       IN t_u32 max_chan_per_scan,
		       IN t_u8 filtered_scan,
		       OUT wlan_scan_cmd_config * pscan_cfg_out,
		       OUT MrvlIEtypes_ChanListParamSet_t * pchan_tlv_out,
		       IN ChanScanParamSet_t * pscan_chan_list)
{
	mlan_status ret = MLAN_STATUS_SUCCESS;
	mlan_adapter *pmadapter = pmpriv->adapter;
	ChanScanParamSet_t *ptmp_chan_list;
	ChanScanParamSet_t *pstart_chan;
	pmlan_ioctl_req pioctl_req = (mlan_ioctl_req *) pioctl_buf;
	t_u8 *pchan_tlv_out_temp = MNULL;
	t_u8 *ptlv_temp = MNULL;	/* saved copy of the TLV buffer for JP CH14 */
	t_bool foundJPch14 = MFALSE;
	/* NOTE(review): tlv_buf_len is t_u16 but is assigned from a (t_u32)
	   cast below; assumes the scan config buffer stays well under 64 KB
	   -- confirm against MAX_SCAN_CFG_ALLOC */
	t_u16 tlv_buf_len = 0;
	t_u32 tlv_idx;
	t_u32 total_scan_time;
	t_u32 done_early;
	t_u32 cmd_no;
	t_u32 first_chan = 1;	/* flag the very first channel across commands */
	mlan_callbacks *pcb = (mlan_callbacks *) & pmadapter->callbacks;
	ENTER();
	if (!pscan_cfg_out || !pchan_tlv_out || !pscan_chan_list) {
		PRINTM(MINFO, "Scan: Null detect: %p, %p, %p\n",
		       pscan_cfg_out, pchan_tlv_out, pscan_chan_list);
		if (pioctl_req)
			pioctl_req->status_code = MLAN_ERROR_CMD_SCAN_FAIL;
		LEAVE();
		return MLAN_STATUS_FAILURE;
	}
	if (!pscan_chan_list->chan_number) {
		PRINTM(MERROR, "Scan: No channel configured\n");
		if (pioctl_req)
			pioctl_req->status_code = MLAN_ERROR_CMD_SCAN_FAIL;
		LEAVE();
		return MLAN_STATUS_FAILURE;
	}
	/* check expiry before preparing scan list - may affect blacklist */
	wlan_11h_get_csa_closed_channel(pmpriv);
	pchan_tlv_out->header.type = wlan_cpu_to_le16(TLV_TYPE_CHANLIST);
	/* Set the temp channel struct pointer to the start of the desired list
	 */
	ptmp_chan_list = pscan_chan_list;
	/* Loop through the desired channel list, sending a new firmware scan
	   commands for each max_chan_per_scan channels (or for 1,6,11
	   individually if configured accordingly) */
	while (ptmp_chan_list->chan_number) {
		tlv_idx = 0;
		total_scan_time = 0;
		pchan_tlv_out->header.len = 0;
		pstart_chan = ptmp_chan_list;
		done_early = MFALSE;
		/*
		 * Construct the Channel TLV for the scan command.  Continue to
		 * insert channel TLVs until:
		 *   - the tlv_idx hits the maximum configured per scan command
		 *   - the next channel to insert is 0 (end of desired channel list)
		 *   - done_early is set (controlling individual scanning of 1,6,11)
		 */
		while (tlv_idx < max_chan_per_scan &&
		       ptmp_chan_list->chan_number && !done_early) {
			/* Blacklisted channels are silently skipped */
			if (wlan_is_chan_blacklisted(pmpriv,
						     radio_type_to_band
						     (ptmp_chan_list->
						      radio_type),
						     ptmp_chan_list->
						     chan_number)) {
				ptmp_chan_list++;
				continue;
			}
			if (first_chan) {
				ptmp_chan_list->chan_scan_mode.first_chan =
					MTRUE;
				first_chan = 0;
			}
			PRINTM(MINFO,
			       "Scan: Chan(%3d), Radio(%d), Mode(%d,%d), Dur(%d)\n",
			       ptmp_chan_list->chan_number,
			       ptmp_chan_list->radio_type,
			       ptmp_chan_list->chan_scan_mode.passive_scan,
			       ptmp_chan_list->chan_scan_mode.disable_chan_filt,
			       wlan_le16_to_cpu(ptmp_chan_list->max_scan_time));
			/* Leaving the JP CH14 special case: restore the TLV
			   buffer that was saved before the Rates TLV was
			   rewritten for 11B-only operation */
			if (foundJPch14 == MTRUE) {
				foundJPch14 = MFALSE;
				/* Restore the TLV buffer */
				pchan_tlv_out =
					(MrvlIEtypes_ChanListParamSet_t *)
					pchan_tlv_out_temp;
				pchan_tlv_out->header.type =
					wlan_cpu_to_le16(TLV_TYPE_CHANLIST);
				pchan_tlv_out->header.len = 0;
				if (ptlv_temp) {
					memcpy(pmadapter,
					       pscan_cfg_out->tlv_buf,
					       ptlv_temp, tlv_buf_len);
					pcb->moal_mfree(pmadapter->pmoal_handle,
							ptlv_temp);
					ptlv_temp = MNULL;
				}
			}
			/* Special Case: For Japan, Scan on CH14 for 11G rates
			   is not allowed Hence Rates TLV needs to be updated
			   to support only 11B rates */
			if ((pmadapter->region_code == COUNTRY_CODE_JP_40 ||
			     pmadapter->region_code == COUNTRY_CODE_JP_FF)
			    && (ptmp_chan_list->chan_number == 14)) {
				t_u8 *ptlv_pos = pscan_cfg_out->tlv_buf;
				t_u16 old_ratetlv_len, new_ratetlv_len;
				MrvlIEtypesHeader_t *header;
				MrvlIEtypes_RatesParamSet_t *prates_tlv;
				/* Preserve the current TLV buffer */
				ret = pcb->moal_malloc(pmadapter->pmoal_handle,
						       MAX_SCAN_CFG_ALLOC -
						       CHAN_TLV_MAX_SIZE,
						       MLAN_MEM_DEF,
						       (t_u8 **) & ptlv_temp);
				if (ret != MLAN_STATUS_SUCCESS || !ptlv_temp) {
					PRINTM(MERROR,
					       "Memory allocation for pscan_cfg_out failed!\n");
					if (pioctl_req)
						pioctl_req->status_code =
							MLAN_ERROR_NO_MEM;
					LEAVE();
					return MLAN_STATUS_FAILURE;
				}
				pchan_tlv_out_temp = (t_u8 *) pchan_tlv_out;
				tlv_buf_len =
					(t_u32) (pchan_tlv_out_temp -
						 pscan_cfg_out->tlv_buf);
				memcpy(pmadapter, ptlv_temp, ptlv_pos,
				       tlv_buf_len);
				/* Search for Rates TLV */
				while ((!foundJPch14) &&
				       (ptlv_pos < pchan_tlv_out_temp)) {
					header = (MrvlIEtypesHeader_t *)
						ptlv_pos;
					if (header->type ==
					    wlan_cpu_to_le16(TLV_TYPE_RATES))
						foundJPch14 = MTRUE;
					else
						ptlv_pos +=
							(sizeof
							 (MrvlIEtypesHeader_t) +
							 wlan_le16_to_cpu
							 (header->len));
				}
				if (foundJPch14) {
					/* Update the TLV buffer with *new*
					   Rates TLV and rearrange remaining
					   TLV buffer */
					prates_tlv =
						(MrvlIEtypes_RatesParamSet_t *)
						ptlv_pos;
					old_ratetlv_len =
						sizeof(MrvlIEtypesHeader_t) +
						wlan_le16_to_cpu(prates_tlv->
								 header.len);
					prates_tlv->header.len =
						wlan_copy_rates(prates_tlv->
								rates, 0,
								SupportedRates_B,
								sizeof
								(SupportedRates_B));
					new_ratetlv_len =
						sizeof(MrvlIEtypesHeader_t) +
						prates_tlv->header.len;
					prates_tlv->header.len =
						wlan_cpu_to_le16(prates_tlv->
								 header.len);
					/* Close the gap left by the shorter
					   11B rates TLV */
					memmove(pmadapter,
						ptlv_pos + new_ratetlv_len,
						ptlv_pos + old_ratetlv_len,
						(t_u32) (pchan_tlv_out_temp -
							 (ptlv_pos +
							  old_ratetlv_len)));
					pchan_tlv_out =
						(MrvlIEtypes_ChanListParamSet_t
						 *)
						(pchan_tlv_out_temp -
						 (old_ratetlv_len -
						  new_ratetlv_len));
					pchan_tlv_out->header.type =
						wlan_cpu_to_le16
						(TLV_TYPE_CHANLIST);
					pchan_tlv_out->header.len = 0;
				}
			}
			/* Copy the current channel TLV to the command being
			   prepared */
			memcpy(pmadapter,
			       pchan_tlv_out->chan_scan_param + tlv_idx,
			       ptmp_chan_list,
			       sizeof(pchan_tlv_out->chan_scan_param));
			/* Increment the TLV header length by the size appended
			 */
			pchan_tlv_out->header.len +=
				sizeof(pchan_tlv_out->chan_scan_param);
			/*
			 * The tlv buffer length is set to the number of bytes of the
			 *   between the channel tlv pointer and the start of the
			 *   tlv buffer.  This compensates for any TLVs that were appended
			 *   before the channel list.
			 */
			pscan_cfg_out->tlv_buf_len =
				(t_u32) ((t_u8 *) pchan_tlv_out -
					 pscan_cfg_out->tlv_buf);
			/* Add the size of the channel tlv header and the data
			   length */
			pscan_cfg_out->tlv_buf_len +=
				(sizeof(pchan_tlv_out->header)
				 + pchan_tlv_out->header.len);
			/* Increment the index to the channel tlv we are
			   constructing */
			tlv_idx++;
			/* Count the total scan time per command */
			total_scan_time +=
				wlan_le16_to_cpu(ptmp_chan_list->max_scan_time);
			done_early = MFALSE;
			/* Stop the loop if the *current* channel is in the
			   1,6,11 set and we are not filtering on a BSSID or
			   SSID. */
			if (!filtered_scan &&
			    (ptmp_chan_list->chan_number == 1 ||
			     ptmp_chan_list->chan_number == 6 ||
			     ptmp_chan_list->chan_number == 11)) {
				done_early = MTRUE;
			}
			/* Stop the loop if the *current* channel is 14 and
			   region code is Japan (0x40 or 0xFF) */
			if ((pmadapter->region_code == COUNTRY_CODE_JP_40 ||
			     pmadapter->region_code == COUNTRY_CODE_JP_FF)
			    && (ptmp_chan_list->chan_number == 14)) {
				done_early = MTRUE;
			}
			/* Increment the tmp pointer to the next channel to be
			   scanned */
			ptmp_chan_list++;
			/* Stop the loop if the *next* channel is in the 1,6,11
			   set.  This will cause it to be the only channel
			   scanned on the next interation */
			if (!filtered_scan &&
			    (ptmp_chan_list->chan_number == 1 ||
			     ptmp_chan_list->chan_number == 6 ||
			     ptmp_chan_list->chan_number == 11)) {
				done_early = MTRUE;
			}
			/* Stop the loop if the *next* channel is 14 and region
			   code is Japan (0x40 or 0xFF) */
			if ((pmadapter->region_code == COUNTRY_CODE_JP_40 ||
			     pmadapter->region_code == COUNTRY_CODE_JP_FF)
			    && (ptmp_chan_list->chan_number == 14)) {
				done_early = MTRUE;
			}
		}
		/* The total scan time should be less than scan command timeout
		   value */
		if (total_scan_time > MRVDRV_MAX_TOTAL_SCAN_TIME) {
			PRINTM(MMSG,
			       "Total scan time %d ms is over limit (%d ms), scan skipped\n",
			       total_scan_time, MRVDRV_MAX_TOTAL_SCAN_TIME);
			if (pioctl_req)
				pioctl_req->status_code =
					MLAN_ERROR_CMD_SCAN_FAIL;
			ret = MLAN_STATUS_FAILURE;
			break;
		}
		pchan_tlv_out->header.len =
			wlan_cpu_to_le16(pchan_tlv_out->header.len);
		pmadapter->pscan_channels = pstart_chan;
		/* Send the scan command to the firmware with the specified cfg
		 */
		if (pmadapter->ext_scan)
			cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
		else
			cmd_no = HostCmd_CMD_802_11_SCAN;
		ret = wlan_prepare_cmd(pmpriv,
				       cmd_no,
				       HostCmd_ACT_GEN_SET,
				       0, MNULL, pscan_cfg_out);
		if (ret)
			break;
	}
	LEAVE();
	/* Release the saved TLV buffer if the JP CH14 path allocated one */
	if (ptlv_temp)
		pcb->moal_mfree(pmadapter->pmoal_handle, ptlv_temp);
	if (ret)
		return MLAN_STATUS_FAILURE;
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief Construct a wlan_scan_cmd_config structure to use in scan commands
*
* Application layer or other functions can invoke wlan_scan_networks
* with a scan configuration supplied in a wlan_ioctl_user_scan_cfg struct.
* This structure is used as the basis of one or many wlan_scan_cmd_config
* commands that are sent to the command processing module and sent to
* firmware.
*
* Create a wlan_scan_cmd_config based on the following user supplied
* parameters (if present):
* - SSID filter
* - BSSID filter
* - Number of Probes to be sent
* - Channel list
*
* If the SSID or BSSID filter is not present, disable/clear the filter.
* If the number of probes is not set, use the adapter default setting
* Qualify the channel
*
* @param pmpriv A pointer to mlan_private structure
* @param puser_scan_in MNULL or pointer to scan config parameters
* @param pscan_cfg_out Output parameter: Resulting scan configuration
* @param ppchan_list_out Output parameter: Pointer to the start of the
* channel TLV portion of the output scan config
* @param pscan_chan_list Output parameter: Pointer to the resulting
* channel list to scan
* @param pmax_chan_per_scan Output parameter: Number of channels to scan for
* each issuance of the firmware scan command
* @param pfiltered_scan Output parameter: Flag indicating whether or not
* a BSSID or SSID filter is being sent in the
* command to firmware. Used to increase the number
* of channels sent in a scan command and to
* disable the firmware channel scan filter.
* @param pscan_current_only Output parameter: Flag indicating whether or not
* we are only scanning our current active channel
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
static mlan_status
wlan_scan_setup_scan_config(IN mlan_private * pmpriv,
			    IN const wlan_user_scan_cfg * puser_scan_in,
			    OUT wlan_scan_cmd_config * pscan_cfg_out,
			    OUT MrvlIEtypes_ChanListParamSet_t **
			    ppchan_list_out,
			    OUT ChanScanParamSet_t * pscan_chan_list,
			    OUT t_u8 * pmax_chan_per_scan,
			    OUT t_u8 * pfiltered_scan,
			    OUT t_u8 * pscan_current_only)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	mlan_status ret = MLAN_STATUS_SUCCESS;
	MrvlIEtypes_NumProbes_t *pnum_probes_tlv;
	MrvlIEtypes_WildCardSsIdParamSet_t *pwildcard_ssid_tlv;
	MrvlIEtypes_RatesParamSet_t *prates_tlv;
	MrvlIEtypes_Bssid_List_t *pbssid_tlv;
	const t_u8 zero_mac[MLAN_MAC_ADDR_LENGTH] = { 0, 0, 0, 0, 0, 0 };
	t_u8 *ptlv_pos;
	t_u32 num_probes;
	t_u32 ssid_len;
	t_u32 chan_idx;
	t_u32 scan_type;
	t_u16 scan_dur;
	t_u8 channel;
	t_u8 radio_type;
	t_u32 ssid_idx;
	t_u8 ssid_filter;
	WLAN_802_11_RATES rates;
	t_u32 rates_size;
	MrvlIETypes_HTCap_t *pht_cap;
	MrvlIETypes_VHTCap_t *pvht_cap;
	MrvlIEtypes_ScanChanGap_t *pscan_gap_tlv;
	ENTER();
	/* The tlv_buf_len is calculated for each scan command.  The TLVs added
	   in this routine will be preserved since the routine that sends the
	   command will append channelTLVs at *ppchan_list_out.  The difference
	   between the *ppchan_list_out and the tlv_buf start will be used to
	   calculate the size of anything we add in this routine. */
	pscan_cfg_out->tlv_buf_len = 0;
	/* Running tlv pointer.  Assigned to ppchan_list_out at end of function
	   so later routines know where channels can be added to the command
	   buf */
	ptlv_pos = pscan_cfg_out->tlv_buf;
	/* Initialize the scan as un-filtered; the flag is later set to TRUE
	   below if a SSID or BSSID filter is sent in the command */
	*pfiltered_scan = MFALSE;
	/* Initialize the scan as not being only on the current channel.  If
	   the channel list is customized, only contains one channel, and is
	   the active channel, this is set true and data flow is not halted. */
	*pscan_current_only = MFALSE;
	if (puser_scan_in) {
		ssid_filter = MFALSE;
		/* Set the bss type scan filter, use Adapter setting if unset */
		pscan_cfg_out->bss_mode = (puser_scan_in->bss_mode
					   ? (t_u8) puser_scan_in->bss_mode :
					   (t_u8) pmadapter->scan_mode);
		/* Set the number of probes to send, use Adapter setting if
		   unset */
		num_probes =
			(puser_scan_in->num_probes ? puser_scan_in->
			 num_probes : pmadapter->scan_probes);
		/*
		 * Set the BSSID filter to the incoming configuration,
		 *   if non-zero.  If not set, it will remain disabled (all zeros).
		 */
		memcpy(pmadapter, pscan_cfg_out->specific_bssid,
		       puser_scan_in->specific_bssid,
		       sizeof(pscan_cfg_out->specific_bssid));
		if (pmadapter->ext_scan
		    && memcmp(pmadapter, pscan_cfg_out->specific_bssid,
			      &zero_mac, sizeof(zero_mac))) {
			pbssid_tlv = (MrvlIEtypes_Bssid_List_t *) ptlv_pos;
			/* FIX: the TLV header goes out on the wire and must
			   be little-endian, consistent with every other TLV
			   built in this function (previously written in host
			   byte order, breaking big-endian hosts). */
			pbssid_tlv->header.type =
				wlan_cpu_to_le16(TLV_TYPE_BSSID);
			pbssid_tlv->header.len =
				wlan_cpu_to_le16(MLAN_MAC_ADDR_LENGTH);
			memcpy(pmadapter, pbssid_tlv->bssid,
			       puser_scan_in->specific_bssid,
			       MLAN_MAC_ADDR_LENGTH);
			ptlv_pos += sizeof(MrvlIEtypes_Bssid_List_t);
		}
		/* Append one wildcard SSID TLV per user-supplied SSID entry */
		for (ssid_idx = 0;
		     ((ssid_idx < NELEMENTS(puser_scan_in->ssid_list))
		      && (*puser_scan_in->ssid_list[ssid_idx].ssid ||
			  puser_scan_in->ssid_list[ssid_idx].max_len));
		     ssid_idx++) {
			ssid_len =
				wlan_strlen((char *)puser_scan_in->
					    ssid_list[ssid_idx].ssid);
			pwildcard_ssid_tlv
				=
				(MrvlIEtypes_WildCardSsIdParamSet_t *) ptlv_pos;
			pwildcard_ssid_tlv->header.type =
				wlan_cpu_to_le16(TLV_TYPE_WILDCARDSSID);
			pwildcard_ssid_tlv->header.len =
				(t_u16) (ssid_len +
					 sizeof(pwildcard_ssid_tlv->
						max_ssid_length));
			pwildcard_ssid_tlv->max_ssid_length =
				puser_scan_in->ssid_list[ssid_idx].max_len;
			memcpy(pmadapter, pwildcard_ssid_tlv->ssid,
			       puser_scan_in->ssid_list[ssid_idx].ssid,
			       MIN(MLAN_MAX_SSID_LENGTH, ssid_len));
			ptlv_pos += (sizeof(pwildcard_ssid_tlv->header)
				     + pwildcard_ssid_tlv->header.len);
			/* Convert the header length only after it has been
			   used for the pointer advance above */
			pwildcard_ssid_tlv->header.len
				=
				wlan_cpu_to_le16(pwildcard_ssid_tlv->header.
						 len);
			PRINTM(MINFO, "Scan: ssid_list[%d]: %s, %d\n",
			       ssid_idx,
			       pwildcard_ssid_tlv->ssid,
			       pwildcard_ssid_tlv->max_ssid_length);
			if (ssid_len)
				ssid_filter = MTRUE;
		}
		/*
		 *  The default number of channels sent in the command is low to
		 *    ensure the response buffer from the firmware does not truncate
		 *    scan results.  That is not an issue with an SSID or BSSID
		 *    filter applied to the scan results in the firmware.
		 */
		if ((ssid_idx && ssid_filter) ||
		    memcmp(pmadapter, pscan_cfg_out->specific_bssid, &zero_mac,
			   sizeof(zero_mac))) {
			*pfiltered_scan = MTRUE;
		}
	} else {
		pscan_cfg_out->bss_mode = (t_u8) pmadapter->scan_mode;
		num_probes = pmadapter->scan_probes;
	}
	/*
	 *  If a specific BSSID or SSID is used, the number of channels in the
	 *  scan command will be increased to the absolute maximum.
	 */
	if (*pfiltered_scan)
		*pmax_chan_per_scan = MRVDRV_MAX_CHANNELS_PER_SPECIFIC_SCAN;
	else
		*pmax_chan_per_scan = MRVDRV_CHANNELS_PER_SCAN_CMD;
	if (puser_scan_in && puser_scan_in->scan_chan_gap) {
		*pmax_chan_per_scan = MRVDRV_MAX_CHANNELS_PER_SPECIFIC_SCAN;
		PRINTM(MINFO, "Scan: channel gap = %d\n",
		       puser_scan_in->scan_chan_gap);
		pscan_gap_tlv = (MrvlIEtypes_ScanChanGap_t *) ptlv_pos;
		pscan_gap_tlv->header.type =
			wlan_cpu_to_le16(TLV_TYPE_SCAN_CHANNEL_GAP);
		pscan_gap_tlv->header.len = sizeof(pscan_gap_tlv->gap);
		pscan_gap_tlv->gap =
			wlan_cpu_to_le16((t_u16) puser_scan_in->scan_chan_gap);
		ptlv_pos +=
			sizeof(pscan_gap_tlv->header) +
			pscan_gap_tlv->header.len;
		pscan_gap_tlv->header.len =
			wlan_cpu_to_le16(pscan_gap_tlv->header.len);
	}
	/* If the input config or adapter has the number of Probes set, add tlv
	 */
	if (num_probes) {
		PRINTM(MINFO, "Scan: num_probes = %d\n", num_probes);
		pnum_probes_tlv = (MrvlIEtypes_NumProbes_t *) ptlv_pos;
		pnum_probes_tlv->header.type =
			wlan_cpu_to_le16(TLV_TYPE_NUMPROBES);
		pnum_probes_tlv->header.len =
			sizeof(pnum_probes_tlv->num_probes);
		pnum_probes_tlv->num_probes =
			wlan_cpu_to_le16((t_u16) num_probes);
		ptlv_pos +=
			sizeof(pnum_probes_tlv->header) +
			pnum_probes_tlv->header.len;
		pnum_probes_tlv->header.len =
			wlan_cpu_to_le16(pnum_probes_tlv->header.len);
	}
	/* Append rates tlv */
	memset(pmadapter, rates, 0, sizeof(rates));
	rates_size = wlan_get_supported_rates(pmpriv, pmpriv->bss_mode,
					      (pmpriv->bss_mode ==
					       MLAN_BSS_MODE_INFRA) ? pmpriv->
					      config_bands : pmadapter->
					      adhoc_start_band, rates);
	prates_tlv = (MrvlIEtypes_RatesParamSet_t *) ptlv_pos;
	prates_tlv->header.type = wlan_cpu_to_le16(TLV_TYPE_RATES);
	prates_tlv->header.len = wlan_cpu_to_le16((t_u16) rates_size);
	memcpy(pmadapter, prates_tlv->rates, rates, rates_size);
	ptlv_pos += sizeof(prates_tlv->header) + rates_size;
	PRINTM(MINFO, "SCAN_CMD: Rates size = %d\n", rates_size);
	/* Append HT capabilities when 11n is enabled on a configured band */
	if (ISSUPP_11NENABLED(pmpriv->adapter->fw_cap_info)
	    && (pmpriv->config_bands & BAND_GN
		|| pmpriv->config_bands & BAND_AN)) {
		pht_cap = (MrvlIETypes_HTCap_t *) ptlv_pos;
		memset(pmadapter, pht_cap, 0, sizeof(MrvlIETypes_HTCap_t));
		pht_cap->header.type = wlan_cpu_to_le16(HT_CAPABILITY);
		pht_cap->header.len = sizeof(HTCap_t);
		wlan_fill_ht_cap_tlv(pmpriv, pht_cap, pmpriv->config_bands);
		HEXDUMP("SCAN: HT_CAPABILITIES IE", (t_u8 *) pht_cap,
			sizeof(MrvlIETypes_HTCap_t));
		ptlv_pos += sizeof(MrvlIETypes_HTCap_t);
		pht_cap->header.len = wlan_cpu_to_le16(pht_cap->header.len);
	}
	/* Append VHT capabilities when 11ac is enabled on the AC band */
	if (ISSUPP_11ACENABLED(pmpriv->adapter->fw_cap_info)
	    && (pmpriv->config_bands & BAND_AAC)) {
		pvht_cap = (MrvlIETypes_VHTCap_t *) ptlv_pos;
		memset(pmadapter, pvht_cap, 0, sizeof(MrvlIETypes_VHTCap_t));
		pvht_cap->header.type = wlan_cpu_to_le16(VHT_CAPABILITY);
		pvht_cap->header.len = sizeof(VHT_capa_t);
		wlan_fill_vht_cap_tlv(pmpriv, pvht_cap, pmpriv->config_bands);
		HEXDUMP("SCAN: VHT_CAPABILITIES IE", (t_u8 *) pvht_cap,
			sizeof(MrvlIETypes_VHTCap_t));
		ptlv_pos += sizeof(MrvlIETypes_VHTCap_t);
		pvht_cap->header.len = wlan_cpu_to_le16(pvht_cap->header.len);
	}
	if (wlan_is_ext_capa_support(pmpriv))
		wlan_add_ext_capa_info_ie(pmpriv, &ptlv_pos);
	wlan_add_wps_probe_request_ie(pmpriv, &ptlv_pos);
	/*
	 * Set the output for the channel TLV to the address in the tlv buffer
	 *   past any TLVs that were added in this function (SSID, num_probes).
	 *   Channel TLVs will be added past this for each scan command, preserving
	 *   the TLVs that were previously added.
	 */
	*ppchan_list_out = (MrvlIEtypes_ChanListParamSet_t *) ptlv_pos;
	if (puser_scan_in && puser_scan_in->chan_list[0].chan_number) {
		PRINTM(MINFO, "Scan: Using supplied channel list\n");
		for (chan_idx = 0;
		     chan_idx < WLAN_USER_SCAN_CHAN_MAX
		     && puser_scan_in->chan_list[chan_idx].chan_number;
		     chan_idx++) {
			channel =
				puser_scan_in->chan_list[chan_idx].chan_number;
			(pscan_chan_list + chan_idx)->chan_number = channel;
			radio_type =
				puser_scan_in->chan_list[chan_idx].radio_type;
			(pscan_chan_list + chan_idx)->radio_type = radio_type;
			scan_type =
				puser_scan_in->chan_list[chan_idx].scan_type;
			if (scan_type == MLAN_SCAN_TYPE_UNCHANGED)
				scan_type = pmadapter->scan_type;
			if (radio_type == HostCmd_SCAN_RADIO_TYPE_A) {
				if (pmadapter->fw_bands & BAND_A)
					PRINTM(MINFO,
					       "UserScan request for A Band channel %d!!\n",
					       channel);
				else {
					PRINTM(MERROR,
					       "Scan in A band is not allowed!!\n");
					ret = MLAN_STATUS_FAILURE;
					LEAVE();
					return ret;
				}
			}
			/* Prevent active scanning on a radar controlled
			   channel */
			if (radio_type == HostCmd_SCAN_RADIO_TYPE_A) {
				if (wlan_11h_radar_detect_required
				    (pmpriv, channel)) {
					scan_type = MLAN_SCAN_TYPE_PASSIVE;
				}
			}
			if (radio_type == HostCmd_SCAN_RADIO_TYPE_BG) {
				if (wlan_bg_scan_type_is_passive
				    (pmpriv, channel)) {
					scan_type = MLAN_SCAN_TYPE_PASSIVE;
				}
			}
			if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				(pscan_chan_list +
				 chan_idx)->chan_scan_mode.passive_scan = MTRUE;
			} else {
				(pscan_chan_list +
				 chan_idx)->chan_scan_mode.passive_scan =
					MFALSE;
			}
			/* Dwell time: user override, else pick by scan type */
			if (puser_scan_in->chan_list[chan_idx].scan_time) {
				scan_dur =
					(t_u16) puser_scan_in->
					chan_list[chan_idx].scan_time;
			} else {
				if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
					scan_dur = pmadapter->passive_scan_time;
				} else if (*pfiltered_scan) {
					scan_dur =
						pmadapter->specific_scan_time;
				} else {
					scan_dur = pmadapter->active_scan_time;
				}
			}
			(pscan_chan_list + chan_idx)->min_scan_time =
				wlan_cpu_to_le16(scan_dur);
			(pscan_chan_list + chan_idx)->max_scan_time =
				wlan_cpu_to_le16(scan_dur);
			if (*pfiltered_scan) {
				(pscan_chan_list +
				 chan_idx)->chan_scan_mode.disable_chan_filt =
					MTRUE;
			}
		}
		/* Check if we are only scanning the current channel */
		if ((chan_idx == 1)
		    && (puser_scan_in->chan_list[0].chan_number
			== pmpriv->curr_bss_params.bss_descriptor.channel)) {
			*pscan_current_only = MTRUE;
			PRINTM(MINFO, "Scan: Scanning current channel only\n");
		}
	} else {
		PRINTM(MINFO, "Scan: Creating full region channel list\n");
		wlan_scan_create_channel_list(pmpriv, puser_scan_in,
					      pscan_chan_list, *pfiltered_scan);
	}
	LEAVE();
	return ret;
}
/**
* @brief Inspect the scan response buffer for pointers to expected TLVs
*
* TLVs can be included at the end of the scan response BSS information.
* Parse the data in the buffer for pointers to TLVs that can potentially
* be passed back in the response
*
* @param pmadapter Pointer to the mlan_adapter structure
* @param ptlv Pointer to the start of the TLV buffer to parse
* @param tlv_buf_size Size of the TLV buffer
* @param req_tlv_type Request TLV's type
* @param pptlv Output parameter: Pointer to the request TLV if found
*
* @return N/A
*/
static t_void
wlan_ret_802_11_scan_get_tlv_ptrs(IN pmlan_adapter pmadapter,
				  IN MrvlIEtypes_Data_t * ptlv,
				  IN t_u32 tlv_buf_size,
				  IN t_u32 req_tlv_type,
				  OUT MrvlIEtypes_Data_t ** pptlv)
{
	MrvlIEtypes_Data_t *pcur_tlv = ptlv;
	t_u32 buf_left = tlv_buf_size;
	t_u32 cur_type;
	t_u32 cur_len;
	ENTER();
	*pptlv = MNULL;
	PRINTM(MINFO, "SCAN_RESP: tlv_buf_size = %d\n", tlv_buf_size);
	/* Walk the TLV chain until exhausted or the requested TLV is found */
	while (buf_left >= sizeof(MrvlIEtypesHeader_t)) {
		cur_type = wlan_le16_to_cpu(pcur_tlv->header.type);
		cur_len = wlan_le16_to_cpu(pcur_tlv->header.len);
		/* Stop on a TLV whose declared length overruns the buffer */
		if (sizeof(ptlv->header) + cur_len > buf_left) {
			PRINTM(MERROR, "SCAN_RESP: TLV buffer corrupt\n");
			break;
		}
		if (req_tlv_type == cur_type) {
			if (cur_type == TLV_TYPE_TSFTIMESTAMP) {
				PRINTM(MINFO,
				       "SCAN_RESP: TSF Timestamp TLV, len = %d\n",
				       cur_len);
				*pptlv = (MrvlIEtypes_Data_t *) pcur_tlv;
			} else if (cur_type == TLV_TYPE_CHANNELBANDLIST) {
				PRINTM(MINFO,
				       "SCAN_RESP: CHANNEL BAND LIST TLV, len = %d\n",
				       cur_len);
				*pptlv = (MrvlIEtypes_Data_t *) pcur_tlv;
			} else if (cur_type == TLV_TYPE_CHANNEL_STATS) {
				PRINTM(MINFO,
				       "SCAN_RESP: CHANNEL STATS TLV, len = %d\n",
				       cur_len);
				*pptlv = (MrvlIEtypes_Data_t *) pcur_tlv;
			} else {
				PRINTM(MERROR,
				       "SCAN_RESP: Unhandled TLV = %d\n",
				       cur_type);
				/* Give up, this seems corrupted */
				LEAVE();
				return;
			}
		}
		if (*pptlv) {
			/* HEXDUMP("SCAN_RESP: TLV Buf", (t_u8 *)*pptlv+4,
			   tlv_len); */
			break;
		}
		buf_left -= (sizeof(ptlv->header) + cur_len);
		pcur_tlv =
			(MrvlIEtypes_Data_t *) (pcur_tlv->data + cur_len);
	}			/* while */
	LEAVE();
}
/**
* @brief Interpret a BSS scan response returned from the firmware
*
* Parse the various fixed fields and IEs passed back for a BSS probe
* response or beacon from the scan command. Record information as needed
* in the scan table BSSDescriptor_t for that entry.
*
* @param pmadapter A pointer to mlan_adapter structure
* @param pbss_entry Output parameter: Pointer to the BSS Entry
* @param pbeacon_info Pointer to the Beacon information
* @param bytes_left Number of bytes left to parse
* @param ext_scan extended scan
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
static mlan_status
wlan_interpret_bss_desc_with_ie(IN pmlan_adapter pmadapter,
				OUT BSSDescriptor_t * pbss_entry,
				IN t_u8 ** pbeacon_info,
				IN t_u32 * bytes_left, IN t_u8 ext_scan)
{
	mlan_status ret = MLAN_STATUS_SUCCESS;
	IEEEtypes_ElementId_e element_id;
	IEEEtypes_FhParamSet_t *pfh_param_set;
	IEEEtypes_DsParamSet_t *pds_param_set;
	IEEEtypes_CfParamSet_t *pcf_param_set;
	IEEEtypes_IbssParamSet_t *pibss_param_set;
	IEEEtypes_CapInfo_t *pcap_info;
	WLAN_802_11_FIXED_IEs fixed_ie;
	t_u8 *pcurrent_ptr;
	t_u8 *prate;
	t_u8 element_len;
	t_u16 total_ie_len;
	t_u8 bytes_to_copy;
	t_u8 rate_size;
	t_u16 beacon_size;
	t_u8 found_data_rate_ie;
	t_u32 bytes_left_for_current_beacon;
	IEEEtypes_ERPInfo_t *perp_info;
	IEEEtypes_VendorSpecific_t *pvendor_ie;
	const t_u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
	const t_u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
	IEEEtypes_CountryInfoSet_t *pcountry_info;
	ENTER();
	found_data_rate_ie = MFALSE;
	rate_size = 0;
	beacon_size = 0;
	/* Each BSS entry in the scan response is prefixed with a 16-bit
	 * little-endian length; pull it out and advance past it. */
	if (*bytes_left >= sizeof(beacon_size)) {
		/* Extract & convert beacon size from the command buffer */
		memcpy(pmadapter, &beacon_size, *pbeacon_info,
		       sizeof(beacon_size));
		beacon_size = wlan_le16_to_cpu(beacon_size);
		*bytes_left -= sizeof(beacon_size);
		*pbeacon_info += sizeof(beacon_size);
	}
	/* A zero or oversized beacon length means the response buffer is
	 * inconsistent; consume whatever is left and fail this entry. */
	if (!beacon_size || beacon_size > *bytes_left) {
		*pbeacon_info += *bytes_left;
		*bytes_left = 0;
		LEAVE();
		return MLAN_STATUS_FAILURE;
	}
	/* Initialize the current working beacon pointer for this BSS iteration
	 */
	pcurrent_ptr = *pbeacon_info;
	/* Advance the return beacon pointer past the current beacon */
	*pbeacon_info += beacon_size;
	*bytes_left -= beacon_size;
	bytes_left_for_current_beacon = beacon_size;
	/* Must at least hold BSSID + RSSI byte + the fixed IEs. */
	if (bytes_left_for_current_beacon <
	    (MLAN_MAC_ADDR_LENGTH + sizeof(t_u8) +
	     sizeof(WLAN_802_11_FIXED_IEs))) {
		PRINTM(MERROR, "InterpretIE: Not enough bytes left\n");
		LEAVE();
		return MLAN_STATUS_FAILURE;
	}
	memcpy(pmadapter, pbss_entry->mac_address, pcurrent_ptr,
	       MLAN_MAC_ADDR_LENGTH);
	PRINTM(MINFO, "InterpretIE: AP MAC Addr-" MACSTR "\n",
	       MAC2STR(pbss_entry->mac_address));
	pcurrent_ptr += MLAN_MAC_ADDR_LENGTH;
	bytes_left_for_current_beacon -= MLAN_MAC_ADDR_LENGTH;
	/*
	 * Next 4 fields are RSSI (for legacy scan only), time stamp,
	 * beacon interval, and capability information
	 */
	if (!ext_scan) {
		/* RSSI is 1 byte long */
		pbss_entry->rssi = (t_s32) (*pcurrent_ptr);
		PRINTM(MINFO, "InterpretIE: RSSI=%02X\n", *pcurrent_ptr);
		pcurrent_ptr += 1;
		bytes_left_for_current_beacon -= 1;
	}
	/*
	 * The RSSI is not part of the beacon/probe response. After we have
	 * advanced pcurrent_ptr past the RSSI field, save the remaining
	 * data for use at the application layer
	 */
	pbss_entry->pbeacon_buf = pcurrent_ptr;
	pbss_entry->beacon_buf_size = bytes_left_for_current_beacon;
	/* Time stamp is 8 bytes long */
	memcpy(pmadapter, fixed_ie.time_stamp, pcurrent_ptr, 8);
	memcpy(pmadapter, pbss_entry->time_stamp, pcurrent_ptr, 8);
	pcurrent_ptr += 8;
	bytes_left_for_current_beacon -= 8;
	/* Beacon interval is 2 bytes long */
	memcpy(pmadapter, &fixed_ie.beacon_interval, pcurrent_ptr, 2);
	pbss_entry->beacon_period = wlan_le16_to_cpu(fixed_ie.beacon_interval);
	pcurrent_ptr += 2;
	bytes_left_for_current_beacon -= 2;
	/* Capability information is 2 bytes long */
	memcpy(pmadapter, &fixed_ie.capabilities, pcurrent_ptr, 2);
	PRINTM(MINFO, "InterpretIE: fixed_ie.capabilities=0x%X\n",
	       fixed_ie.capabilities);
	fixed_ie.capabilities = wlan_le16_to_cpu(fixed_ie.capabilities);
	pcap_info = (IEEEtypes_CapInfo_t *) & fixed_ie.capabilities;
	memcpy(pmadapter, &pbss_entry->cap_info, pcap_info,
	       sizeof(IEEEtypes_CapInfo_t));
	pcurrent_ptr += 2;
	bytes_left_for_current_beacon -= 2;
	/* Rest of the current buffer are IE's */
	PRINTM(MINFO, "InterpretIE: IELength for this AP = %d\n",
	       bytes_left_for_current_beacon);
	HEXDUMP("InterpretIE: IE info", (t_u8 *) pcurrent_ptr,
		bytes_left_for_current_beacon);
	/* Derive privacy / BSS mode / 11h hints from the capability bits. */
	if (pcap_info->privacy) {
		PRINTM(MINFO, "InterpretIE: AP WEP enabled\n");
		pbss_entry->privacy = Wlan802_11PrivFilter8021xWEP;
	} else {
		pbss_entry->privacy = Wlan802_11PrivFilterAcceptAll;
	}
	if (pcap_info->ibss == 1)
		pbss_entry->bss_mode = MLAN_BSS_MODE_IBSS;
	else
		pbss_entry->bss_mode = MLAN_BSS_MODE_INFRA;
	if (pcap_info->spectrum_mgmt == 1) {
		PRINTM(MINFO, "InterpretIE: 11h- Spectrum Management "
		       "capability bit found\n");
		pbss_entry->wlan_11h_bss_info.sensed_11h = 1;
	}
	/* Process variable IE */
	while (bytes_left_for_current_beacon >= 2) {
		element_id = (IEEEtypes_ElementId_e) (*((t_u8 *) pcurrent_ptr));
		element_len = *((t_u8 *) pcurrent_ptr + 1);
		total_ie_len = element_len + sizeof(IEEEtypes_Header_t);
		if (bytes_left_for_current_beacon < total_ie_len) {
			PRINTM(MERROR, "InterpretIE: Error in processing IE, "
			       "bytes left < IE length\n");
			/* Zeroing the remaining count and continuing makes
			 * the while condition fail, i.e. abort parsing. */
			bytes_left_for_current_beacon = 0;
			continue;
		}
		switch (element_id) {
		case SSID:
			/* Oversized SSID: treat the beacon as corrupt and
			 * stop parsing (same continue-to-exit pattern). */
			if (element_len > MRVDRV_MAX_SSID_LENGTH) {
				bytes_left_for_current_beacon = 0;
				continue;
			}
			/* Keep only the first SSID IE seen for this entry. */
			if (!pbss_entry->ssid.ssid_len) {
				pbss_entry->ssid.ssid_len = element_len;
				memcpy(pmadapter, pbss_entry->ssid.ssid,
				       (pcurrent_ptr + 2), element_len);
			}
			PRINTM(MINFO, "InterpretIE: ssid: %-32s\n",
			       pbss_entry->ssid.ssid);
			break;
		case SUPPORTED_RATES:
			if (element_len > WLAN_SUPPORTED_RATES) {
				bytes_left_for_current_beacon = 0;
				continue;
			}
			memcpy(pmadapter, pbss_entry->data_rates,
			       pcurrent_ptr + 2, element_len);
			memcpy(pmadapter, pbss_entry->supported_rates,
			       pcurrent_ptr + 2, element_len);
			HEXDUMP("InterpretIE: SupportedRates:",
				pbss_entry->supported_rates, element_len);
			/* Remember how many rates are filled so the extended
			 * supported rates IE can be appended later. */
			rate_size = element_len;
			found_data_rate_ie = MTRUE;
			break;
		case FH_PARAM_SET:
			pfh_param_set = (IEEEtypes_FhParamSet_t *) pcurrent_ptr;
			pbss_entry->network_type_use = Wlan802_11FH;
			memcpy(pmadapter,
			       &pbss_entry->phy_param_set.fh_param_set,
			       pfh_param_set, MIN(total_ie_len,
						  sizeof
						  (IEEEtypes_FhParamSet_t)));
			pbss_entry->phy_param_set.fh_param_set.len =
				MIN(element_len, (sizeof(IEEEtypes_FhParamSet_t)
						  -
						  sizeof(IEEEtypes_Header_t)));
			pbss_entry->phy_param_set.fh_param_set.dwell_time =
				wlan_le16_to_cpu(pbss_entry->phy_param_set.
						 fh_param_set.dwell_time);
			break;
		case DS_PARAM_SET:
			pds_param_set = (IEEEtypes_DsParamSet_t *) pcurrent_ptr;
			pbss_entry->network_type_use = Wlan802_11DS;
			pbss_entry->channel = pds_param_set->current_chan;
			memcpy(pmadapter,
			       &pbss_entry->phy_param_set.ds_param_set,
			       pds_param_set, MIN(total_ie_len,
						  sizeof
						  (IEEEtypes_DsParamSet_t)));
			pbss_entry->phy_param_set.ds_param_set.len =
				MIN(element_len, (sizeof(IEEEtypes_DsParamSet_t)
						  -
						  sizeof(IEEEtypes_Header_t)));
			break;
		case CF_PARAM_SET:
			pcf_param_set = (IEEEtypes_CfParamSet_t *) pcurrent_ptr;
			memcpy(pmadapter,
			       &pbss_entry->ss_param_set.cf_param_set,
			       pcf_param_set, MIN(total_ie_len,
						  sizeof
						  (IEEEtypes_CfParamSet_t)));
			pbss_entry->ss_param_set.cf_param_set.len =
				MIN(element_len, (sizeof(IEEEtypes_CfParamSet_t)
						  -
						  sizeof(IEEEtypes_Header_t)));
			break;
		case IBSS_PARAM_SET:
			pibss_param_set =
				(IEEEtypes_IbssParamSet_t *) pcurrent_ptr;
			pbss_entry->atim_window =
				wlan_le16_to_cpu(pibss_param_set->atim_window);
			memcpy(pmadapter,
			       &pbss_entry->ss_param_set.ibss_param_set,
			       pibss_param_set, MIN(total_ie_len,
						    sizeof
						    (IEEEtypes_IbssParamSet_t)));
			pbss_entry->ss_param_set.ibss_param_set.len =
				MIN(element_len,
				    (sizeof(IEEEtypes_IbssParamSet_t)
				     - sizeof(IEEEtypes_Header_t)));
			break;
			/* Handle Country Info IE */
		case COUNTRY_INFO:
			pcountry_info =
				(IEEEtypes_CountryInfoSet_t *) pcurrent_ptr;
			/* Must hold at least the country code and must fit
			 * the fixed-size storage in the BSS descriptor. */
			if (pcountry_info->len <
			    sizeof(pcountry_info->country_code) ||
			    (unsigned)(pcountry_info->len + 2) >
			    sizeof(IEEEtypes_CountryInfoFullSet_t)) {
				PRINTM(MERROR,
				       "InterpretIE: 11D- Err "
				       "country_info len =%d min=%d max=%d\n",
				       pcountry_info->len,
				       sizeof(pcountry_info->country_code),
				       sizeof(IEEEtypes_CountryInfoFullSet_t));
				LEAVE();
				return MLAN_STATUS_FAILURE;
			}
			memcpy(pmadapter, &pbss_entry->country_info,
			       pcountry_info, pcountry_info->len + 2);
			HEXDUMP("InterpretIE: 11D- country_info:",
				(t_u8 *) pcountry_info,
				(t_u32) (pcountry_info->len + 2));
			break;
		case ERP_INFO:
			perp_info = (IEEEtypes_ERPInfo_t *) pcurrent_ptr;
			pbss_entry->erp_flags = perp_info->erp_flags;
			break;
		case POWER_CONSTRAINT:
		case POWER_CAPABILITY:
		case TPC_REPORT:
		case CHANNEL_SWITCH_ANN:
		case QUIET:
		case IBSS_DFS:
		case SUPPORTED_CHANNELS:
		case TPC_REQUEST:
			/* All 11h-related elements funnel into one handler. */
			wlan_11h_process_bss_elem(pmadapter,
						  &pbss_entry->
						  wlan_11h_bss_info,
						  pcurrent_ptr);
			break;
		case EXTENDED_SUPPORTED_RATES:
			/*
			 * Only process extended supported rate
			 * if data rate is already found.
			 * Data rate IE should come before
			 * extended supported rate IE
			 */
			if (found_data_rate_ie) {
				/* Clamp so the combined rate list never
				 * exceeds the WLAN_SUPPORTED_RATES arrays. */
				if ((element_len + rate_size) >
				    WLAN_SUPPORTED_RATES) {
					bytes_to_copy =
						(WLAN_SUPPORTED_RATES -
						 rate_size);
				} else {
					bytes_to_copy = element_len;
				}
				prate = (t_u8 *) pbss_entry->data_rates;
				prate += rate_size;
				memcpy(pmadapter, prate, pcurrent_ptr + 2,
				       bytes_to_copy);
				prate = (t_u8 *) pbss_entry->supported_rates;
				prate += rate_size;
				memcpy(pmadapter, prate, pcurrent_ptr + 2,
				       bytes_to_copy);
			}
			HEXDUMP("InterpretIE: ExtSupportedRates:",
				pbss_entry->supported_rates,
				element_len + rate_size);
			break;
		case VENDOR_SPECIFIC_221:
			/* Distinguish WPA vs WMM vendor IEs by OUI+subtype. */
			pvendor_ie =
				(IEEEtypes_VendorSpecific_t *) pcurrent_ptr;
			if (!memcmp
			    (pmadapter, pvendor_ie->vend_hdr.oui, wpa_oui,
			     sizeof(wpa_oui))) {
				/* IE pointers reference the caller's buffer;
				 * offsets are kept so they can be rebased
				 * when the beacon is copied elsewhere. */
				pbss_entry->pwpa_ie =
					(IEEEtypes_VendorSpecific_t *)
					pcurrent_ptr;
				pbss_entry->wpa_offset =
					(t_u16) (pcurrent_ptr -
						 pbss_entry->pbeacon_buf);
				HEXDUMP("InterpretIE: Resp WPA_IE",
					(t_u8 *) pbss_entry->pwpa_ie,
					((*(pbss_entry->pwpa_ie)).vend_hdr.len +
					 sizeof(IEEEtypes_Header_t)));
			} else if (!memcmp
				   (pmadapter, pvendor_ie->vend_hdr.oui,
				    wmm_oui, sizeof(wmm_oui))) {
				if (total_ie_len ==
				    sizeof(IEEEtypes_WmmParameter_t)
				    || total_ie_len ==
				    sizeof(IEEEtypes_WmmInfo_t)) {
					/*
					 * Only accept and copy the WMM IE if it matches
					 * the size expected for the WMM Info IE or the
					 * WMM Parameter IE.
					 */
					memcpy(pmadapter,
					       (t_u8 *) & pbss_entry->wmm_ie,
					       pcurrent_ptr, total_ie_len);
					HEXDUMP("InterpretIE: Resp WMM_IE",
						(t_u8 *) & pbss_entry->wmm_ie,
						total_ie_len);
				}
			}
			break;
		case RSN_IE:
			pbss_entry->prsn_ie =
				(IEEEtypes_Generic_t *) pcurrent_ptr;
			pbss_entry->rsn_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp RSN_IE",
				(t_u8 *) pbss_entry->prsn_ie,
				(*(pbss_entry->prsn_ie)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case WAPI_IE:
			pbss_entry->pwapi_ie =
				(IEEEtypes_Generic_t *) pcurrent_ptr;
			pbss_entry->wapi_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp WAPI_IE",
				(t_u8 *) pbss_entry->pwapi_ie,
				(*(pbss_entry->pwapi_ie)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case HT_CAPABILITY:
			pbss_entry->pht_cap =
				(IEEEtypes_HTCap_t *) pcurrent_ptr;
			pbss_entry->ht_cap_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp HTCAP_IE",
				(t_u8 *) pbss_entry->pht_cap,
				(*(pbss_entry->pht_cap)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case HT_OPERATION:
			pbss_entry->pht_info =
				(IEEEtypes_HTInfo_t *) pcurrent_ptr;
			pbss_entry->ht_info_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp HTINFO_IE",
				(t_u8 *) pbss_entry->pht_info,
				(*(pbss_entry->pht_info)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case BSSCO_2040:
			pbss_entry->pbss_co_2040 =
				(IEEEtypes_2040BSSCo_t *) pcurrent_ptr;
			pbss_entry->bss_co_2040_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp 2040BSSCOEXISTANCE_IE",
				(t_u8 *) pbss_entry->pbss_co_2040,
				(*(pbss_entry->pbss_co_2040)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case EXT_CAPABILITY:
			pbss_entry->pext_cap =
				(IEEEtypes_ExtCap_t *) pcurrent_ptr;
			pbss_entry->ext_cap_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp EXTCAP_IE",
				(t_u8 *) pbss_entry->pext_cap,
				(*(pbss_entry->pext_cap)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case OVERLAPBSSSCANPARAM:
			pbss_entry->poverlap_bss_scan_param =
				(IEEEtypes_OverlapBSSScanParam_t *)
				pcurrent_ptr;
			pbss_entry->overlap_bss_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp OBSS_IE",
				(t_u8 *) pbss_entry->poverlap_bss_scan_param,
				(*(pbss_entry->poverlap_bss_scan_param)).
				ieee_hdr.len + sizeof(IEEEtypes_Header_t));
			break;
		case VHT_CAPABILITY:
			pbss_entry->pvht_cap =
				(IEEEtypes_VHTCap_t *) pcurrent_ptr;
			pbss_entry->vht_cap_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp VHTCAP_IE",
				(t_u8 *) pbss_entry->pvht_cap,
				(*(pbss_entry->pvht_cap)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case VHT_OPERATION:
			pbss_entry->pvht_oprat =
				(IEEEtypes_VHTOprat_t *) pcurrent_ptr;
			pbss_entry->vht_oprat_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp VHTOPER_IE",
				(t_u8 *) pbss_entry->pvht_oprat,
				(*(pbss_entry->pvht_oprat)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case EXT_BSS_LOAD:
			pbss_entry->pext_bssload =
				(IEEEtypes_ExtBSSload_t *) pcurrent_ptr;
			pbss_entry->ext_bssload_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp EXTBSSLOAD_IE",
				(t_u8 *) pbss_entry->pext_bssload,
				(*(pbss_entry->pext_bssload)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case VHT_TX_POWER_ENV:
			pbss_entry->pvht_txpower =
				(IEEEtypes_VHTtxpower_t *) pcurrent_ptr;
			pbss_entry->vht_txpower_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp TXPOW_IE",
				(t_u8 *) pbss_entry->pvht_txpower,
				(*(pbss_entry->pvht_txpower)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case EXT_POWER_CONSTR:
			pbss_entry->pext_pwer =
				(IEEEtypes_ExtPwerCons_t *) pcurrent_ptr;
			pbss_entry->ext_pwer_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp EXTPOW_IE",
				(t_u8 *) pbss_entry->pext_pwer,
				(*(pbss_entry->pext_pwer)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case QUIET_CHAN:
			pbss_entry->pquiet_chan =
				(IEEEtypes_QuietChan_t *) pcurrent_ptr;
			pbss_entry->quiet_chan_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp QUIETCHAN_IE",
				(t_u8 *) pbss_entry->pquiet_chan,
				(*(pbss_entry->pquiet_chan)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		case BW_CHANNEL_SWITCH:
			/* RANDYTODO */
			break;
		case AID_INFO:
			break;
		case OPER_MODE_NTF:
			pbss_entry->poper_mode =
				(IEEEtypes_OperModeNtf_t *) pcurrent_ptr;
			pbss_entry->oper_mode_offset =
				(t_u16) (pcurrent_ptr -
					 pbss_entry->pbeacon_buf);
			HEXDUMP("InterpretIE: Resp OPERMODENTF_IE",
				(t_u8 *) pbss_entry->poper_mode,
				(*(pbss_entry->poper_mode)).ieee_hdr.len +
				sizeof(IEEEtypes_Header_t));
			break;
		default:
			/* Unknown elements are silently skipped. */
			break;
		}
		pcurrent_ptr += element_len + 2;
		/* Need to account for IE ID and IE Len */
		bytes_left_for_current_beacon -= (element_len + 2);
	}			/* while (bytes_left_for_current_beacon > 2) */
	LEAVE();
	return ret;
}
/**
* @brief Adjust ie's position in BSSDescriptor_t
*
* @param pmpriv A pointer to mlan_private structure
* @param pbss_entry A pointer to BSSDescriptor_t structure
*
* @return N/A
*/
static t_void
wlan_adjust_ie_in_bss_entry(IN mlan_private * pmpriv,
			    IN BSSDescriptor_t * pbss_entry)
{
	ENTER();
	if (pbss_entry->pbeacon_buf) {
		/* The IE pointers were recorded against an earlier copy of
		 * the beacon; rebase every non-NULL pointer against the
		 * current pbeacon_buf using its saved offset. */
		if (pbss_entry->pwpa_ie) {
			pbss_entry->pwpa_ie = (IEEEtypes_VendorSpecific_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->wpa_offset);
		}
		if (pbss_entry->prsn_ie) {
			pbss_entry->prsn_ie = (IEEEtypes_Generic_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->rsn_offset);
		}
		if (pbss_entry->pwapi_ie) {
			pbss_entry->pwapi_ie = (IEEEtypes_Generic_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->wapi_offset);
		}
		if (pbss_entry->pht_cap) {
			pbss_entry->pht_cap = (IEEEtypes_HTCap_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->ht_cap_offset);
		}
		if (pbss_entry->pht_info) {
			pbss_entry->pht_info = (IEEEtypes_HTInfo_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->ht_info_offset);
		}
		if (pbss_entry->pbss_co_2040) {
			pbss_entry->pbss_co_2040 = (IEEEtypes_2040BSSCo_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->bss_co_2040_offset);
		}
		if (pbss_entry->pext_cap) {
			pbss_entry->pext_cap = (IEEEtypes_ExtCap_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->ext_cap_offset);
		}
		if (pbss_entry->poverlap_bss_scan_param) {
			pbss_entry->poverlap_bss_scan_param =
				(IEEEtypes_OverlapBSSScanParam_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->overlap_bss_offset);
		}
		if (pbss_entry->pvht_cap) {
			pbss_entry->pvht_cap = (IEEEtypes_VHTCap_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->vht_cap_offset);
		}
		if (pbss_entry->pvht_oprat) {
			pbss_entry->pvht_oprat = (IEEEtypes_VHTOprat_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->vht_oprat_offset);
		}
		if (pbss_entry->pvht_txpower) {
			pbss_entry->pvht_txpower = (IEEEtypes_VHTtxpower_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->vht_txpower_offset);
		}
		if (pbss_entry->pext_pwer) {
			pbss_entry->pext_pwer = (IEEEtypes_ExtPwerCons_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->ext_pwer_offset);
		}
		if (pbss_entry->pext_bssload) {
			pbss_entry->pext_bssload = (IEEEtypes_ExtBSSload_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->ext_bssload_offset);
		}
		if (pbss_entry->pquiet_chan) {
			pbss_entry->pquiet_chan = (IEEEtypes_QuietChan_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->quiet_chan_offset);
		}
		if (pbss_entry->poper_mode) {
			pbss_entry->poper_mode = (IEEEtypes_OperModeNtf_t *)
				(pbss_entry->pbeacon_buf +
				 pbss_entry->oper_mode_offset);
		}
	} else {
		/* No beacon buffer: clear every IE pointer/offset pair.
		 * Previously only the first eight pairs were reset, which
		 * left the VHT/11ac related pointers (pvht_cap, pvht_oprat,
		 * pvht_txpower, pext_pwer, pext_bssload, pquiet_chan,
		 * poper_mode) dangling into freed/reused beacon storage;
		 * callers test these pointers for truth, so they must be
		 * NULLed here as well. */
		pbss_entry->pwpa_ie = MNULL;
		pbss_entry->wpa_offset = 0;
		pbss_entry->prsn_ie = MNULL;
		pbss_entry->rsn_offset = 0;
		pbss_entry->pwapi_ie = MNULL;
		pbss_entry->wapi_offset = 0;
		pbss_entry->pht_cap = MNULL;
		pbss_entry->ht_cap_offset = 0;
		pbss_entry->pht_info = MNULL;
		pbss_entry->ht_info_offset = 0;
		pbss_entry->pbss_co_2040 = MNULL;
		pbss_entry->bss_co_2040_offset = 0;
		pbss_entry->pext_cap = MNULL;
		pbss_entry->ext_cap_offset = 0;
		pbss_entry->poverlap_bss_scan_param = MNULL;
		pbss_entry->overlap_bss_offset = 0;
		pbss_entry->pvht_cap = MNULL;
		pbss_entry->vht_cap_offset = 0;
		pbss_entry->pvht_oprat = MNULL;
		pbss_entry->vht_oprat_offset = 0;
		pbss_entry->pvht_txpower = MNULL;
		pbss_entry->vht_txpower_offset = 0;
		pbss_entry->pext_pwer = MNULL;
		pbss_entry->ext_pwer_offset = 0;
		pbss_entry->pext_bssload = MNULL;
		pbss_entry->ext_bssload_offset = 0;
		pbss_entry->pquiet_chan = MNULL;
		pbss_entry->quiet_chan_offset = 0;
		pbss_entry->poper_mode = MNULL;
		pbss_entry->oper_mode_offset = 0;
	}
	LEAVE();
	return;
}
/**
* @brief Store a beacon or probe response for a BSS returned in the scan
*
* Store a new scan response or an update for a previous scan response. New
* entries need to verify that they do not exceed the total amount of
* memory allocated for the table.
* Replacement entries need to take into consideration the amount of space
* currently allocated for the beacon/probe response and adjust the entry
* as needed.
*
* A small amount of extra pad (SCAN_BEACON_ENTRY_PAD) is generally reserved
* for an entry in case it is a beacon since a probe response for the
* network will by larger per the standard. This helps to reduce the
* amount of memory copying to fit a new probe response into an entry
* already occupied by a network's previously stored beacon.
*
* @param pmpriv A pointer to mlan_private structure
* @param beacon_idx Index in the scan table to store this entry; may be
* replacing an older duplicate entry for this BSS
* @param num_of_ent Number of entries currently in the table
* @param pnew_beacon Pointer to the new beacon/probe response to save
*
* @return N/A
*/
static t_void
wlan_ret_802_11_scan_store_beacon(IN mlan_private * pmpriv,
				  IN t_u32 beacon_idx,
				  IN t_u32 num_of_ent,
				  IN BSSDescriptor_t * pnew_beacon)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	t_u8 *pbcn_store;
	t_u32 new_bcn_size;
	t_u32 old_bcn_size;
	t_u32 bcn_space;
	t_u32 adj_idx;
	mlan_status ret = MLAN_STATUS_SUCCESS;
	t_u8 *tmp_buf;
	t_u16 bcn_size = 0;
	t_u32 bcn_offset = 0;
	ENTER();
	if (pmadapter->pscan_table[beacon_idx].pbeacon_buf) {
		/* Replacement case: a beacon for this BSS is already stored.
		 * Compare the new size against the old size and the space
		 * allotted (beacon_buf_size_max) for the existing entry. */
		new_bcn_size = pnew_beacon->beacon_buf_size;
		old_bcn_size =
			pmadapter->pscan_table[beacon_idx].beacon_buf_size;
		bcn_space =
			pmadapter->pscan_table[beacon_idx].beacon_buf_size_max;
		pbcn_store = pmadapter->pscan_table[beacon_idx].pbeacon_buf;
		/* Set the max to be the same as current entry unless changed
		   below */
		pnew_beacon->beacon_buf_size_max = bcn_space;
		if (new_bcn_size == old_bcn_size) {
			/*
			 * Beacon is the same size as the previous entry.
			 * Replace the previous contents with the scan result
			 */
			memcpy(pmadapter, pbcn_store,
			       pnew_beacon->pbeacon_buf,
			       pnew_beacon->beacon_buf_size);
		} else if (new_bcn_size <= bcn_space) {
			/*
			 * New beacon size will fit in the amount of space
			 * we have previously allocated for it
			 */
			/* Copy the new beacon buffer entry over the old one */
			memcpy(pmadapter, pbcn_store, pnew_beacon->pbeacon_buf,
			       new_bcn_size);
			/*
			 * If the old beacon size was less than the maximum
			 * we had allotted for the entry, and the new entry
			 * is even smaller, reset the max size to the old beacon
			 * entry and compress the storage space (leaving a new
			 * pad space of (old_bcn_size - new_bcn_size).
			 */
			if (old_bcn_size < bcn_space &&
			    new_bcn_size <= old_bcn_size) {
				/*
				 * Old Beacon size is smaller than the allotted storage size.
				 * Shrink the allotted storage space.
				 */
				PRINTM(MINFO,
				       "AppControl: Smaller Duplicate Beacon (%d), "
				       "old = %d, new = %d, space = %d, left = %d\n",
				       beacon_idx, old_bcn_size, new_bcn_size,
				       bcn_space,
				       (pmadapter->bcn_buf_size -
					(pmadapter->pbcn_buf_end -
					 pmadapter->bcn_buf)));
				/*
				 * memmove (since the memory overlaps) the data
				 * after the beacon we just stored to the end of
				 * the current beacon. This cleans up any unused
				 * space the old larger beacon was using in the buffer
				 */
				memmove(pmadapter,
					(void *)((t_ptr) pbcn_store +
						 (t_ptr) old_bcn_size),
					(void *)((t_ptr) pbcn_store +
						 (t_ptr) bcn_space),
					(t_u32) ((t_ptr) pmadapter->
						 pbcn_buf_end -
						 ((t_ptr) pbcn_store +
						  (t_ptr) bcn_space)));
				/*
				 * Decrement the end pointer by the difference between
				 * the old larger size and the new smaller size since
				 * we are using less space due to the new beacon being
				 * smaller
				 */
				pmadapter->pbcn_buf_end -=
					(bcn_space - old_bcn_size);
				/* Set the maximum storage size to the old
				   beacon size */
				pnew_beacon->beacon_buf_size_max = old_bcn_size;
				/* Adjust beacon buffer pointers that are past
				   the current */
				for (adj_idx = 0; adj_idx < num_of_ent;
				     adj_idx++) {
					/* Every entry stored after the one we
					 * just shrank moved down in memory;
					 * rebase its buffer pointer and all
					 * IE pointers derived from it. */
					if (pmadapter->pscan_table[adj_idx].
					    pbeacon_buf > pbcn_store) {
						pmadapter->pscan_table[adj_idx].
							pbeacon_buf -=
							(bcn_space -
							 old_bcn_size);
						wlan_adjust_ie_in_bss_entry
							(pmpriv,
							 &pmadapter->
							 pscan_table[adj_idx]);
					}
				}
			}
		} else if (pmadapter->pbcn_buf_end + (new_bcn_size - bcn_space)
			   < (pmadapter->bcn_buf + pmadapter->bcn_buf_size)) {
			/*
			 * Beacon is larger than space previously allocated (bcn_space)
			 * and there is enough space left in the beaconBuffer to store
			 * the additional data
			 */
			PRINTM(MINFO,
			       "AppControl: Larger Duplicate Beacon (%d), "
			       "old = %d, new = %d, space = %d, left = %d\n",
			       beacon_idx, old_bcn_size, new_bcn_size,
			       bcn_space,
			       (pmadapter->bcn_buf_size -
				(pmadapter->pbcn_buf_end -
				 pmadapter->bcn_buf)));
			/*
			 * memmove (since the memory overlaps) the data
			 * after the beacon we just stored to the end of
			 * the current beacon. This moves the data for
			 * the beacons after this further in memory to
			 * make space for the new larger beacon we are
			 * about to copy in.
			 */
			memmove(pmadapter,
				(void *)((t_ptr) pbcn_store +
					 (t_ptr) new_bcn_size),
				(void *)((t_ptr) pbcn_store +
					 (t_ptr) bcn_space),
				(t_u32) ((t_ptr) pmadapter->pbcn_buf_end -
					 ((t_ptr) pbcn_store +
					  (t_ptr) bcn_space)));
			/* Copy the new beacon buffer entry over the old one */
			memcpy(pmadapter, pbcn_store, pnew_beacon->pbeacon_buf,
			       new_bcn_size);
			/* Move the beacon end pointer by the amount of new
			   beacon data we are adding */
			pmadapter->pbcn_buf_end += (new_bcn_size - bcn_space);
			/*
			 * This entry is bigger than the allotted max space
			 * previously reserved. Increase the max space to
			 * be equal to the new beacon size
			 */
			pnew_beacon->beacon_buf_size_max = new_bcn_size;
			/* Adjust beacon buffer pointers that are past the
			   current */
			for (adj_idx = 0; adj_idx < num_of_ent; adj_idx++) {
				/* Entries stored after this one moved up by
				 * the growth amount; rebase them too. */
				if (pmadapter->pscan_table[adj_idx].
				    pbeacon_buf > pbcn_store) {
					pmadapter->pscan_table[adj_idx].
						pbeacon_buf +=
						(new_bcn_size - bcn_space);
					wlan_adjust_ie_in_bss_entry(pmpriv,
								    &pmadapter->
								    pscan_table
								    [adj_idx]);
				}
			}
		} else {
			/*
			 * Beacon is larger than the previously allocated space, but
			 * there is not enough free space to store the additional data
			 */
			PRINTM(MERROR,
			       "AppControl: Failed: Larger Duplicate Beacon (%d),"
			       " old = %d, new = %d, space = %d, left = %d\n",
			       beacon_idx, old_bcn_size, new_bcn_size,
			       bcn_space,
			       (pmadapter->bcn_buf_size -
				(pmadapter->pbcn_buf_end -
				 pmadapter->bcn_buf)));
			/* Storage failure, keep old beacon intact */
			pnew_beacon->beacon_buf_size = old_bcn_size;
			/* The new descriptor keeps the old stored beacon, so
			 * each IE offset must be taken from the old table
			 * entry rather than from the (discarded) new copy. */
			if (pnew_beacon->pwpa_ie)
				pnew_beacon->wpa_offset =
					pmadapter->pscan_table[beacon_idx].
					wpa_offset;
			if (pnew_beacon->prsn_ie)
				pnew_beacon->rsn_offset =
					pmadapter->pscan_table[beacon_idx].
					rsn_offset;
			if (pnew_beacon->pwapi_ie)
				pnew_beacon->wapi_offset =
					pmadapter->pscan_table[beacon_idx].
					wapi_offset;
			if (pnew_beacon->pht_cap)
				pnew_beacon->ht_cap_offset =
					pmadapter->pscan_table[beacon_idx].
					ht_cap_offset;
			if (pnew_beacon->pht_info)
				pnew_beacon->ht_info_offset =
					pmadapter->pscan_table[beacon_idx].
					ht_info_offset;
			if (pnew_beacon->pbss_co_2040)
				pnew_beacon->bss_co_2040_offset =
					pmadapter->pscan_table[beacon_idx].
					bss_co_2040_offset;
			if (pnew_beacon->pext_cap)
				pnew_beacon->ext_cap_offset =
					pmadapter->pscan_table[beacon_idx].
					ext_cap_offset;
			if (pnew_beacon->poverlap_bss_scan_param)
				pnew_beacon->overlap_bss_offset =
					pmadapter->pscan_table[beacon_idx].
					overlap_bss_offset;
			if (pnew_beacon->pvht_cap)
				pnew_beacon->vht_cap_offset =
					pmadapter->pscan_table[beacon_idx].
					vht_cap_offset;
			if (pnew_beacon->pvht_oprat)
				pnew_beacon->vht_oprat_offset =
					pmadapter->pscan_table[beacon_idx].
					vht_oprat_offset;
			if (pnew_beacon->pvht_txpower)
				pnew_beacon->vht_txpower_offset =
					pmadapter->pscan_table[beacon_idx].
					vht_txpower_offset;
			if (pnew_beacon->pext_pwer)
				pnew_beacon->ext_pwer_offset =
					pmadapter->pscan_table[beacon_idx].
					ext_pwer_offset;
			if (pnew_beacon->pext_bssload)
				pnew_beacon->ext_bssload_offset =
					pmadapter->pscan_table[beacon_idx].
					ext_bssload_offset;
			if (pnew_beacon->pquiet_chan)
				pnew_beacon->quiet_chan_offset =
					pmadapter->pscan_table[beacon_idx].
					quiet_chan_offset;
			if (pnew_beacon->poper_mode)
				pnew_beacon->oper_mode_offset =
					pmadapter->pscan_table[beacon_idx].
					oper_mode_offset;
		}
		/* Point the new entry to its permanent storage space */
		pnew_beacon->pbeacon_buf = pbcn_store;
		wlan_adjust_ie_in_bss_entry(pmpriv, pnew_beacon);
	} else {
		/* New-entry case: no beacon stored for this index yet. */
		if ((pmadapter->pbcn_buf_end + pnew_beacon->beacon_buf_size +
		     SCAN_BEACON_ENTRY_PAD > (pmadapter->bcn_buf +
					      pmadapter->bcn_buf_size)) &&
		    (pmadapter->bcn_buf_size < MAX_SCAN_BEACON_BUFFER)) {
			/* no space for this entry, realloc bcn buffer */
			if (pmadapter->callbacks.moal_vmalloc &&
			    pmadapter->callbacks.moal_vfree)
				ret = pmadapter->callbacks.
					moal_vmalloc(pmadapter->pmoal_handle,
						     pmadapter->bcn_buf_size +
						     DEFAULT_SCAN_BEACON_BUFFER,
						     (t_u8 **) & tmp_buf);
			else
				ret = pmadapter->callbacks.
					moal_malloc(pmadapter->pmoal_handle,
						    pmadapter->bcn_buf_size +
						    DEFAULT_SCAN_BEACON_BUFFER,
						    MLAN_MEM_DEF,
						    (t_u8 **) & tmp_buf);
			if ((ret == MLAN_STATUS_SUCCESS) && (tmp_buf)) {
				PRINTM(MCMND,
				       "Realloc Beacon buffer, old size=%d, new_size=%d\n",
				       pmadapter->bcn_buf_size,
				       pmadapter->bcn_buf_size +
				       DEFAULT_SCAN_BEACON_BUFFER);
				bcn_size =
					pmadapter->pbcn_buf_end -
					pmadapter->bcn_buf;
				memcpy(pmadapter, tmp_buf, pmadapter->bcn_buf,
				       bcn_size);
				/* Adjust beacon buffer pointers that are past
				   the current */
				for (adj_idx = 0; adj_idx < num_of_ent;
				     adj_idx++) {
					/* Re-point every table entry into the
					 * freshly allocated buffer, keeping
					 * its offset from the buffer start. */
					bcn_offset =
						pmadapter->pscan_table[adj_idx].
						pbeacon_buf -
						pmadapter->bcn_buf;
					pmadapter->pscan_table[adj_idx].
						pbeacon_buf =
						tmp_buf + bcn_offset;
					wlan_adjust_ie_in_bss_entry(pmpriv,
								    &pmadapter->
								    pscan_table
								    [adj_idx]);
				}
				pmadapter->pbcn_buf_end = tmp_buf + bcn_size;
				if (pmadapter->callbacks.moal_vmalloc &&
				    pmadapter->callbacks.moal_vfree)
					pmadapter->callbacks.
						moal_vfree(pmadapter->
							   pmoal_handle,
							   (t_u8 *) pmadapter->
							   bcn_buf);
				else
					pmadapter->callbacks.
						moal_mfree(pmadapter->
							   pmoal_handle,
							   (t_u8 *) pmadapter->
							   bcn_buf);
				pmadapter->bcn_buf = tmp_buf;
				pmadapter->bcn_buf_size +=
					DEFAULT_SCAN_BEACON_BUFFER;
			}
		}
		/*
		 * No existing beacon data exists for this entry, check to see
		 * if we can fit it in the remaining space
		 */
		if (pmadapter->pbcn_buf_end + pnew_beacon->beacon_buf_size +
		    SCAN_BEACON_ENTRY_PAD < (pmadapter->bcn_buf +
					     pmadapter->bcn_buf_size)) {
			/*
			 * Copy the beacon buffer data from the local entry to the
			 * adapter dev struct buffer space used to store the raw
			 * beacon data for each entry in the scan table
			 */
			memcpy(pmadapter, pmadapter->pbcn_buf_end,
			       pnew_beacon->pbeacon_buf,
			       pnew_beacon->beacon_buf_size);
			/* Update the beacon ptr to point to the table save
			   area */
			pnew_beacon->pbeacon_buf = pmadapter->pbcn_buf_end;
			pnew_beacon->beacon_buf_size_max =
				(pnew_beacon->beacon_buf_size +
				 SCAN_BEACON_ENTRY_PAD);
			wlan_adjust_ie_in_bss_entry(pmpriv, pnew_beacon);
			/* Increment the end pointer by the size reserved */
			pmadapter->pbcn_buf_end +=
				pnew_beacon->beacon_buf_size_max;
			PRINTM(MINFO, "AppControl: Beacon[%02d] sz=%03d,"
			       " used = %04d, left = %04d\n",
			       beacon_idx,
			       pnew_beacon->beacon_buf_size,
			       (pmadapter->pbcn_buf_end - pmadapter->bcn_buf),
			       (pmadapter->bcn_buf_size -
				(pmadapter->pbcn_buf_end -
				 pmadapter->bcn_buf)));
		} else {
			/*
			 * No space for new beacon
			 */
			PRINTM(MCMND, "AppControl: No space beacon (%d): "
			       MACSTR "; sz=%03d, left=%03d\n",
			       beacon_idx,
			       MAC2STR(pnew_beacon->mac_address),
			       pnew_beacon->beacon_buf_size,
			       (pmadapter->bcn_buf_size -
				(pmadapter->pbcn_buf_end -
				 pmadapter->bcn_buf)));
			/* Storage failure; clear storage records for this bcn */
			pnew_beacon->pbeacon_buf = MNULL;
			pnew_beacon->beacon_buf_size = 0;
			pnew_beacon->beacon_buf_size_max = 0;
			wlan_adjust_ie_in_bss_entry(pmpriv, pnew_beacon);
		}
	}
	LEAVE();
}
/**
* @brief Restore a beacon buffer of the current bss descriptor
*
* @param pmpriv A pointer to mlan_private structure
*
* @return N/A
*/
static t_void
wlan_restore_curr_bcn(IN mlan_private * pmpriv)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	mlan_callbacks *pcb = (pmlan_callbacks) & pmadapter->callbacks;
	BSSDescriptor_t *pcurr_bss = &pmpriv->curr_bss_params.bss_descriptor;
	ENTER();
	/* Only restore when a saved copy exists and the shared beacon buffer
	 * still has room for it at the end. */
	if (pmpriv->pcurr_bcn_buf &&
	    ((pmadapter->pbcn_buf_end + pmpriv->curr_bcn_size) <
	     (pmadapter->bcn_buf + pmadapter->bcn_buf_size))) {
		pcb->moal_spin_lock(pmadapter->pmoal_handle,
				    pmpriv->curr_bcn_buf_lock);
		/* restore the current beacon buffer */
		memcpy(pmadapter, pmadapter->pbcn_buf_end,
		       pmpriv->pcurr_bcn_buf, pmpriv->curr_bcn_size);
		pcurr_bss->pbeacon_buf = pmadapter->pbcn_buf_end;
		pcurr_bss->beacon_buf_size = pmpriv->curr_bcn_size;
		pmadapter->pbcn_buf_end += pmpriv->curr_bcn_size;
		/*
		 * Rebase all stored IE pointers against the freshly restored
		 * beacon buffer via the common helper. This replaces the
		 * previous hand-rolled pointer fixups, which duplicated
		 * wlan_adjust_ie_in_bss_entry() but omitted pwapi_ie,
		 * leaving the WAPI IE pointer dangling after a restore.
		 */
		wlan_adjust_ie_in_bss_entry(pmpriv, pcurr_bss);
		pcb->moal_spin_unlock(pmadapter->pmoal_handle,
				      pmpriv->curr_bcn_buf_lock);
		PRINTM(MINFO, "current beacon restored %d\n",
		       pmpriv->curr_bcn_size);
	} else {
		PRINTM(MWARN,
		       "curr_bcn_buf not saved or bcn_buf has no space\n");
	}
	LEAVE();
}
/**
 *  @brief Post process the scan table after a new scan command has completed
 *
 *  Inspect each entry of the scan table and try to find an entry that
 *    matches our current associated/joined network from the scan.  If
 *    one is found, update the stored copy of the BSSDescriptor for our
 *    current network.
 *
 *  Debug dump the current scan table contents if compiled accordingly.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *
 *  @return             N/A
 */
static t_void
wlan_scan_process_results(IN mlan_private * pmpriv)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	t_s32 found_idx;
	t_u32 tbl_idx;

	ENTER();

	if (pmpriv->media_connected == MTRUE) {
		found_idx = wlan_find_ssid_in_list(pmpriv,
						   &pmpriv->curr_bss_params.
						   bss_descriptor.ssid,
						   pmpriv->curr_bss_params.
						   bss_descriptor.mac_address,
						   pmpriv->bss_mode);
		if (found_idx >= 0) {
			BSSDescriptor_t *pdesc =
				&pmpriv->curr_bss_params.bss_descriptor;

			/* Hold the beacon buffer lock while the stored
			   descriptor is rewritten from the scan table */
			pmadapter->callbacks.moal_spin_lock(pmadapter->
							    pmoal_handle,
							    pmpriv->
							    curr_bcn_buf_lock);
			/*
			 * Invalidate every cached IE pointer/offset pair;
			 * they referenced the previous beacon buffer and
			 * will be re-derived after the copy below.
			 */
			pdesc->pwpa_ie = MNULL;
			pdesc->wpa_offset = 0;
			pdesc->prsn_ie = MNULL;
			pdesc->rsn_offset = 0;
			pdesc->pwapi_ie = MNULL;
			pdesc->wapi_offset = 0;
			pdesc->pht_cap = MNULL;
			pdesc->ht_cap_offset = 0;
			pdesc->pht_info = MNULL;
			pdesc->ht_info_offset = 0;
			pdesc->pbss_co_2040 = MNULL;
			pdesc->bss_co_2040_offset = 0;
			pdesc->pext_cap = MNULL;
			pdesc->ext_cap_offset = 0;
			pdesc->poverlap_bss_scan_param = MNULL;
			pdesc->overlap_bss_offset = 0;
			pdesc->pvht_cap = MNULL;
			pdesc->vht_cap_offset = 0;
			pdesc->pvht_oprat = MNULL;
			pdesc->vht_oprat_offset = 0;
			pdesc->pvht_txpower = MNULL;
			pdesc->vht_txpower_offset = 0;
			pdesc->pext_pwer = MNULL;
			pdesc->ext_pwer_offset = 0;
			pdesc->pext_bssload = MNULL;
			pdesc->ext_bssload_offset = 0;
			pdesc->pquiet_chan = MNULL;
			pdesc->quiet_chan_offset = 0;
			pdesc->poper_mode = MNULL;
			pdesc->oper_mode_offset = 0;
			pdesc->pbeacon_buf = MNULL;
			pdesc->beacon_buf_size = 0;
			pdesc->beacon_buf_size_max = 0;

			PRINTM(MINFO,
			       "Found current ssid/bssid in list @ index #%d\n",
			       found_idx);
			/* Make a copy of current BSSID descriptor */
			memcpy(pmadapter, pdesc,
			       &pmadapter->pscan_table[found_idx],
			       sizeof(pmpriv->curr_bss_params.bss_descriptor));
			wlan_save_curr_bcn(pmpriv);
			pmadapter->callbacks.moal_spin_unlock(pmadapter->
							      pmoal_handle,
							      pmpriv->
							      curr_bcn_buf_lock);
		} else {
			/* Current network not in the new results; fall back
			   to the previously saved beacon */
			wlan_restore_curr_bcn(pmpriv);
		}
	}

	/* Debug dump of the full table contents */
	for (tbl_idx = 0; tbl_idx < pmadapter->num_in_scan_table; tbl_idx++)
		PRINTM(MINFO, "Scan:(%02d) " MACSTR ", "
		       "RSSI[%03d], SSID[%s]\n",
		       tbl_idx,
		       MAC2STR(pmadapter->pscan_table[tbl_idx].mac_address),
		       (t_s32) pmadapter->pscan_table[tbl_idx].rssi,
		       pmadapter->pscan_table[tbl_idx].ssid.ssid);

	/*
	 * Prepares domain info from scan table and downloads the
	 * domain info command to the FW.
	 */
	wlan_11d_prepare_dnld_domain_info_cmd(pmpriv);

	PRINTM(MMSG, "wlan: SCAN COMPLETED: scanned AP count=%d\n",
	       pmadapter->num_in_scan_table);
	LEAVE();
}
/**
 *  @brief Delete a specific indexed entry from the scan table.
 *
 *  Delete the scan table entry indexed by table_idx.  Compact the remaining
 *    entries and adjust any buffering of beacon/probe response data
 *    if needed.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param table_idx    Scan table entry index to delete from the table
 *
 *  @return             N/A
 *
 *  @pre                table_idx must be an index to a valid entry
 */
static t_void
wlan_scan_delete_table_entry(IN mlan_private * pmpriv, IN t_s32 table_idx)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	t_u32 del_idx;
	t_u32 beacon_buf_adj;
	t_u8 *pbeacon_buf;

	ENTER();

	/*
	 * Remember the deleted entry's beacon allocation size: the beacon
	 * pointers of all entries compacted after it must be slid back by
	 * exactly this amount.
	 */
	beacon_buf_adj = pmadapter->pscan_table[table_idx].beacon_buf_size_max;
	PRINTM(MINFO,
	       "Scan: Delete Entry %d, beacon buffer removal = %d bytes\n",
	       table_idx, beacon_buf_adj);

	/* Check if the table entry had storage allocated for its beacon */
	if (beacon_buf_adj) {
		pbeacon_buf = pmadapter->pscan_table[table_idx].pbeacon_buf;

		/* Close the gap: shrink the end-of-buffer pointer by the
		   amount being removed */
		pmadapter->pbcn_buf_end -= beacon_buf_adj;

		PRINTM(MINFO,
		       "Scan: Delete Entry %d, compact data: %p <- %p (sz = %d)\n",
		       table_idx,
		       pbeacon_buf,
		       pbeacon_buf + beacon_buf_adj,
		       pmadapter->pbcn_buf_end - pbeacon_buf);

		/* Slide everything after the deleted entry's beacon data
		   down over it; regions overlap, so memmove is required */
		memmove(pmadapter, pbeacon_buf,
			(void *)((t_ptr) pbeacon_buf + (t_ptr) beacon_buf_adj),
			(t_u32) ((t_ptr) pmadapter->pbcn_buf_end -
				 (t_ptr) pbeacon_buf));
	}

	PRINTM(MINFO, "Scan: Delete Entry %d, num_in_scan_table = %d\n",
	       table_idx, pmadapter->num_in_scan_table);

	/* Compact the table: copy each later entry down one slot, removing
	   the requested entry */
	for (del_idx = table_idx; (del_idx + 1) < pmadapter->num_in_scan_table;
	     del_idx++) {
		BSSDescriptor_t *pbss = &pmadapter->pscan_table[del_idx];

		/* Copy the next entry over this one */
		memcpy(pmadapter, pbss, pbss + 1, sizeof(BSSDescriptor_t));

		/*
		 * Rebase this entry's beacon pointer and every cached IE
		 * pointer onto the compacted buffer.  Entries without
		 * stored beacon data (pbeacon_buf == MNULL) need no
		 * adjustment.
		 */
		if (!pbss->pbeacon_buf)
			continue;

		pbss->pbeacon_buf -= beacon_buf_adj;

		if (pbss->pwpa_ie)
			pbss->pwpa_ie = (IEEEtypes_VendorSpecific_t *)
				(pbss->pbeacon_buf + pbss->wpa_offset);
		if (pbss->prsn_ie)
			pbss->prsn_ie = (IEEEtypes_Generic_t *)
				(pbss->pbeacon_buf + pbss->rsn_offset);
		if (pbss->pwapi_ie)
			pbss->pwapi_ie = (IEEEtypes_Generic_t *)
				(pbss->pbeacon_buf + pbss->wapi_offset);
		if (pbss->pht_cap)
			pbss->pht_cap = (IEEEtypes_HTCap_t *)
				(pbss->pbeacon_buf + pbss->ht_cap_offset);
		if (pbss->pht_info)
			pbss->pht_info = (IEEEtypes_HTInfo_t *)
				(pbss->pbeacon_buf + pbss->ht_info_offset);
		if (pbss->pbss_co_2040)
			pbss->pbss_co_2040 = (IEEEtypes_2040BSSCo_t *)
				(pbss->pbeacon_buf + pbss->bss_co_2040_offset);
		if (pbss->pext_cap)
			pbss->pext_cap = (IEEEtypes_ExtCap_t *)
				(pbss->pbeacon_buf + pbss->ext_cap_offset);
		if (pbss->poverlap_bss_scan_param)
			pbss->poverlap_bss_scan_param =
				(IEEEtypes_OverlapBSSScanParam_t *)
				(pbss->pbeacon_buf + pbss->overlap_bss_offset);
		if (pbss->pvht_cap)
			pbss->pvht_cap = (IEEEtypes_VHTCap_t *)
				(pbss->pbeacon_buf + pbss->vht_cap_offset);
		if (pbss->pvht_oprat)
			pbss->pvht_oprat = (IEEEtypes_VHTOprat_t *)
				(pbss->pbeacon_buf + pbss->vht_oprat_offset);
		if (pbss->pvht_txpower)
			pbss->pvht_txpower = (IEEEtypes_VHTtxpower_t *)
				(pbss->pbeacon_buf + pbss->vht_txpower_offset);
		if (pbss->pext_pwer)
			pbss->pext_pwer = (IEEEtypes_ExtPwerCons_t *)
				(pbss->pbeacon_buf + pbss->ext_pwer_offset);
		if (pbss->pext_bssload)
			pbss->pext_bssload = (IEEEtypes_ExtBSSload_t *)
				(pbss->pbeacon_buf + pbss->ext_bssload_offset);
		if (pbss->pquiet_chan)
			pbss->pquiet_chan = (IEEEtypes_QuietChan_t *)
				(pbss->pbeacon_buf + pbss->quiet_chan_offset);
		if (pbss->poper_mode)
			pbss->poper_mode = (IEEEtypes_OperModeNtf_t *)
				(pbss->pbeacon_buf + pbss->oper_mode_offset);
	}

	/* The last slot is now stale after the shift; wipe it and shrink
	   the stored-entry count */
	memset(pmadapter,
	       pmadapter->pscan_table + pmadapter->num_in_scan_table - 1, 0x00,
	       sizeof(BSSDescriptor_t));
	pmadapter->num_in_scan_table--;
	LEAVE();
}
/**
 *  @brief Delete all occurrences of a given SSID from the scan table
 *
 *  Iterate through the scan table and delete all entries that match a given
 *    SSID.  Compact the remaining scan table entries.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param pdel_ssid    Pointer to an SSID to be used in deleting all
 *                      matching SSIDs from the scan table
 *
 *  @return             MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
static mlan_status
wlan_scan_delete_ssid_table_entry(IN mlan_private * pmpriv,
				  IN mlan_802_11_ssid * pdel_ssid)
{
	mlan_status ret = MLAN_STATUS_FAILURE;
	t_s32 found_idx;

	ENTER();

	PRINTM(MINFO, "Scan: Delete Ssid Entry: %-32s\n", pdel_ssid->ssid);

	/*
	 * Search-and-delete until no entry with this SSID remains; report
	 * success if at least one matching entry was removed.
	 */
	for (;;) {
		found_idx = wlan_find_ssid_in_list(pmpriv,
						   pdel_ssid,
						   MNULL, MLAN_BSS_MODE_AUTO);
		if (found_idx < 0)
			break;
		PRINTM(MINFO, "Scan: Delete SSID Entry: Found Idx = %d\n",
		       found_idx);
		ret = MLAN_STATUS_SUCCESS;
		wlan_scan_delete_table_entry(pmpriv, found_idx);
	}

	LEAVE();
	return ret;
}
/********************************************************
Global Functions
********************************************************/
/**
 *  @brief Check if a scanned network compatible with the driver settings
 *
 *   WEP     WPA    WPA2   ad-hoc encrypt                  Network
 * enabled enabled enabled  AES   mode   Privacy WPA WPA2  Compatible
 *    0       0       0      0   NONE      0     0    0   yes No security
 *    0       1       0      0    x        1x    1    x   yes WPA (disable HT if no AES)
 *    0       0       1      0    x        1x    x    1   yes WPA2 (disable HT if no AES)
 *    0       0       0      1   NONE      1     0    0   yes Ad-hoc AES
 *    1       0       0      0   NONE      1     0    0   yes Static WEP (disable HT)
 *    0       0       0      0  !=NONE     1     0    0   yes Dynamic WEP
 *
 *  @param pmpriv  A pointer to mlan_private
 *  @param index   Index in scan table to check against current driver settings
 *  @param mode    Network mode: Infrastructure or IBSS
 *
 *  @return        Index in ScanTable, or negative value if error
 */
t_s32
wlan_is_network_compatible(IN mlan_private * pmpriv,
			   IN t_u32 index, IN t_u32 mode)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	BSSDescriptor_t *pbss_desc;
	t_u8 has_wpa_ie;
	t_u8 has_rsn_ie;

	ENTER();

	pbss_desc = &pmadapter->pscan_table[index];
	pbss_desc->disable_11n = MFALSE;

	/* The IE pointers never change below; evaluate their validity once */
	has_wpa_ie = (pbss_desc->pwpa_ie &&
		      pbss_desc->pwpa_ie->vend_hdr.element_id == WPA_IE);
	has_rsn_ie = (pbss_desc->prsn_ie &&
		      pbss_desc->prsn_ie->ieee_hdr.element_id == RSN_IE);

	/* Don't check for compatibility if roaming */
	if (pmpriv->media_connected == MTRUE &&
	    pmpriv->bss_mode == MLAN_BSS_MODE_INFRA &&
	    pbss_desc->bss_mode == MLAN_BSS_MODE_INFRA) {
		LEAVE();
		return index;
	}

	/* if the VHT CAP IE exists, the HT CAP IE should exist too */
	if (pbss_desc->pvht_cap && !pbss_desc->pht_cap) {
		PRINTM(MINFO,
		       "Don't connect to AP as HT CAP IE is not found from the 11AC AP.\n");
		LEAVE();
		return -1;
	}

	if (pbss_desc->wlan_11h_bss_info.chan_switch_ann.element_id ==
	    CHANNEL_SWITCH_ANN) {
		PRINTM(MINFO,
		       "Don't connect to AP with CHANNEL_SWITCH_ANN IE.\n");
		LEAVE();
		return -1;
	}

	if (pmpriv->wps.session_enable == MTRUE) {
		PRINTM(MINFO, "Return success directly in WPS period\n");
		LEAVE();
		return index;
	}

	/* Embedded-supplicant WPA/WPA2 handling */
	if (pbss_desc->bss_mode == mode &&
	    pmpriv->sec_info.ewpa_enabled == MTRUE) {
		if (has_wpa_ie || has_rsn_ie) {
			/* 11n needs AES; TKIP-only APs get HT disabled,
			   anything else is rejected */
			if ((pmpriv->adapter->config_bands & BAND_GN ||
			     pmpriv->adapter->config_bands & BAND_AN) &&
			    pbss_desc->pht_cap &&
			    pmpriv->bss_mode == MLAN_BSS_MODE_INFRA &&
			    !is_wpa_oui_present(pmpriv->adapter, pbss_desc,
						CIPHER_SUITE_CCMP) &&
			    !is_rsn_oui_present(pmpriv->adapter, pbss_desc,
						CIPHER_SUITE_CCMP)) {
				if (is_wpa_oui_present(pmpriv->adapter,
						       pbss_desc,
						       CIPHER_SUITE_TKIP) ||
				    is_rsn_oui_present(pmpriv->adapter,
						       pbss_desc,
						       CIPHER_SUITE_TKIP)) {
					PRINTM(MINFO,
					       "Disable 11n if AES is not supported by AP\n");
					pbss_desc->disable_11n = MTRUE;
				} else {
					LEAVE();
					return -1;
				}
			}
			LEAVE();
			return index;
		}
		PRINTM(MINFO, "ewpa_enabled: Ignore none WPA/WPA2 AP\n");
		LEAVE();
		return -1;
	}

	if (pmpriv->sec_info.wapi_enabled &&
	    pbss_desc->pwapi_ie &&
	    pbss_desc->pwapi_ie->ieee_hdr.element_id == WAPI_IE) {
		PRINTM(MINFO, "Return success for WAPI AP\n");
		LEAVE();
		return index;
	}

	if (pbss_desc->bss_mode != mode) {
		/* Mode doesn't match */
		LEAVE();
		return -1;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPDisabled &&
	    !pmpriv->sec_info.wpa_enabled &&
	    !pmpriv->sec_info.wpa2_enabled &&
	    !has_wpa_ie && !has_rsn_ie &&
	    !pmpriv->adhoc_aes_enabled &&
	    pmpriv->sec_info.encryption_mode == MLAN_ENCRYPTION_MODE_NONE &&
	    !pbss_desc->privacy) {
		/* No security */
		LEAVE();
		return index;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPEnabled &&
	    !pmpriv->sec_info.wpa_enabled &&
	    !pmpriv->sec_info.wpa2_enabled &&
	    !pmpriv->adhoc_aes_enabled && pbss_desc->privacy) {
		/* Static WEP enabled */
		PRINTM(MINFO, "Disable 11n in WEP mode\n");
		pbss_desc->disable_11n = MTRUE;
		/*
		 * Reject when an RSN or WPA IE is present but carries no
		 * WEP cipher OUI, i.e. the AP is really running WPA/WPA2.
		 */
		if ((has_rsn_ie || has_wpa_ie) &&
		    !is_rsn_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_WEP40) &&
		    !is_rsn_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_WEP104) &&
		    !is_wpa_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_WEP40) &&
		    !is_wpa_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_WEP104))
			index = -1;
		LEAVE();
		return index;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPDisabled &&
	    pmpriv->sec_info.wpa_enabled &&
	    !pmpriv->sec_info.wpa2_enabled &&
	    has_wpa_ie && !pmpriv->adhoc_aes_enabled
	    /*
	     * Privacy bit may NOT be set in some APs like LinkSys WRT54G
	     * && pbss_desc->privacy
	     */
		) {
		/* WPA enabled */
		PRINTM(MINFO,
		       "wlan_is_network_compatible() WPA: index=%d wpa_ie=%#x "
		       "rsn_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode=%#x "
		       "privacy=%#x\n", index,
		       pbss_desc->pwpa_ie ?
		       pbss_desc->pwpa_ie->vend_hdr.element_id : 0,
		       pbss_desc->prsn_ie ?
		       pbss_desc->prsn_ie->ieee_hdr.element_id : 0,
		       (pmpriv->sec_info.wep_status ==
			Wlan802_11WEPEnabled) ? "e" : "d",
		       pmpriv->sec_info.wpa_enabled ? "e" : "d",
		       pmpriv->sec_info.wpa2_enabled ? "e" : "d",
		       pmpriv->sec_info.encryption_mode, pbss_desc->privacy);
		if ((pmpriv->adapter->config_bands & BAND_GN ||
		     pmpriv->adapter->config_bands & BAND_AN) &&
		    pbss_desc->pht_cap &&
		    pmpriv->bss_mode == MLAN_BSS_MODE_INFRA &&
		    !is_wpa_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_CCMP)) {
			if (is_wpa_oui_present(pmpriv->adapter, pbss_desc,
					       CIPHER_SUITE_TKIP)) {
				PRINTM(MINFO,
				       "Disable 11n if AES is not supported by AP\n");
				pbss_desc->disable_11n = MTRUE;
			} else {
				LEAVE();
				return -1;
			}
		}
		LEAVE();
		return index;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPDisabled &&
	    !pmpriv->sec_info.wpa_enabled &&
	    pmpriv->sec_info.wpa2_enabled &&
	    has_rsn_ie && !pmpriv->adhoc_aes_enabled
	    /*
	     * Privacy bit may NOT be set in some APs like LinkSys WRT54G
	     * && pbss_desc->privacy
	     */
		) {
		/* WPA2 enabled */
		PRINTM(MINFO,
		       "wlan_is_network_compatible() WPA2: index=%d wpa_ie=%#x "
		       "rsn_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode=%#x "
		       "privacy=%#x\n", index,
		       pbss_desc->pwpa_ie ?
		       pbss_desc->pwpa_ie->vend_hdr.element_id : 0,
		       pbss_desc->prsn_ie ?
		       pbss_desc->prsn_ie->ieee_hdr.element_id : 0,
		       (pmpriv->sec_info.wep_status ==
			Wlan802_11WEPEnabled) ? "e" : "d",
		       pmpriv->sec_info.wpa_enabled ? "e" : "d",
		       pmpriv->sec_info.wpa2_enabled ? "e" : "d",
		       pmpriv->sec_info.encryption_mode, pbss_desc->privacy);
		if ((pmpriv->adapter->config_bands & BAND_GN ||
		     pmpriv->adapter->config_bands & BAND_AN) &&
		    pbss_desc->pht_cap &&
		    pmpriv->bss_mode == MLAN_BSS_MODE_INFRA &&
		    !is_rsn_oui_present(pmpriv->adapter, pbss_desc,
					CIPHER_SUITE_CCMP)) {
			if (is_rsn_oui_present(pmpriv->adapter, pbss_desc,
					       CIPHER_SUITE_TKIP)) {
				PRINTM(MINFO,
				       "Disable 11n if AES is not supported by AP\n");
				pbss_desc->disable_11n = MTRUE;
			} else {
				LEAVE();
				return -1;
			}
		}
		LEAVE();
		return index;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPDisabled &&
	    !pmpriv->sec_info.wpa_enabled &&
	    !pmpriv->sec_info.wpa2_enabled &&
	    !has_wpa_ie && !has_rsn_ie &&
	    pmpriv->adhoc_aes_enabled &&
	    pmpriv->sec_info.encryption_mode == MLAN_ENCRYPTION_MODE_NONE &&
	    pbss_desc->privacy) {
		/* Ad-hoc AES enabled */
		LEAVE();
		return index;
	}

	if (pmpriv->sec_info.wep_status == Wlan802_11WEPDisabled &&
	    !pmpriv->sec_info.wpa_enabled &&
	    !pmpriv->sec_info.wpa2_enabled &&
	    !has_wpa_ie && !has_rsn_ie &&
	    !pmpriv->adhoc_aes_enabled &&
	    pmpriv->sec_info.encryption_mode != MLAN_ENCRYPTION_MODE_NONE &&
	    pbss_desc->privacy) {
		/* Dynamic WEP enabled */
		PRINTM(MINFO,
		       "wlan_is_network_compatible() dynamic WEP: index=%d "
		       "wpa_ie=%#x rsn_ie=%#x EncMode=%#x privacy=%#x\n",
		       index,
		       pbss_desc->pwpa_ie ?
		       pbss_desc->pwpa_ie->vend_hdr.element_id : 0,
		       pbss_desc->prsn_ie ?
		       pbss_desc->prsn_ie->ieee_hdr.element_id : 0,
		       pmpriv->sec_info.encryption_mode, pbss_desc->privacy);
		LEAVE();
		return index;
	}

	/* Security doesn't match */
	PRINTM(MINFO,
	       "wlan_is_network_compatible() FAILED: index=%d wpa_ie=%#x "
	       "rsn_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
	       index,
	       pbss_desc->pwpa_ie ?
	       pbss_desc->pwpa_ie->vend_hdr.element_id : 0,
	       pbss_desc->prsn_ie ?
	       pbss_desc->prsn_ie->ieee_hdr.element_id : 0,
	       (pmpriv->sec_info.wep_status ==
		Wlan802_11WEPEnabled) ? "e" : "d",
	       pmpriv->sec_info.wpa_enabled ? "e" : "d",
	       pmpriv->sec_info.wpa2_enabled ? "e" : "d",
	       pmpriv->sec_info.encryption_mode, pbss_desc->privacy);
	LEAVE();
	return -1;
}
/**
 *  @brief Internal function used to flush the scan list
 *
 *  @param pmadapter    A pointer to mlan_adapter structure
 *
 *  @return             MLAN_STATUS_SUCCESS
 */
mlan_status
wlan_flush_scan_table(IN pmlan_adapter pmadapter)
{
	t_u8 idx = 0;

	ENTER();

	PRINTM(MINFO, "Flushing scan table\n");

	/* Wipe every table slot and reset the stored-entry count */
	memset(pmadapter, pmadapter->pscan_table, 0,
	       (sizeof(BSSDescriptor_t) * MRVDRV_MAX_BSSID_LIST));
	pmadapter->num_in_scan_table = 0;

	/* Discard all buffered beacon/probe response data */
	memset(pmadapter, pmadapter->bcn_buf, 0, pmadapter->bcn_buf_size);
	pmadapter->pbcn_buf_end = pmadapter->bcn_buf;

	/* Clear the accumulated per-channel CCA statistics */
	for (idx = 0; idx < pmadapter->num_in_chan_stats; idx++)
		pmadapter->pchan_stats[idx].cca_scan_duration = 0;

	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
 *  @brief Internal function used to start a scan based on an input config
 *
 *  Use the input user scan configuration information when provided in
 *    order to send the appropriate scan commands to firmware to populate or
 *    update the internal driver scan table
 *
 *  @param pmpriv          A pointer to mlan_private structure
 *  @param pioctl_buf      A pointer to MLAN IOCTL Request buffer
 *  @param puser_scan_in   Pointer to the input configuration for the requested
 *                         scan.
 *
 *  @return                MLAN_STATUS_SUCCESS or < 0 if error
 */
mlan_status
wlan_scan_networks(IN mlan_private * pmpriv,
		   IN t_void * pioctl_buf,
		   IN const wlan_user_scan_cfg * puser_scan_in)
{
	mlan_status ret = MLAN_STATUS_SUCCESS;
	mlan_adapter *pmadapter = pmpriv->adapter;
	mlan_callbacks *pcb = (mlan_callbacks *) & pmadapter->callbacks;
	cmd_ctrl_node *pcmd_node = MNULL;
	pmlan_ioctl_req pioctl_req = (mlan_ioctl_req *) pioctl_buf;
	wlan_scan_cmd_config_tlv *pscan_cfg_out = MNULL;
	MrvlIEtypes_ChanListParamSet_t *pchan_list_out;
	t_u32 buf_size;
	/* MNULL-initialized so the unified cleanup below is always safe:
	   the original code read this pointer uninitialized when the
	   second moal_malloc failed without writing the out-parameter */
	ChanScanParamSet_t *pscan_chan_list = MNULL;
	t_u8 keep_previous_scan;
	t_u8 filtered_scan;
	t_u8 scan_current_chan_only;
	t_u8 max_chan_per_scan;
	t_u8 i;

	ENTER();

	ret = pcb->moal_malloc(pmadapter->pmoal_handle,
			       sizeof(wlan_scan_cmd_config_tlv), MLAN_MEM_DEF,
			       (t_u8 **) & pscan_cfg_out);
	if (ret != MLAN_STATUS_SUCCESS || !pscan_cfg_out) {
		PRINTM(MERROR, "Memory allocation for pscan_cfg_out failed!\n");
		if (pioctl_req)
			pioctl_req->status_code = MLAN_ERROR_NO_MEM;
		ret = MLAN_STATUS_FAILURE;
		goto done;
	}

	buf_size = sizeof(ChanScanParamSet_t) * WLAN_USER_SCAN_CHAN_MAX;
	ret = pcb->moal_malloc(pmadapter->pmoal_handle, buf_size, MLAN_MEM_DEF,
			       (t_u8 **) & pscan_chan_list);
	if (ret != MLAN_STATUS_SUCCESS || !pscan_chan_list) {
		PRINTM(MERROR, "Failed to allocate scan_chan_list\n");
		if (pioctl_req)
			pioctl_req->status_code = MLAN_ERROR_NO_MEM;
		ret = MLAN_STATUS_FAILURE;
		goto done;
	}

	memset(pmadapter, pscan_chan_list, 0x00, buf_size);
	memset(pmadapter, pscan_cfg_out, 0x00,
	       sizeof(wlan_scan_cmd_config_tlv));

	/* Translate the user scan request into firmware command config */
	ret = wlan_scan_setup_scan_config(pmpriv,
					  puser_scan_in,
					  &pscan_cfg_out->config,
					  &pchan_list_out,
					  pscan_chan_list,
					  &max_chan_per_scan,
					  &filtered_scan,
					  &scan_current_chan_only);
	if (ret != MLAN_STATUS_SUCCESS) {
		PRINTM(MERROR, "Failed to setup scan config\n");
		if (pioctl_req)
			pioctl_req->status_code = MLAN_ERROR_INVALID_PARAMETER;
		ret = MLAN_STATUS_FAILURE;
		goto done;
	}

	keep_previous_scan = MFALSE;
	if (puser_scan_in)
		keep_previous_scan = puser_scan_in->keep_previous_scan;

	if (keep_previous_scan == MFALSE) {
		/* Fresh scan: clear the table, the beacon buffer and the
		   per-channel CCA statistics */
		memset(pmadapter, pmadapter->pscan_table, 0x00,
		       sizeof(BSSDescriptor_t) * MRVDRV_MAX_BSSID_LIST);
		pmadapter->num_in_scan_table = 0;
		pmadapter->pbcn_buf_end = pmadapter->bcn_buf;
		for (i = 0; i < pmadapter->num_in_chan_stats; i++)
			pmadapter->pchan_stats[i].cca_scan_duration = 0;
	}

	ret = wlan_scan_channel_list(pmpriv,
				     pioctl_buf,
				     max_chan_per_scan,
				     filtered_scan,
				     &pscan_cfg_out->config,
				     pchan_list_out, pscan_chan_list);

	/* Get scan command from scan_pending_q and put to cmd_pending_q */
	if (ret == MLAN_STATUS_SUCCESS) {
		if (util_peek_list
		    (pmadapter->pmoal_handle, &pmadapter->scan_pending_q,
		     pcb->moal_spin_lock, pcb->moal_spin_unlock)) {
			pcmd_node =
				(cmd_ctrl_node *)
				util_dequeue_list(pmadapter->pmoal_handle,
						  &pmadapter->scan_pending_q,
						  pcb->moal_spin_lock,
						  pcb->moal_spin_unlock);
			wlan_request_cmd_lock(pmadapter);
			pmadapter->pscan_ioctl_req = pioctl_req;
			pmadapter->scan_processing = MTRUE;
			wlan_release_cmd_lock(pmadapter);
			wlan_insert_cmd_to_pending_q(pmadapter, pcmd_node,
						     MTRUE);
		}
	}

done:
	/* Single cleanup path (CERT MEM12-C): free whatever was allocated,
	   instead of duplicating the frees in every error branch */
	if (pscan_cfg_out)
		pcb->moal_mfree(pmadapter->pmoal_handle,
				(t_u8 *) pscan_cfg_out);
	if (pscan_chan_list)
		pcb->moal_mfree(pmadapter->pmoal_handle,
				(t_u8 *) pscan_chan_list);

	LEAVE();
	return ret;
}
/**
 *  @brief Prepare a scan command to be sent to the firmware
 *
 *  Use the wlan_scan_cmd_config sent to the command processing module in
 *    the wlan_prepare_cmd to configure a HostCmd_DS_802_11_SCAN command
 *    struct to send to firmware.
 *
 *  The fixed fields specifying the BSS type and BSSID filters as well as a
 *    variable number/length of TLVs are sent in the command to firmware.
 *
 *  @param pmpriv     A pointer to mlan_private structure
 *  @param pcmd       A pointer to HostCmd_DS_COMMAND structure to be sent to
 *                    firmware with the HostCmd_DS_801_11_SCAN structure
 *  @param pdata_buf  Void pointer cast of a wlan_scan_cmd_config struct used
 *                    to set the fields/TLVs for the command sent to firmware
 *
 *  @return           MLAN_STATUS_SUCCESS
 */
mlan_status
wlan_cmd_802_11_scan(IN mlan_private * pmpriv,
		     IN HostCmd_DS_COMMAND * pcmd, IN t_void * pdata_buf)
{
	HostCmd_DS_802_11_SCAN *pscan_cmd = &pcmd->params.scan;
	wlan_scan_cmd_config *pscan_cfg = (wlan_scan_cmd_config *) pdata_buf;

	ENTER();

	/* Fixed fields: BSS type and (optional) specific-BSSID filter */
	pscan_cmd->bss_mode = pscan_cfg->bss_mode;
	memcpy(pmpriv->adapter, pscan_cmd->bssid, pscan_cfg->specific_bssid,
	       sizeof(pscan_cmd->bssid));
	/* Variable portion: the pre-built TLV list follows the fixed fields */
	memcpy(pmpriv->adapter, pscan_cmd->tlv_buffer, pscan_cfg->tlv_buf,
	       pscan_cfg->tlv_buf_len);

	pcmd->command = wlan_cpu_to_le16(HostCmd_CMD_802_11_SCAN);
	/* Size is equal to the sizeof(fixed portions) + the TLV len + header */
	pcmd->size = wlan_cpu_to_le16((t_u16) (sizeof(pscan_cmd->bss_mode)
					       + sizeof(pscan_cmd->bssid)
					       + pscan_cfg->tlv_buf_len
					       + S_DS_GEN));

	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief This function handles the command response of scan
*
* The response buffer for the scan command has the following
* memory layout:
*
* .-------------------------------------------------------------.
* | Header (4 * sizeof(t_u16)): Standard command response hdr |
* .-------------------------------------------------------------.
* | BufSize (t_u16) : sizeof the BSS Description data |
* .-------------------------------------------------------------.
* | NumOfSet (t_u8) : Number of BSS Descs returned |
* .-------------------------------------------------------------.
* | BSSDescription data (variable, size given in BufSize) |
* .-------------------------------------------------------------.
* | TLV data (variable, size calculated using Header->Size, |
* | BufSize and sizeof the fixed fields above) |
* .-------------------------------------------------------------.
*
* @param pmpriv A pointer to mlan_private structure
* @param resp A pointer to HostCmd_DS_COMMAND
* @param pioctl_buf A pointer to mlan_ioctl_req structure
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_ret_802_11_scan(IN mlan_private * pmpriv,
IN HostCmd_DS_COMMAND * resp, IN t_void * pioctl_buf)
{
mlan_status ret = MLAN_STATUS_SUCCESS;
mlan_adapter *pmadapter = pmpriv->adapter;
mlan_callbacks *pcb = MNULL;
cmd_ctrl_node *pcmd_node = MNULL;
HostCmd_DS_802_11_SCAN_RSP *pscan_rsp = MNULL;
BSSDescriptor_t *bss_new_entry = MNULL;
MrvlIEtypes_Data_t *ptlv;
MrvlIEtypes_TsfTimestamp_t *ptsf_tlv = MNULL;
MrvlIEtypes_ChannelStats_t *pchanstats_tlv = MNULL;
t_u8 *pbss_info;
t_u32 scan_resp_size;
t_u32 bytes_left;
t_u32 num_in_table;
t_u32 bss_idx;
t_u32 idx;
t_u32 tlv_buf_size;
t_u64 tsf_val;
chan_freq_power_t *cfp;
MrvlIEtypes_ChanBandListParamSet_t *pchan_band_tlv = MNULL;
ChanBandParamSet_t *pchan_band;
t_u8 band;
t_u8 is_bgscan_resp;
t_u32 age_ts_usec;
t_u8 null_ssid[MLAN_MAX_SSID_LENGTH] = { 0 };
t_u32 status_code = 0;
pmlan_ioctl_req pscan_ioctl_req = MNULL;
ENTER();
pcb = (pmlan_callbacks) & pmadapter->callbacks;
is_bgscan_resp = (resp->command == HostCmd_CMD_802_11_BG_SCAN_QUERY);
if (is_bgscan_resp)
pscan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
else
pscan_rsp = &resp->params.scan_resp;
if (pscan_rsp->number_of_sets > MRVDRV_MAX_BSSID_LIST) {
PRINTM(MERROR,
"SCAN_RESP: Invalid number of AP returned (%d)!!\n",
pscan_rsp->number_of_sets);
status_code = MLAN_ERROR_CMD_SCAN_FAIL;
ret = MLAN_STATUS_FAILURE;
goto done;
}
bytes_left = wlan_le16_to_cpu(pscan_rsp->bss_descript_size);
PRINTM(MINFO, "SCAN_RESP: bss_descript_size %d\n", bytes_left);
scan_resp_size = resp->size;
PRINTM(MINFO, "SCAN_RESP: returned %d APs before parsing\n",
pscan_rsp->number_of_sets);
num_in_table = pmadapter->num_in_scan_table;
pbss_info = pscan_rsp->bss_desc_and_tlv_buffer;
/*
* The size of the TLV buffer is equal to the entire command response
* size (scan_resp_size) minus the fixed fields (sizeof()'s), the
* BSS Descriptions (bss_descript_size as bytesLef) and the command
* response header (S_DS_GEN)
*/
tlv_buf_size = scan_resp_size - (bytes_left
+ sizeof(pscan_rsp->bss_descript_size)
+ sizeof(pscan_rsp->number_of_sets)
+ S_DS_GEN);
if (is_bgscan_resp)
tlv_buf_size -=
sizeof(resp->params.bg_scan_query_resp.
report_condition);
ptlv = (MrvlIEtypes_Data_t *) (pscan_rsp->bss_desc_and_tlv_buffer +
bytes_left);
/* Search the TLV buffer space in the scan response for any valid TLVs */
wlan_ret_802_11_scan_get_tlv_ptrs(pmadapter,
ptlv,
tlv_buf_size,
TLV_TYPE_TSFTIMESTAMP,
(MrvlIEtypes_Data_t **) & ptsf_tlv);
/* Search the TLV buffer space in the scan response for any valid TLVs */
wlan_ret_802_11_scan_get_tlv_ptrs(pmadapter,
ptlv,
tlv_buf_size,
TLV_TYPE_CHANNELBANDLIST,
(MrvlIEtypes_Data_t **) &
pchan_band_tlv);
wlan_ret_802_11_scan_get_tlv_ptrs(pmadapter, ptlv, tlv_buf_size,
TLV_TYPE_CHANNEL_STATS,
(MrvlIEtypes_Data_t **) &
pchanstats_tlv);
if (pchanstats_tlv)
wlan_update_chan_statistics(pmpriv, pchanstats_tlv);
/*
* Process each scan response returned (pscan_rsp->number_of_sets). Save
* the information in the bss_new_entry and then insert into the
* driver scan table either as an update to an existing entry
* or as an addition at the end of the table
*/
ret = pcb->moal_malloc(pmadapter->pmoal_handle, sizeof(BSSDescriptor_t),
MLAN_MEM_DEF, (t_u8 **) & bss_new_entry);
if (ret != MLAN_STATUS_SUCCESS || !bss_new_entry) {
PRINTM(MERROR, "Memory allocation for bss_new_entry failed!\n");
status_code = MLAN_ERROR_NO_MEM;
ret = MLAN_STATUS_FAILURE;
goto done;
}
for (idx = 0; idx < pscan_rsp->number_of_sets && bytes_left; idx++) {
/* Zero out the bss_new_entry we are about to store info in */
memset(pmadapter, bss_new_entry, 0x00, sizeof(BSSDescriptor_t));
/* Process the data fields and IEs returned for this BSS */
if (wlan_interpret_bss_desc_with_ie(pmadapter,
bss_new_entry,
&pbss_info,
&bytes_left,
MFALSE) ==
MLAN_STATUS_SUCCESS) {
PRINTM(MINFO, "SCAN_RESP: BSSID = " MACSTR "\n",
MAC2STR(bss_new_entry->mac_address));
band = BAND_G;
if (pchan_band_tlv) {
pchan_band =
&pchan_band_tlv->chan_band_param[idx];
band = radio_type_to_band(pchan_band->
radio_type & (MBIT(0)
|
MBIT
(1)));
if (!bss_new_entry->channel)
bss_new_entry->channel =
pchan_band->chan_number;
}
/* Save the band designation for this entry for use in
join */
bss_new_entry->bss_band = band;
cfp = wlan_find_cfp_by_band_and_channel(pmadapter,
(t_u8)
bss_new_entry->
bss_band,
(t_u16)
bss_new_entry->
channel);
if (cfp)
bss_new_entry->freq = cfp->freq;
else
bss_new_entry->freq = 0;
/* Skip entry if on blacklisted channel */
if (cfp && cfp->dynamic.blacklist) {
PRINTM(MINFO,
"SCAN_RESP: dropping entry on blacklist channel.\n");
continue;
}
/*
* Search the scan table for the same bssid
*/
for (bss_idx = 0; bss_idx < num_in_table; bss_idx++) {
if (!memcmp
(pmadapter, bss_new_entry->mac_address,
pmadapter->pscan_table[bss_idx].
mac_address,
sizeof(bss_new_entry->mac_address))) {
/*
* If the SSID matches as well, it is a duplicate of
* this entry. Keep the bss_idx set to this
* entry so we replace the old contents in the table
*/
if ((bss_new_entry->ssid.ssid_len ==
pmadapter->pscan_table[bss_idx].
ssid.ssid_len)
&&
(!memcmp
(pmadapter,
bss_new_entry->ssid.ssid,
pmadapter->pscan_table[bss_idx].
ssid.ssid,
bss_new_entry->ssid.ssid_len))) {
PRINTM(MINFO,
"SCAN_RESP: Duplicate of index: %d\n",
bss_idx);
break;
}
/*
* If the SSID is NULL for same BSSID keep the bss_idx set
* to this entry so we replace the old contents in the table
*/
if (!memcmp
(pmadapter,
pmadapter->pscan_table[bss_idx].
ssid.ssid, null_ssid,
pmadapter->pscan_table[bss_idx].
ssid.ssid_len)) {
PRINTM(MINFO,
"SCAN_RESP: Duplicate of index: %d\n",
bss_idx);
break;
}
}
}
/*
* If the bss_idx is equal to the number of entries in the table,
* the new entry was not a duplicate; append it to the scan
* table
*/
if (bss_idx == num_in_table) {
/* Range check the bss_idx, keep it limited to
the last entry */
if (bss_idx == MRVDRV_MAX_BSSID_LIST)
bss_idx--;
else
num_in_table++;
}
/*
* Save the beacon/probe response returned for later application
* retrieval. Duplicate beacon/probe responses are updated if
* possible
*/
wlan_ret_802_11_scan_store_beacon(pmpriv,
bss_idx,
num_in_table,
bss_new_entry);
if (bss_new_entry->pbeacon_buf == MNULL) {
PRINTM(MCMND,
"No space for beacon, drop this entry\n");
num_in_table--;
continue;
}
/*
* If the TSF TLV was appended to the scan results, save
* this entry's TSF value in the networkTSF field. The
* networkTSF is the firmware's TSF value at the time the
* beacon or probe response was received.
*/
if (ptsf_tlv) {
memcpy(pmpriv->adapter, &tsf_val,
&ptsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
sizeof(tsf_val));
tsf_val = wlan_le64_to_cpu(tsf_val);
memcpy(pmpriv->adapter,
&bss_new_entry->network_tsf, &tsf_val,
sizeof(bss_new_entry->network_tsf));
}
/* Copy the locally created bss_new_entry to the scan
table */
memcpy(pmadapter, &pmadapter->pscan_table[bss_idx],
bss_new_entry,
sizeof(pmadapter->pscan_table[bss_idx]));
} else {
/* Error parsing/interpreting the scan response,
skipped */
PRINTM(MERROR,
"SCAN_RESP: wlan_interpret_bss_desc_with_ie returned error\n");
}
}
PRINTM(MINFO, "SCAN_RESP: Scanned %2d APs, %d valid, %d total\n",
pscan_rsp->number_of_sets,
num_in_table - pmadapter->num_in_scan_table, num_in_table);
/* Update the total number of BSSIDs in the scan table */
pmadapter->num_in_scan_table = num_in_table;
/* Update the age_in_second */
pmadapter->callbacks.moal_get_system_time(pmadapter->pmoal_handle,
&pmadapter->age_in_secs,
&age_ts_usec);
if (is_bgscan_resp)
goto done;
if (!util_peek_list
(pmadapter->pmoal_handle, &pmadapter->scan_pending_q,
pcb->moal_spin_lock, pcb->moal_spin_unlock)) {
/*
* Process the resulting scan table:
* - Remove any bad ssids
* - Update our current BSS information from scan data
*/
wlan_scan_process_results(pmpriv);
wlan_request_cmd_lock(pmadapter);
pmadapter->scan_processing = MFALSE;
pscan_ioctl_req = pmadapter->pscan_ioctl_req;
pmadapter->pscan_ioctl_req = MNULL;
/* Need to indicate IOCTL complete */
if (pscan_ioctl_req) {
pscan_ioctl_req->status_code = MLAN_ERROR_NO_ERROR;
/* Indicate ioctl complete */
pcb->moal_ioctl_complete(pmadapter->pmoal_handle,
(pmlan_ioctl_req)
pscan_ioctl_req,
MLAN_STATUS_SUCCESS);
}
wlan_release_cmd_lock(pmadapter);
pmadapter->bgscan_reported = MFALSE;
wlan_recv_event(pmpriv, MLAN_EVENT_ID_DRV_SCAN_REPORT, MNULL);
} else {
/* If firmware not ready, do not issue any more scan commands */
if (pmadapter->hw_status != WlanHardwareStatusReady) {
status_code = MLAN_ERROR_FW_NOT_READY;
ret = MLAN_STATUS_FAILURE;
goto done;
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
pcmd_node =
(cmd_ctrl_node *) util_dequeue_list(pmadapter->
pmoal_handle,
&pmadapter->
scan_pending_q,
pcb->
moal_spin_lock,
pcb->
moal_spin_unlock);
wlan_insert_cmd_to_pending_q(pmadapter, pcmd_node,
MTRUE);
}
}
done:
if (bss_new_entry)
pcb->moal_mfree(pmadapter->pmoal_handle,
(t_u8 *) bss_new_entry);
if (ret) {
/* Flush all pending scan commands */
wlan_flush_scan_queue(pmadapter);
wlan_request_cmd_lock(pmadapter);
pmadapter->scan_processing = MFALSE;
pscan_ioctl_req = pmadapter->pscan_ioctl_req;
pmadapter->pscan_ioctl_req = MNULL;
if (pscan_ioctl_req) {
pscan_ioctl_req->status_code = status_code;
/* Indicate ioctl complete */
pcb->moal_ioctl_complete(pmadapter->pmoal_handle,
pscan_ioctl_req,
MLAN_STATUS_FAILURE);
}
wlan_release_cmd_lock(pmadapter);
}
LEAVE();
return ret;
}
/**
* @brief Prepare an extended scan command to be sent to the firmware
*
* Use the wlan_scan_cmd_config sent to the command processing module in
* the wlan_prepare_cmd to configure a HostCmd_DS_802_11_SCAN_EXT command
* struct to send to firmware.
*
* @param pmpriv A pointer to mlan_private structure
* @param pcmd A pointer to HostCmd_DS_COMMAND structure to be sent to
* firmware with the HostCmd_DS_802_11_SCAN_EXT structure
* @param pdata_buf Void pointer cast of a wlan_scan_cmd_config struct used
* to set the fields/TLVs for the command sent to firmware
*
* @return MLAN_STATUS_SUCCESS
*/
mlan_status
wlan_cmd_802_11_scan_ext(IN mlan_private * pmpriv,
			 IN HostCmd_DS_COMMAND * pcmd, IN t_void * pdata_buf)
{
	HostCmd_DS_802_11_SCAN_EXT *ext_scan = &pcmd->params.ext_scan;
	wlan_scan_cmd_config *cfg = (wlan_scan_cmd_config *) pdata_buf;
	t_u16 cmd_size;

	ENTER();
	/* Copy the caller-prepared TLV payload into the command buffer */
	memcpy(pmpriv->adapter, ext_scan->tlv_buffer,
	       cfg->tlv_buf, cfg->tlv_buf_len);
	pcmd->command = wlan_cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
	/* Total size = generic header + fixed (reserved) field + TLV bytes */
	cmd_size = (t_u16) (sizeof(ext_scan->reserved)
			    + cfg->tlv_buf_len + S_DS_GEN);
	pcmd->size = wlan_cpu_to_le16(cmd_size);
	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief This function handles the command response of extended scan
*
* @param pmpriv A pointer to mlan_private structure
* @param resp A pointer to HostCmd_DS_COMMAND
* @param pioctl_buf A pointer to mlan_ioctl_req structure
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_ret_802_11_scan_ext(IN mlan_private * pmpriv,
			 IN HostCmd_DS_COMMAND * resp, IN t_void * pioctl_buf)
{
	HostCmd_DS_802_11_SCAN_EXT *pext_scan = &(resp->params.ext_scan);
	MrvlIEtypesHeader_t *phdr;
	t_u16 bytes_left;
	t_u16 type;
	t_u16 len;

	ENTER();
	PRINTM(MINFO, "EXT scan returns successfully\n");
	/* Walk the TLV area that follows the fixed portion of the response.
	   The fixed struct includes one byte of the TLV buffer, hence -1. */
	phdr = (MrvlIEtypesHeader_t *) pext_scan->tlv_buffer;
	bytes_left =
		resp->size - (sizeof(HostCmd_DS_802_11_SCAN_EXT) - 1 +
			      S_DS_GEN);
	while (bytes_left >= sizeof(MrvlIEtypesHeader_t)) {
		type = wlan_le16_to_cpu(phdr->type);
		len = wlan_le16_to_cpu(phdr->len);
		/* Bounds check: the advertised TLV must fit in the buffer */
		if (bytes_left < (len + sizeof(MrvlIEtypesHeader_t))) {
			PRINTM(MERROR,
			       "Error processing uAP sys config TLVs, bytes left < TLV length\n");
			break;
		}
		/* Only the channel statistics TLV is consumed here; all
		   other TLV types are skipped. */
		if (type == TLV_TYPE_CHANNEL_STATS)
			wlan_update_chan_statistics(pmpriv,
						    (MrvlIEtypes_ChannelStats_t
						     *) phdr);
		bytes_left -= len + sizeof(MrvlIEtypesHeader_t);
		phdr = (MrvlIEtypesHeader_t *) ((t_u8 *) phdr + len +
						sizeof(MrvlIEtypesHeader_t));
	}
	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief This function parse and store the extended scan results
*
* @param pmpriv A pointer to mlan_private structure
* @param number_of_sets Number of BSS
* @param pscan_resp A pointer to scan response buffer
* @param scan_resp_size Size of scan response buffer
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/* Parse TLV-encapsulated BSS descriptors from an extended-scan buffer and
 * merge each one into the driver scan table (replacing duplicates). */
static mlan_status
wlan_parse_ext_scan_result(IN mlan_private * pmpriv,
			   IN t_u8 number_of_sets,
			   IN t_u8 * pscan_resp, IN t_u16 scan_resp_size)
{
	mlan_status ret = MLAN_STATUS_SUCCESS;
	mlan_adapter *pmadapter = pmpriv->adapter;
	mlan_callbacks *pcb = MNULL;
	BSSDescriptor_t *bss_new_entry = MNULL;
	t_u8 *pbss_info;
	t_u32 bytes_left;		/* unparsed bytes in pscan_resp */
	t_u32 bytes_left_for_tlv;	/* bytes left within one descriptor */
	t_u32 num_in_table;
	t_u32 bss_idx;
	t_u32 idx;
	t_u64 tsf_val;
	chan_freq_power_t *cfp;
	t_u16 tlv_type, tlv_len;
	MrvlIEtypes_Data_t *ptlv = MNULL;
	MrvlIEtypes_Bss_Scan_Rsp_t *pscan_rsp_tlv = MNULL;
	MrvlIEtypes_Bss_Scan_Info_t *pscan_info_tlv = MNULL;
	t_u8 band;
	t_u32 age_ts_usec;
	t_u8 null_ssid[MLAN_MAX_SSID_LENGTH] = { 0 };
	ENTER();
	pcb = (pmlan_callbacks) & pmadapter->callbacks;
	/* Reject firmware-reported AP counts larger than the scan table */
	if (number_of_sets > MRVDRV_MAX_BSSID_LIST) {
		PRINTM(MERROR,
		       "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
		       number_of_sets);
		ret = MLAN_STATUS_FAILURE;
		goto done;
	}
	bytes_left = scan_resp_size;
	PRINTM(MINFO, "EXT_SCAN: bss_descript_size %d\n", scan_resp_size);
	PRINTM(MINFO, "EXT_SCAN: returned %d APs before parsing\n",
	       number_of_sets);
	num_in_table = pmadapter->num_in_scan_table;
	ptlv = (MrvlIEtypes_Data_t *) pscan_resp;
	/*
	 * Process each scan response returned number_of_sets. Save
	 * the information in the bss_new_entry and then insert into the
	 * driver scan table either as an update to an existing entry
	 * or as an addition at the end of the table
	 */
	ret = pcb->moal_malloc(pmadapter->pmoal_handle, sizeof(BSSDescriptor_t),
			       MLAN_MEM_DEF, (t_u8 **) & bss_new_entry);
	if (ret != MLAN_STATUS_SUCCESS || !bss_new_entry) {
		PRINTM(MERROR, "Memory allocation for bss_new_entry failed!\n");
		ret = MLAN_STATUS_FAILURE;
		goto done;
	}
	for (idx = 0; idx < number_of_sets && bytes_left >
	     sizeof(MrvlIEtypesHeader_t); idx++) {
		tlv_type = wlan_le16_to_cpu(ptlv->header.type);
		tlv_len = wlan_le16_to_cpu(ptlv->header.len);
		if (bytes_left < sizeof(MrvlIEtypesHeader_t) + tlv_len) {
			PRINTM(MERROR,
			       "EXT_SCAN: Error bytes left < TLV length\n");
			break;
		}
		pscan_rsp_tlv = MNULL;
		pscan_info_tlv = MNULL;
		bytes_left_for_tlv = bytes_left;
		/* BSS response TLV with beacon or probe response buffer at the
		   initial position of each descriptor */
		if (tlv_type == TLV_TYPE_BSS_SCAN_RSP) {
			pbss_info = (t_u8 *) ptlv;
			pscan_rsp_tlv = (MrvlIEtypes_Bss_Scan_Rsp_t *) ptlv;
			ptlv = (MrvlIEtypes_Data_t *) (ptlv->data + tlv_len);
			/* NOTE(review): only bytes_left_for_tlv is reduced
			   here; bytes_left for the BSS TLV itself is
			   presumably consumed later via the &bytes_left
			   pointer passed to wlan_interpret_bss_desc_with_ie()
			   — confirm against that function. */
			bytes_left_for_tlv -=
				(tlv_len + sizeof(MrvlIEtypesHeader_t));
		} else
			break;
		/* Process variable TLV */
		while (bytes_left_for_tlv >= sizeof(MrvlIEtypesHeader_t) &&
		       wlan_le16_to_cpu(ptlv->header.type) !=
		       TLV_TYPE_BSS_SCAN_RSP) {
			tlv_type = wlan_le16_to_cpu(ptlv->header.type);
			tlv_len = wlan_le16_to_cpu(ptlv->header.len);
			if (bytes_left_for_tlv <
			    sizeof(MrvlIEtypesHeader_t) + tlv_len) {
				PRINTM(MERROR,
				       "EXT_SCAN: Error in processing TLV, "
				       "bytes left < TLV length\n");
				/* Abandon this descriptor: clearing
				   pscan_rsp_tlv makes the outer loop break */
				pscan_rsp_tlv = MNULL;
				bytes_left_for_tlv = 0;
				continue;
			}
			switch (tlv_type) {
			case TLV_TYPE_BSS_SCAN_INFO:
				pscan_info_tlv =
					(MrvlIEtypes_Bss_Scan_Info_t *) ptlv;
				/* Malformed scan-info TLV: stop parsing this
				   descriptor's variable TLVs */
				if (tlv_len !=
				    sizeof(MrvlIEtypes_Bss_Scan_Info_t) -
				    sizeof(MrvlIEtypesHeader_t)) {
					bytes_left_for_tlv = 0;
					continue;
				}
				break;
			default:
				break;
			}
			ptlv = (MrvlIEtypes_Data_t *) (ptlv->data + tlv_len);
			bytes_left -= (tlv_len + sizeof(MrvlIEtypesHeader_t));
			bytes_left_for_tlv -=
				(tlv_len + sizeof(MrvlIEtypesHeader_t));
		}
		/* No BSS response TLV */
		if (pscan_rsp_tlv == MNULL)
			break;
		/* Advance pointer to the beacon buffer length and update the
		   bytes count so that the function
		   wlan_interpret_bss_desc_with_ie() can handle the scan buffer
		   without any change */
		pbss_info += sizeof(t_u16);
		bytes_left -= sizeof(t_u16);
		/* Zero out the bss_new_entry we are about to store info in */
		memset(pmadapter, bss_new_entry, 0x00, sizeof(BSSDescriptor_t));
		/* Process the data fields and IEs returned for this BSS */
		if (wlan_interpret_bss_desc_with_ie(pmadapter,
						    bss_new_entry,
						    &pbss_info,
						    &bytes_left,
						    MTRUE) ==
		    MLAN_STATUS_SUCCESS) {
			PRINTM(MINFO, "EXT_SCAN: BSSID = " MACSTR "\n",
			       MAC2STR(bss_new_entry->mac_address));
			band = BAND_G;
			/*
			 * If the BSS info TLV was appended to the scan results, save
			 * this entry's TSF value in the networkTSF field. The
			 * networkTSF is the firmware's TSF value at the time the
			 * beacon or probe response was received.
			 */
			if (pscan_info_tlv) {
				/* RSSI is 2 byte long */
				bss_new_entry->rssi =
					-(t_s32) (wlan_le16_to_cpu
						  (pscan_info_tlv->rssi));
				PRINTM(MINFO, "EXT_SCAN: RSSI=%d\n",
				       bss_new_entry->rssi);
				memcpy(pmpriv->adapter, &tsf_val,
				       &pscan_info_tlv->tsf, sizeof(tsf_val));
				tsf_val = wlan_le64_to_cpu(tsf_val);
				memcpy(pmpriv->adapter,
				       &bss_new_entry->network_tsf, &tsf_val,
				       sizeof(bss_new_entry->network_tsf));
				band = radio_type_to_band(pscan_info_tlv->band);
			}
			/* Save the band designation for this entry for use in
			   join */
			bss_new_entry->bss_band = band;
			/* Resolve center frequency from band + channel */
			cfp = wlan_find_cfp_by_band_and_channel(pmadapter,
								(t_u8)
								bss_new_entry->
								bss_band,
								(t_u16)
								bss_new_entry->
								channel);
			if (cfp)
				bss_new_entry->freq = cfp->freq;
			else
				bss_new_entry->freq = 0;
			/* Skip entry if on blacklisted channel */
			if (cfp && cfp->dynamic.blacklist) {
				PRINTM(MINFO,
				       "EXT_SCAN: dropping entry on blacklist channel.\n");
				continue;
			}
			/*
			 * Search the scan table for the same bssid
			 */
			for (bss_idx = 0; bss_idx < num_in_table; bss_idx++) {
				if (!memcmp
				    (pmadapter, bss_new_entry->mac_address,
				     pmadapter->pscan_table[bss_idx].
				     mac_address,
				     sizeof(bss_new_entry->mac_address))) {
					/*
					 * If the SSID matches as well, it is a duplicate of
					 * this entry. Keep the bss_idx set to this
					 * entry so we replace the old contents in the table
					 */
					if ((bss_new_entry->ssid.ssid_len ==
					     pmadapter->pscan_table[bss_idx].
					     ssid.ssid_len)
					    &&
					    (!memcmp
					     (pmadapter,
					      bss_new_entry->ssid.ssid,
					      pmadapter->pscan_table[bss_idx].
					      ssid.ssid,
					      bss_new_entry->ssid.ssid_len))) {
						PRINTM(MINFO,
						       "EXT_SCAN: Duplicate of index: %d\n",
						       bss_idx);
						break;
					}
					/*
					 * If the SSID is NULL for same BSSID keep the bss_idx set
					 * to this entry so we replace the old contents in the table
					 */
					if (!memcmp
					    (pmadapter,
					     pmadapter->pscan_table[bss_idx].
					     ssid.ssid, null_ssid,
					     pmadapter->pscan_table[bss_idx].
					     ssid.ssid_len)) {
						PRINTM(MINFO,
						       "EXT_SCAN: Duplicate of index: %d\n",
						       bss_idx);
						break;
					}
				}
			}
			/*
			 * If the bss_idx is equal to the number of entries in the table,
			 * the new entry was not a duplicate; append it to the scan
			 * table
			 */
			if (bss_idx == num_in_table) {
				/* Range check the bss_idx, keep it limited to
				   the last entry */
				if (bss_idx == MRVDRV_MAX_BSSID_LIST)
					bss_idx--;
				else
					num_in_table++;
			}
			/*
			 * Save the beacon/probe response returned for later application
			 * retrieval. Duplicate beacon/probe responses are updated if
			 * possible
			 */
			wlan_ret_802_11_scan_store_beacon(pmpriv,
							  bss_idx,
							  num_in_table,
							  bss_new_entry);
			if (bss_new_entry->pbeacon_buf == MNULL) {
				PRINTM(MCMND,
				       "No space for beacon, drop this entry\n");
				num_in_table--;
				continue;
			}
			/* Copy the locally created bss_new_entry to the scan
			   table */
			memcpy(pmadapter, &pmadapter->pscan_table[bss_idx],
			       bss_new_entry,
			       sizeof(pmadapter->pscan_table[bss_idx]));
		} else {
			/* Error parsing/interpreting the scan response,
			   skipped */
			PRINTM(MERROR,
			       "EXT_SCAN: wlan_interpret_bss_desc_with_ie returned error\n");
		}
	}
	PRINTM(MINFO, "EXT_SCAN: Scanned %2d APs, %d valid, %d total\n",
	       number_of_sets, num_in_table - pmadapter->num_in_scan_table,
	       num_in_table);
	/* Update the total number of BSSIDs in the scan table */
	pmadapter->num_in_scan_table = num_in_table;
	/* Update the age_in_second */
	pmadapter->callbacks.moal_get_system_time(pmadapter->pmoal_handle,
						  &pmadapter->age_in_secs,
						  &age_ts_usec);
done:
	if (bss_new_entry)
		pcb->moal_mfree(pmadapter->pmoal_handle,
				(t_u8 *) bss_new_entry);
	LEAVE();
	return ret;
}
/**
* @brief This function handles the event extended scan report
*
* @param pmpriv A pointer to mlan_private structure
* @param pmbuf A pointer to mlan_buffer
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/* Handle the EXT_SCAN_REPORT firmware event: parse the attached results,
 * then either complete the scan (notify the pending ioctl) or dispatch the
 * next queued scan command. */
mlan_status
wlan_handle_event_ext_scan_report(IN mlan_private * pmpriv,
				  IN mlan_buffer * pmbuf)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	mlan_callbacks *pcb = &pmadapter->callbacks;
	mlan_ioctl_req *pioctl_req = MNULL;
	cmd_ctrl_node *pcmd_node = MNULL;
	mlan_status ret = MLAN_STATUS_SUCCESS;
	/* Event payload: fixed header followed by the scan-result TLVs */
	mlan_event_scan_result *pevent_scan = (pmlan_event_scan_result)
		(pmbuf->pbuf + pmbuf->data_offset);
	t_u8 *ptlv = (pmbuf->pbuf + pmbuf->data_offset
		      + sizeof(mlan_event_scan_result));
	/* NOTE(review): wlan_cpu_to_le16 is used to read a firmware-provided
	   little-endian field; wlan_le16_to_cpu looks intended. The two are
	   identical on little-endian hosts — confirm on big-endian builds. */
	t_u16 tlv_buf_left = wlan_cpu_to_le16(pevent_scan->buf_size);
	DBG_HEXDUMP(MCMD_D, "EVENT EXT_SCAN", pmbuf->pbuf +
		    pmbuf->data_offset, pmbuf->data_len);
	wlan_parse_ext_scan_result(pmpriv, pevent_scan->num_of_set,
				   ptlv, tlv_buf_left);
	/* Only finish processing once the firmware reports no further
	   EXT_SCAN events are coming for this scan */
	if (!pevent_scan->more_event) {
		if (!util_peek_list(pmadapter->pmoal_handle,
				    &pmadapter->scan_pending_q,
				    pcb->moal_spin_lock,
				    pcb->moal_spin_unlock)) {
			/*
			 * Process the resulting scan table:
			 *   - Remove any bad ssids
			 *   - Update our current BSS information from scan data
			 */
			wlan_scan_process_results(pmpriv);
			wlan_request_cmd_lock(pmadapter);
			pmadapter->scan_processing = MFALSE;
			/* Detach the pending ioctl under the cmd lock */
			pioctl_req = pmadapter->pscan_ioctl_req;
			pmadapter->pscan_ioctl_req = MNULL;
			/* Need to indicate IOCTL complete */
			if (pioctl_req != MNULL) {
				pioctl_req->status_code = MLAN_ERROR_NO_ERROR;
				/* Indicate ioctl complete */
				pcb->moal_ioctl_complete(pmadapter->
							 pmoal_handle,
							 (pmlan_ioctl_req)
							 pioctl_req,
							 MLAN_STATUS_SUCCESS);
			}
			wlan_release_cmd_lock(pmadapter);
			pmadapter->bgscan_reported = MFALSE;
			wlan_recv_event(pmpriv, MLAN_EVENT_ID_DRV_SCAN_REPORT,
					MNULL);
		} else {
			/* If firmware not ready, do not issue any more scan
			   commands */
			if (pmadapter->hw_status != WlanHardwareStatusReady) {
				/* Flush all pending scan commands */
				wlan_flush_scan_queue(pmadapter);
				wlan_request_cmd_lock(pmadapter);
				pmadapter->scan_processing = MFALSE;
				pioctl_req = pmadapter->pscan_ioctl_req;
				pmadapter->pscan_ioctl_req = MNULL;
				/* Indicate IOCTL complete */
				if (pioctl_req != MNULL) {
					pioctl_req->status_code =
						MLAN_ERROR_FW_NOT_READY;
					/* Indicate ioctl complete */
					pcb->moal_ioctl_complete(pmadapter->
								 pmoal_handle,
								 (pmlan_ioctl_req)
								 pioctl_req,
								 MLAN_STATUS_FAILURE);
				}
				wlan_release_cmd_lock(pmadapter);
			} else {
				/* Get scan command from scan_pending_q and put
				   to cmd_pending_q */
				pcmd_node =
					(cmd_ctrl_node *)
					util_dequeue_list(pmadapter->
							  pmoal_handle,
							  &pmadapter->
							  scan_pending_q,
							  pcb->moal_spin_lock,
							  pcb->
							  moal_spin_unlock);
				wlan_insert_cmd_to_pending_q(pmadapter,
							     pcmd_node, MTRUE);
			}
		}
	}
	LEAVE();
	return ret;
}
/**
* @brief This function prepares command of bg_scan_query.
*
* @param pmpriv A pointer to mlan_private structure
* @param pcmd A pointer to HostCmd_DS_COMMAND structure
* @param pdata_buf Void pointer cast of a wlan_scan_cmd_config struct used
* to set the fields/TLVs for the command sent to firmware
*
* @return MLAN_STATUS_SUCCESS
*/
mlan_status
wlan_cmd_802_11_bg_scan_query(IN mlan_private * pmpriv,
			      IN HostCmd_DS_COMMAND * pcmd,
			      IN t_void * pdata_buf)
{
	HostCmd_DS_802_11_BG_SCAN_QUERY *pquery = &pcmd->params.bg_scan_query;
	t_u16 cmd_size = sizeof(HostCmd_DS_802_11_BG_SCAN_QUERY) + S_DS_GEN;

	ENTER();
	pcmd->command = wlan_cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_QUERY);
	pcmd->size = wlan_cpu_to_le16(cmd_size);
	/* Always ask firmware to flush its cached results after reporting */
	pquery->flush = MTRUE;
	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief Create a channel list for the driver to scan based on region info
*
* Use the driver region/band information to construct a comprehensive list
* of channels to scan. This routine is used for any scan that is not
* provided a specific channel list to scan.
*
* @param pmpriv A pointer to mlan_private structure
* @param pbg_scan_in pointer to scan configuration parameters
* @param tlv_chan_list A pointer to structure MrvlIEtypes_ChanListParamSet_t
*
* @return channel number
*/
/* Build the background-scan channel TLV from the region/band tables when the
 * caller did not supply an explicit channel list.  Returns the number of
 * channels written (capped at WLAN_BG_SCAN_CHAN_MAX). */
static t_u8
wlan_bgscan_create_channel_list(IN mlan_private * pmpriv,
				IN const wlan_bgscan_cfg * pbg_scan_in,
				MrvlIEtypes_ChanListParamSet_t * tlv_chan_list)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	region_chan_t *pscan_region;
	chan_freq_power_t *cfp;
	t_u32 region_idx;
	t_u32 chan_idx = 0;	/* running index into tlv_chan_list */
	t_u32 next_chan;
	t_u8 scan_type;
	t_u8 radio_type;
	ENTER();
	for (region_idx = 0;
	     region_idx < NELEMENTS(pmadapter->region_channel); region_idx++) {
		if (wlan_11d_is_enabled(pmpriv) &&
		    pmpriv->media_connected != MTRUE) {
			/* Scan all the supported chan for the first scan */
			if (!pmadapter->universal_channel[region_idx].valid)
				continue;
			pscan_region =
				&pmadapter->universal_channel[region_idx];
		} else {
			if (!pmadapter->region_channel[region_idx].valid)
				continue;
			pscan_region = &pmadapter->region_channel[region_idx];
		}
		/* Caller may restrict scanning to one band without naming
		   channels: chan_number==0 plus BAND_SPECIFIED flag */
		if (pbg_scan_in && !pbg_scan_in->chan_list[0].chan_number &&
		    pbg_scan_in->chan_list[0].radio_type & BAND_SPECIFIED) {
			radio_type =
				pbg_scan_in->chan_list[0].
				radio_type & ~BAND_SPECIFIED;
			/* radio_type 0 selects 2.4 GHz (B/G); nonzero 5 GHz */
			if (!radio_type && (pscan_region->band != BAND_B) &&
			    (pscan_region->band != BAND_G))
				continue;
			if (radio_type && (pscan_region->band != BAND_A))
				continue;
		}
		if (!wlan_is_band_compatible
		    (pmpriv->config_bands | pmadapter->adhoc_start_band,
		     pscan_region->band))
			continue;
		for (next_chan = 0;
		     next_chan < pscan_region->num_cfp;
		     next_chan++, chan_idx++) {
			if (chan_idx >= WLAN_BG_SCAN_CHAN_MAX)
				break;
			/* Set the default scan type to ACTIVE SCAN type, will
			   later be changed to passive on a per channel basis
			   if restricted by regulatory requirements (11d or
			   11h) */
			scan_type = MLAN_SCAN_TYPE_ACTIVE;
			cfp = pscan_region->pcfp + next_chan;
			if (scan_type == MLAN_SCAN_TYPE_ACTIVE
			    && wlan_11d_is_enabled(pmpriv)) {
				scan_type = wlan_11d_get_scan_type(pmadapter,
								   pscan_region->
								   band,
								   (t_u8) cfp->
								   channel,
								   &pmadapter->
								   parsed_region_chan);
			}
			switch (pscan_region->band) {
			case BAND_A:
				tlv_chan_list->chan_scan_param[chan_idx].
					radio_type = HostCmd_SCAN_RADIO_TYPE_A;
				if (!wlan_11d_is_enabled(pmpriv)) {
					/* 11D not available... play it safe on
					   DFS channels */
					if (wlan_11h_radar_detect_required
					    (pmpriv, (t_u8) cfp->channel))
						scan_type =
							MLAN_SCAN_TYPE_PASSIVE;
				}
				break;
			case BAND_B:
			case BAND_G:
				if (!wlan_11d_is_enabled(pmpriv))
					if (wlan_bg_scan_type_is_passive
					    (pmpriv, (t_u8) cfp->channel))
						scan_type =
							MLAN_SCAN_TYPE_PASSIVE;
				tlv_chan_list->chan_scan_param[chan_idx].
					radio_type = HostCmd_SCAN_RADIO_TYPE_BG;
				break;
			default:
				tlv_chan_list->chan_scan_param[chan_idx].
					radio_type = HostCmd_SCAN_RADIO_TYPE_BG;
				break;
			}
			/* Dwell time priority: caller override, then the
			   adapter's passive/specific scan time defaults */
			if (pbg_scan_in && pbg_scan_in->chan_list[0].scan_time) {
				tlv_chan_list->chan_scan_param[chan_idx].
					max_scan_time =
					wlan_cpu_to_le16((t_u16) pbg_scan_in->
							 chan_list[0].
							 scan_time);
				tlv_chan_list->chan_scan_param[chan_idx].
					min_scan_time =
					wlan_cpu_to_le16((t_u16) pbg_scan_in->
							 chan_list[0].
							 scan_time);
			} else if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				tlv_chan_list->chan_scan_param[chan_idx].
					max_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 passive_scan_time);
				tlv_chan_list->chan_scan_param[chan_idx].
					min_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 passive_scan_time);
			} else {
				tlv_chan_list->chan_scan_param[chan_idx].
					max_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 specific_scan_time);
				tlv_chan_list->chan_scan_param[chan_idx].
					min_scan_time =
					wlan_cpu_to_le16(pmadapter->
							 specific_scan_time);
			}
			if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				tlv_chan_list->chan_scan_param[chan_idx].
					chan_scan_mode.passive_scan = MTRUE;
			} else {
				tlv_chan_list->chan_scan_param[chan_idx].
					chan_scan_mode.passive_scan = MFALSE;
			}
			tlv_chan_list->chan_scan_param[chan_idx].chan_number =
				(t_u8) cfp->channel;
			tlv_chan_list->chan_scan_param[chan_idx].chan_scan_mode.
				disable_chan_filt = MTRUE;
		}
	}
	LEAVE();
	return chan_idx;
}
/**
* @brief This function prepares command of bg_scan_config
*
* @param pmpriv A pointer to mlan_private structure
* @param pcmd A pointer to HostCmd_DS_COMMAND structure
* @param pdata_buf Void pointer cast of a wlan_scan_cmd_config struct used
* to set the fields/TLVs for the command sent to firmware
*
* @return MLAN_STATUS_SUCCESS
*/
/* Build the HostCmd_CMD_802_11_BG_SCAN_CONFIG command: fixed part plus a
 * sequence of optional TLVs (probes, RSSI/SNR thresholds, repeat count,
 * wildcard SSIDs, channel list, start-later).  cmd_size tracks the bytes
 * actually emitted and becomes the final command length. */
mlan_status
wlan_cmd_bgscan_config(IN mlan_private * pmpriv,
		       IN HostCmd_DS_COMMAND * pcmd, IN t_void * pdata_buf)
{
	mlan_adapter *pmadapter = pmpriv->adapter;
	HostCmd_DS_802_11_BG_SCAN_CONFIG *bg_scan =
		&pcmd->params.bg_scan_config;
	wlan_bgscan_cfg *bg_scan_in = (wlan_bgscan_cfg *) pdata_buf;
	t_u16 cmd_size = 0;
	MrvlIEtypes_NumProbes_t *pnum_probes_tlv = MNULL;
	MrvlIEtypes_BeaconLowRssiThreshold_t *rssi_tlv = MNULL;
	MrvlIEtypes_BeaconLowSnrThreshold_t *snr_tlv = MNULL;
	MrvlIEtypes_WildCardSsIdParamSet_t *pwildcard_ssid_tlv = MNULL;
	MrvlIEtypes_ChanListParamSet_t *tlv_chan_list = MNULL;
	MrvlIEtypes_StartLater_t *tlv_start_later = MNULL;
	MrvlIEtypes_RepeatCount_t *tlv_repeat = MNULL;
	t_u8 *tlv = MNULL;	/* write cursor into the TLV area */
	t_u16 num_probes = 0;
	t_u32 ssid_idx;
	t_u32 ssid_len = 0;
	t_u32 chan_idx;
	t_u32 chan_num;
	t_u8 radio_type;
	t_u16 scan_dur;
	t_u8 scan_type;
	ENTER();
	pcmd->command = wlan_cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG);
	bg_scan->action = wlan_cpu_to_le16(bg_scan_in->action);
	bg_scan->enable = bg_scan_in->enable;
	bg_scan->bss_type = bg_scan_in->bss_type;
	cmd_size = sizeof(HostCmd_DS_802_11_BG_SCAN_CONFIG) + S_DS_GEN;
	if (bg_scan_in->chan_per_scan)
		bg_scan->chan_per_scan = bg_scan_in->chan_per_scan;
	else
		bg_scan->chan_per_scan = MRVDRV_MAX_CHANNELS_PER_SPECIFIC_SCAN;
	if (bg_scan_in->scan_interval)
		bg_scan->scan_interval =
			wlan_cpu_to_le32(bg_scan_in->scan_interval);
	else
		bg_scan->scan_interval =
			wlan_cpu_to_le32(DEFAULT_BGSCAN_INTERVAL);
	bg_scan->report_condition =
		wlan_cpu_to_le32(bg_scan_in->report_condition);
	/* GET requests and disable requests carry no TLVs */
	if ((bg_scan_in->action == BG_SCAN_ACT_GET) ||
	    (bg_scan_in->action == BG_SCAN_ACT_GET_PPS_UAPSD) ||
	    (!bg_scan->enable))
		goto done;
	tlv = (t_u8 *) bg_scan + sizeof(HostCmd_DS_802_11_BG_SCAN_CONFIG);
	/* Probe count: caller override, else adapter default */
	num_probes = (bg_scan_in->num_probes ? bg_scan_in->num_probes :
		      pmadapter->scan_probes);
	if (num_probes) {
		pnum_probes_tlv = (MrvlIEtypes_NumProbes_t *) tlv;
		pnum_probes_tlv->header.type =
			wlan_cpu_to_le16(TLV_TYPE_NUMPROBES);
		pnum_probes_tlv->header.len =
			wlan_cpu_to_le16(sizeof(pnum_probes_tlv->num_probes));
		pnum_probes_tlv->num_probes =
			wlan_cpu_to_le16((t_u16) num_probes);
		tlv += sizeof(MrvlIEtypes_NumProbes_t);
		cmd_size += sizeof(MrvlIEtypes_NumProbes_t);
	}
	if (bg_scan_in->rssi_threshold) {
		rssi_tlv = (MrvlIEtypes_BeaconLowRssiThreshold_t *) tlv;
		rssi_tlv->header.type = wlan_cpu_to_le16(TLV_TYPE_RSSI_LOW);
		rssi_tlv->header.len =
			wlan_cpu_to_le16(sizeof
					 (MrvlIEtypes_BeaconLowRssiThreshold_t)
					 - sizeof(MrvlIEtypesHeader_t));
		rssi_tlv->value = bg_scan_in->rssi_threshold;
		rssi_tlv->frequency = 0;
		tlv += sizeof(MrvlIEtypes_BeaconLowRssiThreshold_t);
		cmd_size += sizeof(MrvlIEtypes_BeaconLowRssiThreshold_t);
	}
	if (bg_scan_in->snr_threshold) {
		snr_tlv = (MrvlIEtypes_BeaconLowSnrThreshold_t *) tlv;
		snr_tlv->header.type = wlan_cpu_to_le16(TLV_TYPE_SNR_LOW);
		snr_tlv->header.len =
			wlan_cpu_to_le16(sizeof
					 (MrvlIEtypes_BeaconLowSnrThreshold_t) -
					 sizeof(MrvlIEtypesHeader_t));
		snr_tlv->value = bg_scan_in->snr_threshold;
		snr_tlv->frequency = 0;
		/* BUG FIX: advance by the SNR TLV size, not the RSSI TLV size
		   (copy-paste from the rssi_threshold branch above) */
		tlv += sizeof(MrvlIEtypes_BeaconLowSnrThreshold_t);
		cmd_size += sizeof(MrvlIEtypes_BeaconLowSnrThreshold_t);
	}
	if (bg_scan_in->repeat_count) {
		tlv_repeat = (MrvlIEtypes_RepeatCount_t *) tlv;
		tlv_repeat->header.type =
			wlan_cpu_to_le16(TLV_TYPE_REPEAT_COUNT);
		tlv_repeat->header.len =
			wlan_cpu_to_le16(sizeof(MrvlIEtypes_RepeatCount_t) -
					 sizeof(MrvlIEtypesHeader_t));
		tlv_repeat->repeat_count =
			wlan_cpu_to_le16(bg_scan_in->repeat_count);
		tlv += sizeof(MrvlIEtypes_RepeatCount_t);
		cmd_size += sizeof(MrvlIEtypes_RepeatCount_t);
	}
	/* One wildcard-SSID TLV per list entry; the list ends at the first
	   entry with empty ssid and zero max_len */
	for (ssid_idx = 0; ((ssid_idx < NELEMENTS(bg_scan_in->ssid_list))
			    && (*bg_scan_in->ssid_list[ssid_idx].ssid ||
				bg_scan_in->ssid_list[ssid_idx].max_len));
	     ssid_idx++) {
		ssid_len =
			wlan_strlen((char *)bg_scan_in->ssid_list[ssid_idx].
				    ssid);
		pwildcard_ssid_tlv = (MrvlIEtypes_WildCardSsIdParamSet_t *) tlv;
		pwildcard_ssid_tlv->header.type =
			wlan_cpu_to_le16(TLV_TYPE_WILDCARDSSID);
		pwildcard_ssid_tlv->header.len =
			(t_u16) (ssid_len +
				 sizeof(pwildcard_ssid_tlv->max_ssid_length));
		pwildcard_ssid_tlv->max_ssid_length =
			bg_scan_in->ssid_list[ssid_idx].max_len;
		memcpy(pmadapter, pwildcard_ssid_tlv->ssid,
		       bg_scan_in->ssid_list[ssid_idx].ssid,
		       MIN(MLAN_MAX_SSID_LENGTH, ssid_len));
		/* header.len is used host-order for cursor math, then
		   converted to LE in place afterwards */
		tlv += sizeof(pwildcard_ssid_tlv->header) +
			pwildcard_ssid_tlv->header.len;
		cmd_size +=
			sizeof(pwildcard_ssid_tlv->header) +
			pwildcard_ssid_tlv->header.len;
		pwildcard_ssid_tlv->header.len =
			wlan_cpu_to_le16(pwildcard_ssid_tlv->header.len);
		PRINTM(MINFO, "Scan: ssid_list[%d]: %s, %d\n", ssid_idx,
		       pwildcard_ssid_tlv->ssid,
		       pwildcard_ssid_tlv->max_ssid_length);
	}
	if (bg_scan_in->chan_list[0].chan_number) {
		tlv_chan_list = (MrvlIEtypes_ChanListParamSet_t *) tlv;
		PRINTM(MINFO, "Scan: Using supplied channel list\n");
		chan_num = 0;
		for (chan_idx = 0; chan_idx < WLAN_BG_SCAN_CHAN_MAX
		     && bg_scan_in->chan_list[chan_idx].chan_number;
		     chan_idx++) {
			radio_type = bg_scan_in->chan_list[chan_idx].radio_type;
			if (!wlan_is_band_compatible
			    (pmpriv->config_bands | pmadapter->adhoc_start_band,
			     radio_type_to_band(radio_type)))
				continue;
			scan_type = bg_scan_in->chan_list[chan_idx].scan_type;
			/* Prevent active scanning on a radar controlled
			   channel */
			if (radio_type == HostCmd_SCAN_RADIO_TYPE_A) {
				if (wlan_11h_radar_detect_required
				    (pmpriv,
				     bg_scan_in->chan_list[chan_idx].
				     chan_number)) {
					scan_type = MLAN_SCAN_TYPE_PASSIVE;
				}
			}
			if (radio_type == HostCmd_SCAN_RADIO_TYPE_BG) {
				if (wlan_bg_scan_type_is_passive
				    (pmpriv,
				     bg_scan_in->chan_list[chan_idx].
				     chan_number)) {
					scan_type = MLAN_SCAN_TYPE_PASSIVE;
				}
			}
			tlv_chan_list->chan_scan_param[chan_num].chan_number =
				bg_scan_in->chan_list[chan_idx].chan_number;
			tlv_chan_list->chan_scan_param[chan_num].radio_type =
				bg_scan_in->chan_list[chan_idx].radio_type;
			if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
				tlv_chan_list->chan_scan_param[chan_num].
					chan_scan_mode.passive_scan = MTRUE;
			} else {
				tlv_chan_list->chan_scan_param[chan_num].
					chan_scan_mode.passive_scan = MFALSE;
			}
			/* Dwell time: caller value, else passive/specific
			   adapter default depending on scan type */
			if (bg_scan_in->chan_list[chan_idx].scan_time) {
				scan_dur =
					(t_u16) bg_scan_in->chan_list[chan_idx].
					scan_time;
			} else {
				if (scan_type == MLAN_SCAN_TYPE_PASSIVE) {
					scan_dur = pmadapter->passive_scan_time;
				} else {
					scan_dur =
						pmadapter->specific_scan_time;
				}
			}
			tlv_chan_list->chan_scan_param[chan_num].min_scan_time =
				wlan_cpu_to_le16(scan_dur);
			tlv_chan_list->chan_scan_param[chan_num].max_scan_time =
				wlan_cpu_to_le16(scan_dur);
			chan_num++;
		}
		tlv_chan_list->header.type =
			wlan_cpu_to_le16(TLV_TYPE_CHANLIST);
		tlv_chan_list->header.len =
			wlan_cpu_to_le16(sizeof(ChanScanParamSet_t) * chan_num);
		tlv += sizeof(MrvlIEtypesHeader_t) +
			sizeof(ChanScanParamSet_t) * chan_num;
		cmd_size +=
			sizeof(MrvlIEtypesHeader_t) +
			sizeof(ChanScanParamSet_t) * chan_num;
	} else {
		/* No explicit channels: derive the list from region tables */
		tlv_chan_list = (MrvlIEtypes_ChanListParamSet_t *) tlv;
		chan_num =
			wlan_bgscan_create_channel_list(pmpriv, bg_scan_in,
							tlv_chan_list);
		tlv_chan_list->header.type =
			wlan_cpu_to_le16(TLV_TYPE_CHANLIST);
		tlv_chan_list->header.len =
			wlan_cpu_to_le16(sizeof(ChanScanParamSet_t) * chan_num);
		tlv += sizeof(MrvlIEtypesHeader_t) +
			sizeof(ChanScanParamSet_t) * chan_num;
		cmd_size +=
			sizeof(MrvlIEtypesHeader_t) +
			sizeof(ChanScanParamSet_t) * chan_num;
	}
	tlv_start_later = (MrvlIEtypes_StartLater_t *) tlv;
	tlv_start_later->header.type =
		wlan_cpu_to_le16(TLV_TYPE_STARTBGSCANLATER);
	tlv_start_later->header.len =
		wlan_cpu_to_le16(sizeof(MrvlIEtypes_StartLater_t) -
				 sizeof(MrvlIEtypesHeader_t));
	tlv_start_later->value = wlan_cpu_to_le16(bg_scan_in->start_later);
	tlv += sizeof(MrvlIEtypes_StartLater_t);
	cmd_size += sizeof(MrvlIEtypes_StartLater_t);
done:
	pcmd->size = wlan_cpu_to_le16(cmd_size);
	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
* @brief This function handles the command response of extended scan
*
* @param pmpriv A pointer to mlan_private structure
* @param resp A pointer to HostCmd_DS_COMMAND
* @param pioctl_buf A pointer to mlan_ioctl_req structure
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/* Handle the BG_SCAN_CONFIG command response: for GET-type requests, copy
 * the firmware's current background-scan settings back into the caller's
 * ioctl buffer. */
mlan_status
wlan_ret_bgscan_config(IN mlan_private * pmpriv,
		       IN HostCmd_DS_COMMAND * resp,
		       IN mlan_ioctl_req * pioctl_buf)
{
	mlan_ds_scan *pscan = MNULL;
	HostCmd_DS_802_11_BG_SCAN_CONFIG *bg_scan =
		&resp->params.bg_scan_config;
	wlan_bgscan_cfg *bg_scan_out = MNULL;
	ENTER();
	if (pioctl_buf) {
		pscan = (mlan_ds_scan *) pioctl_buf->pbuf;
		bg_scan_out =
			(wlan_bgscan_cfg *) pscan->param.user_scan.scan_cfg_buf;
		bg_scan_out->action = wlan_le16_to_cpu(bg_scan->action);
		/* BUG FIX: was '&&', which is always false for two distinct
		   action values, so GET responses were never copied out.
		   Either GET action should return the current settings. */
		if ((bg_scan_out->action == BG_SCAN_ACT_GET) ||
		    (bg_scan_out->action == BG_SCAN_ACT_GET_PPS_UAPSD)) {
			bg_scan_out->enable = bg_scan->enable;
			bg_scan_out->bss_type = bg_scan->bss_type;
			bg_scan_out->chan_per_scan = bg_scan->chan_per_scan;
			bg_scan_out->scan_interval =
				wlan_le32_to_cpu(bg_scan->scan_interval);
			bg_scan_out->report_condition =
				wlan_le32_to_cpu(bg_scan->report_condition);
			pioctl_buf->data_read_written =
				sizeof(mlan_ds_scan) + MLAN_SUB_COMMAND_SIZE;
		}
	}
	LEAVE();
	return MLAN_STATUS_SUCCESS;
}
/**
 *  @brief This function handles the command response of bgscan_query
 *
 *  Parses the scan response into the adapter-owned scan table, then (if an
 *  IOCTL is pending) points the IOCTL result at the adapter's scan table
 *  and channel statistics without copying them.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param resp         A pointer to HostCmd_DS_COMMAND
 *  @param pioctl_buf   A pointer to mlan_ioctl_req structure (may be MNULL)
 *
 *  @return             MLAN_STATUS_SUCCESS (always)
 */
mlan_status
wlan_ret_802_11_bgscan_query(IN mlan_private * pmpriv,
IN HostCmd_DS_COMMAND * resp,
IN mlan_ioctl_buf * pioctl_buf)
{
mlan_ds_scan *pscan = MNULL;
mlan_adapter *pmadapter = pmpriv->adapter;
ENTER();
/* Reuse the regular scan-response parser to populate the scan table */
wlan_ret_802_11_scan(pmpriv, resp, MNULL);
if (pioctl_buf) {
pscan = (mlan_ds_scan *) pioctl_buf->pbuf;
/* Hand the adapter-owned tables to the caller by pointer (no copy) */
pscan->param.scan_resp.pscan_table =
(t_u8 *) pmadapter->pscan_table;
pscan->param.scan_resp.num_in_scan_table =
pmadapter->num_in_scan_table;
pscan->param.scan_resp.age_in_secs = pmadapter->age_in_secs;
pscan->param.scan_resp.pchan_stats =
(t_u8 *) pmadapter->pchan_stats;
pscan->param.scan_resp.num_in_chan_stats =
pmadapter->num_in_chan_stats;
pioctl_buf->data_read_written = sizeof(mlan_scan_resp) +
MLAN_SUB_COMMAND_SIZE;
}
LEAVE();
return MLAN_STATUS_SUCCESS;
}
/**
 *  @brief This function finds ssid in ssid list.
 *
 *  Scans the adapter scan table for entries whose SSID matches (and whose
 *  BSSID matches, if one was supplied).  For INFRA/IBSS the entry must also
 *  be network-compatible; among compatible entries the one with the best
 *  RSSI wins.  For AUTO/unknown modes no compatibility check is done.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param ssid         SSID to find in the list
 *  @param bssid        BSSID to qualify the SSID selection (may be MNULL)
 *  @param mode         Network mode: Infrastructure or IBSS
 *
 *  @return             index in BSSID list or < 0 if error
 */
t_s32
wlan_find_ssid_in_list(IN mlan_private * pmpriv,
IN mlan_802_11_ssid * ssid,
IN t_u8 * bssid, IN t_u32 mode)
{
mlan_adapter *pmadapter = pmpriv->adapter;
t_s32 net = -1, j;
t_u8 best_rssi = 0;
t_u32 i;
ENTER();
PRINTM(MINFO, "Num of entries in scan table = %d\n",
pmadapter->num_in_scan_table);
/*
 * Loop through the table until the maximum is reached or until a match
 * is found based on the bssid field comparison.  When a bssid is given
 * the loop also stops as soon as a match is recorded (net >= 0);
 * without one the whole table is scanned to find the best RSSI.
 */
for (i = 0;
i < pmadapter->num_in_scan_table && (!bssid || (bssid && net < 0));
i++) {
/* SSID must match; BSSID must match too when one was supplied.
 * NOTE: memcmp here is the 4-argument mlan wrapper, not libc. */
if (!wlan_ssid_cmp
(pmadapter, &pmadapter->pscan_table[i].ssid, ssid) &&
(!bssid ||
!memcmp(pmadapter, pmadapter->pscan_table[i].mac_address,
bssid, MLAN_MAC_ADDR_LENGTH))) {
/* Infrastructure entries on an unsupported band are skipped */
if ((mode == MLAN_BSS_MODE_INFRA) &&
!wlan_is_band_compatible(pmpriv->config_bands,
pmadapter->pscan_table[i].
bss_band))
continue;
switch (mode) {
case MLAN_BSS_MODE_INFRA:
case MLAN_BSS_MODE_IBSS:
j = wlan_is_network_compatible(pmpriv, i, mode);
if (j >= 0) {
/* Compatible: keep the strongest-signal entry */
if (SCAN_RSSI
(pmadapter->pscan_table[i].rssi) >
best_rssi) {
best_rssi =
SCAN_RSSI(pmadapter->
pscan_table
[i].rssi);
net = i;
}
} else {
/* Incompatible: remember the (negative) error
 * code only if nothing was found yet */
if (net == -1)
net = j;
}
break;
case MLAN_BSS_MODE_AUTO:
default:
/*
 * Do not check compatibility if the mode requested is
 * Auto/Unknown. Allows generic find to work without
 * verifying against the Adapter security settings
 */
if (SCAN_RSSI(pmadapter->pscan_table[i].rssi) >
best_rssi) {
best_rssi =
SCAN_RSSI(pmadapter->
pscan_table[i].rssi);
net = i;
}
break;
}
}
}
LEAVE();
return net;
}
/**
 *  @brief This function finds a specific compatible BSSID in the scan list
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param bssid        BSSID to find in the scan list (MNULL returns -1)
 *  @param mode         Network mode: Infrastructure or IBSS
 *
 *  @return             index in BSSID list or < 0 if error
 */
t_s32
wlan_find_bssid_in_list(IN mlan_private * pmpriv,
IN t_u8 * bssid, IN t_u32 mode)
{
mlan_adapter *pmadapter = pmpriv->adapter;
t_s32 net = -1;
t_u32 i;
ENTER();
/* No BSSID to search for -> nothing can match */
if (!bssid) {
LEAVE();
return -1;
}
PRINTM(MINFO, "FindBSSID: Num of BSSIDs = %d\n",
pmadapter->num_in_scan_table);
/*
 * Look through the scan table for a compatible match. The ret return
 * variable will be equal to the index in the scan table (greater
 * than zero) if the network is compatible. The loop will continue
 * past a matched bssid that is not compatible in case there is an
 * AP with multiple SSIDs assigned to the same BSSID
 */
for (i = 0; net < 0 && i < pmadapter->num_in_scan_table; i++) {
/* NOTE: memcmp is the 4-argument mlan wrapper, not libc memcmp */
if (!memcmp
(pmadapter, pmadapter->pscan_table[i].mac_address, bssid,
MLAN_MAC_ADDR_LENGTH)) {
/* Infrastructure entries on an unsupported band are skipped */
if ((mode == MLAN_BSS_MODE_INFRA) &&
!wlan_is_band_compatible(pmpriv->config_bands,
pmadapter->pscan_table[i].
bss_band))
continue;
switch (mode) {
case MLAN_BSS_MODE_INFRA:
case MLAN_BSS_MODE_IBSS:
/* May return a negative code for an incompatible
 * network, which keeps the loop searching */
net = wlan_is_network_compatible(pmpriv, i,
mode);
break;
default:
/* Auto/unknown mode: accept the first BSSID match */
net = i;
break;
}
}
}
LEAVE();
return net;
}
/**
 *  @brief Compare two SSIDs
 *
 *  Two SSIDs are considered equal when both are non-NULL, have the same
 *  length, and their contents compare equal byte-for-byte.
 *
 *  @param pmadapter    A pointer to mlan_adapter structure
 *  @param ssid1        A pointer to ssid to compare
 *  @param ssid2        A pointer to ssid to compare
 *
 *  @return             0--ssid is same, otherwise is different
 */
t_s32
wlan_ssid_cmp(IN pmlan_adapter pmadapter,
	      IN mlan_802_11_ssid * ssid1, IN mlan_802_11_ssid * ssid2)
{
	t_s32 ret = -1;

	ENTER();
	/* Either pointer missing or differing lengths -> different (-1);
	 * otherwise compare the bytes (mlan 4-argument memcmp wrapper). */
	if (ssid1 && ssid2 && (ssid1->ssid_len == ssid2->ssid_len))
		ret = memcmp(pmadapter, ssid1->ssid, ssid2->ssid,
			     ssid1->ssid_len);
	LEAVE();
	return ret;
}
/**
 *  @brief This function inserts scan command node to scan_pending_q.
 *
 *  A MNULL node is silently ignored.  The enqueue is protected by the
 *  moal spinlock callbacks supplied by the adapter.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param pcmd_node    A pointer to cmd_ctrl_node structure (may be MNULL)
 *  @return             N/A
 */
t_void
wlan_queue_scan_cmd(IN mlan_private * pmpriv, IN cmd_ctrl_node * pcmd_node)
{
	mlan_adapter *pmadapter = pmpriv->adapter;

	ENTER();
	if (pcmd_node != MNULL)
		util_enqueue_list_tail(pmadapter->pmoal_handle,
				       &pmadapter->scan_pending_q,
				       (pmlan_linked_list) pcmd_node,
				       pmadapter->callbacks.moal_spin_lock,
				       pmadapter->callbacks.moal_spin_unlock);
	LEAVE();
}
/**
 *  @brief Find the AP with specific ssid in the scan list
 *
 *  Picks the best entry from the scan table, copies its SSID/BSSID into
 *  the caller's structure, and (in AUTO mode) adopts the entry's BSS mode.
 *
 *  @param pmpriv           A pointer to mlan_private structure
 *  @param preq_ssid_bssid  A pointer to AP's ssid returned
 *
 *  @return                 MLAN_STATUS_SUCCESS--success, otherwise--fail
 */
mlan_status
wlan_find_best_network(IN mlan_private * pmpriv,
OUT mlan_ssid_bssid * preq_ssid_bssid)
{
mlan_adapter *pmadapter = pmpriv->adapter;
mlan_status ret = MLAN_STATUS_SUCCESS;
BSSDescriptor_t *preq_bss;
t_s32 i;
ENTER();
/* Zero the output first so a failed lookup leaves ssid_len == 0 */
memset(pmadapter, preq_ssid_bssid, 0, sizeof(mlan_ssid_bssid));
i = wlan_find_best_network_in_list(pmpriv);
if (i >= 0) {
preq_bss = &pmadapter->pscan_table[i];
memcpy(pmadapter, &preq_ssid_bssid->ssid, &preq_bss->ssid,
sizeof(mlan_802_11_ssid));
memcpy(pmadapter, (t_u8 *) & preq_ssid_bssid->bssid,
(t_u8 *) & preq_bss->mac_address, MLAN_MAC_ADDR_LENGTH);
/* Make sure we are in the right mode */
if (pmpriv->bss_mode == MLAN_BSS_MODE_AUTO)
pmpriv->bss_mode = preq_bss->bss_mode;
}
/* Nothing found (or found entry had an empty SSID) -> failure */
if (!preq_ssid_bssid->ssid.ssid_len) {
ret = MLAN_STATUS_FAILURE;
goto done;
}
PRINTM(MINFO, "Best network found = [%s], "
"[" MACSTR "]\n",
preq_ssid_bssid->ssid.ssid, MAC2STR(preq_ssid_bssid->bssid));
done:
LEAVE();
return ret;
}
/**
 *  @brief Send a scan command for all available channels filtered on a spec
 *
 *  Removes any stale scan-table entry for the requested SSID, builds a
 *  one-SSID user scan config (keeping previous results), and kicks off the
 *  scan.  The temporary config buffer is freed before returning.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *  @param pioctl_buf   A pointer to MLAN IOCTL Request buffer (may be MNULL)
 *  @param preq_ssid    A pointer to AP's ssid returned
 *
 *  @return             MLAN_STATUS_SUCCESS--success, otherwise--fail
 */
mlan_status
wlan_scan_specific_ssid(IN mlan_private * pmpriv,
IN t_void * pioctl_buf, IN mlan_802_11_ssid * preq_ssid)
{
mlan_status ret = MLAN_STATUS_SUCCESS;
mlan_callbacks *pcb = (mlan_callbacks *) & pmpriv->adapter->callbacks;
wlan_user_scan_cfg *pscan_cfg;
pmlan_ioctl_req pioctl_req = (mlan_ioctl_req *) pioctl_buf;
ENTER();
/* Guard: an SSID is mandatory for a specific scan */
if (!preq_ssid) {
if (pioctl_req)
pioctl_req->status_code = MLAN_ERROR_CMD_SCAN_FAIL;
ret = MLAN_STATUS_FAILURE;
goto done;
}
/* Drop any existing table entry for this SSID so results are fresh */
wlan_scan_delete_ssid_table_entry(pmpriv, preq_ssid);
ret = pcb->moal_malloc(pmpriv->adapter->pmoal_handle,
sizeof(wlan_user_scan_cfg), MLAN_MEM_DEF,
(t_u8 **) & pscan_cfg);
if (ret != MLAN_STATUS_SUCCESS || !pscan_cfg) {
PRINTM(MERROR, "Memory allocation for pscan_cfg failed!\n");
if (pioctl_req)
pioctl_req->status_code = MLAN_ERROR_NO_MEM;
ret = MLAN_STATUS_FAILURE;
goto done;
}
/* Scan only the requested SSID; keep previously scanned entries */
memset(pmpriv->adapter, pscan_cfg, 0x00, sizeof(wlan_user_scan_cfg));
memcpy(pmpriv->adapter, pscan_cfg->ssid_list[0].ssid,
preq_ssid->ssid, preq_ssid->ssid_len);
pscan_cfg->keep_previous_scan = MTRUE;
ret = wlan_scan_networks(pmpriv, pioctl_buf, pscan_cfg);
/* The config buffer is only needed to build the command; free it now */
if (pscan_cfg)
pcb->moal_mfree(pmpriv->adapter->pmoal_handle,
(t_u8 *) pscan_cfg);
done:
LEAVE();
return ret;
}
/**
 *  @brief Save a beacon buffer of the current bss descriptor
 *  Save the current beacon buffer to restore in the following cases that
 *  makes the bcn_buf not to contain the current ssid's beacon buffer.
 *    - the current ssid was not found somehow in the last scan.
 *    - the current ssid was the last entry of the scan table and overloaded.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *
 *  @return             N/A
 */
t_void
wlan_save_curr_bcn(IN mlan_private * pmpriv)
{
mlan_adapter *pmadapter = pmpriv->adapter;
mlan_callbacks *pcb = (pmlan_callbacks) & pmadapter->callbacks;
BSSDescriptor_t *pcurr_bss = &pmpriv->curr_bss_params.bss_descriptor;
mlan_status ret = MLAN_STATUS_SUCCESS;
ENTER();
/* save the beacon buffer if it is not saved or updated:
 * no saved copy yet, a size change, or differing contents
 * (mlan 4-argument memcmp wrapper) all force a re-save */
if ((pmpriv->pcurr_bcn_buf == MNULL) ||
(pmpriv->curr_bcn_size != pcurr_bss->beacon_buf_size) ||
(memcmp
(pmpriv->adapter, pmpriv->pcurr_bcn_buf, pcurr_bss->pbeacon_buf,
pcurr_bss->beacon_buf_size))) {
/* Release the stale copy before allocating a fresh one */
if (pmpriv->pcurr_bcn_buf) {
pcb->moal_mfree(pmadapter->pmoal_handle,
pmpriv->pcurr_bcn_buf);
pmpriv->pcurr_bcn_buf = MNULL;
}
pmpriv->curr_bcn_size = pcurr_bss->beacon_buf_size;
/* A zero-length beacon leaves pcurr_bcn_buf at MNULL */
if (pmpriv->curr_bcn_size) {
ret = pcb->moal_malloc(pmadapter->pmoal_handle,
pcurr_bss->beacon_buf_size,
MLAN_MEM_DEF,
&pmpriv->pcurr_bcn_buf);
/* On allocation failure the buffer simply stays unsaved */
if ((ret == MLAN_STATUS_SUCCESS) &&
pmpriv->pcurr_bcn_buf) {
memcpy(pmpriv->adapter, pmpriv->pcurr_bcn_buf,
pcurr_bss->pbeacon_buf,
pcurr_bss->beacon_buf_size);
PRINTM(MINFO, "current beacon saved %d\n",
pmpriv->curr_bcn_size);
}
}
}
LEAVE();
}
/**
 *  @brief Free a beacon buffer of the current bss descriptor
 *
 *  Releases the saved beacon copy (if any) and resets the pointer so a
 *  later wlan_save_curr_bcn() starts from a clean state.
 *
 *  @param pmpriv       A pointer to mlan_private structure
 *
 *  @return             N/A
 */
t_void
wlan_free_curr_bcn(IN mlan_private * pmpriv)
{
	pmlan_adapter pmadapter = pmpriv->adapter;
	pmlan_callbacks pcb = &pmadapter->callbacks;

	ENTER();
	if (pmpriv->pcurr_bcn_buf != MNULL) {
		pcb->moal_mfree(pmadapter->pmoal_handle,
				pmpriv->pcurr_bcn_buf);
		pmpriv->pcurr_bcn_buf = MNULL;
	}
	LEAVE();
}
| gpl-2.0 |
asturel/android_tegra3_grouper | arch/powerpc/kernel/setup_64.c | 343 | 17661 | /*
*
* Common boot and setup code.
*
* Copyright (C) 2001 PPC64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include "setup.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
int boot_cpuid = 0;
int __initdata spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
* before we've read this from the device tree.
*/
struct ppc64_caches ppc64_caches = {
.dline_size = 0x40,
.log_dline_size = 6,
.iline_size = 0x40,
.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.
*/
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
#ifdef CONFIG_SMP
static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option.
 *
 * Decide how many SMT threads per core to bring up at boot.
 * Precedence: "smt-enabled=" on the kernel command line beats the
 * Open Firmware /options "ibm,smt-enabled" property; default is all
 * threads.  Result is stored in smt_enabled_at_boot.
 */
static void check_smt_enabled(void)
{
struct device_node *dn;
const char *smt_option;
/* Default to enabling all threads */
smt_enabled_at_boot = threads_per_core;
/* Allow the command line to overrule the OF option */
if (smt_enabled_cmdline) {
if (!strcmp(smt_enabled_cmdline, "on"))
smt_enabled_at_boot = threads_per_core;
else if (!strcmp(smt_enabled_cmdline, "off"))
smt_enabled_at_boot = 0;
else {
/* Numeric argument: cap at the hardware thread count */
long smt;
int rc;
rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
if (!rc)
smt_enabled_at_boot =
min(threads_per_core, (int)smt);
}
} else {
/* No command-line override: consult the device tree */
dn = of_find_node_by_path("/options");
if (dn) {
smt_option = of_get_property(dn, "ibm,smt-enabled",
NULL);
if (smt_option) {
if (!strcmp(smt_option, "on"))
smt_enabled_at_boot = threads_per_core;
else if (!strcmp(smt_option, "off"))
smt_enabled_at_boot = 0;
}
of_node_put(dn);
}
}
}
/* Look for smt-enabled= cmdline option.
 * Only records the raw string here; parsing is deferred to
 * check_smt_enabled() because it runs before the value is needed. */
static int __init early_smt_enabled(char *p)
{
smt_enabled_cmdline = p;
return 0;
}
early_param("smt-enabled", early_smt_enabled);
#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
* the CPU that ignores the top 2 bits of the address in real
* mode so we can access kernel globals normally provided we
* only toy with things in the RMO region. From here, we do
* some early parsing of the device-tree to setup out MEMBLOCK
* data structures, and allocate & initialize the hash table
* and segment tables so we can start running with translation
* enabled.
*
* It is this function which will call the probe() callback of
* the various platform types and copy the matching one to the
* global ppc_md structure. Your platform can eventually do
* some very early initializations from the probe() routine, but
* this is not recommended, be very careful as, for example, the
* device-tree is not accessible via normal means at this point.
*/
/* Very early boot entry, called from head.S with the MMU off.
 * dt_ptr is the physical address of the flattened device tree. */
void __init early_setup(unsigned long dt_ptr)
{
/* -------- printk is _NOT_ safe to use here ! ------- */
/* Identify CPU type */
identify_cpu(0, mfspr(SPRN_PVR));
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
initialise_paca(&boot_paca, 0);
setup_paca(&boot_paca);
/* Initialize lockdep early or else spinlocks will blow */
lockdep_init();
/* -------- printk is now safe to use ------- */
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
/*
 * Do early initialization using the flattened device
 * tree, such as retrieving the physical memory map or
 * calculating/retrieving the hash table size.
 */
early_init_devtree(__va(dt_ptr));
/* Now we know the logical id of our boot cpu, setup the paca. */
setup_paca(&paca[boot_cpuid]);
/* Fix up paca fields required for the boot cpu */
get_paca()->cpu_start = 1;
/* Probe the machine type */
probe_machine();
/* Prepare the kdump crash-kernel trampoline before MMU setup */
setup_kdump_trampoline();
DBG("Found, Initializing memory management...\n");
/* Initialize the hash table or TLB handling */
early_init_mmu();
DBG(" <- early_setup()\n");
}
#ifdef CONFIG_SMP
/* Early per-CPU setup for secondary processors (MMU still being set up). */
void early_setup_secondary(void)
{
/* NOTE(review): original comment said "Mark interrupts enabled in
 * PACA", but 0 is the soft-disabled value in this kernel's lazy
 * interrupt scheme — confirm intended state before relying on it. */
get_paca()->soft_enabled = 0;
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
/* Release secondary CPUs from the common spinloop in head.S.
 * Writes the address of generic_secondary_smp_init into the shared
 * spinloop word, then polls (bounded) until all secondaries report in. */
void smp_release_cpus(void)
{
unsigned long *ptr;
int i;
DBG(" -> smp_release_cpus()\n");
/* All secondary cpus are spinning on a common spinloop, release them
 * all now so they can start to spin on their individual paca
 * spinloops. For non SMP kernels, the secondary cpus never get out
 * of the common spinloop.
 */
/* Address is adjusted by PHYSICAL_START because the CPUs spin in
 * real mode on the physical copy of the kernel image */
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
*ptr = __pa(generic_secondary_smp_init);
/* And wait a bit for them to catch up */
for (i = 0; i < 100000; i++) {
mb();
HMT_low();
if (spinning_secondaries == 0)
break;
udelay(1);
}
DBG("spinning_secondaries = %d\n", spinning_secondaries);
DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
* Initialize some remaining members of the ppc64_caches and systemcfg
* structures
* (at least until we get rid of them completely). This is mostly some
* cache information about the CPU that will be used by cache flush
* routines and/or provided to userland
*/
/*
 * Probe the device tree for the boot CPU's d-cache/i-cache sizes and
 * line sizes and record them in ppc64_caches.  Only the first "cpu"
 * node is consulted (all CPUs are assumed identical); cpu_spec values
 * serve as fallback line sizes when the properties are absent.
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const u32 *sizep, *lsizep;
			u32 size, lsize;

			/* D-cache: default line size from cpu_spec */
			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "d-cache-block-size", NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np, "d-cache-line-size", NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			/* idiom fix: compare pointers against NULL, not 0 */
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			/* I-cache: same scheme as the d-cache above */
			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "i-cache-block-size", NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np, "i-cache-line-size", NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}
/*
* Do some initial setup of the system. The parameters are those which
* were passed in from the bootloader.
*/
/* Main pre-memory-init system setup: applies feature fixups, unflattens
 * the device tree, runs platform init, and releases secondary CPUs. */
void __init setup_system(void)
{
DBG(" -> setup_system()\n");
/* Apply the CPUs-specific and firmware specific fixups to kernel
 * text (nop out sections not relevant to this CPU or this firmware)
 */
do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
do_feature_fixups(cur_cpu_spec->mmu_features,
&__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features,
&__start___lwsync_fixup, &__stop___lwsync_fixup);
/* Remaining one-shot code patching after all feature fixups */
do_final_fixups();
/*
 * Unflatten the device-tree passed by prom_init or kexec
 */
unflatten_device_tree();
/*
 * Fill the ppc64_caches & systemcfg structures with information
 * retrieved from the device-tree.
 */
initialize_cache_info();
#ifdef CONFIG_PPC_RTAS
/*
 * Initialize RTAS if available
 */
rtas_initialize();
#endif /* CONFIG_PPC_RTAS */
/*
 * Check if we have an initrd provided via the device-tree
 */
check_for_initrd();
/*
 * Do some platform specific early initializations, that includes
 * setting up the hash table pointers. It also sets up some interrupt-mapping
 * related options that will be used by finish_device_tree()
 */
if (ppc_md.init_early)
ppc_md.init_early();
/*
 * We can discover serial ports now since the above did setup the
 * hash table management for us, thus ioremap works. We do that early
 * so that further code can be debugged
 */
find_legacy_serial_ports();
/*
 * Register early console
 */
register_early_udbg_console();
/*
 * Initialize xmon
 */
xmon_setup();
/* Build the cpu_possible/present maps from the device tree */
smp_setup_cpu_maps();
check_smt_enabled();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
 * we can map physical -> logical CPU ids
 */
smp_release_cpus();
#endif
/* Boot banner: summarize key MMU/cache parameters in the log */
printk("Starting Linux PPC64 %s\n", init_utsname()->version);
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
/* Only print line sizes when they differ from the common 128B */
if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
printk("physical_start = 0x%llx\n",
(unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");
DBG(" <- setup_system()\n");
}
/* This returns the limit below which memory accesses to the linear
* mapping are guaranteed not to cause a TLB or SLB miss. This is
* used to allocate interrupt or emergency stacks for which our
* exception entry path doesn't deal with being interrupted.
*/
/* Return the upper bound of the always-mapped (bolted) part of the
 * linear mapping; see the comment above for why stacks must live there. */
static u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
/* Freescale BookE bolts the entire linear mapping */
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
return linear_map_top;
/* Other BookE, we assume the first GB is bolted */
return 1ul << 30;
#else
/* BookS, the first segment is bolted */
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
return 1UL << SID_SHIFT;
#endif
}
/* Allocate per-CPU hard- and soft-IRQ stacks from memblock,
 * constrained below safe_stack_limit() so they are always mapped. */
static void __init irqstack_early_init(void)
{
u64 limit = safe_stack_limit();
unsigned int i;
/*
 * Interrupt stacks must be in the first segment since we
 * cannot afford to take SLB misses on them.
 */
for_each_possible_cpu(i) {
/* THREAD_SIZE-aligned so the thread_info lives at the base */
softirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
}
}
#ifdef CONFIG_PPC_BOOK3E
/* BookE only: allocate per-CPU stacks for the critical, debug and
 * machine-check exception levels, and patch the debug vector when the
 * CPU has a separate debug exception level. */
static void __init exc_lvl_early_init(void)
{
extern unsigned int interrupt_base_book3e;
extern unsigned int exc_debug_debug_book3e;
unsigned int i;
for_each_possible_cpu(i) {
critirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
dbgirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
mcheckirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
}
/* Redirect the debug vector (base + 0x040, second word) to the
 * dedicated debug-level handler on CPUs that support it */
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
(unsigned long)&exc_debug_debug_book3e, 0);
}
#else
#define exc_lvl_early_init()
#endif
/*
* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled.
*/
/* Allocate a per-CPU emergency stack, used on bad kernel stack pointers
 * and during early SMP bringup; see constraints in the comment below. */
static void __init emergency_stack_init(void)
{
u64 limit;
unsigned int i;
/*
 * Emergency stacks must be under 256MB, we cannot afford to take
 * SLB misses on them. The ABI also requires them to be 128-byte
 * aligned.
 *
 * Since we use these as temporary stacks during secondary CPU
 * bringup, we need to get at them in real mode. This means they
 * must also be within the RMO region.
 */
limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
unsigned long sp;
sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
/* Stacks grow down: store the top-of-stack address */
sp += THREAD_SIZE;
paca[i].emergency_sp = __va(sp);
}
}
/*
* Called into from start_kernel this initializes bootmem, which is used
* to manage page allocation until mem_init is called.
*/
/* Architecture setup called from start_kernel(): publishes cache line
 * sizes, allocates special stacks, initializes bootmem and paging. */
void __init setup_arch(char **cmdline_p)
{
ppc64_boot_msg(0x12, "Setup Arch");
*cmdline_p = cmd_line;
/*
 * Set cache line size based on type of cpu as a default.
 * Systems with OF can look in the properties on the cpu node(s)
 * for a possibly more accurate value.
 */
dcache_bsize = ppc64_caches.dline_size;
icache_bsize = ppc64_caches.iline_size;
/* reboot on panic */
panic_timeout = 180;
if (ppc_md.panic)
setup_panic();
/* Describe the kernel image layout to the initial mm */
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
/* Special-purpose stacks: IRQ, BookE exception levels, emergency */
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc();
#endif
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
if (ppc_md.setup_arch)
ppc_md.setup_arch();
paging_init();
/* Initialize the MMU context management stuff */
mmu_context_init();
/* Reserve the KVM real-mode area if applicable */
kvm_rma_init();
ppc64_boot_msg(0x15, "Setup Done");
}
/* ToDo: do something useful if ppc_md is not yet setup. */
#define PPC64_LINUX_FUNCTION 0x0f000000
#define PPC64_IPL_MESSAGE 0xc0000000
#define PPC64_TERM_MESSAGE 0xb0000000
/* Forward a progress code and message to the platform's progress hook,
 * if one is installed; otherwise do nothing. */
static void ppc64_do_msg(unsigned int src, const char *msg)
{
	char buf[128];

	if (!ppc_md.progress)
		return;

	/* First line: the hex source/progress code */
	sprintf(buf, "%08X\n", src);
	ppc_md.progress(buf, 0);
	/* Second line: the (possibly truncated) message text */
	snprintf(buf, 128, "%s", msg);
	ppc_md.progress(buf, 0);
}
/* Print a boot progress message.
 * Goes both to the platform progress hook (via ppc64_do_msg) and
 * to the kernel log with a "[boot]" prefix. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
printk("[boot]%04x %s\n", src, msg);
}
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE ()
/* percpu first-chunk allocator: bootmem allocation on the CPU's own
 * NUMA node, constrained below MAX_DMA_ADDRESS. */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
/* percpu first-chunk free callback: return memory to bootmem. */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
free_bootmem(__pa(ptr), size);
}
/* Distance metric for percpu chunk grouping: CPUs on the same NUMA
 * node are "local", everything else is "remote". */
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return (cpu_to_node(from) == cpu_to_node(to)) ? LOCAL_DISTANCE
						      : REMOTE_DISTANCE;
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
/* Set up the per-CPU data areas using the embed first-chunk allocator
 * and record each CPU's offset in both __per_cpu_offset and its paca. */
void __init setup_per_cpu_areas(void)
{
const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
size_t atom_size;
unsigned long delta;
unsigned int cpu;
int rc;
/*
 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
 * to group units. For larger mappings, use 1M atom which
 * should be large enough to contain a number of units.
 */
if (mmu_linear_psize == MMU_PAGE_4K)
atom_size = PAGE_SIZE;
else
atom_size = 1 << 20;
rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
pcpu_fc_alloc, pcpu_fc_free);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
/* Translate the chunk base into per-CPU offsets and mirror them
 * into the pacas so asm code can reach per-CPU data */
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
paca[cpu].data_offset = __per_cpu_offset[cpu];
}
}
#endif
#ifdef CONFIG_PPC_INDIRECT_IO
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif /* CONFIG_PPC_INDIRECT_IO */
| gpl-2.0 |
faux123/lge-FR-kernel | net/ipv4/ip_gre.c | 343 | 41321 | /*
* Linux NET3: GRE over IP protocol decoder.
*
* Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_IPV6
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
Problems & solutions
--------------------
1. The most important issue is detecting local dead loops.
They would cause complete host lockup in transmit, which
would be "resolved" by stack overflow or, if queueing is enabled,
with infinite looping in net_bh.
We cannot track such dead loops during route installation,
it is infeasible task. The most general solutions would be
to keep skb->encapsulation counter (sort of local ttl),
and silently drop packet when it expires. It is the best
solution, but it supposes maintaining a new variable in ALL
skb, even if no tunneling is used.
Current solution: HARD_TX_LOCK lock breaks dead loops.
2. Networking dead loops would not kill routers, but would really
kill network. IP hop limit plays role of "t->recursion" in this case,
if we copy it from packet being encapsulated to upper header.
It is very good solution, but it introduces two problems:
- Routing protocols, using packets with ttl=1 (OSPF, RIP2),
do not work over tunnels.
- traceroute does not work. I planned to relay ICMP from tunnel,
so that this problem would be solved and traceroute output
would even more informative. This idea appeared to be wrong:
only Linux complies to rfc1812 now (yes, guys, Linux is the only
true router now :-)), all routers (at least, in neighbourhood of mine)
return only 8 bytes of payload. It is the end.
Hence, if we want that OSPF worked or traceroute said something reasonable,
we should search for another solution.
One of them is to parse packet trying to detect inner encapsulation
made by our node. It is difficult or even impossible, especially,
taking into account fragmentation. To be short, it is not a solution at all.
Current solution: The solution was UNEXPECTEDLY SIMPLE.
We force DF flag on tunnels with preconfigured hop limit,
that is ALL. :-) Well, it does not remove the problem completely,
but exponential growth of network traffic is changed to linear
(branches, that exceed pmtu are pruned) and tunnel mtu
fastly degrades to value <68, where looping stops.
Yes, it is not good if there exists a router in the loop,
which does not force DF, even when encapsulating packets have DF set.
But it is not our problem! Nobody could accuse us, we made
all that we could make. Even if it is your gated who injected
fatal route to network, even if it were you who configured
fatal static route: you are innocent. :-)
3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
practically identical code. It would be good to glue them
together, but it is not very evident, how to make them modular.
sit is integral part of IPv6, ipip and gre are naturally modular.
We could extract common parts (hash table, ioctl etc)
to a separate module (ip_tunnel.c).
Alexey Kuznetsov.
*/
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);
/* Fallback tunnel: no source, no destination, no key, no options */
#define HASH_SIZE 16
static int ipgre_net_id;
struct ipgre_net {
struct ip_tunnel *tunnels[4][HASH_SIZE];
struct net_device *fb_tunnel_dev;
};
/* Tunnel hash table */
/*
4 hash tables:
3: (remote,local)
2: (remote,*)
1: (*,local)
0: (*,*)
We require exact key match i.e. if a key is present in packet
it will match only tunnel with the same key; if it is not present,
it will match only keyless tunnel.
All keysless packets, if not matched configured keyless tunnels
will match fallback tunnel.
*/
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
#define tunnels_r_l tunnels[3]
#define tunnels_r tunnels[2]
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
static DEFINE_RWLOCK(ipgre_lock);
/* Given src, dst and key, find the appropriate tunnel for an input packet.
 *
 * Probes the four hash tables from most to least specific:
 * (remote,local) -> (remote,*) -> (*,local or multicast) -> (*,*).
 * Within each bucket, a tunnel scores 0 (exact device-type and link
 * match), +1 for a link mismatch, +2 for a device-type mismatch; the
 * first score-0 tunnel wins immediately, otherwise the lowest-scoring
 * candidate across all tables is used.  Falls back to the namespace's
 * fallback tunnel if it is up, else returns NULL.
 */
static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
__be32 remote, __be32 local,
__be32 key, __be16 gre_proto)
{
struct net *net = dev_net(dev);
int link = dev->ifindex;
unsigned h0 = HASH(remote);
unsigned h1 = HASH(key);
struct ip_tunnel *t, *cand = NULL;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
/* ETH_P_TEB payload means Ethernet-over-GRE (gretap) devices */
int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
ARPHRD_ETHER : ARPHRD_IPGRE;
int score, cand_score = 4;
/* Pass 1: tunnels keyed on both remote and local address */
for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
if (local != t->parms.iph.saddr ||
remote != t->parms.iph.daddr ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IPGRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
/* Pass 2: tunnels keyed on remote address only */
for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
if (remote != t->parms.iph.daddr ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IPGRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
/* Pass 3: tunnels keyed on local address (or a multicast
 * destination configured as the tunnel's remote) */
for (t = ign->tunnels_l[h1]; t; t = t->next) {
if ((local != t->parms.iph.saddr &&
(local != t->parms.iph.daddr ||
!ipv4_is_multicast(local))) ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IPGRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
/* Pass 4: wildcard tunnels, matched on key alone */
for (t = ign->tunnels_wc[h1]; t; t = t->next) {
if (t->parms.i_key != key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IPGRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
/* Best imperfect match beats the fallback device */
if (cand != NULL)
return cand;
if (ign->fb_tunnel_dev->flags & IFF_UP)
return netdev_priv(ign->fb_tunnel_dev);
return NULL;
}
/*
 * __ipgre_bucket - hash chain head for a given set of tunnel parameters.
 *
 * The chain index encodes endpoint specificity (bit 0: local set,
 * bit 1: unicast remote set); the bucket hashes the key, xor'd with the
 * remote address when one is configured.
 */
static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
					 struct ip_tunnel_parm *parms)
{
	unsigned bucket = HASH(parms->i_key);
	int chain = 0;

	if (parms->iph.saddr)
		chain |= 1;
	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr)) {
		chain |= 2;
		bucket ^= HASH(parms->iph.daddr);
	}

	return &ign->tunnels[chain][bucket];
}
/* Hash chain head for an existing tunnel, keyed by its own parameters. */
static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
					      struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
/*
 * ipgre_tunnel_link - insert @t at the head of its hash chain.
 *
 * t->next is written before taking the lock: @t is not yet reachable by
 * readers, so only the store making it visible (*tp = t) needs to be
 * serialized against them.
 */
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipgre_bucket(ign, t);

	t->next = *tp;
	write_lock_bh(&ipgre_lock);
	*tp = t;
	write_unlock_bh(&ipgre_lock);
}
/*
 * ipgre_tunnel_unlink - remove @t from its hash chain, if present.
 *
 * Walking the chain uses a pointer-to-pointer so that unlinking the head
 * and unlinking an interior node are the same single store.
 */
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipgre_lock);
			*tp = t->next;
			write_unlock_bh(&ipgre_lock);
			break;
		}
	}
}
static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
struct ip_tunnel_parm *parms,
int type)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
int link = parms->link;
struct ip_tunnel *t, **tp;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
key == t->parms.i_key &&
link == t->parms.link &&
type == t->dev->type)
break;
return t;
}
/*
 * ipgre_tunnel_locate - find a tunnel matching @parms, optionally creating it.
 * @net:    namespace to search/create in
 * @parms:  desired tunnel parameters (name may be empty)
 * @create: when non-zero and no match exists, allocate and register one
 *
 * Returns the existing or newly created tunnel, or NULL on allocation or
 * registration failure (and when no match exists with @create == 0).
 * Caller must hold RTNL.  A new device takes an extra dev_hold() reference,
 * dropped in ipgre_tunnel_uninit().
 */
static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		/* Constant template, expanded by dev_alloc_name() below;
		 * no need for the old sprintf(name, "gre%%d") round-trip. */
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
/*
 * ipgre_tunnel_uninit - ndo_uninit: detach a tunnel being unregistered.
 *
 * Removes the device from its hash chain and drops the reference taken
 * when it was linked.
 */
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
/*
 * ipgre_err - ICMP error handler for GRE packets we originated.
 *
 * Parses enough of the quoted outer IP + GRE header to find the owning
 * tunnel and records the error (err_count/err_time) so that a subsequent
 * transmit can signal a link failure to the sender.
 */
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put GRE key to the third word
   in GRE header. It makes impossible maintaining even soft state for keyed
   GRE tunnels with enabled checksum. Tell them "thank you".

   Well, I wonder, rfc1812 was written by Cisco employee,
   what the hell these idiots break standards established
   by themselves???
 */

	struct iphdr *iph = (struct iphdr *)skb->data;
	__be16	      *p = (__be16*)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;	/* outer IP + base GRE header */
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		/* Account for optional fields preceding the key. */
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	read_lock(&ipgre_lock);
	/* Quoted header: daddr/saddr are swapped relative to rx lookup. */
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	/* Bump the error count inside the timeout window, else restart it. */
	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipgre_lock);
	return;
}
/*
 * ipgre_ecn_decapsulate - propagate outer-header congestion marking inward.
 *
 * If the outer IP header carries ECN CE, set CE on the inner IPv4 or IPv6
 * header as well; other inner protocols are left untouched.
 */
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (!INET_ECN_is_ce(iph->tos))
		return;

	if (skb->protocol == htons(ETH_P_IP))
		IP_ECN_set_ce(ip_hdr(skb));
	else if (skb->protocol == htons(ETH_P_IPV6))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}
/*
 * ipgre_ecn_encapsulate - compute the outer TOS for an encapsulated packet.
 *
 * Combines the configured tunnel TOS with the inner packet's TOS/DS field
 * (0 for non-IP payloads) via the standard ECN encapsulation rule.
 */
static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner_tos;

	if (skb->protocol == htons(ETH_P_IP))
		inner_tos = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	else
		inner_tos = 0;

	return INET_ECN_encapsulate(tos, inner_tos);
}
/*
 * ipgre_rcv - receive handler for IPPROTO_GRE.
 *
 * Parses the GRE base header plus optional checksum/key/sequence fields,
 * looks up the owning tunnel and re-injects the decapsulated packet via
 * netif_rx().  Always consumes the skb; packets matching no tunnel are
 * answered with an ICMP port-unreachable.
 */
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;	/* base GRE header: flags + protocol */
	__be16 gre_proto;
	unsigned int len;

	/* 16 = maximal GRE header: 4 base + 4 csum + 4 key + 4 seq */
	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			/* 0x4? = IPv4 version nibble; otherwise WCCPv2. */
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		/* Bad checksum, or checksum expectation mismatch, counts
		 * as a CRC error. */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			/* Enforce monotonically increasing sequence numbers. */
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return(0);
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return(0);
}
/*
 * ipgre_tunnel_xmit - ndo_start_xmit: encapsulate and transmit one packet.
 *
 * Determines the tunnel destination (configured endpoint, or per-packet
 * for NBMA/broadcast tunnels where the outer header was pre-built by
 * ipgre_header()), routes the outer packet, performs PMTU bookkeeping,
 * ensures headroom, then pushes the outer IP + GRE header and hands the
 * skb to the IP layer via IPTUNNEL_XMIT().  Always consumes the skb.
 */
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *tiph;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	/* Broadcast/NBMA devices carry a prebuilt outer header in the skb. */
	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			stats->tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			/* Outer destination = inner route's gateway. */
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb_dst(skb)->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			/* Only v4-compatible v6 addresses yield a v4 dest. */
			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		/* tos == 1 means "inherit from the inner IPv4 header". */
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_GRE };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error;
		}
	}
	tdev = rt->u.dst.dev;

	/* Routing back onto ourselves would recurse. */
	if (tdev == dev) {
		ip_rt_put(rt);
		stats->collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		/* Inner DF set and packet too big: report frag-needed. */
		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#ifdef CONFIG_IPV6
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	/* Bleed off errors recorded by ipgre_err() as link failures. */
	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;

	/* Need a private, writable skb with room for the new headers. */
	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb_reset_transport_header(skb);
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	ip_hdr(skb);
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->frag_off		=	df;
	iph->protocol		=	IPPROTO_GRE;
	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		/* ttl 0 = inherit from the inner packet (or route metric). */
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	/* Optional fields are written back-to-front from the end of hlen:
	 * sequence, then key, then checksum. */
	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);
	IPTUNNEL_XMIT();
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 * ipgre_tunnel_bind_dev - derive mtu/headroom/hlen from the underlying path.
 *
 * Resolves the route to the configured endpoint (when one exists) to find
 * the underlying device, precomputes the encapsulation overhead implied by
 * o_flags, caches it in tunnel->hlen / dev->needed_headroom, and returns
 * the usable mtu for this tunnel device.
 */
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;	/* outer IP + base GRE */

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;

		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	/* No route: fall back to the explicitly bound link, if any. */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	/* 68 is the RFC 791 minimum IP mtu. */
	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
/*
 * ipgre_tunnel_ioctl - legacy SIOC{GET,ADD,CHG,DEL}TUNNEL configuration.
 *
 * GET may be issued on any tunnel device; ADD/CHG/DEL require
 * CAP_NET_ADMIN.  Operations addressed to the fallback device take the
 * actual target from the ip_tunnel_parm copied in from userspace.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Sanity-check the user-supplied outer header template. */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);	/* fixed ttl => force DF */

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* New parms collide with another device. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* Cannot flip between p-t-p and broadcast. */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/* Endpoints/keys changed: re-hash the tunnel. */
				ipgre_tunnel_unlink(ign, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			/* The fallback device itself cannot be deleted. */
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
/*
 * ipgre_tunnel_change_mtu - ndo_change_mtu: validate and apply a new mtu.
 *
 * The upper bound leaves room for the link-layer header and the GRE
 * encapsulation overhead; 68 is the RFC 791 minimum IP MTU.
 */
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int max_mtu = 0xFFF8 - dev->hard_header_len - tunnel->hlen;

	if (new_mtu < 68 || new_mtu > max_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
It allows to construct virtual multiprotocol broadcast "LAN"
over the Internet, provided multicast routing is tuned.
I have no idea was this bicycle invented before me,
so that I had to set ARPHRD_IPGRE to a random value.
I have an impression, that Cisco could make something similar,
but this feature is apparently missing in IOS<=11.2(8).
I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
ping -t 255 224.66.66.66
If nobody answers, mbone does not work.
ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
ip addr add 10.66.66.<somewhat>/24 dev Universe
ifconfig Universe up
ifconfig Universe add fe80::<Your_real_addr>/10
ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
ftp 10.66.66.66
...
ftp fec0:6666:6666::193.233.7.65
...
*/
/*
 * ipgre_header - header_ops->create: pre-build the outer IP + GRE header.
 *
 * Used by broadcast/NBMA ARPHRD_IPGRE devices so that a per-packet
 * destination can be supplied.  Returns t->hlen when the destination is
 * fully determined (caller-supplied daddr, or a configured unicast
 * endpoint), otherwise -t->hlen to signal an incomplete header.
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16*)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);

	if (daddr) {
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen;
	}
	if (iph->daddr && !ipv4_is_multicast(iph->daddr))
		return t->hlen;

	return -t->hlen;
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
memcpy(haddr, &iph->saddr, 4);
return 4;
}
/* Link-layer header ops for broadcast/NBMA (point-to-multipoint) devices. */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
/*
 * ipgre_open - ndo_open: for multicast tunnels, join the group on the
 * device that actually routes to it.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;

		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;	/* remembered for ipgre_close() */
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
/*
 * ipgre_close - ndo_stop: leave the multicast group joined in ipgre_open().
 */
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev) {
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
			in_dev_put(in_dev);
		}
	}
	return 0;
}

#endif
/* Device operations for plain (layer-3) GRE tunnel devices. */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
};
/*
 * ipgre_tunnel_setup - alloc_netdev() setup callback for layer-3 GRE devices.
 */
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor 	= free_netdev;

	dev->type		= ARPHRD_IPGRE;
	/* Default overhead: outer IP header + 4-byte base GRE header;
	 * refined later by ipgre_tunnel_bind_dev(). */
	dev->needed_headroom 	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;	/* "hardware address" is an IPv4 addr */
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
/*
 * ipgre_tunnel_init - ndo_init for layer-3 GRE devices.
 *
 * Copies the endpoint addresses into dev_addr/broadcast and selects
 * header_ops: NBMA tunnels (no daddr) and multicast tunnels get a
 * link-layer header so the destination can be supplied per-packet.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Need a fixed source address to join the group. */
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	return 0;
}
/*
 * ipgre_fb_tunnel_init - initialize the per-netns fallback device ("gre0").
 *
 * The fallback tunnel is keyless and wildcard on both endpoints; it is
 * linked directly into the (*,*) chain at bucket 0 (HASH(0) == 0).
 */
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
	ign->tunnels_wc[0]	= tunnel;
}
/* Handler hooked into the IPv4 stack for IP protocol 47 (GRE). */
static const struct net_protocol ipgre_protocol = {
	.handler	=	ipgre_rcv,
	.err_handler	=	ipgre_err,
	.netns_ok	=	1,
};
/*
 * ipgre_destroy_tunnels - unregister every tunnel device in a namespace.
 *
 * Re-reads the bucket head after each unregister_netdevice() because
 * ndo_uninit unlinks the device from the chain as a side effect.
 * Caller must hold RTNL.
 */
static void ipgre_destroy_tunnels(struct ipgre_net *ign)
{
	int prio, h;

	for (prio = 0; prio < 4; prio++) {
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t = ign->tunnels[prio][h];

			while (t != NULL) {
				unregister_netdevice(t->dev);
				t = ign->tunnels[prio][h];
			}
		}
	}
}
/*
 * ipgre_init_net - per-namespace init: allocate state and register "gre0".
 *
 * On failure every partially acquired resource is released through the
 * labelled unwind path below.
 */
static int ipgre_init_net(struct net *net)
{
	int err;
	struct ipgre_net *ign;

	err = -ENOMEM;
	ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
	if (ign == NULL)
		goto err_alloc;

	err = net_assign_generic(net, ipgre_net_id, ign);
	if (err < 0)
		goto err_assign;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	/* nothing */
err_assign:
	kfree(ign);
err_alloc:
	return err;
}
/*
 * ipgre_exit_net - per-namespace teardown: destroy all tunnels, free state.
 */
static void ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	/* Device unregistration must run under RTNL. */
	rtnl_lock();
	ipgre_destroy_tunnels(ign);
	rtnl_unlock();

	kfree(ign);
}
/* Per-network-namespace lifecycle hooks for the GRE driver. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
};
/*
 * ipgre_tunnel_validate - rtnl_link_ops->validate for "gre" devices.
 *
 * Rejects configurations requesting GRE routing headers or a non-zero
 * version in either direction's flags; anything else is acceptable.
 */
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags = 0;

	if (!data)
		return 0;

	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);

	return (flags & (GRE_VERSION | GRE_ROUTING)) ? -EINVAL : 0;
}
/*
 * ipgre_tap_validate - rtnl_link_ops->validate for "gretap" devices.
 *
 * gretap devices carry a real Ethernet MAC, so the address attribute must
 * be a valid unicast MAC; a remote endpoint, when given, must be non-zero.
 * The common GRE flag checks are delegated to ipgre_tunnel_validate().
 */
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (data && data[IFLA_GRE_REMOTE]) {
		__be32 daddr;

		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

	return ipgre_tunnel_validate(tb, data);
}
/*
 * ipgre_netlink_parms - translate IFLA_GRE_* attributes into tunnel parms.
 *
 * Missing attributes leave the zeroed defaults.  PMTU discovery defaults
 * to on (frag_off = IP_DF) unless explicitly disabled via
 * IFLA_GRE_PMTUDISC = 0.
 */
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
/*
 * ipgre_tap_init - ndo_init for gretap (Ethernet-over-GRE) devices.
 */
static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	/* Derive mtu/headroom from the underlying route, if resolvable. */
	ipgre_tunnel_bind_dev(dev);

	return 0;
}
/* Device ops for gretap devices: standard Ethernet address handling. */
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
};
/*
 * ipgre_tap_setup - alloc_netdev() setup callback for gretap devices.
 * ether_setup() provides the Ethernet defaults; only the GRE specifics
 * are overridden here.
 */
static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor 	= free_netdev;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
/*
 * ipgre_newlink - rtnl_link_ops->newlink: create a tunnel from netlink.
 *
 * Fails with -EEXIST when an identical tunnel is already configured.
 * A freshly registered device takes an extra reference, dropped in
 * ipgre_tunnel_uninit().
 */
static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;	/* keep an explicitly requested mtu */

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
/*
 * ipgre_changelink - rtnl_link_ops->changelink: reconfigure a tunnel.
 *
 * The fallback device may not be changed.  If the requested parms match
 * a different device, fail with -EEXIST; otherwise re-hash this device
 * under its new endpoints/key and apply the remaining fields.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			/* Cannot flip between p-t-p and broadcast. */
			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		/* Endpoints/key changed: re-hash into the right chain. */
		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
/*
 * ipgre_get_size - rtnl_link_ops->get_size: worst-case netlink dump size.
 *
 * Sums the attribute sizes emitted by ipgre_fill_info(), one per
 * IFLA_GRE_* attribute.
 */
static size_t ipgre_get_size(const struct net_device *dev)
{
	size_t size = 0;

	size += nla_total_size(4);	/* IFLA_GRE_LINK */
	size += nla_total_size(2);	/* IFLA_GRE_IFLAGS */
	size += nla_total_size(2);	/* IFLA_GRE_OFLAGS */
	size += nla_total_size(4);	/* IFLA_GRE_IKEY */
	size += nla_total_size(4);	/* IFLA_GRE_OKEY */
	size += nla_total_size(4);	/* IFLA_GRE_LOCAL */
	size += nla_total_size(4);	/* IFLA_GRE_REMOTE */
	size += nla_total_size(1);	/* IFLA_GRE_TTL */
	size += nla_total_size(1);	/* IFLA_GRE_TOS */
	size += nla_total_size(1);	/* IFLA_GRE_PMTUDISC */

	return size;
}
/*
 * ipgre_fill_info - rtnl_link_ops->fill_info: dump tunnel parms to netlink.
 * The NLA_PUT_* macros jump to nla_put_failure when the skb runs out of
 * tail room.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* Netlink attribute validation policy for the IFLA_GRE_* attributes. */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
/* rtnl link kind "gre": layer-3 GRE tunnel devices. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
/* rtnl link kind "gretap": Ethernet-over-GRE tunnel devices. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
/*
 *	And now the modules code and kernel interface.
 */

/*
 * ipgre_init - module init.
 *
 * Ordering fix: the per-netns state must be registered BEFORE the GRE
 * protocol handler is installed.  The original code added the protocol
 * first, so a GRE packet arriving during module load could reach
 * ipgre_rcv() -> net_generic() before any ipgre_net existed (this is the
 * bug fixed upstream by "gre: fix netns vs proto registration ordering").
 * Teardown on the error paths runs in the reverse order.
 */
static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
	if (err < 0)
		return err;

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		err = -EAGAIN;
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
	goto out;
}
/*
 * Module exit: tear down in strict reverse order of ipgre_init() so no
 * packets or netlink requests can reach state that is already gone.
 */
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
/* license: gpl-2.0 */
/* source: nikitines/zte-kernel-roamer2 kernel/cpuset.c */
/*
* kernel/cpuset.c
*
* Processor and Memory placement constraints for sets of tasks.
*
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2007 Silicon Graphics, Inc.
* Copyright (C) 2006 Google, Inc
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* 2006 Rework by Paul Menage to use generic cgroups
* 2008 Rework of the scheduler domains and CPU hotplug handling
* by Max Krasnyansky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
/*
* Workqueue for cpuset related tasks.
*
* Using kevent workqueue may cause deadlock when memory_migrate
* is set. So we create a separate workqueue thread for cpuset.
*/
static struct workqueue_struct *cpuset_wq;
/*
* Tracks how many cpusets are currently defined in system.
* When there is only one cpuset (the root cpuset) we can
* short circuit some hooks.
*/
int number_of_cpusets __read_mostly;
/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;
/* See "Frequency meter" comments, below. */
struct fmeter {
int cnt; /* unprocessed events count */
int val; /* most recent output value */
time_t time; /* clock (secs) when val computed */
spinlock_t lock; /* guards read or write of above */
};
struct cpuset {
struct cgroup_subsys_state css;
unsigned long flags; /* "unsigned long" so bitops work */
cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
struct cpuset *parent; /* my parent */
struct fmeter fmeter; /* memory_pressure filter */
/* partition number for rebuild_sched_domains() */
int pn;
/* for custom sched domain */
int relax_domain_level;
/* used for walking a cpuset hierarchy */
struct list_head stack_list;
};
/* Retrieve the cpuset for a cgroup (container_of over its subsys state). */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task via its cpuset subsystem state. */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}
/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,	/* cpus_allowed may not overlap siblings' */
	CS_MEM_EXCLUSIVE,	/* mems_allowed may not overlap siblings' */
	CS_MEM_HARDWALL,	/* restrict kernel allocations to this cpuset */
	CS_MEMORY_MIGRATE,	/* migrate pages when mems_allowed changes */
	CS_SCHED_LOAD_BALANCE,	/* participate in sched-domain balancing */
	CS_SPREAD_PAGE,		/* spread page cache across allowed nodes */
	CS_SPREAD_SLAB,		/* spread slab allocations across nodes */
} cpuset_flagbits_t;

/* convenient tests for these bits; each simply reads one flag bit */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
/* The root cpuset: CPU- and memory-exclusive by construction. */
static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
};
/*
* There are two global mutexes guarding cpuset structures. The first
* is the main control groups cgroup_mutex, accessed via
* cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
* callback_mutex, below. They can nest. It is ok to first take
* cgroup_mutex, then nest callback_mutex. We also require taking
* task_lock() when dereferencing a task's cpuset pointer. See "The
* task_lock() exception", at the end of this comment.
*
* A task must hold both mutexes to modify cpusets. If a task
* holds cgroup_mutex, then it blocks others wanting that mutex,
* ensuring that it is the only task able to also acquire callback_mutex
* and be able to modify cpusets. It can perform various checks on
* the cpuset structure first, knowing nothing will change. It can
* also allocate memory while just holding cgroup_mutex. While it is
* performing these checks, various callback routines can briefly
* acquire callback_mutex to query cpusets. Once it is ready to make
* the changes, it takes callback_mutex, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_mutex, as that would risk double tripping on callback_mutex
* from one of the callbacks into the cpuset code from within
* __alloc_pages().
*
* If a task is only holding callback_mutex, then it has read-only
* access to cpusets.
*
* Now, the task_struct fields mems_allowed and mempolicy may be changed
* by other task, we use alloc_lock in the task_struct fields to protect
* them.
*
* The cpuset_common_file_read() handlers only hold callback_mutex across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
* Accessing a task's cpuset should be done in accordance with the
* guidelines for accessing subsystem state in kernel/cgroup.c
*/
static DEFINE_MUTEX(callback_mutex);
/*
* cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
* buffers. They are statically allocated to prevent using excess stack
* when calling cpuset_print_task_mems_allowed().
*/
#define CPUSET_NAME_LEN (128)
#define CPUSET_NODELIST_LEN (256)
static char cpuset_name[CPUSET_NAME_LEN];
static char cpuset_nodelist[CPUSET_NODELIST_LEN];
static DEFINE_SPINLOCK(cpuset_buffer_lock);
/*
* This is ugly, but preserves the userspace API for existing cpuset
* users. If someone tries to mount the "cpuset" filesystem, we
* silently switch it to mount "cgroup" instead
*/
/*
 * Legacy mount entry point: a mount of the "cpuset" filesystem is
 * transparently redirected to a "cgroup" mount with the cpuset subsystem
 * enabled and the historical release agent configured.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	char mountopts[] =
		"cpuset,noprefix,"
		"release_agent=/sbin/cpuset_release_agent";
	struct file_system_type *cgroup_fs;
	struct dentry *ret;

	cgroup_fs = get_fs_type("cgroup");
	if (!cgroup_fs)
		return ERR_PTR(-ENODEV);

	ret = cgroup_fs->mount(cgroup_fs, flags, unused_dev_name, mountopts);
	put_filesystem(cgroup_fs);	/* drop ref taken by get_fs_type() */
	return ret;
}
/* Registration record for the legacy "cpuset" filesystem name. */
static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
/*
* Return in pmask the portion of a cpusets's cpus_allowed that
* are online. If none are online, walk up the cpuset hierarchy
* until we find one that does have some online cpus. If we get
* all the way to the top and still haven't found any online cpus,
* return cpu_online_map. Or if passed a NULL cs from an exit'ing
* task, return cpu_online_map.
*
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_map.
*
* Call with callback_mutex held.
*/
/*
 * Fill *pmask with the online portion of @cs's cpus_allowed, walking up
 * the hierarchy if @cs itself has no online CPUs.  Falls back to
 * cpu_online_mask when no ancestor qualifies or @cs is NULL.
 * Caller holds callback_mutex (see the comment above).
 */
static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	for (; cs; cs = cs->parent) {
		if (cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) {
			cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
			BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
			return;
		}
	}
	/* No suitable ancestor (or NULL @cs): use all online CPUs. */
	cpumask_copy(pmask, cpu_online_mask);
	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
}
/*
* Return in *pmask the portion of a cpusets's mems_allowed that
* are online, with memory. If none are online with memory, walk
* up the cpuset hierarchy until we find one that does have some
* online mems. If we get all the way to the top and still haven't
* found any online mems, return node_states[N_HIGH_MEMORY].
*
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_HIGH_MEMORY].
*
* Call with callback_mutex held.
*/
/*
 * Fill *pmask with the portion of @cs's mems_allowed that is online with
 * memory, walking up the hierarchy as needed; fall back to
 * node_states[N_HIGH_MEMORY] when nothing qualifies or @cs is NULL.
 * Caller holds callback_mutex (see the comment above).
 */
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	for (; cs; cs = cs->parent) {
		if (nodes_intersects(cs->mems_allowed,
				     node_states[N_HIGH_MEMORY])) {
			nodes_and(*pmask, cs->mems_allowed,
				  node_states[N_HIGH_MEMORY]);
			BUG_ON(!nodes_intersects(*pmask,
						 node_states[N_HIGH_MEMORY]));
			return;
		}
	}
	/* No suitable ancestor (or NULL @cs): use every node with memory. */
	*pmask = node_states[N_HIGH_MEMORY];
	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Called with callback_mutex/cgroup_mutex held
*/
/*
 * Mirror the cpuset's page/slab spread flags into @tsk's task flags,
 * setting or clearing PF_SPREAD_PAGE / PF_SPREAD_SLAB to match.
 *
 * Called with callback_mutex/cgroup_mutex held (per the comment above).
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}
/*
* is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
* are only set if the other's are set. Call holding cgroup_mutex.
*/
/*
 * is_cpuset_subset(p, q) - is cpuset @p a subset of cpuset @q?
 *
 * @p is a subset when its CPUs and memory nodes are subsets of @q's and
 * each exclusive flag of @p implies the same flag on @q.
 * Call holding cgroup_mutex.
 */
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	if (!cpumask_subset(p->cpus_allowed, q->cpus_allowed))
		return 0;
	if (!nodes_subset(p->mems_allowed, q->mems_allowed))
		return 0;
	if (is_cpu_exclusive(p) > is_cpu_exclusive(q))
		return 0;
	return is_mem_exclusive(p) <= is_mem_exclusive(q);
}
/**
* alloc_trial_cpuset - allocate a trial cpuset
* @cs: the cpuset that the trial cpuset duplicates
*/
/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 *
 * Duplicates @cs by value, then gives the copy its own cpus_allowed mask
 * (kmemdup only copied the cpumask_var_t handle, not separate storage).
 * Returns NULL on allocation failure.
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);

	if (trial == NULL)
		return NULL;
	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
		goto free_trial;

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	return trial;

free_trial:
	kfree(trial);
	return NULL;
}
/**
* free_trial_cpuset - free the trial cpuset
* @trial: the trial cpuset to be freed
*/
/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 *
 * Releases the separately-allocated cpumask first, then the struct itself.
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}
/*
* validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets.
*
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cgroup_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
* cpuset in the list must use cur below, not trial.
*
* 'trial' is the address of bulk structure copy of cur, with
* perhaps one or more of the fields cpus_allowed, mems_allowed,
* or flags changed to new, trial values.
*
* Return 0 if valid, -errno if not.
*/
static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cont;
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
		if (!is_cpuset_subset(cgroup_cs(cont), trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)
		return 0;

	par = cur->parent;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap.  Note the checks use 'trial' for my new values but the
	 * actual address 'cur' to skip myself in the sibling walk.
	 */
	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
		c = cgroup_cs(cont);
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
	if (cgroup_task_count(cur->css.cgroup)) {
		if (cpumask_empty(trial->cpus_allowed) ||
		    nodes_empty(trial->mems_allowed)) {
			return -ENOSPC;
		}
	}

	return 0;
}
#ifdef CONFIG_SMP
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping cpus_allowed masks?
*/
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}
/*
 * Raise @dattr's relax_domain_level to at least that of cpuset @c
 * (i.e. take the maximum of the two levels).
 */
static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (c->relax_domain_level > dattr->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
}
/*
 * Breadth-first walk of the cpuset subtree rooted at @c, folding the
 * relax_domain_level of every load-balanced, non-empty cpuset into
 * @dattr via update_domain_attr().  Uses the cpusets' embedded
 * stack_list nodes as the scan queue.
 */
static void
update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
{
	LIST_HEAD(q);

	list_add(&c->stack_list, &q);
	while (!list_empty(&q)) {
		struct cpuset *cp;
		struct cgroup *cont;
		struct cpuset *child;

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		/* Empty cpusets contribute nothing; skip their subtrees too. */
		if (cpumask_empty(cp->cpus_allowed))
			continue;

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);

		/* Queue the children for the next rounds of the walk. */
		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}
}
/*
* generate_sched_domains()
*
* This function builds a partial partition of the systems CPUs
* A 'partial partition' is a set of non-overlapping subsets whose
* union is a subset of that set.
* The output of this function needs to be passed to kernel/sched.c
* partition_sched_domains() routine, which will rebuild the scheduler's
* load balancing domains (sched domains) as specified by that partial
* partition.
*
* See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
* for a background explanation of this.
*
* Does not return errors, on the theory that the callers of this
* routine would rather not worry about failures to rebuild sched
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cgroup_lock held.
*
* The three key local variables below are:
* q - a linked-list queue of cpuset pointers, used to implement a
* top-down scan of all cpusets. This scan loads a pointer
* to each cpuset marked is_sched_load_balance into the
* array 'csa'. For our purposes, rebuilding the schedulers
* sched domains, we can ignore !is_sched_load_balance cpusets.
* csa - (for CpuSet Array) Array of pointers to all the cpusets
* that need to be load balanced, for convenient iterative
* access by the subsequent code that finds the best partition,
* i.e the set of domains (subsets) of CPUs such that the
* cpus_allowed of every cpuset marked is_sched_load_balance
* is a subset of one of these domains, while there are as
* many such domains as possible, each as small as possible.
* doms - Conversion of 'csa' to an array of cpumasks, for passing to
* the kernel/sched.c routine partition_sched_domains() in a
* convenient format, that can be easily compared to the prior
* value to determine what partition elements (sched domains)
* were changed (added or removed.)
*
* Finding the best partition (set of domains):
* The triple nested loops below over i, j, k scan over the
* load balanced cpusets (using the array of cpuset pointers in
* csa[]) looking for pairs of cpusets that have overlapping
* cpus_allowed, but which don't have the same 'pn' partition
* number and gives them in the same partition number. It keeps
* looping on the 'restart' label until it can no longer find
* any such pairs.
*
* The union of the cpus_allowed masks from the set of
* all cpusets having the same 'pn' value then form the one
* element of the partition (one sched domain) to be passed to
* partition_sched_domains().
*/
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	LIST_HEAD(q);		/* queue of cpusets to be scanned */
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	/*
	 * Breadth-first scan of the hierarchy, collecting into csa[] every
	 * non-empty cpuset marked for load balancing.  Children of a
	 * load-balanced cpuset are NOT descended into (their cpus are a
	 * subset of the parent's; see comment below).
	 */
	list_add(&top_cpuset.stack_list, &q);
	while (!list_empty(&q)) {
		struct cgroup *cont;
		struct cpuset *child;	/* scans child cpusets of cp */

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		/*
		 * All child cpusets contain a subset of the parent's cpus, so
		 * just skip them, and then we call update_domain_attr_tree()
		 * to calc relax_domain_level of the corresponding sched
		 * domain.
		 */
		if (is_sched_load_balance(cp)) {
			csa[csn++] = cp;
			continue;
		}

		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			/* Overlapping cpusets must share one partition. */
			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		/* Union the cpus of every cpuset in partition apn into dp. */
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
/*
* Rebuild scheduler domains.
*
* Call with neither cgroup_mutex held nor within get_online_cpus().
* Takes both cgroup_mutex and get_online_cpus().
*
* Cannot be directly called from cpuset code handling changes
* to the cpuset pseudo-filesystem, because it cannot be called
* from code that already holds cgroup_mutex.
*/
static void do_rebuild_sched_domains(struct work_struct *unused)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	/* Nest cgroup_lock() inside get_online_cpus() — see comment above. */
	get_online_cpus();

	/* Generate domain masks and attrs */
	cgroup_lock();
	ndoms = generate_sched_domains(&doms, &attr);
	cgroup_unlock();

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);

	put_online_cpus();
}
#else /* !CONFIG_SMP */
/* !CONFIG_SMP: no sched domains to rebuild. */
static void do_rebuild_sched_domains(struct work_struct *unused)
{
}
/* !CONFIG_SMP: report a single default domain with no cpumask array. */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	*domains = NULL;
	return 1;
}
#endif /* CONFIG_SMP */
static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
/*
* Rebuild scheduler domains, asynchronously via workqueue.
*
* If the flag 'sched_load_balance' of any cpuset with non-empty
* 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
* which has that flag enabled, or if any cpuset with a non-empty
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
* The rebuild_sched_domains() and partition_sched_domains()
* routines must nest cgroup_lock() inside get_online_cpus(),
* but such cpuset changes as these must nest that locking the
* other way, holding cgroup_lock() for much of the code.
*
* So in order to avoid an ABBA deadlock, the cpuset code handling
* these user changes delegates the actual sched domain rebuilding
* to a separate workqueue thread, which ends up processing the
* above do_rebuild_sched_domains() function.
*/
/* Kick the rebuild onto cpuset_wq (avoids the ABBA deadlock noted above). */
static void async_rebuild_sched_domains(void)
{
	queue_work(cpuset_wq, &rebuild_sched_domains_work);
}
/*
* Accomplishes the same scheduler domain rebuild as the above
* async_rebuild_sched_domains(), however it directly calls the
* rebuild routine synchronously rather than calling it via an
* asynchronous work thread.
*
* This can only be called from code that is not holding
* cgroup_mutex (not nested in a cgroup_lock() call.)
*/
/* Synchronous variant; caller must NOT hold cgroup_mutex (see above). */
void rebuild_sched_domains(void)
{
	do_rebuild_sched_domains(NULL);
}
/**
* cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
* @tsk: task to test
* @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
*
* Call with cgroup_mutex held. May take callback_mutex during call.
* Called for each task in a cgroup by cgroup_scan_tasks().
* Return nonzero if this tasks's cpus_allowed mask should be changed (in other
* words, if its mask is not equal to its cpuset's mask).
*/
static int cpuset_test_cpumask(struct task_struct *tsk,
			       struct cgroup_scanner *scan)
{
	/* Nonzero iff the task's mask differs from its cpuset's mask. */
	return !cpumask_equal(&tsk->cpus_allowed,
			      (cgroup_cs(scan->cg))->cpus_allowed);
}
/**
* cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
* @tsk: task to test
* @scan: struct cgroup_scanner containing the cgroup of the task
*
* Called by cgroup_scan_tasks() for each task in a cgroup whose
* cpus_allowed mask needs to be changed.
*
* We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cgroup_lock() at this point.
*/
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	/* Force the task onto exactly its cpuset's allowed CPUs. */
	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
}
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
*
* Called with cgroup_mutex held
*
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
struct cgroup_scanner scan;
scan.cg = cs->css.cgroup;
scan.test_task = cpuset_test_cpumask;
scan.process_task = cpuset_change_cpumask;
scan.heap = heap;
cgroup_scan_tasks(&scan);
}
/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset holding the proposed new cpus_allowed
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		/* Reject CPUs that aren't currently active. */
		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	/* Pre-allocate the scan heap so the task walk can't fail mid-way. */
	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	/* Publish the new mask under callback_mutex (readers take it too). */
	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	/*
	 * Scan tasks in the cpuset, and update the cpumasks of any
	 * that need an update.
	 */
	update_tasks_cpumask(cs, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		async_rebuild_sched_domains();
	return 0;
}
/*
* cpuset_migrate_mm
*
* Migrate memory region from one set of nodes to another.
*
* Temporarilly set tasks mems_allowed to target nodes of migration,
* so that the migration code can allocate pages on these nodes.
*
* Call holding cgroup_mutex, so current's cpuset won't change
* during this call, as manage_mutex holds off any cpuset_attach()
* calls. Therefore we don't need to take task_lock around the
* call to guarantee_online_mems(), as we know no one is changing
* our task's cpuset.
*
* While the mm_struct we are migrating is typically from some
* other task, the task_struct mems_allowed that we are hacking
* is for our current task, which must allocate new pages for that
* migrating memory region.
*/
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
			      const nodemask_t *to)
{
	struct task_struct *tsk = current;

	/* Temporarily widen current's mems to the target nodes (see above). */
	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	/* Restore mems_allowed to the current cpuset's online nodes. */
	guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
}
/*
* cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
* @tsk: the task to change
* @newmems: new nodes that the task will be set
*
* In order to avoid seeing no nodes if the old and new nodes are disjoint,
* we structure updates as setting all new allowed nodes, then clearing newly
* disallowed ones.
*/
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
repeat:
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Step 1: grow mems_allowed to the union of old and new, so the
	 * task never transiently sees an empty mask (see comment above).
	 */
	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);


	/*
	 * ensure checking ->mems_allowed_change_disable after setting all new
	 * allowed nodes.
	 *
	 * the read-side task can see an nodemask with new allowed nodes and
	 * old allowed nodes. and if it allocates page when cpuset clears newly
	 * disallowed ones continuous, it can see the new allowed bits.
	 *
	 * And if setting all new allowed nodes is after the checking, setting
	 * all new allowed nodes and clearing newly disallowed ones will be done
	 * continuous, and the read-side task may find no node to alloc page.
	 */
	smp_mb();

	/*
	 * Allocation of memory is very fast, we needn't sleep when waiting
	 * for the read-side.
	 */
	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
		task_unlock(tsk);
		if (!task_curr(tsk))
			yield();
		goto repeat;
	}

	/*
	 * ensure checking ->mems_allowed_change_disable before clearing all new
	 * disallowed nodes.
	 *
	 * if clearing newly disallowed bits before the checking, the read-side
	 * task may find no node to alloc page.
	 */
	smp_mb();

	/* Step 2: shrink to exactly the new mask now that readers are safe. */
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;
	task_unlock(tsk);
}
/*
* Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
* of it to cpuset's new mems_allowed, and migrate pages to new nodes if
* memory_migrate flag is set. Called with cgroup_mutex held.
*/
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct mm_struct *mm;
	struct cpuset *cs;
	int migrate;
	const nodemask_t *oldmem = scan->data;
	static nodemask_t newmems;	/* protected by cgroup_mutex */

	cs = cgroup_cs(scan->cg);
	guarantee_online_mems(cs, &newmems);

	cpuset_change_task_nodemask(p, &newmems);

	/* Tasks without an mm (kernel threads) have nothing left to rebind. */
	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
	mmput(mm);
}
static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
* @oldmem: old mems_allowed of cpuset cs
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
*
* Called with cgroup_mutex held
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
				  struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;			/* process every task */
	scan.process_task = cpuset_change_nodemask;
	scan.heap = heap;
	scan.data = (nodemask_t *)oldmem;	/* old mask, for migration */

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cgroup_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	cgroup_scan_tasks(&scan);

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
* cpusets mems_allowed, and for each task in the cpuset,
* update mems_allowed and rebind task's mempolicy and any vma
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
* Call with cgroup_mutex held. May take callback_mutex during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_sem, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
{
	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
	int retval;
	struct ptr_heap heap;

	if (!oldmem)
		return -ENOMEM;

	/*
	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
	if (!*buf) {
		nodes_clear(trialcs->mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs->mems_allowed);
		if (retval < 0)
			goto done;

		/* Only nodes that are online with memory are acceptable. */
		if (!nodes_subset(trialcs->mems_allowed,
				node_states[N_HIGH_MEMORY])) {
			retval =  -EINVAL;
			goto done;
		}
	}
	*oldmem = cs->mems_allowed;
	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		goto done;

	/* Pre-allocate the scan heap so the task walk can't fail mid-way. */
	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval < 0)
		goto done;

	/* Publish the new mask under callback_mutex (readers take it too). */
	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs->mems_allowed;
	mutex_unlock(&callback_mutex);

	update_tasks_nodemask(cs, oldmem, &heap);

	heap_free(&heap);
done:
	NODEMASK_FREE(oldmem);
	return retval;
}
/*
 * current_cpuset_is_being_rebound - is current's cpuset having its mems
 * rebound right now?  Checked (presumably by mpol_dup(), per the comments
 * in update_tasks_nodemask()) to catch forks racing with a rebind.
 */
int current_cpuset_is_being_rebound(void)
{
	return task_cs(current) == cpuset_being_rebound;
}
/*
 * update_relax_domain_level - set a cpuset's sched_relax_domain_level.
 * @cs:  cpuset to update
 * @val: requested level; -1 means "use the default"
 *
 * Returns 0 on success, -EINVAL if @val is out of range (SMP only).
 * Triggers a sched-domain rebuild when the level actually changes and
 * this cpuset participates in load balancing.
 */
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
	/* Valid range is -1 (default) .. sched_domain_level_max - 1. */
	if (val < -1 || val >= sched_domain_level_max)
		return -EINVAL;
#endif

	if (val == cs->relax_domain_level)
		return 0;	/* unchanged - nothing to rebuild */

	cs->relax_domain_level = val;
	/* Rebuild only matters for non-empty, load-balanced cpusets. */
	if (!cpumask_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
		async_rebuild_sched_domains();

	return 0;
}
/*
 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
 * @tsk: task to be updated
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cgroup_lock() at this point.
 */
static void cpuset_change_flag(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
}
/*
* update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
*
* Called with cgroup_mutex held
*
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
struct cgroup_scanner scan;
scan.cg = cs->css.cgroup;
scan.test_task = NULL;
scan.process_task = cpuset_change_flag;
scan.heap = heap;
cgroup_scan_tasks(&scan);
}
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:		the bit to update (see cpuset_flagbits_t)
 * cs:		the cpuset to update
 * turning_on: 	whether the flag is being set or cleared
 *
 * Call with cgroup_mutex held.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from validate_change()/heap_init().
 */
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
			int turning_on)
{
	struct cpuset *trialcs;
	int balance_flag_changed;
	int spread_flag_changed;
	struct ptr_heap heap;
	int err;

	/* Build a trial copy, flip the bit there, and validate it first. */
	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs)
		return -ENOMEM;

	if (turning_on)
		set_bit(bit, &trialcs->flags);
	else
		clear_bit(bit, &trialcs->flags);

	err = validate_change(cs, trialcs);
	if (err < 0)
		goto out;

	/* Pre-allocate the scan heap so the task walk cannot fail. */
	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (err < 0)
		goto out;

	/* Compare old vs. trial BEFORE committing the new flags. */
	balance_flag_changed = (is_sched_load_balance(cs) !=
				is_sched_load_balance(trialcs));

	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
			|| (is_spread_page(cs) != is_spread_page(trialcs)));

	mutex_lock(&callback_mutex);
	cs->flags = trialcs->flags;
	mutex_unlock(&callback_mutex);

	/* Rebuild domains / re-spread tasks only if something changed. */
	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
		async_rebuild_sched_domains();

	if (spread_flag_changed)
		update_tasks_flags(cs, &heap);
	heap_free(&heap);
out:
	free_trial_cpuset(trialcs);
	return err;
}
/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

/* Initialize a frequency meter: no events, zero rate, epoch time 0. */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}
/*
 * Internal meter update - decay the filtered value for the elapsed
 * seconds, then fold any pending event count into it.  Caller holds
 * fmp->lock (see fmeter_markevent()/fmeter_getrate()).
 */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	/* No whole second has elapsed: nothing to decay yet. */
	if (ticks == 0)
		return;

	/* Past FM_MAXTICKS the level is stable; don't bother with more. */
	if (ticks > FM_MAXTICKS)
		ticks = FM_MAXTICKS;

	/* One IIR decay step per elapsed second. */
	for (; ticks > 0; ticks--)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	/* Mix the accumulated events into the filtered value. */
	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}
/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	/* Cap pending count at FM_MAXCNT to avoid overflow in the filter. */
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}
/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}
/*
 * Called by cgroups to determine if a cpuset is usable; cgroup_mutex held.
 *
 * Returns 0 if @tsk may be attached to the cpuset of @cont, or a
 * negative errno:
 *   -EPERM  caller may not move another task (no CAP_SYS_ADMIN and
 *           euid does not match the target's uid/suid)
 *   -ENOSPC cpuset has no cpus or no memory nodes
 *   -EINVAL task is a cpu-bound kthread (PF_THREAD_BOUND)
 */
static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
			     struct task_struct *tsk)
{
	struct cpuset *cs = cgroup_cs(cont);

	if ((current != tsk) && (!capable(CAP_SYS_ADMIN))) {
		const struct cred *cred = current_cred(), *tcred;
		int denied;

		/*
		 * BUGFIX: tcred was previously compared while still
		 * uninitialized.  Fetch the target task's credentials
		 * under RCU, as __task_cred() requires.
		 */
		rcu_read_lock();
		tcred = __task_cred(tsk);
		denied = cred->euid != tcred->uid &&
			 cred->euid != tcred->suid;
		rcu_read_unlock();
		if (denied)
			return -EPERM;
	}

	/* Tasks cannot live in a cpuset with no cpus or no mems. */
	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	/*
	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
	 * cannot change their cpu affinity and isolating such threads by their
	 * set of allowed nodes is unnecessary.  Thus, cpusets are not
	 * applicable for such threads.  This prevents checking for success of
	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
	 * be changed.
	 */
	if (tsk->flags & PF_THREAD_BOUND)
		return -EINVAL;

	return 0;
}
/*
 * Per-task attach check: defer to the security module's scheduler-change
 * policy.  Returns 0 if @task may be attached, negative errno otherwise.
 */
static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
{
	return security_task_setscheduler(task);
}
/*
 * Protected by cgroup_lock.  The nodemasks must be stored globally because
 * dynamically allocating them is not allowed in pre_attach, and they must
 * persist among pre_attach, attach_task, and attach.
 */
static cpumask_var_t cpus_attach;		/* cpus for tasks being attached */
static nodemask_t cpuset_attach_nodemask_from;	/* old cpuset's mems */
static nodemask_t cpuset_attach_nodemask_to;	/* new cpuset's mems */
/* Set-up work for before attaching each task. */
static void cpuset_pre_attach(struct cgroup *cont)
{
	struct cpuset *cs = cgroup_cs(cont);

	/* top_cpuset gets every possible cpu; others only online ones. */
	if (cs == &top_cpuset)
		cpumask_copy(cpus_attach, cpu_possible_mask);
	else
		guarantee_online_cpus(cs, cpus_attach);

	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
}
/* Per-thread attachment work: apply the cpuset's cpus, mems and flags. */
static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
{
	int err;
	struct cpuset *cs = cgroup_cs(cont);

	/*
	 * can_attach beforehand should guarantee that this doesn't fail.
	 * TODO: have a better way to handle failure here
	 */
	err = set_cpus_allowed_ptr(tsk, cpus_attach);
	WARN_ON_ONCE(err);

	/* Uses the masks prepared by cpuset_pre_attach(). */
	cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
	cpuset_update_task_spread_flag(cs, tsk);
}
/*
 * Whole-group attach work: rebind the group leader's mm mempolicies to
 * the new cpuset's mems, and migrate its pages if memory_migrate is set.
 */
static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
			  struct cgroup *oldcont, struct task_struct *tsk)
{
	struct mm_struct *mm;
	struct cpuset *cs = cgroup_cs(cont);
	struct cpuset *oldcs = cgroup_cs(oldcont);

	/*
	 * Change mm, possibly for multiple threads in a threadgroup. This is
	 * expensive and may sleep.
	 */
	cpuset_attach_nodemask_from = oldcs->mems_allowed;
	cpuset_attach_nodemask_to = cs->mems_allowed;
	mm = get_task_mm(tsk);
	if (mm) {
		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
		if (is_memory_migrate(cs))
			cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
					  &cpuset_attach_nodemask_to);
		mmput(mm);
	}
}
/*
 * The various types of files and directories in a cpuset file system.
 * Stored in each cftype's ->private field to dispatch the read/write
 * handlers below.
 */
typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEM_HARDWALL,
	FILE_SCHED_LOAD_BALANCE,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;
/*
 * Write handler for the boolean (u64) cpuset control files; dispatches
 * on the cftype's private cpuset_filetype_t.  Returns 0 or negative
 * errno; -ENODEV if the cgroup has been removed.
 */
static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	switch (type) {
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
		break;
	case FILE_MEM_HARDWALL:
		retval = update_flag(CS_MEM_HARDWALL, cs, val);
		break;
	case FILE_SCHED_LOAD_BALANCE:
		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
		break;
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		cpuset_memory_pressure_enabled = !!val;
		break;
	case FILE_MEMORY_PRESSURE:
		/* memory_pressure is read-only (mode S_IRUGO in files[]). */
		retval = -EACCES;
		break;
	case FILE_SPREAD_PAGE:
		retval = update_flag(CS_SPREAD_PAGE, cs, val);
		break;
	case FILE_SPREAD_SLAB:
		retval = update_flag(CS_SPREAD_SLAB, cs, val);
		break;
	default:
		retval = -EINVAL;
		break;
	}
	cgroup_unlock();
	return retval;
}
/*
 * Write handler for the signed (s64) cpuset control files; currently
 * only sched_relax_domain_level.  Returns 0 or negative errno.
 */
static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		retval = update_relax_domain_level(cs, val);
		break;
	default:
		retval = -EINVAL;
		break;
	}
	cgroup_unlock();
	return retval;
}
/*
 * Common handling for a write to a "cpus" or "mems" file.
 * Parses @buf into a trial cpuset and applies it via update_cpumask()
 * or update_nodemask().  Returns 0 or negative errno.
 */
static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *trialcs;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs) {
		retval = -ENOMEM;
		goto out;
	}

	switch (cft->private) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, trialcs, buf);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, trialcs, buf);
		break;
	default:
		retval = -EINVAL;
		break;
	}

	free_trial_cpuset(trialcs);
out:
	cgroup_unlock();
	return retval;
}
/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

/* Format cs->cpus_allowed as an ascii cpulist; returns bytes written. */
static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	size_t count;

	/* callback_mutex keeps cpus_allowed stable while we format it. */
	mutex_lock(&callback_mutex);
	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	return count;
}
/* Format cs->mems_allowed as an ascii nodelist; returns bytes written. */
static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	size_t count;

	/* callback_mutex keeps mems_allowed stable while we format it. */
	mutex_lock(&callback_mutex);
	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
	mutex_unlock(&callback_mutex);

	return count;
}
/*
 * Read handler for the "cpus" and "mems" files: formats the relevant
 * mask into a temporary page, appends a newline, and copies it to
 * userspace.  Returns bytes copied, or negative errno.
 */
static ssize_t cpuset_common_file_read(struct cgroup *cont,
				       struct cftype *cft,
				       struct file *file,
				       char __user *buf,
				       size_t nbytes, loff_t *ppos)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;

	/* One page bounds the output; the formatters cap at PAGE_SIZE. */
	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
	free_page((unsigned long)page);
	return retval;
}
/* Read handler for the boolean (u64) cpuset control files. */
static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_CPU_EXCLUSIVE:
		return is_cpu_exclusive(cs);
	case FILE_MEM_EXCLUSIVE:
		return is_mem_exclusive(cs);
	case FILE_MEM_HARDWALL:
		return is_mem_hardwall(cs);
	case FILE_SCHED_LOAD_BALANCE:
		return is_sched_load_balance(cs);
	case FILE_MEMORY_MIGRATE:
		return is_memory_migrate(cs);
	case FILE_MEMORY_PRESSURE_ENABLED:
		return cpuset_memory_pressure_enabled;
	case FILE_MEMORY_PRESSURE:
		return fmeter_getrate(&cs->fmeter);
	case FILE_SPREAD_PAGE:
		return is_spread_page(cs);
	case FILE_SPREAD_SLAB:
		return is_spread_slab(cs);
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}
/* Read handler for the signed (s64) cpuset control files. */
static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		return cs->relax_domain_level;
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}
/*
 * for the common functions, 'private' gives the type of file
 * (a cpuset_filetype_t value dispatched on by the handlers above).
 */
static struct cftype files[] = {
	{
		.name = "cpus",
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		/* worst case: every cpu listed individually */
		.max_write_len = (100U + 6 * NR_CPUS),
		.private = FILE_CPULIST,
	},

	{
		.name = "mems",
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		.max_write_len = (100U + 6 * MAX_NUMNODES),
		.private = FILE_MEMLIST,
	},

	{
		.name = "cpu_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_CPU_EXCLUSIVE,
	},

	{
		.name = "mem_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_EXCLUSIVE,
	},

	{
		.name = "mem_hardwall",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_HARDWALL,
	},

	{
		.name = "sched_load_balance",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SCHED_LOAD_BALANCE,
	},

	{
		.name = "sched_relax_domain_level",
		.read_s64 = cpuset_read_s64,
		.write_s64 = cpuset_write_s64,
		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
	},

	{
		.name = "memory_migrate",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_MIGRATE,
	},

	{
		/* read-only; writes get -EACCES from cpuset_write_u64() */
		.name = "memory_pressure",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE,
		.mode = S_IRUGO,
	},

	{
		.name = "memory_spread_page",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_PAGE,
	},

	{
		.name = "memory_spread_slab",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_SLAB,
	},
};
/* Only added to the root cpuset; see cpuset_populate(). */
static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.read_u64 = cpuset_read_u64,
	.write_u64 = cpuset_write_u64,
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};
/*
 * Create the cpuset control files in a newly-created cgroup directory.
 * Returns 0 or the error from cgroup_add_files()/cgroup_add_file().
 */
static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	int err;

	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
	if (err)
		return err;
	/* memory_pressure_enabled is in root cpuset only */
	if (!cont->parent)
		err = cgroup_add_file(cont, ss,
				      &cft_memory_pressure_enabled);
	return err;
}
/*
 * post_clone() is called during cgroup_create() when the
 * clone_children mount argument was specified.  The cgroup
 * can not yet have any tasks.
 *
 * Currently we refuse to set up the cgroup - thereby
 * refusing the task to be entered, and as a result refusing
 * the sys_unshare() or clone() which initiated it - if any
 * sibling cpusets have exclusive cpus or mem.
 *
 * If this becomes a problem for some users who wish to
 * allow that scenario, then cpuset_post_clone() could be
 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
 * held.
 */
static void cpuset_post_clone(struct cgroup_subsys *ss,
			      struct cgroup *cgroup)
{
	struct cgroup *parent = cgroup->parent;
	struct cgroup *child;
	struct cpuset *cs, *parent_cs;

	/* Leave the clone empty if any sibling holds exclusive resources. */
	list_for_each_entry(child, &parent->children, sibling) {
		struct cpuset *sib = cgroup_cs(child);

		if (is_mem_exclusive(sib) || is_cpu_exclusive(sib))
			return;
	}

	/* No exclusive siblings: inherit the parent's cpus and mems. */
	cs = cgroup_cs(cgroup);
	parent_cs = cgroup_cs(parent);

	mutex_lock(&callback_mutex);
	cs->mems_allowed = parent_cs->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
	mutex_unlock(&callback_mutex);
}
/*
 *	cpuset_create - create a cpuset
 *	ss:	cpuset cgroup subsystem
 *	cont:	control group that the new cpuset will be part of
 *
 * Returns the new cpuset's css, the static top_cpuset css for the
 * root cgroup, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct cgroup_subsys_state *cpuset_create(
	struct cgroup_subsys *ss,
	struct cgroup *cont)
{
	struct cpuset *cs;
	struct cpuset *parent;

	/* The root cgroup maps to the statically-allocated top_cpuset. */
	if (!cont->parent) {
		return &top_cpuset.css;
	}
	parent = cgroup_cs(cont->parent);
	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
		kfree(cs);
		return ERR_PTR(-ENOMEM);
	}

	cs->flags = 0;
	/* Spread flags are inherited from the parent cpuset. */
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);
	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	/* cpus and mems start empty; see cpuset_post_clone() for cloning. */
	cpumask_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	cs->parent = parent;
	number_of_cpusets++;
	return &cs->css ;
}
/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call async_rebuild_sched_domains().
 */
static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuset *cs = cgroup_cs(cont);

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	number_of_cpusets--;
	free_cpumask_var(cs->cpus_allowed);
	kfree(cs);
}
/* The cpuset cgroup subsystem: callbacks wired into the cgroup core. */
struct cgroup_subsys cpuset_subsys = {
	.name = "cpuset",
	.create = cpuset_create,
	.destroy = cpuset_destroy,
	.can_attach = cpuset_can_attach,
	.can_attach_task = cpuset_can_attach_task,
	.pre_attach = cpuset_pre_attach,
	.attach_task = cpuset_attach_task,
	.attach = cpuset_attach,
	.populate = cpuset_populate,
	.post_clone = cpuset_post_clone,
	.subsys_id = cpuset_subsys_id,
	.early_init = 1,	/* top_cpuset must exist at early boot */
};
/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 **/

int __init cpuset_init(void)
{
	int err = 0;

	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
		BUG();

	/* top_cpuset starts with all cpus and mems; trimmed later in
	 * cpuset_init_smp() once the online maps are known. */
	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);

	fmeter_init(&top_cpuset.fmeter);
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
		BUG();

	number_of_cpusets = 1;
	return 0;
}
/**
 * cpuset_do_move_task - move a given task to another cpuset
 * @tsk: pointer to task_struct the task to move
 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 * No return value; scan->data holds the destination cgroup.
 */
static void cpuset_do_move_task(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	struct cgroup *new_cgroup = scan->data;

	cgroup_attach_task(new_cgroup, tsk);
}
/**
* move_member_tasks_to_cpuset - move tasks from one cpuset to another
* @from: cpuset in which the tasks currently reside
* @to: cpuset to which the tasks will be moved
*
* Called with cgroup_mutex held
* callback_mutex must not be held, as cpuset_attach() will take it.
*
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*/
static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
{
struct cgroup_scanner scan;
scan.cg = from->css.cgroup;
scan.test_task = NULL; /* select all tasks in cgroup */
scan.process_task = cpuset_do_move_task;
scan.heap = NULL;
scan.data = to->css.cgroup;
if (cgroup_scan_tasks(&scan))
printk(KERN_ERR "move_member_tasks_to_cpuset: "
"cgroup_scan_tasks failed\n");
}
/*
 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 *
 * Called with cgroup_mutex held
 * callback_mutex must not be held, as cpuset_attach() will take it.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
	struct cpuset *parent;

	/*
	 * The cgroup's css_sets list is in use if there are tasks
	 * in the cpuset; the list is empty if there are none;
	 * the cs->css.refcnt seems always 0.
	 */
	if (list_empty(&cs->css.cgroup->css_sets))
		return;

	/*
	 * Find its next-highest non-empty parent, (top cpuset
	 * has online cpus, so can't be empty).
	 */
	parent = cs->parent;
	while (cpumask_empty(parent->cpus_allowed) ||
			nodes_empty(parent->mems_allowed))
		parent = parent->parent;

	move_member_tasks_to_cpuset(cs, parent);
}
/*
 * Walk the specified cpuset subtree and look for empty cpusets.
 * The tasks of such cpuset must be moved to a parent cpuset.
 *
 * Called with cgroup_mutex held.  We take callback_mutex to modify
 * cpus_allowed and mems_allowed.
 *
 * This walk processes the tree from top to bottom, completing one layer
 * before dropping down to the next.  It always processes a node before
 * any of its children.
 *
 * For now, since we lack memory hot unplug, we'll never see a cpuset
 * that has tasks along with an empty 'mems'.  But if we did see such
 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
 */
static void scan_for_empty_cpusets(struct cpuset *root)
{
	LIST_HEAD(queue);		/* breadth-first work queue */
	struct cpuset *cp;		/* scans cpusets being updated */
	struct cpuset *child;		/* scans child cpusets of cp */
	struct cgroup *cont;
	static nodemask_t oldmems;	/* protected by cgroup_mutex */

	/* Each cpuset's stack_list field links it into the queue. */
	list_add_tail((struct list_head *)&root->stack_list, &queue);

	while (!list_empty(&queue)) {
		cp = list_first_entry(&queue, struct cpuset, stack_list);
		list_del(queue.next);
		/* Enqueue children so they are processed after this layer. */
		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &queue);
		}

		/* Continue past cpusets with all cpus, mems online */
		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
			continue;

		/* Save the old mems so tasks' mempolicies can be rebound. */
		oldmems = cp->mems_allowed;

		/* Remove offline cpus and mems from this cpuset. */
		mutex_lock(&callback_mutex);
		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
			    cpu_active_mask);
		nodes_and(cp->mems_allowed, cp->mems_allowed,
						node_states[N_HIGH_MEMORY]);
		mutex_unlock(&callback_mutex);

		/* Move tasks from the empty cpuset to a parent */
		if (cpumask_empty(cp->cpus_allowed) ||
		     nodes_empty(cp->mems_allowed))
			remove_tasks_in_empty_cpuset(cp);
		else {
			update_tasks_cpumask(cp, NULL);
			update_tasks_nodemask(cp, &oldmems, NULL);
		}
	}
}
/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_active_mask on each CPU hotplug (cpuhp) event.
 *
 * Called within get_online_cpus().  Needs to call cgroup_lock()
 * before calling generate_sched_domains().
 */
void cpuset_update_active_cpus(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	cgroup_lock();
	mutex_lock(&callback_mutex);
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	mutex_unlock(&callback_mutex);
	/* Empty cpusets created by the unplug get their tasks moved. */
	scan_for_empty_cpusets(&top_cpuset);
	ndoms = generate_sched_domains(&doms, &attr);
	cgroup_unlock();

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
 * Registered as a memory-hotplug notifier in cpuset_init_smp().
 *
 * Returns NOTIFY_OK in all cases.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
{
	static nodemask_t oldmems;	/* protected by cgroup_mutex */

	cgroup_lock();
	switch (action) {
	case MEM_ONLINE:
		oldmems = top_cpuset.mems_allowed;
		mutex_lock(&callback_mutex);
		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
		mutex_unlock(&callback_mutex);
		/* Rebind task mempolicies to the grown node set. */
		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
		break;
	case MEM_OFFLINE:
		/*
		 * needn't update top_cpuset.mems_allowed explicitly because
		 * scan_for_empty_cpusets() will update it.
		 */
		scan_for_empty_cpusets(&top_cpuset);
		break;
	default:
		break;
	}
	cgroup_unlock();
	return NOTIFY_OK;
}
#endif
/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

	hotplug_memory_notifier(cpuset_track_online_nodes, 10);

	/* Worker used by async_rebuild_sched_domains(); must exist. */
	cpuset_wq = create_singlethread_workqueue("cpuset");
	BUG_ON(!cpuset_wq);
}
/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * tasks cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_cpus(task_cs(tsk), pmask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);
}
/*
 * cpuset_cpus_allowed_fallback - fix up @tsk's cpus_allowed from its
 * cpuset and return a usable cpu, when the task has ended up with no
 * runnable cpu.  Returns a cpu in cpu_active_mask.
 */
int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpuset *cs;
	int cpu;

	/* task_cs() is read locklessly; see the race discussion below. */
	rcu_read_lock();
	cs = task_cs(tsk);
	if (cs)
		do_set_cpus_allowed(tsk, cs->cpus_allowed);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 */

	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
	if (cpu >= nr_cpu_ids) {
		/*
		 * Either tsk->cpus_allowed is wrong (see above) or it
		 * is actually empty. The latter case is only possible
		 * if we are racing with remove_tasks_in_empty_cpuset().
		 * Like above we can temporary set any mask and rely on
		 * set_cpus_allowed_ptr() as synchronization point.
		 */
		do_set_cpus_allowed(tsk, cpu_possible_mask);
		cpu = cpumask_any(cpu_active_mask);
	}

	return cpu;
}
/* Allow current (init, at boot) to allocate on any node. */
void cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}
/**
 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
 * tasks cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_mems(task_cs(tsk), &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}
/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
{
	/* Walk up until a hardwalled cpuset, or the root, is reached. */
	for (; cs->parent; cs = cs->parent) {
		if (is_mem_exclusive(cs) || is_mem_hardwall(cs))
			break;
	}
	return cs;
}
/**
* cpuset_node_allowed_softwall - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
* If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
* set, yes, we can always allocate. If node is in our task's mems_allowed,
* yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
* hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
* OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
* flag, yes.
* Otherwise, no.
*
* If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
* cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
* might sleep, and might allow a node from an enclosing cpuset.
*
* cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
* cpusets, and never sleeps.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
* (in get_page_from_freelist()) refusing to consider the zones for
* any node on the zonelist except the first. By the time any such
* calls get to this routine, we should just shut up and say 'yes'.
*
* GFP_USER allocations are marked with the __GFP_HARDWALL bit,
* and do not allow allocations outside the current tasks cpuset
* unless the task has been OOM killed as is marked TIF_MEMDIE.
* GFP_KERNEL allocations are not so marked, so can escape to the
* nearest enclosing hardwalled ancestor cpuset.
*
* Scanning up parent cpusets requires callback_mutex. The
* __alloc_pages() routine only calls here with __GFP_HARDWALL bit
* _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
* current tasks mems_allowed came up empty on the first pass over
* the zonelist. So only GFP_KERNEL allocations, if all nodes in the
* cpuset are short of memory, might require taking the callback_mutex
* mutex.
*
* The first call here from mm/page_alloc:get_page_from_freelist()
* has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
* so no allocation on a node outside the cpuset is allowed (unless
* in interrupt, of course).
*
* The second pass through get_page_from_freelist() doesn't even call
* here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
* variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
* in alloc_flags. That logic and the checks below have the combined
* affect that:
* in_interrupt - any node ok (current task context irrelevant)
* GFP_ATOMIC - any node ok
* TIF_MEMDIE - any node ok
* GFP_KERNEL - any node in enclosing hardwalled cpuset ok
* GFP_USER - only nodes in current tasks mems allowed ok.
*
* Rule:
* Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
* pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
* the code that might scan up ancestor cpusets and sleep.
*/
/* Softwall check: may current allocate on @node, scanning up ancestor
 * cpusets for the nearest hardwalled one if the node is not directly
 * allowed.  May sleep unless __GFP_HARDWALL is set (see comment above). */
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	const struct cpuset *cs;	/* current cpuset ancestors */
	int allowed;			/* is allocation in zone z allowed? */

	/* Interrupt context or __GFP_THISNODE: placement was decided
	 * elsewhere (zonelist construction), so just say yes. */
	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	/* The ancestor scan below takes callback_mutex and may sleep;
	 * only legal for callers that did not pass __GFP_HARDWALL. */
	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return 0;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return 1;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	mutex_lock(&callback_mutex);
	task_lock(current);
	cs = nearest_hardwall_ancestor(task_cs(current));
	task_unlock(current);
	allowed = node_isset(node, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
	return allowed;
}
/*
* cpuset_node_allowed_hardwall - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
* If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
* set, yes, we can always allocate. If node is in our task's mems_allowed,
* yes. If the task has been OOM killed and has access to memory reserves as
* specified by the TIF_MEMDIE flag, yes.
* Otherwise, no.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
* (in get_page_from_freelist()) refusing to consider the zones for
* any node on the zonelist except the first. By the time any such
* calls get to this routine, we should just shut up and say 'yes'.
*
* Unlike the cpuset_node_allowed_softwall() variant, above,
* this variant requires that the node be in the current task's
* mems_allowed or that we're in interrupt. It does not scan up the
* cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
* It never sleeps.
*/
/* Hardwall check: may current allocate on @node without consulting
 * ancestor cpusets?  Never sleeps (see block comment above). */
int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	/* Interrupt context and __GFP_THISNODE requests always pass:
	 * their placement is enforced elsewhere. */
	if (in_interrupt())
		return 1;
	if (gfp_mask & __GFP_THISNODE)
		return 1;

	/* Node inside the task's allowed set is always fine. */
	if (node_isset(node, current->mems_allowed))
		return 1;

	/* OOM-killed tasks may dip into reserves on any node. */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;

	return 0;
}
/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */
void cpuset_unlock(void)
{
	/* Pairs with the mutex_lock(&callback_mutex) in cpuset_lock(). */
	mutex_unlock(&callback_mutex);
}
/**
* cpuset_mem_spread_node() - On which node to begin search for a file page
* cpuset_slab_spread_node() - On which node to begin search for a slab page
*
* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
* tasks in a cpuset with is_spread_page or is_spread_slab set),
* and if the memory allocation used cpuset_mem_spread_node()
* to determine on which node to start looking, as it will for
* certain page cache or slab cache pages such as used for file
* system buffers and inode caches, then instead of starting on the
* local node to look for a free page, rather spread the starting
* node around the tasks mems_allowed nodes.
*
* We don't have to worry about the returned node being offline
* because "it can't happen", and even if it did, it would be ok.
*
* The routines calling guarantee_online_mems() are careful to
* only set nodes in task->mems_allowed that are online. So it
* should not be possible for the following code to return an
* offline node. But if it did, that would be ok, as this routine
* is not returning the node where the allocation must be, only
* the node where the search should start. The zonelist passed to
* __alloc_pages() will include all nodes. If the slab allocator
* is passed an offline node, it will fall back to the local node.
* See kmem_cache_alloc_node().
*/
/* Advance @rotor round-robin through current->mems_allowed and return
 * the node on which the next spread allocation should start. */
static int cpuset_spread_node(int *rotor)
{
	int next;

	next = next_node(*rotor, current->mems_allowed);
	if (next == MAX_NUMNODES)	/* ran off the end: wrap around */
		next = first_node(current->mems_allowed);

	*rotor = next;
	return next;
}
int cpuset_mem_spread_node(void)
{
return cpuset_spread_node(¤t->cpuset_mem_spread_rotor);
}
int cpuset_slab_spread_node(void)
{
return cpuset_spread_node(¤t->cpuset_slab_spread_rotor);
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
* cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
* @tsk1: pointer to task_struct of some task.
* @tsk2: pointer to task_struct of some other task.
*
* Description: Return true if @tsk1's mems_allowed intersects the
* mems_allowed of @tsk2. Used by the OOM killer to determine if
* one of the task's memory usage might impact the memory available
* to the other.
**/
int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	/* True when the two tasks share at least one allowed memory node;
	 * used by the OOM killer (see kernel-doc above). */
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/**
* cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
* @task: pointer to task_struct of some task.
*
* Description: Prints @task's name, cpuset name, and cached copy of its
* mems_allowed to the kernel log. Must hold task_lock(task) to allow
* dereferencing task_cs(task).
*/
/* Print @tsk's name, cpuset name and cached mems_allowed to the log.
 * Caller must hold task_lock(tsk) so task_cs(tsk) is stable. */
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
	struct dentry *dentry;

	dentry = task_cs(tsk)->css.cgroup->dentry;
	spin_lock(&cpuset_buffer_lock);
	/*
	 * Fixed: the dentry name was passed as the snprintf *format*
	 * string; a cpuset name containing '%' would be interpreted as
	 * format directives.  Always format through a literal "%s".
	 */
	snprintf(cpuset_name, CPUSET_NAME_LEN, "%s",
		 dentry ? (const char *)dentry->d_name.name : "/");
	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
			   tsk->mems_allowed);
	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
	       tsk->comm, cpuset_name, cpuset_nodelist);
	spin_unlock(&cpuset_buffer_lock);
}
/*
* Collection of memory_pressure is suppressed unless
* this flag is enabled by writing "1" to the special
* cpuset file 'memory_pressure_enabled' in the root cpuset.
*/
int cpuset_memory_pressure_enabled __read_mostly;
/**
* cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
*
* Keep a running average of the rate of synchronous (direct)
* page reclaim efforts initiated by tasks in each cpuset.
*
* This represents the rate at which some task in the cpuset
* ran low on memory on all nodes it was allowed to use, and
* had to enter the kernels page reclaim code in an effort to
* create more free memory by tossing clean pages or swapping
* or writing dirty pages.
*
* Display to user space in the per-cpuset read-only file
* "memory_pressure". Value displayed is an integer
* representing the recent rate of entry into the synchronous
* (direct) page reclaim by any task attached to the cpuset.
**/
void __cpuset_memory_pressure_bump(void)
{
	/* task_lock keeps task_cs(current) stable while we record one
	 * direct-reclaim event in the cpuset's frequency meter. */
	task_lock(current);
	fmeter_markevent(&task_cs(current)->fmeter);
	task_unlock(current);
}
#ifdef CONFIG_PROC_PID_CPUSET
/*
* proc_cpuset_show()
* - Print tasks cpuset path into seq_file.
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take cgroup_mutex, keeping cpuset_attach() from changing it
* anyway.
*/
/* seq_file show handler for /proc/<pid>/cpuset: print the task's cpuset
 * path.  Uses the classic goto-cleanup chain; retval carries the error
 * for whichever step failed. */
static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;	/* pid was stashed by cpuset_open() */
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = -EINVAL;
	/* cgroup_lock keeps the task's cgroup attachment stable. */
	cgroup_lock();
	css = task_subsys_state(tsk, cpuset_subsys_id);
	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
	if (retval < 0)
		goto out_unlock;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out_unlock:
	cgroup_unlock();
	put_task_struct(tsk);	/* balances get_pid_task() */
out_free:
	kfree(buf);
out:
	return retval;
}
/* open() for /proc/<pid>/cpuset: hand the task's pid to the seq_file
 * machinery as private data for proc_cpuset_show(). */
static int cpuset_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cpuset_show, PROC_I(inode)->pid);
}
const struct file_operations proc_cpuset_operations = {
.open = cpuset_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_PID_CPUSET */
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	/* Emit both a hex-mask and a list rendering of mems_allowed. */
	seq_printf(m, "Mems_allowed:\t");
	seq_nodemask(m, &task->mems_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Mems_allowed_list:\t");
	seq_nodemask_list(m, &task->mems_allowed);
	seq_printf(m, "\n");
}
| gpl-2.0 |
xedp3x/openwrt | package/libs/libnl-tiny/src/object.c | 343 | 5903 | /*
* lib/object.c Generic Cacheable Object
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation version 2.1
* of the License.
*
* Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch>
*/
/**
* @ingroup cache
* @defgroup object Object
* @{
*/
#include <netlink-local.h>
#include <netlink/netlink.h>
#include <netlink/cache.h>
#include <netlink/object.h>
#include <netlink/utils.h>
/* Return the object's operations table; a missing table is a fatal bug. */
static inline struct nl_object_ops *obj_ops(struct nl_object *obj)
{
	struct nl_object_ops *ops = obj->ce_ops;

	if (!ops)
		BUG();

	return ops;
}
/**
* @name Object Creation/Deletion
* @{
*/
/**
* Allocate a new object of kind specified by the operations handle
* @arg ops cache operations handle
* @return The new object or NULL
*/
struct nl_object *nl_object_alloc(struct nl_object_ops *ops)
{
	struct nl_object *new;

	/* Every concrete object type must embed struct nl_object at
	 * its head, so oo_size can never be smaller. */
	if (ops->oo_size < sizeof(*new))
		BUG();
	new = calloc(1, ops->oo_size);	/* zeroed, caller-owned */
	if (!new)
		return NULL;
	new->ce_refcnt = 1;	/* caller holds the initial reference */
	nl_init_list_head(&new->ce_list);
	new->ce_ops = ops;
	/* Let the subtype initialise its type-specific defaults. */
	if (ops->oo_constructor)
		ops->oo_constructor(new);
	NL_DBG(4, "Allocated new object %p\n", new);
	return new;
}
#ifdef disabled
/**
* Allocate a new object of kind specified by the name
* @arg kind name of object type
* @return The new object or nULL
*/
/* Allocate an object by cache-type name (compiled out via #ifdef disabled).
 * Returns 0 on success, -NLE_OPNOTSUPP for unknown kind, -NLE_NOMEM on OOM. */
int nl_object_alloc_name(const char *kind, struct nl_object **result)
{
	struct nl_cache_ops *ops;

	ops = nl_cache_ops_lookup(kind);
	if (!ops)
		return -NLE_OPNOTSUPP;
	if (!(*result = nl_object_alloc(ops->co_obj_ops)))
		return -NLE_NOMEM;
	return 0;
}
#endif
struct nl_derived_object {
NLHDR_COMMON
char data;
};
/**
* Allocate a new object and copy all data from an existing object
* @arg obj object to inherite data from
* @return The new object or NULL.
*/
struct nl_object *nl_object_clone(struct nl_object *obj)
{
	struct nl_object *new;
	struct nl_object_ops *ops = obj_ops(obj);
	/* doff = offset of the first byte of derived (non-header) data. */
	int doff = offsetof(struct nl_derived_object, data);
	int size;

	new = nl_object_alloc(ops);
	if (!new)
		return NULL;
	size = ops->oo_size - doff;	/* bytes of derived payload */
	if (size < 0)
		BUG();
	new->ce_ops = obj->ce_ops;
	new->ce_msgtype = obj->ce_msgtype;
	/* Shallow-copy the derived payload past the common header. */
	if (size)
		memcpy((void *)new + doff, (void *)obj + doff, size);
	/* Deep-copy hook: owns fixing up any pointers the memcpy aliased. */
	if (ops->oo_clone) {
		if (ops->oo_clone(new, obj) < 0) {
			nl_object_free(new);
			return NULL;
		}
	} else if (size && ops->oo_free_data)
		/* Type frees data but cannot clone it: shallow copy would
		 * double-free, so this combination is a bug. */
		BUG();
	return new;
}
/**
 * Free a cacheable object
 * @arg obj		object to free
 *
 * Removes the object from its cache (if any), releases type-specific
 * data via oo_free_data() and frees the object itself.
 */
void nl_object_free(struct nl_object *obj)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (obj->ce_refcnt > 0)
		NL_DBG(1, "Warning: Freeing object in use...\n");

	if (obj->ce_cache)
		nl_cache_remove(obj);

	if (ops->oo_free_data)
		ops->oo_free_data(obj);

	/* Log before free(): using even the value of a freed pointer is
	 * undefined behavior in C (and the old @return doc was stale —
	 * this function is void). */
	NL_DBG(4, "Freed object %p\n", obj);

	free(obj);
}
/** @} */
/**
* @name Reference Management
* @{
*/
/** @} */
/**
* @name Utillities
* @{
*/
#ifdef disabled
/**
* Dump this object according to the specified parameters
* @arg obj object to dump
* @arg params dumping parameters
*/
void nl_object_dump(struct nl_object *obj, struct nl_dump_params *params)
{
dump_from_ops(obj, params);
}
/**
* Check if the identifiers of two objects are identical
* @arg a an object
* @arg b another object of same type
*
* @return true if both objects have equal identifiers, otherwise false.
*/
int nl_object_identical(struct nl_object *a, struct nl_object *b)
{
	struct nl_object_ops *ops = obj_ops(a);
	int req_attrs;

	/* Both objects must be of same type */
	if (ops != obj_ops(b))
		return 0;

	/* oo_id_attrs is the bitmask of attributes that together form
	 * the object's unique identity. */
	req_attrs = ops->oo_id_attrs;

	/* Both objects must provide all required attributes to uniquely
	 * identify an object */
	if ((a->ce_mask & req_attrs) != req_attrs ||
	    (b->ce_mask & req_attrs) != req_attrs)
		return 0;

	/* Can't judge unless we can compare */
	if (ops->oo_compare == NULL)
		return 0;

	/* oo_compare returns 0 when the selected attributes match. */
	return !(ops->oo_compare(a, b, req_attrs, 0));
}
/**
* Compute bitmask representing difference in attribute values
* @arg a an object
* @arg b another object of same type
*
* The bitmask returned is specific to an object type, each bit set represents
* an attribute which mismatches in either of the two objects. Unavailability
* of an attribute in one object and presence in the other is regarded a
* mismatch as well.
*
* @return Bitmask describing differences or 0 if they are completely identical.
*/
/* Bitmask of mismatching attributes between @a and @b; UINT_MAX when the
 * objects are of different types or cannot be compared at all. */
uint32_t nl_object_diff(struct nl_object *a, struct nl_object *b)
{
	struct nl_object_ops *ops = obj_ops(a);

	if (obj_ops(b) != ops || !ops->oo_compare)
		return UINT_MAX;

	/* ~0: compare every attribute, strict (non-loose) semantics. */
	return ops->oo_compare(a, b, ~0, 0);
}
/**
* Match a filter against an object
* @arg obj object to check
* @arg filter object of same type acting as filter
*
* @return 1 if the object matches the filter or 0
* if no filter procedure is available or if the
* filter does not match.
*/
/* Return 1 when @obj matches @filter (same type, loose comparison over
 * only the attributes present in the filter), 0 otherwise. */
int nl_object_match_filter(struct nl_object *obj, struct nl_object *filter)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (obj_ops(filter) != ops || !ops->oo_compare)
		return 0;

	return ops->oo_compare(obj, filter, filter->ce_mask,
			       LOOSE_COMPARISON) == 0;
}
/**
* Convert bitmask of attributes to a character string
* @arg obj object of same type as attribute bitmask
* @arg attrs bitmask of attribute types
* @arg buf destination buffer
* @arg len length of destination buffer
*
* Converts the bitmask of attribute types into a list of attribute
* names separated by comas.
*
* @return destination buffer.
*/
/* Render @attrs as a comma-separated attribute-name list into @buf;
 * yields an empty string when the type has no formatter. */
char *nl_object_attrs2str(struct nl_object *obj, uint32_t attrs,
			  char *buf, size_t len)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (!ops->oo_attrs2str) {
		memset(buf, 0, len);
		return buf;
	}

	return ops->oo_attrs2str(attrs, buf, len);
}
#endif
/** @} */
/** @} */
| gpl-2.0 |
uoaerg/linux-dccp | drivers/media/usb/dvb-usb-v2/usb_urb.c | 343 | 9688 | /* usb-urb.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file keeps functions for initializing and handling the
* BULK and ISOC USB data transfers in a generic way.
* Can be used for DVB-only and also, that's the plan, for
* Hybrid USB devices (analog and DVB).
*/
#include "dvb_usb_common.h"
/* URB stuff for streaming */
int usb_urb_reconfig(struct usb_data_stream *stream,
struct usb_data_stream_properties *props);
/* URB completion handler (atomic context): deliver received data to the
 * stream's complete() callback and resubmit the URB, unless it was killed. */
static void usb_urb_complete(struct urb *urb)
{
	struct usb_data_stream *stream = urb->context;
	int ptype = usb_pipetype(urb->pipe);
	int i;
	u8 *b;

	dev_dbg_ratelimited(&stream->udev->dev,
			"%s: %s urb completed status=%d length=%d/%d pack_num=%d errors=%d\n",
			__func__, ptype == PIPE_ISOCHRONOUS ? "isoc" : "bulk",
			urb->status, urb->actual_length,
			urb->transfer_buffer_length,
			urb->number_of_packets, urb->error_count);

	switch (urb->status) {
	case 0:         /* success */
	case -ETIMEDOUT:        /* NAK */
		break;
	case -ECONNRESET:       /* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was unlinked/killed: do NOT resubmit. */
		return;
	default:        /* error */
		dev_dbg_ratelimited(&stream->udev->dev,
				"%s: urb completition failed=%d\n",
				__func__, urb->status);
		break;
	}

	b = (u8 *) urb->transfer_buffer;
	switch (ptype) {
	case PIPE_ISOCHRONOUS:
		/* Deliver each ISO frame individually, then reset its
		 * descriptor for the resubmission below. */
		for (i = 0; i < urb->number_of_packets; i++) {
			if (urb->iso_frame_desc[i].status != 0)
				dev_dbg(&stream->udev->dev,
						"%s: iso frame descriptor has an error=%d\n",
						__func__,
						urb->iso_frame_desc[i].status);
			else if (urb->iso_frame_desc[i].actual_length > 0)
				stream->complete(stream,
						b + urb->iso_frame_desc[i].offset,
						urb->iso_frame_desc[i].actual_length);

			urb->iso_frame_desc[i].status = 0;
			urb->iso_frame_desc[i].actual_length = 0;
		}
		break;
	case PIPE_BULK:
		if (urb->actual_length > 0)
			stream->complete(stream, b, urb->actual_length);
		break;
	default:
		dev_err(&stream->udev->dev,
				"%s: unknown endpoint type in completition handler\n",
				KBUILD_MODNAME);
		return;
	}
	/* Hand the URB straight back to the HC to keep the stream going. */
	usb_submit_urb(urb, GFP_ATOMIC);
}
/* Synchronously cancel every URB currently submitted on @stream. */
int usb_urb_killv2(struct usb_data_stream *stream)
{
	int i;

	for (i = 0; i < stream->urbs_submitted; i++) {
		dev_dbg(&stream->udev->dev, "%s: kill urb=%d\n", __func__, i);
		/* usb_kill_urb() waits until the URB is fully reaped. */
		usb_kill_urb(stream->urb_list[i]);
	}

	stream->urbs_submitted = 0;
	return 0;
}
/* Submit all initialized URBs, optionally reconfiguring the stream first.
 * On any submit failure all already-submitted URBs are killed again. */
int usb_urb_submitv2(struct usb_data_stream *stream,
		struct usb_data_stream_properties *props)
{
	int i, ret;

	if (props) {
		/* Re-shape URBs/buffers for the new properties first. */
		ret = usb_urb_reconfig(stream, props);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < stream->urbs_initialized; i++) {
		dev_dbg(&stream->udev->dev, "%s: submit urb=%d\n", __func__, i);
		ret = usb_submit_urb(stream->urb_list[i], GFP_ATOMIC);
		if (ret) {
			dev_err(&stream->udev->dev,
					"%s: could not submit urb no. %d - get them all back\n",
					KBUILD_MODNAME, i);
			/* All-or-nothing: unwind the partial submission. */
			usb_urb_killv2(stream);
			return ret;
		}
		stream->urbs_submitted++;
	}

	return 0;
}
/* Kill and release every allocated URB of @stream (reverse order). */
static int usb_urb_free_urbs(struct usb_data_stream *stream)
{
	int i;

	/* Nothing may be in flight while we free. */
	usb_urb_killv2(stream);

	for (i = stream->urbs_initialized; i-- > 0; ) {
		if (!stream->urb_list[i])
			continue;
		dev_dbg(&stream->udev->dev, "%s: free urb=%d\n",
				__func__, i);
		usb_free_urb(stream->urb_list[i]);
	}

	stream->urbs_initialized = 0;
	return 0;
}
/* Allocate and pre-fill one bulk-IN URB per configured buffer; frees the
 * partial set and returns -ENOMEM on allocation failure. */
static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
{
	int i, j;

	/* allocate the URBs */
	for (i = 0; i < stream->props.count; i++) {
		dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i);
		stream->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
		if (!stream->urb_list[i]) {
			dev_dbg(&stream->udev->dev, "%s: failed\n", __func__);
			/* roll back URBs allocated so far */
			for (j = 0; j < i; j++)
				usb_free_urb(stream->urb_list[j]);
			return -ENOMEM;
		}
		usb_fill_bulk_urb(stream->urb_list[i],
				stream->udev,
				usb_rcvbulkpipe(stream->udev,
						stream->props.endpoint),
				stream->buf_list[i],
				stream->props.u.bulk.buffersize,
				usb_urb_complete, stream);

		/* Buffers are preallocated DMA-coherent: hand the HC the
		 * dma address directly, skip its own mapping. */
		stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		stream->urb_list[i]->transfer_dma = stream->dma_addr[i];
		stream->urbs_initialized++;
	}
	return 0;
}
/* Allocate and hand-fill one isochronous-IN URB per configured buffer,
 * laying out framesperurb equally-sized frame descriptors per URB. */
static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
{
	int i, j;

	/* allocate the URBs */
	for (i = 0; i < stream->props.count; i++) {
		struct urb *urb;
		int frame_offset = 0;

		dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i);
		stream->urb_list[i] = usb_alloc_urb(
				stream->props.u.isoc.framesperurb, GFP_ATOMIC);
		if (!stream->urb_list[i]) {
			dev_dbg(&stream->udev->dev, "%s: failed\n", __func__);
			/* roll back URBs allocated so far */
			for (j = 0; j < i; j++)
				usb_free_urb(stream->urb_list[j]);
			return -ENOMEM;
		}

		urb = stream->urb_list[i];

		urb->dev = stream->udev;
		urb->context = stream;
		urb->complete = usb_urb_complete;
		urb->pipe = usb_rcvisocpipe(stream->udev,
				stream->props.endpoint);
		/* DMA-coherent buffer already allocated: no HC mapping. */
		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		urb->interval = stream->props.u.isoc.interval;
		urb->number_of_packets = stream->props.u.isoc.framesperurb;
		urb->transfer_buffer_length = stream->props.u.isoc.framesize *
				stream->props.u.isoc.framesperurb;
		urb->transfer_buffer = stream->buf_list[i];
		urb->transfer_dma = stream->dma_addr[i];

		/* Consecutive fixed-size frames within the buffer. */
		for (j = 0; j < stream->props.u.isoc.framesperurb; j++) {
			urb->iso_frame_desc[j].offset = frame_offset;
			urb->iso_frame_desc[j].length =
					stream->props.u.isoc.framesize;
			frame_offset += stream->props.u.isoc.framesize;
		}

		stream->urbs_initialized++;
	}
	return 0;
}
/* Release the stream's DMA-coherent buffers (if any were allocated)
 * and clear the URB_BUF state bit. */
static int usb_free_stream_buffers(struct usb_data_stream *stream)
{
	if (stream->state & USB_STATE_URB_BUF) {
		while (stream->buf_num > 0) {
			stream->buf_num--;
			dev_dbg(&stream->udev->dev, "%s: free buf=%d\n",
					__func__, stream->buf_num);
			usb_free_coherent(stream->udev, stream->buf_size,
					  stream->buf_list[stream->buf_num],
					  stream->dma_addr[stream->buf_num]);
		}
	}

	stream->state &= ~USB_STATE_URB_BUF;
	return 0;
}
/* Allocate @num DMA-coherent buffers of @size bytes each; on failure the
 * buffers allocated so far are released via usb_free_stream_buffers(). */
static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
		unsigned long size)
{
	stream->buf_num = 0;
	stream->buf_size = size;

	dev_dbg(&stream->udev->dev,
			"%s: all in all I will use %lu bytes for streaming\n",
			__func__,  num * size);

	for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
		stream->buf_list[stream->buf_num] = usb_alloc_coherent(
				stream->udev, size, GFP_ATOMIC,
				&stream->dma_addr[stream->buf_num]);
		if (!stream->buf_list[stream->buf_num]) {
			dev_dbg(&stream->udev->dev, "%s: alloc buf=%d failed\n",
					__func__, stream->buf_num);
			/* buf_num counts successful allocations: free them */
			usb_free_stream_buffers(stream);
			return -ENOMEM;
		}

		dev_dbg(&stream->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n",
				__func__, stream->buf_num,
				stream->buf_list[stream->buf_num],
				(long long)stream->dma_addr[stream->buf_num]);
		memset(stream->buf_list[stream->buf_num], 0, size);
		/* Mark that at least one buffer exists and must be freed. */
		stream->state |= USB_STATE_URB_BUF;
	}

	return 0;
}
/* Reconfigure the stream for new transfer properties, reusing the already
 * allocated DMA buffers; re-allocates URBs only when something changed. */
int usb_urb_reconfig(struct usb_data_stream *stream,
		struct usb_data_stream_properties *props)
{
	int buf_size;

	if (!props)
		return 0;

	/* check allocated buffers are large enough for the request */
	if (props->type == USB_BULK) {
		/* NOTE(review): this reads the *current* configuration's
		 * buffersize, not props->u.bulk.buffersize as the ISOC
		 * branch does for the new request — looks inconsistent;
		 * confirm against the intended semantics before changing. */
		buf_size = stream->props.u.bulk.buffersize;
	} else if (props->type == USB_ISOC) {
		buf_size = props->u.isoc.framesize * props->u.isoc.framesperurb;
	} else {
		dev_err(&stream->udev->dev, "%s: invalid endpoint type=%d\n",
				KBUILD_MODNAME, props->type);
		return -EINVAL;
	}

	if (stream->buf_num < props->count || stream->buf_size < buf_size) {
		dev_err(&stream->udev->dev,
				"%s: cannot reconfigure as allocated buffers are too small\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}

	/* check if all fields are same */
	if (stream->props.type == props->type &&
			stream->props.count == props->count &&
			stream->props.endpoint == props->endpoint) {
		/* identical configuration: nothing to re-allocate */
		if (props->type == USB_BULK &&
				props->u.bulk.buffersize ==
				stream->props.u.bulk.buffersize)
			return 0;
		else if (props->type == USB_ISOC &&
				props->u.isoc.framesperurb ==
				stream->props.u.isoc.framesperurb &&
				props->u.isoc.framesize ==
				stream->props.u.isoc.framesize &&
				props->u.isoc.interval ==
				stream->props.u.isoc.interval)
			return 0;
	}

	dev_dbg(&stream->udev->dev, "%s: re-alloc urbs\n", __func__);

	/* Something changed: rebuild URBs against the kept buffers. */
	usb_urb_free_urbs(stream);
	memcpy(&stream->props, props, sizeof(*props));
	if (props->type == USB_BULK)
		return usb_urb_alloc_bulk_urbs(stream);
	else if (props->type == USB_ISOC)
		return usb_urb_alloc_isoc_urbs(stream);

	return 0;
}
/* One-time stream setup: store properties, allocate DMA buffers sized for
 * the transfer type, then allocate the matching bulk or isoc URBs. */
int usb_urb_initv2(struct usb_data_stream *stream,
		const struct usb_data_stream_properties *props)
{
	int ret;

	if (!stream || !props)
		return -EINVAL;

	memcpy(&stream->props, props, sizeof(*props));

	/* Received data has nowhere to go without a callback. */
	if (!stream->complete) {
		dev_err(&stream->udev->dev,
				"%s: there is no data callback - this doesn't make sense\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}

	switch (stream->props.type) {
	case USB_BULK:
		ret = usb_alloc_stream_buffers(stream, stream->props.count,
				stream->props.u.bulk.buffersize);
		if (ret < 0)
			return ret;

		return usb_urb_alloc_bulk_urbs(stream);
	case USB_ISOC:
		/* one buffer holds a whole URB's worth of frames */
		ret = usb_alloc_stream_buffers(stream, stream->props.count,
				stream->props.u.isoc.framesize *
				stream->props.u.isoc.framesperurb);
		if (ret < 0)
			return ret;

		return usb_urb_alloc_isoc_urbs(stream);
	default:
		dev_err(&stream->udev->dev,
				"%s: unknown urb-type for data transfer\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}
}
/* Tear down a streaming pipe: kill/free all URBs, then their buffers. */
int usb_urb_exitv2(struct usb_data_stream *stream)
{
	usb_urb_free_urbs(stream);
	usb_free_stream_buffers(stream);

	return 0;
}
| gpl-2.0 |
fanyukui/linux3.12.10 | arch/arm/mach-zynq/hotplug.c | 599 | 1271 | /*
* Copyright (C) 2012-2013 Xilinx
*
* based on linux/arch/arm/mach-realview/hotplug.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include "common.h"
/* Prepare this CPU for power-down: flush caches, then drop it out of SMP
 * coherency and disable its data cache.  Inline asm left byte-identical;
 * the instruction sequence/order is architecturally significant. */
static inline void zynq_cpu_enter_lowpower(void)
{
	unsigned int v;

	flush_cache_all();
	asm volatile(
	/* invalidate instruction cache to PoU */
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency
	 */
	/* clear ACTLR.SMP (bit 6), then clear SCTLR.C via the CR_C mask */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x40\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C)
	  : "cc");
}
/*
* platform-specific code to shutdown a CPU
*
* Called with IRQs disabled
*/
void zynq_platform_cpu_die(unsigned int cpu)
{
	zynq_cpu_enter_lowpower();

	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	/* never returns: the core idles in WFI until reset */
	for (;;)
		cpu_do_idle();
}
| gpl-2.0 |
Negamann303/kernel-ng2-negalite | arch/arm/mach-msm/acpuclock.c | 1111 | 1750 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/cpu.h>
#include <linux/smp.h>
#include "acpuclock.h"
#include <trace/events/power.h>
static struct acpuclk_data *acpuclk_data;
unsigned long acpuclk_get_rate(int cpu)
{
if (!acpuclk_data->get_rate)
return 0;
return acpuclk_data->get_rate(cpu);
}
/* Switch @cpu's clock to @rate for @reason, emitting the cpufreq trace
 * events around a successful switch.  No-op (returns 0) without a hook. */
int acpuclk_set_rate(int cpu, unsigned long rate, enum setrate_reason reason)
{
	int ret;

	if (!acpuclk_data->set_rate)
		return 0;

	trace_cpu_frequency_switch_start(acpuclk_get_rate(cpu), rate, cpu);
	ret = acpuclk_data->set_rate(cpu, rate, reason);
	/* Only trace completion/new frequency when the switch succeeded. */
	if (!ret) {
		trace_cpu_frequency_switch_end(cpu);
		trace_cpu_frequency(rate, cpu);
	}

	return ret;
}
/* Worst-case rate-switch latency in microseconds, as reported by the
 * registered driver. */
uint32_t acpuclk_get_switch_time(void)
{
	return acpuclk_data->switch_time_us;
}
/* Drop the local CPU to its power-collapse rate; returns the previous
 * rate so the caller can restore it on resume. */
unsigned long acpuclk_power_collapse(void)
{
	unsigned long old_rate = acpuclk_get_rate(smp_processor_id());

	acpuclk_set_rate(smp_processor_id(), acpuclk_data->power_collapse_khz,
			 SETRATE_PC);

	return old_rate;
}
/* Drop the local CPU to its wait-for-irq (SWFI) rate; returns the
 * previous rate so the caller can restore it afterwards. */
unsigned long acpuclk_wait_for_irq(void)
{
	unsigned long old_rate = acpuclk_get_rate(smp_processor_id());

	acpuclk_set_rate(smp_processor_id(), acpuclk_data->wait_for_irq_khz,
			 SETRATE_SWFI);

	return old_rate;
}
/* Install the SoC-specific clock driver ops used by all entry points above. */
void __devinit acpuclk_register(struct acpuclk_data *data)
{
	acpuclk_data = data;
}
| gpl-2.0 |
EloYGomeZ/test_kernel_g620s | drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c | 1111 | 11189 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/syscalls.h>
#include "usfcdev.h"
#define UNDEF_ID 0xffffffff
#define SLOT_CMD_ID 0
#define MAX_RETRIES 10
enum usdev_event_status {
USFCDEV_EVENT_ENABLED,
USFCDEV_EVENT_DISABLING,
USFCDEV_EVENT_DISABLED,
};
struct usfcdev_event {
bool (*match_cb)(uint16_t, struct input_dev *dev);
bool registered_event;
bool interleaved;
enum usdev_event_status event_status;
};
static struct usfcdev_event s_usfcdev_events[MAX_EVENT_TYPE_NUM];
struct usfcdev_input_command {
unsigned int type;
unsigned int code;
unsigned int value;
};
static long s_usf_pid;
static bool usfcdev_filter(struct input_handle *handle,
unsigned int type, unsigned int code, int value);
static bool usfcdev_match(struct input_handler *handler,
struct input_dev *dev);
static int usfcdev_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id);
static void usfcdev_disconnect(struct input_handle *handle);
static const struct input_device_id usfc_tsc_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
/* assumption: ABS_X & ABS_Y are in the same long */
.absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) |
BIT_MASK(ABS_Y) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
/* assumption: MT_.._X & MT_.._Y are in the same long */
.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
BIT_MASK(ABS_MT_POSITION_X) |
BIT_MASK(ABS_MT_POSITION_Y) },
},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(input, usfc_tsc_ids);
static struct input_handler s_usfc_handlers[MAX_EVENT_TYPE_NUM] = {
{ /* TSC handler */
.filter = usfcdev_filter,
.match = usfcdev_match,
.connect = usfcdev_connect,
.disconnect = usfcdev_disconnect,
/* .minor can be used as index in the container, */
/* because .fops isn't supported */
.minor = TSC_EVENT_TYPE_IND,
.name = "usfc_tsc_handler",
.id_table = usfc_tsc_ids,
},
};
/*
* For each event type, there are a number conflicting devices (handles)
* The first registered device (primary) is real TSC device; it's mandatory
* Optionally, later registered devices are simulated ones.
* They are dynamically managed
* The primary device's handles are stored in the below static array
*/
static struct input_handle s_usfc_primary_handles[MAX_EVENT_TYPE_NUM] = {
{ /* TSC handle */
.handler = &s_usfc_handlers[TSC_EVENT_TYPE_IND],
.name = "usfc_tsc_handle",
},
};
static struct usfcdev_input_command initial_clear_cmds[] = {
{EV_ABS, ABS_PRESSURE, 0},
{EV_KEY, BTN_TOUCH, 0},
};
static struct usfcdev_input_command slot_clear_cmds[] = {
{EV_ABS, ABS_MT_SLOT, 0},
{EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
};
static struct usfcdev_input_command no_filter_cmds[] = {
{EV_ABS, ABS_MT_SLOT, 0},
{EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
{EV_SYN, SYN_REPORT, 0},
};
/* input-handler match(): delegate to the registered per-event-type
 * callback; devices never match an unregistered event type. */
static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev)
{
	int ind = handler->minor;
	bool matched = false;

	pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);

	if (s_usfcdev_events[ind].registered_event &&
	    s_usfcdev_events[ind].match_cb) {
		matched = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
		pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, matched);
	}

	return matched;
}
/* input-handler connect(): attach to a matching device.  The first device
 * per event type uses the static primary handle; later (simulated) ones
 * get a dynamically allocated handle freed in usfcdev_disconnect(). */
static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev,
			   const struct input_device_id *id)
{
	int ret = 0;
	uint16_t ind = handler->minor;
	struct input_handle *usfc_handle = NULL;

	if (s_usfc_primary_handles[ind].dev == NULL) {
		pr_debug("%s: primary device; ind=%d\n",
			 __func__,
			 ind);
		usfc_handle = &s_usfc_primary_handles[ind];
	} else {
		pr_debug("%s: secondary device; ind=%d\n",
			 __func__,
			 ind);
		usfc_handle = kzalloc(sizeof(struct input_handle),
				      GFP_KERNEL);
		if (!usfc_handle) {
			pr_err("%s: memory allocation failed; ind=%d\n",
			       __func__,
			       ind);
			return -ENOMEM;
		}
		usfc_handle->handler = &s_usfc_handlers[ind];
		usfc_handle->name = s_usfc_primary_handles[ind].name;
	}
	usfc_handle->dev = dev;
	ret = input_register_handle(usfc_handle);
	pr_debug("%s: name=[%s]; ind=%d; dev=0x%p\n",
		 __func__,
		 dev->name,
		 ind,
		 usfc_handle->dev);
	if (ret)
		pr_err("%s: input_register_handle[%d] failed: ret=%d\n",
		       __func__,
		       ind,
		       ret);
	else {
		ret = input_open_device(usfc_handle);
		if (ret) {
			pr_err("%s: input_open_device[%d] failed: ret=%d\n",
			       __func__,
			       ind,
			       ret);
			/* undo the registration on open failure */
			input_unregister_handle(usfc_handle);
		} else
			pr_debug("%s: device[%d] is opened\n",
				 __func__,
				 ind);
	}
	return ret;
}
/* input-handler disconnect(): close and unregister @handle; primary
 * (static) handles are just cleared, secondary ones were kzalloc'ed in
 * usfcdev_connect() and are freed here. */
static void usfcdev_disconnect(struct input_handle *handle)
{
	int ind = handle->handler->minor;

	input_close_device(handle);
	input_unregister_handle(handle);
	pr_debug("%s: handle[%d], name=[%s] is disconnected\n",
		 __func__,
		 ind,
		 handle->dev->name);
	if (s_usfc_primary_handles[ind].dev == handle->dev)
		s_usfc_primary_handles[ind].dev = NULL;
	else
		kfree(handle);
}
/* input-handler filter(): returns true to SWALLOW an event.  Events are
 * blocked while the event type is not ENABLED, except (a) events injected
 * by the usfcdev driver task itself and (b) slot-release events from the
 * TSC driver while in the DISABLING transition state. */
static bool usfcdev_filter(struct input_handle *handle,
			   unsigned int type, unsigned int code, int value)
{
	uint16_t i = 0;
	uint16_t ind = (uint16_t)handle->handler->minor;
	/* default: filter whenever the event type is not fully enabled */
	bool rc = (s_usfcdev_events[ind].event_status != USFCDEV_EVENT_ENABLED);

	if (s_usf_pid == sys_getpid()) {
		/* Pass events from usfcdev driver */
		rc = false;
		pr_debug("%s: event_type=%d; type=%d; code=%d; val=%d",
			 __func__,
			 ind,
			 type,
			 code,
			 value);
	} else if (s_usfcdev_events[ind].event_status ==
						USFCDEV_EVENT_DISABLING) {
		uint32_t u_value = value;
		/* remember that foreign events arrived mid-transition */
		s_usfcdev_events[ind].interleaved = true;
		/* Pass events for freeing slots from TSC driver */
		for (i = 0; i < ARRAY_SIZE(no_filter_cmds); ++i) {
			if ((no_filter_cmds[i].type == type) &&
			    (no_filter_cmds[i].code == code) &&
			    (no_filter_cmds[i].value <= u_value)) {
				rc = false;
				pr_debug("%s: no_filter_cmds[%d]; %d",
					 __func__,
					 i,
					 no_filter_cmds[i].value);
				break;
			}
		}
	}

	return rc;
}
/*
 * usfcdev_register - hook the USF conflict filter onto one input event type.
 * @event_type_ind: index into the handler/event tables
 *                  (must be < MAX_EVENT_TYPE_NUM).
 * @match_cb: callback deciding whether a given input device is hooked.
 *
 * The bookkeeping (registered_event/match_cb/event_status) is filled in
 * BEFORE input_register_handler(), because registration may immediately
 * trigger our match/connect callbacks, which read that state; it is
 * rolled back if registration fails.
 *
 * Returns true when the handler is (or already was) registered.
 */
bool usfcdev_register(
	uint16_t event_type_ind,
	bool (*match_cb)(uint16_t, struct input_dev *dev))
{
	int ret = 0;
	bool rc = false;

	if ((event_type_ind >= MAX_EVENT_TYPE_NUM) || !match_cb) {
		pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%p\n",
			__func__,
			event_type_ind,
			match_cb);
		return false;
	}
	if (s_usfcdev_events[event_type_ind].registered_event) {
		/* Idempotent: a second registration is reported as success. */
		pr_info("%s: handler[%d] was already registered\n",
			__func__,
			event_type_ind);
		return true;
	}
	s_usfcdev_events[event_type_ind].registered_event = true;
	s_usfcdev_events[event_type_ind].match_cb = match_cb;
	s_usfcdev_events[event_type_ind].event_status = USFCDEV_EVENT_ENABLED;
	ret = input_register_handler(&s_usfc_handlers[event_type_ind]);
	if (!ret) {
		rc = true;
		pr_debug("%s: handler[%d] was registered\n",
			__func__,
			event_type_ind);
	} else {
		/* Roll back the optimistic bookkeeping set above. */
		s_usfcdev_events[event_type_ind].registered_event = false;
		s_usfcdev_events[event_type_ind].match_cb = NULL;
		pr_err("%s: handler[%d] registration failed: ret=%d\n",
			__func__,
			event_type_ind,
			ret);
	}
	return rc;
}
/*
 * usfcdev_unregister - detach the USF conflict filter from one event type.
 *
 * Unregisters the input handler (if it was registered) and resets the
 * per-type bookkeeping back to its enabled defaults. Out-of-range
 * indices are rejected with an error message.
 */
void usfcdev_unregister(uint16_t event_type_ind)
{
	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
		pr_err("%s: wrong input: event_type_ind=%d\n",
			__func__,
			event_type_ind);
		return;
	}
	if (!s_usfcdev_events[event_type_ind].registered_event)
		return;

	input_unregister_handler(&s_usfc_handlers[event_type_ind]);
	pr_debug("%s: handler[%d] was unregistered\n",
		__func__,
		event_type_ind);
	s_usfcdev_events[event_type_ind].registered_event = false;
	s_usfcdev_events[event_type_ind].match_cb = NULL;
	s_usfcdev_events[event_type_ind].event_status = USFCDEV_EVENT_ENABLED;
}
static inline void usfcdev_send_cmd(
struct input_dev *dev,
struct usfcdev_input_command cmd)
{
input_event(dev, cmd.type, cmd.code, cmd.value);
}
/*
 * usfcdev_clean_dev - synthesize events releasing all active MT slots of
 * the primary device for @event_type_ind.
 *
 * Runs while the event type is in the DISABLING state: real touch events
 * are filtered, but the synthesized slot-release traffic still passes
 * (see usfcdev_filter()). If foreign events interleave with a slot's
 * clear sequence (->interleaved set by the filter), the slot is redone,
 * up to MAX_RETRIES times.
 */
static void usfcdev_clean_dev(uint16_t event_type_ind)
{
	struct input_dev *dev = NULL;
	int i;
	int j;
	int retries = 0;

	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
		pr_err("%s: wrong input: event_type_ind=%d\n",
			__func__,
			event_type_ind);
		return;
	}
	/* Only primary device must exist */
	dev = s_usfc_primary_handles[event_type_ind].dev;
	if (dev == NULL) {
		pr_err("%s: NULL primary device\n",
			__func__);
		return;
	}
	/* Global "clear" preamble before touching individual slots. */
	for (i = 0; i < ARRAY_SIZE(initial_clear_cmds); i++)
		usfcdev_send_cmd(dev, initial_clear_cmds[i]);
	input_sync(dev);
	/* Send commands to free all slots */
	for (i = 0; i < dev->mt->num_slots; i++) {
		s_usfcdev_events[event_type_ind].interleaved = false;
		/* A negative tracking id means the slot is already free. */
		if (input_mt_get_value(&dev->mt->slots[i],
				ABS_MT_TRACKING_ID) < 0) {
			pr_debug("%s: skipping slot %d",
				__func__, i);
			continue;
		}
		/* Point the shared command template at this slot. */
		slot_clear_cmds[SLOT_CMD_ID].value = i;
		for (j = 0; j < ARRAY_SIZE(slot_clear_cmds); j++)
			usfcdev_send_cmd(dev, slot_clear_cmds[j]);
		if (s_usfcdev_events[event_type_ind].interleaved) {
			/* Foreign events slipped in mid-sequence: retry
			 * the same slot (i is decremented, loop re-runs). */
			pr_debug("%s: interleaved(%d): slot(%d)",
				__func__, i, dev->mt->slot);
			if (retries++ < MAX_RETRIES) {
				--i;
				continue;
			}
			pr_warn("%s: index(%d) reached max retires",
				__func__, i);
		}
		retries = 0;
		input_sync(dev);
	}
}
/*
 * usfcdev_set_filter - turn event filtering for one event type on or off.
 * @event_type_ind: index of the hooked event type.
 * @filter: true to start filtering, false to re-enable pass-through.
 *
 * Enabling the filter is a two-step transition: DISABLING first (so
 * usfcdev_clean_dev()'s synthesized slot-release events still pass the
 * filter), then DISABLED once the device state is cleaned. s_usf_pid is
 * recorded so events injected by this process bypass the filter.
 *
 * Returns true on success, false for a bad index or unregistered type.
 */
bool usfcdev_set_filter(uint16_t event_type_ind, bool filter)
{
	bool rc = true;

	if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
		pr_err("%s: wrong input: event_type_ind=%d\n",
			__func__,
			event_type_ind);
		return false;
	}
	if (s_usfcdev_events[event_type_ind].registered_event) {
		pr_debug("%s: event_type[%d]; filter=%d\n",
			__func__,
			event_type_ind,
			filter
			);
		if (filter) {
			s_usfcdev_events[event_type_ind].event_status =
				USFCDEV_EVENT_DISABLING;
			s_usf_pid = sys_getpid();
			usfcdev_clean_dev(event_type_ind);
			s_usfcdev_events[event_type_ind].event_status =
				USFCDEV_EVENT_DISABLED;
		} else
			s_usfcdev_events[event_type_ind].event_status =
				USFCDEV_EVENT_ENABLED;
	} else {
		pr_err("%s: event_type[%d] isn't registered\n",
			__func__,
			event_type_ind);
		rc = false;
	}
	return rc;
}
/*
 * usfcdev_init - module init hook.
 * Nothing to do at boot: input handlers are registered on demand via
 * usfcdev_register().
 */
static int __init usfcdev_init(void)
{
	return 0;
}
device_initcall(usfcdev_init);
MODULE_DESCRIPTION("Handle of events from devices, conflicting with USF");
| gpl-2.0 |
t0mm13b/ZTE-Blade-2.6.35.11 | drivers/staging/rtl8192e/ieee80211/dot11d.c | 1367 | 5448 | #ifdef ENABLE_DOT11D
//-----------------------------------------------------------------------------
// File:
// Dot11d.c
//
// Description:
// Implement 802.11d.
//
//-----------------------------------------------------------------------------
#include "dot11d.h"
/*
 * Dot11d_Init - reset the 802.11d bookkeeping of @ieee to its pristine
 * state: 11d disabled, no learned state, empty country IE, cleared
 * channel map and TX-power limits (0xFF = unset).
 */
void
Dot11d_Init(struct ieee80211_device *ieee)
{
	PRT_DOT11D_INFO info = GET_DOT11D_INFO(ieee);

	info->bEnabled = 0;
	info->State = DOT11D_STATE_NONE;
	info->CountryIeLen = 0;
	memset(info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	memset(info->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
	RESET_CIE_WATCHDOG(ieee);
	printk("Dot11d_Init()\n");
}
/*
 * Dot11d_Reset - return to the state of just entering a regulatory
 * domain: rebuild the default channel map and forget any learned
 * country IE. (Dead "#if 0" code and a commented-out printk removed.)
 */
void
Dot11d_Reset(struct ieee80211_device *ieee)
{
	u32 i;
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);

	/* Clear old channel map and TX-power limits (0xFF = unset). */
	memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
	/* Default map: 1 for channels 1-11, 2 for 12-14.
	 * NOTE(review): the distinct value 2 appears to mark 12-14 as
	 * restricted in this driver family — confirm against the users
	 * of channel_map. */
	for (i = 1; i <= 11; i++)
		(pDot11dInfo->channel_map)[i] = 1;
	for (i = 12; i <= 14; i++)
		(pDot11dInfo->channel_map)[i] = 2;
	pDot11dInfo->State = DOT11D_STATE_NONE;
	pDot11dInfo->CountryIeLen = 0;
	RESET_CIE_WATCHDOG(ieee);
}
//
// Description:
// Update country IE from Beacon or Probe Resopnse
// and configure PHY for operation in the regulatory domain.
//
// TODO:
// Configure Tx power.
//
// Assumption:
// 1. IS_DOT11D_ENABLE() is TRUE.
// 2. Input IE is an valid one.
//
/*
 * Dot11d_UpdateCountryIe - validate and adopt a country IE from a
 * beacon/probe response and rebuild the channel map from its
 * (channel, count, power) triples.
 *
 * Fixes over the original:
 *  - reject IEs shorter than the mandatory 3-byte country string; the
 *    old "(CoutryIeLen - 3) / 3" silently underflowed for short IEs;
 *  - bound the final memcpy by the size of CountryIeBuf, closing a
 *    buffer overflow on attacker-controlled IE lengths.
 *    NOTE(review): assumes CountryIeBuf is a fixed in-struct array so
 *    sizeof yields its capacity — confirm in dot11d.h.
 */
void
Dot11d_UpdateCountryIe(
	struct ieee80211_device *dev,
	u8 * pTaddr,
	u16 CoutryIeLen,
	u8 * pCoutryIe
	)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	u8 i, j, NumTriples, MaxChnlNum;
	PCHNL_TXPOWER_TRIPLE pTriple;

	if (CoutryIeLen < 3 ||
	    CoutryIeLen > sizeof(pDot11dInfo->CountryIeBuf)) {
		printk("Dot11d_UpdateCountryIe(): Invalid country IE length, skip it\n");
		return;
	}
	memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
	memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
	MaxChnlNum = 0;
	NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string */
	pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
	for (i = 0; i < NumTriples; i++) {
		if (MaxChnlNum >= pTriple->FirstChnl) {
			/* Not monotonically increasing: stop processing. */
			printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
			return;
		}
		if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
			/* Channel ids out of range: stop processing. */
			printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
			return;
		}
		for (j = 0; j < pTriple->NumChnls; j++) {
			pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
			pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] =
				pTriple->MaxTxPowerInDbm;
			MaxChnlNum = pTriple->FirstChnl + j;
		}
		pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
	}
	printk("Channel List:");
	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
		if (pDot11dInfo->channel_map[i] > 0)
			printk(" %d", i);
	printk("\n");
	UPDATE_CIE_SRC(dev, pTaddr);
	pDot11dInfo->CountryIeLen = CoutryIeLen;
	memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
	pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
/*
 * DOT11D_GetMaxTxPwrInDbm - learned TX-power limit for @Channel.
 * Returns 255 ("no limit known") for an out-of-range channel or a
 * channel not present in the current channel map.
 */
u8
DOT11D_GetMaxTxPwrInDbm(
	struct ieee80211_device *dev,
	u8 Channel
	)
{
	PRT_DOT11D_INFO info = GET_DOT11D_INFO(dev);

	if (Channel > MAX_CHANNEL_NUMBER) {
		printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
		return 255;
	}
	if (info->channel_map[Channel])
		return info->MaxTxPwrDbmList[Channel];
	return 255;
}
/*
 * DOT11D_ScanComplete - advance the 802.11d state machine after a scan:
 * a freshly LEARNED country IE becomes DONE; a DONE state whose country
 * IE watchdog has expired is reset back to the default map.
 */
void
DOT11D_ScanComplete(
	struct ieee80211_device * dev
	)
{
	PRT_DOT11D_INFO info = GET_DOT11D_INFO(dev);

	if (info->State == DOT11D_STATE_LEARNED) {
		info->State = DOT11D_STATE_DONE;
	} else if (info->State == DOT11D_STATE_DONE) {
		if (GET_CIE_WATCHDOG(dev) == 0) {
			/* Reset country IE if previous one is gone. */
			Dot11d_Reset(dev);
		}
	}
	/* DOT11D_STATE_NONE: nothing to do. */
}
/*
 * IsLegalChannel - 1 if @channel is allowed by the current channel map,
 * 0 otherwise (including out-of-range channel numbers).
 */
int IsLegalChannel(
	struct ieee80211_device * dev,
	u8 channel
	)
{
	PRT_DOT11D_INFO info = GET_DOT11D_INFO(dev);

	if (channel > MAX_CHANNEL_NUMBER) {
		printk("IsLegalChannel(): Invalid Channel\n");
		return 0;
	}
	return (info->channel_map[channel] > 0) ? 1 : 0;
}
/*
 * ToLegalChannel - map @channel to a channel allowed by the current map.
 * Returns @channel itself when it is valid and allowed; otherwise the
 * lowest allowed channel, or 0 if the map is empty.
 *
 * Fix: the invalid-channel printk said "IsLegalChannel()" — a copy-paste
 * error from the function above. The map scan is also skipped when the
 * requested channel is already legal.
 */
int ToLegalChannel(
	struct ieee80211_device * dev,
	u8 channel
	)
{
	PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
	u8 default_chn = 0;
	u32 i = 0;

	if (channel <= MAX_CHANNEL_NUMBER &&
	    pDot11dInfo->channel_map[channel] > 0)
		return channel;
	/* Fall back to the lowest allowed channel (0 if none). */
	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
		if (pDot11dInfo->channel_map[i] > 0) {
			default_chn = i;
			break;
		}
	}
	if (MAX_CHANNEL_NUMBER < channel)
		printk("ToLegalChannel(): Invalid Channel\n");
	return default_chn;
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
//EXPORT_SYMBOL(Dot11d_Init);
//EXPORT_SYMBOL(Dot11d_Reset);
//EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
//EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
//EXPORT_SYMBOL(DOT11D_ScanComplete);
//EXPORT_SYMBOL(IsLegalChannel);
//EXPORT_SYMBOL(ToLegalChannel);
#else
EXPORT_SYMBOL_NOVERS(Dot11d_Init);
EXPORT_SYMBOL_NOVERS(Dot11d_Reset);
EXPORT_SYMBOL_NOVERS(Dot11d_UpdateCountryIe);
EXPORT_SYMBOL_NOVERS(DOT11D_GetMaxTxPwrInDbm);
EXPORT_SYMBOL_NOVERS(DOT11D_ScanComplete);
EXPORT_SYMBOL_NOVERS(IsLegalChannel);
EXPORT_SYMBOL_NOVERS(ToLegalChannel);
#endif
#endif
| gpl-2.0 |
Davletvm/linux | arch/arm/mach-omap2/powerdomains43xx_data.c | 2391 | 3486 | /*
* AM43xx Power domains framework
*
* Copyright (C) 2013 Texas Instruments, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include "powerdomain.h"
#include "prcm-common.h"
#include "prcm44xx.h"
#include "prcm43xx.h"
static struct powerdomain gfx_43xx_pwrdm = {
.name = "gfx_pwrdm",
.voltdm = { .name = "core" },
.prcm_offs = AM43XX_PRM_GFX_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gfx_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
static struct powerdomain mpu_43xx_pwrdm = {
.name = "mpu_pwrdm",
.voltdm = { .name = "mpu" },
.prcm_offs = AM43XX_PRM_MPU_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 3,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF_RET, /* mpu_l1 */
[1] = PWRSTS_OFF_RET, /* mpu_l2 */
[2] = PWRSTS_OFF_RET, /* mpu_ram */
},
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* mpu_l1 */
[1] = PWRSTS_ON, /* mpu_l2 */
[2] = PWRSTS_ON, /* mpu_ram */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
static struct powerdomain rtc_43xx_pwrdm = {
.name = "rtc_pwrdm",
.voltdm = { .name = "rtc" },
.prcm_offs = AM43XX_PRM_RTC_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
};
static struct powerdomain wkup_43xx_pwrdm = {
.name = "wkup_pwrdm",
.voltdm = { .name = "core" },
.prcm_offs = AM43XX_PRM_WKUP_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
.banks = 1,
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* debugss_mem */
},
};
static struct powerdomain tamper_43xx_pwrdm = {
.name = "tamper_pwrdm",
.voltdm = { .name = "tamper" },
.prcm_offs = AM43XX_PRM_TAMPER_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
};
static struct powerdomain cefuse_43xx_pwrdm = {
.name = "cefuse_pwrdm",
.voltdm = { .name = "core" },
.prcm_offs = AM43XX_PRM_CEFUSE_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
static struct powerdomain per_43xx_pwrdm = {
.name = "per_pwrdm",
.voltdm = { .name = "core" },
.prcm_offs = AM43XX_PRM_PER_INST,
.prcm_partition = AM43XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 4,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF_RET, /* icss_mem */
[1] = PWRSTS_OFF_RET, /* per_mem */
[2] = PWRSTS_OFF_RET, /* ram1_mem */
[3] = PWRSTS_OFF_RET, /* ram2_mem */
},
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* icss_mem */
[1] = PWRSTS_ON, /* per_mem */
[2] = PWRSTS_ON, /* ram1_mem */
[3] = PWRSTS_ON, /* ram2_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
static struct powerdomain *powerdomains_am43xx[] __initdata = {
&gfx_43xx_pwrdm,
&mpu_43xx_pwrdm,
&rtc_43xx_pwrdm,
&wkup_43xx_pwrdm,
&tamper_43xx_pwrdm,
&cefuse_43xx_pwrdm,
&per_43xx_pwrdm,
NULL
};
/*
 * am43xx_check_vcvp - pwrdm_has_voltdm hook for AM43xx.
 * Always reports 0, i.e. no power domain is tied to a voltage
 * controller/processor on this SoC (NOTE(review): inferred from the
 * hook name and use in am43xx_powerdomains_init — confirm).
 */
static int am43xx_check_vcvp(void)
{
	return 0;
}
/*
 * am43xx_powerdomains_init - register all AM43xx power domains at boot.
 *
 * Reuses the shared OMAP4 powerdomain operations, overriding only the
 * pwrdm_has_voltdm hook with the AM43xx stub (am43xx_check_vcvp), then
 * registers the platform functions, the domain list and finishes
 * framework initialization.
 */
void __init am43xx_powerdomains_init(void)
{
	omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_am43xx);
	pwrdm_complete_init();
}
| gpl-2.0 |
RuanJG/uTouch-kernel | arch/arm/plat-omap/cpu-omap.c | 2647 | 4050 | /*
* linux/arch/arm/plat-omap/cpu-omap.c
*
* CPU frequency scaling for OMAP
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
* Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <plat/clock.h>
#include <asm/system.h>
#define VERY_HI_RATE 900000000
static struct cpufreq_frequency_table *freq_table;
#ifdef CONFIG_ARCH_OMAP1
#define MPU_CLK "mpu"
#else
#define MPU_CLK "virt_prcm_set"
#endif
static struct clk *mpu_clk;
/* TODO: Add support for SDRAM timing changes */
/*
 * omap_verify_speed - cpufreq ->verify hook.
 *
 * With a frequency table the generic table verifier does the work.
 * Otherwise min/max are clamped to the cpuinfo range, rounded (via
 * clk_round_rate(), kHz<->Hz conversion) to rates the MPU clock can
 * actually produce, and clamped once more in case rounding pushed a
 * bound outside the allowed range. Only CPU0 is valid.
 */
static int omap_verify_speed(struct cpufreq_policy *policy)
{
	if (freq_table)
		return cpufreq_frequency_table_verify(policy, freq_table);
	if (policy->cpu)
		return -EINVAL;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
	policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}
/*
 * omap_getspeed - current MPU clock rate in kHz (0 for any CPU but 0,
 * since this is a UP driver).
 */
static unsigned int omap_getspeed(unsigned int cpu)
{
	if (cpu)
		return 0;
	return clk_get_rate(mpu_clk) / 1000;	/* Hz -> kHz */
}
/*
 * omap_target - cpufreq ->target hook: switch the MPU clock to the
 * achievable rate closest to @target_freq (kHz), surrounding the change
 * with PRE/POSTCHANGE notifications. Returns clk_set_rate()'s result.
 */
static int omap_target(struct cpufreq_policy *policy,
		       unsigned int target_freq,
		       unsigned int relation)
{
	struct cpufreq_freqs freqs;
	int ret = 0;

	/* Ensure desired rate is within allowed range. Some govenors
	 * (ondemand) will just pass target_freq=0 to get the minimum. */
	if (target_freq < policy->min)
		target_freq = policy->min;
	if (target_freq > policy->max)
		target_freq = policy->max;
	freqs.old = omap_getspeed(0);
	freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
	freqs.cpu = 0;
	/* Nothing to do when the rounded rate equals the current one. */
	if (freqs.old == freqs.new)
		return ret;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_CPU_FREQ_DEBUG
	printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
	       freqs.old, freqs.new);
#endif
	ret = clk_set_rate(mpu_clk, freqs.new * 1000);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	return ret;
}
/*
 * omap_cpu_init - cpufreq ->init hook for CPU0.
 *
 * Takes a reference on the MPU clock, records the current rate and
 * builds the frequency table, falling back to clk_round_rate()-derived
 * limits when no table is available.
 *
 * Fix: the CPU number is validated BEFORE clk_get(); the original code
 * acquired the clock first and returned -EINVAL without clk_put(),
 * leaking a clock reference on every non-zero-CPU probe.
 */
static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;

	if (policy->cpu != 0)
		return -EINVAL;
	mpu_clk = clk_get(NULL, MPU_CLK);
	if (IS_ERR(mpu_clk))
		return PTR_ERR(mpu_clk);
	policy->cur = policy->min = policy->max = omap_getspeed(0);
	clk_init_cpufreq_table(&freq_table);
	if (freq_table) {
		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
		if (!result)
			cpufreq_frequency_table_get_attr(freq_table,
							 policy->cpu);
	} else {
		policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
		policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
							  VERY_HI_RATE) / 1000;
	}
	/* FIXME: what's the actual transition time? */
	policy->cpuinfo.transition_latency = 300 * 1000;
	return 0;
}
/*
 * omap_cpu_exit - cpufreq ->exit hook: release the frequency table and
 * drop the MPU clock reference taken in omap_cpu_init().
 */
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
	clk_exit_cpufreq_table(&freq_table);
	clk_put(mpu_clk);
	return 0;
}
static struct freq_attr *omap_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_STICKY,
.verify = omap_verify_speed,
.target = omap_target,
.get = omap_getspeed,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
.attr = omap_cpufreq_attr,
};
/* Register the OMAP cpufreq driver with the cpufreq core at arch init. */
static int __init omap_cpufreq_init(void)
{
	return cpufreq_register_driver(&omap_driver);
}
arch_initcall(omap_cpufreq_init);
/*
* if ever we want to remove this, upon cleanup call:
*
* cpufreq_unregister_driver()
* cpufreq_frequency_table_put_attr()
*/
| gpl-2.0 |
ktoonsez/KTSGS5 | arch/s390/kernel/debug.c | 4439 | 36450 | /*
* arch/s390/kernel/debug.c
* S/390 debug facility
*
* Copyright IBM Corp. 1999, 2012
*
* Author(s): Michael Holzheu (holzheu@de.ibm.com),
* Holger Smolinski (Holger.Smolinski@de.ibm.com)
*
* Bugreports to: <Linux390@de.ibm.com>
*/
#define KMSG_COMPONENT "s390dbf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#define DEBUG_PROLOG_ENTRY -1
#define ALL_AREAS 0 /* copy all debug areas */
#define NO_AREAS 1 /* copy no debug areas */
/* typedefs */
typedef struct file_private_info {
loff_t offset; /* offset of last read in file */
int act_area; /* number of last formated area */
int act_page; /* act page in given area */
int act_entry; /* last formated entry (offset */
/* relative to beginning of last */
/* formated page) */
size_t act_entry_offset; /* up to this offset we copied */
/* in last read the last formated */
/* entry to userland */
char temp_buf[2048]; /* buffer for output */
debug_info_t *debug_info_org; /* original debug information */
debug_info_t *debug_info_snap; /* snapshot of debug information */
struct debug_view *view; /* used view of debug info */
} file_private_info_t;
typedef struct
{
char *string;
/*
* This assumes that all args are converted into longs
* on L/390 this is the case for all types of parameter
* except of floats, and long long (32 bit)
*
*/
long args[0];
} debug_sprintf_entry_t;
/* internal function prototyes */
static int debug_init(void);
static ssize_t debug_output(struct file *file, char __user *user_buf,
size_t user_len, loff_t * offset);
static ssize_t debug_input(struct file *file, const char __user *user_buf,
size_t user_len, loff_t * offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static int debug_prolog_level_fn(debug_info_t * id,
struct debug_view *view, char *out_buf);
static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t * offset);
static int debug_prolog_pages_fn(debug_info_t * id,
struct debug_view *view, char *out_buf);
static int debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t * offset);
static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t * offset);
static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
char *out_buf, const char *in_buf);
static int debug_raw_format_fn(debug_info_t * id,
struct debug_view *view, char *out_buf,
const char *in_buf);
static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf);
static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
char *out_buf, debug_sprintf_entry_t *curr_event);
/* globals */
struct debug_view debug_raw_view = {
"raw",
NULL,
&debug_raw_header_fn,
&debug_raw_format_fn,
NULL,
NULL
};
struct debug_view debug_hex_ascii_view = {
"hex_ascii",
NULL,
&debug_dflt_header_fn,
&debug_hex_ascii_format_fn,
NULL,
NULL
};
static struct debug_view debug_level_view = {
"level",
&debug_prolog_level_fn,
NULL,
NULL,
&debug_input_level_fn,
NULL
};
static struct debug_view debug_pages_view = {
"pages",
&debug_prolog_pages_fn,
NULL,
NULL,
&debug_input_pages_fn,
NULL
};
static struct debug_view debug_flush_view = {
"flush",
NULL,
NULL,
NULL,
&debug_input_flush_fn,
NULL
};
struct debug_view debug_sprintf_view = {
"sprintf",
NULL,
&debug_dflt_header_fn,
(debug_format_proc_t*)&debug_sprintf_format_fn,
NULL,
NULL
};
/* used by dump analysis tools to determine version of debug feature */
static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
static debug_info_t *debug_area_first = NULL;
static debug_info_t *debug_area_last = NULL;
static DEFINE_MUTEX(debug_mutex);
static int initialized;
static int debug_critical;
static const struct file_operations debug_file_ops = {
.owner = THIS_MODULE,
.read = debug_output,
.write = debug_input,
.open = debug_open,
.release = debug_close,
.llseek = no_llseek,
};
static struct dentry *debug_debugfs_root_entry;
/* functions */
/*
* debug_areas_alloc
* - Debug areas are implemented as a threedimensonal array:
* areas[areanumber][pagenumber][pageoffset]
*/
/*
 * debug_areas_alloc
 * - Debug areas are implemented as a threedimensonal array:
 *   areas[areanumber][pagenumber][pageoffset]
 *
 * Allocates all three levels (pointer array per area, pointer array per
 * page, one zeroed page each). On any failure everything allocated so
 * far is unwound and NULL is returned.
 */
static debug_entry_t***
debug_areas_alloc(int pages_per_area, int nr_areas)
{
	debug_entry_t*** areas;
	int i,j;

	areas = kmalloc(nr_areas *
			sizeof(debug_entry_t**),
			GFP_KERNEL);
	if (!areas)
		goto fail_malloc_areas;
	for (i = 0; i < nr_areas; i++) {
		areas[i] = kmalloc(pages_per_area *
				sizeof(debug_entry_t*),GFP_KERNEL);
		if (!areas[i]) {
			goto fail_malloc_areas2;
		}
		for(j = 0; j < pages_per_area; j++) {
			areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if(!areas[i][j]) {
				/* Free the partially filled area i ... */
				for(j--; j >=0 ; j--) {
					kfree(areas[i][j]);
				}
				kfree(areas[i]);
				/* ... then all fully allocated areas < i. */
				goto fail_malloc_areas2;
			}
		}
	}
	return areas;

fail_malloc_areas2:
	/* Unwind all areas completed before area i. */
	for(i--; i >= 0; i--){
		for(j=0; j < pages_per_area;j++){
			kfree(areas[i][j]);
		}
		kfree(areas[i]);
	}
	kfree(areas);
fail_malloc_areas:
	return NULL;
}
/*
* debug_info_alloc
* - alloc new debug-info
*/
static debug_info_t*
debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
int buf_size, int level, int mode)
{
debug_info_t* rc;
/* alloc everything */
rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
if(!rc)
goto fail_malloc_rc;
rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if(!rc->active_entries)
goto fail_malloc_active_entries;
rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if(!rc->active_pages)
goto fail_malloc_active_pages;
if((mode == ALL_AREAS) && (pages_per_area != 0)){
rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
if(!rc->areas)
goto fail_malloc_areas;
} else {
rc->areas = NULL;
}
/* initialize members */
spin_lock_init(&rc->lock);
rc->pages_per_area = pages_per_area;
rc->nr_areas = nr_areas;
rc->active_area = 0;
rc->level = level;
rc->buf_size = buf_size;
rc->entry_size = sizeof(debug_entry_t) + buf_size;
strlcpy(rc->name, name, sizeof(rc->name));
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct dentry*));
atomic_set(&(rc->ref_count), 0);
return rc;
fail_malloc_areas:
kfree(rc->active_pages);
fail_malloc_active_pages:
kfree(rc->active_entries);
fail_malloc_active_entries:
kfree(rc);
fail_malloc_rc:
return NULL;
}
/*
* debug_areas_free
* - free all debug areas
*/
/*
 * debug_areas_free
 * - free all debug areas of db_info (pages, per-area pointer arrays and
 *   the top-level array); ->areas is reset to NULL so a second call is
 *   a no-op.
 */
static void
debug_areas_free(debug_info_t* db_info)
{
	int area, page;

	if (!db_info->areas)
		return;
	for (area = 0; area < db_info->nr_areas; area++) {
		for (page = 0; page < db_info->pages_per_area; page++)
			kfree(db_info->areas[area][page]);
		kfree(db_info->areas[area]);
	}
	kfree(db_info->areas);
	db_info->areas = NULL;
}
/*
* debug_info_free
* - free memory debug-info
*/
/*
 * debug_info_free
 * - free memory debug-info
 *
 * Releases the debug areas first, then the per-area bookkeeping arrays
 * and finally the descriptor itself. Called only when the last
 * reference is gone (see debug_info_put()) or on init error paths.
 */
static void
debug_info_free(debug_info_t* db_info){
	debug_areas_free(db_info);
	kfree(db_info->active_entries);
	kfree(db_info->active_pages);
	kfree(db_info);
}
/*
* debug_info_create
* - create new debug-info
*/
static debug_info_t*
debug_info_create(const char *name, int pages_per_area, int nr_areas,
int buf_size, umode_t mode)
{
debug_info_t* rc;
rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
DEBUG_DEFAULT_LEVEL, ALL_AREAS);
if(!rc)
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
rc->debugfs_root_entry = debugfs_create_dir(rc->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = rc;
rc->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = rc;
rc->prev = debug_area_last;
}
debug_area_last = rc;
rc->next = NULL;
debug_info_get(rc);
out:
return rc;
}
/*
* debug_info_copy
* - copy debug-info
*/
/*
 * debug_info_copy
 * - copy debug-info
 *
 * Allocates a fresh debug_info and copies all areas of 'in' into it
 * while holding in->lock. Since the allocation must happen unlocked,
 * the loop re-checks under the lock that the geometry (pages_per_area /
 * nr_areas) did not change in the meantime and retries otherwise.
 * With mode == NO_AREAS only the metadata is duplicated.
 * Returns the copy (lock released), or NULL on allocation failure.
 */
static debug_info_t*
debug_info_copy(debug_info_t* in, int mode)
{
	int i,j;
	debug_info_t* rc;
	unsigned long flags;

	/* get a consistent copy of the debug areas */
	do {
		rc = debug_info_alloc(in->name, in->pages_per_area,
			in->nr_areas, in->buf_size, in->level, mode);
		spin_lock_irqsave(&in->lock, flags);
		if(!rc)
			goto out;
		/* has something changed in the meantime ? */
		if((rc->pages_per_area == in->pages_per_area) &&
		   (rc->nr_areas == in->nr_areas)) {
			break;
		}
		spin_unlock_irqrestore(&in->lock, flags);
		debug_info_free(rc);
	} while (1);

	if (mode == NO_AREAS)
		goto out;

	for(i = 0; i < in->nr_areas; i++){
		for(j = 0; j < in->pages_per_area; j++) {
			memcpy(rc->areas[i][j], in->areas[i][j],PAGE_SIZE);
		}
	}
out:
	spin_unlock_irqrestore(&in->lock, flags);
	return rc;
}
/*
* debug_info_get
* - increments reference count for debug-info
*/
/*
 * debug_info_get
 * - increments reference count for debug-info (NULL is tolerated)
 */
static void
debug_info_get(debug_info_t * db_info)
{
	if (db_info)
		atomic_inc(&db_info->ref_count);
}
/*
* debug_info_put:
* - decreases reference count for debug-info and frees it if necessary
*/
/*
 * debug_info_put:
 * - decreases reference count for debug-info and frees it if necessary
 *
 * On the final put the per-view debugfs files and the root directory
 * are removed, the descriptor is unlinked from the global
 * debug_area_first/last list, and its memory is released.
 */
static void
debug_info_put(debug_info_t *db_info)
{
	int i;

	if (!db_info)
		return;
	if (atomic_dec_and_test(&db_info->ref_count)) {
		/* Remove all registered view files, then the directory. */
		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
			if (!db_info->views[i])
				continue;
			debugfs_remove(db_info->debugfs_entries[i]);
		}
		debugfs_remove(db_info->debugfs_root_entry);
		/* Unlink from the global doubly linked list. */
		if(db_info == debug_area_first)
			debug_area_first = db_info->next;
		if(db_info == debug_area_last)
			debug_area_last = db_info->prev;
		if(db_info->prev) db_info->prev->next = db_info->next;
		if(db_info->next) db_info->next->prev = db_info->prev;
		debug_info_free(db_info);
	}
}
/*
* debug_format_entry:
* - format one debug entry and return size of formated data
*/
/*
 * debug_format_entry:
 * - format one debug entry and return size of formated data
 *
 * DEBUG_PROLOG_ENTRY is a virtual entry: only the view's prolog is
 * rendered. Otherwise the entry at (act_area, act_page, act_entry) of
 * the snapshot is rendered into temp_buf via the view's header/format
 * callbacks. Entries with a zero store-clock are empty and yield 0.
 */
static int
debug_format_entry(file_private_info_t *p_info)
{
	debug_info_t *id_snap	= p_info->debug_info_snap;
	struct debug_view *view = p_info->view;
	debug_entry_t *act_entry;
	size_t len = 0;

	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
		/* print prolog */
		if (view->prolog_proc)
			len += view->prolog_proc(id_snap,view,p_info->temp_buf);
		goto out;
	}
	if (!id_snap->areas) /* this is true, if we have a prolog only view */
		goto out;    /* or if 'pages_per_area' is 0 */
	act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area]
				[p_info->act_page] + p_info->act_entry);

	if (act_entry->id.stck == 0LL)
		goto out;  /* empty entry */
	if (view->header_proc)
		len += view->header_proc(id_snap, view, p_info->act_area,
					act_entry, p_info->temp_buf);
	if (view->format_proc)
		len += view->format_proc(id_snap, view, p_info->temp_buf + len,
					DEBUG_DATA(act_entry));
out:
	return len;
}
/*
* debug_next_entry:
* - goto next entry in p_info
*/
/*
 * debug_next_entry:
 * - goto next entry in p_info
 *
 * Advances the iteration cursor from the virtual prolog entry to the
 * first real entry, then entry by entry, wrapping from page to page and
 * area to area. Returns 1 when the last area is exhausted (or the info
 * has no areas at all), 0 while more entries may follow.
 */
static inline int
debug_next_entry(file_private_info_t *p_info)
{
	debug_info_t *id;

	id = p_info->debug_info_snap;
	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
		/* Leaving the prolog: start at area 0 / page 0 / entry 0. */
		p_info->act_entry = 0;
		p_info->act_page  = 0;
		goto out;
	}
	if(!id->areas)
		return 1;
	p_info->act_entry += id->entry_size;
	/* switch to next page, if we reached the end of the page  */
	if (p_info->act_entry > (PAGE_SIZE - id->entry_size)){
		/* next page */
		p_info->act_entry = 0;
		p_info->act_page += 1;
		if((p_info->act_page % id->pages_per_area) == 0) {
			/* next area */
			p_info->act_area++;
			p_info->act_page=0;
		}
		if(p_info->act_area >= id->nr_areas)
			return 1;
	}
out:
	return 0;
}
/*
* debug_output:
* - called for user read()
* - copies formated debug entries to the user buffer
*/
static ssize_t
debug_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t count = 0;
size_t entry_offset;
file_private_info_t *p_info;
p_info = ((file_private_info_t *) file->private_data);
if (*offset != p_info->offset)
return -EPIPE;
if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
return 0;
entry_offset = p_info->act_entry_offset;
while(count < len){
int formatted_line_size;
int formatted_line_residue;
int user_buf_residue;
size_t copy_size;
formatted_line_size = debug_format_entry(p_info);
formatted_line_residue = formatted_line_size - entry_offset;
user_buf_residue = len-count;
copy_size = min(user_buf_residue, formatted_line_residue);
if(copy_size){
if (copy_to_user(user_buf + count, p_info->temp_buf
+ entry_offset, copy_size))
return -EFAULT;
count += copy_size;
entry_offset += copy_size;
}
if(copy_size == formatted_line_residue){
entry_offset = 0;
if(debug_next_entry(p_info))
goto out;
}
}
out:
p_info->offset = *offset + count;
p_info->act_entry_offset = entry_offset;
*offset = p_info->offset;
return count;
}
/*
* debug_input:
* - called for user write()
* - calls input function of view
*/
/*
 * debug_input:
 * - called for user write()
 * - serialized by debug_mutex, forwards the write to the view's
 *   input_proc if one exists; views without one reject writes (-EPERM).
 */
static ssize_t
debug_input(struct file *file, const char __user *user_buf, size_t length,
	    loff_t *offset)
{
	file_private_info_t *p_info = file->private_data;
	int rc;

	mutex_lock(&debug_mutex);
	if (p_info->view->input_proc)
		rc = p_info->view->input_proc(p_info->debug_info_org,
					      p_info->view, file, user_buf,
					      length, offset);
	else
		rc = -EPERM;
	mutex_unlock(&debug_mutex);
	return rc;	/* number of input characters */
}
/*
* debug_open:
* - called for user open()
* - copies formated output to private_data area of the file
* handle
*/
static int
debug_open(struct inode *inode, struct file *file)
{
int i, rc = 0;
file_private_info_t *p_info;
debug_info_t *debug_info, *debug_info_snapshot;
mutex_lock(&debug_mutex);
debug_info = file->f_path.dentry->d_inode->i_private;
/* find debug view */
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!debug_info->views[i])
continue;
else if (debug_info->debugfs_entries[i] ==
file->f_path.dentry) {
goto found; /* found view ! */
}
}
/* no entry found */
rc = -EINVAL;
goto out;
found:
/* Make snapshot of current debug areas to get it consistent. */
/* To copy all the areas is only needed, if we have a view which */
/* formats the debug areas. */
if(!debug_info->views[i]->format_proc &&
!debug_info->views[i]->header_proc){
debug_info_snapshot = debug_info_copy(debug_info, NO_AREAS);
} else {
debug_info_snapshot = debug_info_copy(debug_info, ALL_AREAS);
}
if(!debug_info_snapshot){
rc = -ENOMEM;
goto out;
}
p_info = kmalloc(sizeof(file_private_info_t),
GFP_KERNEL);
if(!p_info){
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
}
p_info->offset = 0;
p_info->debug_info_snap = debug_info_snapshot;
p_info->debug_info_org = debug_info;
p_info->view = debug_info->views[i];
p_info->act_area = 0;
p_info->act_page = 0;
p_info->act_entry = DEBUG_PROLOG_ENTRY;
p_info->act_entry_offset = 0;
file->private_data = p_info;
debug_info_get(debug_info);
nonseekable_open(inode, file);
out:
mutex_unlock(&debug_mutex);
return rc;
}
/*
* debug_close:
* - called for user close()
* - deletes private_data area of the file handle
*/
/*
 * debug_close() - handle user close(); release the per-open state that
 * debug_open() created (snapshot, reference on the debug area, private data).
 */
static int
debug_close(struct inode *inode, struct file *file)
{
	file_private_info_t *priv = file->private_data;

	if (priv->debug_info_snap)
		debug_info_free(priv->debug_info_snap);
	debug_info_put(priv->debug_info_org);
	kfree(priv);
	return 0;	/* success */
}
/*
* debug_register_mode:
* - Creates and initializes debug area for the caller
* The mode parameter allows to specify access rights for the s390dbf files
* - Returns handle for debug area
*/
/*
 * debug_register_mode() - create and initialize a debug area for the caller.
 * @mode specifies the access rights of the s390dbf files; returns the new
 * handle or NULL on failure.
 */
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
				  int nr_areas, int buf_size, umode_t mode,
				  uid_t uid, gid_t gid)
{
	debug_info_t *rc;

	/* Since debugfs currently does not support uid/gid other than root,
	 * warn when a caller asks for anything else. */
	if (uid != 0 || gid != 0)
		pr_warning("Root becomes the owner of all s390dbf files "
			   "in sysfs\n");
	BUG_ON(!initialized);
	mutex_lock(&debug_mutex);
	rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
	if (rc) {
		/* attach the three standard management views */
		debug_register_view(rc, &debug_level_view);
		debug_register_view(rc, &debug_flush_view);
		debug_register_view(rc, &debug_pages_view);
	} else {
		pr_err("Registering debug feature %s failed\n", name);
	}
	mutex_unlock(&debug_mutex);
	return rc;
}
EXPORT_SYMBOL(debug_register_mode);
/*
* debug_register:
* - creates and initializes debug area for the caller
* - returns handle for debug area
*/
/*
 * debug_register() - convenience wrapper around debug_register_mode()
 * using root ownership and owner read/write file permissions.
 */
debug_info_t *debug_register(const char *name, int pages_per_area,
			     int nr_areas, int buf_size)
{
	return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
				   S_IRUSR | S_IWUSR, 0, 0);
}
/*
* debug_unregister:
* - give back debug area
*/
/*
 * debug_unregister() - give back a debug area; drops the registration
 * reference, freeing the area once the last user has closed it.
 */
void
debug_unregister(debug_info_t *id)
{
	if (!id)
		return;
	mutex_lock(&debug_mutex);
	debug_info_put(id);
	mutex_unlock(&debug_mutex);
}
/*
* debug_set_size:
* - set area size (number of pages) and number of areas
*/
/*
 * debug_set_size() - replace the debug areas with freshly allocated ones of
 * pages_per_area pages each; all previously logged entries are discarded.
 * Returns 0 on success, -EINVAL on bad parameters, -ENOMEM on allocation
 * failure.
 */
static int
debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
{
	unsigned long flags;
	debug_entry_t *** new_areas;
	int rc=0;

	if(!id || (nr_areas <= 0) || (pages_per_area < 0))
		return -EINVAL;
	/* allocate outside the spinlock; pages_per_area == 0 disables areas */
	if(pages_per_area > 0){
		new_areas = debug_areas_alloc(pages_per_area, nr_areas);
		if(!new_areas) {
			pr_info("Allocating memory for %i pages failed\n",
				pages_per_area);
			rc = -ENOMEM;
			goto out;
		}
	} else {
		new_areas = NULL;
	}
	spin_lock_irqsave(&id->lock,flags);
	debug_areas_free(id);
	id->areas = new_areas;
	id->nr_areas = nr_areas;
	id->pages_per_area = pages_per_area;
	/* restart logging at the beginning of the (new) ring */
	id->active_area = 0;
	/* NOTE(review): these memsets assume nr_areas does not exceed the
	 * size the active_entries/active_pages arrays were allocated with;
	 * callers pass id->nr_areas unchanged - confirm before widening */
	memset(id->active_entries,0,sizeof(int)*id->nr_areas);
	memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
	spin_unlock_irqrestore(&id->lock,flags);
	pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
out:
	return rc;
}
/*
* debug_set_level:
* - set actual debug level
*/
/*
 * debug_set_level() - set the actual debug level of an area.
 * DEBUG_OFF_LEVEL switches logging off; values outside 0..DEBUG_MAX_LEVEL
 * are rejected with a message.
 */
void
debug_set_level(debug_info_t *id, int new_level)
{
	unsigned long flags;

	if (!id)
		return;
	spin_lock_irqsave(&id->lock, flags);
	if (new_level == DEBUG_OFF_LEVEL) {
		id->level = DEBUG_OFF_LEVEL;
		pr_info("%s: switched off\n",id->name);
	} else if (new_level < 0 || new_level > DEBUG_MAX_LEVEL) {
		pr_info("%s: level %i is out of range (%i - %i)\n",
			id->name, new_level, 0, DEBUG_MAX_LEVEL);
	} else {
		id->level = new_level;
	}
	spin_unlock_irqrestore(&id->lock, flags);
}
/*
* proceed_active_entry:
* - set active entry to next in the ring buffer
*/
/*
 * proceed_active_entry() - advance the write cursor to the next entry slot
 * in the active area's ring buffer.
 */
static inline void
proceed_active_entry(debug_info_t * id)
{
	/* bump the in-page offset by one entry; when the next entry would no
	 * longer fit into the current PAGE_SIZE page, wrap to the start of
	 * the next page (cycling through pages_per_area pages) */
	if ((id->active_entries[id->active_area] += id->entry_size)
	    > (PAGE_SIZE - id->entry_size)){
		id->active_entries[id->active_area] = 0;
		id->active_pages[id->active_area] =
			(id->active_pages[id->active_area] + 1) %
			id->pages_per_area;
	}
}
/*
* proceed_active_area:
* - set active area to next in the ring buffer
*/
/*
 * proceed_active_area() - advance to the next debug area, wrapping around
 * the ring of nr_areas areas.
 */
static inline void
proceed_active_area(debug_info_t *id)
{
	id->active_area = (id->active_area + 1) % id->nr_areas;
}
/*
* get_active_entry:
*/
/*
 * get_active_entry() - return the address of the current write slot:
 * base of the active page within the active area, plus the in-page offset
 * of the active entry.
 */
static inline debug_entry_t*
get_active_entry(debug_info_t * id)
{
	return (debug_entry_t *) (((char *) id->areas[id->active_area]
				   [id->active_pages[id->active_area]]) +
				  id->active_entries[id->active_area]);
}
/*
* debug_finish_entry:
* - set timestamp, caller address, cpu number etc.
*/
/*
 * debug_finish_entry() - stamp the entry header (TOD clock, cpu id, caller,
 * level, exception flag) and advance the write cursor.
 */
static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
		   int exception)
{
	active->id.stck = get_clock();
	active->id.fields.cpuid = smp_processor_id();
	active->caller = __builtin_return_address(0);
	active->id.fields.exception = exception;
	active->id.fields.level = level;
	proceed_active_entry(id);
	/* exception entries additionally switch to the next debug area so
	 * they are not overwritten as quickly by regular events */
	if(exception)
		proceed_active_area(id);
}
static int debug_stoppable=1;	/* may debug_stop_all() switch logging off? */
static int debug_active=1;	/* global on/off switch for all debug areas */

/* legacy binary-sysctl ids; the tables below register by procname
 * (NOTE(review): these constants look unused in this file - confirm) */
#define CTL_S390DBF_STOPPABLE 5678
#define CTL_S390DBF_ACTIVE 5679

/*
 * proc handler for the running debug_active sysctl
 * always allow read, allow write only if debug_stoppable is set or
 * if debug_active is already off
 */
static int
s390dbf_procactive(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!write || debug_stoppable || !debug_active)
		return proc_dointvec(table, write, buffer, lenp, ppos);
	else
		return 0;	/* disallowed write is silently ignored */
}
/* /proc/sys/s390dbf/{debug_stoppable,debug_active} */
static struct ctl_table s390dbf_table[] = {
	{
		.procname	= "debug_stoppable",
		.data		= &debug_stoppable,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_active",
		.data		= &debug_active,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		/* writes are conditional, see s390dbf_procactive() */
		.proc_handler	= s390dbf_procactive,
	},
	{ }
};

/* parent directory entry "s390dbf" for the table above */
static struct ctl_table s390dbf_dir_table[] = {
	{
		.procname	= "s390dbf",
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= s390dbf_table,
	},
	{ }
};

/* handle from register_sysctl_table(), used for cleanup in debug_exit() */
static struct ctl_table_header *s390dbf_sysctl_header;
/*
 * debug_stop_all() - switch all debug logging off, honoured only while the
 * debug_stoppable sysctl is set.
 */
void
debug_stop_all(void)
{
	if (!debug_stoppable)
		return;
	debug_active = 0;
}
/*
 * debug_set_critical() - enter critical mode: from now on event writers use
 * spin_trylock instead of spinning (see debug_event_common and friends), so
 * logging cannot block in e.g. a panic path.
 */
void debug_set_critical(void)
{
	debug_critical = 1;
}
/*
* debug_event_common:
* - write debug entry with given size
*/
/*
 * debug_event_common() - write a debug entry with the given payload.
 * Returns the written entry, or NULL if logging is off, the areas are gone,
 * or the lock could not be taken in critical mode.
 */
debug_entry_t*
debug_event_common(debug_info_t * id, int level, const void *buf, int len)
{
	unsigned long flags;
	debug_entry_t *active;

	if (!debug_active || !id->areas)
		return NULL;
	/* in critical mode never spin on the lock - drop the event instead */
	if (debug_critical) {
		if (!spin_trylock_irqsave(&id->lock, flags))
			return NULL;
	} else
		spin_lock_irqsave(&id->lock, flags);
	active = get_active_entry(id);
	/* zero first so short events do not leak stale payload bytes */
	memset(DEBUG_DATA(active), 0, id->buf_size);
	/* payload longer than buf_size is silently truncated */
	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
	debug_finish_entry(id, active, level, 0);
	spin_unlock_irqrestore(&id->lock, flags);
	return active;
}
/*
* debug_exception_common:
* - write debug entry with given size and switch to next debug area
*/
/*
 * debug_exception_common() - like debug_event_common(), but marks the entry
 * as an exception (debug_finish_entry then also switches to the next area).
 */
debug_entry_t
*debug_exception_common(debug_info_t * id, int level, const void *buf, int len)
{
	unsigned long flags;
	debug_entry_t *active;

	if (!debug_active || !id->areas)
		return NULL;
	/* in critical mode never spin on the lock - drop the event instead */
	if (debug_critical) {
		if (!spin_trylock_irqsave(&id->lock, flags))
			return NULL;
	} else
		spin_lock_irqsave(&id->lock, flags);
	active = get_active_entry(id);
	/* zero first so short events do not leak stale payload bytes */
	memset(DEBUG_DATA(active), 0, id->buf_size);
	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
	debug_finish_entry(id, active, level, 1);
	spin_unlock_irqrestore(&id->lock, flags);
	return active;
}
/*
* counts arguments in format string for sprintf view
*/
/*
 * Count the '%' characters in a format string; used by the sprintf view to
 * decide how many variadic arguments to save with an event.  Note that a
 * literal "%%" contributes two to the count (historic behaviour).
 */
static inline int
debug_count_numargs(char *string)
{
	int count = 0;
	char *p;

	for (p = string; *p != '\0'; p++) {
		if (*p == '%')
			count++;
	}
	return count;
}
/*
* debug_sprintf_event:
*/
/*
 * debug_sprintf_event() - store a format string pointer plus its raw
 * arguments; formatting is deferred to the sprintf view on read, so
 * 'string' must remain valid for the lifetime of the entry.
 */
debug_entry_t*
debug_sprintf_event(debug_info_t* id, int level,char *string,...)
{
	va_list ap;
	int numargs,idx;
	unsigned long flags;
	debug_sprintf_entry_t *curr_event;
	debug_entry_t *active;

	if((!id) || (level > id->level))
		return NULL;
	if (!debug_active || !id->areas)
		return NULL;
	numargs=debug_count_numargs(string);
	/* in critical mode never spin on the lock - drop the event instead */
	if (debug_critical) {
		if (!spin_trylock_irqsave(&id->lock, flags))
			return NULL;
	} else
		spin_lock_irqsave(&id->lock, flags);
	active = get_active_entry(id);
	curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
	va_start(ap,string);
	curr_event->string=string;
	/* save at most as many long args as fit after the string pointer */
	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
		curr_event->args[idx]=va_arg(ap,long);
	va_end(ap);
	debug_finish_entry(id, active, level, 0);
	spin_unlock_irqrestore(&id->lock, flags);
	return active;
}
/*
* debug_sprintf_exception:
*/
/*
 * debug_sprintf_exception() - like debug_sprintf_event(), but the entry is
 * flagged as an exception (the writer then switches to the next area).
 */
debug_entry_t*
debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
{
	va_list ap;
	int numargs,idx;
	unsigned long flags;
	debug_sprintf_entry_t *curr_event;
	debug_entry_t *active;

	if((!id) || (level > id->level))
		return NULL;
	if (!debug_active || !id->areas)
		return NULL;
	numargs=debug_count_numargs(string);
	/* in critical mode never spin on the lock - drop the event instead */
	if (debug_critical) {
		if (!spin_trylock_irqsave(&id->lock, flags))
			return NULL;
	} else
		spin_lock_irqsave(&id->lock, flags);
	active = get_active_entry(id);
	curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
	va_start(ap,string);
	curr_event->string=string;
	/* save at most as many long args as fit after the string pointer */
	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
		curr_event->args[idx]=va_arg(ap,long);
	va_end(ap);
	debug_finish_entry(id, active, level, 1);
	spin_unlock_irqrestore(&id->lock, flags);
	return active;
}
/*
* debug_init:
* - is called exactly once to initialize the debug feature
*/
/*
 * debug_init() - called exactly once at postcore time: registers the sysctl
 * table, creates the debugfs root directory and marks the feature ready.
 */
static int
__init debug_init(void)
{
	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
	mutex_lock(&debug_mutex);
	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
	initialized = 1;
	mutex_unlock(&debug_mutex);
	return 0;
}
/*
* debug_register_view:
*/
/*
 * debug_register_view() - attach a view to a debug area: derive the file
 * mode from the view's capabilities, create the debugfs file and record it
 * in a free slot of the view table.  Returns 0 on success, -1 on failure.
 *
 * Fix: the original called debugfs_remove() (which may sleep) and pr_err()
 * while holding id->lock with interrupts disabled.  The slot lookup is now
 * done under the lock, while reporting/cleanup happens after unlocking.
 */
int
debug_register_view(debug_info_t * id, struct debug_view *view)
{
	unsigned long flags;
	struct dentry *pde;
	umode_t mode;
	int rc = 0;
	int i;

	if (!id)
		goto out;
	/* views are regular files; never executable */
	mode = (id->mode | S_IFREG) & ~S_IXUGO;
	/* no output callbacks -> not readable */
	if (!(view->prolog_proc || view->format_proc || view->header_proc))
		mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
	/* no input callback -> not writable */
	if (!view->input_proc)
		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
	pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
				  id, &debug_file_ops);
	if (!pde){
		pr_err("Registering view %s/%s failed due to out of "
		       "memory\n", id->name,view->name);
		rc = -1;
		goto out;
	}
	spin_lock_irqsave(&id->lock, flags);
	/* claim the first free slot in the view table */
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		if (!id->views[i])
			break;
	}
	if (i == DEBUG_MAX_VIEWS)
		rc = -1;
	else {
		id->views[i] = view;
		id->debugfs_entries[i] = pde;
	}
	spin_unlock_irqrestore(&id->lock, flags);
	if (rc) {
		/* table full: undo the file creation outside the spinlock,
		 * because debugfs_remove() may sleep */
		pr_err("Registering view %s/%s would exceed the maximum "
		       "number of views %i\n", id->name, view->name, i);
		debugfs_remove(pde);
	}
out:
	return rc;
}
/*
* debug_unregister_view:
*/
/*
 * debug_unregister_view() - detach a view from a debug area and remove its
 * debugfs file.  Returns 0 on success, -1 if the view was not registered.
 *
 * Fixes: (1) debugfs_remove() may sleep, so it must not run under
 * id->lock (spin_lock_irqsave); the dentry is now detached under the lock
 * and removed afterwards.  (2) the stale debugfs_entries[i] pointer is
 * cleared so debug_open() cannot match a removed dentry.
 */
int
debug_unregister_view(debug_info_t * id, struct debug_view *view)
{
	struct dentry *dentry = NULL;
	unsigned long flags;
	int i, rc = 0;

	if (!id)
		goto out;
	spin_lock_irqsave(&id->lock, flags);
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		if (id->views[i] == view)
			break;
	}
	if (i == DEBUG_MAX_VIEWS) {
		rc = -1;
	} else {
		/* detach under the lock ... */
		dentry = id->debugfs_entries[i];
		id->debugfs_entries[i] = NULL;
		id->views[i] = NULL;
	}
	spin_unlock_irqrestore(&id->lock, flags);
	/* ... and remove the file after unlocking (may sleep) */
	debugfs_remove(dentry);
out:
	return rc;
}
/*
 * debug_get_user_string() - copy a user-supplied string of user_len bytes
 * into a freshly allocated, NUL-terminated kernel buffer, stripping one
 * trailing newline.  Returns the buffer (caller must kfree) or ERR_PTR.
 *
 * Fix: the original evaluated buffer[user_len - 1] unconditionally, which
 * is an out-of-bounds read (buffer[-1]) for a zero-length write.
 */
static inline char *
debug_get_user_string(const char __user *user_buf, size_t user_len)
{
	char* buffer;

	/* callers cap user_len at 0x10000, so user_len + 1 cannot wrap */
	buffer = kmalloc(user_len + 1, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		kfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (user_len > 0 && buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}
/*
 * debug_get_uint() - parse a decimal unsigned integer; any trailing
 * non-digit invalidates the whole input.  Returns the value or -EINVAL.
 */
static inline int
debug_get_uint(char *buf)
{
	int rc;

	buf = skip_spaces(buf);
	rc = simple_strtoul(buf, &buf, 10);
	if(*buf){
		rc = -EINVAL;
	}
	/* NOTE(review): an empty string parses as 0, and values above
	 * INT_MAX wrap on the unsigned long -> int conversion; callers
	 * only check for < 0 - confirm this is acceptable */
	return rc;
}
/*
* functions for debug-views
***********************************
*/
/*
* prints out actual debug level
*/
/*
 * debug_prolog_pages_fn() - prolog of the "pages" view: prints the current
 * number of pages per debug area.  Returns the number of bytes written.
 */
static int
debug_prolog_pages_fn(debug_info_t * id,
		      struct debug_view *view, char *out_buf)
{
	return sprintf(out_buf, "%i\n", id->pages_per_area);
}
/*
* reads new size (number of pages per debug area)
*/
/*
 * debug_input_pages_fn() - input of the "pages" view: reads a new number of
 * pages per debug area and resizes the areas via debug_set_size().
 * Returns the number of consumed characters or a negative errno.
 */
static int
debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
		     struct file *file, const char __user *user_buf,
		     size_t user_len, loff_t * offset)
{
	char *str;
	int rc,new_pages;

	/* cap pathological write sizes */
	if (user_len > 0x10000)
		user_len = 0x10000;
	/* only a single write at offset 0 is accepted */
	if (*offset != 0){
		rc = -EPIPE;
		goto out;
	}
	str = debug_get_user_string(user_buf,user_len);
	if(IS_ERR(str)){
		rc = PTR_ERR(str);
		goto out;
	}
	new_pages = debug_get_uint(str);
	if(new_pages < 0){
		rc = -EINVAL;
		goto free_str;
	}
	rc = debug_set_size(id,id->nr_areas, new_pages);
	if(rc != 0){
		rc = -EINVAL;
		goto free_str;
	}
	rc = user_len;
free_str:
	kfree(str);
out:
	/* input is consumed even on error so userspace does not retry */
	*offset += user_len;
	return rc;		/* number of input characters */
}
/*
* prints out actual debug level
*/
/*
 * debug_prolog_level_fn() - prolog of the "level" view: prints '-' when the
 * feature is switched off, otherwise the numeric level.
 */
static int
debug_prolog_level_fn(debug_info_t * id, struct debug_view *view, char *out_buf)
{
	if (id->level == DEBUG_OFF_LEVEL)
		return sprintf(out_buf,"-\n");
	return sprintf(out_buf, "%i\n", id->level);
}
/*
* reads new debug level
*/
/*
 * debug_input_level_fn() - input of the "level" view: '-' switches the
 * feature off, a number sets the new level.  Returns the number of consumed
 * characters or a negative errno.
 */
static int
debug_input_level_fn(debug_info_t * id, struct debug_view *view,
		     struct file *file, const char __user *user_buf,
		     size_t user_len, loff_t * offset)
{
	char *str;
	int rc,new_level;

	/* cap pathological write sizes */
	if (user_len > 0x10000)
		user_len = 0x10000;
	/* only a single write at offset 0 is accepted */
	if (*offset != 0){
		rc = -EPIPE;
		goto out;
	}
	str = debug_get_user_string(user_buf,user_len);
	if(IS_ERR(str)){
		rc = PTR_ERR(str);
		goto out;
	}
	if(str[0] == '-'){
		/* '-' means: switch logging for this area off */
		debug_set_level(id, DEBUG_OFF_LEVEL);
		rc = user_len;
		goto free_str;
	} else {
		new_level = debug_get_uint(str);
	}
	if(new_level < 0) {
		pr_warning("%s is not a valid level for a debug "
			   "feature\n", str);
		rc = -EINVAL;
	} else {
		/* range check happens inside debug_set_level() */
		debug_set_level(id, new_level);
		rc = user_len;
	}
free_str:
	kfree(str);
out:
	/* input is consumed even on error so userspace does not retry */
	*offset += user_len;
	return rc;		/* number of input characters */
}
/*
* flushes debug areas
*/
/*
 * debug_flush() - clear debug areas: area == DEBUG_FLUSH_ALL wipes every
 * area and resets the ring state, otherwise only the given area is cleared.
 * Out-of-range area numbers are silently ignored.
 */
static void debug_flush(debug_info_t* id, int area)
{
	unsigned long flags;
	int i,j;

	if(!id || !id->areas)
		return;
	spin_lock_irqsave(&id->lock,flags);
	if(area == DEBUG_FLUSH_ALL){
		/* restart at area 0 and zero all pages of all areas */
		id->active_area = 0;
		memset(id->active_entries, 0, id->nr_areas * sizeof(int));
		for (i = 0; i < id->nr_areas; i++) {
			id->active_pages[i] = 0;
			for(j = 0; j < id->pages_per_area; j++) {
				memset(id->areas[i][j], 0, PAGE_SIZE);
			}
		}
	} else if(area >= 0 && area < id->nr_areas) {
		/* clear the write cursor and all pages of one area */
		id->active_entries[area] = 0;
		id->active_pages[area] = 0;
		for(i = 0; i < id->pages_per_area; i++) {
			memset(id->areas[area][i],0,PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&id->lock,flags);
}
/*
* view function: flushes debug areas
*/
/*
 * debug_input_flush_fn() - input of the "flush" view: '-' flushes all debug
 * areas, a single digit flushes that area.  Returns the number of consumed
 * characters or a negative errno.
 *
 * Fixes: (1) rc was computed from the *unclamped* user_len, so for writes
 * larger than 0x10000 the reported byte count disagreed with the *offset
 * advance and with the other debug_input_* handlers - clamp first, then set
 * rc.  (2) a zero-length write no longer reads a command byte the user
 * never supplied.
 */
static int
debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
		     struct file *file, const char __user *user_buf,
		     size_t user_len, loff_t * offset)
{
	char input_buf[1];
	int rc;

	/* clamp before deriving the return value */
	if (user_len > 0x10000)
		user_len = 0x10000;
	rc = user_len;
	/* only a single write at offset 0 is accepted */
	if (*offset != 0){
		rc = -EPIPE;
		goto out;
	}
	/* nothing to parse on a zero-length write */
	if (user_len == 0)
		goto out;
	if (copy_from_user(input_buf, user_buf, 1)){
		rc = -EFAULT;
		goto out;
	}
	if(input_buf[0] == '-') {
		debug_flush(id, DEBUG_FLUSH_ALL);
		goto out;
	}
	if (isdigit(input_buf[0])) {
		int area = ((int) input_buf[0] - (int) '0');
		debug_flush(id, area);
		goto out;
	}
	pr_info("Flushing debug data failed because %c is not a valid "
		"area\n", input_buf[0]);
out:
	*offset += user_len;
	return rc;		/* number of input characters */
}
/*
* prints debug header in raw format
*/
/*
 * debug_raw_header_fn() - raw view header: the entry header is emitted as
 * the binary debug_entry_t itself.  Returns the number of bytes written.
 */
static int
debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
		    int area, debug_entry_t * entry, char *out_buf)
{
	memcpy(out_buf, entry, sizeof(debug_entry_t));
	return sizeof(debug_entry_t);
}
/*
* prints debug data in raw format
*/
/*
 * debug_raw_format_fn() - raw view payload: copy the event data verbatim.
 * Returns the number of bytes written (always buf_size).
 */
static int
debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
		    char *out_buf, const char *in_buf)
{
	memcpy(out_buf, in_buf, id->buf_size);
	return id->buf_size;
}
/*
* prints debug data in hex/ascii format
*/
/*
 * debug_hex_ascii_format_fn() - print the payload as a hex dump followed by
 * "| " and its printable-ASCII rendering (non-printable bytes become '.').
 * Returns the number of bytes written.
 */
static int
debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
			  char *out_buf, const char *in_buf)
{
	int i, len = 0;

	for (i = 0; i < id->buf_size; i++)
		len += sprintf(out_buf + len, "%02x ",
			       ((unsigned char *) in_buf)[i]);
	len += sprintf(out_buf + len, "| ");
	for (i = 0; i < id->buf_size; i++) {
		unsigned char c = in_buf[i];

		if (isascii(c) && isprint(c))
			len += sprintf(out_buf + len, "%c", c);
		else
			len += sprintf(out_buf + len, ".");
	}
	len += sprintf(out_buf + len, "\n");
	return len;
}
/*
* prints header for debug entry
*/
/*
 * debug_dflt_header_fn() - default entry header: area number, wall-clock
 * time, level, exception marker, cpu and caller address.
 * Returns the number of bytes written.
 */
int
debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
		     int area, debug_entry_t * entry, char *out_buf)
{
	struct timespec time_spec;
	char *except_str;
	unsigned long caller;
	int rc = 0;
	unsigned int level;

	level = entry->id.fields.level;
	/* convert the stored TOD clock value into a timespec */
	stck_to_timespec(entry->id.stck, &time_spec);

	/* '*' marks an exception entry, '-' a normal event */
	if (entry->id.fields.exception)
		except_str = "*";
	else
		except_str = "-";
	/* mask off the non-address PSW bits of the saved caller */
	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
	rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p ",
		      area, time_spec.tv_sec, time_spec.tv_nsec / 1000, level,
		      except_str, entry->id.fields.cpuid, (void *) caller);
	return rc;
}
/*
* prints debug data sprintf-formated:
* debug_sprinf_event/exception calls must be used together with this view
*/
#define DEBUG_SPRINTF_MAX_ARGS 10
/*
 * debug_sprintf_format_fn() - format an entry written by
 * debug_sprintf_event/exception by applying the stored format string to the
 * stored long arguments.  Returns the number of bytes written.
 */
static int
debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
			char *out_buf, debug_sprintf_entry_t *curr_event)
{
	int num_longs, num_used_args = 0,i, rc = 0;
	int index[DEBUG_SPRINTF_MAX_ARGS];

	/* count of longs fit into one entry */
	num_longs = id->buf_size / sizeof(long);

	if(num_longs < 1)
		goto out; /* bufsize of entry too small */
	if(num_longs == 1) {
		/* no args, we use only the string */
		strcpy(out_buf, curr_event->string);
		rc = strlen(curr_event->string);
		goto out;
	}

	/* number of arguments used for sprintf (without the format string) */
	num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));

	/* unused slots keep index 0, so the fixed 10-argument sprintf call
	 * below never reads beyond the args actually stored in the entry */
	memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));

	for(i = 0; i < num_used_args; i++)
		index[i] = i;

	/* note: a non-literal format string, but it originates from kernel
	 * callers of debug_sprintf_event(), never from userspace */
	rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
		     curr_event->args[index[1]], curr_event->args[index[2]],
		     curr_event->args[index[3]], curr_event->args[index[4]],
		     curr_event->args[index[5]], curr_event->args[index[6]],
		     curr_event->args[index[7]], curr_event->args[index[8]],
		     curr_event->args[index[9]]);
out:
	return rc;
}
/*
* clean up module
*/
/*
 * debug_exit() - module cleanup: undo debug_init() in reverse order.
 */
static void __exit debug_exit(void)
{
	debugfs_remove(debug_debugfs_root_entry);
	unregister_sysctl_table(s390dbf_sysctl_header);
}
/*
* module definitions
*/
postcore_initcall(debug_init);
module_exit(debug_exit);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(debug_register);
EXPORT_SYMBOL(debug_unregister);
EXPORT_SYMBOL(debug_set_level);
EXPORT_SYMBOL(debug_stop_all);
EXPORT_SYMBOL(debug_register_view);
EXPORT_SYMBOL(debug_unregister_view);
EXPORT_SYMBOL(debug_event_common);
EXPORT_SYMBOL(debug_exception_common);
EXPORT_SYMBOL(debug_hex_ascii_view);
EXPORT_SYMBOL(debug_raw_view);
EXPORT_SYMBOL(debug_dflt_header_fn);
EXPORT_SYMBOL(debug_sprintf_view);
EXPORT_SYMBOL(debug_sprintf_exception);
EXPORT_SYMBOL(debug_sprintf_event);
| gpl-2.0 |
lolhi/ef52-kernel | net/ethernet/eth.c | 4439 | 10997 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Ethernet-type device handling.
*
* Version: @(#)eth.c 1.0.7 05/25/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
*
* Fixes:
* Mr Linux : Arp problems
* Alan Cox : Generic queue tidyup (very tiny here)
* Alan Cox : eth_header ntohs should be htons
* Alan Cox : eth_rebuild_header missing an htons and
* minor other things.
* Tegge : Arp bug fixes.
* Florian : Removed many unnecessary functions, code cleanup
* and changes for new arp and skbuff.
* Alan Cox : Redid header building to reflect new format.
* Alan Cox : ARP only when compiled with CONFIG_INET
* Greg Page : 802.2 and SNAP stuff.
* Alan Cox : MAC layer pointers/new format.
* Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
* Alan Cox : Protect against forwarding explosions with
* older network drivers and IFF_ALLMULTI.
* Christer Weinigel : Better rebuild header message.
* Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup().
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
#include <asm/uaccess.h>
__setup("ether=", netdev_boot_setup);
/**
* eth_header - create the Ethernet header
* @skb: buffer to alter
* @dev: source device
* @type: Ethernet type field
* @daddr: destination address (NULL leave destination address)
* @saddr: source address (NULL use device source address)
* @len: packet length (<= skb->len)
*
*
* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
* in here instead.
*/
int eth_header(struct sk_buff *skb, struct net_device *dev,
	       unsigned short type,
	       const void *daddr, const void *saddr, unsigned len)
{
	struct ethhdr *hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);

	/* 802.3/802.2 frames carry the payload length where other frames
	 * carry the EtherType */
	if (type == ETH_P_802_3 || type == ETH_P_802_2)
		hdr->h_proto = htons(len);
	else
		hdr->h_proto = htons(type);

	/* source hardware address: fall back to the device address */
	memcpy(hdr->h_source, saddr ? saddr : dev->dev_addr, ETH_ALEN);

	if (daddr) {
		memcpy(hdr->h_dest, daddr, ETH_ALEN);
		return ETH_HLEN;
	}

	/* loopback / no-ARP devices get an all-zero destination */
	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		memset(hdr->h_dest, 0, ETH_ALEN);
		return ETH_HLEN;
	}

	/* destination still needs resolving (e.g. by ARP) */
	return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);
/**
* eth_rebuild_header- rebuild the Ethernet MAC header.
* @skb: socket buffer to update
*
* This is called after an ARP or IPV6 ndisc it's resolution on this
* sk_buff. We now let protocol (ARP) fill in the other fields.
*
* This routine CANNOT use cached dst->neigh!
* Really, it is used only when dst->neigh is wrong.
*/
int eth_rebuild_header(struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct net_device *dev = skb->dev;

	switch (eth->h_proto) {
#ifdef CONFIG_INET
	case htons(ETH_P_IP):
		/* let ARP fill in the destination hardware address */
		return arp_find(eth->h_dest, skb);
#endif
	default:
		/* unknown protocol: fall back to our own source address */
		printk(KERN_DEBUG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, ntohs(eth->h_proto));

		memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
		break;
	}

	return 0;
}
EXPORT_SYMBOL(eth_rebuild_header);
/**
* eth_type_trans - determine the packet's protocol ID.
* @skb: received socket data
* @dev: receiving network device
*
* The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*/
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb_pull_inline(skb, ETH_HLEN);
	eth = eth_hdr(skb);

	/* classify the packet by destination MAC */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}

	/*
	 *      This ALLMULTI check should be redundant by 1.4
	 *      so don't forget to remove it.
	 *
	 *      Seems, you forgot to remove it. All silly devices
	 *      seems to set IFF_PROMISC.
	 */

	else if (1 /*dev->flags&IFF_PROMISC */ ) {
		if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
			skb->pkt_type = PACKET_OTHERHOST;
	}

	/*
	 * Some variants of DSA tagging don't have an ethertype field
	 * at all, so we check here whether one of those tagging
	 * variants has been configured on the receiving interface,
	 * and if so, set skb->protocol without looking at the packet.
	 */
	if (netdev_uses_dsa_tags(dev))
		return htons(ETH_P_DSA);
	if (netdev_uses_trailer_tags(dev))
		return htons(ETH_P_TRAILER);

	/* values >= 1536 are EtherType ids, smaller ones are 802.3 lengths */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	/*
	 *      This is a magic hack to spot IPX packets. Older Novell breaks
	 *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *      won't work for fault tolerant netware but does for the rest.
	 */
	if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *      Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);
/**
* eth_header_parse - extract hardware address from packet
* @skb: packet to extract header from
* @haddr: destination buffer
*/
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
const struct ethhdr *eth = eth_hdr(skb);
memcpy(haddr, eth->h_source, ETH_ALEN);
return ETH_ALEN;
}
EXPORT_SYMBOL(eth_header_parse);
/**
* eth_header_cache - fill cache entry from neighbour
* @neigh: source neighbour
* @hh: destination cache entry
* @type: Ethernet type field
* Create an Ethernet header template from the neighbour.
*/
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
{
	struct ethhdr *eth;
	const struct net_device *dev = neigh->dev;

	/* the header template lives at the aligned tail of hh_data */
	eth = (struct ethhdr *)
	    (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));

	/* 802.3 frames carry a per-packet length, not a type: not cacheable */
	if (type == htons(ETH_P_802_3))
		return -1;

	eth->h_proto = type;
	memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
	hh->hh_len = ETH_HLEN;
	return 0;
}
EXPORT_SYMBOL(eth_header_cache);
/**
* eth_header_cache_update - update cache entry
* @hh: destination cache entry
* @dev: network device
* @haddr: new hardware address
*
* Called by Address Resolution module to notify changes in address.
*/
void eth_header_cache_update(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr)
{
memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
haddr, ETH_ALEN);
}
EXPORT_SYMBOL(eth_header_cache_update);
/**
* eth_mac_addr - set new Ethernet hardware address
* @dev: network device
* @p: socket address
* Change hardware address of device.
*
* This doesn't change hardware matching, so needs to be overridden
* for most real devices.
*/
int eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	/* the address may only be changed while the device is down */
	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	/* an explicitly assigned address is no longer "random" */
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	return 0;
}
EXPORT_SYMBOL(eth_mac_addr);
/**
* eth_change_mtu - set new MTU size
* @dev: network device
* @new_mtu: new Maximum Transfer Unit
*
* Allow changing MTU size. Needs to be overridden for devices
* supporting jumbo frames.
*/
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
	/* 68 is the minimum MTU an IPv4 host must accept (RFC 791);
	 * ETH_DATA_LEN (1500) is the standard Ethernet payload limit */
	if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(eth_change_mtu);
/* reject multicast and all-zero hardware addresses */
int eth_validate_addr(struct net_device *dev)
{
	return is_valid_ether_addr(dev->dev_addr) ? 0 : -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(eth_validate_addr);
/* default link-layer header operations for Ethernet-like devices */
const struct header_ops eth_header_ops ____cacheline_aligned = {
	.create		= eth_header,
	.parse		= eth_header_parse,
	.rebuild	= eth_rebuild_header,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
/**
* ether_setup - setup Ethernet network device
* @dev: network device
* Fill in the fields of the device structure with Ethernet-generic values.
*/
void ether_setup(struct net_device *dev)
{
dev->header_ops = ð_header_ops;
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000; /* Ethernet wants good queues */
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
dev->priv_flags |= IFF_TX_SKB_SHARING;
memset(dev->broadcast, 0xFF, ETH_ALEN);
}
EXPORT_SYMBOL(ether_setup);
/**
* alloc_etherdev_mqs - Allocates and sets up an Ethernet device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this Ethernet device
* @txqs: The number of TX queues this device has.
* @rxqs: The number of RX queues this device has.
*
* Fill in the fields of the device structure with Ethernet-generic
* values. Basically does everything except registering the device.
*
* Constructs a new net device, complete with a private data area of
* size (sizeof_priv). A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs)
{
	/* "eth%d" gives devices their conventional eth0/eth1/... names */
	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
/*
 * _format_mac_addr() - render a hardware address as colon-separated hex
 * octets ("aa:bb:cc:...").  Returns the number of characters written;
 * output is truncated to buflen by scnprintf.
 */
static size_t _format_mac_addr(char *buf, int buflen,
			       const unsigned char *addr, int len)
{
	char *cp = buf;
	int i;

	for (i = 0; i < len; i++) {
		/* separator before every octet except the first */
		if (i)
			cp += scnprintf(cp, buflen - (cp - buf), ":");
		cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
	}
	return cp - buf;
}
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
	size_t l;

	/* sysfs attribute buffers are one page; append a trailing newline */
	l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
	l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
	return (ssize_t)l;
}
EXPORT_SYMBOL(sysfs_format_mac);
| gpl-2.0 |
MoKee/android_kernel_samsung_msm8930-common | drivers/net/ethernet/mellanox/mlx4/catas.c | 4951 | 4600 | /*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/workqueue.h>
#include <linux/module.h>
#include "mlx4.h"
enum {
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
};
static DEFINE_SPINLOCK(catas_lock);
static LIST_HEAD(catas_list);
static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
"Reset device on internal errors if non-zero"
" (default 1, in SRIOV mode default is 0)");
/*
 * dump_err_buf() - log every word of the firmware's mapped catastrophic
 * error buffer (catas_size 32-bit words, byte-swapped for readability).
 */
static void dump_err_buf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_err(dev, "Internal error detected:\n");
	for (i = 0; i < priv->fw.catas_size; ++i)
		mlx4_err(dev, " buf[%02x]: %08x\n",
			 i, swab32(readl(priv->catas_err.map + i)));
}
/*
 * poll_catas() - timer callback: check the mapped error buffer for a
 * catastrophic firmware error and, if found, queue a device reset.
 */
static void poll_catas(unsigned long dev_ptr)
{
	struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* a non-zero first word of the error buffer signals an error */
	if (readl(priv->catas_err.map)) {
		dump_err_buf(dev);

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);

		if (internal_err_reset) {
			/* defer the actual reset to workqueue context */
			spin_lock(&catas_lock);
			list_add(&priv->catas_err.list, &catas_list);
			spin_unlock(&catas_lock);

			queue_work(mlx4_wq, &catas_work);
		}
	} else
		/* no error: re-arm the poll timer */
		mod_timer(&priv->catas_err.timer,
			  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
/*
 * catas_reset() - work handler: restart every device queued on catas_list
 * by poll_catas() after a catastrophic error.
 */
static void catas_reset(struct work_struct *work)
{
	struct mlx4_priv *priv, *tmppriv;
	struct mlx4_dev *dev;

	LIST_HEAD(tlist);
	int ret;

	/* atomically take over the global list of failed devices */
	spin_lock_irq(&catas_lock);
	list_splice_init(&catas_list, &tlist);
	spin_unlock_irq(&catas_lock);

	list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
		/* keep pdev: the restart below frees and reallocates priv */
		struct pci_dev *pdev = priv->dev.pdev;

		ret = mlx4_restart_one(priv->dev.pdev);
		/* 'priv' now is not valid */
		if (ret)
			pr_err("mlx4 %s: Reset failed (%d)\n",
			       pci_name(pdev), ret);
		else {
			dev  = pci_get_drvdata(pdev);
			mlx4_dbg(dev, "Reset succeeded\n");
		}
	}
}
/* Map the firmware error buffer and start the periodic poll timer.
 * On ioremap failure we only warn and return: the device still works,
 * it just loses catastrophic-error detection. */
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	phys_addr_t addr;

	/* In SRIOV mode the module parameter must default to 0:
	 * slaves must not reset the shared device. */
	if (mlx4_is_mfunc(dev))
		internal_err_reset = 0;

	INIT_LIST_HEAD(&priv->catas_err.list);
	init_timer(&priv->catas_err.timer);
	priv->catas_err.map = NULL;

	addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
		priv->fw.catas_offset;

	/* catas_size is in 32-bit words, hence the * 4. */
	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
	if (!priv->catas_err.map) {
		mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
			  (unsigned long long) addr);
		return;
	}

	priv->catas_err.timer.data     = (unsigned long) dev;
	priv->catas_err.timer.function = poll_catas;
	priv->catas_err.timer.expires  =
		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
	add_timer(&priv->catas_err.timer);
}
/* Stop the poll timer and unmap the error buffer.  list_del() is safe
 * even if the entry was never queued, because mlx4_start_catas_poll()
 * initialized it with INIT_LIST_HEAD. */
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	del_timer_sync(&priv->catas_err.timer);

	if (priv->catas_err.map)
		iounmap(priv->catas_err.map);

	spin_lock_irq(&catas_lock);
	list_del(&priv->catas_err.list);
	spin_unlock_irq(&catas_lock);
}
/* One-time driver init: prepare the shared reset work item. */
void __init mlx4_catas_init(void)
{
	INIT_WORK(&catas_work, catas_reset);
}
| gpl-2.0 |
spacecaker/android_kernel_acer_swing_cm10.1_MDP4 | drivers/net/ethernet/mellanox/mlx4/icm.c | 4951 | 10813 | /*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
#include "fw.h"
/*
* We allocate in as big chunks as we can, up to a maximum of 256 KB
* per chunk.
*/
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,	/* max bytes per allocation attempt (256 KB) */
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18	/* bytes of ICM covered by one table chunk */
};
/* Undo mlx4_alloc_icm_pages() for one chunk: unmap the scatterlist
 * (if it was mapped) and free each page-block. */
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}
/* Undo mlx4_alloc_icm_coherent() for one chunk: each scatterlist entry
 * holds its own coherent DMA allocation. */
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}
/* Free an entire ICM area: all chunks plus the icm struct itself.
 * NULL is accepted (no-op), which lets error paths call this blindly.
 * @coherent must match the flag used at allocation time. */
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}
/* Allocate one 2^order-page block and record it in scatterlist entry
 * @mem.  Returns 0 or -ENOMEM. */
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}
/* Coherent-DMA variant of mlx4_alloc_icm_pages(): allocate a coherent
 * buffer and describe it in @mem, filling in the DMA address/length
 * directly (no pci_map_sg pass is needed for coherent chunks). */
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}
/* Allocate @npages pages of ICM as a list of chunks, each chunk holding
 * up to MLX4_ICM_CHUNK_LEN scatterlist entries.  Allocation starts at
 * the largest order (MLX4_ICM_ALLOC_SIZE) and halves the order on
 * failure until a single page fails, at which point we give up.
 * Returns the new area or NULL; partial allocations are freed. */
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	/* Bookkeeping structs themselves must come from low memory. */
	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		/* Never allocate more than is still needed. */
		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (ret) {
			/* Retry with a smaller order before giving up. */
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			/* Chunk is full: DMA-map it now. */
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	/* Map the final, partially-filled chunk (non-coherent only). */
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}
/* Firmware command: map @icm at ICM virtual address @virt. */
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
/* Firmware command: unmap @page_count ICM pages starting at @virt. */
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
/* Firmware command: map the auxiliary ICM area (no virtual address). */
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}
/* Firmware command: unmap the auxiliary ICM area. */
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
/* Pin the ICM chunk backing object @obj: allocate and firmware-map it
 * on first use, otherwise just bump its refcount.  Pair each call with
 * mlx4_table_put().  Returns 0 or -ENOMEM. */
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	/* Chunk index; num_obj is a power of two so the mask wraps obj. */
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
int i;
i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
mutex_lock(&table->mutex);
if (--table->icm[i]->refcount == 0) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
mutex_unlock(&table->mutex);
}
/* Return the CPU virtual address of object @obj inside a lowmem table
 * (NULL for highmem tables or unmapped chunks).  Optionally reports
 * the object's bus address through @dma_handle.
 * NOTE(review): idx is a 32-bit product of obj * obj_size — assumes
 * tables small enough not to overflow int; confirm for large tables. */
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	/* Walk chunks and sg entries until the byte offset is reached. */
	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}
/* Pin every chunk backing objects [start, end], stepping one chunk at
 * a time.  On failure, already-pinned chunks are released before
 * returning the error. */
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	/* Unwind in reverse; i currently points at the failed chunk. */
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}
/* Release the object range [start, end] pinned earlier by
 * mlx4_table_get_range(), one ICM chunk at a time. */
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int chunk_objs = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int obj;

	for (obj = start; obj <= end; obj += chunk_objs)
		mlx4_table_put(dev, table, obj);
}
/* Initialize an ICM table descriptor and pre-map the chunks that back
 * the first @reserved objects (firmware-owned; pinned forever).
 * Returns 0 or -ENOMEM; on failure every chunk mapped so far is
 * unmapped and freed. */
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		/* Last chunk may cover less than a full chunk of objects. */
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		/* (u64) cast: the 32-bit product i * MLX4_TABLE_CHUNK_SIZE can
		 * overflow for large tables (matches mlx4_table_get()). */
		if (mlx4_MAP_ICM(dev, table->icm[i],
				 virt + (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
int i;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table->icm);
}
| gpl-2.0 |
curbthepain/revkernel_titan | drivers/media/video/pms.c | 5463 | 25880 | /*
* Media Vision Pro Movie Studio
* or
* "all you need is an I2C bus some RAM and a prayer"
*
* This draws heavily on code
*
* (c) Wolfgang Koehler, wolf@first.gmd.de, Dec. 1994
* Kiefernring 15
* 14478 Potsdam, Germany
*
* Most of this code is directly derived from his userspace driver.
* His driver works so send any reports to alan@lxorguk.ukuu.org.uk
* unless the userspace driver also doesn't work for you...
*
* Changes:
* 25-11-2009 Hans Verkuil <hverkuil@xs4all.nl>
* - converted to version 2 of the V4L API.
* 08/07/2003 Daniele Bellucci <bellucda@tiscali.it>
* - pms_capture: report back -EFAULT
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.4");

/* Video decoder chip variants detected at probe time. */
#define MOTOROLA 1
#define PHILIPS2 2 /* SAA7191 */
#define PHILIPS1 3
#define MVVMEMORYWIDTH 0x40 /* 512 bytes */

/* One cached I2C register write (see pms_i2c_write/pms_i2c_read). */
struct i2c_info {
	u8 slave;	/* I2C slave address */
	u8 sub;		/* sub-register within the slave */
	u8 data;	/* last value written */
	u8 hits;
};
/* Per-device state for the Media Vision Pro Movie Studio board. */
struct pms {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	int height;			/* current capture height */
	int width;			/* current capture width */
	int depth;			/* 15 (RGB555) or 16 (RGB565) */
	int input;			/* selected input, 0-3 */
	s32 brightness, saturation, hue, contrast;
	struct mutex lock;		/* serializes hardware access */
	int i2c_count;			/* entries used in i2cinfo[] */
	struct i2c_info i2cinfo[64];	/* write-back cache of I2C regs */

	int decoder;			/* MOTOROLA/PHILIPS1/PHILIPS2 */
	int standard;	/* 0 - auto 1 - ntsc 2 - pal 3 - secam */
	v4l2_std_id std;
	int io;				/* index I/O port */
	int data;			/* data I/O port (io + 1) */
	void __iomem *mem;		/* shared capture memory window */
};

static struct pms pms_card;
/*
 * I/O ports and Shared Memory
 */

static int io_port = 0x250;		/* base index/data port pair */
module_param(io_port, int, 0);

static int mem_base = 0xc8000;		/* ISA shared-memory window */
module_param(mem_base, int, 0);

static int video_nr = -1;		/* /dev/videoN number, -1 = auto */
module_param(video_nr, int, 0);
/* Write @value to indexed board register @index (index+data in one 16-bit out). */
static inline void mvv_write(struct pms *dev, u8 index, u8 value)
{
	outw(index | (value << 8), dev->io);
}
/* Read indexed board register @index: select via io port, read data port. */
static inline u8 mvv_read(struct pms *dev, u8 index)
{
	outb(index, dev->io);
	return inb(dev->data);
}
/* Probe I2C slave @slave through the board's I2C engine.  Returns the
 * status byte read back, or -1 on error.  The bounded busy-wait loops
 * poll the engine's busy bit. */
static int pms_i2c_stat(struct pms *dev, u8 slave)
{
	int counter = 0;
	int i;

	outb(0x28, dev->io);

	while ((inb(dev->data) & 0x01) == 0)
		if (counter++ == 256)
			break;

	while ((inb(dev->data) & 0x01) != 0)
		if (counter++ == 256)
			break;

	outb(slave, dev->io);

	counter = 0;
	while ((inb(dev->data) & 0x01) == 0)
		if (counter++ == 256)
			break;

	while ((inb(dev->data) & 0x01) != 0)
		if (counter++ == 256)
			break;

	for (i = 0; i < 12; i++) {
		char st = inb(dev->data);

		if ((st & 2) != 0)
			return -1;
		if ((st & 1) == 0)
			break;
	}

	outb(0x29, dev->io);
	return inb(dev->data);
}
/* Write @data to register @sub of I2C slave @slave, with a software
 * write-back cache: identical repeated writes are skipped entirely.
 * Returns the engine status byte, 0 if skipped, or -1 on bus error. */
static int pms_i2c_write(struct pms *dev, u16 slave, u16 sub, u16 data)
{
	int skip = 0;
	int count;
	int i;

	/* Look the register up in the cache; skip the bus transaction if
	 * it already holds @data. */
	for (i = 0; i < dev->i2c_count; i++) {
		if ((dev->i2cinfo[i].slave == slave) &&
		    (dev->i2cinfo[i].sub == sub)) {
			if (dev->i2cinfo[i].data == data)
				skip = 1;
			dev->i2cinfo[i].data = data;
			i = dev->i2c_count + 1;	/* terminate and flag "found" */
		}
	}

	/* Not cached yet: remember it if there is room. */
	if (i == dev->i2c_count && dev->i2c_count < 64) {
		dev->i2cinfo[dev->i2c_count].slave = slave;
		dev->i2cinfo[dev->i2c_count].sub = sub;
		dev->i2cinfo[dev->i2c_count].data = data;
		dev->i2c_count++;
	}

	if (skip)
		return 0;

	mvv_write(dev, 0x29, sub);
	mvv_write(dev, 0x2A, data);
	mvv_write(dev, 0x28, slave);

	outb(0x28, dev->io);

	/* Bounded busy-wait for the transaction.  count must advance or
	 * the 255 guard can never fire (previously the counter was never
	 * incremented, so unresponsive hardware would spin forever). */
	count = 0;
	while ((inb(dev->data) & 1) == 0)
		if (count++ > 255)
			break;
	while ((inb(dev->data) & 1) != 0)
		if (count++ > 255)
			break;

	count = inb(dev->data);

	if (count & 2)
		return -1;
	return count;
}
static int pms_i2c_read(struct pms *dev, int slave, int sub)
{
int i;
for (i = 0; i < dev->i2c_count; i++) {
if (dev->i2cinfo[i].slave == slave && dev->i2cinfo[i].sub == sub)
return dev->i2cinfo[i].data;
}
return 0;
}
/* Read-modify-write an I2C register: clear bits outside @and, then set
 * the bits in @or. */
static void pms_i2c_andor(struct pms *dev, int slave, int sub, int and, int or)
{
	u8 val = pms_i2c_read(dev, slave, sub);

	pms_i2c_write(dev, slave, sub, (val & and) | or);
}
/*
* Control functions
*/
/* Select composite vs S-Video on the decoder (PHILIPS2 only has a
 * selectable bit; the others are untouched). */
static void pms_videosource(struct pms *dev, short source)
{
	switch (dev->decoder) {
	case MOTOROLA:
		break;
	case PHILIPS2:
		pms_i2c_andor(dev, 0x8a, 0x06, 0x7f, source ? 0x80 : 0);
		break;
	case PHILIPS1:
		break;
	}
	mvv_write(dev, 0x2E, 0x31);
	/* Was: mvv_write(dev, 0x2E, source ? 0x31 : 0x30);
	   But could not make this work correctly. Only Composite input
	   worked for me. */
}
/* Program the decoder's hue register (per-chip slave/register map). */
static void pms_hue(struct pms *dev, short hue)
{
	switch (dev->decoder) {
	case MOTOROLA:
		pms_i2c_write(dev, 0x8a, 0x00, hue);
		break;
	case PHILIPS2:
		pms_i2c_write(dev, 0x8a, 0x07, hue);
		break;
	case PHILIPS1:
		pms_i2c_write(dev, 0x42, 0x07, hue);
		break;
	}
}
/* Program the decoder's saturation register.
 * NOTE(review): unlike pms_hue(), no PHILIPS2 case is handled here —
 * presumably intentional for the SAA7191, but worth confirming. */
static void pms_saturation(struct pms *dev, short sat)
{
	switch (dev->decoder) {
	case MOTOROLA:
		pms_i2c_write(dev, 0x8a, 0x00, sat);
		break;
	case PHILIPS1:
		pms_i2c_write(dev, 0x42, 0x12, sat);
		break;
	}
}
/* Program the decoder's contrast register (no PHILIPS2 handling,
 * mirroring pms_saturation()). */
static void pms_contrast(struct pms *dev, short contrast)
{
	switch (dev->decoder) {
	case MOTOROLA:
		pms_i2c_write(dev, 0x8a, 0x00, contrast);
		break;
	case PHILIPS1:
		pms_i2c_write(dev, 0x42, 0x13, contrast);
		break;
	}
}
/* Program the decoder's brightness register.  The triple write on the
 * MOTOROLA path is inherited from the original userspace driver —
 * presumably a settling quirk; do not "simplify" without hardware. */
static void pms_brightness(struct pms *dev, short brightness)
{
	switch (dev->decoder) {
	case MOTOROLA:
		pms_i2c_write(dev, 0x8a, 0x00, brightness);
		pms_i2c_write(dev, 0x8a, 0x00, brightness);
		pms_i2c_write(dev, 0x8a, 0x00, brightness);
		break;
	case PHILIPS1:
		pms_i2c_write(dev, 0x42, 0x19, brightness);
		break;
	}
}
/* Select the TV standard on a Philips decoder.
 * @format: 0 auto, 1 NTSC, 2 PAL, 3 SECAM (also cached in dev->standard). */
static void pms_format(struct pms *dev, short format)
{
	int target;

	dev->standard = format;

	if (dev->decoder == PHILIPS1)
		target = 0x42;
	else if (dev->decoder == PHILIPS2)
		target = 0x8a;
	else
		return;

	switch (format) {
	case 0:	/* Auto */
		pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
		pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x80);
		break;
	case 1:	/* NTSC */
		pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
		pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x40);
		break;
	case 2:	/* PAL */
		pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x00);
		pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x00);
		break;
	case 3:	/* SECAM */
		pms_i2c_andor(dev, target, 0x0d, 0xfe, 0x01);
		pms_i2c_andor(dev, target, 0x0f, 0x3f, 0x00);
		break;
	}
}
#ifdef FOR_FUTURE_EXPANSION
/*
* These features of the PMS card are not currently exposes. They
* could become a private v4l ioctl for PMSCONFIG or somesuch if
* people need it. We also don't yet use the PMS interrupt.
*/
/* Set the horizontal capture start position.
 * NOTE(review): the decoder/address pairing here looks swapped — every
 * other helper uses 0x8a for PHILIPS2 and 0x42 for PHILIPS1, but this
 * (unused, #ifdef'd-out) function does the opposite; verify before
 * enabling FOR_FUTURE_EXPANSION. */
static void pms_hstart(struct pms *dev, short start)
{
	switch (dev->decoder) {
	case PHILIPS1:
		pms_i2c_write(dev, 0x8a, 0x05, start);
		pms_i2c_write(dev, 0x8a, 0x18, start);
		break;
	case PHILIPS2:
		pms_i2c_write(dev, 0x42, 0x05, start);
		pms_i2c_write(dev, 0x42, 0x18, start);
		break;
	}
}
/*
* Bandpass filters
*/
/* Select the decoder's bandpass filter setting (2-bit field). */
static void pms_bandpass(struct pms *dev, short pass)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x06, 0xcf, (pass & 0x03) << 4);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x06, 0xcf, (pass & 0x03) << 4);
}
/* Set the decoder's anti-snow (noise suppression) level (2-bit field). */
static void pms_antisnow(struct pms *dev, short snow)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x06, 0xf3, (snow & 0x03) << 2);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x06, 0xf3, (snow & 0x03) << 2);
}
/* Set the decoder's sharpness level (2-bit field). */
static void pms_sharpness(struct pms *dev, short sharp)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x06, 0xfc, sharp & 0x03);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x06, 0xfc, sharp & 0x03);
}
/* Set the decoder's chroma AGC mode (2-bit field). */
static void pms_chromaagc(struct pms *dev, short agc)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x0c, 0x9f, (agc & 0x03) << 5);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x0c, 0x9f, (agc & 0x03) << 5);
}
/* Set the decoder's vertical noise reduction mode (2-bit field). */
static void pms_vertnoise(struct pms *dev, short noise)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x10, 0xfc, noise & 3);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x10, 0xfc, noise & 3);
}
/* Force colour decoding on/off.
 * NOTE(review): the PHILIPS1 branch uses mask 0x7 where the PHILIPS2
 * branch uses 0x7f — probably a typo inherited from the original
 * driver (same pattern in pms_antigamma); confirm against hardware. */
static void pms_forcecolour(struct pms *dev, short colour)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x0c, 0x7f, (colour & 1) << 7);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x0c, 0x7, (colour & 1) << 7);
}
/* Enable/disable anti-gamma correction (single-bit field; see the
 * mask-width note on pms_forcecolour for the PHILIPS1 0x7 mask). */
static void pms_antigamma(struct pms *dev, short gamma)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0xb8, 0x00, 0x7f, (gamma & 1) << 7);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x20, 0x7, (gamma & 1) << 7);
}
/* Enable/disable the decoder's input prefilter (single-bit field). */
static void pms_prefilter(struct pms *dev, short filter)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x06, 0xbf, (filter & 1) << 6);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x06, 0xbf, (filter & 1) << 6);
}
/* Select the horizontal filter mode (3-bit field). */
static void pms_hfilter(struct pms *dev, short filter)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0xb8, 0x04, 0x1f, (filter & 7) << 5);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x24, 0x1f, (filter & 7) << 5);
}
/* Select the vertical filter mode (2-bit field). */
static void pms_vfilter(struct pms *dev, short filter)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0xb8, 0x08, 0x9f, (filter & 3) << 5);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x28, 0x9f, (filter & 3) << 5);
}
/* Set the colour-killer threshold (5-bit field, written to two regs). */
static void pms_killcolour(struct pms *dev, short colour)
{
	if (dev->decoder == PHILIPS2) {
		pms_i2c_andor(dev, 0x8a, 0x08, 0x07, (colour & 0x1f) << 3);
		pms_i2c_andor(dev, 0x8a, 0x09, 0x07, (colour & 0x1f) << 3);
	} else if (dev->decoder == PHILIPS1) {
		pms_i2c_andor(dev, 0x42, 0x08, 0x07, (colour & 0x1f) << 3);
		pms_i2c_andor(dev, 0x42, 0x09, 0x07, (colour & 0x1f) << 3);
	}
}
/* Set the manual chroma gain register. */
static void pms_chromagain(struct pms *dev, short chroma)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_write(dev, 0x8a, 0x11, chroma);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_write(dev, 0x42, 0x11, chroma);
}
/* Write the low byte of the spatial compensation value. */
static void pms_spacialcompl(struct pms *dev, short data)
{
	mvv_write(dev, 0x3b, data);
}
/* Write the high byte of the spatial compensation value. */
static void pms_spacialcomph(struct pms *dev, short data)
{
	mvv_write(dev, 0x3a, data);
}
/* Set the 9-bit vertical capture start position. */
static void pms_vstart(struct pms *dev, short start)
{
	mvv_write(dev, 0x16, start);
	mvv_write(dev, 0x17, (start >> 8) & 0x01);
}
#endif
/* Enable/disable SECAM cross-colour reduction (single-bit field). */
static void pms_secamcross(struct pms *dev, short cross)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x0f, 0xdf, (cross & 1) << 5);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x0f, 0xdf, (cross & 1) << 5);
}
/* Set the switch-sense thresholds (written to two registers). */
static void pms_swsense(struct pms *dev, short sense)
{
	if (dev->decoder == PHILIPS2) {
		pms_i2c_write(dev, 0x8a, 0x0a, sense);
		pms_i2c_write(dev, 0x8a, 0x0b, sense);
	} else if (dev->decoder == PHILIPS1) {
		pms_i2c_write(dev, 0x42, 0x0a, sense);
		pms_i2c_write(dev, 0x42, 0x0b, sense);
	}
}
/* Program the frame-rate divider.  @frr divides the standard's native
 * rate (30 fps for 525/60, 25 for 625/50); 0 is rejected to avoid a
 * divide-by-zero. */
static void pms_framerate(struct pms *dev, short frr)
{
	int fps = (dev->std & V4L2_STD_525_60) ? 30 : 25;

	if (frr == 0)
		return;
	fps = fps/frr;
	mvv_write(dev, 0x14, 0x80 | fps);
	mvv_write(dev, 0x15, 1);
}
/* Program the vertical decimation ratio registers. */
static void pms_vert(struct pms *dev, u8 deciden, u8 decinum)
{
	mvv_write(dev, 0x1c, deciden);	/* Denominator */
	mvv_write(dev, 0x1d, decinum);	/* Numerator */
}
/*
 * Turn 16bit ratios into best small ratio the chipset can grok
 */

/* Reduce decinum/deciden by common factors (5 once, then 3s and 2s),
 * then force the denominator below 32 (the hardware limit), rounding
 * the numerator up, before programming pms_vert(). */
static void pms_vertdeci(struct pms *dev, unsigned short decinum, unsigned short deciden)
{
	/* Knock it down by / 5 once */
	if (decinum % 5 == 0) {
		deciden /= 5;
		decinum /= 5;
	}

	/*
	 * 3's
	 */
	while (decinum % 3 == 0 && deciden % 3 == 0) {
		deciden /= 3;
		decinum /= 3;
	}

	/*
	 * 2's
	 */
	while (decinum % 2 == 0 && deciden % 2 == 0) {
		decinum /= 2;
		deciden /= 2;
	}

	/*
	 * Fudgyify
	 */
	while (deciden > 32) {
		deciden /= 2;
		decinum = (decinum + 1) / 2;
	}

	if (deciden == 32)
		deciden--;

	pms_vert(dev, deciden, decinum);
}
/* Reduce and program the horizontal decimation ratio; numerators above
 * 512 are clamped to the 512/640 fallback.  The denominator is forced
 * below the hardware limit of 32. */
static void pms_horzdeci(struct pms *dev, short decinum, short deciden)
{
	if (decinum <= 512) {
		if (decinum % 5 == 0) {
			decinum /= 5;
			deciden /= 5;
		}
	} else {
		decinum = 512;
		deciden = 640;	/* 768 would be ideal */
	}

	/* Strip common factors of two. */
	while (((decinum | deciden) & 1) == 0) {
		decinum >>= 1;
		deciden >>= 1;
	}
	while (deciden > 32) {
		deciden >>= 1;
		decinum = (decinum + 1) >> 1;
	}
	if (deciden == 32)
		deciden--;

	mvv_write(dev, 0x24, 0x80 | deciden);
	mvv_write(dev, 0x25, decinum);
}
/* Program the full capture geometry: frame-grab height (clamped to the
 * 280-line hardware limit), per-standard line counts, vertical and
 * horizontal decimation, and the memory window layout.  The register
 * sequence is order-sensitive; do not reorder. */
static void pms_resolution(struct pms *dev, short width, short height)
{
	int fg_height;

	fg_height = height;
	if (fg_height > 280)
		fg_height = 280;

	mvv_write(dev, 0x18, fg_height);
	mvv_write(dev, 0x19, fg_height >> 8);

	if (dev->std & V4L2_STD_525_60) {
		mvv_write(dev, 0x1a, 0xfc);
		mvv_write(dev, 0x1b, 0x00);
		if (height > fg_height)
			pms_vertdeci(dev, 240, 240);
		else
			pms_vertdeci(dev, fg_height, 240);
	} else {
		mvv_write(dev, 0x1a, 0x1a);
		mvv_write(dev, 0x1b, 0x01);
		if (fg_height > 256)
			pms_vertdeci(dev, 270, 270);
		else
			pms_vertdeci(dev, fg_height, 270);
	}

	mvv_write(dev, 0x12, 0);
	mvv_write(dev, 0x13, MVVMEMORYWIDTH);
	mvv_write(dev, 0x42, 0x00);
	mvv_write(dev, 0x43, 0x00);
	mvv_write(dev, 0x44, MVVMEMORYWIDTH);

	mvv_write(dev, 0x22, width + 8);
	mvv_write(dev, 0x23, (width + 8) >> 8);

	if (dev->std & V4L2_STD_525_60)
		pms_horzdeci(dev, width, 640);
	else
		pms_horzdeci(dev, width + 8, 768);

	mvv_write(dev, 0x30, mvv_read(dev, 0x30) & 0xfe);
	mvv_write(dev, 0x08, mvv_read(dev, 0x08) | 0x01);
	mvv_write(dev, 0x01, mvv_read(dev, 0x01) & 0xfd);
	mvv_write(dev, 0x32, 0x00);
	mvv_write(dev, 0x33, MVVMEMORYWIDTH);
}
/*
* Set Input
*/
/* Select VCR vs broadcast input timing on the decoder (single bit). */
static void pms_vcrinput(struct pms *dev, short input)
{
	if (dev->decoder == PHILIPS2)
		pms_i2c_andor(dev, 0x8a, 0x0d, 0x7f, (input & 1) << 7);
	else if (dev->decoder == PHILIPS1)
		pms_i2c_andor(dev, 0x42, 0x0d, 0x7f, (input & 1) << 7);
}
/* Capture one frame into the user buffer, line by line, through the
 * board's shared-memory FIFO window.  Returns bytes copied, or -EFAULT
 * if nothing was copied before a fault.
 * NOTE(review): tmp is a VLA on the kernel stack (up to 2*width + 32
 * bytes); acceptable for the bounded widths try_fmt allows, but worth
 * confirming against stack budget. */
static int pms_capture(struct pms *dev, char __user *buf, int rgb555, int count)
{
	int y;
	int dw = 2 * dev->width;	/* bytes per line (16 bpp) */
	char tmp[dw + 32];	/* using a temp buffer is faster than direct */
	int cnt = 0;
	int len = 0;
	unsigned char r8 = 0x5;	/* value for reg8 */

	if (rgb555)
		r8 |= 0x20;	/* else use untranslated rgb = 565 */
	mvv_write(dev, 0x08, r8);	/* capture rgb555/565, init DRAM, PC enable */

/*	printf("%d %d %d %d %d %x %x\n",width,height,voff,nom,den,mvv_buf); */

	for (y = 0; y < dev->height; y++) {
		writeb(0, dev->mem);	/* synchronisiert neue Zeile -- syncs to a new line */

		/*
		 * This is in truth a fifo, be very careful as if you
		 * forgot this odd things will occur 8)
		 */
		memcpy_fromio(tmp, dev->mem, dw + 32);	/* discard 16 word */

		/* Bresenham-style accumulator: emits lines at the rate the
		 * vertical decimation produced them. */
		cnt -= dev->height;
		while (cnt <= 0) {
			/*
			 * Don't copy too far
			 */
			int dt = dw;
			if (dt + len > count)
				dt = count - len;
			cnt += dev->height;
			if (copy_to_user(buf, tmp + 32, dt))
				return len ? len : -EFAULT;
			buf += dt;
			len += dt;
		}
	}
	return len;
}
/*
* Video4linux interfacing
*/
/* VIDIOC_QUERYCAP: identify the driver and its capture/read caps. */
static int pms_querycap(struct file *file, void *priv,
			struct v4l2_capability *vcap)
{
	struct pms *dev = video_drvdata(file);

	strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
	strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
	strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
	vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
	return 0;
}
/* VIDIOC_ENUMINPUT: four inputs — bit 0 selects S-Video vs composite,
 * bit 1 selects VCR timing (see pms_s_input). */
static int pms_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
	static const char *inputs[4] = {
		"Composite",
		"S-Video",
		"Composite (VCR)",
		"S-Video (VCR)"
	};

	if (vin->index > 3)
		return -EINVAL;
	strlcpy(vin->name, inputs[vin->index], sizeof(vin->name));
	vin->type = V4L2_INPUT_TYPE_CAMERA;
	vin->audioset = 0;
	vin->tuner = 0;
	vin->std = V4L2_STD_ALL;
	vin->status = 0;
	return 0;
}
/* VIDIOC_G_INPUT: report the currently selected input. */
static int pms_g_input(struct file *file, void *fh, unsigned int *inp)
{
	struct pms *dev = video_drvdata(file);

	*inp = dev->input;
	return 0;
}
/* VIDIOC_S_INPUT: program source (bit 0) and VCR mode (bit 1) under
 * the device lock. */
static int pms_s_input(struct file *file, void *fh, unsigned int inp)
{
	struct pms *dev = video_drvdata(file);

	if (inp > 3)
		return -EINVAL;

	mutex_lock(&dev->lock);
	dev->input = inp;
	pms_videosource(dev, inp & 1);
	pms_vcrinput(dev, inp >> 1);
	mutex_unlock(&dev->lock);
	return 0;
}
/* VIDIOC_G_STD: report the currently configured TV standard. */
static int pms_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
	struct pms *dev = video_drvdata(file);

	*std = dev->std;
	return 0;
}
/* VIDIOC_S_STD: program frame rate, SECAM cross-colour and decoder
 * format for the requested standard.  Returns -EINVAL for standards
 * the hardware cannot do (previously that error was computed but the
 * function unconditionally returned 0, silently accepting bad input). */
static int pms_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
	struct pms *dev = video_drvdata(file);
	int ret = 0;

	dev->std = *std;
	mutex_lock(&dev->lock);
	if (dev->std & V4L2_STD_NTSC) {
		pms_framerate(dev, 30);
		pms_secamcross(dev, 0);
		pms_format(dev, 1);
	} else if (dev->std & V4L2_STD_PAL) {
		pms_framerate(dev, 25);
		pms_secamcross(dev, 0);
		pms_format(dev, 2);
	} else if (dev->std & V4L2_STD_SECAM) {
		pms_framerate(dev, 25);
		pms_secamcross(dev, 1);
		pms_format(dev, 2);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&dev->lock);
	return ret;
}
/* VIDIOC_QUERYCTRL: describe the four supported picture controls,
 * each 0-255 with the listed defaults. */
static int pms_queryctrl(struct file *file, void *priv,
			 struct v4l2_queryctrl *qc)
{
	switch (qc->id) {
	case V4L2_CID_BRIGHTNESS:
		return v4l2_ctrl_query_fill(qc, 0, 255, 1, 139);
	case V4L2_CID_CONTRAST:
		return v4l2_ctrl_query_fill(qc, 0, 255, 1, 70);
	case V4L2_CID_SATURATION:
		return v4l2_ctrl_query_fill(qc, 0, 255, 1, 64);
	case V4L2_CID_HUE:
		return v4l2_ctrl_query_fill(qc, 0, 255, 1, 0);
	}
	return -EINVAL;
}
/* VIDIOC_G_CTRL: return the cached value of a picture control. */
static int pms_g_ctrl(struct file *file, void *priv,
		      struct v4l2_control *ctrl)
{
	struct pms *dev = video_drvdata(file);
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		ctrl->value = dev->brightness;
		break;
	case V4L2_CID_CONTRAST:
		ctrl->value = dev->contrast;
		break;
	case V4L2_CID_SATURATION:
		ctrl->value = dev->saturation;
		break;
	case V4L2_CID_HUE:
		ctrl->value = dev->hue;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* VIDIOC_S_CTRL: cache the new control value and push it to the
 * decoder under the device lock. */
static int pms_s_ctrl(struct file *file, void *priv,
		      struct v4l2_control *ctrl)
{
	struct pms *dev = video_drvdata(file);
	int ret = 0;

	mutex_lock(&dev->lock);
	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		dev->brightness = ctrl->value;
		pms_brightness(dev, dev->brightness);
		break;
	case V4L2_CID_CONTRAST:
		dev->contrast = ctrl->value;
		pms_contrast(dev, dev->contrast);
		break;
	case V4L2_CID_SATURATION:
		dev->saturation = ctrl->value;
		pms_saturation(dev, dev->saturation);
		break;
	case V4L2_CID_HUE:
		dev->hue = ctrl->value;
		pms_hue(dev, dev->hue);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->lock);
	return ret;
}
/* VIDIOC_G_FMT: report the current capture format.  The pixel format
 * is derived from dev->depth (15 = RGB555, 16 = RGB565, as set by
 * pms_s_fmt_vid_cap); the old code tested dev->width == 15, which can
 * never be true since try_fmt enforces width >= 16, so RGB555 was
 * never reported. */
static int pms_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct pms *dev = video_drvdata(file);
	struct v4l2_pix_format *pix = &fmt->fmt.pix;

	pix->width = dev->width;
	pix->height = dev->height;
	pix->pixelformat = dev->depth == 15 ?
			    V4L2_PIX_FMT_RGB555 : V4L2_PIX_FMT_RGB565;
	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = 2 * dev->width;
	pix->sizeimage = 2 * dev->width * dev->height;
	/* Just a guess */
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	return 0;
}
/* VIDIOC_TRY_FMT: validate the requested geometry (16x16 .. 640x480)
 * and pixel format (RGB555/RGB565), then fill in derived fields. */
static int pms_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct v4l2_pix_format *pix = &fmt->fmt.pix;

	if (pix->height < 16 || pix->height > 480)
		return -EINVAL;
	if (pix->width < 16 || pix->width > 640)
		return -EINVAL;
	if (pix->pixelformat != V4L2_PIX_FMT_RGB555 &&
	    pix->pixelformat != V4L2_PIX_FMT_RGB565)
		return -EINVAL;
	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = 2 * pix->width;
	pix->sizeimage = 2 * pix->width * pix->height;
	/* Just a guess */
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	return 0;
}
/* VIDIOC_S_FMT: validate via try_fmt, then cache the geometry/depth
 * and reprogram the hardware under the device lock. */
static int pms_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct pms *dev = video_drvdata(file);
	struct v4l2_pix_format *pix = &fmt->fmt.pix;
	int ret = pms_try_fmt_vid_cap(file, fh, fmt);

	if (ret)
		return ret;
	mutex_lock(&dev->lock);
	dev->width = pix->width;
	dev->height = pix->height;
	dev->depth = (pix->pixelformat == V4L2_PIX_FMT_RGB555) ? 15 : 16;
	pms_resolution(dev, dev->width, dev->height);
	/* Ok we figured out what to use from our wide choice */
	mutex_unlock(&dev->lock);
	return 0;
}
/* VIDIOC_ENUM_FMT: the two supported formats.  The whole fmtdesc is
 * copied from the template, so the caller's buf type is preserved
 * explicitly afterwards. */
static int pms_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
	static struct v4l2_fmtdesc formats[] = {
		{ 0, 0, 0,
		  "RGB 5:5:5", V4L2_PIX_FMT_RGB555,
		  { 0, 0, 0, 0 }
		},
		{ 0, 0, 0,
		  "RGB 5:6:5", V4L2_PIX_FMT_RGB565,
		  { 0, 0, 0, 0 }
		},
	};
	enum v4l2_buf_type type = fmt->type;

	if (fmt->index > 1)
		return -EINVAL;

	*fmt = formats[fmt->index];
	fmt->type = type;
	return 0;
}
/* read() entry point: capture one frame into the user buffer under
 * the device lock.  RGB555 vs RGB565 is taken from dev->depth. */
static ssize_t pms_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct pms *dev = video_drvdata(file);
	int len;

	mutex_lock(&dev->lock);
	len = pms_capture(dev, buf, (dev->depth == 15), count);
	mutex_unlock(&dev->lock);
	return len;
}
/* File operations: ioctls are dispatched through video_ioctl2 to the
 * pms_ioctl_ops table below. */
static const struct v4l2_file_operations pms_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.read           = pms_read,
};
/* V4L2 ioctl dispatch table for the capture device. */
static const struct v4l2_ioctl_ops pms_ioctl_ops = {
	.vidioc_querycap	    = pms_querycap,
	.vidioc_g_input		    = pms_g_input,
	.vidioc_s_input		    = pms_s_input,
	.vidioc_enum_input	    = pms_enum_input,
	.vidioc_g_std		    = pms_g_std,
	.vidioc_s_std		    = pms_s_std,
	.vidioc_queryctrl	    = pms_queryctrl,
	.vidioc_g_ctrl		    = pms_g_ctrl,
	.vidioc_s_ctrl		    = pms_s_ctrl,
	.vidioc_enum_fmt_vid_cap    = pms_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	    = pms_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	    = pms_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	    = pms_try_fmt_vid_cap,
};
/*
* Probe for and initialise the Mediavision PMS
*/
/*
 * Probe and initialise the Mediavision PMS hardware.
 * Maps the frame memory, claims the config (0x9a01) and data I/O
 * regions, identifies the decoder variant over I2C, then loads the
 * register defaults.  Returns 0 on success or a negative errno; every
 * failure path releases whatever was already acquired.
 *
 * The I2C default-load loop is now bounded by the table size instead
 * of the magic constant 0x19 (same value today, but kept in sync
 * automatically).
 */
static int init_mediavision(struct pms *dev)
{
	int id;
	int idec, decst;
	int i;
	static const unsigned char i2c_defs[] = {
		0x4c, 0x30, 0x00, 0xe8,
		0xb6, 0xe2, 0x00, 0x00,
		0xff, 0xff, 0x00, 0x00,
		0x00, 0x00, 0x78, 0x98,
		0x00, 0x00, 0x00, 0x00,
		0x34, 0x0a, 0xf4, 0xce,
		0xe4
	};

	dev->mem = ioremap(mem_base, 0x800);
	if (!dev->mem)
		return -ENOMEM;

	if (!request_region(0x9a01, 1, "Mediavision PMS config")) {
		printk(KERN_WARNING "mediavision: unable to detect: 0x9a01 in use.\n");
		iounmap(dev->mem);
		return -EBUSY;
	}
	if (!request_region(dev->io, 3, "Mediavision PMS")) {
		printk(KERN_WARNING "mediavision: I/O port %d in use.\n", dev->io);
		release_region(0x9a01, 1);
		iounmap(dev->mem);
		return -EBUSY;
	}
	outb(0xb8, 0x9a01);		/* Unlock */
	outb(dev->io >> 4, 0x9a01);	/* Set IO port */

	/* id: version/id register; value currently unused but the read may
	 * have side effects on the hardware -- kept. */
	id = mvv_read(dev, 3);
	decst = pms_i2c_stat(dev, 0x43);

	if (decst != -1)
		idec = 2;
	else if (pms_i2c_stat(dev, 0xb9) != -1)
		idec = 3;
	else if (pms_i2c_stat(dev, 0x8b) != -1)
		idec = 1;
	else
		idec = 0;

	printk(KERN_INFO "PMS type is %d\n", idec);
	if (idec == 0) {
		release_region(dev->io, 3);
		release_region(0x9a01, 1);
		iounmap(dev->mem);
		return -ENODEV;
	}

	/*
	 * Ok we have a PMS of some sort
	 */

	mvv_write(dev, 0x04, mem_base >> 12);	/* Set the memory area */

	/* Ok now load the defaults; 0xff entries get the and/or treatment */
	for (i = 0; i < ARRAY_SIZE(i2c_defs); i++) {
		if (i2c_defs[i] == 0xff)
			pms_i2c_andor(dev, 0x8a, i, 0x07, 0x00);
		else
			pms_i2c_write(dev, 0x8a, i, i2c_defs[i]);
	}

	pms_i2c_write(dev, 0xb8, 0x00, 0x12);
	pms_i2c_write(dev, 0xb8, 0x04, 0x00);
	pms_i2c_write(dev, 0xb8, 0x07, 0x00);
	pms_i2c_write(dev, 0xb8, 0x08, 0x00);
	pms_i2c_write(dev, 0xb8, 0x09, 0xff);
	pms_i2c_write(dev, 0xb8, 0x0a, 0x00);
	pms_i2c_write(dev, 0xb8, 0x0b, 0x10);
	pms_i2c_write(dev, 0xb8, 0x10, 0x03);

	mvv_write(dev, 0x01, 0x00);
	mvv_write(dev, 0x05, 0xa0);
	mvv_write(dev, 0x08, 0x25);
	mvv_write(dev, 0x09, 0x00);
	mvv_write(dev, 0x0a, 0x20 | MVVMEMORYWIDTH);

	mvv_write(dev, 0x10, 0x02);
	mvv_write(dev, 0x1e, 0x0c);
	mvv_write(dev, 0x1f, 0x03);
	mvv_write(dev, 0x26, 0x06);

	mvv_write(dev, 0x2b, 0x00);
	mvv_write(dev, 0x2c, 0x20);
	mvv_write(dev, 0x2d, 0x00);
	mvv_write(dev, 0x2f, 0x70);
	mvv_write(dev, 0x32, 0x00);
	mvv_write(dev, 0x33, MVVMEMORYWIDTH);
	mvv_write(dev, 0x34, 0x00);
	mvv_write(dev, 0x35, 0x00);
	mvv_write(dev, 0x3a, 0x80);
	mvv_write(dev, 0x3b, 0x10);
	mvv_write(dev, 0x20, 0x00);
	mvv_write(dev, 0x21, 0x00);
	mvv_write(dev, 0x30, 0x22);
	return 0;
}
/*
* Initialization and module stuff
*/
#ifndef MODULE
static int enable;
module_param(enable, int, 0);
#endif
/*
 * Module init: probe the board, register the v4l2 device and the video
 * node, then program the capture defaults.
 *
 * Fix: the v4l2_device_register() failure path used to leak the I/O
 * regions and the ioremap acquired by init_mediavision(); all failure
 * paths after a successful probe now release them.
 */
static int __init pms_init(void)
{
	struct pms *dev = &pms_card;
	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
	int res;

	strlcpy(v4l2_dev->name, "pms", sizeof(v4l2_dev->name));
	v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.03\n");

#ifndef MODULE
	if (!enable) {
		v4l2_err(v4l2_dev,
			"PMS: not enabled, use pms.enable=1 to probe\n");
		return -ENODEV;
	}
#endif

	dev->decoder = PHILIPS2;	/* default; real variant probed below */
	dev->io = io_port;
	dev->data = io_port + 1;

	if (init_mediavision(dev)) {
		v4l2_err(v4l2_dev, "Board not found.\n");
		return -ENODEV;
	}

	res = v4l2_device_register(NULL, v4l2_dev);
	if (res < 0) {
		v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
		goto free_io;
	}

	strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
	dev->vdev.v4l2_dev = v4l2_dev;
	dev->vdev.fops = &pms_fops;
	dev->vdev.ioctl_ops = &pms_ioctl_ops;
	dev->vdev.release = video_device_release_empty;
	video_set_drvdata(&dev->vdev, dev);
	mutex_init(&dev->lock);

	/* capture defaults: NTSC-M, 320x240, RGB555 */
	dev->std = V4L2_STD_NTSC_M;
	dev->height = 240;
	dev->width = 320;
	dev->depth = 15;
	dev->brightness = 139;
	dev->contrast = 70;
	dev->hue = 0;
	dev->saturation = 64;
	pms_swsense(dev, 75);
	pms_resolution(dev, 320, 240);
	pms_videosource(dev, 0);
	pms_vcrinput(dev, 0);

	if (video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
		res = -EINVAL;
		v4l2_device_unregister(&dev->v4l2_dev);
		goto free_io;
	}
	return 0;

free_io:
	release_region(dev->io, 3);
	release_region(0x9a01, 1);
	iounmap(dev->mem);
	return res;
}
/*
 * Module exit: unregister the video node, then release the I/O regions
 * and the memory mapping acquired by init_mediavision().
 */
static void __exit pms_exit(void)
{
	struct pms *dev = &pms_card;

	video_unregister_device(&dev->vdev);
	release_region(dev->io, 3);
	release_region(0x9a01, 1);
	iounmap(dev->mem);
}
module_init(pms_init);
module_exit(pms_exit);
| gpl-2.0 |
htc-first/android_kernel_htc_msm8930aa | fs/reiserfs/tail_conversion.c | 7767 | 9295 | /*
* Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
*/
#include <linux/time.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "reiserfs.h"
/* Access to the tail: whoever is going to read the tail must make sure a
   conversion is not running.  direct2indirect and indirect2direct cannot
   run concurrently. */

/* Converts direct items to an unformatted node.  Panics if the file has
   no tail; returns -ENOSPC if there is no disk space for the conversion. */
/* path points to the first direct item of the file regardless of how many
   of them are there. */
/*
 * direct2indirect - move a file's tail (direct items) into the
 * unformatted node @unbh and replace them with an indirect pointer.
 * @th:          active transaction handle (must carry a transaction id)
 * @inode:       file being converted
 * @path:        search path, on entry pointing to a direct item of the file
 * @unbh:        buffer head of the unformatted node receiving the tail bytes
 * @tail_offset: file offset of the first tail byte
 *
 * Returns 0 on success, -EIO on tree corruption, or the error from
 * inserting/pasting the unformatted-node pointer.
 */
int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
		    struct treepath *path, struct buffer_head *unbh,
		    loff_t tail_offset)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *up_to_date_bh;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
	unsigned long total_tail = 0;
	struct cpu_key end_key;	/* Key to search for the last byte of the
				   converted item. */
	struct item_head ind_ih;	/* new indirect item to be inserted or
					   key of unfm pointer to be pasted */
	int blk_size, retval;	/* returned value for reiserfs_insert_item and clones */
	unp_t unfm_ptr;		/* Handle on an unformatted node
				   that will be inserted in the
				   tree. */

	BUG_ON(!th->t_trans_id);

	REISERFS_SB(sb)->s_direct2indirect++;

	blk_size = sb->s_blocksize;

	/* and key to search for append or insert pointer to the new
	   unformatted node. */
	copy_item_head(&ind_ih, p_le_ih);
	set_le_ih_k_offset(&ind_ih, tail_offset);
	set_le_ih_k_type(&ind_ih, TYPE_INDIRECT);

	/* Set the key to search for the place for new unfm pointer */
	make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);

	/* FIXME: we could avoid this */
	if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
		reiserfs_error(sb, "PAP-14030",
			       "pasted or inserted byte exists in "
			       "the tree %K. Use fsck to repair.", &end_key);
		pathrelse(path);
		return -EIO;
	}

	p_le_ih = PATH_PITEM_HEAD(path);

	unfm_ptr = cpu_to_le32(unbh->b_blocknr);

	if (is_statdata_le_ih(p_le_ih)) {
		/* Insert new indirect item. */
		set_ih_free_space(&ind_ih, 0);	/* delete at nearest future */
		put_ih_item_len(&ind_ih, UNFM_P_SIZE);
		PATH_LAST_POSITION(path)++;
		retval =
		    reiserfs_insert_item(th, path, &end_key, &ind_ih, inode,
					 (char *)&unfm_ptr);
	} else {
		/* Paste into last indirect item of an object. */
		retval = reiserfs_paste_into_item(th, path, &end_key, inode,
						  (char *)&unfm_ptr,
						  UNFM_P_SIZE);
	}
	if (retval) {
		return retval;
	}
	// note: from here there are two keys which have matching first
	// three key components. They only differ by the fourth one.

	/* Set the key to search for the direct items of the file */
	make_cpu_key(&end_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT,
		     4);

	/* Move bytes from the direct items to the new unformatted node
	   and delete them.  The loop body always runs at least once before
	   the break, so up_to_date_bh is assigned before its use below. */
	while (1) {
		int tail_size;

		/* end_key.k_offset is set so, that we will always have found
		   last item of the file */
		if (search_for_position_by_key(sb, &end_key, path) ==
		    POSITION_FOUND)
			reiserfs_panic(sb, "PAP-14050",
				       "direct item (%K) not found", &end_key);
		p_le_ih = PATH_PITEM_HEAD(path);
		RFALSE(!is_direct_le_ih(p_le_ih),
		       "vs-14055: direct item expected(%K), found %h",
		       &end_key, p_le_ih);
		tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
		    + ih_item_len(p_le_ih) - 1;

		/* we only send the unbh pointer if the buffer is not up to date.
		 ** this avoids overwriting good data from writepage() with old data
		 ** from the disk or buffer cache
		 ** Special case: unbh->b_page will be NULL if we are coming through
		 ** DIRECT_IO handler here.
		 */
		if (!unbh->b_page || buffer_uptodate(unbh)
		    || PageUptodate(unbh->b_page)) {
			up_to_date_bh = NULL;
		} else {
			up_to_date_bh = unbh;
		}
		retval = reiserfs_delete_item(th, path, &end_key, inode,
					      up_to_date_bh);

		total_tail += retval;
		if (tail_size == retval)
			// done: file does not have direct items anymore
			break;
	}
	/* if we've copied bytes from disk into the page, we need to zero
	 ** out the unused part of the block (it was not up to date before)
	 */
	if (up_to_date_bh) {
		unsigned pgoff =
		    (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
		char *kaddr = kmap_atomic(up_to_date_bh->b_page);
		memset(kaddr + pgoff, 0, blk_size - total_tail);
		kunmap_atomic(kaddr);
	}

	REISERFS_I(inode)->i_first_direct_byte = U32_MAX;

	return 0;
}
/* stolen from fs/buffer.c */
/*
 * Detach @bh from the block device and from any per-sb dirty-buffer
 * bookkeeping so that a later writeback attempt cannot touch an
 * unmapped buffer.  BUG()s if the buffer is still journaled or
 * journal-dirty -- such buffers must be written back first.
 */
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
		BUG();
	}
	clear_buffer_dirty(bh);
	/* Remove the buffer from whatever list it belongs to. We are mostly
	   interested in removing it from per-sb j_dirty_buffers list, to avoid
	   BUG() on attempt to write not mapped buffer */
	if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
		struct inode *inode = bh->b_page->mapping->host;
		struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
		spin_lock(&j->j_dirty_buffers_lock);
		list_del_init(&bh->b_assoc_buffers);
		reiserfs_free_jh(bh);
		spin_unlock(&j->j_dirty_buffers_lock);
	}
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	unlock_buffer(bh);
}
/* This first locks the inode (neither reads nor sync are permitted),
   reads the tail through the page cache, and inserts a direct item.
   When the direct item is inserted successfully the inode is left
   locked.  The return value is always what we expect from it (the
   number of cut bytes).  But when the tail remains in the unformatted
   node, we set mode to SKIP_BALANCING and unlock the inode. */
int indirect2direct(struct reiserfs_transaction_handle *th,
		    struct inode *inode, struct page *page,
		    struct treepath *path,	/* path to the indirect item. */
		    const struct cpu_key *item_key,	/* Key to look for
							 * unformatted node
							 * pointer to be cut. */
		    loff_t n_new_file_size,	/* New file size. */
		    char *mode)
{
	struct super_block *sb = inode->i_sb;
	struct item_head s_ih;
	unsigned long block_size = sb->s_blocksize;
	char *tail;
	int tail_len, round_tail_len;
	loff_t pos, pos1;	/* position of first byte of the tail */
	struct cpu_key key;

	BUG_ON(!th->t_trans_id);

	REISERFS_SB(sb)->s_indirect2direct++;

	*mode = M_SKIP_BALANCING;

	/* store item head path points to. */
	copy_item_head(&s_ih, PATH_PITEM_HEAD(path));

	/* tail length within its block; v2 stat data rounds it up */
	tail_len = (n_new_file_size & (block_size - 1));
	if (get_inode_sd_version(inode) == STAT_DATA_V2)
		round_tail_len = ROUND_UP(tail_len);
	else
		round_tail_len = tail_len;

	pos =
	    le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE -
					 1) * sb->s_blocksize;
	pos1 = pos;

	// we are protected by i_mutex. The tail cannot disappear, nor
	// can an append be done
	// we are in truncate or packing tail in file_release

	tail = (char *)kmap(page);	/* this can schedule */

	if (path_changed(&s_ih, path)) {
		/* re-search indirect item */
		if (search_for_position_by_key(sb, item_key, path)
		    == POSITION_NOT_FOUND)
			reiserfs_panic(sb, "PAP-5520",
				       "item to be converted %K does not exist",
				       item_key);
		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
#ifdef CONFIG_REISERFS_CHECK
		pos = le_ih_k_offset(&s_ih) - 1 +
		    (ih_item_len(&s_ih) / UNFM_P_SIZE -
		     1) * sb->s_blocksize;
		if (pos != pos1)
			reiserfs_panic(sb, "vs-5530", "tail position "
				       "changed while we were reading it");
#endif
	}

	/* Set direct item header to insert. */
	make_le_item_head(&s_ih, NULL, get_inode_item_key_version(inode),
			  pos1 + 1, TYPE_DIRECT, round_tail_len,
			  0xffff /*ih_free_space */ );

	/* we want a pointer to the first byte of the tail in the page.
	 ** the page was locked and this part of the page was up to date when
	 ** indirect2direct was called, so we know the bytes are still valid
	 */
	tail = tail + (pos & (PAGE_CACHE_SIZE - 1));

	PATH_LAST_POSITION(path)++;

	key = *item_key;
	set_cpu_key_k_type(&key, TYPE_DIRECT);
	key.key_length = 4;
	/* Insert tail as new direct item in the tree */
	/* NOTE(review): tail is always non-NULL here, so the ?: below is
	   redundant -- kept as-is. */
	if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
				 tail ? tail : NULL) < 0) {
		/* No disk memory. So we can not convert last unformatted node
		   to the direct item. In this case we used to adjust
		   indirect items's ih_free_space. Now ih_free_space is not
		   used, it would be ideal to write zeros to corresponding
		   unformatted node. For now i_size is considered as guard for
		   going out of file size */
		kunmap(page);
		return block_size - round_tail_len;
	}
	kunmap(page);

	/* make sure to get the i_blocks changes from reiserfs_insert_item */
	reiserfs_update_sd(th, inode);

	// note: we have now the same as in above direct2indirect
	// conversion: there are two keys which have matching first three
	// key components. They only differ by the fourth one.

	/* We have inserted new direct item and must remove last
	   unformatted node. */
	*mode = M_CUT;

	/* we store position of first direct item in the in-core inode */
	/* mark_file_with_tail (inode, pos1 + 1); */
	REISERFS_I(inode)->i_first_direct_byte = pos1 + 1;

	return block_size - round_tail_len;
}
| gpl-2.0 |
milodky/kernel_for_nexus7 | drivers/mtd/maps/fortunet.c | 8023 | 6941 | /* fortunet.c memory map
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#define MAX_NUM_REGIONS 4
#define MAX_NUM_PARTITIONS 8
#define DEF_WINDOW_ADDR_PHY 0x00000000
#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
#define MTD_FORTUNET_PK "MTD FortuNet: "
#define MAX_NAME_SIZE 128
struct map_region
{
int window_addr_physical;
int altbankwidth;
struct map_info map_info;
struct mtd_info *mymtd;
struct mtd_partition parts[MAX_NUM_PARTITIONS];
char map_name[MAX_NAME_SIZE];
char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
};
static struct map_region map_regions[MAX_NUM_REGIONS];
static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
struct map_info default_map = {
.size = DEF_WINDOW_SIZE,
.bankwidth = 4,
};
/*
 * Copy a comma-terminated, optionally double-quoted token from @sor into
 * @dest (at most @dest_size bytes including the NUL).  Commas inside
 * quotes are copied literally.  Returns a pointer to the remainder of
 * @sor (just past the terminating comma, or wherever copying stopped).
 */
static char * __init get_string_option(char *dest,int dest_size,char *sor)
{
	int quoted = 0;

	if (!dest_size)
		return sor;
	dest_size--;		/* reserve room for the terminating NUL */

	while (*sor) {
		char c = *sor;

		if (c == '\"') {
			/* quote characters toggle quoting, and are dropped */
			quoted = !quoted;
			sor++;
			continue;
		}
		if (c == ',' && !quoted) {
			/* unquoted comma ends the token; consume it */
			sor++;
			break;
		}
		*dest++ = c;
		sor++;
		if (!--dest_size) {
			/* destination full: terminate and stop early */
			*dest = '\0';
			return sor;
		}
	}
	*dest = '\0';
	return sor;
}
/*
 * "MTD_Region=" boot-option parser:
 *   name,region-number[,base,size,bankwidth,altbankwidth]
 * Initialises the selected map_regions[] slot from default_map and the
 * supplied values.  Always returns 1 so the option is consumed.
 */
static int __init MTD_New_Region(char *line)
{
	char name[MAX_NAME_SIZE];
	int params[6];
	struct map_region *region;

	get_options (get_string_option(name,sizeof(name),line),6,params);
	if (params[0] < 1) {
		printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
			" name,region-number[,base,size,bankwidth,altbankwidth]\n");
		return 1;
	}
	if (params[1] < 0 || params[1] >= MAX_NUM_REGIONS) {
		printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
			params[1],MAX_NUM_REGIONS-1);
		return 1;
	}

	region = &map_regions[params[1]];
	memset(region, 0, sizeof(*region));
	memcpy(&region->map_info, &default_map, sizeof(region->map_info));
	map_regions_set[params[1]] = 1;

	/* defaults, possibly overridden by the optional parameters below */
	region->window_addr_physical = DEF_WINDOW_ADDR_PHY;
	region->altbankwidth = 2;
	region->mymtd = NULL;
	region->map_info.name = region->map_name;
	strcpy(region->map_info.name, name);

	if (params[0] > 1)
		region->window_addr_physical = params[2];
	if (params[0] > 2)
		region->map_info.size = params[3];
	if (params[0] > 3)
		region->map_info.bankwidth = params[4];
	if (params[0] > 4)
		region->altbankwidth = params[5];
	return 1;
}
/*
 * "MTD_Partition=" boot-option parser:
 *   name,region-number,size,offset
 * Appends a partition descriptor to the chosen region.  Always returns 1
 * so the option is consumed.
 */
static int __init MTD_New_Partition(char *line)
{
	char name[MAX_NAME_SIZE];
	int params[4];
	struct map_region *region;
	struct mtd_partition *part;
	int slot;

	get_options (get_string_option(name,sizeof(name),line),4,params);
	if (params[0] < 3) {
		printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
			" name,region-number,size,offset\n");
		return 1;
	}
	if (params[1] < 0 || params[1] >= MAX_NUM_REGIONS) {
		printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
			params[1],MAX_NUM_REGIONS-1);
		return 1;
	}
	if (map_regions_parts[params[1]] >= MAX_NUM_PARTITIONS) {
		printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
		return 1;
	}

	region = &map_regions[params[1]];
	slot = map_regions_parts[params[1]];
	part = &region->parts[slot];

	part->name = region->parts_name[slot];
	strcpy(part->name, name);
	part->size = params[2];
	part->offset = params[3];
	part->mask_flags = 0;
	map_regions_parts[params[1]]++;
	return 1;
}
__setup("MTD_Region=", MTD_New_Region);
__setup("MTD_Partition=", MTD_New_Partition);
/* Backwards-spelling-compatibility */
__setup("MTD_Partion=", MTD_New_Partition);
/*
 * init_fortunet - map, probe and register every configured flash region.
 *
 * Regions that received partitions on the command line but were never
 * configured via "MTD_Region=" are first set up with defaults.  Returns
 * 0 if at least one region came up, -ENXIO otherwise.
 *
 * Fixes:
 *  - a region whose CFI probe fails with both bankwidths is skipped
 *    (previously mymtd was dereferenced while NULL);
 *  - stray comma operator after the .phys assignment replaced with ';';
 *  - printk specifiers match their arguments (%p for the mapped virtual
 *    address, %lx for the unsigned long size).
 *
 * NOTE(review): the ioremap failure path unmaps earlier regions but does
 * not unregister MTD devices already registered for them -- pre-existing
 * behaviour kept as-is; verify before relying on mixed-failure unload.
 */
static int __init init_fortunet(void)
{
	int ix, iy;

	for (iy = ix = 0; ix < MAX_NUM_REGIONS; ix++) {
		if (map_regions_parts[ix] && (!map_regions_set[ix])) {
			printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
				ix);
			memset(&map_regions[ix], 0, sizeof(map_regions[ix]));
			memcpy(&map_regions[ix].map_info, &default_map,
				sizeof(map_regions[ix].map_info));
			map_regions_set[ix] = 1;
			map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
			map_regions[ix].altbankwidth = 2;
			map_regions[ix].mymtd = NULL;
			map_regions[ix].map_info.name = map_regions[ix].map_name;
			strcpy(map_regions[ix].map_info.name, "FORTUNET");
		}
		if (!map_regions_set[ix])
			continue;

		iy++;
		printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
			" address %x size %lx\n",
			map_regions[ix].map_info.name,
			map_regions[ix].window_addr_physical,
			map_regions[ix].map_info.size);

		map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical;
		map_regions[ix].map_info.virt =
			ioremap_nocache(map_regions[ix].window_addr_physical,
					map_regions[ix].map_info.size);
		if (!map_regions[ix].map_info.virt) {
			int j;

			printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
				map_regions[ix].map_info.name);
			for (j = 0; j < ix; j++)
				iounmap(map_regions[j].map_info.virt);
			return -ENXIO;
		}
		simple_map_init(&map_regions[ix].map_info);

		printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %p\n",
			map_regions[ix].map_info.name,
			map_regions[ix].map_info.virt);

		map_regions[ix].mymtd = do_map_probe("cfi_probe",
			&map_regions[ix].map_info);
		if ((!map_regions[ix].mymtd) &&
		    (map_regions[ix].altbankwidth != map_regions[ix].map_info.bankwidth)) {
			printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
				"for %s flash.\n",
				map_regions[ix].map_info.name);
			map_regions[ix].map_info.bankwidth =
				map_regions[ix].altbankwidth;
			map_regions[ix].mymtd = do_map_probe("cfi_probe",
				&map_regions[ix].map_info);
		}
		if (!map_regions[ix].mymtd) {
			printk(KERN_WARNING MTD_FORTUNET_PK
				"CFI probe failed for %s flash; skipping region.\n",
				map_regions[ix].map_info.name);
			iounmap(map_regions[ix].map_info.virt);
			map_regions[ix].map_info.virt = NULL;
			/* keep cleanup_fortunet() away from this region */
			map_regions_set[ix] = 0;
			iy--;
			continue;
		}
		map_regions[ix].mymtd->owner = THIS_MODULE;
		mtd_device_register(map_regions[ix].mymtd,
				    map_regions[ix].parts,
				    map_regions_parts[ix]);
	}
	if (iy)
		return 0;
	return -ENXIO;
}
/*
 * Module exit: unregister and destroy every probed MTD device, then
 * drop the I/O mappings of all configured regions.
 */
static void __exit cleanup_fortunet(void)
{
	int region;

	for (region = 0; region < MAX_NUM_REGIONS; region++) {
		if (!map_regions_set[region])
			continue;
		if (map_regions[region].mymtd) {
			mtd_device_unregister(map_regions[region].mymtd);
			map_destroy(map_regions[region].mymtd);
		}
		iounmap((void *)map_regions[region].map_info.virt);
	}
}
module_init(init_fortunet);
module_exit(cleanup_fortunet);
MODULE_AUTHOR("FortuNet, Inc.");
MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
| gpl-2.0 |
alexey6600/M8_Sense_7.00 | arch/ia64/xen/xencomm.c | 9303 | 2788 | /*
* Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
static unsigned long kernel_virtual_offset;
static int is_xencomm_initialized;
/* for xen early printk. It uses console io hypercall which uses xencomm.
 * However early printk may use it before xencomm initialization.
 */
/* Report whether xencomm_initialize() has already run. */
int
xencomm_is_initialized(void)
{
	return is_xencomm_initialized;
}

/* Record the kernel's virtual-to-physical load offset (the kernel is
 * relocatable, so __pa() cannot be used for it) and mark the xencomm
 * translation machinery as ready. */
void
xencomm_initialize(void)
{
	kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
	is_xencomm_initialized = 1;
}
/* Translate virtual address to physical address. */
/*
 * Returns the physical address backing @vaddr, or ~0UL when no
 * translation exists.  Handles, in order: the identity-mapped kernel
 * image, vmalloc-space (region 5) page tables, kernel/per-cpu region-7
 * addresses, and finally user mappings through current->mm.
 *
 * Fix: for region-7 addresses beyond the identity-mappable range the
 * ia64_tpa() result was computed and discarded, falling through to
 * __pa() which is wrong there; the translation is now returned.
 */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
	struct page *page;
	struct vm_area_struct *vma;

	if (vaddr == 0)
		return 0UL;

	if (REGION_NUMBER(vaddr) == 5) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep;

		/* On ia64, TASK_SIZE refers to current. It is not initialized
		   during boot.
		   Furthermore the kernel is relocatable and __pa() doesn't
		   work on addresses. */
		if (vaddr >= KERNEL_START
		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
			return vaddr - kernel_virtual_offset;

		/* In kernel area -- virtually mapped. */
		pgd = pgd_offset_k(vaddr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return ~0UL;

		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud) || pud_bad(*pud))
			return ~0UL;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return ~0UL;

		ptep = pte_offset_kernel(pmd, vaddr);
		if (!ptep)
			return ~0UL;

		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
	}

	if (vaddr > TASK_SIZE) {
		/* percpu variables */
		if (REGION_NUMBER(vaddr) == 7 &&
		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
			return ia64_tpa(vaddr);

		/* kernel address */
		return __pa(vaddr);
	}

	/* XXX double-check (lack of) locking */
	vma = find_extend_vma(current->mm, vaddr);
	if (!vma)
		return ~0UL;

	/* We assume the page is modified. */
	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
	if (!page)
		return ~0UL;

	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
| gpl-2.0 |
GlitchKernel/Glitch-i9300 | sound/oss/uart6850.c | 10071 | 7242 | /*
* sound/oss/uart6850.c
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
* Extended by Alan Cox for Red Hat Software. Now a loadable MIDI driver.
* 28/4/97 - (C) Copyright Alan Cox. Released under the GPL version 2.
*
* Alan Cox: Updated for new modular code. Removed snd_* irq handling. Now
* uses native linux resources
* Christoph Hellwig: Adapted to module_init/module_exit
* Jeff Garzik: Made it work again, in theory
* FIXME: If the request_irq() succeeds, the probe succeeds. Ug.
*
* Status: Testing required (no shit -jgarzik)
*
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
/* Mon Nov 22 22:38:35 MET 1993 marco@driq.home.usn.nl:
* added 6850 support, used with COVOX SoundMaster II and custom cards.
*/
#include "sound_config.h"
static int uart6850_base = 0x330;
static int *uart6850_osp;
#define DATAPORT (uart6850_base)
#define COMDPORT (uart6850_base+1)
#define STATPORT (uart6850_base+1)
/* Read the 6850 status register (shares an address with the command port). */
static int uart6850_status(void)
{
	return inb(STATPORT);
}

#define input_avail() (uart6850_status()&INPUT_AVAIL)
#define output_ready() (uart6850_status()&OUTPUT_READY)

/* Write one command byte to the 6850 control register. */
static void uart6850_cmd(unsigned char cmd)
{
	outb(cmd, COMDPORT);
}

/* Read one received MIDI byte from the data register. */
static int uart6850_read(void)
{
	return inb(DATAPORT);
}

/* Write one MIDI byte to the data register (caller checks output_ready). */
static void uart6850_write(unsigned char byte)
{
	outb(byte, DATAPORT);
}
#define OUTPUT_READY 0x02 /* Mask for data ready Bit */
#define INPUT_AVAIL 0x01 /* Mask for Data Send Ready Bit */
#define UART_RESET 0x95
#define UART_MODE_ON 0x03
static int uart6850_opened;
static int uart6850_irq;
static int uart6850_detected;
static int my_dev;
static DEFINE_SPINLOCK(lock);
static void (*midi_input_intr) (int dev, unsigned char data);
static void poll_uart6850(unsigned long dummy);
static DEFINE_TIMER(uart6850_timer, poll_uart6850, 0, 0);
/*
 * Drain the receive register.  `count' acts as a crude timeout: it is
 * reset to 100 whenever a byte arrives and counts down in the busy-wait
 * otherwise, so the loop exits shortly after traffic stops.  Bytes are
 * handed to the registered MIDI input callback only while the device is
 * open for reading.
 */
static void uart6850_input_loop(void)
{
	int count = 10;

	while (count)
	{
		/*
		 * Not timed out
		 */
		if (input_avail())
		{
			unsigned char c = uart6850_read();
			count = 100;	/* re-arm the timeout on traffic */
			if (uart6850_opened & OPEN_READ)
				midi_input_intr(my_dev, c);
		}
		else
		{
			while (!input_avail() && count)
				count--;
		}
	}
}
/*
 * IRQ handler: drain pending receive bytes.  Always reports the
 * interrupt as handled, even if no data was available.
 */
static irqreturn_t m6850intr(int irq, void *dev_id)
{
	if (input_avail())
		uart6850_input_loop();
	return IRQ_HANDLED;
}
/*
 * It looks like there is no input interrupts in the UART mode. Let's try
 * polling.
 */
/* Timer callback: poll for received bytes roughly once per jiffy while
 * the device is open for reading.  Re-arms itself; uart6850_close()
 * deletes the timer. */
static void poll_uart6850(unsigned long dummy)
{
	unsigned long flags;

	if (!(uart6850_opened & OPEN_READ))
		return;	/* Device has been closed */

	spin_lock_irqsave(&lock,flags);
	if (input_avail())
		uart6850_input_loop();

	uart6850_timer.expires = 1 + jiffies;
	add_timer(&uart6850_timer);

	/*
	 * Come back later
	 */

	spin_unlock_irqrestore(&lock,flags);
}
/*
 * Open the MIDI device: reset the chip, drain stale input, record the
 * input callback, and start the polling timer.  Only one opener at a
 * time (-EBUSY otherwise).  The @output callback is unused -- the 6850
 * gives no transmit-ready notification, so writes are synchronous in
 * uart6850_out().  (Also removes a stray empty statement `;' that
 * followed the busy-check block.)
 */
static int uart6850_open(int dev, int mode,
	void (*input) (int dev, unsigned char data),
	void (*output) (int dev)
)
{
	if (uart6850_opened)
	{
		/* printk("Midi6850: Midi busy\n");*/
		return -EBUSY;
	}

	uart6850_cmd(UART_RESET);
	uart6850_input_loop();	/* discard anything the chip buffered */
	midi_input_intr = input;
	uart6850_opened = mode;
	poll_uart6850(0);	/*
				 * Enable input polling
				 */
	return 0;
}
/* Close: put the chip back into "mode on" state, stop the polling timer
 * and mark the device free. */
static void uart6850_close(int dev)
{
	uart6850_cmd(UART_MODE_ON);

	del_timer(&uart6850_timer);
	uart6850_opened = 0;
}
/*
 * Transmit one MIDI byte.  Pending input is drained first (it appears
 * to block the transmitter), then we busy-wait for the transmitter to
 * become ready.  Returns 1 on success, 0 on timeout.
 */
static int uart6850_out(int dev, unsigned char midi_byte)
{
	int timeout;
	unsigned long flags;

	/*
	 * Test for input since pending input seems to block the output.
	 */

	spin_lock_irqsave(&lock,flags);

	if (input_avail())
		uart6850_input_loop();

	spin_unlock_irqrestore(&lock,flags);

	/*
	 * Sometimes it takes about 13000 loops before the output becomes ready
	 * (After reset). Normally it takes just about 10 loops.
	 */

	for (timeout = 30000; timeout > 0 && !output_ready(); timeout--);	/*
										 * Wait
										 */
	if (!output_ready())
	{
		printk(KERN_WARNING "Midi6850: Timeout\n");
		return 0;
	}
	uart6850_write(midi_byte);
	return 1;
}
/* The 6850 has no command channel; claim success for every command. */
static inline int uart6850_command(int dev, unsigned char *midi_byte)
{
	return 1;
}

/* Reads are driven by the polling timer; nothing to start here. */
static inline int uart6850_start_read(int dev)
{
	return 0;
}

/* ...and nothing to tear down either. */
static inline int uart6850_end_read(int dev)
{
	return 0;
}

/* Output is synchronous; no transmit kick needed. */
static inline void uart6850_kick(int dev)
{
}

/* No internal buffering in this driver. */
static inline int uart6850_buffer_status(int dev)
{
	return 0;	/*
			 * No data in buffers
			 */
}
#define MIDI_SYNTH_NAME "6850 UART Midi"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
/* OSS midi_operations vtable; the do-nothing hooks above fill the slots
 * the 6850 hardware cannot support. */
static struct midi_operations uart6850_operations =
{
	.owner = THIS_MODULE,
	.info = {"6850 UART", 0, 0, SNDCARD_UART6850},
	.converter = &std_midi_synth,
	.in_info = {0},
	.open = uart6850_open,
	.close = uart6850_close,
	.outputc = uart6850_out,
	.start_read = uart6850_start_read,
	.end_read = uart6850_end_read,
	.kick = uart6850_kick,
	.command = uart6850_command,
	.buffer_status = uart6850_buffer_status
};
/*
 * Register the detected UART as an OSS MIDI device and switch the chip
 * into UART ("mode on") state.  Requires probe_uart6850() to have set
 * uart6850_detected; does nothing otherwise.  (The write-only `ok'
 * variable has been removed.)
 */
static void __init attach_uart6850(struct address_info *hw_config)
{
	int timeout;
	unsigned long flags;

	if (!uart6850_detected)
		return;

	if ((my_dev = sound_alloc_mididev()) == -1)
	{
		printk(KERN_INFO "uart6850: Too many midi devices detected\n");
		return;
	}
	uart6850_base = hw_config->io_base;
	uart6850_osp = hw_config->osp;
	uart6850_irq = hw_config->irq;

	spin_lock_irqsave(&lock,flags);
	/* wait for the transmitter before switching modes */
	for (timeout = 30000; timeout > 0 && !output_ready(); timeout--);	/*
										 * Wait
										 */
	uart6850_cmd(UART_MODE_ON);
	spin_unlock_irqrestore(&lock,flags);

	conf_printf("6850 Midi Interface", hw_config);

	std_midi_synth.midi_dev = my_dev;
	hw_config->slots[4] = my_dev;
	midi_devs[my_dev] = &uart6850_operations;
	sequencer_init();
}
/*
 * "Reset" the UART by draining one byte.  Always reports success --
 * see the FIXME in the file header: there is no real detection here.
 */
static inline int reset_uart6850(void)
{
	uart6850_read();
	return 1;	/*
			 * OK
			 */
}
/*
 * Probe: succeeds whenever the IRQ can be grabbed (reset_uart6850()
 * cannot fail, so no real hardware detection happens -- see the FIXME
 * in the file header).  Returns 1 on success, 0 on failure, and records
 * the result in uart6850_detected for attach_uart6850().
 */
static int __init probe_uart6850(struct address_info *hw_config)
{
	int ok;

	uart6850_osp = hw_config->osp;
	uart6850_base = hw_config->io_base;
	uart6850_irq = hw_config->irq;

	if (request_irq(uart6850_irq, m6850intr, 0, "MIDI6850", NULL) < 0)
		return 0;

	ok = reset_uart6850();
	uart6850_detected = ok;
	return ok;
}
/* Undo probe+attach: free the IRQ and release the MIDI device slot. */
static void __exit unload_uart6850(struct address_info *hw_config)
{
	free_irq(hw_config->irq, NULL);
	sound_unload_mididev(hw_config->slots[4]);
}
static struct address_info cfg_mpu;
static int __initdata io = -1;
static int __initdata irq = -1;
module_param(io, int, 0);
module_param(irq, int, 0);
/*
 * Module init: validate the io/irq parameters, probe the hardware and
 * attach the MIDI device.  probe_uart6850() returns 1 on success and
 * 0 on failure, so failure is the *zero* case -- the previous test was
 * inverted and made every successful probe return -ENODEV.
 */
static int __init init_uart6850(void)
{
	cfg_mpu.io_base = io;
	cfg_mpu.irq = irq;

	if (cfg_mpu.io_base == -1 || cfg_mpu.irq == -1) {
		printk(KERN_INFO "uart6850: irq and io must be set.\n");
		return -EINVAL;
	}

	/* probe returns 1 on success, 0 on failure */
	if (!probe_uart6850(&cfg_mpu))
		return -ENODEV;
	attach_uart6850(&cfg_mpu);

	return 0;
}
static void __exit cleanup_uart6850(void)
{
unload_uart6850(&cfg_mpu);
}
module_init(init_uart6850);
module_exit(cleanup_uart6850);
#ifndef MODULE
/* "uart6850=io,irq" boot-option parser. */
static int __init setup_uart6850(char *str)
{
	/* io, irq */
	int ints[3];

	str = get_options(str, ARRAY_SIZE(ints), ints);
	/* NOTE(review): ints[0] (the parsed-value count) is not checked;
	 * passing fewer than two values copies uninitialized stack data
	 * into io/irq -- verify before relying on partial options. */
	io = ints[1];
	irq = ints[2];

	return 1;
}
__setup("uart6850=", setup_uart6850);
#endif
MODULE_LICENSE("GPL");
| gpl-2.0 |
xcteki/limbo-android | jni/qemu/hw/grlib_gptimer.c | 88 | 11213 | /*
* QEMU GRLIB GPTimer Emulator
*
* Copyright (c) 2010-2011 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "sysbus.h"
#include "qemu-timer.h"
#include "ptimer.h"
#include "trace.h"
#define UNIT_REG_SIZE 16 /* Size of memory mapped regs for the unit */
#define GPTIMER_REG_SIZE 16 /* Size of memory mapped regs for a GPTimer */
#define GPTIMER_MAX_TIMERS 8
/* GPTimer Config register fields */
#define GPTIMER_ENABLE (1 << 0)
#define GPTIMER_RESTART (1 << 1)
#define GPTIMER_LOAD (1 << 2)
#define GPTIMER_INT_ENABLE (1 << 3)
#define GPTIMER_INT_PENDING (1 << 4)
#define GPTIMER_CHAIN (1 << 5) /* Not supported */
#define GPTIMER_DEBUG_HALT (1 << 6) /* Not supported */
/* Memory mapped register offsets */
#define SCALER_OFFSET 0x00
#define SCALER_RELOAD_OFFSET 0x04
#define CONFIG_OFFSET 0x08
#define COUNTER_OFFSET 0x00
#define COUNTER_RELOAD_OFFSET 0x04
#define TIMER_BASE 0x10
typedef struct GPTimer GPTimer;
typedef struct GPTimerUnit GPTimerUnit;

/* One of the unit's countdown timers. */
struct GPTimer {
    QEMUBH *bh;                  /* bottom half driving the ptimer */
    struct ptimer_state *ptimer; /* backing QEMU periodic timer */

    qemu_irq irq;                /* interrupt line pulsed on underflow */
    int id;                      /* index within the owning unit */
    GPTimerUnit *unit;           /* back-pointer to the parent unit */

    /* registers */
    uint32_t counter;
    uint32_t reload;
    uint32_t config;
};

/* The memory-mapped GPTimer unit: shared prescaler plus its timers. */
struct GPTimerUnit {
    SysBusDevice busdev;
    MemoryRegion iomem;

    uint32_t nr_timers;         /* Number of timers available */
    uint32_t freq_hz;           /* System frequency */
    uint32_t irq_line;          /* Base irq line */

    GPTimer *timers;

    /* registers */
    uint32_t scaler;
    uint32_t reload;
    uint32_t config;
};
/*
 * (Re)start the ptimer backing @timer from its current counter value.
 * Any pending run is stopped first; nothing more happens if the timer
 * is not enabled in its config register.
 */
static void grlib_gptimer_enable(GPTimer *timer)
{
    assert(timer != NULL);

    ptimer_stop(timer->ptimer);

    if (!(timer->config & GPTIMER_ENABLE)) {
        /* Timer disabled */
        trace_grlib_gptimer_disabled(timer->id, timer->config);
        return;
    }

    /* ptimer is triggered when the counter reach 0 but GPTimer is triggered at
       underflow. Set count + 1 to simulate the GPTimer behavior. */
    trace_grlib_gptimer_enable(timer->id, timer->counter + 1);

    ptimer_set_count(timer->ptimer, timer->counter + 1);
    ptimer_run(timer->ptimer, 1);
}
/* Reload the counter from the reload register and (re)start the timer. */
static void grlib_gptimer_restart(GPTimer *timer)
{
    assert(timer != NULL);

    trace_grlib_gptimer_restart(timer->id, timer->reload);

    timer->counter = timer->reload;
    grlib_gptimer_enable(timer);
}
/* Propagate a new prescaler value to every timer's tick frequency.
   The prescaler divides the system clock by (scaler + 1). */
static void grlib_gptimer_set_scaler(GPTimerUnit *unit, uint32_t scaler)
{
    uint32_t freq;
    int t;

    assert(unit != NULL);

    freq = (scaler > 0) ? unit->freq_hz / (scaler + 1) : unit->freq_hz;

    trace_grlib_gptimer_set_scaler(scaler, freq);

    for (t = 0; t < unit->nr_timers; t++) {
        ptimer_set_freq(unit->timers[t].ptimer, freq);
    }
}
/* ptimer callback: the counter underflowed (timer expired). */
static void grlib_gptimer_hit(void *opaque)
{
    GPTimer *timer = opaque;
    assert(timer != NULL);

    trace_grlib_gptimer_hit(timer->id);

    /* Timer expired */

    if (timer->config & GPTIMER_INT_ENABLE) {
        /* Set the pending bit (only unset by write in the config register) */
        timer->config |= GPTIMER_INT_PENDING;
        qemu_irq_pulse(timer->irq);
    }

    /* In restart mode, reload and re-arm automatically. */
    if (timer->config & GPTIMER_RESTART) {
        grlib_gptimer_restart(timer);
    }
}
/* MMIO read handler.  The first TIMER_BASE bytes hold the unit registers;
   timer i's 16-byte register window follows at TIMER_BASE * (i + 1).
   Unknown offsets read as zero. */
static uint64_t grlib_gptimer_read(void *opaque, target_phys_addr_t addr,
                                   unsigned size)
{
    GPTimerUnit *unit = opaque;
    target_phys_addr_t timer_addr;
    int id;
    uint32_t value = 0;

    addr &= 0xff;

    /* Unit registers */
    switch (addr) {
    case SCALER_OFFSET:
        trace_grlib_gptimer_readl(-1, addr, unit->scaler);
        return unit->scaler;

    case SCALER_RELOAD_OFFSET:
        trace_grlib_gptimer_readl(-1, addr, unit->reload);
        return unit->reload;

    case CONFIG_OFFSET:
        trace_grlib_gptimer_readl(-1, addr, unit->config);
        return unit->config;

    default:
        break;
    }

    /* Decode the timer index and the offset inside its register window. */
    timer_addr = (addr % TIMER_BASE);
    id = (addr - TIMER_BASE) / TIMER_BASE;

    if (id >= 0 && id < unit->nr_timers) {
        /* GPTimer registers */
        switch (timer_addr) {
        case COUNTER_OFFSET:
            /* The live count is held inside the ptimer, not the struct. */
            value = ptimer_get_count(unit->timers[id].ptimer);
            trace_grlib_gptimer_readl(id, addr, value);
            return value;

        case COUNTER_RELOAD_OFFSET:
            value = unit->timers[id].reload;
            trace_grlib_gptimer_readl(id, addr, value);
            return value;

        case CONFIG_OFFSET:
            trace_grlib_gptimer_readl(id, addr, unit->timers[id].config);
            return unit->timers[id].config;

        default:
            break;
        }
    }

    /* Unknown register: read as zero. */
    trace_grlib_gptimer_readl(-1, addr, 0);
    return 0;
}
/* MMIO write handler.  Unit registers live in the first TIMER_BASE bytes;
   per-timer registers follow in 16-byte windows.  Writes to unknown
   offsets are traced and ignored. */
static void grlib_gptimer_write(void *opaque, target_phys_addr_t addr,
                                uint64_t value, unsigned size)
{
    GPTimerUnit *unit = opaque;
    target_phys_addr_t timer_addr;
    int id;

    addr &= 0xff;

    /* Unit registers */
    switch (addr) {
    case SCALER_OFFSET:
        value &= 0xFFFF; /* clean up the value */
        unit->scaler = value;
        trace_grlib_gptimer_writel(-1, addr, unit->scaler);
        return;

    case SCALER_RELOAD_OFFSET:
        value &= 0xFFFF; /* clean up the value */
        unit->reload = value;
        trace_grlib_gptimer_writel(-1, addr, unit->reload);
        /* A new prescaler reload changes every timer's tick frequency. */
        grlib_gptimer_set_scaler(unit, value);
        return;

    case CONFIG_OFFSET:
        /* Read Only (disable timer freeze not supported) */
        trace_grlib_gptimer_writel(-1, addr, 0);
        return;

    default:
        break;
    }

    /* Decode the timer index and the offset inside its register window. */
    timer_addr = (addr % TIMER_BASE);
    id = (addr - TIMER_BASE) / TIMER_BASE;

    if (id >= 0 && id < unit->nr_timers) {
        /* GPTimer registers */
        switch (timer_addr) {
        case COUNTER_OFFSET:
            trace_grlib_gptimer_writel(id, addr, value);
            unit->timers[id].counter = value;
            grlib_gptimer_enable(&unit->timers[id]);
            return;

        case COUNTER_RELOAD_OFFSET:
            trace_grlib_gptimer_writel(id, addr, value);
            unit->timers[id].reload = value;
            return;

        case CONFIG_OFFSET:
            trace_grlib_gptimer_writel(id, addr, value);

            if (value & GPTIMER_INT_PENDING) {
                /* clear pending bit */
                value &= ~GPTIMER_INT_PENDING;
            } else {
                /* keep pending bit */
                value |= unit->timers[id].config & GPTIMER_INT_PENDING;
            }

            /* Store first: restart/enable below read the config bits. */
            unit->timers[id].config = value;

            /* gptimer_restart calls gptimer_enable, so if "enable" and
               "load" bits are present, we just have to call restart. */
            if (value & GPTIMER_LOAD) {
                grlib_gptimer_restart(&unit->timers[id]);
            } else if (value & GPTIMER_ENABLE) {
                grlib_gptimer_enable(&unit->timers[id]);
            }

            /* These fields must always be read as 0.
               BUG FIX: the original mask was ~(GPTIMER_LOAD &
               GPTIMER_DEBUG_HALT) == ~0x0, which cleared nothing, so the
               LOAD bit could be read back as 1.  The two bits must be
               OR-ed together before inverting. */
            value &= ~(GPTIMER_LOAD | GPTIMER_DEBUG_HALT);
            unit->timers[id].config = value;
            return;

        default:
            break;
        }
    }

    trace_grlib_gptimer_writel(-1, addr, value);
}
static const MemoryRegionOps grlib_gptimer_ops = {
.read = grlib_gptimer_read,
.write = grlib_gptimer_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4,
},
};
/* Device reset: return the unit and every timer to its power-on state. */
static void grlib_gptimer_reset(DeviceState *d)
{
    GPTimerUnit *unit = container_of(d, GPTimerUnit, busdev.qdev);
    int i = 0;

    assert(unit != NULL);

    unit->scaler = 0;
    unit->reload = 0;

    /* Rebuild the read-only configuration register.  (The original code
       zeroed unit->config and then immediately overwrote it - dead store
       removed.) */
    unit->config = unit->nr_timers;
    unit->config |= unit->irq_line << 3;
    unit->config |= 1 << 8; /* separate interrupt */
    unit->config |= 1 << 9; /* Disable timer freeze */

    for (i = 0; i < unit->nr_timers; i++) {
        GPTimer *timer = &unit->timers[i];

        timer->counter = 0;
        timer->reload = 0;
        timer->config = 0;
        ptimer_stop(timer->ptimer);
        ptimer_set_count(timer->ptimer, 0);
        ptimer_set_freq(timer->ptimer, unit->freq_hz);
    }
}
/* SysBus init: allocate per-timer state, create one ptimer + one IRQ line
   per timer, and register the MMIO region sized to the timer count. */
static int grlib_gptimer_init(SysBusDevice *dev)
{
    GPTimerUnit *unit = FROM_SYSBUS(typeof(*unit), dev);
    unsigned int i;

    assert(unit->nr_timers > 0);
    assert(unit->nr_timers <= GPTIMER_MAX_TIMERS);

    unit->timers = g_malloc0(sizeof unit->timers[0] * unit->nr_timers);

    for (i = 0; i < unit->nr_timers; i++) {
        GPTimer *timer = &unit->timers[i];

        timer->unit = unit;
        /* grlib_gptimer_hit() runs in bottom-half context on expiry. */
        timer->bh = qemu_bh_new(grlib_gptimer_hit, timer);
        timer->ptimer = ptimer_init(timer->bh);
        timer->id = i;

        /* One IRQ line for each timer */
        sysbus_init_irq(dev, &timer->irq);

        ptimer_set_freq(timer->ptimer, unit->freq_hz);
    }

    memory_region_init_io(&unit->iomem, &grlib_gptimer_ops, unit, "gptimer",
                          UNIT_REG_SIZE + GPTIMER_REG_SIZE * unit->nr_timers);

    sysbus_init_mmio(dev, &unit->iomem);
    return 0;
}
static Property grlib_gptimer_properties[] = {
DEFINE_PROP_UINT32("frequency", GPTimerUnit, freq_hz, 40000000),
DEFINE_PROP_UINT32("irq-line", GPTimerUnit, irq_line, 8),
DEFINE_PROP_UINT32("nr-timers", GPTimerUnit, nr_timers, 2),
DEFINE_PROP_END_OF_LIST(),
};
/* Wire up the SysBus init hook, reset handler and qdev properties. */
static void grlib_gptimer_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = grlib_gptimer_init;
    dc->reset = grlib_gptimer_reset;
    dc->props = grlib_gptimer_properties;
}
static TypeInfo grlib_gptimer_info = {
.name = "grlib,gptimer",
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(GPTimerUnit),
.class_init = grlib_gptimer_class_init,
};
static void grlib_gptimer_register_types(void)
{
type_register_static(&grlib_gptimer_info);
}
type_init(grlib_gptimer_register_types)
| gpl-2.0 |
Klozz/android_kernel_asus_grouper-1 | arch/s390/kvm/kvm-s390.c | 344 | 19936 | /*
* s390host.c -- hosting zSeries kernel virtual machines
*
* Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "userspace_handled", VCPU_STAT(exit_userspace) },
{ "exit_null", VCPU_STAT(exit_null) },
{ "exit_validity", VCPU_STAT(exit_validity) },
{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
{ "exit_external_request", VCPU_STAT(exit_external_request) },
{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
{ "exit_instruction", VCPU_STAT(exit_instruction) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
{ "instruction_spx", VCPU_STAT(instruction_spx) },
{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
{ "instruction_stap", VCPU_STAT(instruction_stap) },
{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
{ NULL }
};
static unsigned long long *facilities;
/* Section: not file related */

int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Nothing to undo: SIE needs no per-CPU teardown. */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No global hardware setup is required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* No processor compatibility check is needed; left empty. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* No arch-specific module init work. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
/* Device-level ioctl: KVM_S390_ENABLE_SIE is the only request supported. */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl != KVM_S390_ENABLE_SIE)
		return -EINVAL;

	return s390_enable_sie();
}
/* Report which optional KVM capabilities this architecture supports.
   Returns 1 for supported extensions, 0 otherwise. */
int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		return 1;
	default:
		return 0;
	}
}
/* Section: vm related */
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
switch (ioctl) {
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
break;
r = kvm_s390_inject_vm(kvm, &s390int);
break;
}
default:
r = -ENOTTY;
}
return r;
}
int kvm_arch_init_vm(struct kvm *kvm)
{
int rc;
char debug_name[16];
rc = s390_enable_sie();
if (rc)
goto out_err;
kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
goto out_err;
sprintf(debug_name, "kvm-%u", current->pid);
kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
goto out_nodbf;
spin_lock_init(&kvm->arch.float_int.lock);
INIT_LIST_HEAD(&kvm->arch.float_int.list);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
kvm->arch.gmap = gmap_alloc(current->mm);
if (!kvm->arch.gmap)
goto out_nogmap;
return 0;
out_nogmap:
debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
out_err:
return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
(__u64) vcpu->arch.sie_block)
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
smp_mb();
free_page((unsigned long)(vcpu->arch.sie_block));
kvm_vcpu_uninit(vcpu);
kfree(vcpu);
}
/* Destroy every VCPU and clear the kvm->vcpus[] array under kvm->lock. */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
/* Nothing todo */
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_regs(&vcpu->arch.host_fpregs);
save_access_regs(vcpu->arch.host_acrs);
vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
restore_fp_regs(&vcpu->arch.guest_fpregs);
restore_access_regs(vcpu->arch.guest_acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
save_fp_regs(&vcpu->arch.guest_fpregs);
save_access_regs(vcpu->arch.guest_acrs);
restore_fp_regs(&vcpu->arch.host_fpregs);
restore_access_regs(vcpu->arch.host_acrs);
}
/* Put the VCPU's SIE block into its architected initial reset state. */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* Architected reset values for control registers 0 and 14. */
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* Load the cleared FP control register into the hardware too. */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
CPUSTAT_SM |
CPUSTAT_STOPPED);
vcpu->arch.sie_block->ecb = 6;
vcpu->arch.sie_block->eca = 0xC1002001U;
vcpu->arch.sie_block->fac = (int) (long) facilities;
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
get_cpu_id(&vcpu->arch.cpu_id);
vcpu->arch.cpu_id.version = 0xff;
return 0;
}
/*
 * Allocate and wire up a new VCPU: the kvm_vcpu structure, its SIE
 * control block, the SCA entry for this id, and the local interrupt
 * state.  Returns the vcpu or an ERR_PTR() on failure.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* The SIE block lives in its own zeroed page. */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	/* Split the 64-bit SCA address into the high/low SIE block fields. */
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* Publish this VCPU's local interrupt state under the float lock. */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
BUG();
return 0;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
kvm_s390_vcpu_initial_reset(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
vcpu->arch.guest_fpregs.fpc = fpu->fpc;
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
fpu->fpc = vcpu->arch.guest_fpregs.fpc;
return 0;
}
/* Install an initial PSW; only permitted while the VCPU is stopped. */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		return -EBUSY;

	vcpu->run->psw_mask = psw.mask;
	vcpu->run->psw_addr = psw.addr;
	return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL; /* not implemented yet */
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL; /* not implemented yet */
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -EINVAL; /* not implemented yet */
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -EINVAL; /* not implemented yet */
}
/* Enter the guest via SIE once and run until the next intercept. */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	/* gprs 14/15 (16 bytes) are shadowed in the SIE block while the
	   guest runs; copy them in before entry and back out after exit. */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter/exit must run with interrupts disabled. */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		/* SIE itself faulted: reflect an addressing exception. */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int rc;
sigset_t sigsaved;
rerun_vcpu:
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
switch (kvm_run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
case KVM_EXIT_UNKNOWN:
case KVM_EXIT_INTR:
case KVM_EXIT_S390_RESET:
break;
default:
BUG();
}
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
might_fault();
do {
__vcpu_run(vcpu);
rc = kvm_handle_sie_intercept(vcpu);
} while (!signal_pending(current) && !rc);
if (rc == SIE_INTERCEPT_RERUNVCPU)
goto rerun_vcpu;
if (signal_pending(current) && !rc) {
kvm_run->exit_reason = KVM_EXIT_INTR;
rc = -EINTR;
}
if (rc == -EOPNOTSUPP) {
/* intercept cannot be handled in-kernel, prepare kvm-run */
kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
rc = 0;
}
if (rc == -EREMOTE) {
/* intercept was handled, but userspace support is needed
* kvm_run has been prepared by the handler */
rc = 0;
}
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
vcpu->stat.exit_userspace++;
return rc;
}
/* Copy n bytes into the guest; prefix selects prefixed (real) versus
   absolute addressing. */
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	return prefix ? copy_to_guest(vcpu, guestdest, from, n)
		      : copy_to_guest_absolute(vcpu, guestdest, from, n);
}
/*
* store status at address
* we use have two special cases:
* KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
* KVM_S390_STORE_STATUS_PREFIXED: -> prefix
*/
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
unsigned char archmode = 1;
int prefix;
if (addr == KVM_S390_STORE_STATUS_NOADDR) {
if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
return -EFAULT;
addr = SAVE_AREA_BASE;
prefix = 0;
} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
if (copy_to_guest(vcpu, 163ul, &archmode, 1))
return -EFAULT;
addr = SAVE_AREA_BASE;
prefix = 1;
} else
prefix = 0;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
vcpu->arch.guest_gprs, 128, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
&vcpu->arch.sie_block->gpsw, 16, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
&vcpu->arch.sie_block->prefix, 4, prefix))
return -EFAULT;
if (__guestcopy(vcpu,
addr + offsetof(struct save_area, fp_ctrl_reg),
&vcpu->arch.guest_fpregs.fpc, 4, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
&vcpu->arch.sie_block->todpr, 4, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
&vcpu->arch.sie_block->cputm, 8, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
&vcpu->arch.sie_block->ckc, 8, prefix))
return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
&vcpu->arch.guest_acrs, 64, prefix))
return -EFAULT;
if (__guestcopy(vcpu,
addr + offsetof(struct save_area, ctrl_regs),
&vcpu->arch.sie_block->gcr, 128, prefix))
return -EFAULT;
return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r;
switch (ioctl) {
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
break;
r = kvm_s390_inject_vcpu(vcpu, &s390int);
break;
}
case KVM_S390_STORE_STATUS:
r = kvm_s390_vcpu_store_status(vcpu, arg);
break;
case KVM_S390_SET_INITIAL_PSW: {
psw_t psw;
r = -EFAULT;
if (copy_from_user(&psw, argp, sizeof(psw)))
break;
r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
break;
}
case KVM_S390_INITIAL_RESET:
r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
break;
default:
r = -EINVAL;
}
return r;
}
/* Section: memory related */
/* Validate a new memory slot.  We support exactly one user-allocated slot
   which must start at guest physical zero, with its userspace address and
   size both 1MB-segment aligned.  The userspace memory may be fragmented
   into various vmas and may be mmap()ed/munmap()ed at any time after this
   call. */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot || mem->guest_phys_addr)
		return -EINVAL;

	if ((mem->userspace_addr | mem->memory_size) & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
/* Map the freshly validated slot into the guest address space. */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	if (gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			     mem->guest_phys_addr, mem->memory_size))
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
static int __init kvm_s390_init(void)
{
int ret;
ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
return ret;
/*
* guests can ask for up to 255+1 double words, we need a full page
* to hold the maximum amount of facilities. On the other hand, we
* only set facilities that are known to work in KVM.
*/
facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
if (!facilities) {
kvm_exit();
return -ENOMEM;
}
memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
facilities[0] &= 0xff00fff3f47c0000ULL;
facilities[1] &= 0x201c000000000000ULL;
return 0;
}
static void __exit kvm_s390_exit(void)
{
free_page((unsigned long) facilities);
kvm_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
| gpl-2.0 |
turtlepa/android_kernel_samsung_aries-old | drivers/usb/atm/cxacru.c | 600 | 40130 | /******************************************************************************
* cxacru.c - driver for USB ADSL modems based on
* Conexant AccessRunner chipset
*
* Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
* Copyright (C) 2009 Simon Arlott
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
/*
* Credit is due for Josep Comas, who created the original patch to speedtch.c
* to support the different padding used by the AccessRunner (now generalized
* into usbatm), and the userspace firmware loading utility.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include "usbatm.h"
#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
#define DRIVER_VERSION "0.4"
#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
static const char cxacru_driver_name[] = "cxacru";
#define CXACRU_EP_CMD 0x01 /* Bulk/interrupt in/out */
#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
/* Addresses */
#define PLLFCLK_ADDR 0x00350068
#define PLLBCLK_ADDR 0x0035006c
#define SDRAMEN_ADDR 0x00350010
#define FW_ADDR 0x00801000
#define BR_ADDR 0x00180600
#define SIG_ADDR 0x00180500
#define BR_STACK_ADDR 0x00187f10
/* Values */
#define SDRAM_ENA 0x1
#define CMD_TIMEOUT 2000 /* msecs */
#define POLL_INTERVAL 1 /* secs */
/* commands for interaction with the modem through the control channel before
* firmware is loaded */
enum cxacru_fw_request {
FW_CMD_ERR,
FW_GET_VER,
FW_READ_MEM,
FW_WRITE_MEM,
FW_RMW_MEM,
FW_CHECKSUM_MEM,
FW_GOTO_MEM,
};
/* commands for interaction with the modem through the control channel once
* firmware is loaded */
enum cxacru_cm_request {
CM_REQUEST_UNDEFINED = 0x80,
CM_REQUEST_TEST,
CM_REQUEST_CHIP_GET_MAC_ADDRESS,
CM_REQUEST_CHIP_GET_DP_VERSIONS,
CM_REQUEST_CHIP_ADSL_LINE_START,
CM_REQUEST_CHIP_ADSL_LINE_STOP,
CM_REQUEST_CHIP_ADSL_LINE_GET_STATUS,
CM_REQUEST_CHIP_ADSL_LINE_GET_SPEED,
CM_REQUEST_CARD_INFO_GET,
CM_REQUEST_CARD_DATA_GET,
CM_REQUEST_CARD_DATA_SET,
CM_REQUEST_COMMAND_HW_IO,
CM_REQUEST_INTERFACE_HW_IO,
CM_REQUEST_CARD_SERIAL_DATA_PATH_GET,
CM_REQUEST_CARD_SERIAL_DATA_PATH_SET,
CM_REQUEST_CARD_CONTROLLER_VERSION_GET,
CM_REQUEST_CARD_GET_STATUS,
CM_REQUEST_CARD_GET_MAC_ADDRESS,
CM_REQUEST_CARD_GET_DATA_LINK_STATUS,
CM_REQUEST_MAX,
};
/* commands for interaction with the flash memory
*
* read: response is the contents of the first 60 bytes of flash memory
* write: request contains the 60 bytes of data to write to flash memory
* response is the contents of the first 60 bytes of flash memory
*
* layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
* SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
* P: le16 USB Product ID
* V: le16 USB Vendor ID
* M: be48 MAC Address
* S: le16 ASCII Serial Number
*/
enum cxacru_cm_flash {
CM_FLASH_READ = 0xa1,
CM_FLASH_WRITE = 0xa2
};
/* reply codes to the commands above */
enum cxacru_cm_status {
CM_STATUS_UNDEFINED,
CM_STATUS_SUCCESS,
CM_STATUS_ERROR,
CM_STATUS_UNSUPPORTED,
CM_STATUS_UNIMPLEMENTED,
CM_STATUS_PARAMETER_ERROR,
CM_STATUS_DBG_LOOPBACK,
CM_STATUS_MAX,
};
/* indices into CARD_INFO_GET return array */
enum cxacru_info_idx {
CXINF_DOWNSTREAM_RATE,
CXINF_UPSTREAM_RATE,
CXINF_LINK_STATUS,
CXINF_LINE_STATUS,
CXINF_MAC_ADDRESS_HIGH,
CXINF_MAC_ADDRESS_LOW,
CXINF_UPSTREAM_SNR_MARGIN,
CXINF_DOWNSTREAM_SNR_MARGIN,
CXINF_UPSTREAM_ATTENUATION,
CXINF_DOWNSTREAM_ATTENUATION,
CXINF_TRANSMITTER_POWER,
CXINF_UPSTREAM_BITS_PER_FRAME,
CXINF_DOWNSTREAM_BITS_PER_FRAME,
CXINF_STARTUP_ATTEMPTS,
CXINF_UPSTREAM_CRC_ERRORS,
CXINF_DOWNSTREAM_CRC_ERRORS,
CXINF_UPSTREAM_FEC_ERRORS,
CXINF_DOWNSTREAM_FEC_ERRORS,
CXINF_UPSTREAM_HEC_ERRORS,
CXINF_DOWNSTREAM_HEC_ERRORS,
CXINF_LINE_STARTABLE,
CXINF_MODULATION,
CXINF_ADSL_HEADEND,
CXINF_ADSL_HEADEND_ENVIRONMENT,
CXINF_CONTROLLER_VERSION,
/* dunno what the missing two mean */
CXINF_MAX = 0x1c,
};
enum cxacru_poll_state {
CXPOLL_STOPPING,
CXPOLL_STOPPED,
CXPOLL_POLLING,
CXPOLL_SHUTDOWN
};
struct cxacru_modem_type {
u32 pll_f_clk;
u32 pll_b_clk;
int boot_rom_patch;
};
/* Per-modem driver state. */
struct cxacru_data {
	struct usbatm_data *usbatm;	/* generic usbatm core state */

	const struct cxacru_modem_type *modem_type; /* chip clocks / boot-rom info */

	int line_status;		/* cached line status value */
	struct mutex adsl_state_serialize; /* serializes ADSL state changes */
	int adsl_status;		/* cached ADSL status value */
	struct delayed_work poll_work;	/* periodic status polling */
	u32 card_info[CXINF_MAX];	/* cached CARD_INFO_GET values (CXINF_*) */
	struct mutex poll_state_serialize; /* protects poll_state */
	enum cxacru_poll_state poll_state; /* polling lifecycle (CXPOLL_*) */

	/* control handles */
	struct mutex cm_serialize;	/* one control-message exchange at a time */
	u8 *rcv_buf;			/* control receive buffer */
	u8 *snd_buf;			/* control send buffer */
	struct urb *rcv_urb;		/* control receive URB */
	struct urb *snd_urb;		/* control send URB */
	struct completion rcv_done;	/* signaled when rcv_urb completes */
	struct completion snd_done;	/* signaled when snd_urb completes */
};
static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
u8 *wdata, int wsize, u8 *rdata, int rsize);
static void cxacru_poll_status(struct work_struct *work);
/* Card info exported through sysfs */

/* Read-only attribute: show handler only. */
#define CXACRU__ATTR_INIT(_name) \
static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)

/* Read-write attribute: show and store handlers. */
#define CXACRU_CMD_INIT(_name) \
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
	cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)

/* Write-only attribute: store handler only. */
#define CXACRU_SET_INIT(_name) \
static DEVICE_ATTR(_name, S_IWUSR, \
	NULL, cxacru_sysfs_store_##_name)

/*
 * Generates a show handler that formats card_info[_value] with
 * cxacru_sysfs_showattr_<_type>(), then declares the device attribute.
 */
#define CXACRU_ATTR_INIT(_value, _type, _name) \
static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct cxacru_data *instance = to_usbatm_driver_data(\
		to_usb_interface(dev)); \
\
	if (instance == NULL) \
		return -ENODEV; \
\
	return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
} \
CXACRU__ATTR_INIT(_name)

/* CREATE/REMOVE expansions; the CXACRU_DEVICE_{CREATE,REMOVE}_FILE macros
 * are defined (and #undef'd) at each CXACRU_ALL_FILES() use site. */
#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)

#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
/* Render an unsigned 32-bit card_info value in decimal. */
static ssize_t cxacru_sysfs_showattr_u32(u32 val, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
/* Render a signed 8-bit card_info value in decimal. */
static ssize_t cxacru_sysfs_showattr_s8(s8 val, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
/* Render a value in hundredths of a dB as a signed "X.YY" decimal. */
static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
{
	if (unlikely(value < 0)) {
		value = -value;
		return snprintf(buf, PAGE_SIZE, "-%u.%02u\n",
					value / 100, value % 100);
	}
	return snprintf(buf, PAGE_SIZE, "%u.%02u\n",
				value / 100, value % 100);
}
/*
 * Render a boolean card_info value as "no"/"yes"; unexpected values fall
 * back to plain decimal so bad firmware data is still visible.
 * The table points at string literals, so it must be const: writing
 * through the old non-const pointers would be undefined behaviour.
 */
static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
{
	static const char * const str[] = { "no", "yes" };

	if (unlikely(value >= ARRAY_SIZE(str)))
		return snprintf(buf, PAGE_SIZE, "%u\n", value);
	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
 * Render CXINF_LINK_STATUS symbolically; value 0 has no name (NULL entry)
 * and, like out-of-range values, is shown as plain decimal.
 * String-literal table made const (writing through it would be UB).
 */
static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
{
	static const char * const str[] = {
		NULL, "not connected", "connected", "lost"
	};

	if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
		return snprintf(buf, PAGE_SIZE, "%u\n", value);
	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
 * Render CXINF_LINE_STATUS symbolically (same states as reported by
 * cxacru_poll_status()); out-of-range values are shown as decimal.
 * String-literal table made const (writing through it would be UB).
 */
static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
{
	static const char * const str[] = { "down", "attempting to activate",
		"training", "channel analysis", "exchange", "up",
		"waiting", "initialising"
	};

	if (unlikely(value >= ARRAY_SIZE(str)))
		return snprintf(buf, PAGE_SIZE, "%u\n", value);
	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
 * Render CXINF_MODULATION symbolically; out-of-range values are shown as
 * decimal. String-literal table made const (writing through it would be UB).
 */
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
	static const char * const str[] = {
		"",
		"ANSI T1.413",
		"ITU-T G.992.1 (G.DMT)",
		"ITU-T G.992.2 (G.LITE)"
	};

	if (unlikely(value >= ARRAY_SIZE(str)))
		return snprintf(buf, PAGE_SIZE, "%u\n", value);
	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
* This could use MAC_ADDRESS_HIGH and MAC_ADDRESS_LOW, but since
* this data is already in atm_dev there's no point.
*
* MAC_ADDRESS_HIGH = 0x????5544
* MAC_ADDRESS_LOW = 0x33221100
* Where 00-55 are bytes 0-5 of the MAC.
*/
static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxacru_data *instance = to_usbatm_driver_data(
to_usb_interface(dev));
if (instance == NULL || instance->usbatm->atm_dev == NULL)
return -ENODEV;
return snprintf(buf, PAGE_SIZE, "%pM\n",
instance->usbatm->atm_dev->esi);
}
/*
 * Show CXINF_LINE_STARTABLE as "running" (0) / "stopped" (1), matching
 * the log messages in cxacru_poll_status(); other values print as decimal.
 * String-literal table made const (writing through it would be UB).
 */
static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	static const char * const str[] = { "running", "stopped" };
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	u32 value;

	if (instance == NULL)
		return -ENODEV;

	value = instance->card_info[CXINF_LINE_STARTABLE];
	if (unlikely(value >= ARRAY_SIZE(str)))
		return snprintf(buf, PAGE_SIZE, "%u\n", value);
	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
 * sysfs store handler for adsl_state: accepts "start", "stop", "restart"
 * and "poll" (restart status polling only). Returns strlen(buf) on
 * success or a negative errno.
 */
static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	int ret;
	int poll = -1;		/* CXPOLL_* to request, or -1 for no change */
	char str_cmd[8];
	int len = strlen(buf);

	if (!capable(CAP_NET_ADMIN))
		return -EACCES;

	/* grab the first whitespace-delimited word of the input */
	ret = sscanf(buf, "%7s", str_cmd);
	if (ret != 1)
		return -EINVAL;
	ret = 0;

	if (instance == NULL)
		return -ENODEV;

	if (mutex_lock_interruptible(&instance->adsl_state_serialize))
		return -ERESTARTSYS;

	if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
		ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
		if (ret < 0) {
			atm_err(instance->usbatm, "change adsl state:"
				" CHIP_ADSL_LINE_STOP returned %d\n", ret);
			ret = -EIO;
		} else {
			ret = len;
			poll = CXPOLL_STOPPED;
		}
	}

	/* Line status is only updated every second
	 * and the device appears to only react to
	 * START/STOP every second too. Wait 1.5s to
	 * be sure that restart will have an effect. */
	if (!strcmp(str_cmd, "restart"))
		msleep(1500);

	if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
		ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
		if (ret < 0) {
			atm_err(instance->usbatm, "change adsl state:"
				" CHIP_ADSL_LINE_START returned %d\n", ret);
			ret = -EIO;
		} else {
			ret = len;
			poll = CXPOLL_POLLING;
		}
	}

	if (!strcmp(str_cmd, "poll")) {
		ret = len;
		poll = CXPOLL_POLLING;
	}

	/* ret still 0 means no recognized command matched */
	if (ret == 0) {
		ret = -EINVAL;
		poll = -1;
	}

	if (poll == CXPOLL_POLLING) {
		mutex_lock(&instance->poll_state_serialize);
		switch (instance->poll_state) {
		case CXPOLL_STOPPED:
			/* start polling */
			instance->poll_state = CXPOLL_POLLING;
			break;
		case CXPOLL_STOPPING:
			/* abort stop request */
			instance->poll_state = CXPOLL_POLLING;
			/* fall through - poll work is still scheduled, so
			 * don't start it a second time below */
		case CXPOLL_POLLING:
		case CXPOLL_SHUTDOWN:
			/* don't start polling */
			poll = -1;
		}
		mutex_unlock(&instance->poll_state_serialize);
	} else if (poll == CXPOLL_STOPPED) {
		mutex_lock(&instance->poll_state_serialize);
		/* request stop */
		if (instance->poll_state == CXPOLL_POLLING)
			instance->poll_state = CXPOLL_STOPPING;
		mutex_unlock(&instance->poll_state_serialize);
	}
	mutex_unlock(&instance->adsl_state_serialize);

	/* run the first poll synchronously; it reschedules itself */
	if (poll == CXPOLL_POLLING)
		cxacru_poll_status(&instance->poll_work.work);

	return ret;
}
/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
/*
 * sysfs store handler for adsl_config: parse "index=value" hex pairs and
 * send them to the card in CMD_MAX_CONFIG-sized batches via
 * CM_REQUEST_CARD_DATA_SET. Returns strlen(buf) on success or a negative
 * errno.
 *
 * Fix: `index` is u32, so the old `index < 0` test was always false
 * (tautological unsigned comparison); only the upper bound is checked.
 */
static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct cxacru_data *instance = to_usbatm_driver_data(
			to_usb_interface(dev));
	int len = strlen(buf);
	int ret, pos, num;
	__le32 data[CMD_PACKET_SIZE / 4];

	if (!capable(CAP_NET_ADMIN))
		return -EACCES;

	if (instance == NULL)
		return -ENODEV;

	pos = 0;
	num = 0;
	while (pos < len) {
		int tmp;
		u32 index;
		u32 value;

		ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
		if (ret < 2)
			return -EINVAL;
		if (index > 0x7f)
			return -EINVAL;
		pos += tmp;

		/* skip trailing newline */
		if (buf[pos] == '\n' && pos == len-1)
			pos++;

		/* data[0] holds the pair count; pairs start at data[1] */
		data[num * 2 + 1] = cpu_to_le32(index);
		data[num * 2 + 2] = cpu_to_le32(value);
		num++;

		/* send config values when data buffer is full
		 * or no more data
		 */
		if (pos >= len || num >= CMD_MAX_CONFIG) {
			char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */

			data[0] = cpu_to_le32(num);
			ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
				(u8 *) data, 4 + num * 8, NULL, 0);
			if (ret < 0) {
				atm_err(instance->usbatm,
					"set card data returned %d\n", ret);
				return -EIO;
			}

			for (tmp = 0; tmp < num; tmp++)
				snprintf(log + tmp*12, 13, " %02x=%08x",
					le32_to_cpu(data[tmp * 2 + 1]),
					le32_to_cpu(data[tmp * 2 + 2]));
			atm_info(instance->usbatm, "config%s\n", log);
			num = 0;
		}
	}

	return len;
}
/*
 * All device attributes are included in CXACRU_ALL_FILES
 * so that the same list can be used multiple times:
 *     INIT   (define the device attributes)
 *     CREATE (create all the device files)
 *     REMOVE (remove all the device files)
 *
 * With the last two being defined as needed in the functions
 * they are used in before calling CXACRU_ALL_FILES().
 *
 * ATTR entries expand with (card_info index, formatter type, file name);
 * __ATTR/CMD/SET entries take only the file name.
 */
#define CXACRU_ALL_FILES(_action) \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_RATE, u32, downstream_rate); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_RATE, u32, upstream_rate); \
CXACRU_ATTR_##_action(CXINF_LINK_STATUS, LINK, link_status); \
CXACRU_ATTR_##_action(CXINF_LINE_STATUS, LINE, line_status); \
CXACRU__ATTR_##_action( mac_address); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_SNR_MARGIN, dB, upstream_snr_margin); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_SNR_MARGIN, dB, downstream_snr_margin); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_ATTENUATION, dB, upstream_attenuation); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_ATTENUATION, dB, downstream_attenuation); \
CXACRU_ATTR_##_action(CXINF_TRANSMITTER_POWER, s8, transmitter_power); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_BITS_PER_FRAME, u32, upstream_bits_per_frame); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_BITS_PER_FRAME, u32, downstream_bits_per_frame); \
CXACRU_ATTR_##_action(CXINF_STARTUP_ATTEMPTS, u32, startup_attempts); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_CRC_ERRORS, u32, upstream_crc_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_CRC_ERRORS, u32, downstream_crc_errors); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_FEC_ERRORS, u32, upstream_fec_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_FEC_ERRORS, u32, downstream_fec_errors); \
CXACRU_ATTR_##_action(CXINF_UPSTREAM_HEC_ERRORS, u32, upstream_hec_errors); \
CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_HEC_ERRORS, u32, downstream_hec_errors); \
CXACRU_ATTR_##_action(CXINF_LINE_STARTABLE, bool, line_startable); \
CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
CXACRU_CMD_##_action( adsl_state); \
CXACRU_SET_##_action( adsl_config);

/* Define all show/store handlers and device_attribute structs. */
CXACRU_ALL_FILES(INIT);
/* the following three functions are stolen from drivers/usb/core/message.c */

/* URB completion handler: urb->context is a struct completion to signal. */
static void cxacru_blocking_completion(struct urb *urb)
{
	complete(urb->context);
}
/* Timer callback: unlink the URB (timer.data) when CMD_TIMEOUT expires. */
static void cxacru_timeout_kill(unsigned long data)
{
	usb_unlink_urb((struct urb *) data);
}
/*
 * Wait for an already-submitted URB to complete, bounded by CMD_TIMEOUT:
 * on expiry cxacru_timeout_kill() unlinks the URB, which completes it
 * with an error status. Optionally reports the actual transfer length.
 */
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
				 int *actual_length)
{
	struct timer_list timer;

	init_timer(&timer);
	timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
	timer.data = (unsigned long) urb;
	timer.function = cxacru_timeout_kill;
	add_timer(&timer);
	wait_for_completion(done);
	del_timer_sync(&timer);

	if (actual_length)
		*actual_length = urb->actual_length;
	return urb->status; /* must read status after completion */
}
/*
 * Perform one control-message exchange: send request @cm with payload
 * @wdata/@wsize, receive the reply into @rdata/@rsize. Payload is split
 * into CMD_PACKET_SIZE packets with a 4-byte header (byte 0 = cm; in
 * replies byte 1 = status). Returns the number of reply payload bytes
 * consumed, or a negative errno.
 */
static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
		     u8 *wdata, int wsize, u8 *rdata, int rsize)
{
	int ret, actlen;
	int offb, offd;	/* offsets into the packet buffer / the payload */
	const int stride = CMD_PACKET_SIZE - 4;	/* payload bytes per packet */
	u8 *wbuf = instance->snd_buf;
	u8 *rbuf = instance->rcv_buf;
	int wbuflen = ((wsize - 1) / stride + 1) * CMD_PACKET_SIZE;
	int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE;

	/* snd_buf/rcv_buf are single pages; refuse anything larger */
	if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n",
				wbuflen, rbuflen);
		ret = -ENOMEM;
		goto err;
	}

	mutex_lock(&instance->cm_serialize);

	/* submit reading urb before the writing one */
	init_completion(&instance->rcv_done);
	ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	memset(wbuf, 0, wbuflen);
	/* handle wsize == 0 */
	wbuf[0] = cm;
	for (offb = offd = 0; offd < wsize; offd += stride, offb += CMD_PACKET_SIZE) {
		wbuf[offb] = cm;
		memcpy(wbuf + offb + 4, wdata + offd, min_t(int, stride, wsize - offd));
	}

	instance->snd_urb->transfer_buffer_length = wbuflen;
	init_completion(&instance->snd_done);
	ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n",
				cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}

	ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen);
	if (ret < 0) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret);
		goto fail;
	}

	/* the reply must be a non-empty whole number of packets */
	if (actlen % CMD_PACKET_SIZE || !actlen) {
		if (printk_ratelimit())
			usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n",
				cm, actlen);
		ret = -EIO;
		goto fail;
	}

	/* check the return status and copy the data to the output buffer, if needed */
	for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) {
		if (rbuf[offb] != cm) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n",
					rbuf[offb], cm);
			ret = -EIO;
			goto fail;
		}
		if (rbuf[offb + 1] != CM_STATUS_SUCCESS) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "response to cm %#x failed: %#x\n",
					cm, rbuf[offb + 1]);
			ret = -EIO;
			goto fail;
		}
		if (offd >= rsize)
			break;
		memcpy(rdata + offd, rbuf + offb + 4, min_t(int, stride, rsize - offd));
		offd += stride;
	}

	ret = offd;
	dbg("cm %#x", cm);
fail:
	mutex_unlock(&instance->cm_serialize);
err:
	return ret;
}
/*
 * Fetch an indexed value array via @cm. The reply is a sequence of LE
 * 32-bit words: { count, (index, value) x count } ...; each value is
 * scattered into data[index]. Returns 0 on success or a negative errno.
 */
static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_request cm,
			       u32 *data, int size)
{
	int ret, len;
	__le32 *buf;
	int offb;
	unsigned int offd;
	const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;	/* pairs per packet */
	int buflen = ((size - 1) / stride + 1 + size * 2) * 4;

	buf = kmalloc(buflen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = cxacru_cm(instance, cm, NULL, 0, (u8 *) buf, buflen);
	if (ret < 0)
		goto cleanup;

	/* len > 0 && len % 4 == 0 guaranteed by cxacru_cm() */
	len = ret / 4;
	for (offb = 0; offb < len; ) {
		int l = le32_to_cpu(buf[offb++]);

		/* reject counts that would overrun the received data */
		if (l < 0 || l > stride || l > (len - offb) / 2) {
			if (printk_ratelimit())
				usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
					cm, l);
			ret = -EIO;
			goto cleanup;
		}
		while (l--) {
			offd = le32_to_cpu(buf[offb++]);
			/* index must fit the caller's array */
			if (offd >= size) {
				if (printk_ratelimit())
					usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
						offd, cm);
				ret = -EIO;
				goto cleanup;
			}
			data[offd] = le32_to_cpu(buf[offb++]);
		}
	}

	ret = 0;
cleanup:
	kfree(buf);
	return ret;
}
/*
 * Probe whether the modem firmware is up by issuing CARD_GET_STATUS.
 * Returns 0 when the card answers, otherwise the cxacru_cm() error.
 */
static int cxacru_card_status(struct cxacru_data *instance)
{
	int ret;

	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
	if (ret >= 0)
		return 0;

	/* firmware not loaded */
	dbg("cxacru_adsl_start: CARD_GET_STATUS returned %d", ret);
	return ret;
}
/* Remove every sysfs attribute file created by cxacru_atm_start(). */
static void cxacru_remove_device_files(struct usbatm_data *usbatm_instance,
				       struct atm_dev *atm_dev)
{
	struct usb_interface *intf = usbatm_instance->usb_intf;

/* expand CXACRU_ALL_FILES(REMOVE) into device_remove_file() calls */
#define CXACRU_DEVICE_REMOVE_FILE(_name) \
	device_remove_file(&intf->dev, &dev_attr_##_name);
	CXACRU_ALL_FILES(REMOVE);
#undef CXACRU_DEVICE_REMOVE_FILE
}
/*
 * usbatm atm_start hook: read the MAC address into atm_dev->esi, create
 * the sysfs files, start the ADSL line and kick off status polling.
 */
static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
			    struct atm_dev *atm_dev)
{
	struct cxacru_data *instance = usbatm_instance->driver_data;
	struct usb_interface *intf = usbatm_instance->usb_intf;
	int ret;
	int start_polling = 1;

	dbg("cxacru_atm_start");

	/* Read MAC address */
	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_MAC_ADDRESS, NULL, 0,
			atm_dev->esi, sizeof(atm_dev->esi));
	if (ret < 0) {
		atm_err(usbatm_instance, "cxacru_atm_start: CARD_GET_MAC_ADDRESS returned %d\n", ret);
		return ret;
	}

/* expand CXACRU_ALL_FILES(CREATE) into checked device_create_file() calls */
#define CXACRU_DEVICE_CREATE_FILE(_name) \
	ret = device_create_file(&intf->dev, &dev_attr_##_name); \
	if (unlikely(ret)) \
		goto fail_sysfs;
	CXACRU_ALL_FILES(CREATE);
#undef CXACRU_DEVICE_CREATE_FILE

	/* start ADSL */
	mutex_lock(&instance->adsl_state_serialize);
	ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
	if (ret < 0)
		atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret);

	/* Start status polling */
	mutex_lock(&instance->poll_state_serialize);
	switch (instance->poll_state) {
	case CXPOLL_STOPPED:
		/* start polling */
		instance->poll_state = CXPOLL_POLLING;
		break;
	case CXPOLL_STOPPING:
		/* abort stop request */
		instance->poll_state = CXPOLL_POLLING;
		/* fall through - poll work is still scheduled, so don't
		 * start it a second time below */
	case CXPOLL_POLLING:
	case CXPOLL_SHUTDOWN:
		/* don't start polling */
		start_polling = 0;
	}
	mutex_unlock(&instance->poll_state_serialize);
	mutex_unlock(&instance->adsl_state_serialize);

	printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
			usbatm_instance->description, atm_dev->esi);

	/* first poll runs synchronously; it reschedules itself */
	if (start_polling)
		cxacru_poll_status(&instance->poll_work.work);
	return 0;

fail_sysfs:
	usb_err(usbatm_instance, "cxacru_atm_start: device_create_file failed (%d)\n", ret);
	cxacru_remove_device_files(usbatm_instance, atm_dev);
	return ret;
}
/*
 * Delayed-work handler: fetch CM_REQUEST_CARD_INFO_GET into card_info,
 * log ADSL-state and line-status transitions, and reschedule itself
 * every POLL_INTERVAL seconds unless polling was stopped or shut down.
 */
static void cxacru_poll_status(struct work_struct *work)
{
	struct cxacru_data *instance =
		container_of(work, struct cxacru_data, poll_work.work);
	u32 buf[CXINF_MAX] = {};
	struct usbatm_data *usbatm = instance->usbatm;
	struct atm_dev *atm_dev = usbatm->atm_dev;
	int keep_polling = 1;
	int ret;

	ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX);
	if (ret < 0) {
		if (ret != -ESHUTDOWN)
			atm_warn(usbatm, "poll status: error %d\n", ret);

		/* on error, polling stops itself (unless unbinding) */
		mutex_lock(&instance->poll_state_serialize);
		if (instance->poll_state != CXPOLL_SHUTDOWN) {
			instance->poll_state = CXPOLL_STOPPED;
			if (ret != -ESHUTDOWN)
				atm_warn(usbatm, "polling disabled, set adsl_state"
					" to 'start' or 'poll' to resume\n");
		}
		mutex_unlock(&instance->poll_state_serialize);
		goto reschedule;
	}

	memcpy(instance->card_info, buf, sizeof(instance->card_info));

	/* log only transitions of CXINF_LINE_STARTABLE */
	if (instance->adsl_status != buf[CXINF_LINE_STARTABLE]) {
		instance->adsl_status = buf[CXINF_LINE_STARTABLE];
		switch (instance->adsl_status) {
		case 0:
			atm_printk(KERN_INFO, usbatm, "ADSL state: running\n");
			break;
		case 1:
			atm_printk(KERN_INFO, usbatm, "ADSL state: stopped\n");
			break;
		default:
			atm_printk(KERN_INFO, usbatm, "Unknown adsl status %02x\n", instance->adsl_status);
			break;
		}
	}

	/* act only on changes of CXINF_LINE_STATUS */
	if (instance->line_status == buf[CXINF_LINE_STATUS])
		goto reschedule;

	instance->line_status = buf[CXINF_LINE_STATUS];
	switch (instance->line_status) {
	case 0:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: down\n");
		break;
	case 1:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: attempting to activate\n");
		break;
	case 2:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: training\n");
		break;
	case 3:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: channel analysis\n");
		break;
	case 4:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: exchange\n");
		break;
	case 5:
		/* kb/s -> ATM cells/s (424 bits per 53-byte cell) */
		atm_dev->link_rate = buf[CXINF_DOWNSTREAM_RATE] * 1000 / 424;
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND);
		atm_info(usbatm, "ADSL line: up (%d kb/s down | %d kb/s up)\n",
			 buf[CXINF_DOWNSTREAM_RATE], buf[CXINF_UPSTREAM_RATE]);
		break;
	case 6:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: waiting\n");
		break;
	case 7:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
		atm_info(usbatm, "ADSL line: initializing\n");
		break;
	default:
		atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
		atm_info(usbatm, "Unknown line state %02x\n", instance->line_status);
		break;
	}
reschedule:
	mutex_lock(&instance->poll_state_serialize);
	/* a pending stop request completes once the line is fully down */
	if (instance->poll_state == CXPOLL_STOPPING &&
			instance->adsl_status == 1 && /* stopped */
			instance->line_status == 0) /* down */
		instance->poll_state = CXPOLL_STOPPED;

	if (instance->poll_state == CXPOLL_STOPPED)
		keep_polling = 0;
	mutex_unlock(&instance->poll_state_serialize);

	if (keep_polling)
		schedule_delayed_work(&instance->poll_work,
			round_jiffies_relative(POLL_INTERVAL*HZ));
}
/*
 * Send a firmware request @fw over the command endpoint. @data is cut
 * into CMD_PACKET_SIZE chunks, each with an 8-byte header (request,
 * chunk length, code1, code2, LE32 address); a page of chunks is flushed
 * at a time with usb_bulk_msg(). Returns 0 or a negative errno.
 */
static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
		     u8 code1, u8 code2, u32 addr, const u8 *data, int size)
{
	int ret;
	u8 *buf;
	int offd, offb;	/* offsets into data / the staging page */
	const int stride = CMD_PACKET_SIZE - 8;	/* payload bytes per chunk */

	buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	offb = offd = 0;
	do {
		int l = min_t(int, stride, size - offd);

		buf[offb++] = fw;
		buf[offb++] = l;
		buf[offb++] = code1;
		buf[offb++] = code2;
		put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb));
		offb += 4;
		addr += l;
		if (l)
			memcpy(buf + offb, data + offd, l);
		if (l < stride)
			memset(buf + offb + l, 0, stride - l);	/* pad last chunk */
		offb += stride;
		offd += stride;
		/* flush when the page is full or the data is exhausted */
		if ((offb >= PAGE_SIZE) || (offd >= size)) {
			ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
					   buf, offb, NULL, CMD_TIMEOUT);
			if (ret < 0) {
				dbg("sending fw %#x failed", fw);
				goto cleanup;
			}
			offb = 0;
		}
	} while (offd < size);
	dbg("sent fw %#x", fw);

	ret = 0;
cleanup:
	free_page((unsigned long) buf);
	return ret;
}
/*
 * Upload PLL settings, the firmware image @fw and (if the modem type
 * requires it) the boot ROM patch @bp, then hand control to the firmware
 * and verify the modem responds. Errors are logged and abort the upload;
 * the caller re-checks via cxacru_card_status().
 */
static void cxacru_upload_firmware(struct cxacru_data *instance,
				   const struct firmware *fw,
				   const struct firmware *bp)
{
	int ret;
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	/* descriptor id fields are already little-endian on the wire */
	__le16 signature[] = { usb_dev->descriptor.idVendor,
			       usb_dev->descriptor.idProduct };
	__le32 val;

	dbg("cxacru_upload_firmware");

	/* FirmwarePllFClkValue */
	val = cpu_to_le32(instance->modem_type->pll_f_clk);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret);
		return;
	}

	/* FirmwarePllBClkValue */
	val = cpu_to_le32(instance->modem_type->pll_b_clk);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret);
		return;
	}

	/* Enable SDRAM */
	val = cpu_to_le32(SDRAM_ENA);
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4);
	if (ret) {
		usb_err(usbatm, "Enable SDRAM failed: %d\n", ret);
		return;
	}

	/* Firmware */
	usb_info(usbatm, "loading firmware\n");
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
	if (ret) {
		usb_err(usbatm, "Firmware upload failed: %d\n", ret);
		return;
	}

	/* Boot ROM patch */
	if (instance->modem_type->boot_rom_patch) {
		usb_info(usbatm, "loading boot ROM patch\n");
		ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
		if (ret) {
			usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
			return;
		}
	}

	/* Signature */
	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4);
	if (ret) {
		usb_err(usbatm, "Signature storing failed: %d\n", ret);
		return;
	}

	usb_info(usbatm, "starting device\n");
	/* patched modems start via the boot ROM stack, others jump directly */
	if (instance->modem_type->boot_rom_patch) {
		val = cpu_to_le32(BR_ADDR);
		ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
	} else {
		ret = cxacru_fw(usb_dev, FW_GOTO_MEM, 0x0, 0x0, FW_ADDR, NULL, 0);
	}
	if (ret) {
		usb_err(usbatm, "Passing control to firmware failed: %d\n", ret);
		return;
	}

	/* Delay to allow firmware to start up. */
	msleep_interruptible(1000);

	usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD));
	usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD));
	usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_DATA));
	usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_DATA));

	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
	if (ret < 0) {
		usb_err(usbatm, "modem failed to initialize: %d\n", ret);
		return;
	}
}
/*
 * Request the firmware image "cxacru-<phase>.bin" (phase is "fw" or "bp").
 * Returns 0 with *fw_p set, or -ENOENT when not available.
 *
 * Fix: use snprintf instead of sprintf so a long phase string cannot
 * overflow the 16-byte name buffer.
 */
static int cxacru_find_firmware(struct cxacru_data *instance,
				char *phase, const struct firmware **fw_p)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct device *dev = &usbatm->usb_intf->dev;
	char buf[16];

	snprintf(buf, sizeof(buf), "cxacru-%s.bin", phase);
	dbg("cxacru_find_firmware: looking for %s", buf);

	if (request_firmware(fw_p, buf, dev)) {
		usb_dbg(usbatm, "no stage %s firmware found\n", phase);
		return -ENOENT;
	}

	usb_info(usbatm, "found firmware %s\n", buf);

	return 0;
}
static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
struct usb_interface *usb_intf)
{
const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
int ret = cxacru_find_firmware(instance, "fw", &fw);
if (ret) {
usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
return ret;
}
if (instance->modem_type->boot_rom_patch) {
ret = cxacru_find_firmware(instance, "bp", &bp);
if (ret) {
usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n");
release_firmware(fw);
return ret;
}
}
cxacru_upload_firmware(instance, fw, bp);
if (instance->modem_type->boot_rom_patch)
release_firmware(bp);
release_firmware(fw);
ret = cxacru_card_status(instance);
if (ret)
dbg("modem initialisation failed");
else
dbg("done setting up the modem");
return ret;
}
/*
 * usbatm bind hook: allocate per-modem state, page-sized command buffers
 * and URBs, and decide (via cxacru_card_status()) whether heavy_init
 * still needs to load firmware.
 */
static int cxacru_bind(struct usbatm_data *usbatm_instance,
		       struct usb_interface *intf, const struct usb_device_id *id)
{
	struct cxacru_data *instance;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
	int ret;

	/* instance init */
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		dbg("cxacru_bind: no memory for instance data");
		return -ENOMEM;
	}

	instance->usbatm = usbatm_instance;
	instance->modem_type = (struct cxacru_modem_type *) id->driver_info;

	mutex_init(&instance->poll_state_serialize);
	instance->poll_state = CXPOLL_STOPPED;
	/* -1 forces the first poll to log the initial states */
	instance->line_status = -1;
	instance->adsl_status = -1;

	mutex_init(&instance->adsl_state_serialize);

	instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->rcv_buf) {
		dbg("cxacru_bind: no memory for rcv_buf");
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_buf = (u8 *) __get_free_page(GFP_KERNEL);
	if (!instance->snd_buf) {
		dbg("cxacru_bind: no memory for snd_buf");
		ret = -ENOMEM;
		goto fail;
	}
	instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->rcv_urb) {
		dbg("cxacru_bind: no memory for rcv_urb");
		ret = -ENOMEM;
		goto fail;
	}
	instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!instance->snd_urb) {
		dbg("cxacru_bind: no memory for snd_urb");
		ret = -ENOMEM;
		goto fail;
	}

	if (!cmd_ep) {
		dbg("cxacru_bind: no command endpoint");
		ret = -ENODEV;
		goto fail;
	}

	/* the command endpoint may be interrupt or bulk depending on model */
	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_INT) {
		usb_fill_int_urb(instance->rcv_urb,
			usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done, 1);
		usb_fill_int_urb(instance->snd_urb,
			usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done, 4);
	} else {
		usb_fill_bulk_urb(instance->rcv_urb,
			usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->rcv_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->rcv_done);
		usb_fill_bulk_urb(instance->snd_urb,
			usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
			instance->snd_buf, PAGE_SIZE,
			cxacru_blocking_completion, &instance->snd_done);
	}

	mutex_init(&instance->cm_serialize);

	INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);

	usbatm_instance->driver_data = instance;

	/* skip heavy_init if the firmware already answers GET_STATUS */
	usbatm_instance->flags = (cxacru_card_status(instance) ? 0 : UDSL_SKIP_HEAVY_INIT);

	return 0;

fail:
	/* free_page(0)/usb_free_urb(NULL) are safe on partially-built state */
	free_page((unsigned long) instance->snd_buf);
	free_page((unsigned long) instance->rcv_buf);
	usb_free_urb(instance->snd_urb);
	usb_free_urb(instance->rcv_urb);
	kfree(instance);
	return ret;
}
/*
 * usbatm unbind hook: shut down status polling, kill and free the
 * command URBs and buffers, then free the per-modem state.
 */
static void cxacru_unbind(struct usbatm_data *usbatm_instance,
			  struct usb_interface *intf)
{
	struct cxacru_data *instance = usbatm_instance->driver_data;
	int is_polling = 1;

	dbg("cxacru_unbind entered");

	if (!instance) {
		dbg("cxacru_unbind: NULL instance!");
		return;
	}

	mutex_lock(&instance->poll_state_serialize);
	BUG_ON(instance->poll_state == CXPOLL_SHUTDOWN);

	/* ensure that status polling continues unless
	 * it has already stopped */
	if (instance->poll_state == CXPOLL_STOPPED)
		is_polling = 0;

	/* stop polling from being stopped or started */
	instance->poll_state = CXPOLL_SHUTDOWN;
	mutex_unlock(&instance->poll_state_serialize);

	if (is_polling)
		cancel_delayed_work_sync(&instance->poll_work);

	usb_kill_urb(instance->snd_urb);
	usb_kill_urb(instance->rcv_urb);
	usb_free_urb(instance->snd_urb);
	usb_free_urb(instance->rcv_urb);

	free_page((unsigned long) instance->snd_buf);
	free_page((unsigned long) instance->rcv_buf);

	kfree(instance);

	usbatm_instance->driver_data = NULL;
}
/* Hardware parameters per modem family; referenced from cxacru_usb_ids. */
static const struct cxacru_modem_type cxacru_cafe = {
	.pll_f_clk = 0x02d874df,
	.pll_b_clk = 0x0196a51a,
	.boot_rom_patch = 1,	/* needs cxacru-bp.bin in addition to cxacru-fw.bin */
};

static const struct cxacru_modem_type cxacru_cb00 = {
	.pll_f_clk = 0x5,
	.pll_b_clk = 0x3,
	.boot_rom_patch = 0,
};
/* Supported devices; driver_info selects the cxacru_modem_type above. */
static const struct usb_device_id cxacru_usb_ids[] = {
	{ /* V = Conexant			P = ADSL modem (Euphrates project)	*/
		USB_DEVICE(0x0572, 0xcafe),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Conexant			P = ADSL modem (Hasbani project)	*/
		USB_DEVICE(0x0572, 0xcb00),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem				*/
		USB_DEVICE(0x0572, 0xcb01),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem (Well PTI-800)		*/
		USB_DEVICE(0x0572, 0xcb02),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem				*/
		USB_DEVICE(0x0572, 0xcb06),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Conexant			P = ADSL modem (ZTE ZXDSL 852)		*/
		USB_DEVICE(0x0572, 0xcb07),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Olitec				P = ADSL modem version 2		*/
		USB_DEVICE(0x08e3, 0x0100),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Olitec				P = ADSL modem version 3		*/
		USB_DEVICE(0x08e3, 0x0102),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Trust/Amigo Technology Co.	P = AMX-CA86U				*/
		USB_DEVICE(0x0eb0, 0x3457),	.driver_info = (unsigned long) &cxacru_cafe
	},
	{ /* V = Zoom				P = 5510				*/
		USB_DEVICE(0x1803, 0x5510),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Draytek			P = Vigor 318				*/
		USB_DEVICE(0x0675, 0x0200),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Zyxel				P = 630-C1 aka OMNI ADSL USB (Annex A)	*/
		USB_DEVICE(0x0586, 0x330a),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Zyxel				P = 630-C3 aka OMNI ADSL USB (Annex B)	*/
		USB_DEVICE(0x0586, 0x330b),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Aethra				P = Starmodem UM1020			*/
		USB_DEVICE(0x0659, 0x0020),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Aztech Systems			P = ? AKA Pirelli AUA-010		*/
		USB_DEVICE(0x0509, 0x0812),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Netopia			P = Cayman 3341(Annex A)/3351(Annex B)	*/
		USB_DEVICE(0x100d, 0xcb01),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{ /* V = Netopia			P = Cayman 3342(Annex A)/3352(Annex B)	*/
		USB_DEVICE(0x100d, 0x3342),	.driver_info = (unsigned long) &cxacru_cb00
	},
	{}
};

MODULE_DEVICE_TABLE(usb, cxacru_usb_ids);
/* usbatm driver operations; ATM data flows over the CXACRU_EP_DATA endpoints. */
static struct usbatm_driver cxacru_driver = {
	.driver_name	= cxacru_driver_name,
	.bind		= cxacru_bind,
	.heavy_init	= cxacru_heavy_init,	/* skipped when firmware already up */
	.unbind		= cxacru_unbind,
	.atm_start	= cxacru_atm_start,
	.atm_stop	= cxacru_remove_device_files,
	.bulk_in	= CXACRU_EP_DATA,
	.bulk_out	= CXACRU_EP_DATA,
	/* NOTE(review): padding values appear device-specific — not derivable here */
	.rx_padding	= 3,
	.tx_padding	= 11,
};
/*
 * USB probe: reject cx82310_eth Ethernet routers that share device IDs
 * with these modems, then hand off to the usbatm core.
 */
static int cxacru_usb_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	char buf[15];

	/* Avoid ADSL routers (cx82310_eth): abort if bDeviceClass is 0xff
	 * and iProduct is "USB NET CARD". */
	if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC
			&& usb_string(usb_dev, usb_dev->descriptor.iProduct,
				      buf, sizeof(buf)) > 0
			&& !strcmp(buf, "USB NET CARD")) {
		dev_info(&intf->dev, "ignoring cx82310_eth device\n");
		return -ENODEV;
	}

	return usbatm_usb_probe(intf, id, &cxacru_driver);
}
/* USB driver glue; disconnect is handled entirely by the usbatm core. */
static struct usb_driver cxacru_usb_driver = {
	.name		= cxacru_driver_name,
	.probe		= cxacru_usb_probe,
	.disconnect	= usbatm_usb_disconnect,
	.id_table	= cxacru_usb_ids
};
/* Module load: register the USB driver with the core. */
static int __init cxacru_init(void)
{
	return usb_register(&cxacru_usb_driver);
}
/* Module unload: unregister the USB driver. */
static void __exit cxacru_cleanup(void)
{
	usb_deregister(&cxacru_usb_driver);
}

module_init(cxacru_init);
module_exit(cxacru_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| gpl-2.0 |
/* Qcrypto: FIPS 140-2 Selftests
*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <crypto/hash.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/qcrypto.h>
#include "qcryptoi.h"
#include "qcrypto_fips.h"
/*
 * Async completion callback shared by every FIPS self-test request.
 * -EINPROGRESS notifications are ignored; the final status is stored
 * and the waiting selftest thread is woken.
 */
static void _fips_cb(struct crypto_async_request *crypto_async_req, int err)
{
	struct _fips_completion *done = crypto_async_req->data;

	if (err != -EINPROGRESS) {
		done->err = err;
		complete(&done->completion);
	}
}
/*
 * Prepend @prefix to @cra_name in place.  @cra_name is a buffer of
 * CRYPTO_MAX_ALG_NAME bytes and @size is the length of the current name
 * (callers pass strlen(cra_name)).
 *
 * Returns 0 on success, -EINVAL if prefix + name + NUL would not fit.
 *
 * Fix vs. the original: the length check ran only after the first copy
 * and used '<', so a combined name of exactly CRYPTO_MAX_ALG_NAME
 * characters passed the check but was silently truncated by strlcat
 * (no room left for the terminating NUL).  Validate up front with '>='.
 */
static int _fips_get_alg_cra_name(char cra_name[],
		char *prefix, unsigned int size)
{
	char new_cra_name[CRYPTO_MAX_ALG_NAME];

	/* need room for prefix + name + terminating NUL */
	if (strlen(prefix) + size >= CRYPTO_MAX_ALG_NAME)
		return -EINVAL;

	strlcpy(new_cra_name, prefix, CRYPTO_MAX_ALG_NAME);
	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
	return 0;
}
/*
 * SHA / HMAC known-answer self tests.
 *
 * Runs every vector in fips_test_vector_sha_hmac through the qcrypto
 * hardware ahash path and compares the digest against the expected
 * value.  Returns 0 if all vectors pass, a negative errno or -1 on the
 * first failure.  A vector with klen == 0 is a plain hash, otherwise
 * it is an HMAC and the key is installed first.
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;		/* digest output, allocated per vector */
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
	struct _fips_test_vector_sha_hmac tv_sha_hmac;

	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
	(sizeof(struct _fips_test_vector_sha_hmac));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		/* private copy: the algo name may be rewritten below */
		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
			(sizeof(struct _fips_test_vector_sha_hmac)));
		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
		if (k_out_buf == NULL) {
			/* NOTE(review): PTR_ERR(NULL) is always 0 here,
			 * the %ld in this message is meaningless */
			pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n",
				PTR_ERR(k_out_buf));
			return -ENOMEM;
		}
		memset(k_out_buf, 0, tv_sha_hmac.diglen);
		init_completion(&fips_completion.completion);

		/* use_sw flags are set in dtsi file which makes
		default Linux API calls to go to s/w crypto instead
		of h/w crypto. This code makes sure that all selftests
		calls always go to h/w, independent of DTSI flags. */
		if (tv_sha_hmac.klen == 0) {
			/* plain hash: prefix with h/w driver name if needed */
			if (selftest_d->prefix_ahash_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		} else {
			/* HMAC: separate prefix flag */
			if (selftest_d->prefix_hmac_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			tv_sha_hmac.hash_alg);
			rc = PTR_ERR(tfm);
			goto clr_buf;
		}

		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!ahash_req) {
			pr_err("qcrypto: ahash_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		/* pin the request to the requested crypto engine */
		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ahash_req;
		}
		ahash_request_set_callback(ahash_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);

		crypto_ahash_clear_flags(tfm, ~0);
		if (tv_sha_hmac.klen != 0) {
			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
				tv_sha_hmac.klen);
			if (rc) {
				pr_err("qcrypto: crypto_ahash_setkey failed\n");
				goto clr_ahash_req;
			}
		}

		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
			tv_sha_hmac.ilen);
		rc = crypto_ahash_digest(ahash_req);
		/* async path: wait for _fips_cb to fire, then check its status */
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:SHA: wait_for_completion failed\n");
				goto clr_ahash_req;
			}

		}

		/* known-answer comparison */
		if (memcmp(k_out_buf, tv_sha_hmac.digest,
			tv_sha_hmac.diglen))
			rc = -1;

clr_ahash_req:
		ahash_request_free(ahash_req);
clr_tfm:
		crypto_free_ahash(tfm);
clr_buf:
		kzfree(k_out_buf);

	/* For any failure, return error */
		if (rc)
			return rc;

	}
	return rc;
}
/*
 * Cipher (AES/3DES) known-answer self tests.
 *
 * Each vector is encrypted in place and compared against the expected
 * ciphertext, then decrypted in place and compared against the original
 * plaintext.  Returns 0 if every vector passes both directions, a
 * negative errno or -1 on the first failure.
 */
int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index, num_tv;
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *ablkcipher_req;
	struct _fips_completion fips_completion;
	char *k_align_src = NULL;	/* in-place src/dst buffer */
	struct scatterlist fips_sg;
	struct _fips_test_vector_cipher tv_cipher;

	num_tv = (sizeof(fips_test_vector_cipher)) /
		(sizeof(struct _fips_test_vector_cipher));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		/* private copy: the algo name may be rewritten below */
		memcpy(&tv_cipher, &fips_test_vector_cipher[tv_index],
			(sizeof(struct _fips_test_vector_cipher)));

		/* Single buffer allocation for in place operation */
		k_align_src = kzalloc(tv_cipher.pln_txt_len, GFP_KERNEL);
		if (k_align_src == NULL) {
			/* NOTE(review): PTR_ERR(NULL) is always 0 here */
			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
				PTR_ERR(k_align_src));
			return -ENOMEM;
		}

		memcpy(&k_align_src[0], tv_cipher.pln_txt,
			tv_cipher.pln_txt_len);

		/* use_sw flags are set in dtsi file which makes
		default Linux API calls to go to s/w crypto instead
		of h/w crypto. This code makes sure that all selftests
		calls always go to h/w, independent of DTSI flags. */
		if (!strcmp(tv_cipher.mod_alg, "xts(aes)")) {
			if (selftest_d->prefix_aes_xts_algo)
				if (_fips_get_alg_cra_name(
					tv_cipher.mod_alg,
					selftest_d->algo_prefix,
					strlen(tv_cipher.mod_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
						tv_index);
					goto clr_buf;
				}
		} else {
			if (selftest_d->prefix_aes_cbc_ecb_ctr_algo)
				if (_fips_get_alg_cra_name(
					tv_cipher.mod_alg,
					selftest_d->algo_prefix,
					strlen(tv_cipher.mod_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
						tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ablkcipher(tv_cipher.mod_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
				tv_cipher.mod_alg);
			rc = -ENOMEM;
			goto clr_buf;
		}

		ablkcipher_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!ablkcipher_req) {
			pr_err("qcrypto: ablkcipher_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		/* pin the request to the requested crypto engine */
		rc = qcrypto_cipher_set_device(ablkcipher_req,
			selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ablkcipher_req;
		}
		ablkcipher_request_set_callback(ablkcipher_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		crypto_ablkcipher_clear_flags(tfm, ~0);
		rc = crypto_ablkcipher_setkey(tfm, tv_cipher.key,
			tv_cipher.klen);
		if (rc) {
			pr_err("qcrypto: crypto_ablkcipher_setkey failed\n");
			goto clr_ablkcipher_req;
		}
		sg_set_buf(&fips_sg, k_align_src, tv_cipher.enc_txt_len);
		sg_mark_end(&fips_sg);
		ablkcipher_request_set_crypt(ablkcipher_req,
			&fips_sg, &fips_sg, tv_cipher.pln_txt_len,
			tv_cipher.iv);

		/**** Encryption Test ****/
		init_completion(&fips_completion.completion);
		rc = crypto_ablkcipher_encrypt(ablkcipher_req);
		/* async path: wait for _fips_cb, then check its status */
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:cipher:ENC, wait_for_completion failed\n");
				goto clr_ablkcipher_req;
			}

		}

		if (memcmp(k_align_src, tv_cipher.enc_txt,
			tv_cipher.enc_txt_len)) {
			rc = -1;
			goto clr_ablkcipher_req;
		}

		/**** Decryption test ****/
		init_completion(&fips_completion.completion);
		rc = crypto_ablkcipher_decrypt(ablkcipher_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:cipher:DEC, wait_for_completion failed\n");
				goto clr_ablkcipher_req;
			}

		}

		/* round-trip must reproduce the original plaintext */
		if (memcmp(k_align_src, tv_cipher.pln_txt,
			tv_cipher.pln_txt_len))
			rc = -1;

clr_ablkcipher_req:
		ablkcipher_request_free(ablkcipher_req);
clr_tfm:
		crypto_free_ablkcipher(tfm);
clr_buf:
		kzfree(k_align_src);

		if (rc)
			return rc;

	}
	return rc;
}
/*
 * AEAD (e.g. authenc, CCM) known-answer self tests.
 *
 * Encrypts each vector in place (plaintext + auth tag appended) and
 * compares against the expected ciphertext, then re-keys and decrypts,
 * comparing against the original plaintext.  The auth tag length is
 * derived as |enc_txt_len - pln_txt_len|.  Returns 0 if all vectors
 * pass, a negative errno or -1 on the first failure.
 */
int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index, num_tv, authsize, buf_length;
	struct crypto_aead *tfm;
	struct aead_request *aead_req;
	struct _fips_completion fips_completion;
	struct scatterlist fips_sg, fips_assoc_sg;
	char *k_align_src = NULL;	/* in-place src/dst buffer */
	struct _fips_test_vector_aead tv_aead;

	num_tv = (sizeof(fips_test_vector_aead)) /
		(sizeof(struct _fips_test_vector_aead));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		/* private copy: the algo name may be rewritten below */
		memcpy(&tv_aead, &fips_test_vector_aead[tv_index],
			(sizeof(struct _fips_test_vector_aead)));

		/* buffer must hold the larger of plaintext/ciphertext */
		if (tv_aead.pln_txt_len > tv_aead.enc_txt_len)
			buf_length = tv_aead.pln_txt_len;
		else
			buf_length = tv_aead.enc_txt_len;

		/* Single buffer allocation for in place operation */
		k_align_src = kzalloc(buf_length, GFP_KERNEL);
		if (k_align_src == NULL) {
			/* NOTE(review): PTR_ERR(NULL) is always 0 here */
			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
				PTR_ERR(k_align_src));
			return -ENOMEM;
		}
		memcpy(&k_align_src[0], tv_aead.pln_txt,
			tv_aead.pln_txt_len);

		/* use_sw flags are set in dtsi file which makes
		default Linux API calls to go to s/w crypto instead
		of h/w crypto. This code makes sure that all selftests
		calls always go to h/w, independent of DTSI flags. */
		if (selftest_d->prefix_aead_algo) {
			if (_fips_get_alg_cra_name(tv_aead.mod_alg,
				selftest_d->algo_prefix,
				strlen(tv_aead.mod_alg))) {
				rc = -1;
				pr_err("Algo Name is too long for tv %d\n",
					tv_index);
				goto clr_buf;
			}
		}
		tfm = crypto_alloc_aead(tv_aead.mod_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
				tv_aead.mod_alg);
			rc = -ENOMEM;
			goto clr_buf;
		}
		aead_req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!aead_req) {
			pr_err("qcrypto:aead_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		/* pin the request to the requested crypto engine */
		rc = qcrypto_aead_set_device(aead_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_aead_req;
		}
		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);
		crypto_aead_clear_flags(tfm, ~0);
		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto:crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}
		/* tag length = size difference between cipher and plain text */
		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto:crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}
		sg_init_one(&fips_sg, k_align_src,
			tv_aead.pln_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
			tv_aead.pln_txt_len , tv_aead.iv);
		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg, tv_aead.alen);

		/**** Encryption test ****/
		rc = crypto_aead_encrypt(aead_req);
		/* async path: wait for _fips_cb, then check its status */
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:ENC, wait_for_completion failed\n");
				goto clr_aead_req;
			}

		}
		if (memcmp(k_align_src, tv_aead.enc_txt, tv_aead.enc_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}

		/** Decryption test **/
		/* re-arm completion, key and tag size for the reverse direction */
		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);
		crypto_aead_clear_flags(tfm, ~0);
		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}

		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}

		sg_init_one(&fips_sg, k_align_src,
			tv_aead.enc_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
			tv_aead.enc_txt_len, tv_aead.iv);
		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg,
			tv_aead.alen);
		rc = crypto_aead_decrypt(aead_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:DEC, wait_for_completion failed\n");
				goto clr_aead_req;
			}

		}
		/* round-trip must reproduce the original plaintext */
		if (memcmp(k_align_src, tv_aead.pln_txt, tv_aead.pln_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}
clr_aead_req:
		aead_request_free(aead_req);
clr_tfm:
		crypto_free_aead(tfm);
clr_buf:
		kzfree(k_align_src);

	/* In case of any failure, return error */
		if (rc)
			return rc;
	}
	return rc;
}
| gpl-2.0 |
SiennaStellar/linux-3.10.20_kelleni | tools/perf/util/top.c | 2136 | 3344 | /*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Refactored from builtin-top.c, see that files for further copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include <inttypes.h>
/*
 * snprintf() returns the number of characters that *would* have been
 * written; clamp it to the buffer size so callers can safely chain
 * "ret += SNPRINTF(bf + ret, size - ret, ...)" without overrunning.
 */
#define SNPRINTF(buf, size, fmt, args...) \
({ \
	size_t r = snprintf(buf, size, fmt, ## args); \
	r > size ? size : r; \
})
/*
 * Format the "PerfTop: ... irqs/sec ..." header line into @bf (at most
 * @size bytes).  Shows sample rates, the kernel/user (and guest) split,
 * the event name, the target (pid/tid/uid/all) and the monitored CPUs.
 * Returns the number of characters written.
 */
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
	float samples_per_sec = top->samples / top->delay_secs;
	float ksamples_per_sec = top->kernel_samples / top->delay_secs;
	/* NOTE(review): if top->samples is 0 this divides by zero and the
	 * header prints nan/inf percentages -- confirm callers guarantee
	 * at least one sample before the first refresh */
	float esamples_percent = (100.0 * top->exact_samples) / top->samples;
	struct perf_record_opts *opts = &top->record_opts;
	struct perf_target *target = &opts->target;
	size_t ret = 0;

	if (!perf_guest) {
		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec  kernel:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	} else {
		/* guest mode: additionally break out guest kernel/user rates */
		float us_samples_per_sec = top->us_samples / top->delay_secs;
		float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
		float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;

		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec  kernel:%4.1f%% us:%4.1f%%"
			       " guest kernel:%4.1f%% guest us:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_kernel_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_us_samples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	}

	/* single event: also show its sample period / frequency */
	if (top->evlist->nr_entries == 1) {
		struct perf_evsel *first = perf_evlist__first(top->evlist);
		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
				(uint64_t)first->attr.sample_period,
				opts->freq ? "Hz" : "");
	}

	ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));

	ret += SNPRINTF(bf + ret, size - ret, "], ");

	/* describe the target: explicit pid/tid/uid, else "all" */
	if (target->pid)
		ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
				target->pid);
	else if (target->tid)
		ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
				target->tid);
	else if (target->uid_str != NULL)
		ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
				target->uid_str);
	else
		ret += SNPRINTF(bf + ret, size - ret, " (all");

	/* close the parenthesis with the CPU list / count */
	if (target->cpu_list)
		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
				top->evlist->cpus->nr > 1 ? "s" : "",
				target->cpu_list);
	else {
		if (target->tid)
			ret += SNPRINTF(bf + ret, size - ret, ")");
		else
			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
					top->evlist->cpus->nr,
					top->evlist->cpus->nr > 1 ? "s" : "");
	}

	return ret;
}
/* Zero every per-interval sample counter before the next refresh. */
void perf_top__reset_sample_counters(struct perf_top *top)
{
	top->samples = 0;
	top->us_samples = 0;
	top->kernel_samples = 0;
	top->exact_samples = 0;
	top->guest_kernel_samples = 0;
	top->guest_us_samples = 0;
}
| gpl-2.0 |
yacuken/android_kernel_oneplus_msm8974 | arch/microblaze/mm/highmem.c | 4696 | 2242 | /*
* highmem.c: virtual kernel memory mappings for high memory
*
* PowerPC version, stolen from the i386 version.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terrabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*
* Reworked for PowerPC by various contributors. Moved from
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
#include <linux/highmem.h>
#include <linux/module.h>
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
#include <asm/tlbflush.h>
/*
 * Map @page into a per-CPU fixmap slot with protection @prot and return
 * its kernel virtual address.  Disables pagefaults, so the mapping is
 * only valid until the matching __kunmap_atomic() on the same CPU.
 * Lowmem pages are returned directly via their permanent mapping.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{

	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);


	/* pick the next free fixmap slot for this CPU */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* slot must not already be in use */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
	local_flush_tlb_page(NULL, vaddr);

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
/*
 * Tear down a mapping created by kmap_atomic_prot() and re-enable
 * pagefaults.  Addresses below the fixmap range are permanent lowmem
 * mappings and need no teardown.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		/* lowmem address: nothing was mapped, just undo the
		 * pagefault_disable() from kmap_atomic_prot() */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
| gpl-2.0 |
sailwang94/android_kernel_samsung_ks01lte | drivers/usb/host/ehci-xls.c | 4952 | 3774 | /*
* EHCI HCD for Netlogic XLS processors.
*
* (C) Copyright 2011 Netlogic Microsystems Inc.
*
* Based on various ehci-*.c drivers
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/platform_device.h>
/*
 * hc_driver ->reset hook: locate the capability/operational register
 * blocks, halt the controller, initialize the EHCI data structures and
 * reset the hardware.  Returns 0 on success or a negative errno.
 */
static int ehci_xls_setup(struct usb_hcd *hcd)
{
	int retval;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	ehci->caps = hcd->regs;
	/* operational registers follow the capability block */
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	ehci_reset(ehci);

	return retval;
}
/*
 * Common probe body: gather the IRQ and MMIO resources from the
 * platform device, create the HCD, claim and map the register region,
 * and register the HCD with the USB core.  Error paths unwind in
 * reverse order via the err4..err1 labels.  Returns 0 or -errno.
 */
int ehci_xls_probe_internal(const struct hc_driver *driver,
	struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int retval, irq;

	/* Get our IRQ from an earlier registered Platform Resource */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	/* Get our Memory Handle */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Error: MMIO Handle %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err1;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	/* exclusive ownership of the register window */
	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		retval = -EBUSY;
		goto err2;
	}
	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);

	if (hcd->regs == NULL) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		retval = -EFAULT;
		goto err3;
	}

	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (retval != 0)
		goto err4;
	return retval;

err4:
	iounmap(hcd->regs);
err3:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
	usb_put_hcd(hcd);
err1:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev),
		retval);
	return retval;
}
/*
 * hc_driver glue: stock EHCI entry points throughout; only the .reset
 * hook (ehci_xls_setup) is XLS specific.
 */
static struct hc_driver ehci_xls_hc_driver = {
	.description	= hcd_name,
	.product_desc	= "XLS EHCI Host Controller",
	.hcd_priv_size	= sizeof(struct ehci_hcd),
	.irq		= ehci_irq,
	.flags		= HCD_USB2 | HCD_MEMORY,
	.reset		= ehci_xls_setup,
	.start		= ehci_run,
	.stop		= ehci_stop,
	.shutdown	= ehci_shutdown,

	.urb_enqueue	= ehci_urb_enqueue,
	.urb_dequeue	= ehci_urb_dequeue,
	.endpoint_disable = ehci_endpoint_disable,
	.endpoint_reset	= ehci_endpoint_reset,

	.get_frame_number = ehci_get_frame,

	.hub_status_data = ehci_hub_status_data,
	.hub_control	= ehci_hub_control,
	.bus_suspend	= ehci_bus_suspend,
	.bus_resume	= ehci_bus_resume,
	.relinquish_port = ehci_relinquish_port,
	.port_handed_over = ehci_port_handed_over,

	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/* Platform probe: defer to the shared probe body unless USB is disabled. */
static int ehci_xls_probe(struct platform_device *pdev)
{
	int ret = -ENODEV;

	if (!usb_disabled())
		ret = ehci_xls_probe_internal(&ehci_xls_hc_driver, pdev);
	return ret;
}
/*
 * Platform remove: unwind ehci_xls_probe_internal() in reverse order --
 * deregister the HCD, unmap and release the register window, then drop
 * the final HCD reference.
 */
static int ehci_xls_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	return 0;
}
MODULE_ALIAS("ehci-xls");

/* Platform driver registration; shutdown uses the generic HCD helper. */
static struct platform_driver ehci_xls_driver = {
	.probe		= ehci_xls_probe,
	.remove		= ehci_xls_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name = "ehci-xls",
	},
};
andi34/android_kernel_oneplus_one | fs/affs/namei.c | 4952 | 11082 | /*
* linux/fs/affs/namei.c
*
* (c) 1996 Hans-Joachim Widmaier - Rewritten
*
* (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*/
#include "affs.h"
/* Case-folding function type: AFFS name matching is case-insensitive,
 * with the exact fold depending on the filesystem flavour (DOS\1 vs
 * the "international" DOS\3). */
typedef int (*toupper_t)(int);

/* da_hash/d_compare variants for the two case-folding rules. */
static int	 affs_toupper(int ch);
static int	 affs_hash_dentry(const struct dentry *,
		const struct inode *, struct qstr *);
static int       affs_compare_dentry(const struct dentry *parent,
		const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name);
static int	 affs_intl_toupper(int ch);
static int	 affs_intl_hash_dentry(const struct dentry *,
		const struct inode *, struct qstr *);
static int       affs_intl_compare_dentry(const struct dentry *parent,
		const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name);

/* dcache operations for plain (DOS\1) volumes */
const struct dentry_operations affs_dentry_operations = {
	.d_hash		= affs_hash_dentry,
	.d_compare	= affs_compare_dentry,
};

/* dcache operations for "international" (DOS\3) volumes */
const struct dentry_operations affs_intl_dentry_operations = {
	.d_hash		= affs_intl_hash_dentry,
	.d_compare	= affs_intl_compare_dentry,
};
/* Simple toupper() for DOS\1: fold only ASCII 'a'-'z'. */
static int
affs_toupper(int ch)
{
	if (ch < 'a' || ch > 'z')
		return ch;
	return ch - ('a' - 'A');
}
/* International toupper() for DOS\3 ("international"): folds ASCII
 * 'a'-'z' plus the Latin-1 range 0xE0-0xFE, except 0xF7 (the division
 * sign, which has no uppercase form). */
static int
affs_intl_toupper(int ch)
{
	int ascii_lower = (ch >= 'a' && ch <= 'z');
	int latin_lower = (ch >= 0xE0 && ch <= 0xFE && ch != 0xF7);

	if (ascii_lower || latin_lower)
		return ch - ('a' - 'A');
	return ch;
}
/* Pick the case-folding rule for this superblock: international fold
 * when the SF_INTL mount flag is set, plain ASCII fold otherwise. */
static inline toupper_t
affs_get_toupper(struct super_block *sb)
{
	return AFFS_SB(sb)->s_flags & SF_INTL ? affs_intl_toupper : affs_toupper;
}
/*
 * Note: the dentry argument is the parent dentry.
 */

/*
 * Compute the case-folded dcache hash for @qstr using the given fold
 * function.  Only the first 30 characters participate, mirroring the
 * on-disk name-length limit, so longer names that share a 30-char
 * prefix hash identically.  Returns 0 or the affs_check_name() error.
 */
static inline int
__affs_hash_dentry(struct qstr *qstr, toupper_t toupper)
{
	const u8 *name = qstr->name;
	unsigned long hash;
	int i;

	i = affs_check_name(qstr->name, qstr->len);
	if (i)
		return i;

	hash = init_name_hash();
	i = min(qstr->len, 30u);
	for (; i > 0; name++, i--)
		hash = partial_name_hash(toupper(*name), hash);
	qstr->hash = end_name_hash(hash);

	return 0;
}
/* d_hash for DOS\1 volumes: ASCII-only case fold. */
static int
affs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
		struct qstr *qstr)
{
	return __affs_hash_dentry(qstr, affs_toupper);
}
/* d_hash for DOS\3 ("international") volumes: Latin-1 aware fold. */
static int
affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode,
		struct qstr *qstr)
{
	return __affs_hash_dentry(qstr, affs_intl_toupper);
}
/*
 * Case-insensitive name comparison for the dcache.  Returns 0 when the
 * names are equal under @toupper (considering only the first 30 chars),
 * 1 otherwise -- the d_compare convention.
 */
static inline int __affs_compare_dentry(unsigned int len,
		const char *str, const struct qstr *name, toupper_t toupper)
{
	const u8 *aname = str;
	const u8 *bname = name->name;

	/*
	 * 'str' is the name of an already existing dentry, so the name
	 * must be valid. 'name' must be validated first.
	 */

	if (affs_check_name(name->name, name->len))
		return 1;

	/*
	 * If the names are longer than the allowed 30 chars,
	 * the excess is ignored, so their length may differ.
	 */
	if (len >= 30) {
		if (name->len < 30)
			return 1;
		len = 30;
	} else if (len != name->len)
		return 1;

	for (; len > 0; len--)
		if (toupper(*aname++) != toupper(*bname++))
			return 1;

	return 0;
}
/* d_compare for DOS\1 volumes. */
static int
affs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	return __affs_compare_dentry(len, str, name, affs_toupper);
}
/* d_compare for DOS\3 ("international") volumes. */
static int
affs_intl_compare_dentry(const struct dentry *parent,const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	return __affs_compare_dentry(len, str, name, affs_intl_toupper);
}
/*
 * NOTE! unlike strncmp, affs_match returns 1 for success, 0 for failure.
 *
 * Compares the dentry's name against an on-disk BCPL string @name2,
 * whose first byte is the stored length.  Only the first 30 characters
 * are significant, matching the on-disk name limit.
 */
static inline int
affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
{
	const u8 *name = dentry->d_name.name;
	int len = dentry->d_name.len;

	if (len >= 30) {
		/* *name2 is the stored length byte */
		if (*name2 < 30)
			return 0;
		len = 30;
	} else if (len != *name2)
		return 0;

	for (name2++; len > 0; len--)
		if (toupper(*name++) != toupper(*name2++))
			return 0;
	return 1;
}
/*
 * Compute the on-disk directory hash-table slot for @name, using the
 * superblock's case-folding rule.  This is the Amiga FFS hash: seed
 * with the (clamped) length, then fold each character in with *13,
 * masked to 11 bits, finally reduced modulo the table size.
 */
int
affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
{
	toupper_t toupper = affs_get_toupper(sb);
	int hash;

	hash = len = min(len, 30u);
	for (; len > 0; len--)
		hash = (hash * 13 + toupper(*name++)) & 0x7ff;

	return hash % AFFS_SB(sb)->s_hashsize;
}
/*
 * Look up @dentry's name in directory @dir by walking the on-disk hash
 * chain.  Returns the header block's buffer_head on a match (caller
 * releases it), NULL if the name is not present, or ERR_PTR(-EIO) on a
 * read failure.  Caller must hold the directory lock.
 */
static struct buffer_head *
affs_find_entry(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct buffer_head *bh;
	toupper_t toupper = affs_get_toupper(sb);
	u32 key;

	pr_debug("AFFS: find_entry(\"%.*s\")\n", (int)dentry->d_name.len, dentry->d_name.name);

	bh = affs_bread(sb, dir->i_ino);
	if (!bh)
		return ERR_PTR(-EIO);

	/* chain head: hash-table slot in the directory header block */
	key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]);

	for (;;) {
		affs_brelse(bh);
		if (key == 0)
			return NULL;	/* end of chain: not found */
		bh = affs_bread(sb, key);
		if (!bh)
			return ERR_PTR(-EIO);
		if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
			return bh;
		/* follow the collision chain */
		key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
	}
}
/*
 * ->lookup: resolve @dentry in @dir.  A hard-link entry (ST_LINKFILE)
 * is followed to its original header block before the inode is read.
 * A negative dentry is instantiated when the name does not exist.
 */
struct dentry *
affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct buffer_head *bh;
	struct inode *inode = NULL;

	pr_debug("AFFS: lookup(\"%.*s\")\n",(int)dentry->d_name.len,dentry->d_name.name);

	affs_lock_dir(dir);
	bh = affs_find_entry(dir, dentry);
	affs_unlock_dir(dir);
	if (IS_ERR(bh))
		return ERR_CAST(bh);
	if (bh) {
		u32 ino = bh->b_blocknr;

		/* store the real header ino in d_fsdata for faster lookups */
		dentry->d_fsdata = (void *)(long)ino;
		switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
		//link to dirs disabled
		//case ST_LINKDIR:
		case ST_LINKFILE:
			ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
		}
		affs_brelse(bh);
		inode = affs_iget(sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	/* inode == NULL instantiates a negative dentry */
	d_add(dentry, inode);
	return NULL;
}
/* ->unlink: removal is entirely handled by affs_remove_header(). */
int
affs_unlink(struct inode *dir, struct dentry *dentry)
{
	pr_debug("AFFS: unlink(dir=%d, %lu \"%.*s\")\n", (u32)dir->i_ino,
		 dentry->d_inode->i_ino,
		 (int)dentry->d_name.len, dentry->d_name.name);

	return affs_remove_header(dentry);
}
/*
 * ->create: allocate a new inode, set up the regular-file operations
 * (OFS vs FFS address-space ops chosen by the SF_OFS mount flag), and
 * link it into @dir as an ST_FILE entry.  On failure the half-built
 * inode is dropped.  Returns 0 or -errno.
 */
int
affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct inode	*inode;
	int		 error;

	pr_debug("AFFS: create(%lu,\"%.*s\",0%ho)\n",dir->i_ino,(int)dentry->d_name.len,
		 dentry->d_name.name,mode);

	inode = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_mode = mode;
	/* mirror the POSIX mode into Amiga protection bits */
	mode_to_prot(inode);
	mark_inode_dirty(inode);

	inode->i_op = &affs_file_inode_operations;
	inode->i_fop = &affs_file_operations;
	inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
	error = affs_add_entry(dir, inode, dentry, ST_FILE);
	if (error) {
		clear_nlink(inode);
		iput(inode);
		return error;
	}
	return 0;
}
/*
 * ->mkdir: allocate a new inode, set directory operations and link it
 * into @dir as an ST_USERDIR entry.  Returns 0 or -errno.
 */
int
affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode		*inode;
	int			 error;

	pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%ho)\n",dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name,mode);

	inode = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_mode = S_IFDIR | mode;
	/* mirror the POSIX mode into Amiga protection bits */
	mode_to_prot(inode);

	inode->i_op = &affs_dir_inode_operations;
	inode->i_fop = &affs_dir_operations;

	error = affs_add_entry(dir, inode, dentry, ST_USERDIR);
	if (error) {
		clear_nlink(inode);
		mark_inode_dirty(inode);
		iput(inode);
		return error;
	}
	return 0;
}
/* ->rmdir: removal is entirely handled by affs_remove_header(). */
int
affs_rmdir(struct inode *dir, struct dentry *dentry)
{
	pr_debug("AFFS: rmdir(dir=%u, %lu \"%.*s\")\n", (u32)dir->i_ino,
		 dentry->d_inode->i_ino,
		 (int)dentry->d_name.len, dentry->d_name.name);

	return affs_remove_header(dentry);
}
/*
 * ->symlink: create an ST_SOFTLINK entry whose target is stored,
 * Amiga-style, in the header block's hash-table area.  The POSIX path
 * is rewritten on the way in: a leading "/" becomes the volume name,
 * "/./" components are dropped, and (apparently) "/../" collapses to
 * "/" -- NOTE(review): confirm this matches AmigaOS parent-dir syntax.
 * Returns 0 or -errno.
 */
int
affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct super_block	*sb = dir->i_sb;
	struct buffer_head	*bh;
	struct inode		*inode;
	char			*p;
	int			 i, maxlen, error;
	char			 c, lc;

	pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name,symname);

	/* target must fit in the header's hash-table area plus NUL */
	maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1;
	inode  = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_op = &affs_symlink_inode_operations;
	inode->i_data.a_ops = &affs_symlink_aops;
	inode->i_mode = S_IFLNK | 0777;
	mode_to_prot(inode);

	error = -EIO;
	bh = affs_bread(sb, inode->i_ino);
	if (!bh)
		goto err;
	i  = 0;
	p  = (char *)AFFS_HEAD(bh)->table;
	lc = '/';
	if (*symname == '/') {
		/* absolute path: substitute the volume name for "/" */
		struct affs_sb_info *sbi = AFFS_SB(sb);
		while (*symname == '/')
			symname++;
		spin_lock(&sbi->symlink_lock);
		while (sbi->s_volume[i])	/* Cannot overflow */
			*p++ = sbi->s_volume[i++];
		spin_unlock(&sbi->symlink_lock);
	}
	while (i < maxlen && (c = *symname++)) {
		if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
			*p++ = '/';
			i++;
			symname += 2;
			lc = '/';
		} else if (c == '.' && lc == '/' && *symname == '/') {
			/* drop "./" components */
			symname++;
			lc = '/';
		} else {
			*p++ = c;
			lc   = c;
			i++;
		}
		/* collapse runs of slashes */
		if (lc == '/')
			while (*symname == '/')
				symname++;
	}
	*p = 0;
	mark_buffer_dirty_inode(bh, inode);
	affs_brelse(bh);
	mark_inode_dirty(inode);

	error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK);
	if (error)
		goto err;

	return 0;

err:
	clear_nlink(inode);
	mark_inode_dirty(inode);
	iput(inode);
	return error;
}
/* ->link: add an ST_LINKFILE entry in @dir pointing at the existing
 * inode of @old_dentry. */
int
affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	pr_debug("AFFS: link(%u, %u, \"%.*s\")\n", (u32)inode->i_ino, (u32)dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name);

	return affs_add_entry(dir, inode, dentry, ST_LINKFILE);
}
/*
 * ->rename: validate the new name, unlink any existing target, then
 * move the source header block: remove it from the old directory's
 * hash chain, rewrite its stored name, and insert it into the new
 * directory's chain.  Returns 0 or -errno.
 */
int
affs_rename(struct inode *old_dir, struct dentry *old_dentry,
	    struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct buffer_head *bh = NULL;
	int retval;

	pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
		 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
		 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);

	retval = affs_check_name(new_dentry->d_name.name,new_dentry->d_name.len);
	if (retval)
		return retval;

	/* Unlink destination if it already exists */
	if (new_dentry->d_inode) {
		retval = affs_remove_header(new_dentry);
		if (retval)
			return retval;
	}

	bh = affs_bread(sb, old_dentry->d_inode->i_ino);
	if (!bh)
		return -EIO;

	/* Remove header from its parent directory. */
	affs_lock_dir(old_dir);
	retval = affs_remove_hash(old_dir, bh);
	affs_unlock_dir(old_dir);
	if (retval)
		goto done;

	/* And insert it into the new directory with the new name. */
	affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry);
	affs_fix_checksum(sb, bh);
	affs_lock_dir(new_dir);
	retval = affs_insert_hash(new_dir, bh);
	affs_unlock_dir(new_dir);
	/* TODO: move it back to old_dir, if error? */

done:
	mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir);
	affs_brelse(bh);
	return retval;
}
| gpl-2.0 |
twitish/SimpleKernel-4.2.2 | drivers/scsi/a100u2w.c | 8280 | 36929 | /*
* Initio A100 device driver for Linux.
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 2003-2004 Christoph Hellwig
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Revision History:
* 07/02/98 hl - v.91n Initial drivers.
* 09/14/98 hl - v1.01 Support new Kernel.
* 09/22/98 hl - v1.01a Support reset.
* 09/24/98 hl - v1.01b Fixed reset.
* 10/05/98 hl - v1.02 split the source code and release.
* 12/19/98 bv - v1.02a Use spinlocks for 2.1.95 and up
* 01/31/99 bv - v1.02b Use mdelay instead of waitForPause
* 08/08/99 bv - v1.02c Use waitForPause again.
* 06/25/02 Doug Ledford <dledford@redhat.com> - v1.02d
* - Remove limit on number of controllers
* - Port to DMA mapping API
* - Clean up interrupt handler registration
* - Fix memory leaks
* - Fix allocation of scsi host structs and private data
* 11/18/03 Christoph Hellwig <hch@lst.de>
* - Port to new probing API
* - Fix some more leaks in init failure cases
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
* 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Grand cleanup and Linuxisation
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "a100u2w.h"
/* Forward declarations: the SCB allocator and completion handler are
 * referenced before their definitions below. */
static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);

/* NOTE(review): a single file-scope NVRAM image is shared by every probed
 * adapter; this assumes probes are serialised - confirm before relying on
 * it for multi-adapter setups. */
static struct orc_nvram nvram, *nvramp = &nvram;

/* Factory-default 64-byte NVRAM image, written back to the serial EEPROM
 * when the stored copy fails its checksum (see se2_update_all(), which
 * fills in the checksum at offset 0x3F). */
static u8 default_nvram[64] =
{
/*----------header -------------*/
	0x01,			/* 0x00: Sub System Vendor ID 0 */
	0x11,			/* 0x01: Sub System Vendor ID 1 */
	0x60,			/* 0x02: Sub System ID 0        */
	0x10,			/* 0x03: Sub System ID 1        */
	0x00,			/* 0x04: SubClass               */
	0x01,			/* 0x05: Vendor ID 0            */
	0x11,			/* 0x06: Vendor ID 1            */
	0x60,			/* 0x07: Device ID 0            */
	0x10,			/* 0x08: Device ID 1            */
	0x00,			/* 0x09: Reserved               */
	0x00,			/* 0x0A: Reserved               */
	0x01,			/* 0x0B: Revision of Data Structure */
				/* -- Host Adapter Structure --- */
	0x01,			/* 0x0C: Number Of SCSI Channel */
	0x01,			/* 0x0D: BIOS Configuration 1   */
	0x00,			/* 0x0E: BIOS Configuration 2   */
	0x00,			/* 0x0F: BIOS Configuration 3   */
				/* --- SCSI Channel 0 Configuration --- */
	0x07,			/* 0x10: H/A ID                 */
	0x83,			/* 0x11: Channel Configuration  */
	0x20,			/* 0x12: MAX TAG per target     */
	0x0A,			/* 0x13: SCSI Reset Recovering time */
	0x00,			/* 0x14: Channel Configuration4 */
	0x00,			/* 0x15: Channel Configuration5 */
				/* SCSI Channel 0 Target Configuration  */
				/* 0x16-0x25                    */
	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
				/* --- SCSI Channel 1 Configuration --- */
	0x07,			/* 0x26: H/A ID                 */
	0x83,			/* 0x27: Channel Configuration  */
	0x20,			/* 0x28: MAX TAG per target     */
	0x0A,			/* 0x29: SCSI Reset Recovering time */
	0x00,			/* 0x2A: Channel Configuration4 */
	0x00,			/* 0x2B: Channel Configuration5 */
				/* SCSI Channel 1 Target Configuration  */
				/* 0x2C-0x3B                    */
	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
	0x00,			/* 0x3C: Reserved               */
	0x00,			/* 0x3D: Reserved               */
	0x00,			/* 0x3E: Reserved               */
	0x00			/* 0x3F: Checksum               */
};
/*
 * wait_chip_ready - poll until the controller reports HOSTSTOP.
 * @host: adapter to poll
 *
 * Polls the host control register every 100ms for up to one second.
 * Returns 1 when the chip has stopped, 0 on timeout.
 */
static u8 wait_chip_ready(struct orc_host * host)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (inb(host->base + ORC_HCTRL) & HOSTSTOP)
			return 1;
		mdelay(100);	/* 100ms between polls */
	}
	return 0;
}
/*
 * wait_firmware_ready - poll until the firmware raises RREADY.
 * @host: adapter to poll
 *
 * 100ms polls with a one-second budget.  Returns 1 when the firmware
 * is ready, 0 on timeout.
 */
static u8 wait_firmware_ready(struct orc_host * host)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (inb(host->base + ORC_HSTUS) & RREADY)
			return 1;
		mdelay(100);	/* 100ms between polls */
	}
	return 0;
}
/***************************************************************************/
/*
 * wait_scsi_reset_done - wait for the SCSIRST bit to self-clear.
 * @host: adapter to poll
 *
 * 100ms polls with a one-second budget.  Returns 1 once the bus reset
 * has completed, 0 on timeout.
 */
static u8 wait_scsi_reset_done(struct orc_host * host)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (!(inb(host->base + ORC_HCTRL) & SCSIRST))
			return 1;
		mdelay(100);	/* 100ms between polls */
	}
	return 0;
}
/***************************************************************************/
/*
 * wait_HDO_off - wait for the controller to consume a mailbox byte.
 * @host: adapter to poll
 *
 * HDO clears once the byte written to ORC_HDATA has been accepted.
 * 100ms polls, one-second budget; 1 on success, 0 on timeout.
 */
static u8 wait_HDO_off(struct orc_host * host)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (!(inb(host->base + ORC_HCTRL) & HDO))
			return 1;
		mdelay(100);	/* 100ms between polls */
	}
	return 0;
}
/***************************************************************************/
/*
 * wait_hdi_set - wait for the controller to post a mailbox reply byte.
 * @host: adapter to poll
 * @data: latched copy of the status register, so the caller can write
 *        it back to acknowledge (clear) HDI
 *
 * 100ms polls, one-second budget; 1 on success, 0 on timeout.
 */
static u8 wait_hdi_set(struct orc_host * host, u8 * data)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		*data = inb(host->base + ORC_HSTUS);
		if (*data & HDI)
			return 1;
		mdelay(100);	/* 100ms between polls */
	}
	return 0;
}
/***************************************************************************/
/*
 * orc_read_fwrev - read the firmware revision via the mailbox handshake.
 * @host: adapter to query
 *
 * Issues ORC_CMD_VERSION and reads back two bytes (low byte first).
 * Returns the 16-bit version, or 0 if any handshake step times out.
 */
static unsigned short orc_read_fwrev(struct orc_host * host)
{
	u16 version;
	u8 data;

	outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
	outb(HDO, host->base + ORC_HCTRL);
	if (wait_HDO_off(host) == 0)	/* Wait HDO off */
		return 0;

	if (wait_hdi_set(host, &data) == 0)	/* Wait HDI set */
		return 0;
	version = inb(host->base + ORC_HDATA);	/* low byte */
	outb(data, host->base + ORC_HSTUS);	/* Clear HDI */

	if (wait_hdi_set(host, &data) == 0)	/* Wait HDI set */
		return 0;
	version |= inb(host->base + ORC_HDATA) << 8;	/* high byte */
	outb(data, host->base + ORC_HSTUS);	/* Clear HDI */

	return version;
}
/***************************************************************************/
/*
 * orc_nv_write - write one NVRAM byte through the firmware mailbox.
 * @host: adapter
 * @address: NVRAM offset to write
 * @value: byte to store
 *
 * The command, the address and the value are pushed through three
 * identical HDO handshakes.  Returns 1 on success, 0 if any handshake
 * times out.
 */
static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
{
	u8 seq[3];
	int i;

	seq[0] = ORC_CMD_SET_NVM;	/* command byte */
	seq[1] = address;		/* then the offset */
	seq[2] = value;			/* then the data */

	for (i = 0; i < 3; i++) {
		outb(seq[i], host->base + ORC_HDATA);
		outb(HDO, host->base + ORC_HCTRL);
		if (wait_HDO_off(host) == 0)	/* controller consumed it? */
			return 0;
	}
	return 1;
}
/***************************************************************************/
/*
 * orc_nv_read - read one NVRAM byte through the firmware mailbox.
 * @host: adapter
 * @address: NVRAM offset to read
 * @ptr: where to store the byte read
 *
 * Returns 1 on success, 0 if any handshake step times out.
 */
static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
{
	unsigned char data;

	outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA);	/* Write command */
	outb(HDO, host->base + ORC_HCTRL);
	if (wait_HDO_off(host) == 0)	/* Wait HDO off */
		return 0;

	outb(address, host->base + ORC_HDATA);	/* Write address */
	outb(HDO, host->base + ORC_HCTRL);
	if (wait_HDO_off(host) == 0)	/* Wait HDO off */
		return 0;

	if (wait_hdi_set(host, &data) == 0)	/* Wait HDI set */
		return 0;
	*ptr = inb(host->base + ORC_HDATA);	/* the requested byte */
	outb(data, host->base + ORC_HSTUS);	/* Clear HDI */

	return 1;
}
/**
* orc_exec_sb - Queue an SCB with the HA
* @host: host adapter the SCB belongs to
* @scb: SCB to queue for execution
*/
static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
{
	scb->status = ORCSCB_POST;	/* mark as posted to the adapter */
	outb(scb->scbidx, host->base + ORC_PQUEUE);	/* hand its index to the request queue */
}
/**
* se2_rd_all - read SCSI parameters from EEPROM
* @host: Host whose EEPROM is being loaded
*
* Read SCSI H/A configuration parameters from serial EEPROM
*/
/*
 * se2_rd_all - read the whole 64-byte NVRAM image into the global copy.
 * @host: adapter to read from
 *
 * Returns 1 when the image was read and its checksum is valid,
 * -1 on a read failure or checksum mismatch.
 */
static int se2_rd_all(struct orc_host * host)
{
	int i;
	u8 *np, chksum = 0;

	/* Pull all 64 bytes into the file-scope nvram image */
	np = (u8 *) nvramp;
	for (i = 0; i < 64; i++, np++) {	/* <01> */
		if (orc_nv_read(host, (u8) i, np) == 0)
			return -1;
	}

	/*------ Is checksum ok ? (last byte = sum of the first 63) ------*/
	np = (u8 *) nvramp;
	for (i = 0; i < 63; i++)
		chksum += *np++;

	if (nvramp->CheckSum != (u8) chksum)
		return -1;
	return 1;
}
/**
* se2_update_all - update the EEPROM
* @host: Host whose EEPROM is being updated
*
* Update changed bytes in the EEPROM image.
*/
/*
 * se2_update_all - restore the default NVRAM image to the EEPROM.
 * @host: adapter whose EEPROM is being updated
 *
 * Computes the checksum over default_nvram, stores it in its last
 * byte, then writes back only the bytes that differ from the current
 * in-memory image.
 */
static void se2_update_all(struct orc_host * host)
{				/* setup default pattern */
	int i;
	u8 *np, *np1, chksum = 0;

	/* Calculate checksum first */
	np = (u8 *) default_nvram;
	for (i = 0; i < 63; i++)
		chksum += *np++;
	*np = chksum;	/* stored in default_nvram[63] */

	/* Write only the differing bytes to spare EEPROM cycles */
	np = (u8 *) default_nvram;
	np1 = (u8 *) nvramp;
	for (i = 0; i < 64; i++, np++, np1++) {
		if (*np != *np1)
			orc_nv_write(host, (u8) i, *np);
	}
}
/**
* read_eeprom - load EEPROM
* @host: Host EEPROM to read
*
* Read the EEPROM for a given host. If it is invalid or fails
* the restore the defaults and use them.
*/
/*
 * read_eeprom - load the adapter's EEPROM into the global NVRAM image.
 * @host: Host EEPROM to read
 *
 * If the stored image fails its checksum, the factory defaults are
 * written back and the image is loaded again.
 */
static void read_eeprom(struct orc_host * host)
{
	if (se2_rd_all(host) == 1)
		return;		/* image valid - done */
	se2_update_all(host);	/* rewrite default pattern */
	se2_rd_all(host);	/* and load it again */
}
/**
* orc_load_firmware - initialise firmware
* @host: Host to set up
*
* Load the firmware from the EEPROM into controller SRAM. This
* is basically a 4K block copy and then a 4K block read to check
* correctness. The rest is convulted by the indirect interfaces
* in the hardware
*/
/**
 * orc_load_firmware - initialise firmware
 * @host: Host to set up
 *
 * Load the firmware from the EEPROM into controller SRAM. This
 * is basically a 4K block copy and then a 4K block read to check
 * correctness. The rest is convoluted by the indirect interfaces
 * in the hardware.
 *
 * Returns 1 on success, 0 on a bad BIOS signature or verify mismatch.
 */
static u8 orc_load_firmware(struct orc_host * host)
{
	u32 data32;
	u16 bios_addr;
	u16 i;
	u8 *data32_ptr, data;

	/* Set up the EEPROM for access */
	data = inb(host->base + ORC_GCFG);
	outb(data | EEPRG, host->base + ORC_GCFG);	/* Enable EEPROM programming */
	outb(0x00, host->base + ORC_EBIOSADR2);
	outw(0x0000, host->base + ORC_EBIOSADR0);
	/* Check the 0x55 0xAA BIOS signature at offsets 0 and 1 */
	if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
		outb(data, host->base + ORC_GCFG);	/* Disable EEPROM programming */
		return 0;
	}
	outw(0x0001, host->base + ORC_EBIOSADR0);
	if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
		outb(data, host->base + ORC_GCFG);	/* Disable EEPROM programming */
		return 0;
	}

	outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL);	/* Enable SRAM programming */
	data32_ptr = (u8 *) & data32;
	data32 = cpu_to_le32(0);	/* Initial FW address to 0 */
	/* The FW base address is stored as 3 bytes at BIOS offsets 0x10-0x12 */
	outw(0x0010, host->base + ORC_EBIOSADR0);
	*data32_ptr = inb(host->base + ORC_EBIOSDATA);	/* Read from BIOS */
	outw(0x0011, host->base + ORC_EBIOSADR0);
	*(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA);	/* Read from BIOS */
	outw(0x0012, host->base + ORC_EBIOSADR0);
	*(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA);	/* Read from BIOS */
	outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
	outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR);	/* Write FW address */

	/* Copy the code from the BIOS to the SRAM */

	udelay(500);	/* Required on Sun Ultra 5 ... 350 -> failures */
	bios_addr = (u16) le32_to_cpu(data32);	/* FW code locate at BIOS address + ? */
	for (i = 0, data32_ptr = (u8 *) & data32;	/* Download the code */
	     i < 0x1000;	/* Firmware code size = 4K */
	     i++, bios_addr++) {
		outw(bios_addr, host->base + ORC_EBIOSADR0);
		*data32_ptr++ = inb(host->base + ORC_EBIOSDATA);	/* Read from BIOS */
		if ((i % 4) == 3) {
			outl(le32_to_cpu(data32), host->base + ORC_RISCRAM);	/* Write every 4 bytes */
			data32_ptr = (u8 *) & data32;
		}
	}

	/* Go back and check they match */

	outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL);	/* Reset program count 0 */
	bios_addr -= 0x1000;	/* Reset the BIOS address */
	for (i = 0, data32_ptr = (u8 *) & data32;	/* Check the code */
	     i < 0x1000;	/* Firmware code size = 4K */
	     i++, bios_addr++) {
		outw(bios_addr, host->base + ORC_EBIOSADR0);
		*data32_ptr++ = inb(host->base + ORC_EBIOSDATA);	/* Read from BIOS */
		if ((i % 4) == 3) {
			if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) {
				outb(PRGMRST, host->base + ORC_RISCCTL);	/* Reset program to 0 */
				outb(data, host->base + ORC_GCFG);	/*Disable EEPROM programming */
				return 0;
			}
			data32_ptr = (u8 *) & data32;
		}
	}

	/* Success */
	outb(PRGMRST, host->base + ORC_RISCCTL);	/* Reset program to 0 */
	outb(data, host->base + ORC_GCFG);	/* Disable EEPROM programming */
	return 1;
}
/***************************************************************************/
/*
 * setup_SCBs - point the controller at the SCB array and link each SCB
 * to its extended SCB (which holds the scatter/gather list and doubles
 * as the sense buffer area).
 */
static void setup_SCBs(struct orc_host * host)
{
	struct orc_scb *scb;
	int i;
	struct orc_extended_scb *escb;
	dma_addr_t escb_phys;

	/* Setup SCB base and SCB Size registers */
	outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE);	/* Total number of SCBs */
	/* SCB base address 0 */
	outl(host->scb_phys, host->base + ORC_SCBBASE0);
	/* SCB base address 1 */
	outl(host->scb_phys, host->base + ORC_SCBBASE1);

	/* setup scatter list address with one buffer */
	scb = host->scb_virt;
	escb = host->escb_virt;

	for (i = 0; i < ORC_MAXQUEUE; i++) {
		escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
		/* SG list and sense data both live in the matching escb */
		scb->sg_addr = cpu_to_le32((u32) escb_phys);
		scb->sense_addr = cpu_to_le32((u32) escb_phys);
		scb->escb = escb;
		scb->scbidx = i;
		scb++;
		escb++;
	}
}
/**
* init_alloc_map - initialise allocation map
* @host: host map to configure
*
* Initialise the allocation maps for this device. If the device
* is not quiescent the caller must hold the allocation lock
*/
/*
 * init_alloc_map - mark every SCB as free.
 * @host: host map to configure
 *
 * Sets every bit of the per-channel allocation bitmaps (a set bit means
 * "free").  If the device is not quiescent the caller must hold the
 * allocation lock.
 */
static void init_alloc_map(struct orc_host * host)
{
	u8 chan, word;

	for (chan = 0; chan < MAX_CHANNELS; chan++)
		for (word = 0; word < 8; word++)
			host->allocation_map[chan][word] = 0xffffffff;
}
/**
* init_orchid - initialise the host adapter
* @host:host adapter to initialise
*
* Initialise the controller and if necessary load the firmware.
*
* Returns -1 if the initialisation fails.
*/
/**
 * init_orchid - initialise the host adapter
 * @host: host adapter to initialise
 *
 * Initialise the controller and if necessary load the firmware,
 * then copy the NVRAM configuration into the host structure.
 *
 * Returns 0 on success, -1 if the initialisation fails.
 */
static int init_orchid(struct orc_host * host)
{
	u8 *ptr;
	u16 revision;
	u8 i;

	init_alloc_map(host);
	outb(0xFF, host->base + ORC_GIMSK);	/* Disable all interrupts */

	if (inb(host->base + ORC_HSTUS) & RREADY) {	/* Orchid is ready */
		revision = orc_read_fwrev(host);
		if (revision == 0xFFFF) {
			/* Bogus revision: reset and reload the firmware */
			outb(DEVRST, host->base + ORC_HCTRL);	/* Reset Host Adapter */
			if (wait_chip_ready(host) == 0)
				return -1;
			orc_load_firmware(host);	/* Download FW */
			setup_SCBs(host);	/* Setup SCB base and SCB Size registers */
			outb(0x00, host->base + ORC_HCTRL);	/* clear HOSTSTOP */
			if (wait_firmware_ready(host) == 0)
				return -1;
			/* Wait for firmware ready */
		} else {
			setup_SCBs(host);	/* Setup SCB base and SCB Size registers */
		}
	} else {		/* Orchid is not Ready */
		outb(DEVRST, host->base + ORC_HCTRL);	/* Reset Host Adapter */
		if (wait_chip_ready(host) == 0)
			return -1;
		orc_load_firmware(host);	/* Download FW */
		setup_SCBs(host);	/* Setup SCB base and SCB Size registers */
		outb(HDO, host->base + ORC_HCTRL);	/* Do Hardware Reset & */
		/* clear HOSTSTOP  */
		if (wait_firmware_ready(host) == 0)	/* Wait for firmware ready */
			return -1;
	}

	/* Load an EEProm copy into RAM */
	/* Assumes single threaded at this point */
	read_eeprom(host);

	if (nvramp->revision != 1)
		return -1;

	/* Copy per-adapter and per-target settings out of the NVRAM image */
	host->scsi_id = nvramp->scsi_id;
	host->BIOScfg = nvramp->BIOSConfig1;
	host->max_targets = MAX_TARGETS;
	ptr = (u8 *) & (nvramp->Target00Config);
	for (i = 0; i < 16; ptr++, i++) {
		host->target_flag[i] = *ptr;	/* per-target config byte */
		host->max_tags[i] = ORC_MAXTAGS;
	}

	if (nvramp->SCSI0Config & NCC_BUSRESET)
		host->flags |= HCF_SCSI_RESET;
	outb(0xFB, host->base + ORC_GIMSK);	/* enable RP FIFO interrupt */
	return 0;
}
/**
* orc_reset_scsi_bus - perform bus reset
* @host: host being reset
*
* Perform a full bus reset on the adapter.
*/
/**
 * orc_reset_scsi_bus - perform bus reset
 * @host: host being reset
 *
 * Perform a full bus reset on the adapter: free all SCBs, pulse
 * SCSIRST and wait for it to self-clear.  Returns SUCCESS or FAILED
 * for the SCSI midlayer.
 */
static int orc_reset_scsi_bus(struct orc_host * host)
{				/* I need Host Control Block Information */
	unsigned long flags;
	int rc = SUCCESS;

	spin_lock_irqsave(&host->allocation_lock, flags);

	init_alloc_map(host);
	/* reset scsi bus */
	outb(SCSIRST, host->base + ORC_HCTRL);
	/* FIXME: We can spend up to a second with the lock held and
	   interrupts off here */
	if (wait_scsi_reset_done(host) == 0)
		rc = FAILED;

	spin_unlock_irqrestore(&host->allocation_lock, flags);
	return rc;
}
/**
* orc_device_reset - device reset handler
* @host: host to reset
* @cmd: command causing the reset
* @target; target device
*
* Reset registers, reset a hanging bus and kill active and disconnected
* commands for target w/o soft reset
*/
/**
 * orc_device_reset - device reset handler
 * @host: host to reset
 * @cmd: command causing the reset
 * @target: target device
 *
 * Reset registers, reset a hanging bus and kill active and disconnected
 * commands for target w/o soft reset.  Finds the SCB belonging to @cmd,
 * then queues an ORC_BUSDEVRST SCB; the firmware does the actual reset.
 *
 * Returns SUCCESS or FAILED for the SCSI midlayer.
 */
static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
{				/* I need Host Control Block Information */
	struct orc_scb *scb;
	struct orc_extended_scb *escb;
	struct orc_scb *host_scb;
	u8 i;
	unsigned long flags;

	spin_lock_irqsave(&(host->allocation_lock), flags);
	scb = NULL;
	escb = NULL;

	/* setup scatter list address with one buffer */
	host_scb = host->scb_virt;

	/* FIXME: is this safe if we then fail to issue the reset or race
	   a completion ? */
	init_alloc_map(host);

	/* Find the scb corresponding to the command */
	for (i = 0; i < ORC_MAXQUEUE; i++) {
		escb = host_scb->escb;
		if (host_scb->status && escb->srb == cmd)
			break;
		host_scb++;
	}

	if (i == ORC_MAXQUEUE) {
		printk(KERN_ERR "Unable to Reset - No SCB Found\n");
		spin_unlock_irqrestore(&(host->allocation_lock), flags);
		return FAILED;
	}

	/* Allocate a new SCB for the reset command to the firmware */
	if ((scb = __orc_alloc_scb(host)) == NULL) {
		/* Can't happen.. */
		spin_unlock_irqrestore(&(host->allocation_lock), flags);
		return FAILED;
	}

	/* Reset device is handled by the firmware, we fill in an SCB and
	   fire it at the controller, it does the rest */
	scb->opcode = ORC_BUSDEVRST;
	scb->target = target;
	scb->hastat = 0;
	scb->tastat = 0;
	scb->status = 0x0;
	scb->link = 0xFF;
	scb->reserved0 = 0;
	scb->reserved1 = 0;
	scb->xferlen = cpu_to_le32(0);
	scb->sg_len = cpu_to_le32(0);

	/* Re-point the matched command's escb at it; the previous
	   "escb->srb = NULL;" dead store has been removed. */
	escb->srb = cmd;
	orc_exec_scb(host, scb);	/* Start execute SCB */
	spin_unlock_irqrestore(&host->allocation_lock, flags);
	return SUCCESS;
}
/**
* __orc_alloc_scb - allocate an SCB
* @host: host to allocate from
*
* Allocate an SCB and return a pointer to the SCB object. NULL
* is returned if no SCB is free. The caller must already hold
* the allocator lock at this point.
*/
/**
 * __orc_alloc_scb - allocate an SCB
 * @host: host to allocate from
 *
 * Scan the channel's 8 x 32-bit allocation bitmap; a set bit marks a
 * free SCB.  Clear the first free bit and return the matching entry of
 * the SCB array.  Returns NULL if no SCB is free.  The caller must
 * already hold the allocation lock.
 */
static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
{
	u8 channel;
	unsigned long idx;
	u8 index;
	u8 i;

	channel = host->index;	/* bitmap row for this adapter */
	for (i = 0; i < 8; i++) {
		for (index = 0; index < 32; index++) {
			if ((host->allocation_map[channel][i] >> index) & 0x01) {
				host->allocation_map[channel][i] &= ~(1 << index);
				idx = index + 32 * i;
				/*
				 * Translate the index to a structure instance
				 */
				return host->scb_virt + idx;
			}
		}
	}
	return NULL;
}
/**
* orc_alloc_scb - allocate an SCB
* @host: host to allocate from
*
* Allocate an SCB and return a pointer to the SCB object. NULL
* is returned if no SCB is free.
*/
/**
 * orc_alloc_scb - allocate an SCB
 * @host: host to allocate from
 *
 * Locking wrapper around __orc_alloc_scb().  Returns NULL if no SCB
 * is free.
 */
static struct orc_scb *orc_alloc_scb(struct orc_host * host)
{
	struct orc_scb *scb;
	unsigned long flags;

	spin_lock_irqsave(&host->allocation_lock, flags);
	scb = __orc_alloc_scb(host);
	spin_unlock_irqrestore(&host->allocation_lock, flags);
	return scb;
}
/**
* orc_release_scb - release an SCB
* @host: host owning the SCB
* @scb: SCB that is now free
*
* Called to return a completed SCB to the allocation pool. Before
* calling the SCB must be out of use on both the host and the HA.
*/
/**
 * orc_release_scb - release an SCB
 * @host: host owning the SCB
 * @scb: SCB that is now free
 *
 * Return a completed SCB to the allocation pool by setting its bit in
 * the channel bitmap.  Before calling, the SCB must be out of use on
 * both the host and the HA.
 */
static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
{
	unsigned long flags;
	u8 chan, word, bit;

	spin_lock_irqsave(&(host->allocation_lock), flags);
	chan = host->index;		/* Channel */
	word = scb->scbidx / 32;	/* which 32-bit word */
	bit = scb->scbidx % 32;		/* which bit in it */
	host->allocation_map[chan][word] |= (1 << bit);
	spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
/**
* orchid_abort_scb - abort a command
*
* Abort a queued command that has been passed to the firmware layer
* if possible. This is all handled by the firmware. We aks the firmware
* and it either aborts the command or fails
*/
/**
 * orchid_abort_scb - abort a command
 * @host: adapter owning the SCB
 * @scb: SCB to abort
 *
 * Ask the firmware to abort a queued SCB via the mailbox handshake.
 * Returns 1 if the firmware accepted the abort, 0 on handshake timeout
 * or firmware refusal.
 */
static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
{
	unsigned char data, status;

	outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA);	/* Write command */
	outb(HDO, host->base + ORC_HCTRL);
	if (wait_HDO_off(host) == 0)	/* Wait HDO off */
		return 0;

	outb(scb->scbidx, host->base + ORC_HDATA);	/* Write address */
	outb(HDO, host->base + ORC_HCTRL);
	if (wait_HDO_off(host) == 0)	/* Wait HDO off */
		return 0;

	if (wait_hdi_set(host, &data) == 0)	/* Wait HDI set */
		return 0;
	status = inb(host->base + ORC_HDATA);
	outb(data, host->base + ORC_HSTUS);	/* Clear HDI */

	if (status == 1)	/* 0 - Successfully */
		return 0;	/* 1 - Fail */
	return 1;
}
/*
 * inia100_abort_cmd - find and abort the SCB belonging to @cmd.
 * @host: adapter owning the command
 * @cmd: midlayer command to abort
 *
 * Walks the whole SCB array under the allocation lock.  Untagged
 * commands (tag_msg == 0) are not aborted.  Returns SUCCESS only when
 * the firmware confirms the abort, otherwise FAILED.
 */
static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
{
	struct orc_extended_scb *escb;
	struct orc_scb *scb;
	u8 i;
	unsigned long flags;

	spin_lock_irqsave(&(host->allocation_lock), flags);

	scb = host->scb_virt;

	/* Walk the queue until we find the SCB that belongs to the command
	   block. This isn't a performance critical path so a walk in the park
	   here does no harm */
	for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
		escb = scb->escb;
		if (scb->status && escb->srb == cmd) {
			if (scb->tag_msg == 0) {
				goto out;	/* untagged: cannot be aborted */
			} else {
				/* Issue an ABORT to the firmware */
				if (orchid_abort_scb(host, scb)) {
					escb->srb = NULL;
					spin_unlock_irqrestore(&host->allocation_lock, flags);
					return SUCCESS;
				} else
					goto out;
			}
		}
	}
out:
	spin_unlock_irqrestore(&host->allocation_lock, flags);
	return FAILED;
}
/**
* orc_interrupt - IRQ processing
* @host: Host causing the interrupt
*
* This function is called from the IRQ handler and protected
* by the host lock. While the controller reports that there are
* scb's for processing we pull them off the controller, turn the
* index into a host address pointer to the scb and call the scb
* handler.
*
* Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
*/
/**
 * orc_interrupt - IRQ processing
 * @host: Host causing the interrupt
 *
 * Called from the IRQ handler under the host lock.  While the
 * controller reports queued SCBs we pull their indices off the reply
 * queue, translate each index into the matching SCB and complete it.
 *
 * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise.
 */
static irqreturn_t orc_interrupt(struct orc_host * host)
{
	u8 idx;
	struct orc_scb *scb;

	/* Anything queued for servicing? */
	if (inb(host->base + ORC_RQUEUECNT) == 0)
		return IRQ_NONE;

	do {
		/* Index of the next SCB to service */
		idx = inb(host->base + ORC_RQUEUE);
		/* Translate it back to a host pointer */
		scb = &host->scb_virt[idx];
		scb->status = 0x0;
		/* Process the SCB */
		inia100_scb_handler(host, scb);
	} while (inb(host->base + ORC_RQUEUECNT));
	return IRQ_HANDLED;
}				/* End of I1060Interrupt() */
/**
* inia100_build_scb - build SCB
* @host: host owing the control block
* @scb: control block to use
* @cmd: Mid layer command
*
* Build a host adapter control block from the SCSI mid layer command
*/
static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
{ /* Create corresponding SCB */
struct scatterlist *sg;
struct orc_sgent *sgent; /* Pointer to SG list */
int i, count_sg;
struct orc_extended_scb *escb;
/* Links between the escb, scb and Linux scsi midlayer cmd */
escb = scb->escb;
escb->srb = cmd;
sgent = NULL;
/* Set up the SCB to do a SCSI command block */
scb->opcode = ORC_EXECSCSI;
scb->flags = SCF_NO_DCHK; /* Clear done bit */
scb->target = cmd->device->id;
scb->lun = cmd->device->lun;
scb->reserved0 = 0;
scb->reserved1 = 0;
scb->sg_len = cpu_to_le32(0);
scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd));
sgent = (struct orc_sgent *) & escb->sglist[0];
count_sg = scsi_dma_map(cmd);
if (count_sg < 0)
return count_sg;
BUG_ON(count_sg > TOTAL_SG_ENTRY);
/* Build the scatter gather lists */
if (count_sg) {
scb->sg_len = cpu_to_le32((u32) (count_sg * 8));
scsi_for_each_sg(cmd, sg, count_sg, i) {
sgent->base = cpu_to_le32((u32) sg_dma_address(sg));
sgent->length = cpu_to_le32((u32) sg_dma_len(sg));
sgent++;
}
} else {
scb->sg_len = cpu_to_le32(0);
sgent->base = cpu_to_le32(0);
sgent->length = cpu_to_le32(0);
}
scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */
scb->hastat = 0;
scb->tastat = 0;
scb->link = 0xFF;
scb->sense_len = SENSE_SIZE;
scb->cdb_len = cmd->cmd_len;
if (scb->cdb_len >= IMAX_CDB) {
printk("max cdb length= %x\b", cmd->cmd_len);
scb->cdb_len = IMAX_CDB;
}
scb->ident = cmd->device->lun | DISC_ALLOW;
if (cmd->device->tagged_supported) { /* Tag Support */
scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
scb->tag_msg = 0; /* No tag support */
}
memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
return 0;
}
/**
* inia100_queue - queue command with host
* @cmd: Command block
* @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
/**
 * inia100_queue_lck - queue command with host
 * @cmd: Command block
 * @done: Completion function
 *
 * Called by the mid layer to queue a command.  Allocates an SCB,
 * builds the host-specific control block and hands it to the
 * controller.  Returns SCSI_MLQUEUE_HOST_BUSY when no SCB is free or
 * the SCB cannot be built, 0 when the command was queued.
 */
static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	struct orc_host *host = (struct orc_host *) cmd->device->host->hostdata;
	struct orc_scb *scb;

	cmd->scsi_done = done;
	/* Get free SCSI control block  */
	scb = orc_alloc_scb(host);
	if (scb == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (inia100_build_scb(host, scb, cmd)) {
		orc_release_scb(host, scb);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	orc_exec_scb(host, scb);	/* Start execute SCB            */
	return 0;
}
static DEF_SCSI_QCMD(inia100_queue)
/*****************************************************************************
Function name : inia100_abort
Description : Abort a queued command.
(commands that are on the bus can't be aborted easily)
Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
/*
 * inia100_abort - midlayer eh_abort_handler entry point.
 * @cmd: command to abort
 *
 * Commands that are already on the bus can't be aborted easily;
 * the real work happens in inia100_abort_cmd().
 */
static int inia100_abort(struct scsi_cmnd * cmd)
{
	struct orc_host *host = (struct orc_host *) cmd->device->host->hostdata;

	return inia100_abort_cmd(host, cmd);
}
/*****************************************************************************
Function name : inia100_reset
Description : Reset registers, reset a hanging bus and
kill active and disconnected commands for target w/o soft reset
Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
/*
 * inia100_bus_reset - midlayer eh_bus_reset_handler entry point.
 * @cmd: command whose bus is being reset
 *
 * Delegates to orc_reset_scsi_bus(); returns SUCCESS or FAILED.
 */
static int inia100_bus_reset(struct scsi_cmnd * cmd)
{				/* I need Host Control Block Information */
	struct orc_host *host = (struct orc_host *) cmd->device->host->hostdata;

	return orc_reset_scsi_bus(host);
}
/*****************************************************************************
Function name : inia100_device_reset
Description : Reset the device
Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
/*
 * inia100_device_reset - midlayer eh_device_reset_handler entry point.
 * @cmd: command on the device being reset
 *
 * Delegates to orc_device_reset() for the command's target;
 * returns SUCCESS or FAILED.
 */
static int inia100_device_reset(struct scsi_cmnd * cmd)
{				/* I need Host Control Block Information */
	struct orc_host *host = (struct orc_host *) cmd->device->host->hostdata;

	return orc_device_reset(host, cmd, scmd_id(cmd));
}
/**
* inia100_scb_handler - interrupt callback
* @host: Host causing the interrupt
* @scb: SCB the controller returned as needing processing
*
* Perform completion processing on a control block. Do the conversions
* from host to SCSI midlayer error coding, save any sense data and
* the complete with the midlayer and recycle the scb.
*/
/**
 * inia100_scb_handler - interrupt callback
 * @host: Host causing the interrupt
 * @scb: SCB the controller returned as needing processing
 *
 * Perform completion processing on a control block: translate the
 * adapter's hastat code to a SCSI midlayer DID_* code, copy out any
 * sense data, complete the command with the midlayer and recycle the
 * SCB.
 */
static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
{
	struct scsi_cmnd *cmd;	/* Pointer to SCSI request block */
	struct orc_extended_scb *escb;

	escb = scb->escb;
	if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
		printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
		orc_release_scb(host, scb);	/* Release SCB for current channel */
		return;
	}
	escb->srb = NULL;

	/* Map adapter status codes onto midlayer DID_* codes */
	switch (scb->hastat) {
	case 0x0:
	case 0xa:		/* Linked command complete without error and linked normally */
	case 0xb:		/* Linked command complete without error interrupt generated */
		scb->hastat = 0;
		break;

	case 0x11:		/* Selection time out-The initiator selection or target
				   reselection was not complete within the SCSI Time out period */
		scb->hastat = DID_TIME_OUT;
		break;

	case 0x14:		/* Target bus phase sequence failure-An invalid bus phase or bus
				   phase sequence was requested by the target. The host adapter
				   will generate a SCSI Reset Condition, notifying the host with
				   a SCRD interrupt */
		scb->hastat = DID_RESET;
		break;

	case 0x1a:		/* SCB Aborted. 07/21/98 */
		scb->hastat = DID_ABORT;
		break;

	case 0x12:		/* Data overrun/underrun-The target attempted to transfer more data
				   than was allocated by the Data Length field or the sum of the
				   Scatter / Gather Data Length fields. */
	case 0x13:		/* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
	case 0x16:		/* Invalid CCB Operation Code-The first byte of the CCB was invalid. */

	default:
		printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
		scb->hastat = DID_ERROR;	/* Couldn't find any better */
		break;
	}

	if (scb->tastat == 2) {	/* Check condition: sense bytes were gathered
				   into the escb's sglist area (see setup_SCBs) */
		memcpy((unsigned char *) &cmd->sense_buffer[0],
		   (unsigned char *) &escb->sglist[0],
		   SENSE_SIZE);
	}
	cmd->result = scb->tastat | (scb->hastat << 16);
	scsi_dma_unmap(cmd);
	cmd->scsi_done(cmd);	/* Notify system DONE           */
	orc_release_scb(host, scb);	/* Release SCB for current channel */
}
/**
* inia100_intr - interrupt handler
* @irqno: Interrupt value
* @devid: Host adapter
*
* Entry point for IRQ handling. All the real work is performed
* by orc_interrupt.
*/
/**
 * inia100_intr - interrupt handler
 * @irqno: Interrupt value
 * @devid: Host adapter
 *
 * Entry point for IRQ handling; takes the host lock and delegates the
 * real work to orc_interrupt().
 */
static irqreturn_t inia100_intr(int irqno, void *devid)
{
	struct Scsi_Host *shost = (struct Scsi_Host *)devid;
	struct orc_host *host = (struct orc_host *)shost->hostdata;
	irqreturn_t ret;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	ret = orc_interrupt(host);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
/* SCSI host template.  can_queue, this_id and sg_tablesize here are
 * placeholders; the probe routine overwrites the corresponding
 * Scsi_Host fields (ORC_MAXQUEUE, the NVRAM SCSI id, TOTAL_SG_ENTRY). */
static struct scsi_host_template inia100_template = {
	.proc_name		= "inia100",
	.name			= inia100_REVID,
	.queuecommand		= inia100_queue,
	.eh_abort_handler	= inia100_abort,
	.eh_bus_reset_handler	= inia100_bus_reset,
	.eh_device_reset_handler = inia100_device_reset,
	.can_queue		= 1,
	.this_id		= 1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
/**
 * inia100_probe_one - attach and configure one inia100 adapter
 * @pdev: PCI device that matched inia100_pci_tbl
 * @id: matching table entry (unused beyond the match itself)
 *
 * Enables the device, claims its 256-byte I/O region, allocates the
 * SCB and extended-SCB DMA arrays, initializes the ORC chip and
 * registers the SCSI host with the mid-layer.
 *
 * Returns 0 on success or a negative errno; everything acquired before
 * the failure point is released on the goto unwind path.
 */
static int __devinit inia100_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct orc_host *host;
	unsigned long port, bios;
	int error = -ENODEV;
	u32 sz;

	if (pci_enable_device(pdev))
		goto out;
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_WARNING "Unable to set 32bit DMA "
				    "on inia100 adapter, ignoring.\n");
		goto out_disable_device;
	}
	pci_set_master(pdev);

	port = pci_resource_start(pdev, 0);
	if (!request_region(port, 256, "inia100")) {
		printk(KERN_WARNING "inia100: io port 0x%lx, is busy.\n", port);
		goto out_disable_device;
	}

	/* <02> read from base address + 0x50 offset to get the bios value. */
	bios = inw(port + 0x50);

	shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
	if (!shost)
		goto out_release_region;

	host = (struct orc_host *)shost->hostdata;
	host->pdev = pdev;
	host->base = port;
	host->BIOScfg = bios;
	spin_lock_init(&host->allocation_lock);

	/* Get total memory needed for SCB */
	sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
	host->scb_virt = pci_alloc_consistent(pdev, sz,
			&host->scb_phys);
	if (!host->scb_virt) {
		printk("inia100: SCB memory allocation error\n");
		error = -ENOMEM;	/* was -ENODEV; report the real cause */
		goto out_host_put;
	}
	memset(host->scb_virt, 0, sz);

	/* Get total memory needed for ESCB */
	sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
	host->escb_virt = pci_alloc_consistent(pdev, sz,
			&host->escb_phys);
	if (!host->escb_virt) {
		printk("inia100: ESCB memory allocation error\n");
		error = -ENOMEM;	/* was -ENODEV; report the real cause */
		goto out_free_scb_array;
	}
	memset(host->escb_virt, 0, sz);

	/*
	 * NOTE(review): the original code also computed a virtual address
	 * for the BIOS region (phys_to_virt(BIOScfg << 4)) but never used
	 * it; that dead code has been dropped.
	 */
	if (init_orchid(host)) {	/* Initialize orchid chip */
		printk("inia100: initial orchid fail!!\n");
		goto out_free_escb_array;
	}

	shost->io_port = host->base;
	shost->n_io_port = 0xff;
	shost->can_queue = ORC_MAXQUEUE;
	shost->unique_id = shost->io_port;
	shost->max_id = host->max_targets;
	shost->max_lun = 16;
	shost->irq = pdev->irq;
	shost->this_id = host->scsi_id;	/* Assign HCS index */
	shost->sg_tablesize = TOTAL_SG_ENTRY;

	/* Initial orc chip */
	error = request_irq(pdev->irq, inia100_intr, IRQF_SHARED,
			"inia100", shost);
	if (error < 0) {
		printk(KERN_WARNING "inia100: unable to get irq %d\n",
				pdev->irq);
		goto out_free_escb_array;
	}

	pci_set_drvdata(pdev, shost);
	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_free_irq;

	scsi_scan_host(shost);
	return 0;

out_free_irq:
	free_irq(shost->irq, shost);
out_free_escb_array:
	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
			host->escb_virt, host->escb_phys);
out_free_scb_array:
	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
			host->scb_virt, host->scb_phys);
out_host_put:
	scsi_host_put(shost);
out_release_region:
	release_region(port, 256);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}
/**
 * inia100_remove_one - detach an inia100 adapter
 * @pdev: PCI device being removed
 *
 * Unregisters the SCSI host and releases every resource acquired by
 * inia100_probe_one, in reverse order of acquisition.
 */
static void __devexit inia100_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct orc_host *host = (struct orc_host *)shost->hostdata;
	u32 escb_bytes = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
	u32 scb_bytes = ORC_MAXQUEUE * sizeof(struct orc_scb);

	scsi_remove_host(shost);
	free_irq(shost->irq, shost);
	pci_free_consistent(pdev, escb_bytes, host->escb_virt, host->escb_phys);
	pci_free_consistent(pdev, scb_bytes, host->scb_virt, host->scb_phys);
	release_region(shost->io_port, 256);
	scsi_host_put(shost);
}
/* PCI match table: Initio vendor ID, device 0x1060 (A100U2W). */
static struct pci_device_id inia100_pci_tbl[] = {
	{PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, inia100_pci_tbl);
/* PCI driver glue binding the match table to probe/remove. */
static struct pci_driver inia100_pci_driver = {
	.name = "inia100",
	.id_table = inia100_pci_tbl,
	.probe = inia100_probe_one,
	.remove = __devexit_p(inia100_remove_one),
};
/* Module entry point: register the PCI driver with the core. */
static int __init inia100_init(void)
{
	return pci_register_driver(&inia100_pci_driver);
}
/* Module exit point: unregister the PCI driver (detaches all adapters). */
static void __exit inia100_exit(void)
{
	pci_unregister_driver(&inia100_pci_driver);
}
MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("Dual BSD/GPL");
module_init(inia100_init);
module_exit(inia100_exit);
| gpl-2.0 |
metredigm/linux | arch/mips/pci/pci-tx4938.c | 9048 | 4146 | /*
* Based on linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4938.h>
/*
 * tx4938_report_pciclk - log the PCIC clock source and rate at boot.
 *
 * When the internal PCI clock is enabled in PCFG, decodes the CCFG
 * PCIDIVMODE field into a divider of the CPU clock and prints the
 * resulting frequency; otherwise reports an external clock.
 *
 * Returns the internal PCI clock rate in Hz, or -1 for external.
 */
int __init tx4938_report_pciclk(void)
{
	int rate = 0;

	printk(KERN_INFO "PCIC --%s PCICLK:",
	       (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66) ?
	       " PCI66" : "");

	if (!(__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL)) {
		printk("External");
		rate = -1;
	} else {
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		switch ((unsigned long)ccfg & TX4938_CCFG_PCIDIVMODE_MASK) {
		case TX4938_CCFG_PCIDIVMODE_4:
			rate = txx9_cpu_clock / 4;
			break;
		case TX4938_CCFG_PCIDIVMODE_4_5:
			rate = txx9_cpu_clock * 2 / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_5:
			rate = txx9_cpu_clock / 5;
			break;
		case TX4938_CCFG_PCIDIVMODE_5_5:
			rate = txx9_cpu_clock * 2 / 11;
			break;
		case TX4938_CCFG_PCIDIVMODE_8:
			rate = txx9_cpu_clock / 8;
			break;
		case TX4938_CCFG_PCIDIVMODE_9:
			rate = txx9_cpu_clock / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_10:
			rate = txx9_cpu_clock / 10;
			break;
		case TX4938_CCFG_PCIDIVMODE_11:
			rate = txx9_cpu_clock / 11;
			break;
		}
		/* round to the nearest 0.1 MHz for display */
		printk("Internal(%u.%uMHz)",
		       (rate + 50000) / 1000000,
		       ((rate + 50000) / 100000) % 10);
	}
	printk("\n");
	return rate;
}
/*
 * tx4938_report_pci1clk - log the PCIC1 clock rate.
 *
 * PCIC1 runs off the gbus clock divided by 4 or 2 depending on the
 * CCFG PCI1DMD bit; print the rate (and a PCI66 tag when selected).
 */
void __init tx4938_report_pci1clk(void)
{
	__u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
	unsigned int divisor = (ccfg & TX4938_CCFG_PCI1DMD) ? 4 : 2;
	unsigned int rate = txx9_gbus_clock / divisor;

	printk(KERN_INFO "PCIC1 -- %sPCICLK:%u.%uMHz\n",
	       (ccfg & TX4938_CCFG_PCI1_66) ? "PCI66 " : "",
	       (rate + 50000) / 1000000,
	       ((rate + 50000) / 100000) % 10);
}
/*
 * tx4938_pciclk66_setup - switch the internal PCI clock toward 66MHz.
 *
 * Asserts M66EN in CCFG first, then — only when the internal PCI clock
 * is enabled in PCFG — rewrites the PCIDIVMODE field so each divider is
 * replaced by one producing roughly double the clock (8->4, 9->4.5,
 * 10->5, 11->5.5).  NOTE(review): the register write ordering here looks
 * deliberate (M66EN before the divider change) — preserve it.
 *
 * Returns the new PCI clock rate in Hz, or -1 when an external clock is
 * in use and nothing was changed.
 */
int __init tx4938_pciclk66_setup(void)
{
	int pciclk;

	/* Assert M66EN */
	tx4938_ccfg_set(TX4938_CCFG_PCI66);
	/* Double PCICLK (if possible) */
	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
		unsigned int pcidivmode = 0;
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		pcidivmode = (unsigned long)ccfg &
			TX4938_CCFG_PCIDIVMODE_MASK;
		/* stacked labels: old divider -> new (halved) divider */
		switch (pcidivmode) {
		case TX4938_CCFG_PCIDIVMODE_8:
		case TX4938_CCFG_PCIDIVMODE_4:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4;
			pciclk = txx9_cpu_clock / 4;
			break;
		case TX4938_CCFG_PCIDIVMODE_9:
		case TX4938_CCFG_PCIDIVMODE_4_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4_5;
			pciclk = txx9_cpu_clock * 2 / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_10:
		case TX4938_CCFG_PCIDIVMODE_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5;
			pciclk = txx9_cpu_clock / 5;
			break;
		case TX4938_CCFG_PCIDIVMODE_11:
		case TX4938_CCFG_PCIDIVMODE_5_5:
		default:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5_5;
			pciclk = txx9_cpu_clock * 2 / 11;
			break;
		}
		tx4938_ccfg_change(TX4938_CCFG_PCIDIVMODE_MASK,
				pcidivmode);
		printk(KERN_DEBUG "PCICLK: ccfg:%08lx\n",
				(unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg));
	} else
		pciclk = -1;
	return pciclk;
}
/*
 * tx4938_pcic1_map_irq - IRQ routing for devices behind PCIC1.
 * @dev: PCI device being mapped
 * @slot: slot number derived from the IDSEL line
 *
 * Returns the on-chip ETH0/ETH1 interrupt when the matching ethernet
 * controller is selected in PCFG, 0 for other PCIC1 slots, and -1 when
 * the device does not sit on PCIC1 at all.
 */
int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
	if (get_tx4927_pcicptr(dev->bus->sysdata) != tx4938_pcic1ptr)
		return -1;

	if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(31) &&
	    (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_ETH0_SEL))
		return TXX9_IRQ_BASE + TX4938_IR_ETH0;

	if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(30) &&
	    (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_ETH1_SEL))
		return TXX9_IRQ_BASE + TX4938_IR_ETH1;

	return 0;
}
/* Register the PCI-error interrupt handler; warn (and continue) on failure. */
void __init tx4938_setup_pcierr_irq(void)
{
	int ret = request_irq(TXX9_IRQ_BASE + TX4938_IR_PCIERR,
			      tx4927_pcierr_interrupt,
			      0, "PCI error",
			      (void *)TX4927_PCIC_REG);

	if (ret)
		printk(KERN_WARNING "Failed to request irq for PCIERR\n");
}
| gpl-2.0 |
NoelMacwan/SXDNickiKat | arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c | 9304 | 77737 | /*
* SH7724 Pinmux
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
*
* Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <cpu/sh7724.h>
enum {
PINMUX_RESERVED = 0,
PINMUX_DATA_BEGIN,
PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
PTG5_DATA, PTG4_DATA,
PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
PTJ7_DATA, PTJ6_DATA, PTJ5_DATA,
PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
PTS6_DATA, PTS5_DATA, PTS4_DATA,
PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
PINMUX_DATA_END,
PINMUX_INPUT_BEGIN,
PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
PTQ7_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN,
PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
PTS6_IN, PTS5_IN, PTS4_IN,
PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
PINMUX_INPUT_END,
PINMUX_INPUT_PULLUP_BEGIN,
PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU,
PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU,
PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU,
PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU,
PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
PINMUX_INPUT_PULLUP_END,
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
PTG5_OUT, PTG4_OUT,
PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
PTJ7_OUT, PTJ6_OUT, PTJ5_OUT,
PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
PTQ7_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
PTR1_OUT, PTR0_OUT,
PTS6_OUT, PTS5_OUT, PTS4_OUT,
PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
PINMUX_OUTPUT_END,
PINMUX_FUNCTION_BEGIN,
PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
PTG5_FN, PTG4_FN,
PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
PTJ7_FN, PTJ6_FN, PTJ5_FN,
PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
PTQ7_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN,
PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
PTS6_FN, PTS5_FN, PTS4_FN,
PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
PSA15_0, PSA15_1,
PSA14_0, PSA14_1,
PSA13_0, PSA13_1,
PSA12_0, PSA12_1,
PSA10_0, PSA10_1,
PSA9_0, PSA9_1,
PSA8_0, PSA8_1,
PSA7_0, PSA7_1,
PSA6_0, PSA6_1,
PSA5_0, PSA5_1,
PSA3_0, PSA3_1,
PSA2_0, PSA2_1,
PSA1_0, PSA1_1,
PSA0_0, PSA0_1,
PSB14_0, PSB14_1,
PSB13_0, PSB13_1,
PSB12_0, PSB12_1,
PSB11_0, PSB11_1,
PSB10_0, PSB10_1,
PSB9_0, PSB9_1,
PSB8_0, PSB8_1,
PSB7_0, PSB7_1,
PSB6_0, PSB6_1,
PSB5_0, PSB5_1,
PSB4_0, PSB4_1,
PSB3_0, PSB3_1,
PSB2_0, PSB2_1,
PSB1_0, PSB1_1,
PSB0_0, PSB0_1,
PSC15_0, PSC15_1,
PSC14_0, PSC14_1,
PSC13_0, PSC13_1,
PSC12_0, PSC12_1,
PSC11_0, PSC11_1,
PSC10_0, PSC10_1,
PSC9_0, PSC9_1,
PSC8_0, PSC8_1,
PSC7_0, PSC7_1,
PSC6_0, PSC6_1,
PSC5_0, PSC5_1,
PSC4_0, PSC4_1,
PSC2_0, PSC2_1,
PSC1_0, PSC1_1,
PSC0_0, PSC0_1,
PSD15_0, PSD15_1,
PSD14_0, PSD14_1,
PSD13_0, PSD13_1,
PSD12_0, PSD12_1,
PSD11_0, PSD11_1,
PSD10_0, PSD10_1,
PSD9_0, PSD9_1,
PSD8_0, PSD8_1,
PSD7_0, PSD7_1,
PSD6_0, PSD6_1,
PSD5_0, PSD5_1,
PSD4_0, PSD4_1,
PSD3_0, PSD3_1,
PSD2_0, PSD2_1,
PSD1_0, PSD1_1,
PSD0_0, PSD0_1,
PSE15_0, PSE15_1,
PSE14_0, PSE14_1,
PSE13_0, PSE13_1,
PSE12_0, PSE12_1,
PSE11_0, PSE11_1,
PSE10_0, PSE10_1,
PSE9_0, PSE9_1,
PSE8_0, PSE8_1,
PSE7_0, PSE7_1,
PSE6_0, PSE6_1,
PSE5_0, PSE5_1,
PSE4_0, PSE4_1,
PSE3_0, PSE3_1,
PSE2_0, PSE2_1,
PSE1_0, PSE1_1,
PSE0_0, PSE0_1,
PINMUX_FUNCTION_END,
PINMUX_MARK_BEGIN,
/*PTA*/
D23_MARK, KEYOUT2_MARK, IDED15_MARK,
D22_MARK, KEYOUT1_MARK, IDED14_MARK,
D21_MARK, KEYOUT0_MARK, IDED13_MARK,
D20_MARK, KEYIN4_MARK, IDED12_MARK,
D19_MARK, KEYIN3_MARK, IDED11_MARK,
D18_MARK, KEYIN2_MARK, IDED10_MARK,
D17_MARK, KEYIN1_MARK, IDED9_MARK,
D16_MARK, KEYIN0_MARK, IDED8_MARK,
/*PTB*/
D31_MARK, TPUTO1_MARK, IDEA1_MARK,
D30_MARK, TPUTO0_MARK, IDEA0_MARK,
D29_MARK, IODREQ_MARK,
D28_MARK, IDECS0_MARK,
D27_MARK, IDECS1_MARK,
D26_MARK, KEYOUT5_IN5_MARK, IDEIORD_MARK,
D25_MARK, KEYOUT4_IN6_MARK, IDEIOWR_MARK,
D24_MARK, KEYOUT3_MARK, IDEINT_MARK,
/*PTC*/
LCDD7_MARK,
LCDD6_MARK,
LCDD5_MARK,
LCDD4_MARK,
LCDD3_MARK,
LCDD2_MARK,
LCDD1_MARK,
LCDD0_MARK,
/*PTD*/
LCDD15_MARK,
LCDD14_MARK,
LCDD13_MARK,
LCDD12_MARK,
LCDD11_MARK,
LCDD10_MARK,
LCDD9_MARK,
LCDD8_MARK,
/*PTE*/
FSIMCKB_MARK,
FSIMCKA_MARK,
LCDD21_MARK, SCIF2_L_TXD_MARK,
LCDD20_MARK, SCIF4_SCK_MARK,
LCDD19_MARK, SCIF4_RXD_MARK,
LCDD18_MARK, SCIF4_TXD_MARK,
LCDD17_MARK,
LCDD16_MARK,
/*PTF*/
LCDVSYN_MARK,
LCDDISP_MARK, LCDRS_MARK,
LCDHSYN_MARK, LCDCS_MARK,
LCDDON_MARK,
LCDDCK_MARK, LCDWR_MARK,
LCDVEPWC_MARK, SCIF0_TXD_MARK,
LCDD23_MARK, SCIF2_L_SCK_MARK,
LCDD22_MARK, SCIF2_L_RXD_MARK,
/*PTG*/
AUDCK_MARK,
AUDSYNC_MARK,
AUDATA3_MARK,
AUDATA2_MARK,
AUDATA1_MARK,
AUDATA0_MARK,
/*PTH*/
VIO0_VD_MARK,
VIO0_CLK_MARK,
VIO0_D7_MARK,
VIO0_D6_MARK,
VIO0_D5_MARK,
VIO0_D4_MARK,
VIO0_D3_MARK,
VIO0_D2_MARK,
/*PTJ*/
PDSTATUS_MARK,
STATUS2_MARK,
STATUS0_MARK,
A25_MARK, BS_MARK,
A24_MARK,
A23_MARK,
A22_MARK,
/*PTK*/
VIO1_D5_MARK, VIO0_D13_MARK, IDED5_MARK,
VIO1_D4_MARK, VIO0_D12_MARK, IDED4_MARK,
VIO1_D3_MARK, VIO0_D11_MARK, IDED3_MARK,
VIO1_D2_MARK, VIO0_D10_MARK, IDED2_MARK,
VIO1_D1_MARK, VIO0_D9_MARK, IDED1_MARK,
VIO1_D0_MARK, VIO0_D8_MARK, IDED0_MARK,
VIO0_FLD_MARK,
VIO0_HD_MARK,
/*PTL*/
DV_D5_MARK, SCIF3_V_SCK_MARK, RMII_RXD0_MARK,
DV_D4_MARK, SCIF3_V_RXD_MARK, RMII_RXD1_MARK,
DV_D3_MARK, SCIF3_V_TXD_MARK, RMII_REF_CLK_MARK,
DV_D2_MARK, SCIF1_SCK_MARK, RMII_TX_EN_MARK,
DV_D1_MARK, SCIF1_RXD_MARK, RMII_TXD0_MARK,
DV_D0_MARK, SCIF1_TXD_MARK, RMII_TXD1_MARK,
DV_D15_MARK,
DV_D14_MARK, MSIOF0_MCK_MARK,
/*PTM*/
DV_D13_MARK, MSIOF0_TSCK_MARK,
DV_D12_MARK, MSIOF0_RXD_MARK,
DV_D11_MARK, MSIOF0_TXD_MARK,
DV_D10_MARK, MSIOF0_TSYNC_MARK,
DV_D9_MARK, MSIOF0_SS1_MARK, MSIOF0_RSCK_MARK,
DV_D8_MARK, MSIOF0_SS2_MARK, MSIOF0_RSYNC_MARK,
LCDVCPWC_MARK, SCIF0_RXD_MARK,
LCDRD_MARK, SCIF0_SCK_MARK,
/*PTN*/
VIO0_D1_MARK,
VIO0_D0_MARK,
DV_CLKI_MARK,
DV_CLK_MARK, SCIF2_V_SCK_MARK,
DV_VSYNC_MARK, SCIF2_V_RXD_MARK,
DV_HSYNC_MARK, SCIF2_V_TXD_MARK,
DV_D7_MARK, SCIF3_V_CTS_MARK, RMII_RX_ER_MARK,
DV_D6_MARK, SCIF3_V_RTS_MARK, RMII_CRS_DV_MARK,
/*PTQ*/
D7_MARK,
D6_MARK,
D5_MARK,
D4_MARK,
D3_MARK,
D2_MARK,
D1_MARK,
D0_MARK,
/*PTR*/
CS6B_CE1B_MARK,
CS6A_CE2B_MARK,
CS5B_CE1A_MARK,
CS5A_CE2A_MARK,
IOIS16_MARK, LCDLCLK_MARK,
WAIT_MARK,
WE3_ICIOWR_MARK, TPUTO3_MARK, TPUTI3_MARK,
WE2_ICIORD_MARK, TPUTO2_MARK, IDEA2_MARK,
/*PTS*/
VIO_CKO_MARK,
VIO1_FLD_MARK, TPUTI2_MARK, IDEIORDY_MARK,
VIO1_HD_MARK, SCIF5_SCK_MARK,
VIO1_VD_MARK, SCIF5_RXD_MARK,
VIO1_CLK_MARK, SCIF5_TXD_MARK,
VIO1_D7_MARK, VIO0_D15_MARK, IDED7_MARK,
VIO1_D6_MARK, VIO0_D14_MARK, IDED6_MARK,
/*PTT*/
D15_MARK,
D14_MARK,
D13_MARK,
D12_MARK,
D11_MARK,
D10_MARK,
D9_MARK,
D8_MARK,
/*PTU*/
DMAC_DACK0_MARK,
DMAC_DREQ0_MARK,
FSIOASD_MARK,
FSIIABCK_MARK,
FSIIALRCK_MARK,
FSIOABCK_MARK,
FSIOALRCK_MARK,
CLKAUDIOAO_MARK,
/*PTV*/
FSIIBSD_MARK, MSIOF1_SS2_MARK, MSIOF1_RSYNC_MARK,
FSIOBSD_MARK, MSIOF1_SS1_MARK, MSIOF1_RSCK_MARK,
FSIIBBCK_MARK, MSIOF1_RXD_MARK,
FSIIBLRCK_MARK, MSIOF1_TSYNC_MARK,
FSIOBBCK_MARK, MSIOF1_TSCK_MARK,
FSIOBLRCK_MARK, MSIOF1_TXD_MARK,
CLKAUDIOBO_MARK, MSIOF1_MCK_MARK,
FSIIASD_MARK,
/*PTW*/
MMC_D7_MARK, SDHI1CD_MARK, IODACK_MARK,
MMC_D6_MARK, SDHI1WP_MARK, IDERST_MARK,
MMC_D5_MARK, SDHI1D3_MARK, EXBUF_ENB_MARK,
MMC_D4_MARK, SDHI1D2_MARK, DIRECTION_MARK,
MMC_D3_MARK, SDHI1D1_MARK,
MMC_D2_MARK, SDHI1D0_MARK,
MMC_D1_MARK, SDHI1CMD_MARK,
MMC_D0_MARK, SDHI1CLK_MARK,
/*PTX*/
DMAC_DACK1_MARK, IRDA_OUT_MARK,
DMAC_DREQ1_MARK, IRDA_IN_MARK,
TSIF_TS0_SDAT_MARK, LNKSTA_MARK,
TSIF_TS0_SCK_MARK, MDIO_MARK,
TSIF_TS0_SDEN_MARK, MDC_MARK,
TSIF_TS0_SPSYNC_MARK,
MMC_CLK_MARK,
MMC_CMD_MARK,
/*PTY*/
SDHI0CD_MARK,
SDHI0WP_MARK,
SDHI0D3_MARK,
SDHI0D2_MARK,
SDHI0D1_MARK,
SDHI0D0_MARK,
SDHI0CMD_MARK,
SDHI0CLK_MARK,
/*PTZ*/
INTC_IRQ7_MARK, SCIF3_I_CTS_MARK,
INTC_IRQ6_MARK, SCIF3_I_RTS_MARK,
INTC_IRQ5_MARK, SCIF3_I_SCK_MARK,
INTC_IRQ4_MARK, SCIF3_I_RXD_MARK,
INTC_IRQ3_MARK, SCIF3_I_TXD_MARK,
INTC_IRQ2_MARK,
INTC_IRQ1_MARK,
INTC_IRQ0_MARK,
PINMUX_MARK_END,
};
/*
 * Pinmux state table: each PINMUX_DATA() entry ties one GPIO data bit or
 * one peripheral function mark to the register-field settings (PSxn_m
 * function-select bits and PTxn_FN/IN/OUT/IN_PU pin-mode states) that
 * select it.  The GPIO sections list the plain I/O states per pin; the
 * FN sections list every peripheral function multiplexed onto each pin.
 * Entry order and contents mirror the hardware manual's pin-function
 * tables — do not reorder or "normalize" the asymmetric banks below.
 */
static pinmux_enum_t pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
/* PTB GPIO */
PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
/* PTC GPIO */
PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
/* PTD GPIO */
PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
/* PTE GPIO */
PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU),
PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU),
PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU),
PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
/* PTF GPIO */
PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU),
PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU),
PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU),
PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU),
PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU),
PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU),
PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU),
PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
/* PTG GPIO - output-only pins: no _IN / _IN_PU states exist for this bank */
PINMUX_DATA(PTG5_DATA, PTG5_OUT),
PINMUX_DATA(PTG4_DATA, PTG4_OUT),
PINMUX_DATA(PTG3_DATA, PTG3_OUT),
PINMUX_DATA(PTG2_DATA, PTG2_OUT),
PINMUX_DATA(PTG1_DATA, PTG1_OUT),
PINMUX_DATA(PTG0_DATA, PTG0_OUT),
/* PTH GPIO */
PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU),
PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
/*
 * PTJ GPIO - PTJ7..PTJ5 are output-only; PTJ4 does not exist on this
 * part (no entry), PTJ3..PTJ0 are full bidirectional pins.
 */
PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
/* PTK GPIO */
PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU),
PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU),
PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU),
PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU),
PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
/* PTL GPIO */
PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU),
PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU),
PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU),
/* PTM GPIO */
PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
/* PTN GPIO */
PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU),
PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU),
PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU),
PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU),
PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU),
PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU),
PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU),
PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU),
/* PTQ GPIO */
PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU),
PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU),
PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU),
PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU),
PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU),
PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU),
PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU),
PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU),
/* PTR GPIO - PTR3 and PTR2 are input-only: deliberately no _OUT state */
PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU),
PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
/* PTS GPIO - 7-bit bank: pins PTS6..PTS0 only, there is no PTS7 */
PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU),
PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU),
PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
/* PTT GPIO */
PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU),
PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU),
PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU),
PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
/* PTU GPIO */
PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU),
PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU),
PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU),
PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
/* PTV GPIO */
PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU),
PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU),
PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU),
PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
/* PTW GPIO */
PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU),
PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU),
PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU),
PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU),
PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU),
PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU),
PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU),
PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU),
/* PTX GPIO */
PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU),
PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU),
PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU),
PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU),
PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU),
PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU),
PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU),
PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU),
/* PTY GPIO */
PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU),
PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU),
PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU),
PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU),
PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU),
PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU),
PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU),
PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU),
/* PTZ GPIO */
PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU),
PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU),
PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU),
PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU),
PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU),
PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU),
PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU),
PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU),
/*
 * PTA FN - from here on, each entry selects a peripheral function on a
 * pin: the PSxn_m arguments set the function-select register fields and
 * the final PTxn_FN argument switches the pin into function mode.
 * Multiple entries per pin describe the alternative functions.
 */
PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN),
PINMUX_DATA(D22_MARK, PSA15_0, PSA14_0, PTA6_FN),
PINMUX_DATA(D21_MARK, PSA15_0, PSA14_0, PTA5_FN),
PINMUX_DATA(D20_MARK, PSA15_0, PSA14_0, PTA4_FN),
PINMUX_DATA(D19_MARK, PSA15_0, PSA14_0, PTA3_FN),
PINMUX_DATA(D18_MARK, PSA15_0, PSA14_0, PTA2_FN),
PINMUX_DATA(D17_MARK, PSA15_0, PSA14_0, PTA1_FN),
PINMUX_DATA(D16_MARK, PSA15_0, PSA14_0, PTA0_FN),
PINMUX_DATA(KEYOUT2_MARK, PSA15_0, PSA14_1, PTA7_FN),
PINMUX_DATA(KEYOUT1_MARK, PSA15_0, PSA14_1, PTA6_FN),
PINMUX_DATA(KEYOUT0_MARK, PSA15_0, PSA14_1, PTA5_FN),
PINMUX_DATA(KEYIN4_MARK, PSA15_0, PSA14_1, PTA4_FN),
PINMUX_DATA(KEYIN3_MARK, PSA15_0, PSA14_1, PTA3_FN),
PINMUX_DATA(KEYIN2_MARK, PSA15_0, PSA14_1, PTA2_FN),
PINMUX_DATA(KEYIN1_MARK, PSA15_0, PSA14_1, PTA1_FN),
PINMUX_DATA(KEYIN0_MARK, PSA15_0, PSA14_1, PTA0_FN),
PINMUX_DATA(IDED15_MARK, PSA15_1, PSA14_0, PTA7_FN),
PINMUX_DATA(IDED14_MARK, PSA15_1, PSA14_0, PTA6_FN),
PINMUX_DATA(IDED13_MARK, PSA15_1, PSA14_0, PTA5_FN),
PINMUX_DATA(IDED12_MARK, PSA15_1, PSA14_0, PTA4_FN),
PINMUX_DATA(IDED11_MARK, PSA15_1, PSA14_0, PTA3_FN),
PINMUX_DATA(IDED10_MARK, PSA15_1, PSA14_0, PTA2_FN),
PINMUX_DATA(IDED9_MARK, PSA15_1, PSA14_0, PTA1_FN),
PINMUX_DATA(IDED8_MARK, PSA15_1, PSA14_0, PTA0_FN),
/* PTB FN */
PINMUX_DATA(D31_MARK, PSE15_0, PSE14_0, PTB7_FN),
PINMUX_DATA(D30_MARK, PSE15_0, PSE14_0, PTB6_FN),
PINMUX_DATA(D29_MARK, PSE11_0, PTB5_FN),
PINMUX_DATA(D28_MARK, PSE11_0, PTB4_FN),
PINMUX_DATA(D27_MARK, PSE11_0, PTB3_FN),
PINMUX_DATA(D26_MARK, PSA15_0, PSA14_0, PTB2_FN),
PINMUX_DATA(D25_MARK, PSA15_0, PSA14_0, PTB1_FN),
PINMUX_DATA(D24_MARK, PSA15_0, PSA14_0, PTB0_FN),
PINMUX_DATA(IDEA1_MARK, PSE15_1, PSE14_0, PTB7_FN),
PINMUX_DATA(IDEA0_MARK, PSE15_1, PSE14_0, PTB6_FN),
PINMUX_DATA(IODREQ_MARK, PSE11_1, PTB5_FN),
PINMUX_DATA(IDECS0_MARK, PSE11_1, PTB4_FN),
PINMUX_DATA(IDECS1_MARK, PSE11_1, PTB3_FN),
PINMUX_DATA(IDEIORD_MARK, PSA15_1, PSA14_0, PTB2_FN),
PINMUX_DATA(IDEIOWR_MARK, PSA15_1, PSA14_0, PTB1_FN),
PINMUX_DATA(IDEINT_MARK, PSA15_1, PSA14_0, PTB0_FN),
PINMUX_DATA(TPUTO1_MARK, PSE15_0, PSE14_1, PTB7_FN),
PINMUX_DATA(TPUTO0_MARK, PSE15_0, PSE14_1, PTB6_FN),
PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_0, PSA14_1, PTB2_FN),
PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_0, PSA14_1, PTB1_FN),
PINMUX_DATA(KEYOUT3_MARK, PSA15_0, PSA14_1, PTB0_FN),
/* PTC FN */
PINMUX_DATA(LCDD7_MARK, PSD5_0, PTC7_FN),
PINMUX_DATA(LCDD6_MARK, PSD5_0, PTC6_FN),
PINMUX_DATA(LCDD5_MARK, PSD5_0, PTC5_FN),
PINMUX_DATA(LCDD4_MARK, PSD5_0, PTC4_FN),
PINMUX_DATA(LCDD3_MARK, PSD5_0, PTC3_FN),
PINMUX_DATA(LCDD2_MARK, PSD5_0, PTC2_FN),
PINMUX_DATA(LCDD1_MARK, PSD5_0, PTC1_FN),
PINMUX_DATA(LCDD0_MARK, PSD5_0, PTC0_FN),
/* PTD FN */
PINMUX_DATA(LCDD15_MARK, PSD5_0, PTD7_FN),
PINMUX_DATA(LCDD14_MARK, PSD5_0, PTD6_FN),
PINMUX_DATA(LCDD13_MARK, PSD5_0, PTD5_FN),
PINMUX_DATA(LCDD12_MARK, PSD5_0, PTD4_FN),
PINMUX_DATA(LCDD11_MARK, PSD5_0, PTD3_FN),
PINMUX_DATA(LCDD10_MARK, PSD5_0, PTD2_FN),
PINMUX_DATA(LCDD9_MARK, PSD5_0, PTD1_FN),
PINMUX_DATA(LCDD8_MARK, PSD5_0, PTD0_FN),
/* PTE FN */
PINMUX_DATA(FSIMCKB_MARK, PTE7_FN),
PINMUX_DATA(FSIMCKA_MARK, PTE6_FN),
PINMUX_DATA(LCDD21_MARK, PSC5_0, PSC4_0, PTE5_FN),
PINMUX_DATA(LCDD20_MARK, PSD3_0, PSD2_0, PTE4_FN),
PINMUX_DATA(LCDD19_MARK, PSA3_0, PSA2_0, PTE3_FN),
PINMUX_DATA(LCDD18_MARK, PSA3_0, PSA2_0, PTE2_FN),
PINMUX_DATA(LCDD17_MARK, PSD5_0, PTE1_FN),
PINMUX_DATA(LCDD16_MARK, PSD5_0, PTE0_FN),
PINMUX_DATA(SCIF2_L_TXD_MARK, PSC5_0, PSC4_1, PTE5_FN),
PINMUX_DATA(SCIF4_SCK_MARK, PSD3_0, PSD2_1, PTE4_FN),
PINMUX_DATA(SCIF4_RXD_MARK, PSA3_0, PSA2_1, PTE3_FN),
PINMUX_DATA(SCIF4_TXD_MARK, PSA3_0, PSA2_1, PTE2_FN),
/* PTF FN */
PINMUX_DATA(LCDVSYN_MARK, PSD8_0, PTF7_FN),
PINMUX_DATA(LCDDISP_MARK, PSD10_0, PSD9_0, PTF6_FN),
PINMUX_DATA(LCDHSYN_MARK, PSD10_0, PSD9_0, PTF5_FN),
PINMUX_DATA(LCDDON_MARK, PSD8_0, PTF4_FN),
PINMUX_DATA(LCDDCK_MARK, PSD10_0, PSD9_0, PTF3_FN),
PINMUX_DATA(LCDVEPWC_MARK, PSA6_0, PTF2_FN),
PINMUX_DATA(LCDD23_MARK, PSC7_0, PSC6_0, PTF1_FN),
PINMUX_DATA(LCDD22_MARK, PSC5_0, PSC4_0, PTF0_FN),
PINMUX_DATA(LCDRS_MARK, PSD10_0, PSD9_1, PTF6_FN),
PINMUX_DATA(LCDCS_MARK, PSD10_0, PSD9_1, PTF5_FN),
PINMUX_DATA(LCDWR_MARK, PSD10_0, PSD9_1, PTF3_FN),
PINMUX_DATA(SCIF0_TXD_MARK, PSA6_1, PTF2_FN),
PINMUX_DATA(SCIF2_L_SCK_MARK, PSC7_0, PSC6_1, PTF1_FN),
PINMUX_DATA(SCIF2_L_RXD_MARK, PSC5_0, PSC4_1, PTF0_FN),
/* PTG FN */
PINMUX_DATA(AUDCK_MARK, PTG5_FN),
PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
PINMUX_DATA(AUDATA3_MARK, PTG3_FN),
PINMUX_DATA(AUDATA2_MARK, PTG2_FN),
PINMUX_DATA(AUDATA1_MARK, PTG1_FN),
PINMUX_DATA(AUDATA0_MARK, PTG0_FN),
/* PTH FN */
PINMUX_DATA(VIO0_VD_MARK, PTH7_FN),
PINMUX_DATA(VIO0_CLK_MARK, PTH6_FN),
PINMUX_DATA(VIO0_D7_MARK, PTH5_FN),
PINMUX_DATA(VIO0_D6_MARK, PTH4_FN),
PINMUX_DATA(VIO0_D5_MARK, PTH3_FN),
PINMUX_DATA(VIO0_D4_MARK, PTH2_FN),
PINMUX_DATA(VIO0_D3_MARK, PTH1_FN),
PINMUX_DATA(VIO0_D2_MARK, PTH0_FN),
/* PTJ FN */
PINMUX_DATA(PDSTATUS_MARK, PTJ7_FN),
PINMUX_DATA(STATUS2_MARK, PTJ6_FN),
PINMUX_DATA(STATUS0_MARK, PTJ5_FN),
PINMUX_DATA(A25_MARK, PSA8_0, PTJ3_FN),
PINMUX_DATA(BS_MARK, PSA8_1, PTJ3_FN),
PINMUX_DATA(A24_MARK, PTJ2_FN),
PINMUX_DATA(A23_MARK, PTJ1_FN),
PINMUX_DATA(A22_MARK, PTJ0_FN),
/* PTK FN */
PINMUX_DATA(VIO1_D5_MARK, PSB7_0, PSB6_0, PTK7_FN),
PINMUX_DATA(VIO1_D4_MARK, PSB7_0, PSB6_0, PTK6_FN),
PINMUX_DATA(VIO1_D3_MARK, PSB7_0, PSB6_0, PTK5_FN),
PINMUX_DATA(VIO1_D2_MARK, PSB7_0, PSB6_0, PTK4_FN),
PINMUX_DATA(VIO1_D1_MARK, PSB7_0, PSB6_0, PTK3_FN),
PINMUX_DATA(VIO1_D0_MARK, PSB7_0, PSB6_0, PTK2_FN),
PINMUX_DATA(VIO0_D13_MARK, PSB7_0, PSB6_1, PTK7_FN),
PINMUX_DATA(VIO0_D12_MARK, PSB7_0, PSB6_1, PTK6_FN),
PINMUX_DATA(VIO0_D11_MARK, PSB7_0, PSB6_1, PTK5_FN),
PINMUX_DATA(VIO0_D10_MARK, PSB7_0, PSB6_1, PTK4_FN),
PINMUX_DATA(VIO0_D9_MARK, PSB7_0, PSB6_1, PTK3_FN),
PINMUX_DATA(VIO0_D8_MARK, PSB7_0, PSB6_1, PTK2_FN),
PINMUX_DATA(IDED5_MARK, PSB7_1, PSB6_0, PTK7_FN),
PINMUX_DATA(IDED4_MARK, PSB7_1, PSB6_0, PTK6_FN),
PINMUX_DATA(IDED3_MARK, PSB7_1, PSB6_0, PTK5_FN),
PINMUX_DATA(IDED2_MARK, PSB7_1, PSB6_0, PTK4_FN),
PINMUX_DATA(IDED1_MARK, PSB7_1, PSB6_0, PTK3_FN),
PINMUX_DATA(IDED0_MARK, PSB7_1, PSB6_0, PTK2_FN),
PINMUX_DATA(VIO0_FLD_MARK, PTK1_FN),
PINMUX_DATA(VIO0_HD_MARK, PTK0_FN),
/* PTL FN */
PINMUX_DATA(DV_D5_MARK, PSB9_0, PSB8_0, PTL7_FN),
PINMUX_DATA(DV_D4_MARK, PSB9_0, PSB8_0, PTL6_FN),
PINMUX_DATA(DV_D3_MARK, PSE7_0, PSE6_0, PTL5_FN),
PINMUX_DATA(DV_D2_MARK, PSC9_0, PSC8_0, PTL4_FN),
PINMUX_DATA(DV_D1_MARK, PSC9_0, PSC8_0, PTL3_FN),
PINMUX_DATA(DV_D0_MARK, PSC9_0, PSC8_0, PTL2_FN),
PINMUX_DATA(DV_D15_MARK, PSD4_0, PTL1_FN),
PINMUX_DATA(DV_D14_MARK, PSE5_0, PSE4_0, PTL0_FN),
PINMUX_DATA(SCIF3_V_SCK_MARK, PSB9_0, PSB8_1, PTL7_FN),
PINMUX_DATA(SCIF3_V_RXD_MARK, PSB9_0, PSB8_1, PTL6_FN),
PINMUX_DATA(SCIF3_V_TXD_MARK, PSE7_0, PSE6_1, PTL5_FN),
PINMUX_DATA(SCIF1_SCK_MARK, PSC9_0, PSC8_1, PTL4_FN),
PINMUX_DATA(SCIF1_RXD_MARK, PSC9_0, PSC8_1, PTL3_FN),
PINMUX_DATA(SCIF1_TXD_MARK, PSC9_0, PSC8_1, PTL2_FN),
PINMUX_DATA(RMII_RXD0_MARK, PSB9_1, PSB8_0, PTL7_FN),
PINMUX_DATA(RMII_RXD1_MARK, PSB9_1, PSB8_0, PTL6_FN),
PINMUX_DATA(RMII_REF_CLK_MARK, PSE7_1, PSE6_0, PTL5_FN),
PINMUX_DATA(RMII_TX_EN_MARK, PSC9_1, PSC8_0, PTL4_FN),
PINMUX_DATA(RMII_TXD0_MARK, PSC9_1, PSC8_0, PTL3_FN),
PINMUX_DATA(RMII_TXD1_MARK, PSC9_1, PSC8_0, PTL2_FN),
PINMUX_DATA(MSIOF0_MCK_MARK, PSE5_0, PSE4_1, PTL0_FN),
/* PTM FN */
PINMUX_DATA(DV_D13_MARK, PSC13_0, PSC12_0, PTM7_FN),
PINMUX_DATA(DV_D12_MARK, PSC13_0, PSC12_0, PTM6_FN),
PINMUX_DATA(DV_D11_MARK, PSC13_0, PSC12_0, PTM5_FN),
PINMUX_DATA(DV_D10_MARK, PSC13_0, PSC12_0, PTM4_FN),
PINMUX_DATA(DV_D9_MARK, PSC11_0, PSC10_0, PTM3_FN),
PINMUX_DATA(DV_D8_MARK, PSC11_0, PSC10_0, PTM2_FN),
PINMUX_DATA(MSIOF0_TSCK_MARK, PSC13_0, PSC12_1, PTM7_FN),
PINMUX_DATA(MSIOF0_RXD_MARK, PSC13_0, PSC12_1, PTM6_FN),
PINMUX_DATA(MSIOF0_TXD_MARK, PSC13_0, PSC12_1, PTM5_FN),
PINMUX_DATA(MSIOF0_TSYNC_MARK, PSC13_0, PSC12_1, PTM4_FN),
PINMUX_DATA(MSIOF0_SS1_MARK, PSC11_0, PSC10_1, PTM3_FN),
PINMUX_DATA(MSIOF0_RSCK_MARK, PSC11_1, PSC10_0, PTM3_FN),
PINMUX_DATA(MSIOF0_SS2_MARK, PSC11_0, PSC10_1, PTM2_FN),
PINMUX_DATA(MSIOF0_RSYNC_MARK, PSC11_1, PSC10_0, PTM2_FN),
PINMUX_DATA(LCDVCPWC_MARK, PSA6_0, PTM1_FN),
PINMUX_DATA(LCDRD_MARK, PSA7_0, PTM0_FN),
PINMUX_DATA(SCIF0_RXD_MARK, PSA6_1, PTM1_FN),
PINMUX_DATA(SCIF0_SCK_MARK, PSA7_1, PTM0_FN),
/* PTN FN */
PINMUX_DATA(VIO0_D1_MARK, PTN7_FN),
PINMUX_DATA(VIO0_D0_MARK, PTN6_FN),
PINMUX_DATA(DV_CLKI_MARK, PSD11_0, PTN5_FN),
PINMUX_DATA(DV_CLK_MARK, PSD13_0, PSD12_0, PTN4_FN),
PINMUX_DATA(DV_VSYNC_MARK, PSD15_0, PSD14_0, PTN3_FN),
PINMUX_DATA(DV_HSYNC_MARK, PSB5_0, PSB4_0, PTN2_FN),
PINMUX_DATA(DV_D7_MARK, PSB3_0, PSB2_0, PTN1_FN),
PINMUX_DATA(DV_D6_MARK, PSB1_0, PSB0_0, PTN0_FN),
PINMUX_DATA(SCIF2_V_SCK_MARK, PSD13_0, PSD12_1, PTN4_FN),
PINMUX_DATA(SCIF2_V_RXD_MARK, PSD15_0, PSD14_1, PTN3_FN),
PINMUX_DATA(SCIF2_V_TXD_MARK, PSB5_0, PSB4_1, PTN2_FN),
PINMUX_DATA(SCIF3_V_CTS_MARK, PSB3_0, PSB2_1, PTN1_FN),
PINMUX_DATA(SCIF3_V_RTS_MARK, PSB1_0, PSB0_1, PTN0_FN),
PINMUX_DATA(RMII_RX_ER_MARK, PSB3_1, PSB2_0, PTN1_FN),
PINMUX_DATA(RMII_CRS_DV_MARK, PSB1_1, PSB0_0, PTN0_FN),
/* PTQ FN */
PINMUX_DATA(D7_MARK, PTQ7_FN),
PINMUX_DATA(D6_MARK, PTQ6_FN),
PINMUX_DATA(D5_MARK, PTQ5_FN),
PINMUX_DATA(D4_MARK, PTQ4_FN),
PINMUX_DATA(D3_MARK, PTQ3_FN),
PINMUX_DATA(D2_MARK, PTQ2_FN),
PINMUX_DATA(D1_MARK, PTQ1_FN),
PINMUX_DATA(D0_MARK, PTQ0_FN),
/* PTR FN */
PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
PINMUX_DATA(IOIS16_MARK, PSA5_0, PTR3_FN),
PINMUX_DATA(WAIT_MARK, PTR2_FN),
PINMUX_DATA(WE3_ICIOWR_MARK, PSA1_0, PSA0_0, PTR1_FN),
PINMUX_DATA(WE2_ICIORD_MARK, PSD1_0, PSD0_0, PTR0_FN),
PINMUX_DATA(LCDLCLK_MARK, PSA5_1, PTR3_FN),
PINMUX_DATA(IDEA2_MARK, PSD1_1, PSD0_0, PTR0_FN),
PINMUX_DATA(TPUTO3_MARK, PSA1_0, PSA0_1, PTR1_FN),
PINMUX_DATA(TPUTI3_MARK, PSA1_1, PSA0_0, PTR1_FN),
PINMUX_DATA(TPUTO2_MARK, PSD1_0, PSD0_1, PTR0_FN),
/* PTS FN */
PINMUX_DATA(VIO_CKO_MARK, PTS6_FN),
PINMUX_DATA(TPUTI2_MARK, PSE9_0, PSE8_1, PTS5_FN),
PINMUX_DATA(IDEIORDY_MARK, PSE9_1, PSE8_0, PTS5_FN),
PINMUX_DATA(VIO1_FLD_MARK, PSE9_0, PSE8_0, PTS5_FN),
PINMUX_DATA(VIO1_HD_MARK, PSA10_0, PTS4_FN),
PINMUX_DATA(VIO1_VD_MARK, PSA9_0, PTS3_FN),
PINMUX_DATA(VIO1_CLK_MARK, PSA9_0, PTS2_FN),
PINMUX_DATA(VIO1_D7_MARK, PSB7_0, PSB6_0, PTS1_FN),
PINMUX_DATA(VIO1_D6_MARK, PSB7_0, PSB6_0, PTS0_FN),
PINMUX_DATA(SCIF5_SCK_MARK, PSA10_1, PTS4_FN),
PINMUX_DATA(SCIF5_RXD_MARK, PSA9_1, PTS3_FN),
PINMUX_DATA(SCIF5_TXD_MARK, PSA9_1, PTS2_FN),
PINMUX_DATA(VIO0_D15_MARK, PSB7_0, PSB6_1, PTS1_FN),
PINMUX_DATA(VIO0_D14_MARK, PSB7_0, PSB6_1, PTS0_FN),
PINMUX_DATA(IDED7_MARK, PSB7_1, PSB6_0, PTS1_FN),
PINMUX_DATA(IDED6_MARK, PSB7_1, PSB6_0, PTS0_FN),
/* PTT FN */
PINMUX_DATA(D15_MARK, PTT7_FN),
PINMUX_DATA(D14_MARK, PTT6_FN),
PINMUX_DATA(D13_MARK, PTT5_FN),
PINMUX_DATA(D12_MARK, PTT4_FN),
PINMUX_DATA(D11_MARK, PTT3_FN),
PINMUX_DATA(D10_MARK, PTT2_FN),
PINMUX_DATA(D9_MARK, PTT1_FN),
PINMUX_DATA(D8_MARK, PTT0_FN),
/* PTU FN */
PINMUX_DATA(DMAC_DACK0_MARK, PTU7_FN),
PINMUX_DATA(DMAC_DREQ0_MARK, PTU6_FN),
PINMUX_DATA(FSIOASD_MARK, PSE1_0, PTU5_FN),
PINMUX_DATA(FSIIABCK_MARK, PSE1_0, PTU4_FN),
PINMUX_DATA(FSIIALRCK_MARK, PSE1_0, PTU3_FN),
PINMUX_DATA(FSIOABCK_MARK, PSE1_0, PTU2_FN),
PINMUX_DATA(FSIOALRCK_MARK, PSE1_0, PTU1_FN),
PINMUX_DATA(CLKAUDIOAO_MARK, PSE0_0, PTU0_FN),
/* PTV FN */
PINMUX_DATA(FSIIBSD_MARK, PSD7_0, PSD6_0, PTV7_FN),
PINMUX_DATA(FSIOBSD_MARK, PSD7_0, PSD6_0, PTV6_FN),
PINMUX_DATA(FSIIBBCK_MARK, PSC15_0, PSC14_0, PTV5_FN),
PINMUX_DATA(FSIIBLRCK_MARK, PSC15_0, PSC14_0, PTV4_FN),
PINMUX_DATA(FSIOBBCK_MARK, PSC15_0, PSC14_0, PTV3_FN),
PINMUX_DATA(FSIOBLRCK_MARK, PSC15_0, PSC14_0, PTV2_FN),
PINMUX_DATA(CLKAUDIOBO_MARK, PSE3_0, PSE2_0, PTV1_FN),
PINMUX_DATA(FSIIASD_MARK, PSE10_0, PTV0_FN),
PINMUX_DATA(MSIOF1_SS2_MARK, PSD7_0, PSD6_1, PTV7_FN),
PINMUX_DATA(MSIOF1_RSYNC_MARK, PSD7_1, PSD6_0, PTV7_FN),
PINMUX_DATA(MSIOF1_SS1_MARK, PSD7_0, PSD6_1, PTV6_FN),
PINMUX_DATA(MSIOF1_RSCK_MARK, PSD7_1, PSD6_0, PTV6_FN),
PINMUX_DATA(MSIOF1_RXD_MARK, PSC15_0, PSC14_1, PTV5_FN),
PINMUX_DATA(MSIOF1_TSYNC_MARK, PSC15_0, PSC14_1, PTV4_FN),
PINMUX_DATA(MSIOF1_TSCK_MARK, PSC15_0, PSC14_1, PTV3_FN),
PINMUX_DATA(MSIOF1_TXD_MARK, PSC15_0, PSC14_1, PTV2_FN),
PINMUX_DATA(MSIOF1_MCK_MARK, PSE3_0, PSE2_1, PTV1_FN),
/* PTW FN */
PINMUX_DATA(MMC_D7_MARK, PSE13_0, PSE12_0, PTW7_FN),
PINMUX_DATA(MMC_D6_MARK, PSE13_0, PSE12_0, PTW6_FN),
PINMUX_DATA(MMC_D5_MARK, PSE13_0, PSE12_0, PTW5_FN),
PINMUX_DATA(MMC_D4_MARK, PSE13_0, PSE12_0, PTW4_FN),
PINMUX_DATA(MMC_D3_MARK, PSA13_0, PTW3_FN),
PINMUX_DATA(MMC_D2_MARK, PSA13_0, PTW2_FN),
PINMUX_DATA(MMC_D1_MARK, PSA13_0, PTW1_FN),
PINMUX_DATA(MMC_D0_MARK, PSA13_0, PTW0_FN),
PINMUX_DATA(SDHI1CD_MARK, PSE13_0, PSE12_1, PTW7_FN),
PINMUX_DATA(SDHI1WP_MARK, PSE13_0, PSE12_1, PTW6_FN),
PINMUX_DATA(SDHI1D3_MARK, PSE13_0, PSE12_1, PTW5_FN),
PINMUX_DATA(SDHI1D2_MARK, PSE13_0, PSE12_1, PTW4_FN),
PINMUX_DATA(SDHI1D1_MARK, PSA13_1, PTW3_FN),
PINMUX_DATA(SDHI1D0_MARK, PSA13_1, PTW2_FN),
PINMUX_DATA(SDHI1CMD_MARK, PSA13_1, PTW1_FN),
PINMUX_DATA(SDHI1CLK_MARK, PSA13_1, PTW0_FN),
PINMUX_DATA(IODACK_MARK, PSE13_1, PSE12_0, PTW7_FN),
PINMUX_DATA(IDERST_MARK, PSE13_1, PSE12_0, PTW6_FN),
PINMUX_DATA(EXBUF_ENB_MARK, PSE13_1, PSE12_0, PTW5_FN),
PINMUX_DATA(DIRECTION_MARK, PSE13_1, PSE12_0, PTW4_FN),
/* PTX FN */
PINMUX_DATA(DMAC_DACK1_MARK, PSA12_0, PTX7_FN),
PINMUX_DATA(DMAC_DREQ1_MARK, PSA12_0, PTX6_FN),
PINMUX_DATA(IRDA_OUT_MARK, PSA12_1, PTX7_FN),
PINMUX_DATA(IRDA_IN_MARK, PSA12_1, PTX6_FN),
PINMUX_DATA(TSIF_TS0_SDAT_MARK, PSC0_0, PTX5_FN),
PINMUX_DATA(TSIF_TS0_SCK_MARK, PSC1_0, PTX4_FN),
PINMUX_DATA(TSIF_TS0_SDEN_MARK, PSC2_0, PTX3_FN),
PINMUX_DATA(TSIF_TS0_SPSYNC_MARK, PTX2_FN),
PINMUX_DATA(LNKSTA_MARK, PSC0_1, PTX5_FN),
PINMUX_DATA(MDIO_MARK, PSC1_1, PTX4_FN),
PINMUX_DATA(MDC_MARK, PSC2_1, PTX3_FN),
PINMUX_DATA(MMC_CLK_MARK, PTX1_FN),
PINMUX_DATA(MMC_CMD_MARK, PTX0_FN),
/* PTY FN */
PINMUX_DATA(SDHI0CD_MARK, PTY7_FN),
PINMUX_DATA(SDHI0WP_MARK, PTY6_FN),
PINMUX_DATA(SDHI0D3_MARK, PTY5_FN),
PINMUX_DATA(SDHI0D2_MARK, PTY4_FN),
PINMUX_DATA(SDHI0D1_MARK, PTY3_FN),
PINMUX_DATA(SDHI0D0_MARK, PTY2_FN),
PINMUX_DATA(SDHI0CMD_MARK, PTY1_FN),
PINMUX_DATA(SDHI0CLK_MARK, PTY0_FN),
/*
 * PTZ FN - IRQ2..IRQ0 (PTZ2..PTZ0) have no function-select bit: the
 * pin-mode state alone chooses the IRQ function, hence no PSxn arg.
 */
PINMUX_DATA(INTC_IRQ7_MARK, PSB10_0, PTZ7_FN),
PINMUX_DATA(INTC_IRQ6_MARK, PSB11_0, PTZ6_FN),
PINMUX_DATA(INTC_IRQ5_MARK, PSB12_0, PTZ5_FN),
PINMUX_DATA(INTC_IRQ4_MARK, PSB13_0, PTZ4_FN),
PINMUX_DATA(INTC_IRQ3_MARK, PSB14_0, PTZ3_FN),
PINMUX_DATA(INTC_IRQ2_MARK, PTZ2_FN),
PINMUX_DATA(INTC_IRQ1_MARK, PTZ1_FN),
PINMUX_DATA(INTC_IRQ0_MARK, PTZ0_FN),
PINMUX_DATA(SCIF3_I_CTS_MARK, PSB10_1, PTZ7_FN),
PINMUX_DATA(SCIF3_I_RTS_MARK, PSB11_1, PTZ6_FN),
PINMUX_DATA(SCIF3_I_SCK_MARK, PSB12_1, PTZ5_FN),
PINMUX_DATA(SCIF3_I_RXD_MARK, PSB13_1, PTZ4_FN),
PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
};
static struct pinmux_gpio pinmux_gpios[] = {
/* PTA */
PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
/* PTB */
PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
/* PTC */
PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
/* PTD */
PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
/* PTE */
PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
/* PTF */
PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
/* PTG */
PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
/* PTH */
PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
/* PTJ */
PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
/* PTK */
PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
/* PTL */
PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
/* PTM */
PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
/* PTN */
PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
/* PTQ */
PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA),
PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
/* PTR */
PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
/* PTS */
PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
/* PTT */
PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
/* PTU */
PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
/* PTV */
PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
/* PTW */
PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
/* PTX */
PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
/* PTY */
PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
/* PTZ */
PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
/* BSC */
PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
/* KEYSC */
PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
/* ATAPI */
PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
/* TPU */
PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK),
PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK),
/* LCDC */
PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
/* SCIF0 */
PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
/* SCIF1 */
PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
/* SCIF2 */
PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK),
/* SCIF3 */
PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK),
PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK),
/* SCIF4 */
PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
/* SCIF5 */
PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
/* FSI */
PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK),
PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK),
PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK),
PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK),
PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK),
PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK),
PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK),
PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK),
PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK),
PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK),
PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK),
/* AUD */
PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
/* VIO */
PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
/* VIO0 */
PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK),
PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK),
/* VIO1 */
PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK),
PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK),
/* Eth */
PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK),
PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK),
PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK),
PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK),
PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK),
PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK),
PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK),
PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK),
PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK),
PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK),
PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK),
/* System */
PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK),
PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
/* VOU */
PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
/* MSIOF0 */
PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK),
/* MSIOF1 */
PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
/* DMAC */
PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK),
PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK),
PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK),
PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK),
/* SDHI0 */
PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK),
PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK),
/* SDHI1 */
PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
/* MMC */
PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK),
PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK),
PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
/* IrDA */
PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
/* TSIF */
PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK),
PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK),
PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK),
PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK),
/* IRQ */
PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK),
PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK),
};
/*
 * Pin function control registers (SH7724 PFC).
 *
 * Each PINMUX_CFG_REG("name", address, reg_width, field_width) entry
 * describes one 16-bit port control register made of 2-bit fields,
 * listed MSB first (port bit 7 down to bit 0).  The four enum values
 * per field are the encodings for, in order:
 *   { peripheral function, GPIO output, GPIO input w/ pull-up, GPIO input }.
 * A 0 entry marks an encoding (or an entire pin) that does not exist
 * on this part, so the pinmux core will reject it.
 */
static struct pinmux_cfg_reg pinmux_config_regs[] = {
	{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
	PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
	PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
	PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
	PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
	PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
	PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
	PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
	PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
	},
	{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
	PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
	PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
	PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
	PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
	PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
	PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
	PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
	PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
	},
	{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
	PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
	PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
	PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
	PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
	PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
	PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
	PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
	PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
	},
	{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
	PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
	PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
	PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
	PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
	PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
	PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
	PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
	PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
	},
	{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
	PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN,
	PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN,
	PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN,
	PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
	PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
	PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
	PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
	PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
	},
	{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
	PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN,
	PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN,
	PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN,
	PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN,
	PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN,
	PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN,
	PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN,
	PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN }
	},
	/* Port G: bits 7/6 are absent; PTG5-PTG0 are function/output only
	 * (no input buffer, hence the 0 entries for the input encodings). */
	{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
	0, 0, 0, 0,
	0, 0, 0, 0,
	PTG5_FN, PTG5_OUT, 0, 0,
	PTG4_FN, PTG4_OUT, 0, 0,
	PTG3_FN, PTG3_OUT, 0, 0,
	PTG2_FN, PTG2_OUT, 0, 0,
	PTG1_FN, PTG1_OUT, 0, 0,
	PTG0_FN, PTG0_OUT, 0, 0 }
	},
	{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
	PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN,
	PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
	PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
	PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
	PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
	PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
	PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
	PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
	},
	/* Port J: PTJ7-PTJ5 are output-only, PTJ4 does not exist. */
	{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
	PTJ7_FN, PTJ7_OUT, 0, 0,
	PTJ6_FN, PTJ6_OUT, 0, 0,
	PTJ5_FN, PTJ5_OUT, 0, 0,
	0, 0, 0, 0,
	PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
	PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
	PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
	PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
	},
	{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
	PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN,
	PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN,
	PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN,
	PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN,
	PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
	PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
	PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
	PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
	},
	{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
	PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
	PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
	PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
	PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
	PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
	PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN,
	PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN,
	PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN }
	},
	{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
	PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
	PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
	PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
	PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
	PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
	PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
	PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
	PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
	},
	{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
	PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN,
	PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN,
	PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN,
	PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN,
	PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN,
	PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN,
	PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN,
	PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN }
	},
	{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
	PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN,
	PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN,
	PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN,
	PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN,
	PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN,
	PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN,
	PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN,
	PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
	},
	/* Port R: PTR3/PTR2 have no output driver (input/function only). */
	{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
	PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
	PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
	PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
	PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
	PTR3_FN, 0, PTR3_IN_PU, PTR3_IN,
	PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
	PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
	PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
	},
	/* Port S: bit 7 does not exist. */
	{ PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
	0, 0, 0, 0,
	PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN,
	PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN,
	PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
	PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
	PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
	PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
	PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
	},
	/* Note: the control registers from PTCR on live at 0xa4050140+,
	 * not contiguously after PSCR. */
	{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
	PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN,
	PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN,
	PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN,
	PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
	PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
	PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
	PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
	PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
	},
	{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
	PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN,
	PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN,
	PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN,
	PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
	PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
	PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
	PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
	PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
	},
	{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
	PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN,
	PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN,
	PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN,
	PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
	PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
	PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
	PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
	PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
	},
	{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
	PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN,
	PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN,
	PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN,
	PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN,
	PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN,
	PTW2_FN, PTW2_OUT, PTW2_IN_PU, PTW2_IN,
	PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN,
	PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN }
	},
	{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
	PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN,
	PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
	PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN,
	PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN,
	PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN,
	PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN,
	PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN,
	PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN }
	},
	{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
	PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN,
	PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN,
	PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
	PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
	PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
	PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
	PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN,
	PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
	},
	{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
	PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN,
	PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN,
	PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN,
	PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN,
	PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN,
	PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN,
	PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN,
	PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN }
	},
	/*
	 * Peripheral select registers PSELA-PSELE: one 1-bit field per
	 * signal choosing between two alternative pin functions
	 * (encoding 0 vs 1), listed bit 15 down to bit 0.  "0, 0" rows
	 * are reserved bits.
	 */
	{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
	PSA15_0, PSA15_1,
	PSA14_0, PSA14_1,
	PSA13_0, PSA13_1,
	PSA12_0, PSA12_1,
	0, 0,
	PSA10_0, PSA10_1,
	PSA9_0, PSA9_1,
	PSA8_0, PSA8_1,
	PSA7_0, PSA7_1,
	PSA6_0, PSA6_1,
	PSA5_0, PSA5_1,
	0, 0,
	PSA3_0, PSA3_1,
	PSA2_0, PSA2_1,
	PSA1_0, PSA1_1,
	PSA0_0, PSA0_1}
	},
	{ PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
	0, 0,
	PSB14_0, PSB14_1,
	PSB13_0, PSB13_1,
	PSB12_0, PSB12_1,
	PSB11_0, PSB11_1,
	PSB10_0, PSB10_1,
	PSB9_0, PSB9_1,
	PSB8_0, PSB8_1,
	PSB7_0, PSB7_1,
	PSB6_0, PSB6_1,
	PSB5_0, PSB5_1,
	PSB4_0, PSB4_1,
	PSB3_0, PSB3_1,
	PSB2_0, PSB2_1,
	PSB1_0, PSB1_1,
	PSB0_0, PSB0_1}
	},
	{ PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
	PSC15_0, PSC15_1,
	PSC14_0, PSC14_1,
	PSC13_0, PSC13_1,
	PSC12_0, PSC12_1,
	PSC11_0, PSC11_1,
	PSC10_0, PSC10_1,
	PSC9_0, PSC9_1,
	PSC8_0, PSC8_1,
	PSC7_0, PSC7_1,
	PSC6_0, PSC6_1,
	PSC5_0, PSC5_1,
	PSC4_0, PSC4_1,
	0, 0,
	PSC2_0, PSC2_1,
	PSC1_0, PSC1_1,
	PSC0_0, PSC0_1}
	},
	{ PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
	PSD15_0, PSD15_1,
	PSD14_0, PSD14_1,
	PSD13_0, PSD13_1,
	PSD12_0, PSD12_1,
	PSD11_0, PSD11_1,
	PSD10_0, PSD10_1,
	PSD9_0, PSD9_1,
	PSD8_0, PSD8_1,
	PSD7_0, PSD7_1,
	PSD6_0, PSD6_1,
	PSD5_0, PSD5_1,
	PSD4_0, PSD4_1,
	PSD3_0, PSD3_1,
	PSD2_0, PSD2_1,
	PSD1_0, PSD1_1,
	PSD0_0, PSD0_1}
	},
	{ PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
	PSE15_0, PSE15_1,
	PSE14_0, PSE14_1,
	PSE13_0, PSE13_1,
	PSE12_0, PSE12_1,
	PSE11_0, PSE11_1,
	PSE10_0, PSE10_1,
	PSE9_0, PSE9_1,
	PSE8_0, PSE8_1,
	PSE7_0, PSE7_1,
	PSE6_0, PSE6_1,
	PSE5_0, PSE5_1,
	PSE4_0, PSE4_1,
	PSE3_0, PSE3_1,
	PSE2_0, PSE2_1,
	PSE1_0, PSE1_1,
	PSE0_0, PSE0_1}
	},
	/* Sentinel: the pinmux core stops at the empty entry. */
	{}
};
/*
 * Port data registers: each PINMUX_DATA_REG("name", address, width)
 * entry maps an 8-bit data register to its per-bit pin data enums,
 * listed MSB first (bit 7 down to bit 0).  A 0 entry is a register
 * bit with no pin behind it (e.g. PTG7/PTG6, PTJ4, PTS7).
 */
static struct pinmux_data_reg pinmux_data_regs[] = {
	{ PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
	PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
	PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
	},
	{ PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
	PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
	PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
	},
	{ PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
	PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
	PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
	},
	{ PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
	PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
	PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
	},
	{ PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
	PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
	PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
	},
	{ PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
	PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
	PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
	},
	{ PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
	0, 0, PTG5_DATA, PTG4_DATA,
	PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
	},
	{ PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
	PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
	PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
	},
	{ PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
	PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
	PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
	},
	{ PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
	PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
	PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
	},
	{ PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
	PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
	PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
	},
	{ PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
	PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
	PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
	},
	{ PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
	PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
	PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
	},
	{ PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
	PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
	PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
	},
	{ PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
	PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
	PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
	},
	{ PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
	0, PTS6_DATA, PTS5_DATA, PTS4_DATA,
	PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
	},
	/* Data registers for ports T-Z live at 0xa4050160+. */
	{ PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
	PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
	PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
	},
	{ PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
	PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
	PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
	},
	{ PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
	PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
	PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
	},
	{ PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
	PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
	PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
	},
	{ PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
	PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
	PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
	},
	{ PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
	PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
	PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
	},
	{ PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
	PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
	PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
	},
	/* Sentinel entry. */
	{ },
};
/*
 * Top-level SH7724 pin function controller description handed to the
 * SH pinmux core.  The enum ranges (data/input/input_pu/output/mark/
 * function) partition the pinmux enum space so the core can classify
 * any pin state value; the table pointers reference the GPIO list,
 * config registers, data registers and mux data defined above.
 */
static struct pinmux_info sh7724_pinmux_info = {
	.name = "sh7724_pfc",
	.reserved_id = PINMUX_RESERVED,
	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
	.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
	/* GPIO number space spans plain GPIOs through function GPIOs. */
	.first_gpio = GPIO_PTA7,
	.last_gpio = GPIO_FN_INTC_IRQ0,
	.gpios = pinmux_gpios,
	.cfg_regs = pinmux_config_regs,
	.data_regs = pinmux_data_regs,
	.gpio_data = pinmux_data,
	.gpio_data_size = ARRAY_SIZE(pinmux_data),
};
/*
 * Register the SH7724 PFC tables with the pinmux core.  Runs once at
 * arch initcall time; returns the core's registration status.
 */
static int __init plat_pinmux_setup(void)
{
	int ret;

	ret = register_pinmux(&sh7724_pinmux_info);
	return ret;
}
arch_initcall(plat_pinmux_setup);
| gpl-2.0 |
parheliamm/T440p-kernel | arch/powerpc/boot/cuboot-c2k.c | 13656 | 4884 | /*
* GEFanuc C2K platform code.
*
* Author: Remi Machet <rmachet@slac.stanford.edu>
*
* Originated from prpmc2800.c
*
* 2008 (c) Stanford University
* 2007 (c) MontaVista, Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "types.h"
#include "stdio.h"
#include "io.h"
#include "ops.h"
#include "elf.h"
#include "gunzip_util.h"
#include "mv64x60.h"
#include "cuboot.h"
#include "ppcboot.h"
static u8 *bridge_base;
/*
 * Program the MV64x60 host bridge for the C2K board.
 *
 * Sizes and opens the CPU<->PCI windows for every PCI controller found
 * in the device tree (aliases "pci0"/"pci1"), using snoop/burst access
 * attributes that depend on whether the bridge is cache-coherent.
 *
 * @param mem_size  amount of system RAM, used to size the PCI->memory
 *                  access windows
 */
static void c2k_bridge_setup(u32 mem_size)
{
	u32 i, v[30], enables, acc_bits;
	u32 pci_base_hi, pci_base_lo, size, buf[2];
	unsigned long cpu_base;
	int rc;
	void *devp, *mv64x60_devp;
	u8 *bridge_pbase, is_coherent;
	struct mv64x60_cpu2pci_win *tbl;
	int bus;
	bridge_pbase = mv64x60_get_bridge_pbase();
	is_coherent = mv64x60_is_coherent();
	/* Coherent bridges snoop with write-back and small bursts;
	 * non-coherent ones skip snooping and use larger bursts. */
	if (is_coherent)
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB
			| MV64x60_PCI_ACC_CNTL_SWAP_NONE
			| MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES
			| MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES;
	else
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE
			| MV64x60_PCI_ACC_CNTL_SWAP_NONE
			| MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES
			| MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES;
	mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent);
	mv64x60_devp = find_node_by_compatible(NULL, "marvell,mv64360");
	if (mv64x60_devp == NULL)
		fatal("Error: Missing marvell,mv64360 device tree node\n\r");
	enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE));
	enables |= 0x007ffe00; /* Disable all cpu->pci windows */
	out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables);
	/* Get the cpu -> pci i/o & mem mappings from the device tree */
	devp = NULL;
	for (bus = 0; ; bus++) {
		/* Build the alias name "pci0", "pci1", ... by patching
		 * the trailing placeholder character. */
		char name[] = "pci ";
		name[strlen(name)-1] = bus+'0';
		devp = find_node_by_alias(name);
		if (devp == NULL)
			break;
		if (bus >= 2)
			fatal("Error: Only 2 PCI controllers are supported at" \
				" this time.\n");
		mv64x60_config_pci_windows(bridge_base, bridge_pbase, bus, 0,
				mem_size, acc_bits);
		rc = getprop(devp, "ranges", v, sizeof(v));
		if (rc == 0)
			fatal("Error: Can't find marvell,mv64360-pci ranges"
					" property\n\r");
		/* Get the cpu -> pci i/o & mem mappings from the device tree */
		/* NOTE(review): getprop() appears to return a byte count,
		 * but i indexes v[] as u32 words ("ranges" entries are
		 * 6 cells each) - looks like this could read past v[30]
		 * for large ranges; confirm against the getprop contract. */
		for (i = 0; i < rc; i += 6) {
			switch (v[i] & 0xff000000) {
			case 0x01000000: /* PCI I/O Space */
				tbl = mv64x60_cpu2pci_io;
				break;
			case 0x02000000: /* PCI MEM Space */
				tbl = mv64x60_cpu2pci_mem;
				break;
			default:
				continue;
			}
			pci_base_hi = v[i+1];
			pci_base_lo = v[i+2];
			cpu_base = v[i+3];
			size = v[i+5];
			buf[0] = cpu_base;
			buf[1] = size;
			/* Translate the bus-local CPU address to a real
			 * physical address via the device tree. */
			if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base))
				fatal("Error: Can't translate PCI address " \
						"0x%x\n\r", (u32)cpu_base);
			mv64x60_config_cpu2pci_window(bridge_base, bus,
					pci_base_hi, pci_base_lo, cpu_base, size, tbl);
		}
		/* Re-enable the windows for this bus that were masked
		 * off above. */
		enables &= ~(3<<(9+bus*5)); /* Enable cpu->pci<bus> i/o,
						cpu->pci<bus> mem0 */
		out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE),
				enables);
	};
}
/*
 * Boot-wrapper fixup hook: query the MV64x60 for the installed memory
 * size and program the bridge windows accordingly.
 */
static void c2k_fixups(void)
{
	c2k_bridge_setup(mv64x60_get_mem_size(bridge_base));
}
#define MV64x60_MPP_CNTL_0 0xf000
#define MV64x60_MPP_CNTL_2 0xf008
#define MV64x60_GPP_IO_CNTL 0xf100
#define MV64x60_GPP_LEVEL_CNTL 0xf110
#define MV64x60_GPP_VALUE_SET 0xf118
/*
 * Board reset hook: reconfigure two MPP pins as GPIOs and drive them
 * high to assert the board reset line, then spin forever waiting for
 * the hardware to take over.  Register write order matters here
 * (select MPP function, then level, then direction, then value).
 */
static void c2k_reset(void)
{
	u32 temp;
	/* Give any pending console output time to drain (~5 s). */
	udelay(5000000);
	if (bridge_base != 0) {
		/* Clear the MPP select field for the first reset pin so it
		 * acts as a GPIO (presumably GPP pin 2 - confirm against
		 * the board schematics). */
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp);
		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);
		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);
		/* Same sequence for the second pin (GPP pin 19). */
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp);
		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);
		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);
		/* Drive both GPP outputs (bits 2 and 19) high at once to
		 * trigger the reset. */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET),
				0x00080004);
	}
	/* Never returns; wait for the reset to happen. */
	for (;;);
}
static bd_t bd;
/*
 * Boot-wrapper entry point for the C2K, called with the register
 * values handed over by U-Boot (r3-r7).  CUBOOT_INIT() captures the
 * U-Boot board-info block; we then attach the built-in device tree,
 * locate the MV64x60 bridge, install the platform fixup/reset hooks
 * and bring up the serial console (bailing out if that fails).
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	bridge_base = mv64x60_get_bridge_base();
	platform_ops.fixups = c2k_fixups;
	platform_ops.exit = c2k_reset;
	if (serial_console_init() < 0)
		exit();
}
| gpl-2.0 |
abhishekamralkar/webscalesql-5.6 | storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp | 89 | 1531 | /*
Copyright (C) 2003-2006 MySQL AB
All rights reserved. Use is subject to license terms.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <kernel_types.h>
#include <BlockNumbers.h>
#include <signaldata/CloseComReqConf.hpp>
bool
printCLOSECOMREQCONF(FILE * output,
const Uint32 * theData,
Uint32 len,
Uint16 receiverBlockNo){
CloseComReqConf * cc = (CloseComReqConf*)theData;
fprintf(output, " xxxBlockRef = (%d, %d) failNo = %d noOfNodes = %d\n",
refToBlock(cc->xxxBlockRef), refToNode(cc->xxxBlockRef),
cc->failNo, cc->noOfNodes);
int hits = 0;
fprintf(output, " Nodes: ");
for(int i = 0; i<MAX_NODES; i++){
if(NodeBitmask::get(cc->theNodes, i)){
hits++;
fprintf(output, " %d", i);
}
if(hits == 16){
fprintf(output, "\n Nodes: ");
hits = 0;
}
}
if(hits != 0)
fprintf(output, "\n");
return true;
}
| gpl-2.0 |
wujf/qemu | tests/test-qmp-input-strict.c | 89 | 7206 | /*
* QMP Input Visitor unit-tests (strict mode).
*
* Copyright (C) 2011-2012 Red Hat Inc.
*
* Authors:
* Luiz Capitulino <lcapitulino@redhat.com>
* Paolo Bonzini <pbonzini@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <glib.h>
#include <stdarg.h>
#include "qemu-common.h"
#include "qapi/qmp-input-visitor.h"
#include "test-qapi-types.h"
#include "test-qapi-visit.h"
#include "qapi/qmp/types.h"
/* Per-test fixture: the parsed JSON object and the strict visitor under test. */
typedef struct TestInputVisitorData {
    QObject *obj;         /* result of qobject_from_jsonv() */
    QmpInputVisitor *qiv; /* strict QMP input visitor built from obj */
} TestInputVisitorData;
/* Per-test teardown: drop the parsed QObject and destroy the visitor. */
static void validate_teardown(TestInputVisitorData *data,
                              const void *unused)
{
    qobject_decref(data->obj);
    data->obj = NULL;

    if (data->qiv != NULL) {
        qmp_input_visitor_cleanup(data->qiv);
        data->qiv = NULL;
    }
}
/* This is provided instead of a test setup function so that the JSON
string used by the tests are kept in the test functions (and not
int main()) */
/* This is provided instead of a test setup function so that the JSON
   string used by the tests are kept in the test functions (and not
   int main()).
   Parses the printf-style @json_string into data->obj, wraps it in a
   strict-mode QMP input visitor (data->qiv) and returns the generic
   Visitor handle the test will drive. */
static GCC_FMT_ATTR(2, 3)
Visitor *validate_test_init(TestInputVisitorData *data,
                            const char *json_string, ...)
{
    Visitor *v;
    va_list ap;

    va_start(ap, json_string);
    data->obj = qobject_from_jsonv(json_string, &ap);
    va_end(ap);

    g_assert(data->obj != NULL);

    data->qiv = qmp_input_visitor_new_strict(data->obj);
    g_assert(data->qiv != NULL);

    v = qmp_input_get_visitor(data->qiv);
    g_assert(v != NULL);

    return v;
}
/* Minimal three-member structure used to exercise struct visits. */
typedef struct TestStruct
{
    int64_t integer;  /* visited under the key "integer" */
    bool boolean;     /* visited under the key "boolean" */
    char *string;     /* visited under "string"; allocated by the visitor */
} TestStruct;
/*
 * Hand-rolled visitor for TestStruct.
 *
 * Fix: the original passed @errp straight through every call without
 * checking it.  If visit_start_struct() failed, *obj was left NULL (or
 * undefined) and the member visits dereferenced it; later failures were
 * also silently overwritten.  Short-circuit on the first error instead,
 * while still pairing every successful visit_start_struct() with
 * visit_end_struct().
 */
static void visit_type_TestStruct(Visitor *v, TestStruct **obj,
                                  const char *name, Error **errp)
{
    Error *err = NULL;

    visit_start_struct(v, (void **)obj, "TestStruct", name, sizeof(TestStruct),
                       &err);
    if (err) {
        goto out;
    }

    visit_type_int(v, &(*obj)->integer, "integer", &err);
    if (err) {
        goto out_end;
    }
    visit_type_bool(v, &(*obj)->boolean, "boolean", &err);
    if (err) {
        goto out_end;
    }
    visit_type_str(v, &(*obj)->string, "string", &err);

out_end:
    /* visit_end_struct() must run even when a member visit failed */
    error_propagate(errp, err);
    err = NULL;
    visit_end_struct(v, &err);
out:
    error_propagate(errp, err);
}
/* A flat struct with exactly the expected members must pass strict mode. */
static void test_validate_struct(TestInputVisitorData *data,
                                 const void *unused)
{
    Error *errp = NULL;
    TestStruct *ts = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'integer': -42, 'boolean': true, 'string': 'foo' }");

    visit_type_TestStruct(v, &ts, NULL, &errp);
    g_assert(!error_is_set(&errp));

    g_free(ts->string);
    g_free(ts);
}
/* Nested dicts matching the schema must pass strict mode. */
static void test_validate_struct_nested(TestInputVisitorData *data,
                                        const void *unused)
{
    Error *errp = NULL;
    UserDefNested *nested = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string' }, 'string2': 'string2'}}}");

    visit_type_UserDefNested(v, &nested, NULL, &errp);
    g_assert(!error_is_set(&errp));

    qapi_free_UserDefNested(nested);
}
/* A homogeneous list of valid elements must pass strict mode. */
static void test_validate_list(TestInputVisitorData *data,
                               const void *unused)
{
    Error *errp = NULL;
    UserDefOneList *list = NULL;
    Visitor *v;

    v = validate_test_init(data, "[ { 'string': 'string0', 'integer': 42 }, { 'string': 'string1', 'integer': 43 }, { 'string': 'string2', 'integer': 44 } ]");

    visit_type_UserDefOneList(v, &list, NULL, &errp);
    g_assert(!error_is_set(&errp));

    qapi_free_UserDefOneList(list);
}
/* A discriminated union with matching payload must pass strict mode. */
static void test_validate_union(TestInputVisitorData *data,
                                const void *unused)
{
    Error *errp = NULL;
    UserDefUnion *u = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'type': 'b', 'data' : { 'integer': 42 } }");

    visit_type_UserDefUnion(v, &u, NULL, &errp);
    g_assert(!error_is_set(&errp));

    qapi_free_UserDefUnion(u);
}
/* Strict mode must reject an unexpected extra member in a flat struct. */
static void test_validate_fail_struct(TestInputVisitorData *data,
                                      const void *unused)
{
    Error *errp = NULL;
    TestStruct *ts = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'integer': -42, 'boolean': true, 'string': 'foo', 'extra': 42 }");

    visit_type_TestStruct(v, &ts, NULL, &errp);
    g_assert(error_is_set(&errp));

    if (ts) {
        g_free(ts->string);
    }
    g_free(ts);
}
/* Strict mode must reject an extra member buried in a nested dict. */
static void test_validate_fail_struct_nested(TestInputVisitorData *data,
                                             const void *unused)
{
    Error *errp = NULL;
    UserDefNested *nested = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string', 'extra': [42, 23, {'foo':'bar'}] }, 'string2': 'string2'}}}");

    visit_type_UserDefNested(v, &nested, NULL, &errp);
    g_assert(error_is_set(&errp));

    qapi_free_UserDefNested(nested);
}
/* Strict mode must reject an extra member inside a list element. */
static void test_validate_fail_list(TestInputVisitorData *data,
                                    const void *unused)
{
    Error *errp = NULL;
    UserDefOneList *list = NULL;
    Visitor *v;

    v = validate_test_init(data, "[ { 'string': 'string0', 'integer': 42 }, { 'string': 'string1', 'integer': 43 }, { 'string': 'string2', 'integer': 44, 'extra': 'ggg' } ]");

    visit_type_UserDefOneList(v, &list, NULL, &errp);
    g_assert(error_is_set(&errp));

    qapi_free_UserDefOneList(list);
}
/* Strict mode must reject an extra member alongside a union payload. */
static void test_validate_fail_union(TestInputVisitorData *data,
                                     const void *unused)
{
    Error *errp = NULL;
    UserDefUnion *u = NULL;
    Visitor *v;

    v = validate_test_init(data, "{ 'type': 'b', 'data' : { 'integer': 42 }, 'extra': 'yyy' }");

    visit_type_UserDefUnion(v, &u, NULL, &errp);
    g_assert(error_is_set(&errp));

    qapi_free_UserDefUnion(u);
}
/* Register one test case under @testpath, sharing @data as the fixture
 * and validate_teardown() as the common teardown (no setup function:
 * each test builds its own visitor via validate_test_init()). */
static void validate_test_add(const char *testpath,
                              TestInputVisitorData *data,
                              void (*test_func)(TestInputVisitorData *data, const void *user_data))
{
    g_test_add(testpath, TestInputVisitorData, data, NULL, test_func,
               validate_teardown);
}
/*
 * Register all strict-mode input visitor tests and run them.
 *
 * Fix: the original called g_test_run() and then returned 0
 * unconditionally, so the process reported success even when test
 * cases failed.  Propagate g_test_run()'s result as the exit status.
 */
int main(int argc, char **argv)
{
    TestInputVisitorData testdata;

    g_test_init(&argc, &argv, NULL);

    validate_test_add("/visitor/input-strict/pass/struct",
                      &testdata, test_validate_struct);
    validate_test_add("/visitor/input-strict/pass/struct-nested",
                      &testdata, test_validate_struct_nested);
    validate_test_add("/visitor/input-strict/pass/list",
                      &testdata, test_validate_list);
    validate_test_add("/visitor/input-strict/pass/union",
                      &testdata, test_validate_union);
    validate_test_add("/visitor/input-strict/fail/struct",
                      &testdata, test_validate_fail_struct);
    validate_test_add("/visitor/input-strict/fail/struct-nested",
                      &testdata, test_validate_fail_struct_nested);
    validate_test_add("/visitor/input-strict/fail/list",
                      &testdata, test_validate_fail_list);
    validate_test_add("/visitor/input-strict/fail/union",
                      &testdata, test_validate_fail_union);

    return g_test_run();
}
| gpl-2.0 |
Seagate/SMR_FS-EXT4 | kernel/fs/overlayfs/inode.c | 345 | 9082 | /*
*
* Copyright (C) 2011 Novell Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include "overlayfs.h"
/*
 * Copy up the last path component before it is modified.
 *
 * The parent is copied up first, then this dentry is copied from its
 * lower layer into the upper layer with @attr applied to the new upper
 * inode.  With @no_data true only metadata is copied (stat.size forced
 * to 0) -- used for O_TRUNC opens where the old contents are discarded
 * anyway.
 */
static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
			    bool no_data)
{
	int err;
	struct dentry *parent;
	struct kstat stat;
	struct path lowerpath;

	parent = dget_parent(dentry);
	err = ovl_copy_up(parent);
	if (err)
		goto out_dput_parent;

	ovl_path_lower(dentry, &lowerpath);
	err = vfs_getattr(&lowerpath, &stat);
	if (err)
		goto out_dput_parent;

	if (no_data)
		stat.size = 0;	/* suppress copying of file contents */

	err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);

out_dput_parent:
	dput(parent);
	return err;
}
/*
 * ovl_setattr - ->setattr inode operation
 *
 * Attribute changes always land on the upper layer: if the dentry is
 * already copied up, notify_change() is applied to the upper dentry
 * under its i_mutex; otherwise the copy-up itself applies @attr.
 */
int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct dentry *upperdentry;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	upperdentry = ovl_dentry_upper(dentry);
	if (upperdentry) {
		mutex_lock(&upperdentry->d_inode->i_mutex);
		err = notify_change(upperdentry, attr, NULL);
		mutex_unlock(&upperdentry->d_inode->i_mutex);
	} else {
		err = ovl_copy_up_last(dentry, attr, false);
	}
	ovl_drop_write(dentry);
out:
	return err;
}
static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct path realpath;
ovl_path_real(dentry, &realpath);
return vfs_getattr(&realpath, stat);
}
/*
 * ovl_permission - ->permission inode operation
 *
 * Delegates the permission check to the real inode backing this overlay
 * inode.  Directories keep their ovl_entry in i_private; non-directories
 * must recover it through a dentry alias, which can block, so RCU walk
 * (MAY_NOT_BLOCK) returns -ECHILD to force ref-walk.
 */
int ovl_permission(struct inode *inode, int mask)
{
	struct ovl_entry *oe;
	struct dentry *alias = NULL;
	struct inode *realinode;
	struct dentry *realdentry;
	bool is_upper;
	int err;

	if (S_ISDIR(inode->i_mode)) {
		oe = inode->i_private;
	} else if (mask & MAY_NOT_BLOCK) {
		return -ECHILD;
	} else {
		/*
		 * For non-directories find an alias and get the info
		 * from there.
		 */
		alias = d_find_any_alias(inode);
		if (WARN_ON(!alias))
			return -ENOENT;

		oe = alias->d_fsdata;
	}

	realdentry = ovl_entry_real(oe, &is_upper);

	/* Careful in RCU walk mode */
	realinode = ACCESS_ONCE(realdentry->d_inode);
	if (!realinode) {
		WARN_ON(!(mask & MAY_NOT_BLOCK));
		err = -ENOENT;
		goto out_dput;
	}

	if (mask & MAY_WRITE) {
		umode_t mode = realinode->i_mode;

		/*
		 * Writes will always be redirected to upper layer, so
		 * ignore lower layer being read-only.
		 *
		 * If the overlay itself is read-only then proceed
		 * with the permission check, don't return EROFS.
		 * This will only happen if this is the lower layer of
		 * another overlayfs.
		 *
		 * If upper fs becomes read-only after the overlay was
		 * constructed return EROFS to prevent modification of
		 * upper layer.
		 */
		err = -EROFS;
		if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			goto out_dput;
	}

	err = __inode_permission(realinode, mask);
out_dput:
	dput(alias);	/* dput(NULL) is a no-op for the directory case */
	return err;
}
/* State carried from ovl_follow_link() to ovl_put_link(). */
struct ovl_link_data {
	struct dentry *realdentry;	/* real dentry backing the symlink */
	void *cookie;			/* cookie from the real ->follow_link() */
};
static const char *ovl_follow_link(struct dentry *dentry, void **cookie)
{
struct dentry *realdentry;
struct inode *realinode;
struct ovl_link_data *data = NULL;
const char *ret;
realdentry = ovl_dentry_real(dentry);
realinode = realdentry->d_inode;
if (WARN_ON(!realinode->i_op->follow_link))
return ERR_PTR(-EPERM);
if (realinode->i_op->put_link) {
data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
data->realdentry = realdentry;
}
ret = realinode->i_op->follow_link(realdentry, cookie);
if (IS_ERR_OR_NULL(ret)) {
kfree(data);
return ret;
}
if (data)
data->cookie = *cookie;
*cookie = data;
return ret;
}
/* ->put_link: forward the release to the real filesystem, if it asked
 * for one (ovl_follow_link() left c == NULL otherwise). */
static void ovl_put_link(struct inode *unused, void *c)
{
	struct ovl_link_data *ld = c;

	if (ld) {
		struct inode *realinode = ld->realdentry->d_inode;

		realinode->i_op->put_link(realinode, ld->cookie);
		kfree(ld);
	}
}
/* ->readlink: read the target from the real backing symlink, updating
 * its atime as a symlink read would. */
static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
{
	struct path realpath;
	struct inode *real;

	ovl_path_real(dentry, &realpath);
	real = realpath.dentry->d_inode;
	if (!real->i_op->readlink)
		return -EINVAL;

	touch_atime(&realpath);
	return real->i_op->readlink(realpath.dentry, buf, bufsiz);
}
static bool ovl_is_private_xattr(const char *name)
{
return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}
/*
 * ovl_setxattr - ->setxattr inode operation
 *
 * Xattr writes go to the upper layer, so the dentry is copied up first.
 * Setting an overlayfs-private xattr from outside is refused with
 * -EPERM to protect the overlay's own metadata.
 */
int ovl_setxattr(struct dentry *dentry, const char *name,
		 const void *value, size_t size, int flags)
{
	int err;
	struct dentry *upperdentry;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = -EPERM;
	if (ovl_is_private_xattr(name))
		goto out_drop_write;

	err = ovl_copy_up(dentry);
	if (err)
		goto out_drop_write;

	upperdentry = ovl_dentry_upper(dentry);
	err = vfs_setxattr(upperdentry, name, value, size, flags);

out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}
/* Private xattrs need filtering only on non-pure upper directories --
 * the only objects that may legitimately carry overlay metadata. */
static bool ovl_need_xattr_filter(struct dentry *dentry,
				  enum ovl_path_type type)
{
	if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) != __OVL_PATH_UPPER)
		return false;
	return S_ISDIR(dentry->d_inode->i_mode);
}
/* ->getxattr: read from the real layer, hiding overlay-private xattrs
 * on objects where they may exist. */
ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
		     void *value, size_t size)
{
	struct path realpath;
	enum ovl_path_type type;

	type = ovl_path_real(dentry, &realpath);
	if (ovl_is_private_xattr(name) && ovl_need_xattr_filter(dentry, type))
		return -ENODATA;

	return vfs_getxattr(realpath.dentry, name, value, size);
}
/*
 * ovl_listxattr - ->listxattr inode operation
 *
 * Lists xattrs of the real layer, then strips overlayfs-private names
 * from the returned buffer in place (only needed on non-pure upper
 * directories).  @list holds consecutive NUL-terminated names; removed
 * entries are closed up with memmove() and res shrunk accordingly.
 */
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
	struct path realpath;
	enum ovl_path_type type = ovl_path_real(dentry, &realpath);
	ssize_t res;
	int off;

	res = vfs_listxattr(realpath.dentry, list, size);
	if (res <= 0 || size == 0)
		return res;	/* error, empty, or size-probe call */

	if (!ovl_need_xattr_filter(dentry, type))
		return res;

	/* filter out private xattrs */
	for (off = 0; off < res;) {
		char *s = list + off;
		size_t slen = strlen(s) + 1;

		BUG_ON(off + slen > res);

		if (ovl_is_private_xattr(s)) {
			res -= slen;
			memmove(s, s + slen, res - off);
		} else {
			off += slen;
		}
	}

	return res;
}
/*
 * ovl_removexattr - ->removexattr inode operation
 *
 * Private overlay xattrs are reported as absent (-ENODATA).  For a
 * lower-only dentry, first verify the xattr exists on the lower inode
 * (so a copy-up is not triggered just to fail with -ENODATA), then
 * copy up and remove from the upper inode.
 */
int ovl_removexattr(struct dentry *dentry, const char *name)
{
	int err;
	struct path realpath;
	enum ovl_path_type type = ovl_path_real(dentry, &realpath);

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = -ENODATA;
	if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
		goto out_drop_write;

	if (!OVL_TYPE_UPPER(type)) {
		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
		if (err < 0)
			goto out_drop_write;

		err = ovl_copy_up(dentry);
		if (err)
			goto out_drop_write;

		ovl_path_upper(dentry, &realpath);
	}

	err = vfs_removexattr(realpath.dentry, name);
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}
/* Decide whether an open must trigger copy-up: only lower, non-special
 * files opened for writing (or with O_TRUNC) need it. */
static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
				  struct dentry *realdentry)
{
	if (OVL_TYPE_UPPER(type) ||
	    special_file(realdentry->d_inode->i_mode))
		return false;

	return (OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC);
}
/*
 * ovl_d_select_inode - pick the inode to open for this dentry
 *
 * Directories are opened on the overlay inode itself.  Other files are
 * opened on the real backing inode; if the open would write to a
 * lower-layer file, copy it up first (metadata-only when O_TRUNC makes
 * the data irrelevant) and open the new upper inode.
 */
struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
{
	int err;
	struct path realpath;
	enum ovl_path_type type;

	if (d_is_dir(dentry))
		return d_backing_inode(dentry);

	type = ovl_path_real(dentry, &realpath);
	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
		err = ovl_want_write(dentry);
		if (err)
			return ERR_PTR(err);

		if (file_flags & O_TRUNC)
			err = ovl_copy_up_last(dentry, NULL, true);
		else
			err = ovl_copy_up(dentry);
		ovl_drop_write(dentry);
		if (err)
			return ERR_PTR(err);

		ovl_path_upper(dentry, &realpath);
	}

	return d_backing_inode(realpath.dentry);
}
/* Inode operations for regular and special (non-symlink) overlay files. */
static const struct inode_operations ovl_file_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.setxattr	= ovl_setxattr,
	.getxattr	= ovl_getxattr,
	.listxattr	= ovl_listxattr,
	.removexattr	= ovl_removexattr,
};
/* Inode operations for overlay symlinks: file ops plus link handling. */
static const struct inode_operations ovl_symlink_inode_operations = {
	.setattr	= ovl_setattr,
	.follow_link	= ovl_follow_link,
	.put_link	= ovl_put_link,
	.readlink	= ovl_readlink,
	.getattr	= ovl_getattr,
	.setxattr	= ovl_setxattr,
	.getxattr	= ovl_getxattr,
	.listxattr	= ovl_listxattr,
	.removexattr	= ovl_removexattr,
};
/*
 * ovl_new_inode - allocate and initialize an overlay inode
 * @sb:   overlay superblock
 * @mode: file mode; only the S_IFMT type bits are used here
 * @oe:   overlay entry; stored in i_private for directories only
 *
 * Returns the new inode, or NULL on allocation failure or an
 * unrecognized file type.  Atimes/ctimes are not maintained on the
 * overlay inode itself (S_NOATIME | S_NOCMTIME).
 */
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
			    struct ovl_entry *oe)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return NULL;

	mode &= S_IFMT;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_flags |= S_NOATIME | S_NOCMTIME;

	switch (mode) {
	case S_IFDIR:
		inode->i_private = oe;
		inode->i_op = &ovl_dir_inode_operations;
		inode->i_fop = &ovl_dir_operations;
		break;

	case S_IFLNK:
		inode->i_op = &ovl_symlink_inode_operations;
		break;

	case S_IFREG:
	case S_IFSOCK:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFIFO:
		inode->i_op = &ovl_file_inode_operations;
		break;

	default:
		WARN(1, "illegal file type: %i\n", mode);
		iput(inode);
		inode = NULL;
	}

	return inode;
}
| gpl-2.0 |
teamblueridge/SuperSickKernel | sound/core/rawmidi.c | 857 | 49392 | /*
* Abstract layer for MIDI v1.0 stream
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <sound/core.h>
#include <linux/major.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/minors.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Midlevel RawMidi code for ALSA.");
MODULE_LICENSE("GPL");
#ifdef CONFIG_SND_OSSEMUL
/* Per-card mapping of rawmidi device numbers onto the two OSS MIDI minors. */
static int midi_map[SNDRV_CARDS];
static int amidi_map[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1};
module_param_array(midi_map, int, NULL, 0444);
MODULE_PARM_DESC(midi_map, "Raw MIDI device number assigned to 1st OSS device.");
module_param_array(amidi_map, int, NULL, 0444);
MODULE_PARM_DESC(amidi_map, "Raw MIDI device number assigned to 2nd OSS device.");
#endif /* CONFIG_SND_OSSEMUL */

/* snd_device callbacks, defined later in this file. */
static int snd_rawmidi_free(struct snd_rawmidi *rawmidi);
static int snd_rawmidi_dev_free(struct snd_device *device);
static int snd_rawmidi_dev_register(struct snd_device *device);
static int snd_rawmidi_dev_disconnect(struct snd_device *device);

/* All registered rawmidi devices; both protected by register_mutex. */
static LIST_HEAD(snd_rawmidi_devices);
static DEFINE_MUTEX(register_mutex);
static struct snd_rawmidi *snd_rawmidi_search(struct snd_card *card, int device)
{
struct snd_rawmidi *rawmidi;
list_for_each_entry(rawmidi, &snd_rawmidi_devices, list)
if (rawmidi->card == card && rawmidi->device == device)
return rawmidi;
return NULL;
}
/* Translate the VFS open mode into rawmidi logical stream flags;
 * read+write (or neither) maps to the combined OPEN flags. */
static inline unsigned short snd_rawmidi_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		return SNDRV_RAWMIDI_LFLG_INPUT;
	case FMODE_WRITE:
		return SNDRV_RAWMIDI_LFLG_OUTPUT;
	default:
		return SNDRV_RAWMIDI_LFLG_OPEN;
	}
}
/* Nonzero once at least avail_min bytes (input) or bytes of free space
 * (output) are available on the substream. */
static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;

	return rt->avail >= rt->avail_min;
}
/* Like snd_rawmidi_ready(), but an append-mode substream additionally
 * requires room for the whole @count bytes (atomic append). */
static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
					   size_t count)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;

	if (rt->avail < rt->avail_min)
		return 0;
	if (substream->append && rt->avail < count)
		return 0;
	return 1;
}
/* Tasklet body: deliver the deferred input event callback. @data is the
 * substream pointer packed by tasklet_init(). */
static void snd_rawmidi_input_event_tasklet(unsigned long data)
{
	struct snd_rawmidi_substream *substream = (struct snd_rawmidi_substream *)data;
	substream->runtime->event(substream);
}
/* Tasklet body: kick the driver's output trigger (start transmission)
 * out of the caller's context. @data is the substream pointer. */
static void snd_rawmidi_output_trigger_tasklet(unsigned long data)
{
	struct snd_rawmidi_substream *substream = (struct snd_rawmidi_substream *)data;
	substream->ops->trigger(substream, 1);
}
static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime;
if ((runtime = kzalloc(sizeof(*runtime), GFP_KERNEL)) == NULL)
return -ENOMEM;
spin_lock_init(&runtime->lock);
init_waitqueue_head(&runtime->sleep);
if (substream->stream == SNDRV_RAWMIDI_STREAM_INPUT)
tasklet_init(&runtime->tasklet,
snd_rawmidi_input_event_tasklet,
(unsigned long)substream);
else
tasklet_init(&runtime->tasklet,
snd_rawmidi_output_trigger_tasklet,
(unsigned long)substream);
runtime->event = NULL;
runtime->buffer_size = PAGE_SIZE;
runtime->avail_min = 1;
if (substream->stream == SNDRV_RAWMIDI_STREAM_INPUT)
runtime->avail = 0;
else
runtime->avail = runtime->buffer_size;
if ((runtime->buffer = kmalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
kfree(runtime);
return -ENOMEM;
}
runtime->appl_ptr = runtime->hw_ptr = 0;
substream->runtime = runtime;
return 0;
}
/* Release the runtime state created by snd_rawmidi_runtime_create(). */
static int snd_rawmidi_runtime_free(struct snd_rawmidi_substream *substream)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;

	substream->runtime = NULL;
	kfree(rt->buffer);
	kfree(rt);
	return 0;
}
/* Start (up != 0) or stop output on an opened substream.  Starting is
 * deferred to the tasklet; stopping kills the tasklet first so it
 * cannot re-trigger after the driver was told to stop. */
static inline void snd_rawmidi_output_trigger(struct snd_rawmidi_substream *substream,int up)
{
	if (!substream->opened)
		return;
	if (up) {
		tasklet_schedule(&substream->runtime->tasklet);
	} else {
		tasklet_kill(&substream->runtime->tasklet);
		substream->ops->trigger(substream, 0);
	}
}
/* Start/stop input on an opened substream; on stop, flush any pending
 * deferred event delivery. */
static void snd_rawmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	if (!substream->opened)
		return;
	substream->ops->trigger(substream, up);
	if (!up && substream->runtime->event)
		tasklet_kill(&substream->runtime->tasklet);
}
/*
 * snd_rawmidi_drop_output - stop output and discard all pending bytes
 *
 * Stops the transmitter, then resets the ring buffer pointers under the
 * runtime lock so the whole buffer is available again.  Returns 0.
 */
int snd_rawmidi_drop_output(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	snd_rawmidi_output_trigger(substream, 0);
	runtime->drain = 0;
	spin_lock_irqsave(&runtime->lock, flags);
	runtime->appl_ptr = runtime->hw_ptr = 0;
	runtime->avail = runtime->buffer_size;
	spin_unlock_irqrestore(&runtime->lock, flags);
	return 0;
}
/*
 * snd_rawmidi_drain_output - wait until all queued output is transmitted
 *
 * Waits (interruptibly, up to 10s) for the ring buffer to empty, then
 * gives the hardware a chance to flush its FIFOs (driver ->drain or a
 * 50ms sleep) and finally drops the output state.  Returns 0, -EIO on
 * timeout with data still queued, or -ERESTARTSYS when interrupted by
 * a signal (in which case no drop/drain is performed).
 */
int snd_rawmidi_drain_output(struct snd_rawmidi_substream *substream)
{
	int err;
	long timeout;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	err = 0;
	runtime->drain = 1;
	timeout = wait_event_interruptible_timeout(runtime->sleep,
				(runtime->avail >= runtime->buffer_size),
				10*HZ);
	if (signal_pending(current))
		err = -ERESTARTSYS;
	if (runtime->avail < runtime->buffer_size && !timeout) {
		snd_printk(KERN_WARNING "rawmidi drain error (avail = %li, buffer_size = %li)\n", (long)runtime->avail, (long)runtime->buffer_size);
		err = -EIO;
	}
	runtime->drain = 0;
	if (err != -ERESTARTSYS) {
		/* we need wait a while to make sure that Tx FIFOs are empty */
		if (substream->ops->drain)
			substream->ops->drain(substream);
		else
			msleep(50);
		snd_rawmidi_drop_output(substream);
	}
	return err;
}
/*
 * snd_rawmidi_drain_input - stop input and discard all buffered bytes
 *
 * Stops the receiver, then resets the ring buffer pointers under the
 * runtime lock (input buffer becomes empty).  Returns 0.
 */
int snd_rawmidi_drain_input(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	snd_rawmidi_input_trigger(substream, 0);
	runtime->drain = 0;
	spin_lock_irqsave(&runtime->lock, flags);
	runtime->appl_ptr = runtime->hw_ptr = 0;
	runtime->avail = 0;
	spin_unlock_irqrestore(&runtime->lock, flags);
	return 0;
}
/* look for an available substream for the given stream direction;
 * if a specific subdevice is given, try to assign it
 *
 * Returns 0 with *sub_ret set, -ENXIO if the device lacks the
 * direction, -ENODEV for an out-of-range subdevice, or -EAGAIN when
 * every matching substream is busy (opened non-append, or append-mode
 * mismatch).  Caller must hold rmidi->open_mutex.
 */
static int assign_substream(struct snd_rawmidi *rmidi, int subdevice,
			    int stream, int mode,
			    struct snd_rawmidi_substream **sub_ret)
{
	struct snd_rawmidi_substream *substream;
	struct snd_rawmidi_str *s = &rmidi->streams[stream];
	static unsigned int info_flags[2] = {
		[SNDRV_RAWMIDI_STREAM_OUTPUT] = SNDRV_RAWMIDI_INFO_OUTPUT,
		[SNDRV_RAWMIDI_STREAM_INPUT] = SNDRV_RAWMIDI_INFO_INPUT,
	};

	if (!(rmidi->info_flags & info_flags[stream]))
		return -ENXIO;
	if (subdevice >= 0 && subdevice >= s->substream_count)
		return -ENODEV;

	list_for_each_entry(substream, &s->substreams, list) {
		if (substream->opened) {
			/* an opened output may be shared only in append mode */
			if (stream == SNDRV_RAWMIDI_STREAM_INPUT ||
			    !(mode & SNDRV_RAWMIDI_LFLG_APPEND) ||
			    !substream->append)
				continue;
		}
		if (subdevice < 0 || subdevice == substream->number) {
			*sub_ret = substream;
			return 0;
		}
	}
	return -EAGAIN;
}
/* open and do ref-counting for the given substream
 *
 * The first opener creates the runtime, calls the driver ->open and
 * records the opener pid; subsequent (append-mode) openers only bump
 * use_count.  Caller must hold rmidi->open_mutex.
 */
static int open_substream(struct snd_rawmidi *rmidi,
			  struct snd_rawmidi_substream *substream,
			  int mode)
{
	int err;

	if (substream->use_count == 0) {
		err = snd_rawmidi_runtime_create(substream);
		if (err < 0)
			return err;
		err = substream->ops->open(substream);
		if (err < 0) {
			snd_rawmidi_runtime_free(substream);
			return err;
		}
		substream->opened = 1;
		substream->active_sensing = 0;
		if (mode & SNDRV_RAWMIDI_LFLG_APPEND)
			substream->append = 1;
		substream->pid = get_pid(task_pid(current));
		rmidi->streams[substream->stream].substream_opened++;
	}
	substream->use_count++;
	return 0;
}
static void close_substream(struct snd_rawmidi *rmidi,
struct snd_rawmidi_substream *substream,
int cleanup);
/*
 * Open the input and/or output substreams requested by @mode into
 * @rfile.  Both substreams are assigned before either is opened so a
 * late failure cannot leave a half-claimed pair; on output-open failure
 * an already-opened input is closed again.  Caller must hold
 * rmidi->open_mutex.
 */
static int rawmidi_open_priv(struct snd_rawmidi *rmidi, int subdevice, int mode,
			     struct snd_rawmidi_file *rfile)
{
	struct snd_rawmidi_substream *sinput = NULL, *soutput = NULL;
	int err;

	rfile->input = rfile->output = NULL;
	if (mode & SNDRV_RAWMIDI_LFLG_INPUT) {
		err = assign_substream(rmidi, subdevice,
				       SNDRV_RAWMIDI_STREAM_INPUT,
				       mode, &sinput);
		if (err < 0)
			return err;
	}
	if (mode & SNDRV_RAWMIDI_LFLG_OUTPUT) {
		err = assign_substream(rmidi, subdevice,
				       SNDRV_RAWMIDI_STREAM_OUTPUT,
				       mode, &soutput);
		if (err < 0)
			return err;
	}

	if (sinput) {
		err = open_substream(rmidi, sinput, mode);
		if (err < 0)
			return err;
	}
	if (soutput) {
		err = open_substream(rmidi, soutput, mode);
		if (err < 0) {
			if (sinput)
				close_substream(rmidi, sinput, 0);
			return err;
		}
	}

	rfile->rmidi = rmidi;
	rfile->input = sinput;
	rfile->output = soutput;
	return 0;
}
/* called from sound/core/seq/seq_midi.c
 *
 * In-kernel open of a rawmidi device: look it up under register_mutex,
 * pin its owning module, then open the requested substreams under the
 * device's open_mutex.  The module reference is dropped again on
 * failure.  Returns 0 or a negative error code.
 */
int snd_rawmidi_kernel_open(struct snd_card *card, int device, int subdevice,
			    int mode, struct snd_rawmidi_file * rfile)
{
	struct snd_rawmidi *rmidi;
	int err;

	if (snd_BUG_ON(!rfile))
		return -EINVAL;

	mutex_lock(&register_mutex);
	rmidi = snd_rawmidi_search(card, device);
	if (rmidi == NULL) {
		mutex_unlock(&register_mutex);
		return -ENODEV;
	}
	if (!try_module_get(rmidi->card->module)) {
		mutex_unlock(&register_mutex);
		return -ENXIO;
	}
	mutex_unlock(&register_mutex);

	mutex_lock(&rmidi->open_mutex);
	err = rawmidi_open_priv(rmidi, subdevice, mode, rfile);
	mutex_unlock(&rmidi->open_mutex);
	if (err < 0)
		module_put(rmidi->card->module);
	return err;
}
/*
 * ->open file operation for /dev rawmidi nodes (native and OSS-emulated
 * majors).  Resolves the minor to a rawmidi device, registers the file
 * with the card, and opens the substreams implied by the open mode.  A
 * busy non-append open blocks on open_wait (unless O_NONBLOCK, which
 * returns -EBUSY), re-reading the ctl-preferred subdevice each retry.
 */
static int snd_rawmidi_open(struct inode *inode, struct file *file)
{
	int maj = imajor(inode);
	struct snd_card *card;
	int subdevice;
	unsigned short fflags;
	int err;
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_file *rawmidi_file = NULL;
	wait_queue_t wait;
	struct snd_ctl_file *kctl;

	if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
		return -EINVAL;		/* invalid combination */

	err = nonseekable_open(inode, file);
	if (err < 0)
		return err;

	if (maj == snd_major) {
		rmidi = snd_lookup_minor_data(iminor(inode),
					      SNDRV_DEVICE_TYPE_RAWMIDI);
#ifdef CONFIG_SND_OSSEMUL
	} else if (maj == SOUND_MAJOR) {
		rmidi = snd_lookup_oss_minor_data(iminor(inode),
						  SNDRV_OSS_DEVICE_TYPE_MIDI);
#endif
	} else
		return -ENXIO;

	if (rmidi == NULL)
		return -ENODEV;

	if (!try_module_get(rmidi->card->module)) {
		snd_card_unref(rmidi->card);
		return -ENXIO;
	}

	mutex_lock(&rmidi->open_mutex);
	card = rmidi->card;
	err = snd_card_file_add(card, file);
	if (err < 0)
		goto __error_card;
	fflags = snd_rawmidi_file_flags(file);
	if ((file->f_flags & O_APPEND) || maj == SOUND_MAJOR) /* OSS emul? */
		fflags |= SNDRV_RAWMIDI_LFLG_APPEND;
	rawmidi_file = kmalloc(sizeof(*rawmidi_file), GFP_KERNEL);
	if (rawmidi_file == NULL) {
		err = -ENOMEM;
		goto __error;
	}
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&rmidi->open_wait, &wait);
	while (1) {
		/* pick up a per-process preferred subdevice, if one was set
		 * through the control interface */
		subdevice = -1;
		read_lock(&card->ctl_files_rwlock);
		list_for_each_entry(kctl, &card->ctl_files, list) {
			if (kctl->pid == task_pid(current)) {
				subdevice = kctl->prefer_rawmidi_subdevice;
				if (subdevice != -1)
					break;
			}
		}
		read_unlock(&card->ctl_files_rwlock);
		err = rawmidi_open_priv(rmidi, subdevice, fflags, rawmidi_file);
		if (err >= 0)
			break;
		if (err == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK) {
				err = -EBUSY;
				break;
			}
		} else
			break;
		/* busy: sleep until a release wakes open_wait, then retry */
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&rmidi->open_mutex);
		schedule();
		mutex_lock(&rmidi->open_mutex);
		if (rmidi->card->shutdown) {
			err = -ENODEV;
			break;
		}
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
	}
	remove_wait_queue(&rmidi->open_wait, &wait);
	if (err < 0) {
		kfree(rawmidi_file);
		goto __error;
	}
#ifdef CONFIG_SND_OSSEMUL
	if (rawmidi_file->input && rawmidi_file->input->runtime)
		rawmidi_file->input->runtime->oss = (maj == SOUND_MAJOR);
	if (rawmidi_file->output && rawmidi_file->output->runtime)
		rawmidi_file->output->runtime->oss = (maj == SOUND_MAJOR);
#endif
	file->private_data = rawmidi_file;
	mutex_unlock(&rmidi->open_mutex);
	snd_card_unref(rmidi->card);
	return 0;

 __error:
	snd_card_file_remove(card, file);
 __error_card:
	mutex_unlock(&rmidi->open_mutex);
	module_put(rmidi->card->module);
	snd_card_unref(rmidi->card);
	return err;
}
/*
 * Drop one reference on @substream; the last closer optionally quiesces
 * the stream (@cleanup: stop input, or for output send a final active
 * sensing byte and drain), then calls the driver ->close and frees the
 * runtime.  Caller must hold rmidi->open_mutex.
 */
static void close_substream(struct snd_rawmidi *rmidi,
			    struct snd_rawmidi_substream *substream,
			    int cleanup)
{
	if (--substream->use_count)
		return;

	if (cleanup) {
		if (substream->stream == SNDRV_RAWMIDI_STREAM_INPUT)
			snd_rawmidi_input_trigger(substream, 0);
		else {
			if (substream->active_sensing) {
				unsigned char buf = 0xfe;
				/* sending single active sensing message
				 * to shut the device up
				 */
				snd_rawmidi_kernel_write(substream, &buf, 1);
			}
			if (snd_rawmidi_drain_output(substream) == -ERESTARTSYS)
				snd_rawmidi_output_trigger(substream, 0);
		}
	}
	substream->ops->close(substream);
	if (substream->runtime->private_free)
		substream->runtime->private_free(substream);
	snd_rawmidi_runtime_free(substream);
	substream->opened = 0;
	substream->append = 0;
	put_pid(substream->pid);
	substream->pid = NULL;
	rmidi->streams[substream->stream].substream_opened--;
}
/* Close both substreams of @rfile (with full cleanup) and wake any
 * opener blocked on open_wait so it can retry. */
static void rawmidi_release_priv(struct snd_rawmidi_file *rfile)
{
	struct snd_rawmidi *rmidi;

	rmidi = rfile->rmidi;
	mutex_lock(&rmidi->open_mutex);
	if (rfile->input) {
		close_substream(rmidi, rfile->input, 1);
		rfile->input = NULL;
	}
	if (rfile->output) {
		close_substream(rmidi, rfile->output, 1);
		rfile->output = NULL;
	}
	rfile->rmidi = NULL;
	mutex_unlock(&rmidi->open_mutex);
	wake_up(&rmidi->open_wait);
}
/* called from sound/core/seq/seq_midi.c
 *
 * Counterpart of snd_rawmidi_kernel_open(): close the substreams and
 * drop the module reference taken at open time.  The rmidi pointer is
 * saved first because rawmidi_release_priv() clears rfile->rmidi.
 */
int snd_rawmidi_kernel_release(struct snd_rawmidi_file *rfile)
{
	struct snd_rawmidi *rmidi;

	if (snd_BUG_ON(!rfile))
		return -ENXIO;

	rmidi = rfile->rmidi;
	rawmidi_release_priv(rfile);
	module_put(rmidi->card->module);
	return 0;
}
/* ->release file operation: tear down the per-open state and drop the
 * card-file registration and module reference (module is cached first,
 * since rmidi may be freed once the card file is removed). */
static int snd_rawmidi_release(struct inode *inode, struct file *file)
{
	struct snd_rawmidi_file *rfile;
	struct snd_rawmidi *rmidi;
	struct module *module;

	rfile = file->private_data;
	rmidi = rfile->rmidi;
	rawmidi_release_priv(rfile);
	kfree(rfile);
	module = rmidi->card->module;
	snd_card_file_remove(rmidi->card, file);
	module_put(module);
	return 0;
}
/* Fill @info with the identity of @substream's device and the
 * open/available subdevice counts of its stream direction. */
static int snd_rawmidi_info(struct snd_rawmidi_substream *substream,
			    struct snd_rawmidi_info *info)
{
	struct snd_rawmidi *rmidi;

	if (substream == NULL)
		return -ENODEV;
	rmidi = substream->rmidi;
	memset(info, 0, sizeof(*info));
	info->card = rmidi->card->number;
	info->device = rmidi->device;
	info->subdevice = substream->number;
	info->stream = substream->stream;
	info->flags = rmidi->info_flags;
	strcpy(info->id, rmidi->id);
	strcpy(info->name, rmidi->name);
	strcpy(info->subname, substream->name);
	info->subdevices_count = substream->pstr->substream_count;
	info->subdevices_avail = (substream->pstr->substream_count -
				  substream->pstr->substream_opened);
	return 0;
}
/* Userspace wrapper around snd_rawmidi_info(): fill a kernel copy and
 * push it out with copy_to_user(). */
static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
				 struct snd_rawmidi_info __user * _info)
{
	struct snd_rawmidi_info info;
	int err;

	err = snd_rawmidi_info(substream, &info);
	if (err < 0)
		return err;
	if (copy_to_user(_info, &info, sizeof(struct snd_rawmidi_info)))
		return -EFAULT;
	return 0;
}
/*
 * snd_rawmidi_info_select - look up info by (device, stream, subdevice)
 *
 * Resolves the triple stored in @info to a substream on @card and fills
 * the rest of @info from it.  Errors: -ENXIO for unknown device /
 * subdevice, -EINVAL for a bad stream index, -ENOENT when the stream
 * direction has no substreams.
 */
int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
{
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_str *pstr;
	struct snd_rawmidi_substream *substream;

	mutex_lock(&register_mutex);
	rmidi = snd_rawmidi_search(card, info->device);
	mutex_unlock(&register_mutex);
	if (!rmidi)
		return -ENXIO;
	if (info->stream < 0 || info->stream > 1)
		return -EINVAL;
	pstr = &rmidi->streams[info->stream];
	if (pstr->substream_count == 0)
		return -ENOENT;
	if (info->subdevice >= pstr->substream_count)
		return -ENXIO;
	list_for_each_entry(substream, &pstr->substreams, list) {
		if ((unsigned int)substream->number == info->subdevice)
			return snd_rawmidi_info(substream, info);
	}
	return -ENXIO;
}
/* Userspace wrapper around snd_rawmidi_info_select(): copy in only the
 * three lookup keys, resolve, and copy the full result back out. */
static int snd_rawmidi_info_select_user(struct snd_card *card,
					struct snd_rawmidi_info __user *_info)
{
	struct snd_rawmidi_info info;
	int err;

	if (get_user(info.device, &_info->device))
		return -EFAULT;
	if (get_user(info.stream, &_info->stream))
		return -EFAULT;
	if (get_user(info.subdevice, &_info->subdevice))
		return -EFAULT;
	err = snd_rawmidi_info_select(card, &info);
	if (err < 0)
		return err;
	if (copy_to_user(_info, &info, sizeof(struct snd_rawmidi_info)))
		return -EFAULT;
	return 0;
}
/*
 * snd_rawmidi_output_params - apply userspace stream parameters (output)
 *
 * Validates buffer_size (32 .. 1MB) and avail_min, optionally resizes
 * the ring buffer, and records the active-sensing preference.
 *
 * Fix: the original freed and replaced runtime->buffer without holding
 * runtime->lock and left appl_ptr/hw_ptr pointing into the old buffer,
 * racing with concurrent writers/irq handlers that dereference the
 * buffer under that lock.  Swap the buffer and reset the pointers
 * inside spin_lock_irq(), and free the old buffer only after unlock.
 */
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
			      struct snd_rawmidi_params * params)
{
	char *newbuf, *oldbuf;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	if (substream->append && substream->use_count > 1)
		return -EBUSY;
	snd_rawmidi_drain_output(substream);
	if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
		return -EINVAL;
	}
	if (params->avail_min < 1 || params->avail_min > params->buffer_size) {
		return -EINVAL;
	}
	if (params->buffer_size != runtime->buffer_size) {
		newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
		if (!newbuf)
			return -ENOMEM;
		spin_lock_irq(&runtime->lock);
		oldbuf = runtime->buffer;
		runtime->buffer = newbuf;
		runtime->buffer_size = params->buffer_size;
		runtime->avail = runtime->buffer_size;
		runtime->appl_ptr = runtime->hw_ptr = 0;
		spin_unlock_irq(&runtime->lock);
		kfree(oldbuf);
	}
	runtime->avail_min = params->avail_min;
	substream->active_sensing = !params->no_active_sensing;
	return 0;
}
/*
 * Set the input buffer size and wakeup watermark for a substream.
 * Returns 0 or a negative error code.
 */
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
			     struct snd_rawmidi_params * params)
{
	char *newbuf;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	/* discard queued input before the buffer may be replaced */
	snd_rawmidi_drain_input(substream);
	if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
		return -EINVAL;
	}
	if (params->avail_min < 1 || params->avail_min > params->buffer_size) {
		return -EINVAL;
	}
	if (params->buffer_size != runtime->buffer_size) {
		newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
		if (!newbuf)
			return -ENOMEM;
		/* NOTE(review): swap is done without runtime->lock, as in
		 * the output-params path — confirm no concurrent receiver. */
		kfree(runtime->buffer);
		runtime->buffer = newbuf;
		runtime->buffer_size = params->buffer_size;
	}
	runtime->avail_min = params->avail_min;
	return 0;
}
/* Snapshot the output-stream status (only 'avail' is meaningful). */
static int snd_rawmidi_output_status(struct snd_rawmidi_substream *substream,
				     struct snd_rawmidi_status * status)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;

	memset(status, 0, sizeof(*status));
	status->stream = SNDRV_RAWMIDI_STREAM_OUTPUT;
	spin_lock_irq(&rt->lock);
	status->avail = rt->avail;
	spin_unlock_irq(&rt->lock);
	return 0;
}
/* Snapshot the input-stream status and reset the overrun counter. */
static int snd_rawmidi_input_status(struct snd_rawmidi_substream *substream,
				    struct snd_rawmidi_status * status)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;

	memset(status, 0, sizeof(*status));
	status->stream = SNDRV_RAWMIDI_STREAM_INPUT;
	spin_lock_irq(&rt->lock);
	status->avail = rt->avail;
	status->xruns = rt->xruns;
	rt->xruns = 0;		/* overruns are reported once, then cleared */
	spin_unlock_irq(&rt->lock);
	return 0;
}
static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_rawmidi_file *rfile;
void __user *argp = (void __user *)arg;
rfile = file->private_data;
if (((cmd >> 8) & 0xff) != 'W')
return -ENOTTY;
switch (cmd) {
case SNDRV_RAWMIDI_IOCTL_PVERSION:
return put_user(SNDRV_RAWMIDI_VERSION, (int __user *)argp) ? -EFAULT : 0;
case SNDRV_RAWMIDI_IOCTL_INFO:
{
int stream;
struct snd_rawmidi_info __user *info = argp;
if (get_user(stream, &info->stream))
return -EFAULT;
switch (stream) {
case SNDRV_RAWMIDI_STREAM_INPUT:
return snd_rawmidi_info_user(rfile->input, info);
case SNDRV_RAWMIDI_STREAM_OUTPUT:
return snd_rawmidi_info_user(rfile->output, info);
default:
return -EINVAL;
}
}
case SNDRV_RAWMIDI_IOCTL_PARAMS:
{
struct snd_rawmidi_params params;
if (copy_from_user(¶ms, argp, sizeof(struct snd_rawmidi_params)))
return -EFAULT;
switch (params.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
return -EINVAL;
return snd_rawmidi_output_params(rfile->output, ¶ms);
case SNDRV_RAWMIDI_STREAM_INPUT:
if (rfile->input == NULL)
return -EINVAL;
return snd_rawmidi_input_params(rfile->input, ¶ms);
default:
return -EINVAL;
}
}
case SNDRV_RAWMIDI_IOCTL_STATUS:
{
int err = 0;
struct snd_rawmidi_status status;
if (copy_from_user(&status, argp, sizeof(struct snd_rawmidi_status)))
return -EFAULT;
switch (status.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
return -EINVAL;
err = snd_rawmidi_output_status(rfile->output, &status);
break;
case SNDRV_RAWMIDI_STREAM_INPUT:
if (rfile->input == NULL)
return -EINVAL;
err = snd_rawmidi_input_status(rfile->input, &status);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
if (copy_to_user(argp, &status, sizeof(struct snd_rawmidi_status)))
return -EFAULT;
return 0;
}
case SNDRV_RAWMIDI_IOCTL_DROP:
{
int val;
if (get_user(val, (int __user *) argp))
return -EFAULT;
switch (val) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
return -EINVAL;
return snd_rawmidi_drop_output(rfile->output);
default:
return -EINVAL;
}
}
case SNDRV_RAWMIDI_IOCTL_DRAIN:
{
int val;
if (get_user(val, (int __user *) argp))
return -EFAULT;
switch (val) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
return -EINVAL;
return snd_rawmidi_drain_output(rfile->output);
case SNDRV_RAWMIDI_STREAM_INPUT:
if (rfile->input == NULL)
return -EINVAL;
return snd_rawmidi_drain_input(rfile->input);
default:
return -EINVAL;
}
}
#ifdef CONFIG_SND_DEBUG
default:
snd_printk(KERN_WARNING "rawmidi: unknown command = 0x%x\n", cmd);
#endif
}
return -ENOTTY;
}
static int snd_rawmidi_control_ioctl(struct snd_card *card,
struct snd_ctl_file *control,
unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
switch (cmd) {
case SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE:
{
int device;
if (get_user(device, (int __user *)argp))
return -EFAULT;
if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
device = SNDRV_RAWMIDI_DEVICES - 1;
mutex_lock(®ister_mutex);
device = device < 0 ? 0 : device + 1;
while (device < SNDRV_RAWMIDI_DEVICES) {
if (snd_rawmidi_search(card, device))
break;
device++;
}
if (device == SNDRV_RAWMIDI_DEVICES)
device = -1;
mutex_unlock(®ister_mutex);
if (put_user(device, (int __user *)argp))
return -EFAULT;
return 0;
}
case SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE:
{
int val;
if (get_user(val, (int __user *)argp))
return -EFAULT;
control->prefer_rawmidi_subdevice = val;
return 0;
}
case SNDRV_CTL_IOCTL_RAWMIDI_INFO:
return snd_rawmidi_info_select_user(card, argp);
}
return -ENOIOCTLCMD;
}
/**
* snd_rawmidi_receive - receive the input data from the device
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: the data size to read
*
* Reads the data from the internal buffer.
*
* Returns the size of read data, or a negative error code on failure.
*/
int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
			const unsigned char *buffer, int count)
{
	unsigned long flags;
	int result = 0, count1;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	if (!substream->opened)
		return -EBADFD;
	if (runtime->buffer == NULL) {
		snd_printd("snd_rawmidi_receive: input is not active!!!\n");
		return -EINVAL;
	}
	spin_lock_irqsave(&runtime->lock, flags);
	if (count == 1) {	/* special case, faster code */
		substream->bytes++;
		if (runtime->avail < runtime->buffer_size) {
			runtime->buffer[runtime->hw_ptr++] = buffer[0];
			runtime->hw_ptr %= runtime->buffer_size;
			runtime->avail++;
			result++;
		} else {
			/* ring full: count the dropped byte as an overrun */
			runtime->xruns++;
		}
	} else {
		substream->bytes += count;
		/* first chunk: up to the physical end of the ring */
		count1 = runtime->buffer_size - runtime->hw_ptr;
		if (count1 > count)
			count1 = count;
		/* clamp to the free space in the ring */
		if (count1 > (int)(runtime->buffer_size - runtime->avail))
			count1 = runtime->buffer_size - runtime->avail;
		memcpy(runtime->buffer + runtime->hw_ptr, buffer, count1);
		runtime->hw_ptr += count1;
		runtime->hw_ptr %= runtime->buffer_size;
		runtime->avail += count1;
		count -= count1;
		result += count1;
		if (count > 0) {
			/* second chunk: wrap around to the ring start */
			buffer += count1;
			count1 = count;
			if (count1 > (int)(runtime->buffer_size - runtime->avail)) {
				count1 = runtime->buffer_size - runtime->avail;
				/* bytes that do not fit are dropped as overruns */
				runtime->xruns += count - count1;
			}
			if (count1 > 0) {
				memcpy(runtime->buffer, buffer, count1);
				runtime->hw_ptr = count1;
				runtime->avail += count1;
				result += count1;
			}
		}
	}
	if (result > 0) {
		/* defer the event callback to a tasklet, or wake sleeping readers */
		if (runtime->event)
			tasklet_schedule(&runtime->tasklet);
		else if (snd_rawmidi_ready(substream))
			wake_up(&runtime->sleep);
	}
	spin_unlock_irqrestore(&runtime->lock, flags);
	return result;
}
/*
 * Copy up to @count bytes from the input ring buffer into either a
 * user-space buffer or a kernel buffer (exactly one should be given).
 * Returns the number of bytes copied, or a negative error code.
 */
static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
				     unsigned char __user *userbuf,
				     unsigned char *kernelbuf, long count)
{
	unsigned long flags;
	long result = 0, count1;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	while (count > 0 && runtime->avail) {
		/* chunk limited by the distance to the end of the ring */
		count1 = runtime->buffer_size - runtime->appl_ptr;
		if (count1 > count)
			count1 = count;
		spin_lock_irqsave(&runtime->lock, flags);
		/* re-check available data under the lock */
		if (count1 > (int)runtime->avail)
			count1 = runtime->avail;
		if (kernelbuf)
			memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
		if (userbuf) {
			/* copy_to_user may fault/sleep: drop the lock around it */
			spin_unlock_irqrestore(&runtime->lock, flags);
			if (copy_to_user(userbuf + result,
					 runtime->buffer + runtime->appl_ptr, count1)) {
				return result > 0 ? result : -EFAULT;
			}
			spin_lock_irqsave(&runtime->lock, flags);
		}
		/* advance the application pointer and release the space */
		runtime->appl_ptr += count1;
		runtime->appl_ptr %= runtime->buffer_size;
		runtime->avail -= count1;
		spin_unlock_irqrestore(&runtime->lock, flags);
		result += count1;
		count -= count1;
	}
	return result;
}
/* Kernel-space read: enable input and drain from the ring buffer. */
long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
			     unsigned char *buf, long count)
{
	snd_rawmidi_input_trigger(substream, 1);
	return snd_rawmidi_kernel_read1(substream,
					NULL /* no user buffer */,
					buf, count);
}
/* read(2) handler: block (unless O_NONBLOCK) until input data arrives. */
static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t count,
				loff_t *offset)
{
	long result;
	int count1;
	struct snd_rawmidi_file *rfile;
	struct snd_rawmidi_substream *substream;
	struct snd_rawmidi_runtime *runtime;

	rfile = file->private_data;
	substream = rfile->input;
	if (substream == NULL)
		return -EIO;
	runtime = substream->runtime;
	snd_rawmidi_input_trigger(substream, 1);
	result = 0;
	while (count > 0) {
		spin_lock_irq(&runtime->lock);
		while (!snd_rawmidi_ready(substream)) {
			wait_queue_t wait;

			/* partial data already read: return it instead of blocking */
			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
				spin_unlock_irq(&runtime->lock);
				return result > 0 ? result : -EAGAIN;
			}
			/* sleep until the receiver wakes us */
			init_waitqueue_entry(&wait, current);
			add_wait_queue(&runtime->sleep, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&runtime->lock);
			schedule();
			remove_wait_queue(&runtime->sleep, &wait);
			if (rfile->rmidi->card->shutdown)
				return -ENODEV;
			if (signal_pending(current))
				return result > 0 ? result : -ERESTARTSYS;
			/* woken without data: treat as an I/O error */
			if (!runtime->avail)
				return result > 0 ? result : -EIO;
			spin_lock_irq(&runtime->lock);
		}
		spin_unlock_irq(&runtime->lock);
		count1 = snd_rawmidi_kernel_read1(substream,
						  (unsigned char __user *)buf,
						  NULL/*kernelbuf*/,
						  count);
		if (count1 < 0)
			return result > 0 ? result : count1;
		result += count1;
		buf += count1;
		count -= count1;
	}
	return result;
}
/**
* snd_rawmidi_transmit_empty - check whether the output buffer is empty
* @substream: the rawmidi substream
*
* Returns 1 if the internal output buffer is empty, 0 if not.
*/
int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
{
	struct snd_rawmidi_runtime *rt = substream->runtime;
	unsigned long flags;
	int empty;

	if (rt->buffer == NULL) {
		snd_printd("snd_rawmidi_transmit_empty: output is not active!!!\n");
		return 1;
	}
	spin_lock_irqsave(&rt->lock, flags);
	/* the buffer is empty when all of it is available for writing */
	empty = rt->avail >= rt->buffer_size;
	spin_unlock_irqrestore(&rt->lock, flags);
	return empty;
}
/**
* snd_rawmidi_transmit_peek - copy data from the internal buffer
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: data size to transfer
*
* Copies data from the internal output buffer to the given buffer.
*
* Call this in the interrupt handler when the midi output is ready,
* and call snd_rawmidi_transmit_ack() after the transmission is
* finished.
*
* Returns the size of copied data, or a negative error code on failure.
*/
int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
			      unsigned char *buffer, int count)
{
	unsigned long flags;
	int result, count1;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	if (runtime->buffer == NULL) {
		snd_printd("snd_rawmidi_transmit_peek: output is not active!!!\n");
		return -EINVAL;
	}
	result = 0;
	spin_lock_irqsave(&runtime->lock, flags);
	if (runtime->avail >= runtime->buffer_size) {
		/* warning: lowlevel layer MUST trigger down the hardware */
		goto __skip;
	}
	if (count == 1) {	/* special case, faster code */
		*buffer = runtime->buffer[runtime->hw_ptr];
		result++;
	} else {
		/* first chunk: up to the physical end of the ring */
		count1 = runtime->buffer_size - runtime->hw_ptr;
		if (count1 > count)
			count1 = count;
		/* clamp to the amount of pending data */
		if (count1 > (int)(runtime->buffer_size - runtime->avail))
			count1 = runtime->buffer_size - runtime->avail;
		memcpy(buffer, runtime->buffer + runtime->hw_ptr, count1);
		count -= count1;
		result += count1;
		if (count > 0) {
			/* second chunk: wrap around to the ring start */
			if (count > (int)(runtime->buffer_size - runtime->avail - count1))
				count = runtime->buffer_size - runtime->avail - count1;
			memcpy(buffer + count1, runtime->buffer, count);
			result += count;
		}
	}
	/* note: hw_ptr is NOT advanced here — see snd_rawmidi_transmit_ack() */
      __skip:
	spin_unlock_irqrestore(&runtime->lock, flags);
	return result;
}
/**
* snd_rawmidi_transmit_ack - acknowledge the transmission
* @substream: the rawmidi substream
* @count: the tranferred count
*
* Advances the hardware pointer for the internal output buffer with
* the given size and updates the condition.
* Call after the transmission is finished.
*
* Returns the advanced size if successful, or a negative error code on failure.
*/
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
{
	unsigned long flags;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	if (runtime->buffer == NULL) {
		snd_printd("snd_rawmidi_transmit_ack: output is not active!!!\n");
		return -EINVAL;
	}
	spin_lock_irqsave(&runtime->lock, flags);
	/* the caller must not ack more than was actually pending */
	snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
	runtime->hw_ptr += count;
	runtime->hw_ptr %= runtime->buffer_size;
	runtime->avail += count;
	substream->bytes += count;
	if (count > 0) {
		/* wake a drain() waiter or a writer waiting for space */
		if (runtime->drain || snd_rawmidi_ready(substream))
			wake_up(&runtime->sleep);
	}
	spin_unlock_irqrestore(&runtime->lock, flags);
	return count;
}
/**
* snd_rawmidi_transmit - copy from the buffer to the device
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: the data size to transfer
*
* Copies data from the buffer to the device and advances the pointer.
*
* Returns the copied size if successful, or a negative error code on failure.
*/
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
			 unsigned char *buffer, int count)
{
	int copied;

	if (!substream->opened)
		return -EBADFD;
	/* peek the pending bytes, then advance the pointer by what we got */
	copied = snd_rawmidi_transmit_peek(substream, buffer, count);
	if (copied < 0)
		return copied;
	return snd_rawmidi_transmit_ack(substream, copied);
}
/*
 * Copy up to @count bytes into the output ring buffer from either a
 * user-space buffer or a kernel buffer (exactly one must be given),
 * then kick the lowlevel driver if the buffer holds data.
 * Returns the number of bytes queued, or a negative error code.
 */
static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
				      const unsigned char __user *userbuf,
				      const unsigned char *kernelbuf,
				      long count)
{
	unsigned long flags;
	long count1, result;
	struct snd_rawmidi_runtime *runtime = substream->runtime;

	if (snd_BUG_ON(!kernelbuf && !userbuf))
		return -EINVAL;
	if (snd_BUG_ON(!runtime->buffer))
		return -EINVAL;
	result = 0;
	spin_lock_irqsave(&runtime->lock, flags);
	if (substream->append) {
		/* append mode is all-or-nothing: refuse a partial write */
		if ((long)runtime->avail < count) {
			spin_unlock_irqrestore(&runtime->lock, flags);
			return -EAGAIN;
		}
	}
	while (count > 0 && runtime->avail > 0) {
		/* chunk limited by the distance to the end of the ring */
		count1 = runtime->buffer_size - runtime->appl_ptr;
		if (count1 > count)
			count1 = count;
		if (count1 > (long)runtime->avail)
			count1 = runtime->avail;
		if (kernelbuf)
			memcpy(runtime->buffer + runtime->appl_ptr,
			       kernelbuf + result, count1);
		else if (userbuf) {
			/* copy_from_user may fault/sleep: drop the lock around it */
			spin_unlock_irqrestore(&runtime->lock, flags);
			if (copy_from_user(runtime->buffer + runtime->appl_ptr,
					   userbuf + result, count1)) {
				spin_lock_irqsave(&runtime->lock, flags);
				result = result > 0 ? result : -EFAULT;
				goto __end;
			}
			spin_lock_irqsave(&runtime->lock, flags);
		}
		runtime->appl_ptr += count1;
		runtime->appl_ptr %= runtime->buffer_size;
		runtime->avail -= count1;
		result += count1;
		count -= count1;
	}
      __end:
	/* data queued?  then trigger the output after dropping the lock */
	count1 = runtime->avail < runtime->buffer_size;
	spin_unlock_irqrestore(&runtime->lock, flags);
	if (count1)
		snd_rawmidi_output_trigger(substream, 1);
	return result;
}
/* Kernel-space write: queue bytes into the output ring buffer. */
long snd_rawmidi_kernel_write(struct snd_rawmidi_substream *substream,
			      const unsigned char *buf, long count)
{
	return snd_rawmidi_kernel_write1(substream,
					 NULL /* no user buffer */,
					 buf, count);
}
/* write(2) handler: queue bytes, blocking for space unless O_NONBLOCK. */
static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *offset)
{
	long result, timeout;
	int count1;
	struct snd_rawmidi_file *rfile;
	struct snd_rawmidi_runtime *runtime;
	struct snd_rawmidi_substream *substream;

	rfile = file->private_data;
	substream = rfile->output;
	/* NOTE(review): unlike the read path there is no NULL check on
	 * rfile->output here — presumably open() guarantees it; confirm. */
	runtime = substream->runtime;
	/* we cannot put an atomic message to our buffer */
	if (substream->append && count > runtime->buffer_size)
		return -EIO;
	result = 0;
	while (count > 0) {
		spin_lock_irq(&runtime->lock);
		while (!snd_rawmidi_ready_append(substream, count)) {
			wait_queue_t wait;

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock_irq(&runtime->lock);
				return result > 0 ? result : -EAGAIN;
			}
			/* wait (bounded to 30s) for the driver to drain space */
			init_waitqueue_entry(&wait, current);
			add_wait_queue(&runtime->sleep, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&runtime->lock);
			timeout = schedule_timeout(30 * HZ);
			remove_wait_queue(&runtime->sleep, &wait);
			if (rfile->rmidi->card->shutdown)
				return -ENODEV;
			if (signal_pending(current))
				return result > 0 ? result : -ERESTARTSYS;
			/* timed out with no progress at all: give up */
			if (!runtime->avail && !timeout)
				return result > 0 ? result : -EIO;
			spin_lock_irq(&runtime->lock);
		}
		spin_unlock_irq(&runtime->lock);
		count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
		if (count1 < 0)
			return result > 0 ? result : count1;
		result += count1;
		buf += count1;
		/* short write in non-blocking mode: stop here */
		if ((size_t)count1 < count && (file->f_flags & O_NONBLOCK))
			break;
		count -= count1;
	}
	if (file->f_flags & O_DSYNC) {
		/* synchronous write: wait until the buffer fully drains */
		spin_lock_irq(&runtime->lock);
		while (runtime->avail != runtime->buffer_size) {
			wait_queue_t wait;
			unsigned int last_avail = runtime->avail;

			init_waitqueue_entry(&wait, current);
			add_wait_queue(&runtime->sleep, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&runtime->lock);
			timeout = schedule_timeout(30 * HZ);
			remove_wait_queue(&runtime->sleep, &wait);
			if (signal_pending(current))
				return result > 0 ? result : -ERESTARTSYS;
			/* no drain progress within the timeout: bail out */
			if (runtime->avail == last_avail && !timeout)
				return result > 0 ? result : -EIO;
			spin_lock_irq(&runtime->lock);
		}
		spin_unlock_irq(&runtime->lock);
	}
	return result;
}
/* poll(2) handler: report readability/writability of the open streams. */
static unsigned int snd_rawmidi_poll(struct file *file, poll_table * wait)
{
	struct snd_rawmidi_file *rfile = file->private_data;
	struct snd_rawmidi_runtime *rt;
	unsigned int mask = 0;

	if (rfile->input != NULL) {
		rt = rfile->input->runtime;
		/* make sure input is flowing before we wait on it */
		snd_rawmidi_input_trigger(rfile->input, 1);
		poll_wait(file, &rt->sleep, wait);
	}
	if (rfile->output != NULL) {
		rt = rfile->output->runtime;
		poll_wait(file, &rt->sleep, wait);
	}
	if (rfile->input != NULL && snd_rawmidi_ready(rfile->input))
		mask |= POLLIN | POLLRDNORM;
	if (rfile->output != NULL && snd_rawmidi_ready(rfile->output))
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}
/*
*/
#ifdef CONFIG_COMPAT
#include "rawmidi_compat.c"
#else
#define snd_rawmidi_ioctl_compat NULL
#endif
/*
*/
/* /proc reader: dump per-substream statistics for one rawmidi device. */
static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
				       struct snd_info_buffer *buffer)
{
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_substream *substream;
	struct snd_rawmidi_runtime *runtime;

	rmidi = entry->private_data;
	snd_iprintf(buffer, "%s\n\n", rmidi->name);
	/* hold open_mutex so substream->opened/runtime stay stable */
	mutex_lock(&rmidi->open_mutex);
	if (rmidi->info_flags & SNDRV_RAWMIDI_INFO_OUTPUT) {
		list_for_each_entry(substream,
				    &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams,
				    list) {
			snd_iprintf(buffer,
				    "Output %d\n"
				    "  Tx bytes     : %lu\n",
				    substream->number,
				    (unsigned long) substream->bytes);
			if (substream->opened) {
				snd_iprintf(buffer,
					    "  Owner PID    : %d\n",
					    pid_vnr(substream->pid));
				runtime = substream->runtime;
				snd_iprintf(buffer,
					    "  Mode         : %s\n"
					    "  Buffer size  : %lu\n"
					    "  Avail        : %lu\n",
					    runtime->oss ? "OSS compatible" : "native",
					    (unsigned long) runtime->buffer_size,
					    (unsigned long) runtime->avail);
			}
		}
	}
	if (rmidi->info_flags & SNDRV_RAWMIDI_INFO_INPUT) {
		list_for_each_entry(substream,
				    &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams,
				    list) {
			snd_iprintf(buffer,
				    "Input %d\n"
				    "  Rx bytes     : %lu\n",
				    substream->number,
				    (unsigned long) substream->bytes);
			if (substream->opened) {
				snd_iprintf(buffer,
					    "  Owner PID    : %d\n",
					    pid_vnr(substream->pid));
				runtime = substream->runtime;
				snd_iprintf(buffer,
					    "  Buffer size  : %lu\n"
					    "  Avail        : %lu\n"
					    "  Overruns     : %lu\n",
					    (unsigned long) runtime->buffer_size,
					    (unsigned long) runtime->avail,
					    (unsigned long) runtime->xruns);
			}
		}
	}
	mutex_unlock(&rmidi->open_mutex);
}
/*
* Register functions
*/
/* File operations for the rawmidi character device nodes. */
static const struct file_operations snd_rawmidi_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_rawmidi_read,
	.write =	snd_rawmidi_write,
	.open =		snd_rawmidi_open,
	.release =	snd_rawmidi_release,
	.llseek =	no_llseek,	/* streams are not seekable */
	.poll =		snd_rawmidi_poll,
	.unlocked_ioctl =	snd_rawmidi_ioctl,
	.compat_ioctl =	snd_rawmidi_ioctl_compat,
};
/*
 * Allocate @count substreams for one direction and link them onto the
 * stream's list.  Partially allocated lists are cleaned up by the
 * caller via snd_rawmidi_free().
 */
static int snd_rawmidi_alloc_substreams(struct snd_rawmidi *rmidi,
					struct snd_rawmidi_str *stream,
					int direction,
					int count)
{
	int idx;

	for (idx = 0; idx < count; idx++) {
		struct snd_rawmidi_substream *substream =
			kzalloc(sizeof(*substream), GFP_KERNEL);

		if (substream == NULL) {
			snd_printk(KERN_ERR "rawmidi: cannot allocate substream\n");
			return -ENOMEM;
		}
		substream->stream = direction;
		substream->number = idx;
		substream->rmidi = rmidi;
		substream->pstr = stream;
		list_add_tail(&substream->list, &stream->substreams);
		stream->substream_count++;
	}
	return 0;
}
/**
* snd_rawmidi_new - create a rawmidi instance
* @card: the card instance
* @id: the id string
* @device: the device index
* @output_count: the number of output streams
* @input_count: the number of input streams
* @rrawmidi: the pointer to store the new rawmidi instance
*
* Creates a new rawmidi instance.
* Use snd_rawmidi_set_ops() to set the operators to the new instance.
*
* Returns zero if successful, or a negative error code on failure.
*/
int snd_rawmidi_new(struct snd_card *card, char *id, int device,
		    int output_count, int input_count,
		    struct snd_rawmidi ** rrawmidi)
{
	static struct snd_device_ops ops = {
		.dev_free = snd_rawmidi_dev_free,
		.dev_register = snd_rawmidi_dev_register,
		.dev_disconnect = snd_rawmidi_dev_disconnect,
	};
	struct snd_rawmidi *rmidi;
	int err;

	if (snd_BUG_ON(!card))
		return -ENXIO;
	if (rrawmidi)
		*rrawmidi = NULL;
	rmidi = kzalloc(sizeof(*rmidi), GFP_KERNEL);
	if (rmidi == NULL) {
		snd_printk(KERN_ERR "rawmidi: cannot allocate\n");
		return -ENOMEM;
	}
	rmidi->card = card;
	rmidi->device = device;
	mutex_init(&rmidi->open_mutex);
	init_waitqueue_head(&rmidi->open_wait);
	INIT_LIST_HEAD(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams);
	INIT_LIST_HEAD(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams);
	if (id != NULL)
		strlcpy(rmidi->id, id, sizeof(rmidi->id));

	err = snd_rawmidi_alloc_substreams(rmidi,
					   &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT],
					   SNDRV_RAWMIDI_STREAM_INPUT,
					   input_count);
	if (err < 0)
		goto error;
	err = snd_rawmidi_alloc_substreams(rmidi,
					   &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT],
					   SNDRV_RAWMIDI_STREAM_OUTPUT,
					   output_count);
	if (err < 0)
		goto error;
	err = snd_device_new(card, SNDRV_DEV_RAWMIDI, rmidi, &ops);
	if (err < 0)
		goto error;

	if (rrawmidi)
		*rrawmidi = rmidi;
	return 0;

 error:
	/* frees any substreams already linked onto the lists */
	snd_rawmidi_free(rmidi);
	return err;
}
/* Unlink and free every substream of one direction. */
static void snd_rawmidi_free_substreams(struct snd_rawmidi_str *stream)
{
	struct snd_rawmidi_substream *s;

	while (!list_empty(&stream->substreams)) {
		s = list_entry(stream->substreams.next,
			       struct snd_rawmidi_substream, list);
		list_del(&s->list);
		kfree(s);
	}
}
static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
{
if (!rmidi)
return 0;
snd_info_free_entry(rmidi->proc_entry);
rmidi->proc_entry = NULL;
mutex_lock(®ister_mutex);
if (rmidi->ops && rmidi->ops->dev_unregister)
rmidi->ops->dev_unregister(rmidi);
mutex_unlock(®ister_mutex);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]);
if (rmidi->private_free)
rmidi->private_free(rmidi);
kfree(rmidi);
return 0;
}
/* snd_device dev_free callback: delegate to snd_rawmidi_free(). */
static int snd_rawmidi_dev_free(struct snd_device *device)
{
	return snd_rawmidi_free(device->device_data);
}
#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
/* Sequencer-bridge teardown: drop our back-pointer to the seq device. */
static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
{
	struct snd_rawmidi *rmidi = device->private_data;
	rmidi->seq_dev = NULL;
}
#endif
static int snd_rawmidi_dev_register(struct snd_device *device)
{
int err;
struct snd_info_entry *entry;
char name[16];
struct snd_rawmidi *rmidi = device->device_data;
if (rmidi->device >= SNDRV_RAWMIDI_DEVICES)
return -ENOMEM;
mutex_lock(®ister_mutex);
if (snd_rawmidi_search(rmidi->card, rmidi->device)) {
mutex_unlock(®ister_mutex);
return -EBUSY;
}
list_add_tail(&rmidi->list, &snd_rawmidi_devices);
sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
rmidi->card, rmidi->device,
&snd_rawmidi_f_ops, rmidi, name)) < 0) {
snd_printk(KERN_ERR "unable to register rawmidi device %i:%i\n", rmidi->card->number, rmidi->device);
list_del(&rmidi->list);
mutex_unlock(®ister_mutex);
return err;
}
if (rmidi->ops && rmidi->ops->dev_register &&
(err = rmidi->ops->dev_register(rmidi)) < 0) {
snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
list_del(&rmidi->list);
mutex_unlock(®ister_mutex);
return err;
}
#ifdef CONFIG_SND_OSSEMUL
rmidi->ossreg = 0;
if ((int)rmidi->device == midi_map[rmidi->card->number]) {
if (snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI,
rmidi->card, 0, &snd_rawmidi_f_ops,
rmidi, name) < 0) {
snd_printk(KERN_ERR "unable to register OSS rawmidi device %i:%i\n", rmidi->card->number, 0);
} else {
rmidi->ossreg++;
#ifdef SNDRV_OSS_INFO_DEV_MIDI
snd_oss_info_register(SNDRV_OSS_INFO_DEV_MIDI, rmidi->card->number, rmidi->name);
#endif
}
}
if ((int)rmidi->device == amidi_map[rmidi->card->number]) {
if (snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI,
rmidi->card, 1, &snd_rawmidi_f_ops,
rmidi, name) < 0) {
snd_printk(KERN_ERR "unable to register OSS rawmidi device %i:%i\n", rmidi->card->number, 1);
} else {
rmidi->ossreg++;
}
}
#endif /* CONFIG_SND_OSSEMUL */
mutex_unlock(®ister_mutex);
sprintf(name, "midi%d", rmidi->device);
entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
if (entry) {
entry->private_data = rmidi;
entry->c.text.read = snd_rawmidi_proc_info_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
rmidi->proc_entry = entry;
#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
if (!rmidi->ops || !rmidi->ops->dev_register) { /* own registration mechanism */
if (snd_seq_device_new(rmidi->card, rmidi->device, SNDRV_SEQ_DEV_ID_MIDISYNTH, 0, &rmidi->seq_dev) >= 0) {
rmidi->seq_dev->private_data = rmidi;
rmidi->seq_dev->private_free = snd_rawmidi_dev_seq_free;
sprintf(rmidi->seq_dev->name, "MIDI %d-%d", rmidi->card->number, rmidi->device);
snd_device_register(rmidi->card, rmidi->seq_dev);
}
}
#endif
return 0;
}
static int snd_rawmidi_dev_disconnect(struct snd_device *device)
{
struct snd_rawmidi *rmidi = device->device_data;
int dir;
mutex_lock(®ister_mutex);
mutex_lock(&rmidi->open_mutex);
wake_up(&rmidi->open_wait);
list_del_init(&rmidi->list);
for (dir = 0; dir < 2; dir++) {
struct snd_rawmidi_substream *s;
list_for_each_entry(s, &rmidi->streams[dir].substreams, list) {
if (s->runtime)
wake_up(&s->runtime->sleep);
}
}
#ifdef CONFIG_SND_OSSEMUL
if (rmidi->ossreg) {
if ((int)rmidi->device == midi_map[rmidi->card->number]) {
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI, rmidi->card, 0);
#ifdef SNDRV_OSS_INFO_DEV_MIDI
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_MIDI, rmidi->card->number);
#endif
}
if ((int)rmidi->device == amidi_map[rmidi->card->number])
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI, rmidi->card, 1);
rmidi->ossreg = 0;
}
#endif /* CONFIG_SND_OSSEMUL */
snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
mutex_unlock(&rmidi->open_mutex);
mutex_unlock(®ister_mutex);
return 0;
}
/**
* snd_rawmidi_set_ops - set the rawmidi operators
* @rmidi: the rawmidi instance
* @stream: the stream direction, SNDRV_RAWMIDI_STREAM_XXX
* @ops: the operator table
*
* Sets the rawmidi operators for the given stream direction.
*/
void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
struct snd_rawmidi_ops *ops)
{
struct snd_rawmidi_substream *substream;
list_for_each_entry(substream, &rmidi->streams[stream].substreams, list)
substream->ops = ops;
}
/*
* ENTRY functions
*/
/* Module init: hook into the control-device ioctl chain and sanity-check
 * the OSS device map parameters. */
static int __init alsa_rawmidi_init(void)
{
	snd_ctl_register_ioctl(snd_rawmidi_control_ioctl);
	snd_ctl_register_ioctl_compat(snd_rawmidi_control_ioctl);
#ifdef CONFIG_SND_OSSEMUL
	{
		int i;

		/* check device map table */
		for (i = 0; i < SNDRV_CARDS; i++) {
			if (midi_map[i] < 0 || midi_map[i] >= SNDRV_RAWMIDI_DEVICES) {
				snd_printk(KERN_ERR "invalid midi_map[%d] = %d\n", i, midi_map[i]);
				midi_map[i] = 0;
			}
			if (amidi_map[i] < 0 || amidi_map[i] >= SNDRV_RAWMIDI_DEVICES) {
				snd_printk(KERN_ERR "invalid amidi_map[%d] = %d\n", i, amidi_map[i]);
				amidi_map[i] = 1;
			}
		}
	}
#endif /* CONFIG_SND_OSSEMUL */
	return 0;
}
/* Module exit: remove the control-device ioctl hooks installed at init. */
static void __exit alsa_rawmidi_exit(void)
{
	snd_ctl_unregister_ioctl(snd_rawmidi_control_ioctl);
	snd_ctl_unregister_ioctl_compat(snd_rawmidi_control_ioctl);
}
module_init(alsa_rawmidi_init)
module_exit(alsa_rawmidi_exit)
EXPORT_SYMBOL(snd_rawmidi_output_params);
EXPORT_SYMBOL(snd_rawmidi_input_params);
EXPORT_SYMBOL(snd_rawmidi_drop_output);
EXPORT_SYMBOL(snd_rawmidi_drain_output);
EXPORT_SYMBOL(snd_rawmidi_drain_input);
EXPORT_SYMBOL(snd_rawmidi_receive);
EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
EXPORT_SYMBOL(snd_rawmidi_transmit);
EXPORT_SYMBOL(snd_rawmidi_new);
EXPORT_SYMBOL(snd_rawmidi_set_ops);
EXPORT_SYMBOL(snd_rawmidi_info_select);
EXPORT_SYMBOL(snd_rawmidi_kernel_open);
EXPORT_SYMBOL(snd_rawmidi_kernel_release);
EXPORT_SYMBOL(snd_rawmidi_kernel_read);
EXPORT_SYMBOL(snd_rawmidi_kernel_write);
| gpl-2.0 |
bkero/android_kernel_teclast_x98pro | net/bluetooth/bnep/netdev.c | 857 | 5949 | /*
BNEP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2001-2002 Inventel Systemes
Written 2001-2002 by
Clément Moreau <clement.moreau@inventel.fr>
David Libault <david.libault@inventel.fr>
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/etherdevice.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include "bnep.h"
#define BNEP_TX_QUEUE_LEN 20
/* Interface-up callback: allow transmissions to start. */
static int bnep_net_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}
/* Interface-down callback: stop accepting frames for transmission. */
static int bnep_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/*
 * Push the interface's multicast list to the remote peer as a BNEP
 * FILTER_MULTI_ADDR_SET control request.
 */
static void bnep_net_set_mc_list(struct net_device *dev)
{
#ifdef CONFIG_BT_BNEP_MC_FILTER
	struct bnep_session *s = netdev_priv(dev);
	struct sock *sk = s->sock->sk;
	struct bnep_set_filter_req *r;
	struct sk_buff *skb;
	int size;

	BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));

	/* worst case: every filter slot holds an address range (2 addrs) */
	size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Multicast list allocation failed", dev->name);
		return;
	}

	r = (void *) skb->data;
	__skb_put(skb, sizeof(*r));

	r->type = BNEP_CONTROL;
	r->ctrl = BNEP_FILTER_MULTI_ADDR_SET;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		u8 start[ETH_ALEN] = { 0x01 };

		/* Request all addresses */
		memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN);
		memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
		r->len = htons(ETH_ALEN * 2);
	} else {
		struct netdev_hw_addr *ha;
		int i, len = skb->len;

		/* each entry is a [start,end] address range; broadcast first */
		if (dev->flags & IFF_BROADCAST) {
			memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
			memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
		}

		/* FIXME: We should group addresses here. */

		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			if (i == BNEP_MAX_MULTICAST_FILTERS)
				break;
			/* single-address range: start == end */
			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
			i++;
		}
		r->len = htons(skb->len - len);
	}

	/* queue the control request; the session thread will transmit it */
	skb_queue_tail(&sk->sk_write_queue, skb);
	wake_up_interruptible(sk_sleep(sk));
#endif
}
/* MAC-address hook: accepted but intentionally ignored (address is fixed). */
static int bnep_net_set_mac_addr(struct net_device *dev, void *arg)
{
	BT_DBG("%s", dev->name);
	return 0;
}
/* tx watchdog hook: simply re-enable the queue and let tx retry. */
static void bnep_net_timeout(struct net_device *dev)
{
	BT_DBG("net_timeout");
	netif_wake_queue(dev);
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
/*
 * Return 1 (drop) when the destination is multicast/broadcast and its
 * hash is not set in the session's multicast filter bitmap, else 0 (keep).
 */
static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
	struct ethhdr *eh = (void *) skb->data;

	/* Low bit of the first destination octet marks a group address. */
	if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter))
		return 1;
	return 0;
}
#endif
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
	struct ethhdr *eh = (void *) skb->data;
	u16 proto = ntohs(eh->h_proto);

	/* >= ETH_P_802_3_MIN means a real Ethernet II protocol id. */
	if (proto >= ETH_P_802_3_MIN)
		return proto;

	/* Otherwise the field is a length; 0xFFFF marks raw 802.3
	 * (cf. eth_type_trans), anything else is 802.2 LLC. */
	if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
		return ETH_P_802_3;

	return ETH_P_802_2;
}
/*
 * Check the frame's protocol against the session's protocol filter
 * ranges.  Return 0 to keep the frame, 1 to drop it.
 */
static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
	struct bnep_proto_filter *filter = s->proto_filter;
	u16 proto = bnep_net_eth_proto(skb);
	int idx = 0;

	/* An entry with end == 0 terminates the filter table. */
	while (idx < BNEP_MAX_PROTO_FILTERS && filter[idx].end) {
		if (proto >= filter[idx].start && proto <= filter[idx].end)
			return 0;
		idx++;
	}

	BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto);
	return 1;
}
#endif
/*
 * Transmit hook: apply the session's multicast/protocol filters, then
 * queue the frame on the session socket; the session thread performs
 * the actual L2CAP send.  Always returns NETDEV_TX_OK (dropped frames
 * are freed here).
 */
static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct bnep_session *s = netdev_priv(dev);
	struct sock *sk = s->sock->sk;

	BT_DBG("skb %p, dev %p", skb, dev);

#ifdef CONFIG_BT_BNEP_MC_FILTER
	if (bnep_net_mc_filter(skb, s)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
#endif

#ifdef CONFIG_BT_BNEP_PROTO_FILTER
	if (bnep_net_proto_filter(skb, s)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
#endif

	/*
	 * We cannot send L2CAP packets from here as we are potentially in a bh.
	 * So we have to queue them and wake up session thread which is sleeping
	 * on the sk_sleep(sk).
	 */
	dev->trans_start = jiffies;
	skb_queue_tail(&sk->sk_write_queue, skb);
	wake_up_interruptible(sk_sleep(sk));

	if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
		BT_DBG("tx queue is full");

		/* Stop queuing.
		 * Session thread will do netif_wake_queue() */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* net_device operations for the BNEP virtual Ethernet interface. */
static const struct net_device_ops bnep_netdev_ops = {
	.ndo_open            = bnep_net_open,
	.ndo_stop            = bnep_net_close,
	.ndo_start_xmit	     = bnep_net_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_rx_mode     = bnep_net_set_mc_list,
	.ndo_set_mac_address = bnep_net_set_mac_addr,
	.ndo_tx_timeout      = bnep_net_timeout,
	.ndo_change_mtu	     = eth_change_mtu,
};
/*
 * Initialize the BNEP virtual Ethernet device: Ethernet defaults, our
 * netdev ops, and a 2 second tx watchdog timeout.
 */
void bnep_net_setup(struct net_device *dev)
{
	eth_broadcast_addr(dev->broadcast);
	dev->addr_len = ETH_ALEN;

	ether_setup(dev);
	/* Frames are consumed asynchronously via the session queue, so
	 * shared tx skbs are not supported on this device. */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->netdev_ops = &bnep_netdev_ops;

	dev->watchdog_timeo = HZ * 2;
}
| gpl-2.0 |
cjdoucette/XIA-for-Linux | drivers/video/backlight/ep93xx_bl.c | 1881 | 3493 | /*
* Driver for the Cirrus EP93xx lcd backlight
*
* Copyright (c) 2010 H Hartley Sweeten <hsweeten@visionengravers.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver controls the pulse width modulated brightness control output,
* BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/backlight.h>
/* PWM terminal count written alongside each brightness value. */
#define EP93XX_MAX_COUNT	255
/* Brightness range exposed to the backlight core, and the boot default. */
#define EP93XX_MAX_BRIGHT	255
#define EP93XX_DEF_BRIGHT	128

/* Per-device driver state. */
struct ep93xxbl {
	void __iomem *mmio;	/* brightness control register (region shared with the fb driver) */
	int brightness;		/* last value programmed into the hardware */
};
/*
 * Program a brightness level into the hardware and remember it.
 * The register takes the duty cycle in bits 15:8 and the terminal
 * count in bits 7:0.
 */
static int ep93xxbl_set(struct backlight_device *bl, int brightness)
{
	struct ep93xxbl *ep93xxbl = bl_get_data(bl);

	writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio);

	ep93xxbl->brightness = brightness;

	return 0;
}
/*
 * Backlight core callback: apply the requested brightness, forcing the
 * output off while the device or framebuffer is blanked.
 */
static int ep93xxbl_update_status(struct backlight_device *bl)
{
	int level;

	if (bl->props.power != FB_BLANK_UNBLANK ||
	    bl->props.fb_blank != FB_BLANK_UNBLANK)
		level = 0;
	else
		level = bl->props.brightness;

	return ep93xxbl_set(bl, level);
}
/* Backlight core callback: report the last programmed brightness. */
static int ep93xxbl_get_brightness(struct backlight_device *bl)
{
	struct ep93xxbl *ep93xxbl = bl_get_data(bl);

	return ep93xxbl->brightness;
}
/* Operations registered with the backlight core. */
static const struct backlight_ops ep93xxbl_ops = {
	.update_status	= ep93xxbl_update_status,
	.get_brightness	= ep93xxbl_get_brightness,
};
/*
 * Probe: map the brightness register, register with the backlight core
 * and program the default brightness.  All resources are devm-managed,
 * so no remove-time cleanup is needed.
 */
static int ep93xxbl_probe(struct platform_device *dev)
{
	struct ep93xxbl *ep93xxbl;
	struct backlight_device *bl;
	struct backlight_properties props;
	struct resource *res;

	ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL);
	if (!ep93xxbl)
		return -ENOMEM;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/*
	 * FIXME - We don't do a request_mem_region here because we are
	 * sharing the register space with the framebuffer driver (see
	 * drivers/video/ep93xx-fb.c) and doing so will cause the second
	 * loaded driver to return -EBUSY.
	 *
	 * NOTE: No locking is required; the framebuffer does not touch
	 * this register.
	 */
	ep93xxbl->mmio = devm_ioremap(&dev->dev, res->start,
				      resource_size(res));
	if (!ep93xxbl->mmio)
		return -ENXIO;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = EP93XX_MAX_BRIGHT;
	bl = devm_backlight_device_register(&dev->dev, dev->name, &dev->dev,
					    ep93xxbl, &ep93xxbl_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	bl->props.brightness = EP93XX_DEF_BRIGHT;

	platform_set_drvdata(dev, bl);

	/* Push the default brightness to the hardware immediately. */
	ep93xxbl_update_status(bl);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* Suspend: force the backlight off without touching the cached state. */
static int ep93xxbl_suspend(struct device *dev)
{
	struct backlight_device *bl = dev_get_drvdata(dev);

	return ep93xxbl_set(bl, 0);
}

/* Resume: re-apply whatever the backlight core thinks is current. */
static int ep93xxbl_resume(struct device *dev)
{
	struct backlight_device *bl = dev_get_drvdata(dev);

	backlight_update_status(bl);
	return 0;
}
#endif
/* PM ops are no-ops unless CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(ep93xxbl_pm_ops, ep93xxbl_suspend, ep93xxbl_resume);

static struct platform_driver ep93xxbl_driver = {
	.driver		= {
		.name	= "ep93xx-bl",
		.pm	= &ep93xxbl_pm_ops,
	},
	.probe		= ep93xxbl_probe,
};

module_platform_driver(ep93xxbl_driver);

MODULE_DESCRIPTION("EP93xx Backlight Driver");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-bl");
| gpl-2.0 |
djvoleur/kernel_samsung_exynos7420 | drivers/infiniband/hw/cxgb4/resource.c | 2905 | 12208 | /*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"
/*
 * Build the qid allocator from the LLD's queue range.  Only ids whose
 * low qpmask bits are clear are released into the table, so every
 * allocation yields a naturally aligned block of qpmask + 1 ids.
 */
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->lldi.vr->qp.start,
				rdev->lldi.vr->qp.size,
				rdev->lldi.vr->qp.size, 0))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}
/* nr_* must be power of 2 */
/*
 * Initialize the TPT, qid and PD allocators.  On failure, every table
 * initialized so far is torn down via the goto unwind chain and
 * -ENOMEM is returned.
 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;
	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
				  C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
				  nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
 pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
	return -ENOMEM;
}
/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 id = c4iw_id_alloc(id_table);

	/* The allocator reports exhaustion as (u32)-1; map that to 0. */
	return (id == (u32)(-1)) ? 0 : id;
}
/* Return an id previously obtained from c4iw_get_resource(). */
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	c4iw_id_free(id_table, entry);
}
/*
 * Allocate a CQ qid: prefer the per-context free cache; otherwise carve
 * an aligned block of qpmask + 1 ids out of the global table, cache the
 * sibling ids on the cqid list, and also stash the same range on the
 * qpid list (they all map to the same db/gts page).  Returns 0 when no
 * qid is available.
 */
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		/* Fast path: pop a previously cached qid. */
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		/* Cache the unused sibling ids for later CQ allocations. */
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	/* Track the high-water mark for the debugfs stats. */
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}
/*
 * Return a CQ qid to the per-context cache (not to the global table).
 * If the bookkeeping entry cannot be allocated the qid is silently
 * leaked rather than corrupting the list.
 */
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}
/*
 * Allocate a QP qid: prefer the per-context free cache; otherwise carve
 * an aligned block of qpmask + 1 ids out of the global table, cache the
 * sibling ids on the qpid list, and also stash the same range on the
 * cqid list (they all map to the same db/gts page).  Returns 0 when no
 * qid is available.
 */
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		/* Fast path: pop a previously cached qid. */
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		/* Cache the unused sibling ids for later QP allocations. */
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		/*
		 * BUGFIX: start at qid + 1, matching c4iw_get_cqid().  The
		 * previous code started at qid, but allocated qids are
		 * aligned (qid & qpmask == 0) so that loop never iterated
		 * and the sibling ids were never added to the cqid cache.
		 * (qid itself was already queued just above.)
		 */
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	/* Track the high-water mark for the debugfs stats. */
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}
/*
 * Return a QP qid to the per-context cache (not to the global table).
 * If the bookkeeping entry cannot be allocated the qid is silently
 * leaked rather than corrupting the list.
 */
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}
/* Tear down all three id tables created by c4iw_init_resource(). */
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}
/*
* PBL Memory Manager. Uses Linux generic allocator.
*/
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
/*
 * Allocate 'size' bytes of adapter PBL memory.  Returns the adapter
 * address as a u32, or 0 on exhaustion.  Stats are rounded up to the
 * pool's minimum granularity (1 << MIN_PBL_SHIFT).
 */
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}
/* Release PBL memory obtained from c4iw_pblpool_alloc(). */
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}
/*
 * Create the PBL genalloc pool and seed it with the adapter's PBL
 * range.  If gen_pool_add() fails, retry with progressively smaller
 * chunks; giving up below 1024 << MIN_PBL_SHIFT still returns 0 so the
 * driver can run with whatever was added.
 */
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			/* Halve the chunk size and try again. */
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}
/* Destroy the PBL pool; all allocations must have been freed first. */
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}
/*
* RQT Memory Manager. Uses Linux generic allocator.
*/
#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
/*
 * Allocate RQT memory for 'size' entries (64 bytes each, hence the
 * << 6).  Returns the adapter address, or 0 on exhaustion (with a
 * rate-limited warning).
 */
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	if (!addr)
		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
				   pci_name(rdev->lldi.pdev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}
/* Release RQT memory; 'size' is in 64-byte entries as on alloc. */
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}
/*
 * Create the RQT genalloc pool from the adapter's RQ range, shrinking
 * the chunk size on gen_pool_add() failure, same strategy as
 * c4iw_pblpool_create().
 */
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			/* Halve the chunk size and try again. */
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}
/* Destroy the RQT pool; all allocations must have been freed first. */
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}
/*
* On-Chip QP Memory.
*/
#define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */
/*
 * Allocate on-chip QP memory.  Returns the adapter address, or 0 on
 * exhaustion; only successful allocations update the stats.
 */
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	if (addr) {
		mutex_lock(&rdev->stats.lock);
		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
		mutex_unlock(&rdev->stats.lock);
	}
	return (u32)addr;
}
/* Release on-chip QP memory obtained from c4iw_ocqp_pool_alloc(). */
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}
/*
 * Create the on-chip QP genalloc pool from the adapter's OCQ range,
 * shrinking the chunk size on gen_pool_add() failure, same strategy
 * as the PBL/RQT pool creators above.
 */
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			/* Halve the chunk size and try again. */
			chunk >>= 1;
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}
/* Destroy the OCQP pool; all allocations must have been freed first. */
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}
| gpl-2.0 |
ngxson/SXDNickiLolly | arch/um/kernel/skas/mmu.c | 4697 | 3625 | /*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "linux/mm.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"
extern int __syscall_stub_start;
/*
 * Map the kernel page backing 'kernel' into the process address space
 * at 'proc' as a present, read-only PTE, allocating any missing
 * intermediate page-table levels.  Returns 0 or -ENOMEM (unwinding
 * the levels allocated so far).
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, NULL, pmd, proc);
	if (!pte)
		goto out_pte;

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}
/*
 * Arch hook run when a new mm is created: allocates the stub stack page
 * (when stubs are in use), creates the host address space (a /proc/mm
 * fd in proc_mm mode, otherwise a ptraced host process), and clones the
 * LDT from the parent context.  Returns 0 or a negative errno.
 *
 * BUGFIX: the source contained the mis-encoded token "¤t" (an
 * HTML-entity mangling of "&current"), which does not compile; the
 * intended expression "&current->mm->context" is restored below.
 */
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;
	}

	to_mm->id.stack = stack;
	/* Inherit from the caller's context unless this is the initial mm. */
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk(KERN_ERR "init_new_context_skas - "
			       "new_mm failed, errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else to_mm->id.u.pid = start_userspace(stack);

		if (to_mm->id.u.pid < 0) {
			ret = to_mm->id.u.pid;
			goto out_free;
		}
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}
/*
 * Map the syscall stub code page and the per-mm stub stack page into
 * the new address space and register them as a special VMA.  On any
 * failure the process is killed with SIGSEGV, since it cannot run
 * without its stubs.
 */
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	if (!skas_needs_stub)
		return;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) &__syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

out:
	force_sigsegv(SIGSEGV, current);
}
/*
 * Arch hook on address-space teardown: clear the stub code and stub
 * data PTEs if they were ever installed.
 */
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *stub_pte;

	stub_pte = virt_to_pte(mm, STUB_CODE);
	if (stub_pte != NULL)
		pte_clear(mm, STUB_CODE, stub_pte);

	stub_pte = virt_to_pte(mm, STUB_DATA);
	if (stub_pte != NULL)
		pte_clear(mm, STUB_DATA, stub_pte);
}
/*
 * Arch hook on mm destruction: close the host /proc/mm fd or kill the
 * ptraced host process, then free the stub stack page and the LDT.
 */
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else {
		/*
		 * If init_new_context wasn't called, this will be
		 * zero, resulting in a kill(0), which will result in the
		 * whole UML suddenly dying.  Also, cover negative and
		 * 1 cases, since they shouldn't happen either.
		 */
		if (mmu->id.u.pid < 2) {
			printk(KERN_ERR "corrupt mm_context - pid = %d\n",
			       mmu->id.u.pid);
			return;
		}
		os_kill_ptraced_process(mmu->id.u.pid, 1);
	}

	if (skas_needs_stub)
		free_page(mmu->id.stack);

	free_ldt(mmu);
}
| gpl-2.0 |
halaszk/halaszk-UNIVERSAL5420 | arch/arm/mach-clps711x/autcpu12.c | 4953 | 2144 | /*
* linux/arch/arm/mach-clps711x/autcpu12.c
*
* (c) 2001 Thomas Gleixner, autronix automation <gleixner@autronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/sizes.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <mach/autcpu12.h>
#include "common.h"
/*
* The on-chip registers are given a size of 1MB so that a section can
* be used to map them; this saves a page table. This is the place to
* add mappings for ROM, expansion memory, PCMCIA, etc. (if static
* mappings are chosen for those areas).
*
*/
/* Static I/O mapping: 1MB section covering the CS8900A Ethernet chip. */
static struct map_desc autcpu12_io_desc[] __initdata = {
	/* memory-mapped extra io and CS8900A Ethernet chip */
	/* ethernet chip */
	{
		.virtual	= AUTCPU12_VIRT_CS8900A,
		.pfn		= __phys_to_pfn(AUTCPU12_PHYS_CS8900A),
		.length		= SZ_1M,
		.type		= MT_DEVICE
	}
};
/* Install the SoC-common mappings, then the board-specific ones above. */
void __init autcpu12_map_io(void)
{
	clps711x_map_io();
	iotable_init(autcpu12_io_desc, ARRAY_SIZE(autcpu12_io_desc));
}
/* Machine descriptor for the autronix autcpu12 board. */
MACHINE_START(AUTCPU12, "autronix autcpu12")
	/* Maintainer: Thomas Gleixner */
	.atag_offset	= 0x20000,
	.map_io		= autcpu12_map_io,
	.init_irq	= clps711x_init_irq,
	.timer		= &clps711x_timer,
	.restart	= clps711x_restart,
MACHINE_END
| gpl-2.0 |
damienyong/Kernel-3.0.8 | kernel/drivers/tty/serial/bcm63xx_uart.c | 8025 | 21239 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Derived from many drivers using generic_serial interface.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*
* Serial driver for BCM63xx integrated UART.
*
* Hardware flow control was _not_ tested since I only have RX/TX on
* my board.
*/
#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/clk.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <bcm63xx_clk.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#define BCM63XX_NR_UARTS 2
static struct uart_port ports[BCM63XX_NR_UARTS];
/*
* rx interrupt mask / stat
*
* mask:
* - rx fifo full
* - rx fifo above threshold
* - rx fifo not empty for too long
*/
#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
UART_IR_MASK(UART_IR_RXTHRESH) | \
UART_IR_MASK(UART_IR_RXTIMEOUT))
#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
UART_IR_STAT(UART_IR_RXTHRESH) | \
UART_IR_STAT(UART_IR_RXTIMEOUT))
/*
* tx interrupt mask / stat
*
* mask:
* - tx fifo empty
* - tx fifo below threshold
*/
#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
UART_IR_MASK(UART_IR_TXTRESH))
#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
UART_IR_STAT(UART_IR_TXTRESH))
/*
* external input interrupt
*
* mask: any edge on CTS, DCD
*/
#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
/*
 * handy uart register accessor
 */
/* Read a 32-bit UART register at 'offset' from the port's MMIO base. */
static inline unsigned int bcm_uart_readl(struct uart_port *port,
					  unsigned int offset)
{
	return bcm_readl(port->membase + offset);
}
/* Write a 32-bit UART register at 'offset' from the port's MMIO base. */
static inline void bcm_uart_writel(struct uart_port *port,
				   unsigned int value, unsigned int offset)
{
	bcm_writel(value, port->membase + offset);
}
/*
 * serial core request to check if uart tx fifo is empty
 */
static unsigned int bcm_uart_tx_empty(struct uart_port *port)
{
	/* The TXEMPTY interrupt status bit directly reflects the fifo. */
	return (bcm_uart_readl(port, UART_IR_REG) &
		UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
}
/*
 * serial core request to set RTS and DTR pin state and loopback mode
 */
static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	unsigned int val;

	val = bcm_uart_readl(port, UART_MCTL_REG);
	val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
	/* invert of written value is reflected on the pin */
	if (!(mctrl & TIOCM_DTR))
		val |= UART_MCTL_DTR_MASK;
	if (!(mctrl & TIOCM_RTS))
		val |= UART_MCTL_RTS_MASK;
	bcm_uart_writel(port, val, UART_MCTL_REG);

	/* Loopback is controlled from a separate register. */
	val = bcm_uart_readl(port, UART_CTL_REG);
	if (mctrl & TIOCM_LOOP)
		val |= UART_CTL_LOOPBACK_MASK;
	else
		val &= ~UART_CTL_LOOPBACK_MASK;
	bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
 * serial core request to return RI, CTS, DCD and DSR pin state
 */
static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
{
	unsigned int val, mctrl;

	mctrl = 0;
	/* Translate the raw external-input register bits into TIOCM_*. */
	val = bcm_uart_readl(port, UART_EXTINP_REG);
	if (val & UART_EXTINP_RI_MASK)
		mctrl |= TIOCM_RI;
	if (val & UART_EXTINP_CTS_MASK)
		mctrl |= TIOCM_CTS;
	if (val & UART_EXTINP_DCD_MASK)
		mctrl |= TIOCM_CD;
	if (val & UART_EXTINP_DSR_MASK)
		mctrl |= TIOCM_DSR;
	return mctrl;
}
/*
 * serial core request to disable tx ASAP (used for flow control)
 */
static void bcm_uart_stop_tx(struct uart_port *port)
{
	unsigned int val;

	/* Disable the transmitter itself... */
	val = bcm_uart_readl(port, UART_CTL_REG);
	val &= ~(UART_CTL_TXEN_MASK);
	bcm_uart_writel(port, val, UART_CTL_REG);

	/* ...and mask the tx interrupts. */
	val = bcm_uart_readl(port, UART_IR_REG);
	val &= ~UART_TX_INT_MASK;
	bcm_uart_writel(port, val, UART_IR_REG);
}
/*
 * serial core request to (re)enable tx
 */
static void bcm_uart_start_tx(struct uart_port *port)
{
	unsigned int val;

	/* Unmask tx interrupts, then enable the transmitter. */
	val = bcm_uart_readl(port, UART_IR_REG);
	val |= UART_TX_INT_MASK;
	bcm_uart_writel(port, val, UART_IR_REG);

	val = bcm_uart_readl(port, UART_CTL_REG);
	val |= UART_CTL_TXEN_MASK;
	bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
 * serial core request to stop rx, called before port shutdown
 */
static void bcm_uart_stop_rx(struct uart_port *port)
{
	unsigned int val;

	/* Only the rx interrupts are masked; the receiver stays on. */
	val = bcm_uart_readl(port, UART_IR_REG);
	val &= ~UART_RX_INT_MASK;
	bcm_uart_writel(port, val, UART_IR_REG);
}
/*
 * serial core request to enable modem status interrupt reporting
 */
static void bcm_uart_enable_ms(struct uart_port *port)
{
	unsigned int val;

	/* Unmask the external-input (CTS/DCD edge) interrupt. */
	val = bcm_uart_readl(port, UART_IR_REG);
	val |= UART_IR_MASK(UART_IR_EXTIP);
	bcm_uart_writel(port, val, UART_IR_REG);
}
/*
 * serial core request to start/stop emitting break char
 */
static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
{
	unsigned long flags;
	unsigned int val;

	/* Read-modify-write of the control register under the port lock. */
	spin_lock_irqsave(&port->lock, flags);

	val = bcm_uart_readl(port, UART_CTL_REG);
	if (ctl)
		val |= UART_CTL_XMITBRK_MASK;
	else
		val &= ~UART_CTL_XMITBRK_MASK;
	bcm_uart_writel(port, val, UART_CTL_REG);

	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * return port type in string format
 */
static const char *bcm_uart_type(struct uart_port *port)
{
	if (port->type != PORT_BCM63XX)
		return NULL;
	return "bcm63xx_uart";
}
/*
 * read all chars in rx fifo and send them to core
 */
static void bcm_uart_do_rx(struct uart_port *port)
{
	struct tty_struct *tty;
	unsigned int max_count;

	/* limit number of char read in interrupt, should not be
	 * higher than fifo size anyway since we're much faster than
	 * serial port */
	max_count = 32;
	tty = port->state->port.tty;
	do {
		unsigned int iestat, c, cstat;
		char flag;

		/* get overrun/fifo empty information from ier
		 * register */
		iestat = bcm_uart_readl(port, UART_IR_REG);

		if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
			unsigned int val;

			/* fifo reset is required to clear
			 * interrupt */
			val = bcm_uart_readl(port, UART_CTL_REG);
			val |= UART_CTL_RSTRXFIFO_MASK;
			bcm_uart_writel(port, val, UART_CTL_REG);

			port->icount.overrun++;
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		}

		if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
			break;

		/* Status bits for the char come back in the high bits
		 * of the same fifo read. */
		cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
		port->icount.rx++;
		flag = TTY_NORMAL;
		c &= 0xff;

		if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
			/* do stats first */
			if (cstat & UART_FIFO_BRKDET_MASK) {
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}

			if (cstat & UART_FIFO_PARERR_MASK)
				port->icount.parity++;
			if (cstat & UART_FIFO_FRAMEERR_MASK)
				port->icount.frame++;

			/* update flag wrt read_status_mask */
			cstat &= port->read_status_mask;
			if (cstat & UART_FIFO_BRKDET_MASK)
				flag = TTY_BREAK;
			if (cstat & UART_FIFO_FRAMEERR_MASK)
				flag = TTY_FRAME;
			if (cstat & UART_FIFO_PARERR_MASK)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(port, c))
			continue;

		if ((cstat & port->ignore_status_mask) == 0)
			tty_insert_flip_char(tty, c, flag);

	} while (--max_count);

	tty_flip_buffer_push(tty);
}
/*
 * fill tx fifo with chars to send, stop when fifo is about to be full
 * or when all chars have been sent.
 */
static void bcm_uart_do_tx(struct uart_port *port)
{
	struct circ_buf *xmit;
	unsigned int val, max_count;

	/* A pending x_char (flow-control char) goes out first, alone. */
	if (port->x_char) {
		bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if (uart_tx_stopped(port)) {
		bcm_uart_stop_tx(port);
		return;
	}

	xmit = &port->state->xmit;
	if (uart_circ_empty(xmit))
		goto txq_empty;

	/* Compute remaining fifo space from the current fill level. */
	val = bcm_uart_readl(port, UART_MCTL_REG);
	val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
	max_count = port->fifosize - val;

	while (max_count--) {
		unsigned int c;

		c = xmit->buf[xmit->tail];
		bcm_uart_writel(port, c, UART_FIFO_REG);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		goto txq_empty;
	return;

txq_empty:
	/* nothing to send, disable transmit interrupt */
	val = bcm_uart_readl(port, UART_IR_REG);
	val &= ~UART_TX_INT_MASK;
	bcm_uart_writel(port, val, UART_IR_REG);
	return;
}
/*
* process uart interrupt
*/
static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
{
struct uart_port *port;
unsigned int irqstat;
port = dev_id;
/* port->lock serializes against the serial core; irqs are already
 * off in a hard irq handler, so a plain spin_lock suffices */
spin_lock(&port->lock);
irqstat = bcm_uart_readl(port, UART_IR_REG);
if (irqstat & UART_RX_INT_STAT)
bcm_uart_do_rx(port);
if (irqstat & UART_TX_INT_STAT)
bcm_uart_do_tx(port);
/* external input irq: CTS/DCD modem-line edges */
if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
unsigned int estat;
estat = bcm_uart_readl(port, UART_EXTINP_REG);
if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
uart_handle_cts_change(port,
estat & UART_EXTINP_CTS_MASK);
if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
uart_handle_dcd_change(port,
estat & UART_EXTINP_DCD_MASK);
}
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
/*
* enable rx & tx operation on uart
*/
/* turn on the baud rate generator and both data paths in one RMW */
static void bcm_uart_enable(struct uart_port *port)
{
	unsigned int ctl;

	ctl = bcm_uart_readl(port, UART_CTL_REG);
	ctl |= UART_CTL_BRGEN_MASK;
	ctl |= UART_CTL_TXEN_MASK;
	ctl |= UART_CTL_RXEN_MASK;
	bcm_uart_writel(port, ctl, UART_CTL_REG);
}
/*
* disable rx & tx operation on uart
*/
/* turn off the baud rate generator and both data paths in one RMW */
static void bcm_uart_disable(struct uart_port *port)
{
	unsigned int ctl;

	ctl = bcm_uart_readl(port, UART_CTL_REG);
	ctl &= ~UART_CTL_BRGEN_MASK;
	ctl &= ~UART_CTL_TXEN_MASK;
	ctl &= ~UART_CTL_RXEN_MASK;
	bcm_uart_writel(port, ctl, UART_CTL_REG);
}
/*
* clear all unread data in rx fifo and unsent data in tx fifo
*/
/* drop all unread rx data and unsent tx data */
static void bcm_uart_flush(struct uart_port *port)
{
	unsigned int ctl;

	/* request a reset of both fifos */
	ctl = bcm_uart_readl(port, UART_CTL_REG);
	ctl |= UART_CTL_RSTRXFIFO_MASK;
	ctl |= UART_CTL_RSTTXFIFO_MASK;
	bcm_uart_writel(port, ctl, UART_CTL_REG);

	/* dummy fifo read, discarded on purpose: the access itself makes
	 * sure all pending irq status bits are cleared */
	(void)bcm_uart_readl(port, UART_FIFO_REG);
}
/*
* serial core request to initialize uart and start rx operation
*/
static int bcm_uart_startup(struct uart_port *port)
{
unsigned int val;
int ret;
/* mask all irq and flush port */
bcm_uart_disable(port);
bcm_uart_writel(port, 0, UART_IR_REG);
bcm_uart_flush(port);
/* clear any pending external input interrupt */
(void)bcm_uart_readl(port, UART_EXTINP_REG);
/* set rx/tx fifo thresh to fifo half size */
val = bcm_uart_readl(port, UART_MCTL_REG);
val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
bcm_uart_writel(port, val, UART_MCTL_REG);
/* set rx fifo timeout to 1 char time */
val = bcm_uart_readl(port, UART_CTL_REG);
val &= ~UART_CTL_RXTMOUTCNT_MASK;
val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
bcm_uart_writel(port, val, UART_CTL_REG);
/* report any edge on dcd and cts */
val = UART_EXTINP_INT_MASK;
val |= UART_EXTINP_DCD_NOSENSE_MASK;
val |= UART_EXTINP_CTS_NOSENSE_MASK;
bcm_uart_writel(port, val, UART_EXTINP_REG);
/* register irq and enable rx interrupts */
ret = request_irq(port->irq, bcm_uart_interrupt, 0,
bcm_uart_type(port), port);
if (ret)
return ret;
/* rx irq only here; the tx irq is presumably enabled on demand by
 * start_tx (bcm_uart_do_tx masks it again when the queue drains) */
bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
bcm_uart_enable(port);
return 0;
}
/*
* serial core request to flush & disable uart
*/
static void bcm_uart_shutdown(struct uart_port *port)
{
unsigned long flags;
/* mask every uart interrupt under the port lock so a concurrent
 * irq handler cannot re-enable anything in between */
spin_lock_irqsave(&port->lock, flags);
bcm_uart_writel(port, 0, UART_IR_REG);
spin_unlock_irqrestore(&port->lock, flags);
bcm_uart_disable(port);
bcm_uart_flush(port);
free_irq(port->irq, port);
}
/*
* serial core request to change current uart setting
*/
static void bcm_uart_set_termios(struct uart_port *port,
struct ktermios *new,
struct ktermios *old)
{
unsigned int ctl, baud, quot, ier;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* disable uart while changing speed */
bcm_uart_disable(port);
bcm_uart_flush(port);
/* update Control register */
ctl = bcm_uart_readl(port, UART_CTL_REG);
ctl &= ~UART_CTL_BITSPERSYM_MASK;
/* hardware encodes the word length as (bits - 5) */
switch (new->c_cflag & CSIZE) {
case CS5:
ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
break;
case CS6:
ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
break;
case CS7:
ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
break;
default:
/* CS8 and anything unrecognized */
ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
break;
}
ctl &= ~UART_CTL_STOPBITS_MASK;
if (new->c_cflag & CSTOPB)
ctl |= UART_CTL_STOPBITS_2;
else
ctl |= UART_CTL_STOPBITS_1;
/* parity is enabled for rx and tx together */
ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
if (new->c_cflag & PARENB)
ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
/* NOTE(review): PARODD sets the *PAREVEN bits, so the bit polarity
 * is presumably 1 = odd despite the name - confirm vs datasheet */
ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
if (new->c_cflag & PARODD)
ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
bcm_uart_writel(port, ctl, UART_CTL_REG);
/* update Baudword register; the divisor is programmed minus one */
baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud) - 1;
bcm_uart_writel(port, quot, UART_BAUD_REG);
/* update Interrupt register: modem-status irq only when wanted */
ier = bcm_uart_readl(port, UART_IR_REG);
ier &= ~UART_IR_MASK(UART_IR_EXTIP);
if (UART_ENABLE_MS(port, new->c_cflag))
ier |= UART_IR_MASK(UART_IR_EXTIP);
bcm_uart_writel(port, ier, UART_IR_REG);
/* update read/ignore mask */
port->read_status_mask = UART_FIFO_VALID_MASK;
if (new->c_iflag & INPCK) {
port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
port->read_status_mask |= UART_FIFO_PARERR_MASK;
}
if (new->c_iflag & (BRKINT))
port->read_status_mask |= UART_FIFO_BRKDET_MASK;
port->ignore_status_mask = 0;
if (new->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
if (new->c_iflag & IGNBRK)
port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
/* with CREAD off, every received character is dropped */
if (!(new->c_cflag & CREAD))
port->ignore_status_mask |= UART_FIFO_VALID_MASK;
uart_update_timeout(port, new->c_cflag, baud);
bcm_uart_enable(port);
spin_unlock_irqrestore(&port->lock, flags);
}
/*
* serial core request to claim uart iomem
*/
static int bcm_uart_request_port(struct uart_port *port)
{
unsigned int size;
size = RSET_UART_SIZE;
/* reserve the register window first, then map it */
if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
}
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
/* undo the reservation on mapping failure */
release_mem_region(port->mapbase, size);
return -EBUSY;
}
return 0;
}
/*
* serial core request to release uart iomem
*/
static void bcm_uart_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, RSET_UART_SIZE);
iounmap(port->membase);
}
/*
* serial core request to do any port required autoconfiguration
*/
/* autoconfiguration hook: claim the iomem and tag the port type */
static void bcm_uart_config_port(struct uart_port *port, int flags)
{
	if (!(flags & UART_CONFIG_TYPE))
		return;
	if (bcm_uart_request_port(port) == 0)
		port->type = PORT_BCM63XX;
}
/*
* serial core request to check that port information in serinfo are
* suitable
*/
/* accept only settings that exactly match the probed port */
static int bcm_uart_verify_port(struct uart_port *port,
				struct serial_struct *serinfo)
{
	int matches = (port->type == PORT_BCM63XX) &&
		      (port->irq == serinfo->irq) &&
		      (port->iotype == serinfo->io_type) &&
		      (port->mapbase == (unsigned long)serinfo->iomem_base);

	return matches ? 0 : -EINVAL;
}
/* serial core callbacks */
static struct uart_ops bcm_uart_ops = {
.tx_empty = bcm_uart_tx_empty,
.get_mctrl = bcm_uart_get_mctrl,
.set_mctrl = bcm_uart_set_mctrl,
.start_tx = bcm_uart_start_tx,
.stop_tx = bcm_uart_stop_tx,
.stop_rx = bcm_uart_stop_rx,
.enable_ms = bcm_uart_enable_ms,
.break_ctl = bcm_uart_break_ctl,
.startup = bcm_uart_startup,
.shutdown = bcm_uart_shutdown,
.set_termios = bcm_uart_set_termios,
.type = bcm_uart_type,
.release_port = bcm_uart_release_port,
.request_port = bcm_uart_request_port,
.config_port = bcm_uart_config_port,
.verify_port = bcm_uart_verify_port,
};
#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
/* busy-wait helper for the console path, where sleeping is not allowed;
 * both loops time out silently rather than hang the console forever */
static inline void wait_for_xmitr(struct uart_port *port)
{
unsigned int tmout;
/* Wait up to 10ms for the character(s) to be sent. */
tmout = 10000;
while (--tmout) {
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
if (val & UART_IR_STAT(UART_IR_TXEMPTY))
break;
udelay(1);
}
/* Wait up to 1s for flow control if necessary */
if (port->flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout) {
unsigned int val;
val = bcm_uart_readl(port, UART_EXTINP_REG);
if (val & UART_EXTINP_CTS_MASK)
break;
udelay(1);
}
}
}
/*
* output given char
*/
static void bcm_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
bcm_uart_writel(port, ch, UART_FIFO_REG);
}
/*
* console core request to output given string
*/
static void bcm_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port;
unsigned long flags;
int locked;
port = &ports[co->index];
local_irq_save(flags);
if (port->sysrq) {
/* bcm_uart_interrupt() already took the lock */
locked = 0;
} else if (oops_in_progress) {
/* during an oops the lock holder may have crashed: try once
 * rather than deadlock and lose the final messages */
locked = spin_trylock(&port->lock);
} else {
spin_lock(&port->lock);
locked = 1;
}
/* call helper to deal with \r\n */
uart_console_write(port, s, count, bcm_console_putchar);
/* and wait for char to be transmitted */
wait_for_xmitr(port);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
}
/*
* console core request to setup given console, find matching uart
* port and setup it.
*/
static int bcm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
return -EINVAL;
port = &ports[co->index];
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver bcm_uart_driver;
static struct console bcm63xx_console = {
.name = "ttyS",
.write = bcm_console_write,
.device = uart_console_device,
.setup = bcm_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bcm_uart_driver,
};
static int __init bcm63xx_console_init(void)
{
register_console(&bcm63xx_console);
return 0;
}
console_initcall(bcm63xx_console_init);
#define BCM63XX_CONSOLE (&bcm63xx_console)
#else
#define BCM63XX_CONSOLE NULL
#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
static struct uart_driver bcm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "bcm63xx_uart",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = BCM63XX_NR_UARTS,
.cons = BCM63XX_CONSOLE,
};
/*
* platform driver probe/remove callback
*/
/*
 * platform driver probe: validate resources, fill in the uart_port slot
 * for this id and register it with the serial core.
 */
static int __devinit bcm_uart_probe(struct platform_device *pdev)
{
	struct resource *res_mem, *res_irq;
	struct uart_port *port;
	struct clk *clk;
	int ret;

	if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
		return -EINVAL;

	/* membase doubles as the "slot in use" marker */
	if (ports[pdev->id].membase)
		return -EBUSY;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_mem)
		return -ENODEV;

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq)
		return -ENODEV;

	clk = clk_get(&pdev->dev, "periph");
	if (IS_ERR(clk))
		return -ENODEV;

	port = &ports[pdev->id];
	memset(port, 0, sizeof(*port));
	port->iotype = UPIO_MEM;
	port->mapbase = res_mem->start;
	port->irq = res_irq->start;
	port->ops = &bcm_uart_ops;
	port->flags = UPF_BOOT_AUTOCONF;
	port->dev = &pdev->dev;
	port->fifosize = 16;
	/* the uart is clocked at half the peripheral clock rate */
	port->uartclk = clk_get_rate(clk) / 2;
	port->line = pdev->id;
	clk_put(clk);

	ret = uart_add_one_port(&bcm_uart_driver, port);
	if (ret) {
		/* fix: membase is a pointer; mark the slot free with NULL
		 * instead of integer 0 (silences a sparse warning) */
		ports[pdev->id].membase = NULL;
		return ret;
	}

	platform_set_drvdata(pdev, port);
	return 0;
}
/* platform driver remove: unregister the port and release its slot */
static int __devexit bcm_uart_remove(struct platform_device *pdev)
{
	struct uart_port *port;

	port = platform_get_drvdata(pdev);
	uart_remove_one_port(&bcm_uart_driver, port);
	platform_set_drvdata(pdev, NULL);
	/* mark port as free; fix: membase is a pointer, assign NULL rather
	 * than integer 0 (sparse warning) */
	ports[pdev->id].membase = NULL;
	return 0;
}
/*
* platform driver stuff
*/
static struct platform_driver bcm_uart_platform_driver = {
.probe = bcm_uart_probe,
.remove = __devexit_p(bcm_uart_remove),
.driver = {
.owner = THIS_MODULE,
.name = "bcm63xx_uart",
},
};
static int __init bcm_uart_init(void)
{
int ret;
ret = uart_register_driver(&bcm_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&bcm_uart_platform_driver);
if (ret)
uart_unregister_driver(&bcm_uart_driver);
return ret;
}
static void __exit bcm_uart_exit(void)
{
platform_driver_unregister(&bcm_uart_platform_driver);
uart_unregister_driver(&bcm_uart_driver);
}
module_init(bcm_uart_init);
module_exit(bcm_uart_exit);
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Split-Screen/android_kernel_asus_fugu | arch/sh/mm/flush-sh4.c | 9049 | 2653 | #include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/*
* Write back the dirty D-caches, but not invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
/*
 * Write back the dirty D-cache lines covering [start, start+size),
 * without invalidating them.
 */
static void sh4__flush_wback_region(void *start, int size)
{
	reg_size_t addr, line, limit, nlines;

	/* round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES - 1);
	limit = (addr + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	nlines = (limit - line) / L1_CACHE_BYTES;

	/* process eight lines per outer iteration to cut loop overhead */
	while (nlines >= 8) {
		int i;

		for (i = 0; i < 8; i++) {
			__ocbwb(line);
			line += L1_CACHE_BYTES;
		}
		nlines -= 8;
	}
	while (nlines > 0) {
		__ocbwb(line);
		line += L1_CACHE_BYTES;
		nlines--;
	}
}
/*
* Write back the dirty D-caches and invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
/*
 * Write back and invalidate the D-cache lines covering
 * [start, start+size).
 */
static void sh4__flush_purge_region(void *start, int size)
{
	reg_size_t addr, line, limit, nlines;

	/* round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES - 1);
	limit = (addr + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	nlines = (limit - line) / L1_CACHE_BYTES;

	/* process eight lines per outer iteration to cut loop overhead */
	while (nlines >= 8) {
		int i;

		for (i = 0; i < 8; i++) {
			__ocbp(line);
			line += L1_CACHE_BYTES;
		}
		nlines -= 8;
	}
	while (nlines > 0) {
		__ocbp(line);
		line += L1_CACHE_BYTES;
		nlines--;
	}
}
/*
* No write back please
*/
/*
 * Invalidate (discard, no write back) the D-cache lines covering
 * [start, start+size).
 */
static void sh4__flush_invalidate_region(void *start, int size)
{
	reg_size_t addr, line, limit, nlines;

	/* round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES - 1);
	limit = (addr + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	nlines = (limit - line) / L1_CACHE_BYTES;

	/* process eight lines per outer iteration to cut loop overhead */
	while (nlines >= 8) {
		int i;

		for (i = 0; i < 8; i++) {
			__ocbi(line);
			line += L1_CACHE_BYTES;
		}
		nlines -= 8;
	}
	while (nlines > 0) {
		__ocbi(line);
		line += L1_CACHE_BYTES;
		nlines--;
	}
}
/* install the SH-4 implementations of the cache maintenance hooks */
void __init sh4__flush_region_init(void)
{
	__flush_wback_region = sh4__flush_wback_region;
	__flush_purge_region = sh4__flush_purge_region;
	__flush_invalidate_region = sh4__flush_invalidate_region;
}
| gpl-2.0 |
sztena/DG08_android4.2 | security/selinux/ss/sidtab.c | 12633 | 6011 | /*
* Implementation of the SID table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#define SIDTAB_HASH(sid) \
(sid & SIDTAB_HASH_MASK)
/*
 * Initialize an empty SID table.  Returns 0 or -ENOMEM.
 *
 * kcalloc() both checks the SIDTAB_SIZE * sizeof() multiplication for
 * overflow and returns zeroed memory, which makes the previous explicit
 * "htable[i] = NULL" loop unnecessary.
 */
int sidtab_init(struct sidtab *s)
{
	s->htable = kcalloc(SIDTAB_SIZE, sizeof(*(s->htable)), GFP_ATOMIC);
	if (!s->htable)
		return -ENOMEM;
	s->nel = 0;
	s->next_sid = 1;	/* SID 0 is reserved (SECSID_NULL) */
	s->shutdown = 0;
	spin_lock_init(&s->lock);
	return 0;
}
/*
 * Insert a (sid, context) pair.  Returns 0, -EEXIST if the sid is
 * already present, or -ENOMEM.  Caller holds s->lock; readers may walk
 * the chains locklessly, hence the wmb() before publication.
 */
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
{
int hvalue, rc = 0;
struct sidtab_node *prev, *cur, *newnode;
if (!s) {
rc = -ENOMEM;
goto out;
}
hvalue = SIDTAB_HASH(sid);
prev = NULL;
cur = s->htable[hvalue];
/* chains are kept sorted by sid: find the insertion point */
while (cur && sid > cur->sid) {
prev = cur;
cur = cur->next;
}
if (cur && sid == cur->sid) {
rc = -EEXIST;
goto out;
}
newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
if (newnode == NULL) {
rc = -ENOMEM;
goto out;
}
newnode->sid = sid;
if (context_cpy(&newnode->context, context)) {
kfree(newnode);
rc = -ENOMEM;
goto out;
}
/* wmb() orders the node's full initialization before linking it in,
 * so a lockless reader can never observe a half-built node */
if (prev) {
newnode->next = prev->next;
wmb();
prev->next = newnode;
} else {
newnode->next = s->htable[hvalue];
wmb();
s->htable[hvalue] = newnode;
}
s->nel++;
if (sid >= s->next_sid)
s->next_sid = sid + 1;
out:
return rc;
}
/*
 * Look up the context for @sid.  A non-zero context.len marks a context
 * that failed validation and is kept only as a string; such entries are
 * returned only when @force is set, otherwise the sid is remapped to
 * SECINITSID_UNLABELED.
 */
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
int hvalue;
struct sidtab_node *cur;
if (!s)
return NULL;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
/* chain is sorted by sid: stop as soon as we pass it */
while (cur && sid > cur->sid)
cur = cur->next;
if (force && cur && sid == cur->sid && cur->context.len)
return &cur->context;
if (cur == NULL || sid != cur->sid || cur->context.len) {
/* Remap invalid SIDs to the unlabeled SID. */
sid = SECINITSID_UNLABELED;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
while (cur && sid > cur->sid)
cur = cur->next;
if (!cur || sid != cur->sid)
return NULL;
}
return &cur->context;
}
/* non-forced lookup: invalid contexts fall back to the unlabeled SID */
struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}
/* forced lookup: also return contexts that failed validation */
struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}
/*
 * Invoke @apply on every (sid, context) pair in the table.  Walking
 * stops at the first non-zero return, which is propagated to the
 * caller; a NULL table yields 0.
 */
int sidtab_map(struct sidtab *s,
	       int (*apply) (u32 sid,
			     struct context *context,
			     void *args),
	       void *args)
{
	struct sidtab_node *cur;
	int i;

	if (!s)
		return 0;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		for (cur = s->htable[i]; cur; cur = cur->next) {
			int rc = apply(cur->sid, &cur->context, args);

			if (rc)
				return rc;
		}
	}
	return 0;
}
/* promote @n to the front of the MRU cache; entries [0, loc) each slide
 * down one slot to make room */
static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
{
	int i;

	BUG_ON(loc >= SIDTAB_CACHE_LEN);

	for (i = loc; i > 0; i--)
		s->cache[i] = s->cache[i - 1];
	s->cache[0] = n;
}
/*
 * Brute-force scan of every hash chain for a context equal to @context.
 * On a hit the node is promoted into the MRU cache and its sid is
 * returned; 0 means "not found" (0 is never a valid sid).
 */
static inline u32 sidtab_search_context(struct sidtab *s,
					struct context *context)
{
	struct sidtab_node *cur;
	int i;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		for (cur = s->htable[i]; cur; cur = cur->next) {
			if (!context_cmp(&cur->context, context))
				continue;
			sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
			return cur->sid;
		}
	}
	return 0;
}
/*
 * Check the small MRU cache for @context before paying for a full table
 * scan.  The cache fills front-to-back, so the first empty slot ends
 * the search.  Returns the sid, or 0 on a miss.
 */
static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
{
	int i;

	for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
		struct sidtab_node *node = s->cache[i];

		if (unlikely(!node))
			break;
		if (context_cmp(&node->context, context)) {
			sidtab_update_cache(s, node, i);
			return node->sid;
		}
	}
	return 0;
}
/*
 * Map @context to a SID, allocating a fresh one if none exists yet.
 * Fast path is a lockless cache/table search; allocation happens under
 * s->lock with a re-scan to close the race.
 */
int sidtab_context_to_sid(struct sidtab *s,
struct context *context,
u32 *out_sid)
{
u32 sid;
int ret = 0;
unsigned long flags;
*out_sid = SECSID_NULL;
/* lockless fast path: MRU cache first, then a full scan */
sid = sidtab_search_cache(s, context);
if (!sid)
sid = sidtab_search_context(s, context);
if (!sid) {
spin_lock_irqsave(&s->lock, flags);
/* Rescan now that we hold the lock. */
sid = sidtab_search_context(s, context);
if (sid)
goto unlock_out;
/* No SID exists for the context. Allocate a new one. */
if (s->next_sid == UINT_MAX || s->shutdown) {
ret = -ENOMEM;
goto unlock_out;
}
sid = s->next_sid++;
if (context->len)
printk(KERN_INFO
"SELinux: Context %s is not valid (left unmapped).\n",
context->str);
ret = sidtab_insert(s, sid, context);
if (ret)
/* roll back the sid allocation on insert failure */
s->next_sid--;
unlock_out:
spin_unlock_irqrestore(&s->lock, flags);
}
if (ret)
return ret;
*out_sid = sid;
return 0;
}
/* dump hash-table occupancy statistics (debug aid) */
void sidtab_hash_eval(struct sidtab *h, char *tag)
{
	int slots_used = 0;
	int max_chain_len = 0;
	int i;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		struct sidtab_node *cur = h->htable[i];
		int chain_len = 0;

		if (!cur)
			continue;
		slots_used++;
		for (; cur; cur = cur->next)
			chain_len++;
		if (chain_len > max_chain_len)
			max_chain_len = chain_len;
	}

	printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
	       "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
	       max_chain_len);
}
/* free every node and the bucket array, leaving *s reset to empty */
void sidtab_destroy(struct sidtab *s)
{
	int i;

	if (!s)
		return;

	for (i = 0; i < SIDTAB_SIZE; i++) {
		struct sidtab_node *node = s->htable[i];

		while (node) {
			struct sidtab_node *next = node->next;

			context_destroy(&node->context);
			kfree(node);
			node = next;
		}
		s->htable[i] = NULL;
	}
	kfree(s->htable);
	s->htable = NULL;
	s->nel = 0;
	s->next_sid = 1;
}
void sidtab_set(struct sidtab *dst, struct sidtab *src)
{
unsigned long flags;
int i;
spin_lock_irqsave(&src->lock, flags);
dst->htable = src->htable;
dst->nel = src->nel;
dst->next_sid = src->next_sid;
dst->shutdown = 0;
for (i = 0; i < SIDTAB_CACHE_LEN; i++)
dst->cache[i] = NULL;
spin_unlock_irqrestore(&src->lock, flags);
}
void sidtab_shutdown(struct sidtab *s)
{
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
s->shutdown = 1;
spin_unlock_irqrestore(&s->lock, flags);
}
| gpl-2.0 |
ankur850/android_kernel_samsung_msm7x27 | sound/isa/gus/gus_instr.c | 13401 | 4974 | /*
* Routines for Gravis UltraSound soundcards - Synthesizer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/gus.h>
/*
*
*/
/*
 * Upload an IWFFFF wave into GUS onboard DRAM: allocate a DRAM block,
 * copy the user-space sample data into it, and record the onboard
 * address in the wave.  Returns 0 or a negative errno.
 */
int snd_gus_iwffff_put_sample(void *private_data, struct iwffff_wave *wave,
char __user *data, long len, int atomic)
{
struct snd_gus_card *gus = private_data;
struct snd_gf1_mem_block *block;
int err;
/* ROM-resident waves need no DRAM upload */
if (wave->format & IWFFFF_WAVE_ROM)
return 0; /* it's probably ok - verify the address? */
if (wave->format & IWFFFF_WAVE_STEREO)
return -EINVAL; /* not supported */
block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF,
NULL, wave->size,
wave->format & IWFFFF_WAVE_16BIT, 1,
wave->share_id);
if (block == NULL)
return -ENOMEM;
err = snd_gus_dram_write(gus, data,
block->ptr, wave->size);
if (err < 0) {
/* upload failed: roll back the allocation.  NOTE(review): the
 * lock(...,0)/lock(...,1) pair appears to bracket xfree with
 * the allocator unlocked - confirm the lock polarity */
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
return err;
}
/* publish the onboard address for later playback/removal */
wave->address.memory = block->ptr;
return 0;
}
/* copy a wave's data from GUS memory back to user space */
int snd_gus_iwffff_get_sample(void *private_data, struct iwffff_wave *wave,
			      char __user *data, long len, int atomic)
{
	struct snd_gus_card *card = private_data;
	int from_rom = (wave->format & IWFFFF_WAVE_ROM) ? 1 : 0;

	return snd_gus_dram_read(card, data, wave->address.memory,
				 wave->size, from_rom);
}
/* release a wave's onboard DRAM block (ROM waves own no DRAM) */
int snd_gus_iwffff_remove_sample(void *private_data, struct iwffff_wave *wave,
				 int atomic)
{
	struct snd_gus_card *card = private_data;

	if (wave->format & IWFFFF_WAVE_ROM)
		return 0; /* it's probably ok - verify the address? */
	return snd_gf1_mem_free(&card->gf1.mem_alloc, wave->address.memory);
}
/*
*
*/
/*
 * Upload a GF1 wave into GUS onboard DRAM; mirrors the IWFFFF variant
 * but without a ROM case.  Returns 0 or a negative errno.
 */
int snd_gus_gf1_put_sample(void *private_data, struct gf1_wave *wave,
char __user *data, long len, int atomic)
{
struct snd_gus_card *gus = private_data;
struct snd_gf1_mem_block *block;
int err;
if (wave->format & GF1_WAVE_STEREO)
return -EINVAL; /* not supported */
block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
SNDRV_GF1_MEM_OWNER_WAVE_GF1,
NULL, wave->size,
wave->format & GF1_WAVE_16BIT, 1,
wave->share_id);
if (block == NULL)
return -ENOMEM;
err = snd_gus_dram_write(gus, data,
block->ptr, wave->size);
if (err < 0) {
/* upload failed: roll back the DRAM allocation */
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
return err;
}
/* publish the onboard address for later playback/removal */
wave->address.memory = block->ptr;
return 0;
}
/* copy a GF1 wave back to user space; GF1 waves always live in DRAM */
int snd_gus_gf1_get_sample(void *private_data, struct gf1_wave *wave,
			   char __user *data, long len, int atomic)
{
	struct snd_gus_card *card = private_data;

	return snd_gus_dram_read(card, data, wave->address.memory,
				 wave->size, 0);
}
/* release the GF1 wave's onboard DRAM block */
int snd_gus_gf1_remove_sample(void *private_data, struct gf1_wave *wave,
			      int atomic)
{
	struct snd_gus_card *card = private_data;

	return snd_gf1_mem_free(&card->gf1.mem_alloc, wave->address.memory);
}
/*
*
*/
/*
 * Upload a "simple" instrument's sample into GUS onboard DRAM; same
 * allocate/copy/rollback pattern as the IWFFFF and GF1 variants.
 */
int snd_gus_simple_put_sample(void *private_data, struct simple_instrument *instr,
char __user *data, long len, int atomic)
{
struct snd_gus_card *gus = private_data;
struct snd_gf1_mem_block *block;
int err;
if (instr->format & SIMPLE_WAVE_STEREO)
return -EINVAL; /* not supported */
block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE,
NULL, instr->size,
instr->format & SIMPLE_WAVE_16BIT, 1,
instr->share_id);
if (block == NULL)
return -ENOMEM;
err = snd_gus_dram_write(gus, data, block->ptr, instr->size);
if (err < 0) {
/* upload failed: roll back the DRAM allocation */
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 0);
snd_gf1_mem_xfree(&gus->gf1.mem_alloc, block);
snd_gf1_mem_lock(&gus->gf1.mem_alloc, 1);
return err;
}
/* publish the onboard address for later playback/removal */
instr->address.memory = block->ptr;
return 0;
}
/* copy a simple instrument's sample back to user space */
int snd_gus_simple_get_sample(void *private_data, struct simple_instrument *instr,
			      char __user *data, long len, int atomic)
{
	struct snd_gus_card *card = private_data;

	return snd_gus_dram_read(card, data, instr->address.memory,
				 instr->size, 0);
}
/* release the simple instrument's onboard DRAM block */
int snd_gus_simple_remove_sample(void *private_data, struct simple_instrument *instr,
				 int atomic)
{
	struct snd_gus_card *card = private_data;

	return snd_gf1_mem_free(&card->gf1.mem_alloc, instr->address.memory);
}
| gpl-2.0 |
q-li/linux-sunxi | drivers/net/wireless/rtxx7x/tools/bin2h.c | 90 | 4112 | /*
*************************************************************************
* Ralink Tech Inc.
* 5F., No.36, Taiyuan St., Jhubei City,
* Hsinchu County 302,
* Taiwan, R.O.C.
*
* (c) Copyright 2002-2010, Ralink Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
*************************************************************************/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
/*
 * Convert the chipset's firmware .bin image into a C header
 * (include/firmware.h) containing a FirmwareImage[] byte array.
 * Input/output paths come from the RT28xx_DIR and CHIPSET environment
 * variables.  Returns 0 on success, -1 on any error.
 *
 * Fixes vs. the original:
 *  - firmware opened with "rb": text mode corrupts binary data on
 *    platforms that translate line endings;
 *  - read loop breaks on getc() == EOF, so a stream *error* (EOF with
 *    only ferror set) can no longer loop forever writing 0xff;
 *  - infile no longer leaked when the output file cannot be opened;
 *  - fclose() of the generated header is checked so buffered write
 *    errors don't silently produce a truncated file.
 */
int main(int argc, char *argv[])
{
	/* chipsets whose firmware image is rt2870.bin; all others
	 * (2860, 3090, PCI, unknown) use rt2860.bin */
	static const char *rt2870_chips[] = {
		"2870", "2070", "3070", "3572", "3370", "5370", "USB",
	};
	FILE *infile, *outfile;
	char infname[1024];
	char outfname[1024];
	char *rt28xxdir, *chipset;
	const char *binname;
	size_t k;
	int i = 0;
	int ch;

	rt28xxdir = getenv("RT28xx_DIR");
	chipset = getenv("CHIPSET");
	if (!rt28xxdir) {
		printf("Environment value \"RT28xx_DIR\" not export \n");
		return -1;
	}
	if (!chipset) {
		printf("Environment value \"CHIPSET\" not export \n");
		return -1;
	}
	/* leave ample room for the longest suffix appended below */
	if (strlen(rt28xxdir) > (sizeof(infname) - 100)) {
		printf("Environment value \"RT28xx_DIR\" is too long!\n");
		return -1;
	}

	binname = "/common/rt2860.bin";
	for (k = 0; k < sizeof(rt2870_chips) / sizeof(rt2870_chips[0]); k++) {
		if (strncmp(chipset, rt2870_chips[k],
			    strlen(rt2870_chips[k])) == 0) {
			binname = "/common/rt2870.bin";
			break;
		}
	}
	snprintf(infname, sizeof(infname), "%s%s", rt28xxdir, binname);
	snprintf(outfname, sizeof(outfname), "%s/include/firmware.h",
		 rt28xxdir);

	/* "rb": the firmware is binary data */
	infile = fopen(infname, "rb");
	if (infile == NULL) {
		printf("Can't read file %s \n", infname);
		return -1;
	}
	outfile = fopen(outfname, "w");
	if (outfile == NULL) {
		printf("Can't open write file %s \n", outfname);
		fclose(infile);
		return -1;
	}

	fputs("/* AUTO GEN PLEASE DO NOT MODIFY IT */ \n", outfile);
	fputs("/* AUTO GEN PLEASE DO NOT MODIFY IT */ \n", outfile);
	fputs("\n", outfile);
	fputs("\n", outfile);
	fputs("UCHAR FirmwareImage [] = { \n", outfile);
	/* ch is int so EOF (end-of-file *or* read error) is detectable */
	while ((ch = getc(infile)) != EOF) {
		if (i >= 16) {
			/* 16 bytes per output line */
			fputs("\n", outfile);
			i = 0;
		}
		fprintf(outfile, "0x%02x, ", ch);
		i++;
	}
	fputs("} ;\n", outfile);

	fclose(infile);
	/* fclose flushes buffered output: check it so disk-full or other
	 * write errors are reported instead of ignored */
	if (fclose(outfile) != 0) {
		printf("Can't open write file %s \n", outfname);
		return -1;
	}
	return 0;
}
| gpl-2.0 |
boddob/linux | drivers/bcma/main.c | 90 | 16997 | /*
* Broadcom specific AMBA
* Bus subsystem
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);
static struct attribute *bcma_device_attrs[] = {
&dev_attr_manuf.attr,
&dev_attr_id.attr,
&dev_attr_rev.attr,
&dev_attr_class.attr,
NULL,
};
ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
.remove = bcma_device_remove,
.uevent = bcma_device_uevent,
.dev_groups = bcma_device_groups,
};
/* the BCM4706 exposes its ChipCommon under a dedicated core id */
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	return (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) ?
		BCMA_CORE_4706_CHIPCOMMON : BCMA_CORE_CHIPCOMMON;
}
/* find the @unit'th core with id @coreid; linear scan, core lists are
 * short.  Returns NULL when no such core exists. */
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *c;

	list_for_each_entry(c, &bus->cores, list) {
		if (c->id.id != coreid)
			continue;
		if (c->core_unit == unit)
			return c;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
/*
 * Busy-wait (udelay-based, so suitable for non-sleeping contexts) until
 * (reg & mask) == value or @timeout jiffies elapse.  Returns true on
 * success, false (with a warning) on timeout.
 */
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout)
{
unsigned long deadline = jiffies + timeout;
u32 val;
do {
val = bcma_read32(core, reg);
if ((val & mask) == value)
return true;
cpu_relax();
udelay(10);
} while (!time_after_eq(jiffies, deadline));
bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
return false;
}
/* device-model release callback: runs when the last reference to the
 * core's embedded struct device is dropped; tears down any register
 * mappings before freeing the core itself */
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
if (core->io_addr)
iounmap(core->io_addr);
if (core->io_wrap)
iounmap(core->io_wrap);
kfree(core);
}
/* cores that must be registered before the rest of the bus scan */
static bool bcma_is_core_needed_early(u16 core_id)
{
	return core_id == BCMA_CORE_NS_NAND ||
	       core_id == BCMA_CORE_NS_QSPI;
}
/*
 * Find the device-tree child node of the host platform device whose
 * translated "reg" address matches this core's base address.
 *
 * Returns the matching node or NULL. NOTE(review): the node returned by
 * for_each_child_of_node() carries a reference; the caller keeps it for the
 * core's lifetime — confirm against of_node refcounting rules.
 */
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;	/* child without a "reg" property */
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}
/*
 * Resolve interrupt @num of @core into an of_phandle_args.
 *
 * First tries a direct "interrupts" parse on the core's own node; if that
 * fails, falls back to raw interrupt-map translation on the parent using the
 * core's base address as the unit address. Returns 0 on success.
 */
static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	/* Fall back to interrupt-map lookup keyed by the core's address. */
	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}
/*
 * Map interrupt @num of @core to a Linux IRQ number via the device tree.
 * Returns 0 when OF IRQ support is unavailable or the lookup fails.
 */
static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}
/*
 * Populate OF-derived fields of a core device: its of_node (if a matching
 * DT child exists), its primary IRQ, and its DMA configuration.
 */
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	if (!IS_ENABLED(CONFIG_OF_IRQ))
		return;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);

	/* node may be NULL here; of_dma_configure() accepts that. */
	of_dma_configure(&core->dev, node);
}
/*
 * Return the Linux IRQ number for interrupt @num of @core, depending on how
 * the bus is hosted. Returns 0 when no IRQ can be determined.
 */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		/* All cores share the PCI device's interrupt line. */
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			/* MIPS hw IRQs 0..4 map to Linux IRQs 2..6. */
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
/*
 * Initialize the driver-model fields of a scanned core before registration:
 * release callback, bus type, device name, and host-dependent parent/DMA/IRQ
 * wiring.
 */
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			/* No platform device: the core is its own DMA device. */
			core->dev.dma_mask = &core->dev.coherent_dma_mask;
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}
/*
 * Return the struct device of whatever entity hosts this bus (PCI device,
 * platform device or SDIO function), or NULL if there is none.
 */
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci ? &bus->host_pci->dev : NULL;
	case BCMA_HOSTTYPE_SOC:
		return bus->host_pdev ? &bus->host_pdev->dev : NULL;
	case BCMA_HOSTTYPE_SDIO:
		return bus->host_sdio ? &bus->host_sdio->dev : NULL;
	}
	return NULL;
}
/*
 * Basic bus bring-up: assign a unique bus number, initialize the core list
 * and detect the chip. Must run before scanning.
 */
void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;	/* globally unique bus number */
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}
/*
 * Register a single core with the driver model. On failure the device
 * reference is dropped (put_device) instead of being freed directly, letting
 * the release callback do the cleanup. dev_registered records success so
 * teardown knows whether device_unregister() is needed.
 */
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}
/*
 * Register all remaining cores and ancillary platform devices (flash, GPIO,
 * watchdog). Cores handled internally by the bcma core code, cores already
 * registered early, and the unused second GMAC on BCM4706 are skipped.
 * Registration failures of individual devices are logged but not fatal.
 */
static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support that cores ourself */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif

	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	/* The watchdog platform device only exists on SoC hosts. */
	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}
/*
 * Tear down all cores in two passes: first unregister the devices that were
 * exposed to the driver model (so their drivers detach), then free the
 * internally-handled cores that were never registered. The order matters —
 * internally-handled cores may still be in use while drivers are unbinding.
 */
void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);	/* release cb frees the core */
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now noone uses internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}
/*
 * Full bus registration: scan for cores, early-init ChipCommon and PCIe,
 * register flash-access cores, fetch the SPROM, initialize the main core
 * drivers in dependency order, and finally register everything with the
 * driver model. The ordering of the init steps below is load-bearing; do
 * not reorder.
 */
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;
	struct device *dev;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	dev = bcma_bus_get_host_dev(bus);
	if (dev) {
		of_platform_default_populate(dev->of_node, NULL, dev);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);
	/* SPROM failures are non-fatal: continue with defaults. */

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init CC core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}
/*
 * Counterpart of bcma_bus_register(): release GPIO, free the ChipCommon B
 * mapping, and unregister/free every core.
 */
void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}
/*
 * This is a special version of bus registration function designed for SoCs.
 * It scans bus and performs basic initialization of main cores only.
 * Please note it requires memory allocation, however it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		/* Propagate the real error code instead of a bare -1. */
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend the bus: invoke each bound bcma driver's suspend hook, if any.
 * Always returns 0 — driver suspend return values are not checked here.
 */
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}
/*
 * Resume the bus: re-run ChipCommon setup (its registers were lost across
 * suspend, hence setup_done is cleared), then invoke each bound driver's
 * resume hook. Always returns 0.
 */
int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
drv->drv.bus = &bcma_bus_type;
drv->drv.owner = owner;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);
/* Unregister a bcma driver previously registered with bcma_driver_register(). */
void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
/*
 * Bus match callback: walk the driver's id table and accept the core if all
 * four fields (manuf/id/rev/class) match either exactly or via the BCMA_ANY_*
 * wildcards. The table is terminated by an all-zero manuf/id/rev entry.
 */
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}
/* Bus probe callback: forward to the matched bcma driver's probe hook. */
static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *bdev = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *bdrv = container_of(dev->driver, struct bcma_driver,
						drv);

	return bdrv->probe ? bdrv->probe(bdev) : 0;
}
/* Bus remove callback: forward to the bcma driver's remove hook, if any. */
static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *bdev = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *bdrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (bdrv->remove)
		bdrv->remove(bdev);

	return 0;
}
/*
 * Emit the MODALIAS uevent variable so udev can autoload the right module
 * for this core. The format must stay in sync with the bcma alias scheme
 * generated from MODULE_DEVICE_TABLE(bcma, ...).
 */
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}
/* Set once bus_register() has succeeded; guards against double registration. */
static unsigned int bcma_bus_registered;

/*
 * If built-in, bus has to be registered early, before any driver calls
 * bcma_driver_register.
 * Otherwise registering driver would trigger BUG in driver_register.
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);	/* built-in: register at fs_initcall time */
#endif
/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	/* No-op when built-in (already done via fs_initcall). */
	err = bcma_init_bus_register();
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;	/* deliberately non-fatal */
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;	/* deliberately non-fatal */
	}
#endif

	return err;
}
module_init(bcma_modinit);
/* Module teardown: unregister host drivers, then the bus type itself. */
static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)
| gpl-2.0 |
skristiansson/eco32-linux | arch/arm64/kernel/perf_event.c | 90 | 35038 | /*
* PMU support
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
*
* This code is based heavily on the ARMv7 perf event code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS		32

/* Per-CPU state: event slots, allocation bitmap, and the hw-events wrapper. */
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
/*
 * Return the number of hardware counters of the current PMU, or 0 when no
 * PMU has been probed yet.
 */
int
armpmu_get_max_events(void)
{
	return cpu_pmu ? cpu_pmu->num_events : 0;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);
/* Generic perf hook: number of counters on this architecture's PMU. */
int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
/* Sentinel values used in the event/cache maps for unsupported entries. */
#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF
/*
 * Translate a PERF_TYPE_HW_CACHE config into a hardware event number via
 * the PMU's cache map. config encodes cache type / op / result in bytes
 * 0..2. Returns the mapped event number, -EINVAL for an out-of-range field,
 * or -ENOENT when the combination is unsupported.
 */
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;	/* int, not unsigned: it is compared and returned as int */

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
/*
 * Translate a PERF_TYPE_HARDWARE config into a hardware event number.
 * Returns -EINVAL for out-of-range configs, -ENOENT for unsupported ones.
 */
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	if (mapping == HW_OP_UNSUPPORTED)
		return -ENOENT;

	return mapping;
}
/* PERF_TYPE_RAW: pass the config through, masked to the PMU's event width. */
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
/*
 * Dispatch an event's attr.type to the matching mapping helper.
 * Returns a hardware event number or a negative errno.
 */
static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
/*
 * Program the counter so that it overflows after the event's sample period.
 * The counter is written with -left so the hardware interrupts on wrap.
 * Returns 1 if a new period was started (perf needs to know), 0 otherwise.
 */
int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* Overflowed by more than one full period: restart at one period. */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/* Overflowed within one period: carry the remainder forward. */
	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/* Clamp to what the counter can actually hold. */
	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
/*
 * Fold the hardware counter's progress since the last read into the perf
 * event count. The cmpxchg retry loop guards against a concurrent update
 * (e.g. from the overflow IRQ) between reading prev and the counter.
 * Returns the raw counter value that was read.
 */
u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/* Mask to the counter width so a wrapped counter still yields a
	 * correct (positive) delta. */
	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
/* pmu::read hook: refresh the event count from hardware. */
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx >= 0)
		armpmu_event_update(event, hwc, hwc->idx);
}
/*
 * pmu::stop hook: disable the counter and fold its final value into the
 * event count, then mark the event stopped and up to date.
 */
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
/*
 * pmu::start hook: reprogram the sample period and enable the counter.
 */
static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
/*
 * pmu::del hook: stop the event, release its counter slot and clear the
 * allocation bit so the slot can be reused.
 */
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);	/* must have been added first */

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
/*
 * pmu::add hook: claim a counter slot for the event and optionally start it.
 * Returns 0 on success or a negative errno when no counter is free.
 */
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
/*
 * Check whether @event could be scheduled onto the fake PMU state used by
 * validate_group(). Software events and events belonging to another PMU or
 * in a permanently-off state trivially "fit" (return 1); otherwise ask the
 * allocator whether a counter slot would be available.
 */
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
/*
 * Verify that the whole event group (leader + siblings + the new event)
 * can be scheduled on the PMU simultaneously, using a throwaway used_mask.
 * Returns 0 if the group fits, -EINVAL otherwise.
 */
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
/*
 * Free every PMU IRQ we previously requested; active_irqs tracks which
 * per-CPU interrupts were actually claimed in armpmu_reserve_hardware().
 */
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, armpmu);
	}
}
/*
 * Request one PMU interrupt per possible CPU (as described by the platform
 * device's resources) and pin each to its CPU. On any request_irq() failure
 * all previously claimed IRQs are released and the error is returned.
 */
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device) {
		pr_err("no PMU device registered\n");
		return -ENODEV;
	}

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			/* pr_warn: pr_warning is deprecated */
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, i);
			continue;
		}

		err = request_irq(irq, armpmu->handle_irq,
				  IRQF_NOBALANCING,
				  "arm-pmu", armpmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}
/*
 * Event destructor: when the last active event goes away, release the PMU
 * interrupts. atomic_dec_and_mutex_lock() ensures the release happens
 * exactly once under the reserve mutex.
 */
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events	 = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
/* Does the event ask to exclude any execution mode from counting? */
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	if (attr->exclude_idle)
		return 1;
	if (attr->exclude_user)
		return 1;
	if (attr->exclude_kernel)
		return 1;
	return attr->exclude_hv != 0;
}
/*
 * Per-event init: map the attr config to a hardware event number, check
 * mode-exclusion support, set a default sample period for counting mode,
 * and validate group schedulability. Returns 0 or a negative errno.
 */
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has it's own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
/*
 * pmu::event_init hook: reserve the PMU hardware on first use (refcounted
 * via active_events) and run per-event initialization. On init failure the
 * destructor drops the hardware reference again.
 */
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;	/* not an event this PMU can count */

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		/* First event: take the mutex and reserve the IRQs once. */
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
/* pmu::pmu_enable hook: start the PMU, but only if any counter is in use. */
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();

	if (bitmap_weight(hw_events->used_mask, armpmu->num_events))
		armpmu->start();
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
armpmu->stop();
}
/*
 * Initialize the generic parts of an arm_pmu: reservation bookkeeping and
 * the struct pmu callback table wired to the armpmu_* handlers above.
 */
static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}
/* Initialize an arm_pmu and register it with the perf core. */
int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}
/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 *
 * Values are the architectural event numbers from the ARMv8 PMUv3 common
 * event space.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR			= 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL			= 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED			= 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES			= 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED			= 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED			= 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC				= 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ				= 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE				= 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN				= 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED			= 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE				= 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE				= 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH			= 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN			= 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE				= 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL			= 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL				= 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL				= 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS				= 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB			= 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS			= 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL			= 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB				= 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS				= 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR				= 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES				= 0x1D,
};
/* PMUv3 HW events mapping: generic perf hardware events -> PMUv3 numbers. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
/*
 * Cache-event map: [cache][op][result] -> PMUv3 event number. Only L1D and
 * branch-predictor events are supported by the common event set; everything
 * else is CACHE_OP_UNSUPPORTED.
 */
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events' indices
 *
 * Index 0 is the dedicated cycle counter; indices 1..N are the generic
 * event counters.
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV8_MAX_COUNTERS	32
#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV8_PMCR_N_MASK	0x1f
#define	ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define	ARMV8_EXCLUDE_EL1	(1 << 31)
#define	ARMV8_EXCLUDE_EL0	(1 << 30)
#define	ARMV8_INCLUDE_EL2	(1 << 27)
/* Read PMCR_EL0, the PMUv3 control register. */
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}
/* Write PMCR_EL0, keeping only the architecturally writable bits. */
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	/* Synchronize prior PMU register accesses before the control update. */
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}
/*
 * Extract the overflowed-counter bits from a PMOVSR snapshot.
 * A non-zero result means at least one counter overflowed.
 */
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	u32 overflow_bits = pmovsr & ARMV8_OVERFLOWED_MASK;

	return overflow_bits;
}
/*
 * A counter index is valid if it names the cycle counter or one of
 * the programmable event counters of this PMU.
 */
static inline int armv8pmu_counter_valid(int idx)
{
	if (idx < ARMV8_IDX_CYCLE_COUNTER)
		return 0;

	return idx <= ARMV8_IDX_COUNTER_LAST;
}
/*
 * Test whether counter @idx has its overflow bit set in the @pmnc
 * overflow-status snapshot.  Invalid indices are reported and treated
 * as "not overflowed".
 */
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	u32 bit;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
		return 0;
	}

	bit = ARMV8_IDX_TO_COUNTER(idx);
	return pmnc & BIT(bit);
}
/*
 * Select counter @idx via PMSELR_EL0 so that subsequent PMXEV* accesses
 * target it.  Returns @idx on success, -EINVAL for an invalid index.
 */
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	/* Ensure the selection takes effect before any PMXEV* access. */
	isb();

	return idx;
}
/*
 * Read the current value of counter @idx: PMCCNTR_EL0 for the cycle
 * counter, PMXEVCNTR_EL0 (after selection) for event counters.
 * Returns 0 for an invalid index.
 */
static inline u32 armv8pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}
/*
 * Write @value into counter @idx: PMCCNTR_EL0 for the cycle counter,
 * PMXEVCNTR_EL0 (after selection) for event counters.
 */
static inline void armv8pmu_write_counter(int idx, u32 value)
{
	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}
/*
 * Program the event type for counter @idx via PMXEVTYPER_EL0, masking
 * @val down to the writable event-type bits.
 */
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}
/*
 * Enable counter @idx by setting its bit in PMCNTENSET_EL0.
 * Returns @idx on success, -EINVAL for an invalid index.
 */
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
/*
 * Disable counter @idx by setting its bit in PMCNTENCLR_EL0.
 * Returns @idx on success, -EINVAL for an invalid index.
 */
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
/*
 * Enable the overflow interrupt for counter @idx via PMINTENSET_EL1.
 * Returns @idx on success, -EINVAL for an invalid index.
 */
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}
/*
 * Disable the overflow interrupt for counter @idx via PMINTENCLR_EL1,
 * then clear any pending overflow flag for it so a stale interrupt
 * cannot fire later.  Returns @idx on success, -EINVAL otherwise.
 */
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}
/*
 * Read the overflow status flags (PMOVSCLR_EL0) and write the same
 * bits back to clear them, returning the snapshot that was read.
 */
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}
/*
 * Program counter @idx for the event described by @hwc and enable it.
 * The counter is kept disabled while its event type is written; the
 * whole sequence runs under the per-CPU PMU lock.
 */
static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable the counter while it is reprogrammed. */
	armv8pmu_disable_counter(idx);

	/* Set event (if destined for PMNx counters). */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/* Enable the overflow interrupt for this counter. */
	armv8pmu_enable_intens(idx);

	/* Enable the counter itself. */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/*
 * Disable counter @idx and its overflow interrupt, under the per-CPU
 * PMU lock.  @hwc is unused here.
 */
static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable the counter. */
	armv8pmu_disable_counter(idx);

	/* Disable the overflow interrupt for this counter. */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/*
 * PMU overflow interrupt handler: snapshot-and-clear the overflow
 * flags, then update, re-period and possibly throttle every event
 * whose counter overflowed.  Returns IRQ_NONE when no counter of this
 * PMU overflowed.
 */
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Get and reset the IRQ flags. */
	pmovsr = armv8pmu_getreset_flags();

	/* Did an overflow occur at all? */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/* Handle the counter(s) overflow(s). */
	regs = get_irq_regs();
	cpuc = this_cpu_ptr(&cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Skip sampling if the next period could not be armed. */
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		/* A non-zero return means the event should be throttled. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
/* Globally enable all PMU counters by setting PMCR.E under the PMU lock. */
static void armv8pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/* Globally disable all PMU counters by clearing PMCR.E under the PMU lock. */
static void armv8pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/*
 * Allocate a hardware counter index for @event.  Cycle-count events
 * always claim the dedicated cycle counter; anything else takes the
 * first free programmable counter.  Returns -EAGAIN when no suitable
 * counter is available.
 */
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	unsigned long hw_event = event->config_base & ARMV8_EVTYPE_EVENT;
	int idx;

	if (hw_event == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		/* The cycle counter is reserved for this event type. */
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/* Otherwise scan the programmable event counters for a free slot. */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx)
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;

	/* Every counter is in use. */
	return -EAGAIN;
}
/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
/*
 * Translate the exclude_* attributes into PMUv3 exception-level filter
 * bits and store them in @event->config_base (later merged into the
 * event type register).  exclude_idle cannot be honoured in hardware,
 * so it is rejected with -EPERM.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
/*
 * Bring the PMU to a known state on this CPU: disable every counter
 * and its interrupt, reset the cycle and event counters via PMCR, and
 * block userspace access to the PMU registers.
 */
static void armv8pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv8pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

	/* Disable access from userspace. */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}
/* Map a generic perf event onto the PMUv3 event/cache tables. */
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}
/* PMUv3 backend: ties the armv8pmu_* operations into the ARM PMU core. */
static struct arm_pmu armv8pmu = {
	.handle_irq		= armv8pmu_handle_irq,
	.enable			= armv8pmu_enable_event,
	.disable		= armv8pmu_disable_event,
	.read_counter		= armv8pmu_read_counter,
	.write_counter		= armv8pmu_write_counter,
	.get_event_idx		= armv8pmu_get_event_idx,
	.start			= armv8pmu_start,
	.stop			= armv8pmu_stop,
	.reset			= armv8pmu_reset,
	/* Counters are 32 bits wide. */
	.max_period		= (1LLU << 32) - 1,
};
/*
 * Number of usable counters: the CNTx count advertised in PMCR.N
 * plus one for the dedicated CPU cycle counter.
 */
static u32 __init armv8pmu_read_num_pmnc_events(void)
{
	u32 n_counters;

	/* Read the nb of CNTx counters supported from PMNC */
	n_counters = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) &
			ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter and return */
	return n_counters + 1;
}
/* Finish populating the static PMUv3 descriptor and hand it to the caller. */
static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
{
	armv8pmu.name			= "arm/armv8-pmuv3";
	armv8pmu.map_event		= armv8_pmuv3_map_event;
	armv8pmu.num_events		= armv8pmu_read_num_pmnc_events();
	armv8pmu.set_event_filter	= armv8pmu_set_event_filter;
	return &armv8pmu;
}
/*
* Ensure the PMU has sane values out of reset.
* This requires SMP to be available, so exists as a separate initcall.
*/
/*
 * Reset the PMU on every CPU so it starts from sane register state.
 * Runs as an arch_initcall because on_each_cpu() needs SMP up.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);
/*
* PMU platform driver and devicetree bindings.
*/
/* Devicetree match table for the PMU platform driver. */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3"},
	{},
};
static int armpmu_device_probe(struct platform_device *pdev)
{
if (!cpu_pmu)
return -ENODEV;
cpu_pmu->plat_device = pdev;
return 0;
}
/* Platform driver that binds "arm-pmu" devices to the CPU PMU. */
static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
};

/* Register the PMU platform driver at device-init time. */
static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
/* Return the calling CPU's hardware-event bookkeeping structure. */
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}
/*
 * Wire up the per-CPU event tables, used-counter masks and locks for
 * every possible CPU, and install the accessor on @armpmu.
 */
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
}
/*
 * Detect the PMU version from ID_AA64DFR0_EL1 and, when a PMUv3
 * implementation is present, initialize and register the CPU PMU
 * with the perf core.
 */
static int __init init_hw_perf_events(void)
{
	u64 dfr = read_cpuid(ID_AA64DFR0_EL1);

	/* Bits [11:8] of ID_AA64DFR0_EL1 hold the PMU version field. */
	switch ((dfr >> 8) & 0xf) {
	case 0x1:	/* PMUv3 */
		cpu_pmu = armv8_pmuv3_pmu_init();
		break;
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);
/*
* Callchain handling code.
*/
/*
 * Layout of a saved frame record on the user stack: frame pointer to
 * the previous record plus the link register.  Packed so the in-memory
 * layout matches exactly what userspace pushed.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
/*
 * Record the return address of one user stack frame and return a
 * pointer to the next frame record, or NULL when the walk must stop
 * (inaccessible memory, faulting copy, or non-monotonic frame chain).
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	/* Copy in IRQ context: faults must not sleep. */
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}
/*
 * Build a user-space callchain: record the interrupted PC, then walk
 * the frame-record chain rooted at x29, stopping at the depth limit or
 * an unaligned/NULL frame pointer.
 */
void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);
	/* x29 is the AArch64 frame pointer. */
	tail = (struct frame_tail __user *)regs->regs[29];

	/* Frame records must be 16-byte aligned; anything else ends the walk. */
	while (entry->nr < PERF_MAX_STACK_DEPTH &&
	       tail && !((unsigned long)tail & 0xf))
		tail = user_backtrace(tail, entry);
}
/*
* Gets called by walk_stackframe() for every stackframe. This will be called
* whist unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
/*
 * walk_stackframe() callback: store each unwound frame's PC into the
 * callchain entry passed via @data.  Always returns 0 to continue.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, frame->pc);
	return 0;
}
/*
 * Build a kernel callchain by unwinding the kernel stack starting at
 * the interrupted frame (x29/sp/pc from @regs).
 */
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;

	walk_stackframe(&frame, callchain_trace, entry);
}
/* Instruction pointer for a sample: the guest IP when sampling a guest. */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}
/*
 * Classify a sample's origin for the perf record header: guest vs
 * host, user vs kernel mode.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->is_user_mode() ?
			PERF_RECORD_MISC_GUEST_USER :
			PERF_RECORD_MISC_GUEST_KERNEL;

	return user_mode(regs) ? PERF_RECORD_MISC_USER :
				 PERF_RECORD_MISC_KERNEL;
}
| gpl-2.0 |
limbo127/KVMGT-kernel | net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 346 | 15321 |
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#include <net/netfilter/nf_log.h>
/*
 * Fill @tuple's layer-3 addresses from the IPv4 header located at
 * @nhoff in @skb.  Returns false when the header bytes cannot be read.
 */
static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
			      struct nf_conntrack_tuple *tuple)
{
	__be32 _addrs[2];
	const __be32 *ap;

	/* saddr and daddr are adjacent in struct iphdr: fetch both at once. */
	ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
				sizeof(u_int32_t) * 2, _addrs);
	if (!ap)
		return false;

	tuple->src.u3.ip = ap[0];
	tuple->dst.u3.ip = ap[1];

	return true;
}
/* Build the reply-direction tuple by swapping source and destination. */
static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_tuple *orig)
{
	tuple->src.u3.ip = orig->dst.u3.ip;
	tuple->dst.u3.ip = orig->src.u3.ip;

	return true;
}
/* Print the tuple's IPv4 addresses into a /proc seq_file. */
static int ipv4_print_tuple(struct seq_file *s,
			    const struct nf_conntrack_tuple *tuple)
{
	return seq_printf(s, "src=%pI4 dst=%pI4 ",
			  &tuple->src.u3.ip, &tuple->dst.u3.ip);
}
/*
 * Locate the layer-4 header inside @skb: fill in its offset and the
 * protocol number from the IPv4 header at @nhoff.  Returns -NF_ACCEPT
 * for non-initial fragments and malformed headers so the packet passes
 * through untracked.
 */
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    unsigned int *dataoff, u_int8_t *protonum)
{
	const struct iphdr *iph;
	struct iphdr _iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (iph == NULL)
		return -NF_ACCEPT;

	/* Conntrack defragments packets, we might still see fragments
	 * inside ICMP packets though. */
	if (iph->frag_off & htons(IP_OFFSET))
		return -NF_ACCEPT;

	*dataoff = nhoff + (iph->ihl << 2);
	*protonum = iph->protocol;

	/* Check bogus IP headers */
	if (*dataoff > skb->len) {
		pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
			 "nhoff %u, ihl %u, skblen %u\n",
			 nhoff, iph->ihl << 2, skb->len);
		return -NF_ACCEPT;
	}

	return NF_ACCEPT;
}
/*
 * Netfilter hook that invokes the connection's protocol helper (FTP,
 * SIP, ...) on a tracked packet.  Untracked packets, related replies
 * and connections without a helper pass through untouched.
 */
static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
				struct sk_buff *skb,
				const struct net_device *in,
				const struct net_device *out,
				int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn_help *help;
	const struct nf_conntrack_helper *helper;

	/* This is where we call the helper: as the packet goes out. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help)
		return NF_ACCEPT;

	/* rcu_read_lock()ed by nf_hook_slow */
	helper = rcu_dereference(help->helper);
	if (!helper)
		return NF_ACCEPT;

	return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
			    ct, ctinfo);
}
/*
 * Netfilter hook run as the packet leaves: perform TCP sequence-number
 * adjustment if the connection needs it, then confirm the conntrack
 * entry (insert it into the hash).
 */
static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		goto out;

	/* adjust seqs for loopback traffic only in outgoing direction */
	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
	    !nf_is_loopback_packet(skb)) {
		if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
			NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
			return NF_DROP;
		}
	}
out:
	/* We've seen it coming out the other side: confirm it */
	return nf_conntrack_confirm(skb);
}
/* PRE_ROUTING hook: feed every incoming IPv4 packet to the conntrack core. */
static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
}
/*
 * LOCAL_OUT hook: track locally generated IPv4 packets, skipping
 * malformed ones that raw sockets may emit.
 */
static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
{
	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;
	return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
}
/* Connection tracking may drop packets, but never alters them, so
make it the first hook. */
static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
{
.hook = ipv4_conntrack_in,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_local,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_helper,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
.hook = ipv4_helper,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
};
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;

/*
 * Legacy ip_conntrack_* sysctl compatibility table.  The .data
 * pointers are left NULL here and filled in per-netns by
 * ipv4_init_net() after the table is duplicated.
 */
static struct ctl_table ip_ct_sysctl_table[] = {
	{
		.procname	= "ip_conntrack_max",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_count",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_buckets",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_checksum",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_log_invalid",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		/* Protocol numbers are limited to the 0..255 range. */
		.extra1		= &log_invalid_proto_min,
		.extra2		= &log_invalid_proto_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */
/* Fast function for those who don't want to parse /proc (and I don't
blame them). */
/* Reversing the socket's dst/src point of view gives us the reply
mapping. */
/*
 * SO_ORIGINAL_DST getsockopt handler: look up the conntrack entry that
 * matches the socket's reply-direction tuple and copy the original
 * destination (pre-NAT) address/port to userspace.  Only TCP and SCTP
 * sockets are supported.
 */
static int
getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;

	memset(&tuple, 0, sizeof(tuple));
	/* The socket's view is the reply direction of the connection. */
	tuple.src.u3.ip = inet->inet_rcv_saddr;
	tuple.src.u.tcp.port = inet->inet_sport;
	tuple.dst.u3.ip = inet->inet_daddr;
	tuple.dst.u.tcp.port = inet->inet_dport;
	tuple.src.l3num = PF_INET;
	tuple.dst.protonum = sk->sk_protocol;

	/* We only do TCP and SCTP at the moment: is there a better way? */
	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
		pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
		return -ENOPROTOOPT;
	}

	if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
		pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n",
			 *len, sizeof(struct sockaddr_in));
		return -EINVAL;
	}

	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
	if (h) {
		struct sockaddr_in sin;
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		sin.sin_family = AF_INET;
		/* Report the ORIGINAL direction's destination (pre-NAT). */
		sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u.tcp.port;
		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u3.ip;
		/* Avoid leaking uninitialized stack bytes to userspace. */
		memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

		pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
			 &sin.sin_addr.s_addr, ntohs(sin.sin_port));
		nf_ct_put(ct);
		if (copy_to_user(user, &sin, sizeof(sin)) != 0)
			return -EFAULT;
		else
			return 0;
	}
	pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
		 &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
		 &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
	return -ENOENT;
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
/*
 * Dump @tuple's IPv4 addresses as netlink attributes.  Returns 0 on
 * success, -1 when the message buffer has no room left.
 */
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip))
		return -1;
	if (nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -1;

	return 0;
}
/* Validation policy for the CTA_IP_V4_* netlink attributes. */
static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = {
	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
};
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t)
{
if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
return -EINVAL;
t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]);
t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]);
return 0;
}
/* Size of the netlink payload needed to dump one IPv4 tuple. */
static int ipv4_nlattr_tuple_size(void)
{
	return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1);
}
#endif
/* getsockopt() registration for SO_ORIGINAL_DST. */
static struct nf_sockopt_ops so_getorigdst = {
	.pf		= PF_INET,
	.get_optmin	= SO_ORIGINAL_DST,
	.get_optmax	= SO_ORIGINAL_DST+1,
	.get		= &getorigdst,
	.owner		= THIS_MODULE,
};
/*
 * Per-netns setup: duplicate the compat sysctl table and point each
 * entry's .data at this namespace's conntrack state.  The index order
 * must match the entry order in ip_ct_sysctl_table.
 */
static int ipv4_init_net(struct net *net)
{
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	struct nf_ip_net *in = &net->ct.nf_ct_proto;

	in->ctl_table = kmemdup(ip_ct_sysctl_table,
				sizeof(ip_ct_sysctl_table),
				GFP_KERNEL);
	if (!in->ctl_table)
		return -ENOMEM;

	in->ctl_table[0].data = &nf_conntrack_max;
	in->ctl_table[1].data = &net->ct.count;
	in->ctl_table[2].data = &net->ct.htable_size;
	in->ctl_table[3].data = &net->ct.sysctl_checksum;
	in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
#endif
	return 0;
}
/* Layer-3 protocol descriptor registered with the conntrack core for IPv4. */
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
	.l3proto	 = PF_INET,
	.name		 = "ipv4",
	.pkt_to_tuple	 = ipv4_pkt_to_tuple,
	.invert_tuple	 = ipv4_invert_tuple,
	.print_tuple	 = ipv4_print_tuple,
	.get_l4proto	 = ipv4_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.tuple_to_nlattr = ipv4_tuple_to_nlattr,
	.nlattr_tuple_size = ipv4_nlattr_tuple_size,
	.nlattr_to_tuple = ipv4_nlattr_to_tuple,
	.nla_policy	 = ipv4_nla_policy,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	.ctl_table_path  = "net/ipv4/netfilter",
#endif
	.init_net	 = ipv4_init_net,
	.me		 = THIS_MODULE,
};
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
/*
 * Per-netns registration of the TCP, UDP and ICMP layer-4 trackers and
 * the IPv4 layer-3 tracker.  On failure, everything registered so far
 * is unwound in reverse order via the goto chain.
 */
static int ipv4_net_init(struct net *net)
{
	int ret = 0;

	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_tcp4);
	if (ret < 0) {
		pr_err("nf_conntrack_tcp4: pernet registration failed\n");
		goto out_tcp;
	}
	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udp4);
	if (ret < 0) {
		pr_err("nf_conntrack_udp4: pernet registration failed\n");
		goto out_udp;
	}
	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_icmp);
	if (ret < 0) {
		pr_err("nf_conntrack_icmp4: pernet registration failed\n");
		goto out_icmp;
	}
	ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: pernet registration failed\n");
		goto out_ipv4;
	}
	return 0;
out_ipv4:
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmp);
out_icmp:
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp4);
out_udp:
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp4);
out_tcp:
	return ret;
}
/* Per-netns teardown: unregister trackers in reverse registration order. */
static void ipv4_net_exit(struct net *net)
{
	nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv4);
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmp);
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp4);
	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp4);
}
/* Network-namespace lifecycle hooks for this module. */
static struct pernet_operations ipv4_net_ops = {
	.init = ipv4_net_init,
	.exit = ipv4_net_exit,
};
/*
 * Module init: register the SO_ORIGINAL_DST sockopt, the per-netns
 * operations, the netfilter hooks, the TCP/UDP/ICMP layer-4 trackers,
 * the IPv4 layer-3 tracker and (optionally) the /proc compat
 * interface.  Failures unwind in reverse order via the goto chain.
 */
static int __init nf_conntrack_l3proto_ipv4_init(void)
{
	int ret = 0;

	need_conntrack();
	nf_defrag_ipv4_enable();

	ret = nf_register_sockopt(&so_getorigdst);
	if (ret < 0) {
		printk(KERN_ERR "Unable to register netfilter socket option\n");
		return ret;
	}

	ret = register_pernet_subsys(&ipv4_net_ops);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
		goto cleanup_sockopt;
	}

	ret = nf_register_hooks(ipv4_conntrack_ops,
				ARRAY_SIZE(ipv4_conntrack_ops));
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register hooks.\n");
		goto cleanup_pernet;
	}

	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_tcp4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register tcp4 proto.\n");
		goto cleanup_hooks;
	}

	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udp4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register udp4 proto.\n");
		goto cleanup_tcp4;
	}

	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_icmp);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register icmpv4 proto.\n");
		goto cleanup_udp4;
	}

	ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register ipv4 proto.\n");
		goto cleanup_icmpv4;
	}

#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	ret = nf_conntrack_ipv4_compat_init();
	if (ret < 0)
		goto cleanup_proto;
#endif
	return ret;
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
 cleanup_proto:
	nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
#endif
 cleanup_icmpv4:
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
 cleanup_udp4:
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp4);
 cleanup_tcp4:
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
 cleanup_hooks:
	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
 cleanup_pernet:
	unregister_pernet_subsys(&ipv4_net_ops);
 cleanup_sockopt:
	nf_unregister_sockopt(&so_getorigdst);
	return ret;
}
/* Module exit: tear everything down in the reverse order of init. */
static void __exit nf_conntrack_l3proto_ipv4_fini(void)
{
	synchronize_net();
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	nf_conntrack_ipv4_compat_fini();
#endif
	nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp4);
	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
	unregister_pernet_subsys(&ipv4_net_ops);
	nf_unregister_sockopt(&so_getorigdst);
}
module_init(nf_conntrack_l3proto_ipv4_init);
module_exit(nf_conntrack_l3proto_ipv4_fini);
| gpl-2.0 |
chrisc93/bullhead | drivers/net/ethernet/toshiba/tc35815.c | 2138 | 65723 | /*
* tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
*
* Based on skelton.c by Donald Becker.
*
* This driver is a replacement of older and less maintained version.
* This is a header of the older version:
* -----<snip>-----
* Copyright 2001 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ahennessy@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
* static const char *version =
* "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
* -----<snip>-----
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright TOSHIBA CORPORATION 2004-2005
* All Rights Reserved.
*/
#define DRV_VERSION "1.39"
static const char *version = "tc35815.c:v" DRV_VERSION "\n";
#define MODNAME "tc35815"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <asm/io.h>
#include <asm/byteorder.h>
/* Chip variants supported by this driver; used as an index into chip_info. */
enum tc35815_chiptype {
	TC35815CF = 0,
	TC35815_NWU,
	TC35815_TX4939,
};
/* indexed by tc35815_chiptype, above */
/* indexed by tc35815_chiptype, above */
static const struct {
	const char *name;
} chip_info[] = {
	{ "TOSHIBA TC35815CF 10/100BaseTX" },
	{ "TOSHIBA TC35815 with Wake on LAN" },
	{ "TOSHIBA TC35815/TX4939" },
};
/* PCI IDs handled by this driver; driver_data selects the chip variant. */
static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
	{0,}
};
MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
/* see MODULE_PARM_DESC */
/* Module-level link settings; see MODULE_PARM_DESC. */
static struct tc35815_options {
	int speed;
	int duplex;
} options;
/*
* Registers
*/
/*
 * Memory-mapped register layout of the TC35815 controller.  Fields are
 * laid out in hardware order; the trailing comments note byte offsets.
 */
struct tc35815_regs {
	__u32 DMA_Ctl;		/* 0x00 */
	__u32 TxFrmPtr;
	__u32 TxThrsh;
	__u32 TxPollCtr;
	__u32 BLFrmPtr;
	__u32 RxFragSize;
	__u32 Int_En;
	__u32 FDA_Bas;
	__u32 FDA_Lim;		/* 0x20 */
	__u32 Int_Src;
	__u32 unused0[2];
	__u32 PauseCnt;
	__u32 RemPauCnt;
	__u32 TxCtlFrmStat;
	__u32 unused1;
	__u32 MAC_Ctl;		/* 0x40 */
	__u32 CAM_Ctl;
	__u32 Tx_Ctl;
	__u32 Tx_Stat;
	__u32 Rx_Ctl;
	__u32 Rx_Stat;
	__u32 MD_Data;
	__u32 MD_CA;
	__u32 CAM_Adr;		/* 0x60 */
	__u32 CAM_Data;
	__u32 CAM_Ena;
	__u32 PROM_Ctl;
	__u32 PROM_Data;
	__u32 Algn_Cnt;
	__u32 CRC_Cnt;
	__u32 Miss_Cnt;
};
/*
* Bit assignments
*/
/* DMA_Ctl bit assign ------------------------------------------------------- */
#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
#define DMA_RxAlign_1 0x00400000
#define DMA_RxAlign_2 0x00800000
#define DMA_RxAlign_3 0x00c00000
#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
#define DMA_TestMode 0x00002000 /* 1:Test Mode */
#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
/* RxFragSize bit assign ---------------------------------------------------- */
#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
/* MAC_Ctl bit assign ------------------------------------------------------- */
#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */
#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
#define MAC_Reset 0x00000004 /* 1:Software Reset */
#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
#define MAC_HaltReq 0x00000001 /* 1:Halt request */
/* PROM_Ctl bit assign ------------------------------------------------------ */
#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
#define PROM_Read 0x00004000 /*10:Read operation */
#define PROM_Write 0x00002000 /*01:Write operation */
#define PROM_Erase 0x00006000 /*11:Erase operation */
/*00:Enable or Disable Writting, */
/* as specified in PROM_Addr. */
#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
/*00xxxx: disable */
/* CAM_Ctl bit assign ------------------------------------------------------- */
#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
/* accept other */
#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */
#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */
#define CAM_StationAcc 0x00000001 /* 1:unicast accept */
/* CAM_Ena bit assign ------------------------------------------------------- */
#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
#define CAM_Ena_Bit(index) (1 << (index))
#define CAM_ENTRY_DESTINATION 0
#define CAM_ENTRY_SOURCE 1
#define CAM_ENTRY_MACCTL 20
/* Tx_Ctl bit assign -------------------------------------------------------- */
#define Tx_En 0x00000001 /* 1:Transmit enable */
#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
#define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */
#define Tx_FBack 0x00000010 /* 1:Fast Back-off */
#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
/* Tx_Stat bit assign ------------------------------------------------------- */
#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
#define Tx_ExColl 0x00000010 /* Excessive Collision */
#define Tx_TXDefer 0x00000020 /* Transmit Defered */
#define Tx_Paused 0x00000040 /* Transmit Paused */
#define Tx_IntTx 0x00000080 /* Interrupt on Tx */
#define Tx_Under 0x00000100 /* Underrun */
#define Tx_Defer 0x00000200 /* Deferral */
#define Tx_NCarr 0x00000400 /* No Carrier */
#define Tx_10Stat 0x00000800 /* 10Mbps Status */
#define Tx_LateColl 0x00001000 /* Late Collision */
#define Tx_TxPar 0x00002000 /* Tx Parity Error */
#define Tx_Comp 0x00004000 /* Completion */
#define Tx_Halted 0x00008000 /* Tx Halted */
#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
/* Rx_Ctl bit assign -------------------------------------------------------- */
#define Rx_EnGood 0x00004000 /* 1:Enable Good */
#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
#define Rx_ShortEn 0x00000008 /* 1:Short Enable */
#define Rx_LongEn 0x00000004 /* 1:Long Enable */
#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
#define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */
/* Rx_Stat bit assign ------------------------------------------------------- */
#define Rx_Halted 0x00008000 /* Rx Halted */
#define Rx_Good 0x00004000 /* Rx Good */
#define Rx_RxPar 0x00002000 /* Rx Parity Error */
#define Rx_TypePkt 0x00001000 /* Rx Type Packet */
#define Rx_LongErr 0x00000800 /* Rx Long Error */
#define Rx_Over 0x00000400 /* Rx Overflow */
#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
#define Rx_Align 0x00000100 /* Rx Alignment Error */
#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
#define Rx_IntRx 0x00000040 /* Rx Interrupt */
#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
#define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */
#define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */
/* Int_En bit assign -------------------------------------------------------- */
#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
/* Exhausted Enable */
/* Int_Src bit assign ------------------------------------------------------- */
#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
#define Int_SWInt 0x00000020 /* 1:Software request & Clear */
#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
/* MD_CA bit assign --------------------------------------------------------- */
#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
/*
* Descriptors
*/
/* Frame descripter */
struct FDesc {
	/* Frame descriptor header shared by Tx, Rx and free-buffer lists.
	 * volatile: fields are concurrently updated by the DMA engine. */
	volatile __u32 FDNext;		/* bus address of next FD (FD_Next_EOL terminates) */
	volatile __u32 FDSystem;	/* driver cookie; holds tx_skbs[] index or 0xffffffff */
	volatile __u32 FDStat;
	volatile __u32 FDCtl;		/* length/BD count/ownership (FD_* bits above) */
};
/* Buffer descripter */
struct BDesc {
	/* Buffer descriptor: one DMA data buffer attached to an FDesc. */
	volatile __u32 BuffData;	/* bus address of the data buffer */
	volatile __u32 BDCtl;		/* length/ID/ownership (BD_* bits above) */
};
#define FD_ALIGN 16
/* Frame Descripter bit assign ---------------------------------------------- */
#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
#define FD_FrmOpt_Packing 0x04000000 /* Rx only */
#define FD_CownsFD 0x80000000 /* FD Controller owner bit */
#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
#define FD_BDCnt_SHIFT 16
/* Buffer Descripter bit assign --------------------------------------------- */
#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
#define BD_RxBDID_SHIFT 16
#define BD_RxBDSeqN_SHIFT 24
/* Some useful constants. */
#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
Tx_En) /* maybe 0x7b01 */
/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
#define INT_EN_CMD (Int_NRAbtEn | \
Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
Int_STargAbtEn | \
Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
#define DMA_CTL_CMD DMA_BURST_SIZE
#define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF)
/* Tuning parameters */
#define DMA_BURST_SIZE 32
#define TX_THRESHOLD 1024
/* used threshold with packet max byte for low pci transfer ability.*/
#define TX_THRESHOLD_MAX 1536
/* setting threshold max value when overrun error occurred this count. */
#define TX_THRESHOLD_KEEP_LIMIT 10
/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
#define FD_PAGE_NUM 4
#define RX_BUF_NUM 128 /* < 256 */
#define RX_FD_NUM 256 /* >= 32 */
#define TX_FD_NUM 128
#if RX_CTL_CMD & Rx_LongEn
#define RX_BUF_SIZE PAGE_SIZE
#elif RX_CTL_CMD & Rx_StripCRC
#define RX_BUF_SIZE \
L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
#else
#define RX_BUF_SIZE \
L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
#endif
#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
#define NAPI_WEIGHT 16
/* Transmit descriptor: one FD with a single BD (batch mode, 1 BD per FD
 * — see the comment in struct tc35815_local). */
struct TxFD {
	struct FDesc fd;
	struct BDesc bd;
	struct BDesc unused;	/* padding to keep hardware layout */
};
/* Receive descriptor: FD followed by a hardware-written, variable number
 * of BDs (count encoded in fd.FDCtl via FD_BDCnt_MASK). */
struct RxFD {
	struct FDesc fd;
	struct BDesc bd[0]; /* variable length */
};
/* Free-buffer list: one circular FD owning RX_BUF_NUM receive buffers. */
struct FrFD {
	struct FDesc fd;
	struct BDesc bd[RX_BUF_NUM];
};
#define tc_readl(addr) ioread32(addr)
#define tc_writel(d, addr) iowrite32(d, addr)
#define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
/* Information that need to be kept for each controller. */
struct tc35815_local {
struct pci_dev *pci_dev;
struct net_device *dev;
struct napi_struct napi;
/* statistics */
struct {
int max_tx_qlen;
int tx_ints;
int rx_ints;
int tx_underrun;
} lstats;
/* Tx control lock. This protects the transmit buffer ring
* state along with the "tx full" state of the driver. This
* means all netif_queue flow control actions are protected
* by this lock as well.
*/
spinlock_t lock;
spinlock_t rx_lock;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
int duplex;
int speed;
int link;
struct work_struct restart_work;
/*
* Transmitting: Batch Mode.
* 1 BD in 1 TxFD.
* Receiving: Non-Packing Mode.
* 1 circular FD for Free Buffer List.
* RX_BUF_NUM BD in Free Buffer FD.
* One Free Buffer BD has ETH_FRAME_LEN data buffer.
*/
void *fd_buf; /* for TxFD, RxFD, FrFD */
dma_addr_t fd_buf_dma;
struct TxFD *tfd_base;
unsigned int tfd_start;
unsigned int tfd_end;
struct RxFD *rfd_base;
struct RxFD *rfd_limit;
struct RxFD *rfd_cur;
struct FrFD *fbl_ptr;
unsigned int fbl_count;
struct {
struct sk_buff *skb;
dma_addr_t skb_dma;
} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
u32 msg_enable;
enum tc35815_chiptype chiptype;
};
static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
{
return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
}
#ifdef DEBUG
/* Inverse of fd_virt_to_bus(): map a DMA bus address back to the CPU
 * pointer inside fd_buf. Debug-only helper. */
static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
{
	dma_addr_t offset = bus - lp->fd_buf_dma;

	return (u8 *)lp->fd_buf + offset;
}
#endif
/*
 * Allocate one receive skb and map it for DMA.
 * On success *dma_handle holds the mapped bus address of skb->data;
 * returns NULL on allocation or mapping failure.
 * NOTE: the DMA mapping covers skb->data BEFORE skb_reserve(), so the
 * hardware writes from the buffer start while the IP header lands
 * 4-byte aligned for the stack.
 */
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
				       struct pci_dev *hwdev,
				       dma_addr_t *dma_handle)
{
	struct sk_buff *skb;
	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
	if (!skb)
		return NULL;
	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_reserve(skb, 2);	/* make IP header 4byte aligned */
	return skb;
}
/* Undo alloc_rxbuf_skb(): unmap the DMA buffer, then release the skb.
 * Safe from any context (dev_kfree_skb_any). */
static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
	pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(skb);
}
/* Index to functions, as function prototypes. */
static int tc35815_open(struct net_device *dev);
static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
static int tc35815_rx(struct net_device *dev, int limit);
static int tc35815_poll(struct napi_struct *napi, int budget);
static void tc35815_txdone(struct net_device *dev);
static int tc35815_close(struct net_device *dev);
static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
static void tc35815_set_multicast_list(struct net_device *dev);
static void tc35815_tx_timeout(struct net_device *dev);
static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev);
#endif
static const struct ethtool_ops tc35815_ethtool_ops;
/* Example routines you must write ;->. */
static void tc35815_chip_reset(struct net_device *dev);
static void tc35815_chip_init(struct net_device *dev);
#ifdef DEBUG
static void panic_queues(struct net_device *dev);
#endif
static void tc35815_restart_work(struct work_struct *work);
/*
 * MII management read: issue a read command via MD_CA, busy-poll until
 * the controller clears MD_CA_Busy, then return the 16-bit register
 * value from MD_Data. Returns -EIO if the chip stays busy for ~1s.
 */
static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *dev = bus->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long timeout = jiffies + HZ;

	/* PHY address in bits 9:5, register number in bits 4:0 */
	tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
	udelay(12); /* it takes 32 x 400ns at least */
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}
	return tc_readl(&tr->MD_Data) & 0xffff;
}
/*
 * MII management write: load MD_Data, issue a write command via MD_CA
 * (MD_CA_Wr), and busy-poll for completion. Returns 0 on success or
 * -EIO if the chip stays busy for ~1s.
 */
static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
{
	struct net_device *dev = bus->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long timeout = jiffies + HZ;

	tc_writel(val, &tr->MD_Data);
	tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
		  &tr->MD_CA);
	udelay(12); /* it takes 32 x 400ns at least */
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}
	return 0;
}
/*
 * phylib link-change callback (registered via phy_connect in
 * tc_mii_probe). Reprograms MAC duplex when speed/duplex changed and
 * tracks link up/down transitions; logs the new state when
 * NETIF_MSG_LINK is enabled. Runs under lp->lock for the register and
 * cached-state updates.
 */
static void tc_handle_link_change(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link &&
	    (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		u32 reg;
		/* Halt the MAC (MAC_HaltReq) before touching the duplex
		 * bit, then release the halt afterwards. */
		reg = tc_readl(&tr->MAC_Ctl);
		reg |= MAC_HaltReq;
		tc_writel(reg, &tr->MAC_Ctl);
		if (phydev->duplex == DUPLEX_FULL)
			reg |= MAC_FullDup;
		else
			reg &= ~MAC_FullDup;
		tc_writel(reg, &tr->MAC_Ctl);
		reg &= ~MAC_HaltReq;
		tc_writel(reg, &tr->MAC_Ctl);
		/*
		 * TX4939 PCFG.SPEEDn bit will be changed on
		 * NETDEV_CHANGE event.
		 */
		/*
		 * WORKAROUND: enable LostCrS only if half duplex
		 * operation.
		 * (TX4939 does not have EnLCarr)
		 */
		if (phydev->duplex == DUPLEX_HALF &&
		    lp->chiptype != TC35815_TX4939)
			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
				  &tr->Tx_Ctl);
		lp->speed = phydev->speed;
		lp->duplex = phydev->duplex;
		status_change = 1;
	}

	if (phydev->link != lp->link) {
		if (phydev->link) {
			/* delayed promiscuous enabling */
			if (dev->flags & IFF_PROMISC)
				tc35815_set_multicast_list(dev);
		} else {
			/* link lost: invalidate cached speed/duplex */
			lp->speed = 0;
			lp->duplex = -1;
		}
		lp->link = phydev->link;
		status_change = 1;
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (status_change && netif_msg_link(lp)) {
		phy_print_status(phydev);
		pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
			 dev->name,
			 phy_read(phydev, MII_BMCR),
			 phy_read(phydev, MII_BMSR),
			 phy_read(phydev, MII_LPA));
	}
}
/*
 * Find the single PHY on the MDIO bus and attach to it.
 * Fails with -EINVAL if more than one PHY responds and -ENODEV if none
 * does. Applies the module-parameter speed/duplex restrictions by
 * masking the PHY's advertised features. Returns 0 on success.
 */
static int tc_mii_probe(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;
	u32 dropmask;

	/* find the first phy */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (lp->mii_bus->phy_map[phy_addr]) {
			if (phydev) {
				printk(KERN_ERR "%s: multiple PHYs found\n",
				       dev->name);
				return -EINVAL;
			}
			phydev = lp->mii_bus->phy_map[phy_addr];
			break;
		}
	}
	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* attach the mac to the phy */
	/* TX4939 uses RMII; the discrete chips use MII */
	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &tc_handle_link_change,
			     lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}
	printk(KERN_INFO "%s: attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, id=%x)\n",
	       dev->name, phydev->drv->name, dev_name(&phydev->dev),
	       phydev->phy_id);

	/* mask with MAC supported features */
	phydev->supported &= PHY_BASIC_FEATURES;
	dropmask = 0;
	/* options.speed / options.duplex come from module parameters */
	if (options.speed == 10)
		dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
	else if (options.speed == 100)
		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
	if (options.duplex == 1)
		dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
	else if (options.duplex == 2)
		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
	phydev->supported &= ~dropmask;
	phydev->advertising = phydev->supported;

	/* start with link state unknown */
	lp->link = 0;
	lp->speed = 0;
	lp->duplex = -1;
	lp->phy_dev = phydev;

	return 0;
}
/*
 * Allocate and register the MDIO bus, then probe for the PHY.
 * Uses the standard goto-cleanup chain so each failure point unwinds
 * exactly what was set up before it. Returns 0 or a negative errno.
 */
static int tc_mii_init(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int err;
	int i;

	lp->mii_bus = mdiobus_alloc();
	if (lp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	lp->mii_bus->name = "tc35815_mii_bus";
	lp->mii_bus->read = tc_mdio_read;
	lp->mii_bus->write = tc_mdio_write;
	/* bus id derived from PCI bus/devfn so it is unique per adapter */
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
	lp->mii_bus->priv = dev;
	lp->mii_bus->parent = &lp->pci_dev->dev;
	lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!lp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mii_bus;
	}

	/* no PHY interrupt line wired up: poll every address */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		lp->mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(lp->mii_bus);
	if (err)
		goto err_out_free_mdio_irq;
	err = tc_mii_probe(dev);
	if (err)
		goto err_out_unregister_bus;
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(lp->mii_bus);
err_out_free_mdio_irq:
	kfree(lp->mii_bus->irq);
err_out_free_mii_bus:
	mdiobus_free(lp->mii_bus);
err_out:
	return err;
}
#ifdef CONFIG_CPU_TX49XX
/*
* Find a platform_device providing a MAC address. The platform code
* should provide a "tc35815-mac" device with a MAC address in its
* platform_data.
*/
/* bus_find_device() match callback: select the "tc35815-mac" platform
 * device whose id equals this adapter's PCI IRQ number. */
static int tc35815_mac_match(struct device *dev, void *data)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct pci_dev *pci_dev = data;
	unsigned int id = pci_dev->irq;

	if (strcmp(plat_dev->name, "tc35815-mac") != 0)
		return 0;
	return plat_dev->id == id;
}
/*
 * Fetch the MAC address from a matching "tc35815-mac" platform device
 * (see the comment block above). Copies its platform_data into
 * dev->dev_addr and validates it. Returns 0 on success, -ENODEV if no
 * device was found or the address is invalid.
 */
static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct device *pd = bus_find_device(&platform_bus_type, NULL,
					    lp->pci_dev, tc35815_mac_match);
	if (pd) {
		if (pd->platform_data)
			memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
		put_device(pd);	/* drop ref taken by bus_find_device() */
		return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
	}
	return -ENODEV;
}
#else
/* Non-TX49XX build: no platform fallback for the MAC address. */
static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
	return -ENODEV;
}
#endif
/*
 * Read the station address from the on-board EEPROM via PROM_Ctl,
 * 16 bits (two address bytes, little-endian) per transaction.
 * Falls back to the platform-device address if the EEPROM content is
 * not a valid unicast address. Returns 0 on success, -ENODEV otherwise.
 * NOTE(review): the PROM_Busy polls have no timeout — a dead EEPROM
 * would hang here; left as-is to preserve established behavior.
 */
static int tc35815_init_dev_addr(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int i;

	while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
		;
	for (i = 0; i < 6; i += 2) {
		unsigned short data;
		/* word offset: MAC address starts at PROM word 2 */
		tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
		while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
			;
		data = tc_readl(&tr->PROM_Data);
		dev->dev_addr[i] = data & 0xff;
		dev->dev_addr[i+1] = data >> 8;
	}
	if (!is_valid_ether_addr(dev->dev_addr))
		return tc35815_read_plat_dev_addr(dev);
	return 0;
}
/* net_device_ops: wires the driver entry points into the net core. */
static const struct net_device_ops tc35815_netdev_ops = {
	.ndo_open		= tc35815_open,
	.ndo_stop		= tc35815_close,
	.ndo_start_xmit		= tc35815_send_packet,
	.ndo_get_stats		= tc35815_get_stats,
	.ndo_set_rx_mode	= tc35815_set_multicast_list,
	.ndo_tx_timeout		= tc35815_tx_timeout,
	.ndo_do_ioctl		= tc35815_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tc35815_poll_controller,
#endif
};
/*
 * PCI probe: enable and map the device (managed pcim_* helpers, so the
 * PCI resources are released automatically), allocate the netdev,
 * reset the chip, read the MAC address, register the interface and
 * bring up the MDIO/PHY layer. Returns 0 or a negative errno.
 */
static int tc35815_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	void __iomem *ioaddr = NULL;
	struct net_device *dev;
	struct tc35815_local *lp;
	int rc;
	static int printed_version;

	/* print the banner only once, on the first probed adapter */
	if (!printed_version++) {
		printk(version);
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "speed:%d duplex:%d\n",
			   options.speed, options.duplex);
	}

	if (!pdev->irq) {
		dev_warn(&pdev->dev, "no IRQ assigned.\n");
		return -ENODEV;
	}

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*lp));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);
	lp->dev = dev;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;
	rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);	/* BAR1 = registers */
	if (rc)
		goto err_out;
	pci_set_master(pdev);
	ioaddr = pcim_iomap_table(pdev)[1];

	/* Initialize the device structure. */
	dev->netdev_ops = &tc35815_netdev_ops;
	dev->ethtool_ops = &tc35815_ethtool_ops;
	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long)ioaddr;

	INIT_WORK(&lp->restart_work, tc35815_restart_work);
	spin_lock_init(&lp->lock);
	spin_lock_init(&lp->rx_lock);
	lp->pci_dev = pdev;
	lp->chiptype = ent->driver_data;

	lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
	pci_set_drvdata(pdev, dev);

	/* Soft reset the chip. */
	tc35815_chip_reset(dev);

	/* Retrieve the ethernet address. */
	if (tc35815_init_dev_addr(dev)) {
		dev_warn(&pdev->dev, "not valid ether addr\n");
		eth_hw_addr_random(dev);
	}

	rc = register_netdev(dev);
	if (rc)
		goto err_out;

	printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
	       dev->name,
	       chip_info[ent->driver_data].name,
	       dev->base_addr,
	       dev->dev_addr,
	       dev->irq);

	rc = tc_mii_init(dev);
	if (rc)
		goto err_out_unregister;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out:
	free_netdev(dev);
	return rc;
}
/*
 * PCI remove: tear down in reverse order of tc35815_init_one() —
 * detach the PHY, unregister and free the MDIO bus, then unregister
 * and free the netdev. PCI resources were pcim_-managed and are
 * released automatically.
 */
static void tc35815_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);

	phy_disconnect(lp->phy_dev);
	mdiobus_unregister(lp->mii_bus);
	kfree(lp->mii_bus->irq);
	mdiobus_free(lp->mii_bus);
	unregister_netdev(dev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
/*
 * Build (or rebuild) the descriptor rings inside the coherent fd_buf
 * area: RxFDs, then TxFDs (circularly linked), then the free-buffer
 * list FrFD holding the receive skbs. On first call it also allocates
 * fd_buf and the receive skbs; on subsequent calls it just zeroes the
 * pages and relinks. Returns 0 or -ENOMEM.
 */
static int
tc35815_init_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;
	unsigned long fd_addr;

	if (!lp->fd_buf) {
		/* compile/design-time check that everything fits the
		 * FD_PAGE_NUM pages (see layout comment at FD_PAGE_NUM) */
		BUG_ON(sizeof(struct FDesc) +
		       sizeof(struct BDesc) * RX_BUF_NUM +
		       sizeof(struct FDesc) * RX_FD_NUM +
		       sizeof(struct TxFD) * TX_FD_NUM >
		       PAGE_SIZE * FD_PAGE_NUM);

		lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
						  PAGE_SIZE * FD_PAGE_NUM,
						  &lp->fd_buf_dma);
		if (!lp->fd_buf)
			return -ENOMEM;
		for (i = 0; i < RX_BUF_NUM; i++) {
			lp->rx_skbs[i].skb =
				alloc_rxbuf_skb(dev, lp->pci_dev,
						&lp->rx_skbs[i].skb_dma);
			if (!lp->rx_skbs[i].skb) {
				/* unwind every skb allocated so far */
				while (--i >= 0) {
					free_rxbuf_skb(lp->pci_dev,
						       lp->rx_skbs[i].skb,
						       lp->rx_skbs[i].skb_dma);
					lp->rx_skbs[i].skb = NULL;
				}
				pci_free_consistent(lp->pci_dev,
						    PAGE_SIZE * FD_PAGE_NUM,
						    lp->fd_buf,
						    lp->fd_buf_dma);
				lp->fd_buf = NULL;
				return -ENOMEM;
			}
		}
		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
		       dev->name, lp->fd_buf);
		printk("\n");
	} else {
		/* re-init: wipe the descriptor pages, keep the skbs */
		for (i = 0; i < FD_PAGE_NUM; i++)
			clear_page((void *)((unsigned long)lp->fd_buf +
					    i * PAGE_SIZE));
	}
	fd_addr = (unsigned long)lp->fd_buf;

	/* Free Descriptors (for Receive) */
	lp->rfd_base = (struct RxFD *)fd_addr;
	fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
	for (i = 0; i < RX_FD_NUM; i++)
		lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
	lp->rfd_cur = lp->rfd_base;
	lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);

	/* Transmit Descriptors */
	lp->tfd_base = (struct TxFD *)fd_addr;
	fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
	/* link each TxFD to the next; FDSystem = 0xffffffff means "no skb" */
	for (i = 0; i < TX_FD_NUM; i++) {
		lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
		lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
	}
	/* close the ring: last TxFD points back to the first */
	lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
	lp->tfd_start = 0;
	lp->tfd_end = 0;

	/* Buffer List (for Receive) */
	lp->fbl_ptr = (struct FrFD *)fd_addr;
	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
	/*
	 * move all allocated skbs to head of rx_skbs[] array.
	 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
	 * tc35815_rx() had failed.
	 */
	lp->fbl_count = 0;
	for (i = 0; i < RX_BUF_NUM; i++) {
		if (lp->rx_skbs[i].skb) {
			if (i != lp->fbl_count) {
				lp->rx_skbs[lp->fbl_count].skb =
					lp->rx_skbs[i].skb;
				lp->rx_skbs[lp->fbl_count].skb_dma =
					lp->rx_skbs[i].skb_dma;
			}
			lp->fbl_count++;
		}
	}
	for (i = 0; i < RX_BUF_NUM; i++) {
		if (i >= lp->fbl_count) {
			/* no skb: leave BD empty (not hardware-owned) */
			lp->fbl_ptr->bd[i].BuffData = 0;
			lp->fbl_ptr->bd[i].BDCtl = 0;
			continue;
		}
		lp->fbl_ptr->bd[i].BuffData =
			cpu_to_le32(lp->rx_skbs[i].skb_dma);
		/* BDID is index of FrFD.bd[] */
		lp->fbl_ptr->bd[i].BDCtl =
			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
				    RX_BUF_SIZE);
	}

	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
	return 0;
}
/*
 * Drop every pending transmit skb (unmap, free, clear the FDSystem
 * cookie) and rebuild the rings via tc35815_init_queues(). Used on
 * restart while the chip is halted.
 */
static void
tc35815_clear_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_FD_NUM; i++) {
		/* FDSystem holds the tx_skbs[] index, 0xffffffff = none */
		u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
		struct sk_buff *skb =
			fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[i].skb != skb) {
			printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
		if (skb) {
			pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
			lp->tx_skbs[i].skb = NULL;
			lp->tx_skbs[i].skb_dma = 0;
			dev_kfree_skb_any(skb);
		}
		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
	}

	tc35815_init_queues(dev);
}
/*
 * Release all queue resources: pending transmit skbs, receive skbs
 * (via free_rxbuf_skb) and the coherent fd_buf descriptor area.
 * Called on close/teardown.
 *
 * Fix: the original called dev_kfree_skb(skb) BEFORE
 * pci_unmap_single(..., skb->len, ...), reading skb->len from a freed
 * skb — a use-after-free. Unmap first, then free, exactly as
 * tc35815_clear_queues() already does.
 */
static void
tc35815_free_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	if (lp->tfd_base) {
		for (i = 0; i < TX_FD_NUM; i++) {
			/* FDSystem holds the tx_skbs[] index, 0xffffffff = none */
			u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
			struct sk_buff *skb =
				fdsystem != 0xffffffff ?
				lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
			if (lp->tx_skbs[i].skb != skb) {
				printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
				panic_queues(dev);
			}
#else
			BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
			if (skb) {
				/* unmap BEFORE freeing: skb->len is still needed */
				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
				lp->tx_skbs[i].skb = NULL;
				lp->tx_skbs[i].skb_dma = 0;
				dev_kfree_skb(skb);
			}
			lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
		}
	}

	lp->rfd_base = NULL;
	lp->rfd_limit = NULL;
	lp->rfd_cur = NULL;
	lp->fbl_ptr = NULL;

	for (i = 0; i < RX_BUF_NUM; i++) {
		if (lp->rx_skbs[i].skb) {
			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
				       lp->rx_skbs[i].skb_dma);
			lp->rx_skbs[i].skb = NULL;
		}
	}
	if (lp->fd_buf) {
		pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
				    lp->fd_buf, lp->fd_buf_dma);
		lp->fd_buf = NULL;
	}
}
static void
dump_txfd(struct TxFD *fd)
{
printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
le32_to_cpu(fd->fd.FDNext),
le32_to_cpu(fd->fd.FDSystem),
le32_to_cpu(fd->fd.FDStat),
le32_to_cpu(fd->fd.FDCtl));
printk("BD: ");
printk(" %08x %08x",
le32_to_cpu(fd->bd.BuffData),
le32_to_cpu(fd->bd.BDCtl));
printk("\n");
}
/*
 * Debug dump of one receive frame descriptor and up to 8 of its buffer
 * descriptors. Returns the (clamped) BD count so callers can skip over
 * the BDs that follow the FD in memory (see panic_queues()).
 */
static int
dump_rxfd(struct RxFD *fd)
{
	int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
	if (bd_count > 8)
		bd_count = 8;	/* cap the dump */
	printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
	       le32_to_cpu(fd->fd.FDNext),
	       le32_to_cpu(fd->fd.FDSystem),
	       le32_to_cpu(fd->fd.FDStat),
	       le32_to_cpu(fd->fd.FDCtl));
	if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
		return 0;	/* still owned by the controller: no valid BDs */
	printk("BD: ");
	for (i = 0; i < bd_count; i++)
		printk(" %08x %08x",
		       le32_to_cpu(fd->bd[i].BuffData),
		       le32_to_cpu(fd->bd[i].BDCtl));
	printk("\n");
	return bd_count;
}
#ifdef DEBUG
/* Debug dump of the free-buffer list FD and all RX_BUF_NUM of its BDs. */
static void
dump_frfd(struct FrFD *fd)
{
	int i;
	printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
	       le32_to_cpu(fd->fd.FDNext),
	       le32_to_cpu(fd->fd.FDSystem),
	       le32_to_cpu(fd->fd.FDStat),
	       le32_to_cpu(fd->fd.FDCtl));
	printk("BD: ");
	for (i = 0; i < RX_BUF_NUM; i++)
		printk(" %08x %08x",
		       le32_to_cpu(fd->bd[i].BuffData),
		       le32_to_cpu(fd->bd[i].BDCtl));
	printk("\n");
}
/*
 * Dump the complete queue state (all TxFDs, RxFDs and the free-buffer
 * list) and panic. Called when a descriptor consistency check fails in
 * the DEBUG build.
 */
static void
panic_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	printk("TxFD base %p, start %u, end %u\n",
	       lp->tfd_base, lp->tfd_start, lp->tfd_end);
	printk("RxFD base %p limit %p cur %p\n",
	       lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
	printk("FrFD %p\n", lp->fbl_ptr);
	for (i = 0; i < TX_FD_NUM; i++)
		dump_txfd(&lp->tfd_base[i]);
	for (i = 0; i < RX_FD_NUM; i++) {
		int bd_count = dump_rxfd(&lp->rfd_base[i]);
		i += (bd_count + 1) / 2;	/* skip BDs */
	}
	dump_frfd(lp->fbl_ptr);
	panic("%s: Illegal queue state.", dev->name);
}
#endif
/* Debug-print the Ethernet header at @add: "src => dst : ethertype". */
static void print_eth(const u8 *add)
{
	const u8 *dst = add;
	const u8 *src = add + 6;

	printk(KERN_DEBUG "print_eth(%p)\n", add);
	printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
	       src, dst, add[12], add[13]);
}
/* Tx ring is full when advancing the producer (tfd_start) would collide
 * with the consumer (tfd_end). */
static int tc35815_tx_full(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	unsigned int next = (lp->tfd_start + 1) % TX_FD_NUM;

	return next == lp->tfd_end;
}
/*
 * Full recovery path: reset the PHY (polling up to ~100us for BMCR_RESET
 * to self-clear), then — under rx_lock and lock, in that order — reset
 * the chip, flush and rebuild the queues, re-init the chip and restore
 * the CAM/multicast configuration, finally wake the Tx queue.
 * Runs in process context (workqueue), hence spin_lock_bh + spin_lock_irq.
 */
static void tc35815_restart(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	if (lp->phy_dev) {
		int timeout;

		phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
		timeout = 100;
		while (--timeout) {
			if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
				break;
			udelay(1);
		}
		if (!timeout)
			printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
	}

	spin_lock_bh(&lp->rx_lock);
	spin_lock_irq(&lp->lock);
	tc35815_chip_reset(dev);
	tc35815_clear_queues(dev);
	tc35815_chip_init(dev);
	/* Reconfigure CAM again since tc35815_chip_init() initialize it. */
	tc35815_set_multicast_list(dev);
	spin_unlock_irq(&lp->lock);
	spin_unlock_bh(&lp->rx_lock);

	netif_wake_queue(dev);
}
/* Workqueue trampoline: recover lp->dev from the embedded work_struct
 * and run the restart in process context. */
static void tc35815_restart_work(struct work_struct *work)
{
	struct tc35815_local *lp =
		container_of(work, struct tc35815_local, restart_work);
	struct net_device *dev = lp->dev;

	tc35815_restart(dev);
}
/*
 * Mask all chip interrupts (Int_En = 0 plus DMA_IntMask) and queue the
 * restart work. Safe to call from interrupt context; the actual
 * recovery runs later in tc35815_restart_work().
 */
static void tc35815_schedule_restart(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long flags;

	/* disable interrupts */
	spin_lock_irqsave(&lp->lock, flags);
	tc_writel(0, &tr->Int_En);
	tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
	schedule_work(&lp->restart_work);
	spin_unlock_irqrestore(&lp->lock, flags);
}
/* ndo_tx_timeout: log the Tx status register, count the error and
 * schedule a full adapter restart. */
static void tc35815_tx_timeout(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
	       dev->name, tc_readl(&tr->Tx_Stat));

	/* Try to restart the adaptor. */
	tc35815_schedule_restart(dev);
	dev->stats.tx_errors++;
}
/*
 * Open/initialize the controller. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is non-reboot way to recover if something goes wrong.
 */
static int
tc35815_open(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	/*
	 * This is used if the interrupt line can turned off (shared).
	 * See 3c503.c for an example of selecting the IRQ at config-time.
	 */
	if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
			dev->name, dev))
		return -EAGAIN;

	tc35815_chip_reset(dev);

	/* Allocate RX/TX descriptor rings; undo the IRQ grab on failure */
	if (tc35815_init_queues(dev) != 0) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	napi_enable(&lp->napi);

	/* Reset the hardware here. Don't forget to set the station address. */
	spin_lock_irq(&lp->lock);
	tc35815_chip_init(dev);
	spin_unlock_irq(&lp->lock);

	/* Carrier stays off until the PHY state machine reports link up */
	netif_carrier_off(dev);
	/* schedule a link state check */
	phy_start(lp->phy_dev);

	/* We are now ready to accept transmit requeusts from
	 * the queueing layer of the networking.
	 */
	netif_start_queue(dev);

	return 0;
}
/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
/*
 * ndo_start_xmit handler: map the skb for DMA, fill the next TxFD in
 * the ring and, if the DMA transmitter was idle, kick it.  Stops the
 * queue when the ring becomes full; always returns NETDEV_TX_OK.
 */
static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct TxFD *txfd;
	unsigned long flags;

	/* If some error occurs while trying to transmit this
	 * packet, you should return '1' from this function.
	 * In such a case you _may not_ do anything to the
	 * SKB, it is still owned by the network queueing
	 * layer when an error is returned. This means you
	 * may not modify any SKB fields, you may not free
	 * the SKB, etc.
	 */

	/* This is the most common case for modern hardware.
	 * The spinlock protects this code from the TX complete
	 * hardware interrupt handler. Queue flow control is
	 * thus managed under this lock as well.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	/* failsafe... (handle txdone now if half of FDs are used) */
	if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
	    TX_FD_NUM / 2)
		tc35815_txdone(dev);

	if (netif_msg_pktdata(lp))
		print_eth(skb->data);
#ifdef DEBUG
	if (lp->tx_skbs[lp->tfd_start].skb) {
		printk("%s: tx_skbs conflict.\n", dev->name);
		panic_queues(dev);
	}
#else
	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
	/* Remember the skb and its DMA mapping so txdone can unmap/free */
	lp->tx_skbs[lp->tfd_start].skb = skb;
	lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);

	/*add to ring */
	txfd = &lp->tfd_base[lp->tfd_start];
	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
	txfd->bd.BDCtl = cpu_to_le32(skb->len);
	/* FDSystem carries the ring index back to us on TX completion */
	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
	txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));

	if (lp->tfd_start == lp->tfd_end) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		/* Start DMA Transmitter. */
		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: starting TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
	} else {
		/* Transmitter already running: just link the FD into the
		 * chain; completion of the previous FD will pick it up. */
		txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: queueing TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
	}
	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tc35815_tx_full(dev)) {
		if (netif_msg_tx_queued(lp))
			printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
		netif_stop_queue(dev);
	}

	/* When the TX completion hw interrupt arrives, this
	 * is when the transmit statistics are updated.
	 */

	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
/* Interrupt status bits that indicate an unrecoverable bus/DMA fault */
#define FATAL_ERROR_INT \
	(Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
/*
 * tc35815_fatal_error_interrupt - handle an unrecoverable chip error.
 * @dev: network device
 * @status: interrupt status word (some FATAL_ERROR_INT bit is set)
 *
 * Logs which fatal condition(s) occurred and schedules a deferred full
 * restart of the controller.  Panics if fatal errors keep recurring
 * (more than 100 over the module lifetime — @count is static).
 */
static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
{
	static int count;

	/* Fix: log message previously read "Intterrupt" */
	printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
	       dev->name, status);
	if (status & Int_IntPCI)
		printk(" IntPCI");
	if (status & Int_DmParErr)
		printk(" DmParErr");
	if (status & Int_IntNRAbt)
		printk(" IntNRAbt");
	printk("\n");
	if (count++ > 100)
		panic("%s: Too many fatal errors.", dev->name);
	printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
	/* Try to restart the adaptor. */
	tc35815_schedule_restart(dev);
}
/*
 * tc35815_do_interrupt - service one interrupt status word.
 * @dev: network device
 * @status: interrupt source bits read from Int_Src
 * @limit: NAPI budget remaining for RX processing
 *
 * Returns the number of RX packets processed, 0 if only non-RX events
 * were handled, or -1 if nothing recognized was pending.
 */
static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int ret = -1;

	/* Fatal errors... */
	if (status & FATAL_ERROR_INT) {
		tc35815_fatal_error_interrupt(dev, status);
		return 0;
	}
	/* recoverable errors */
	if (status & Int_IntFDAEx) {
		/* Free Descriptor Area exhausted: RX frames were dropped */
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Free Descriptor Area Exhausted (%#x).\n",
				 status);
		dev->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntBLEx) {
		/* Buffer List exhausted: no RX buffers were available */
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Buffer List Exhausted (%#x).\n",
				 status);
		dev->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntExBD) {
		/* Fix: message previously read "Descriptiors" */
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Excessive Buffer Descriptors (%#x).\n",
				 status);
		dev->stats.rx_length_errors++;
		ret = 0;
	}

	/* normal notification */
	if (status & Int_IntMacRx) {
		/* Got a packet(s). */
		ret = tc35815_rx(dev, limit);
		lp->lstats.rx_ints++;
	}
	if (status & Int_IntMacTx) {
		/* Transmit complete. */
		lp->lstats.tx_ints++;
		spin_lock_irq(&lp->lock);
		tc35815_txdone(dev);
		spin_unlock_irq(&lp->lock);
		if (ret < 0)
			ret = 0;
	}
	return ret;
}
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
/*
 * Top-half IRQ handler (NAPI model): if our interrupts are currently
 * unmasked, mask them via DMA_Ctl and hand processing to the NAPI poll
 * routine.  Returns IRQ_NONE when the line fired while masked (shared
 * IRQ belongs to another device or we are already polling).
 */
static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	u32 dmactl = tc_readl(&tr->DMA_Ctl);

	if (!(dmactl & DMA_IntMask)) {
		/* disable interrupts */
		tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
		if (napi_schedule_prep(&lp->napi))
			__napi_schedule(&lp->napi);
		else {
			/* Should be impossible: IRQ while poll is active */
			printk(KERN_ERR "%s: interrupt taken in poll\n",
			       dev->name);
			BUG();
		}
		(void)tc_readl(&tr->Int_Src);	/* flush */
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: invoke the interrupt handler with the IRQ line disabled
 * so netconsole/netdump can drive the device without real interrupts.
 */
static void tc35815_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	tc35815_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/* We have a good packet(s), get it/them out of the buffers. */
/*
 * tc35815_rx - NAPI RX processing.
 * @dev: network device
 * @limit: maximum number of good packets to deliver this call
 *
 * Walks the RxFD ring while the chip has released descriptors to us
 * (FD_CownsFD clear): delivers good frames to the stack, accounts
 * errors, refills the free-buffer list with fresh skbs, and hands the
 * consumed RxFDs back to the controller.  Returns the number of packets
 * delivered.
 */
static int
tc35815_rx(struct net_device *dev, int limit)
{
	struct tc35815_local *lp = netdev_priv(dev);
	unsigned int fdctl;
	int i;
	int received = 0;

	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
		int pkt_len = fdctl & FD_FDLength_MASK;
		int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
#ifdef DEBUG
		struct RxFD *next_rfd;
#endif
#if (RX_CTL_CMD & Rx_StripCRC) == 0
		/* Chip left the FCS on the frame; don't count it as payload */
		pkt_len -= ETH_FCS_LEN;
#endif

		if (netif_msg_rx_status(lp))
			dump_rxfd(lp->rfd_cur);
		if (status & Rx_Good) {
			struct sk_buff *skb;
			unsigned char *data;
			int cur_bd;

			/* Respect the NAPI budget */
			if (--limit < 0)
				break;
			BUG_ON(bd_count > 1);
			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (cur_bd >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
			BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
			       (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
			if (!lp->rx_skbs[cur_bd].skb) {
				printk("%s: NULL skb.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(cur_bd >= RX_BUF_NUM);
#endif
			/* Take ownership of the skb backing this buffer */
			skb = lp->rx_skbs[cur_bd].skb;
			prefetch(skb->data);
			lp->rx_skbs[cur_bd].skb = NULL;
			pci_unmap_single(lp->pci_dev,
					 lp->rx_skbs[cur_bd].skb_dma,
					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			/* Without hardware RX alignment, shift the payload so
			 * the IP header ends up 4-byte aligned */
			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
				memmove(skb->data, skb->data - NET_IP_ALIGN,
					pkt_len);
			data = skb_put(skb, pkt_len);
			if (netif_msg_pktdata(lp))
				print_eth(data);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			received++;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		} else {
			dev->stats.rx_errors++;
			if (netif_msg_rx_err(lp))
				dev_info(&dev->dev, "Rx error (status %x)\n",
					 status & Rx_Stat_Mask);
			/* WORKAROUND: LongErr and CRCErr means Overflow. */
			if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
				status &= ~(Rx_LongErr|Rx_CRCErr);
				status |= Rx_Over;
			}
			if (status & Rx_LongErr)
				dev->stats.rx_length_errors++;
			if (status & Rx_Over)
				dev->stats.rx_fifo_errors++;
			if (status & Rx_CRCErr)
				dev->stats.rx_crc_errors++;
			if (status & Rx_Align)
				dev->stats.rx_frame_errors++;
		}

		if (bd_count > 0) {
			/* put Free Buffer back to controller */
			int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
			unsigned char id =
				(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
#ifdef DEBUG
			if (id >= RX_BUF_NUM) {
				printk("%s: invalid BDID.\n", dev->name);
				panic_queues(dev);
			}
#else
			BUG_ON(id >= RX_BUF_NUM);
#endif
			/* free old buffers */
			lp->fbl_count--;
			/* Top the free-buffer list back up to RX_BUF_NUM,
			 * allocating replacement skbs where needed */
			while (lp->fbl_count < RX_BUF_NUM) {
				unsigned char curid =
					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
#ifdef DEBUG
				bdctl = le32_to_cpu(bd->BDCtl);
				if (bdctl & BD_CownsBD) {
					printk("%s: Freeing invalid BD.\n",
					       dev->name);
					panic_queues(dev);
				}
#endif
				/* pass BD to controller */
				if (!lp->rx_skbs[curid].skb) {
					lp->rx_skbs[curid].skb =
						alloc_rxbuf_skb(dev,
								lp->pci_dev,
								&lp->rx_skbs[curid].skb_dma);
					if (!lp->rx_skbs[curid].skb)
						break; /* try on next reception */
					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
				}
				/* Note: BDLength was modified by chip. */
				bd->BDCtl = cpu_to_le32(BD_CownsBD |
							(curid << BD_RxBDID_SHIFT) |
							RX_BUF_SIZE);
				lp->fbl_count++;
			}
		}

		/* put RxFD back to controller */
#ifdef DEBUG
		next_rfd = fd_bus_to_virt(lp,
					  le32_to_cpu(lp->rfd_cur->fd.FDNext));
		if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
			printk("%s: RxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
			/* pass FD to controller */
#ifdef DEBUG
			lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
#else
			lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
#endif
			lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
			lp->rfd_cur++;
		}
		if (lp->rfd_cur > lp->rfd_limit)
			lp->rfd_cur = lp->rfd_base;
#ifdef DEBUG
		if (lp->rfd_cur != next_rfd)
			printk("rfd_cur = %p, next_rfd %p\n",
			       lp->rfd_cur, next_rfd);
#endif
	}

	return received;
}
/*
 * NAPI poll routine: repeatedly read and clear Int_Src, dispatching
 * each status word to tc35815_do_interrupt() until either the budget is
 * spent or no interrupt sources remain.  When work finishes under
 * budget, complete NAPI and unmask chip interrupts.
 */
static int tc35815_poll(struct napi_struct *napi, int budget)
{
	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
	struct net_device *dev = lp->dev;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int received = 0, handled;
	u32 status;

	spin_lock(&lp->rx_lock);
	status = tc_readl(&tr->Int_Src);
	do {
		/* BLEx, FDAEx will be cleared later */
		tc_writel(status & ~(Int_BLEx | Int_FDAEx),
			  &tr->Int_Src);	/* write to clear */
		handled = tc35815_do_interrupt(dev, status, budget - received);
		/* Now that buffers may have been refilled, acknowledge the
		 * exhaustion bits so the chip can resume reception */
		if (status & (Int_BLEx | Int_FDAEx))
			tc_writel(status & (Int_BLEx | Int_FDAEx),
				  &tr->Int_Src);
		if (handled >= 0) {
			received += handled;
			if (received >= budget)
				break;
		}
		status = tc_readl(&tr->Int_Src);
	} while (status);
	spin_unlock(&lp->rx_lock);

	if (received < budget) {
		napi_complete(napi);
		/* enable interrupts */
		tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
	}
	return received;
}
/* Any of these TX status bits indicates a transmit error */
#define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)

/*
 * Account one TxFD completion status word into dev->stats, raising the
 * TX FIFO threshold to maximum after repeated underruns, and optionally
 * logging a description of the error.
 */
static void
tc35815_check_tx_stat(struct net_device *dev, int status)
{
	struct tc35815_local *lp = netdev_priv(dev);
	const char *msg = NULL;

	/* count collisions */
	if (status & Tx_ExColl)
		dev->stats.collisions += 16;
	if (status & Tx_TxColl_MASK)
		dev->stats.collisions += status & Tx_TxColl_MASK;

	/* TX4939 does not have NCarr */
	if (lp->chiptype == TC35815_TX4939)
		status &= ~Tx_NCarr;
	/* WORKAROUND: ignore LostCrS in full duplex operation */
	if (!lp->link || lp->duplex == DUPLEX_FULL)
		status &= ~Tx_NCarr;

	if (!(status & TX_STA_ERR)) {
		/* no error. */
		dev->stats.tx_packets++;
		return;
	}

	dev->stats.tx_errors++;
	if (status & Tx_ExColl) {
		dev->stats.tx_aborted_errors++;
		msg = "Excessive Collision.";
	}
	if (status & Tx_Under) {
		dev->stats.tx_fifo_errors++;
		msg = "Tx FIFO Underrun.";
		if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
			lp->lstats.tx_underrun++;
			/* Too many underruns: bump the FIFO threshold to its
			 * maximum once, on the crossing of the limit */
			if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
				msg = "Tx FIFO Underrun.Change Tx threshold to max.";
			}
		}
	}
	if (status & Tx_Defer) {
		dev->stats.tx_fifo_errors++;
		msg = "Excessive Deferral.";
	}
	if (status & Tx_NCarr) {
		dev->stats.tx_carrier_errors++;
		msg = "Lost Carrier Sense.";
	}
	if (status & Tx_LateColl) {
		dev->stats.tx_aborted_errors++;
		msg = "Late Collision.";
	}
	if (status & Tx_TxPar) {
		dev->stats.tx_fifo_errors++;
		msg = "Transmit Parity Error.";
	}
	if (status & Tx_SQErr) {
		dev->stats.tx_heartbeat_errors++;
		msg = "Signal Quality Error.";
	}
	/* Only the last matching error's message is printed */
	if (msg && netif_msg_tx_err(lp))
		printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
}
/* This handles TX complete events posted by the device
 * via interrupts.
 */
/*
 * Reap completed TxFDs from tfd_end forward: account status, unmap and
 * free the transmitted skbs, and — if the DMA transmitter stopped at an
 * EOL while more frames are queued — restart it.  Wakes the netif queue
 * when space frees up.  Caller holds lp->lock.
 */
static void
tc35815_txdone(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct TxFD *txfd;
	unsigned int fdctl;

	txfd = &lp->tfd_base[lp->tfd_end];
	while (lp->tfd_start != lp->tfd_end &&
	       !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(txfd->fd.FDStat);
		struct sk_buff *skb;
		unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
		u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);

		if (netif_msg_tx_done(lp)) {
			printk("%s: complete TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc35815_check_tx_stat(dev, status);

		/* FDSystem holds the ring index stored at submit time;
		 * 0xffffffff marks an already-reaped descriptor */
		skb = fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[lp->tfd_end].skb != skb) {
			printk("%s: tx_skbs mismatch.\n", dev->name);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
#endif
		if (skb) {
			dev->stats.tx_bytes += skb->len;
			pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
			lp->tx_skbs[lp->tfd_end].skb = NULL;
			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
			dev_kfree_skb_any(skb);
		}
		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);

		lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
		txfd = &lp->tfd_base[lp->tfd_end];
#ifdef DEBUG
		if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
			printk("%s: TxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		if (fdnext & FD_Next_EOL) {
			/* DMA Transmitter has been stopping... */
			if (lp->tfd_end != lp->tfd_start) {
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
				struct TxFD *txhead = &lp->tfd_base[head];
				int qlen = (lp->tfd_start + TX_FD_NUM
					    - lp->tfd_end) % TX_FD_NUM;

#ifdef DEBUG
				if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
					printk("%s: TxFD FDCtl invalid.\n", dev->name);
					panic_queues(dev);
				}
#endif
				/* log max queue length */
				if (lp->lstats.max_tx_qlen < qlen)
					lp->lstats.max_tx_qlen = qlen;

				/* start DMA Transmitter again */
				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
				if (netif_msg_tx_queued(lp)) {
					printk("%s: start TxFD on queue.\n",
					       dev->name);
					dump_txfd(txfd);
				}
				tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
			}
			break;
		}
	}

	/* If we had stopped the queue due to a "tx full"
	 * condition, and space has now been made available,
	 * wake up the queue.
	 */
	if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
		netif_wake_queue(dev);
}
/* The inverse routine to tc35815_open(). */
/*
 * Teardown order matters: stop the stack-facing queue and NAPI first,
 * stop the PHY, flush any pending restart work, then quiesce the chip
 * before releasing the IRQ and freeing descriptor memory.
 */
static int
tc35815_close(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);
	if (lp->phy_dev)
		phy_stop(lp->phy_dev);
	cancel_work_sync(&lp->restart_work);

	/* Flush the Tx and disable Rx here. */
	tc35815_chip_reset(dev);
	free_irq(dev->irq, dev);
	tc35815_free_queues(dev);

	return 0;
}
/*
* Get the current statistics.
* This may be called with the card open or closed.
*/
static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
if (netif_running(dev))
/* Update the statistics from the device registers. */
dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
return &dev->stats;
}
/*
 * Program one 6-byte MAC address into CAM entry @index.  CAM memory is
 * addressed in 32-bit words, so each entry straddles a word boundary:
 * odd entries need a read-modify-write of the preceding shared word,
 * even entries of the following one.  CAM_Adr is saved and restored
 * around the update.
 */
static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int cam_index = index * 6;	/* byte offset of this entry in CAM */
	u32 cam_data;
	u32 saved_addr;

	saved_addr = tc_readl(&tr->CAM_Adr);

	if (netif_msg_hw(lp))
		printk(KERN_DEBUG "%s: CAM %d: %pM\n",
		       dev->name, index, addr);
	if (index & 1) {
		/* read modify write */
		tc_writel(cam_index - 2, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
		cam_data |= addr[0] << 8 | addr[1];
		tc_writel(cam_data, &tr->CAM_Data);
		/* write whole word */
		tc_writel(cam_index + 2, &tr->CAM_Adr);
		cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
		tc_writel(cam_data, &tr->CAM_Data);
	} else {
		/* write whole word */
		tc_writel(cam_index, &tr->CAM_Adr);
		cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
		tc_writel(cam_data, &tr->CAM_Data);
		/* read modify write */
		tc_writel(cam_index + 4, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
		cam_data |= addr[4] << 24 | (addr[5] << 16);
		tc_writel(cam_data, &tr->CAM_Data);
	}

	tc_writel(saved_addr, &tr->CAM_Adr);
}
/*
 * Set or clear the multicast filter for this adaptor.
 * num_addrs == -1	Promiscuous mode, receive all packets
 * num_addrs == 0	Normal mode, clear multicast list
 * num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 */
static void
tc35815_set_multicast_list(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	if (dev->flags & IFF_PROMISC) {
		/* With some (all?) 100MHalf HUB, controller will hang
		 * if we enabled promiscuous mode before linkup... */
		struct tc35815_local *lp = netdev_priv(dev);

		if (!lp->link)
			return;
		/* Enable promiscuous mode */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
		/* CAM 0, 1, 20 are reserved. */
		/* Disable promiscuous mode, use normal mode. */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int i;
		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);

		/* Disable CAM matching while the entries are rewritten */
		tc_writel(0, &tr->CAM_Ctl);
		/* Walk the address list, and load the filter */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* entry 0,1 is reserved. */
			tc35815_set_cam_entry(dev, i + 2, ha->addr);
			ena_bits |= CAM_Ena_Bit(i + 2);
			i++;
		}
		tc_writel(ena_bits, &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	} else {
		/* No multicast addresses: accept only our station address
		 * and broadcast */
		tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	}
}
/* ethtool get_drvinfo: report driver name, version and PCI bus location. */
static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tc35815_local *priv = netdev_priv(dev);

	strlcpy(info->bus_info, pci_name(priv->pci_dev), sizeof(info->bus_info));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
}
/* ethtool get_settings: delegate to the PHY layer; -ENODEV without a PHY. */
static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *priv = netdev_priv(dev);

	return priv->phy_dev ? phy_ethtool_gset(priv->phy_dev, cmd) : -ENODEV;
}
/* ethtool set_settings: delegate to the PHY layer; -ENODEV without a PHY. */
static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *priv = netdev_priv(dev);

	return priv->phy_dev ? phy_ethtool_sset(priv->phy_dev, cmd) : -ENODEV;
}
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 tc35815_get_msglevel(struct net_device *dev)
{
	struct tc35815_local *priv = netdev_priv(dev);

	return priv->msg_enable;
}
/* ethtool set_msglevel: store the new message-enable bitmask. */
static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
{
	struct tc35815_local *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}
/* ethtool get_sset_count: number of driver-private stats (ETH_SS_STATS). */
static int tc35815_get_sset_count(struct net_device *dev, int sset)
{
	struct tc35815_local *priv = netdev_priv(dev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	/* lstats is an all-int structure; one stat per member */
	return sizeof(priv->lstats) / sizeof(int);
}
/* ethtool get_ethtool_stats: export the driver-private counters, in the
 * same order as ethtool_stats_keys. */
static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
{
	struct tc35815_local *priv = netdev_priv(dev);

	*data++ = priv->lstats.max_tx_qlen;
	*data++ = priv->lstats.tx_ints;
	*data++ = priv->lstats.rx_ints;
	*data = priv->lstats.tx_underrun;
}
/* Names for the driver-private ethtool statistics; order must match
 * tc35815_get_ethtool_stats(). */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "max_tx_qlen" },
	{ "tx_ints" },
	{ "rx_ints" },
	{ "tx_underrun" },
};
/* ethtool get_strings: copy out the statistics key names.  Only
 * ETH_SS_STATS is advertised by get_sset_count, so @stringset is not
 * checked here. */
static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}
/* ethtool operations table for this driver */
static const struct ethtool_ops tc35815_ethtool_ops = {
	.get_drvinfo		= tc35815_get_drvinfo,
	.get_settings		= tc35815_get_settings,
	.set_settings		= tc35815_set_settings,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= tc35815_get_msglevel,
	.set_msglevel		= tc35815_set_msglevel,
	.get_strings		= tc35815_get_strings,
	.get_sset_count		= tc35815_get_sset_count,
	.get_ethtool_stats	= tc35815_get_ethtool_stats,
};
/* ndo_do_ioctl hook: forward MII ioctls to the PHY layer.  Requires the
 * interface to be up (-EINVAL) and a PHY to be attached (-ENODEV). */
static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tc35815_local *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	return priv->phy_dev ? phy_mii_ioctl(priv->phy_dev, rq, cmd) : -ENODEV;
}
/*
 * Hard-reset the MAC (polling up to ~100ms for MAC_Reset to clear),
 * restore every control register to its power-on default, and wipe the
 * internal SRAM/CAM via DMA test mode.  Leaves the chip quiescent.
 */
static void tc35815_chip_reset(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int i;

	/* reset the controller */
	tc_writel(MAC_Reset, &tr->MAC_Ctl);
	udelay(4); /* 3200ns */
	i = 0;
	while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
		if (i++ > 100) {
			printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
			break;
		}
		mdelay(1);
	}
	tc_writel(0, &tr->MAC_Ctl);

	/* initialize registers to default value */
	tc_writel(0, &tr->DMA_Ctl);
	tc_writel(0, &tr->TxThrsh);
	tc_writel(0, &tr->TxPollCtr);
	tc_writel(0, &tr->RxFragSize);
	tc_writel(0, &tr->Int_En);
	tc_writel(0, &tr->FDA_Bas);
	tc_writel(0, &tr->FDA_Lim);
	tc_writel(0xffffffff, &tr->Int_Src);	/* Write 1 to clear */
	tc_writel(0, &tr->CAM_Ctl);
	tc_writel(0, &tr->Tx_Ctl);
	tc_writel(0, &tr->Rx_Ctl);
	tc_writel(0, &tr->CAM_Ena);
	(void)tc_readl(&tr->Miss_Cnt);	/* Read to clear */

	/* initialize internal SRAM */
	tc_writel(DMA_TestMode, &tr->DMA_Ctl);
	for (i = 0; i < 0x1000; i += 4) {
		tc_writel(i, &tr->CAM_Adr);
		tc_writel(0, &tr->CAM_Data);
	}
	tc_writel(0, &tr->DMA_Ctl);
}
/*
 * Program the chip for operation: load the station address into the
 * CAM, configure DMA/thresholds/interrupts, hand the descriptor queues
 * to the hardware, then enable the receive and transmit paths in the
 * chip-mandated activation order.  Caller holds lp->lock.
 */
static void tc35815_chip_init(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long txctl = TX_CTL_CMD;

	/* load station address to CAM */
	tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);

	/* Enable CAM (broadcast and unicast) */
	tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
	tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);

	/* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
	if (HAVE_DMA_RXALIGN(lp))
		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
	else
		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
	tc_writel(INT_EN_CMD, &tr->Int_En);

	/* set queues */
	tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
	tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
		  &tr->FDA_Lim);
	/*
	 * Activation method:
	 * First, enable the MAC Transmitter and the DMA Receive circuits.
	 * Then enable the DMA Transmitter and the MAC Receive circuits.
	 */
	tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr);	/* start DMA receiver */
	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */

	/* start MAC transmitter */
	/* TX4939 does not have EnLCarr */
	if (lp->chiptype == TC35815_TX4939)
		txctl &= ~Tx_EnLCarr;
	/* WORKAROUND: ignore LostCrS in full duplex operation */
	if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
		txctl &= ~Tx_EnLCarr;
	tc_writel(txctl, &tr->Tx_Ctl);
}
#ifdef CONFIG_PM
/*
 * PCI suspend: save PCI state and, if the interface is up, detach the
 * netdev, stop the PHY, quiesce the chip and drop to D3hot.
 */
static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);
	unsigned long flags;

	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;
	netif_device_detach(dev);
	if (lp->phy_dev)
		phy_stop(lp->phy_dev);
	spin_lock_irqsave(&lp->lock, flags);
	tc35815_chip_reset(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
/*
 * PCI resume: restore PCI state and, if the interface was up, power the
 * device back to D0, fully restart the controller, restart the PHY and
 * re-attach the netdev.
 */
static int tc35815_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;
	pci_set_power_state(pdev, PCI_D0);
	tc35815_restart(dev);
	/* Carrier comes back once the PHY reports link up */
	netif_carrier_off(dev);
	if (lp->phy_dev)
		phy_start(lp->phy_dev);
	netif_device_attach(dev);
	return 0;
}
#endif /* CONFIG_PM */
/* PCI driver registration for the TC35815 family */
static struct pci_driver tc35815_pci_driver = {
	.name		= MODNAME,
	.id_table	= tc35815_pci_tbl,
	.probe		= tc35815_init_one,
	.remove		= tc35815_remove_one,
#ifdef CONFIG_PM
	.suspend	= tc35815_suspend,
	.resume		= tc35815_resume,
#endif
};
/* Module parameters: force link speed/duplex instead of autonegotiation */
module_param_named(speed, options.speed, int, 0);
MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
module_param_named(duplex, options.duplex, int, 0);
MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
/* Module entry point: register the PCI driver. */
static int __init tc35815_init_module(void)
{
	return pci_register_driver(&tc35815_pci_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit tc35815_cleanup_module(void)
{
	pci_unregister_driver(&tc35815_pci_driver);
}
module_init(tc35815_init_module);
module_exit(tc35815_cleanup_module);
MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
proximo256/kernel_samsung_exynos7420 | drivers/acpi/acpica/dsmthdat.c | 2138 | 21512 | /*******************************************************************************
*
* Module Name: dsmthdat - control method arguments and local variables
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmthdat")
/* Local prototypes */
static void
acpi_ds_method_data_delete_value(u8 type,
u32 index, struct acpi_walk_state *walk_state);
static acpi_status
acpi_ds_method_data_set_value(u8 type,
u32 index,
union acpi_operand_object *object,
struct acpi_walk_state *walk_state);
#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_object_type
acpi_ds_method_data_get_type(u16 opcode,
u32 index, struct acpi_walk_state *walk_state);
#endif
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_data_init
 *
 * PARAMETERS:  walk_state          - Current walk state object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the data structures that hold the method's arguments
 *              and locals. The data struct is an array of namespace nodes for
 *              each - this allows ref_of and de_ref_of to work properly for these
 *              special data types.
 *
 * NOTES:       walk_state fields are initialized to zero by the
 *              ACPI_ALLOCATE_ZEROED().
 *
 *              A pseudo-Namespace Node is assigned to each argument and local
 *              so that ref_of() can return a pointer to the Node.
 *
 ******************************************************************************/

void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
{
	u32 i;

	ACPI_FUNCTION_TRACE(ds_method_data_init);

	/* Init the method arguments */

	for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
		/* Name each pseudo-node "ARGx" with the index in the top byte */
		ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
				   NAMEOF_ARG_NTE);

		walk_state->arguments[i].name.integer |= (i << 24);
		walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
		walk_state->arguments[i].type = ACPI_TYPE_ANY;
		walk_state->arguments[i].flags = ANOBJ_METHOD_ARG;
	}

	/* Init the method locals */

	for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) {
		/* Likewise "LCLx" pseudo-nodes for the eight method locals */
		ACPI_MOVE_32_TO_32(&walk_state->local_variables[i].name,
				   NAMEOF_LOCAL_NTE);

		walk_state->local_variables[i].name.integer |= (i << 24);
		walk_state->local_variables[i].descriptor_type =
		    ACPI_DESC_TYPE_NAMED;
		walk_state->local_variables[i].type = ACPI_TYPE_ANY;
		walk_state->local_variables[i].flags = ANOBJ_METHOD_LOCAL;
	}

	return_VOID;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_data_delete_all
 *
 * PARAMETERS:  walk_state          - Current walk state object
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete method locals and arguments. Arguments are only
 *              deleted if this method was called from another method.
 *
 ******************************************************************************/

void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
{
	u32 index;

	ACPI_FUNCTION_TRACE(ds_method_data_delete_all);

	/* Detach the locals */

	for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
		if (walk_state->local_variables[index].object) {
			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%u=%p\n",
					  index,
					  walk_state->local_variables[index].
					  object));

			/* Detach object (if present) and remove a reference */

			acpi_ns_detach_object(&walk_state->
					      local_variables[index]);
		}
	}

	/* Detach the arguments */

	for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
		if (walk_state->arguments[index].object) {
			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%u=%p\n",
					  index,
					  walk_state->arguments[index].object));

			/* Detach object (if present) and remove a reference */

			acpi_ns_detach_object(&walk_state->arguments[index]);
		}
	}

	return_VOID;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_data_init_args
 *
 * PARAMETERS:  *params         - Pointer to a parameter list for the method
 *              max_param_count - The arg count for this method
 *              walk_state      - Current walk state object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
 *              of ACPI operand objects, either null terminated or whose length
 *              is defined by max_param_count.
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_data_init_args(union acpi_operand_object **params,
			      u32 max_param_count,
			      struct acpi_walk_state *walk_state)
{
	acpi_status status;
	u32 index = 0;

	ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);

	/* No parameter list is valid; the method simply gets no args */

	if (!params) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "No param list passed to method\n"));
		return_ACPI_STATUS(AE_OK);
	}

	/* Copy passed parameters into the new method stack frame */

	while ((index < ACPI_METHOD_NUM_ARGS) &&
	       (index < max_param_count) && params[index]) {
		/*
		 * A valid parameter.
		 * Store the argument in the method/walk descriptor.
		 * Do not copy the arg in order to implement call by reference
		 */
		status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
						       params[index],
						       walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		index++;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_data_get_node
*
* PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* index - Which Local or Arg whose type to get
* walk_state - Current walk state object
* node - Where the node is returned.
*
* RETURN: Status and node
*
* DESCRIPTION: Get the Node associated with a local or arg.
*
******************************************************************************/
/*
 * Map (type, index) to the pseudo-namespace-node that backs a method
 * Local or Arg inside the walk state. Only ACPI_REFCLASS_LOCAL and
 * ACPI_REFCLASS_ARG are valid; the index is range-checked.
 */
acpi_status
acpi_ds_method_data_get_node(u8 type,
			     u32 index,
			     struct acpi_walk_state *walk_state,
			     struct acpi_namespace_node **node)
{
	ACPI_FUNCTION_TRACE(ds_method_data_get_node);

	if (type == ACPI_REFCLASS_LOCAL) {

		/* Range check, then return a pointer to the pseudo-node */

		if (index > ACPI_METHOD_MAX_LOCAL) {
			ACPI_ERROR((AE_INFO,
				    "Local index %u is invalid (max %u)",
				    index, ACPI_METHOD_MAX_LOCAL));
			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
		}

		*node = &walk_state->local_variables[index];
	} else if (type == ACPI_REFCLASS_ARG) {

		/* Range check, then return a pointer to the pseudo-node */

		if (index > ACPI_METHOD_MAX_ARG) {
			ACPI_ERROR((AE_INFO,
				    "Arg index %u is invalid (max %u)",
				    index, ACPI_METHOD_MAX_ARG));
			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
		}

		*node = &walk_state->arguments[index];
	} else {

		/* Anything other than a Local or Arg is an internal error */

		ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
		return_ACPI_STATUS(AE_TYPE);
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_data_set_value
*
* PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* index - Which Local or Arg to get
* object - Object to be inserted into the stack entry
* walk_state - Current walk state object
*
* RETURN: Status
*
* DESCRIPTION: Insert an object onto the method stack at entry Opcode:Index.
* Note: There is no "implicit conversion" for locals.
*
******************************************************************************/
/*
 * Install an operand object into the pseudo-node for an Arg or Local.
 * The object is stored by reference (not copied); a reference is added
 * so it cannot be deleted while installed.
 */
static acpi_status
acpi_ds_method_data_set_value(u8 type,
			      u32 index,
			      union acpi_operand_object *object,
			      struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_namespace_node *pseudo_node;

	ACPI_FUNCTION_TRACE(ds_method_data_set_value);

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "NewObj %p Type %2.2X, Refs=%u [%s]\n", object,
			  type, object->common.reference_count,
			  acpi_ut_get_type_name(object->common.type)));

	/* Locate the pseudo-node backing this Arg or Local */

	status = acpi_ds_method_data_get_node(type, index, walk_state,
					      &pseudo_node);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Take a reference so the object survives while installed. The
	 * object itself is NOT copied, preserving the call-by-reference
	 * semantics of ACPI control method invocation (ACPI spec 2.0C).
	 */
	acpi_ut_add_reference(object);

	/* Install the object into the slot */

	pseudo_node->object = object;
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_data_get_value
*
* PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* index - Which localVar or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
* RETURN: Status
*
* DESCRIPTION: Retrieve value of selected Arg or Local for this method
* Used only in acpi_ex_resolve_to_value().
*
******************************************************************************/
/*
 * Retrieve the current value of an Arg or Local. On success the caller
 * receives the stored object with one additional reference added (the
 * caller must eventually remove it). An uninitialized slot is either
 * lazily filled with Integer 0 (interpreter "slack" mode) or reported
 * as an AE_AML_UNINITIALIZED_* error.
 */
acpi_status
acpi_ds_method_data_get_value(u8 type,
			      u32 index,
			      struct acpi_walk_state *walk_state,
			      union acpi_operand_object **dest_desc)
{
	acpi_status status;
	struct acpi_namespace_node *node;
	union acpi_operand_object *object;

	ACPI_FUNCTION_TRACE(ds_method_data_get_value);

	/* Validate the object descriptor */

	if (!dest_desc) {
		ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Get the namespace node for the arg/local */

	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Get the object from the node */

	object = node->object;

	/* Examine the returned object, it must be valid. */

	if (!object) {
		/*
		 * Index points to uninitialized object.
		 * This means that either 1) The expected argument was
		 * not passed to the method, or 2) A local variable
		 * was referenced by the method (via the ASL)
		 * before it was initialized. Either case is an error.
		 */
		/* If slack enabled, init the local_x/arg_x to an Integer of value zero */
		if (acpi_gbl_enable_interpreter_slack) {
			/*
			 * Lazily create Integer 0 and install it; execution
			 * then falls through to the common add-reference
			 * path below, same as an initialized slot.
			 */
			object = acpi_ut_create_integer_object((u64) 0);
			if (!object) {
				return_ACPI_STATUS(AE_NO_MEMORY);
			}
			node->object = object;
		}
		/* Otherwise, return the error */
		else
			switch (type) {
			case ACPI_REFCLASS_ARG:
				ACPI_ERROR((AE_INFO,
					    "Uninitialized Arg[%u] at node %p",
					    index, node));
				return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
			case ACPI_REFCLASS_LOCAL:
				/*
				 * No error message for this case, will be trapped again later to
				 * detect and ignore cases of Store(local_x,local_x)
				 */
				return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
			default:
				/* get_node should have rejected this type already */
				ACPI_ERROR((AE_INFO,
					    "Not a Arg/Local opcode: 0x%X",
					    type));
				return_ACPI_STATUS(AE_AML_INTERNAL);
			}
	}

	/*
	 * The Index points to an initialized and valid object.
	 * Return an additional reference to the object
	 */
	*dest_desc = object;
	acpi_ut_add_reference(object);

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_data_delete_value
*
* PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* index - Which localVar or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
*
* DESCRIPTION: Delete the entry at Opcode:Index. Inserts
* a null into the stack slot after the object is deleted.
*
******************************************************************************/
/*
 * Clear an Arg or Local slot, NULLing the descriptor pointer and
 * dropping the reference that was taken when the object was stored.
 */
static void
acpi_ds_method_data_delete_value(u8 type,
				 u32 index, struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_namespace_node *node;
	union acpi_operand_object *attached;

	ACPI_FUNCTION_TRACE(ds_method_data_delete_value);

	/* Find the pseudo-node for this Arg/Local; bail silently on error */

	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	attached = acpi_ns_get_attached_object(node);

	/*
	 * Undefine the slot first. Locals/Args can contain both
	 * ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs, so the pointer
	 * is simply cleared.
	 */
	node->object = NULL;

	if ((attached) &&
	    (ACPI_GET_DESCRIPTOR_TYPE(attached) == ACPI_DESC_TYPE_OPERAND)) {
		/*
		 * A valid operand object was stored here: remove one
		 * reference to balance the increment done at store time.
		 */
		acpi_ut_remove_reference(attached);
	}

	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_store_object_to_local
*
* PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* index - Which Local or Arg to set
* obj_desc - Value to be stored
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
* as the new value for the Arg or Local and the reference count
* for obj_desc is incremented.
*
******************************************************************************/
/*
 * Store obj_desc into an Arg or Local. The object is installed by
 * reference (its refcount is incremented), unless it is shared
 * (refcount > 1), in which case a private copy is stored instead.
 * For an Arg that currently holds a ref_of() reference, the store is
 * redirected to the referenced object ("indirect store").
 */
acpi_status
acpi_ds_store_object_to_local(u8 type,
			      u32 index,
			      union acpi_operand_object *obj_desc,
			      struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_namespace_node *node;
	union acpi_operand_object *current_obj_desc;
	union acpi_operand_object *new_obj_desc;

	ACPI_FUNCTION_TRACE(ds_store_object_to_local);
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%u Obj=%p\n",
			  type, index, obj_desc));

	/* Parameter validation */

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Get the namespace node for the arg/local */

	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Storing the already-installed object is a no-op */

	current_obj_desc = acpi_ns_get_attached_object(node);
	if (current_obj_desc == obj_desc) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p already installed!\n",
				  obj_desc));
		return_ACPI_STATUS(status);
	}

	/*
	 * If the reference count on the object is more than one, we must
	 * take a copy of the object before we store. A reference count
	 * of exactly 1 means that the object was just created during the
	 * evaluation of an expression, and we can safely use it since it
	 * is not used anywhere else.
	 */
	new_obj_desc = obj_desc;
	if (obj_desc->common.reference_count > 1) {
		status =
		    acpi_ut_copy_iobject_to_iobject(obj_desc, &new_obj_desc,
						    walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * If there is an object already in this slot, we either
	 * have to delete it, or if this is an argument and there
	 * is an object reference stored there, we have to do
	 * an indirect store!
	 */
	if (current_obj_desc) {
		/*
		 * Check for an indirect store if an argument
		 * contains an object reference (stored as an Node).
		 * We don't allow this automatic dereferencing for
		 * locals, since a store to a local should overwrite
		 * anything there, including an object reference.
		 *
		 * If both Arg0 and Local0 contain ref_of (Local4):
		 *
		 * Store (1, Arg0)             - Causes indirect store to local4
		 * Store (1, Local0)           - Stores 1 in local0, overwriting
		 *                               the reference to local4
		 * Store (1, de_refof (Local0)) - Causes indirect store to local4
		 *
		 * Weird, but true.
		 */
		if (type == ACPI_REFCLASS_ARG) {
			/*
			 * If we have a valid reference object that came from ref_of(),
			 * do the indirect store
			 */
			if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
			     ACPI_DESC_TYPE_OPERAND)
			    && (current_obj_desc->common.type ==
				ACPI_TYPE_LOCAL_REFERENCE)
			    && (current_obj_desc->reference.class ==
				ACPI_REFCLASS_REFOF)) {
				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
						  "Arg (%p) is an ObjRef(Node), storing in node %p\n",
						  new_obj_desc,
						  current_obj_desc));

				/*
				 * Store this object to the Node (perform the indirect store)
				 * NOTE: No implicit conversion is performed, as per the ACPI
				 * specification rules on storing to Locals/Args.
				 */
				status =
				    acpi_ex_store_object_to_node(new_obj_desc,
								 current_obj_desc->
								 reference.
								 object,
								 walk_state,
								 ACPI_NO_IMPLICIT_CONVERSION);

				/* Remove local reference if we copied the object above */

				if (new_obj_desc != obj_desc) {
					acpi_ut_remove_reference(new_obj_desc);
				}
				return_ACPI_STATUS(status);
			}
		}

		/* Delete the existing object before storing the new one */

		acpi_ds_method_data_delete_value(type, index, walk_state);
	}

	/*
	 * Install the Obj descriptor (*new_obj_desc) into
	 * the descriptor for the Arg or Local.
	 * (increments the object reference count by one)
	 */
	status =
	    acpi_ds_method_data_set_value(type, index, new_obj_desc,
					  walk_state);

	/* Remove local reference if we copied the object above */

	if (new_obj_desc != obj_desc) {
		acpi_ut_remove_reference(new_obj_desc);
	}

	return_ACPI_STATUS(status);
}
#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_data_get_type
*
* PARAMETERS: opcode - Either AML_LOCAL_OP or AML_ARG_OP
* index - Which Local or Arg whose type to get
* walk_state - Current walk state object
*
* RETURN: Data type of current value of the selected Arg or Local
*
* DESCRIPTION: Get the type of the object stored in the Local or Arg
*
******************************************************************************/
/*
 * Report the ACPI object type currently stored in a Local or Arg.
 * Returns ACPI_TYPE_NOT_FOUND when the slot cannot be resolved and
 * ACPI_TYPE_ANY for an uninitialized slot.
 */
acpi_object_type
acpi_ds_method_data_get_type(u16 opcode,
			     u32 index, struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_namespace_node *node;
	union acpi_operand_object *attached;

	ACPI_FUNCTION_TRACE(ds_method_data_get_type);

	/* Resolve the pseudo-node for this Local/Arg */

	status = acpi_ds_method_data_get_node(opcode, index, walk_state,
					      &node);
	if (ACPI_FAILURE(status)) {
		return_VALUE((ACPI_TYPE_NOT_FOUND));
	}

	/* An empty slot reports TYPE_ANY (uninitialized) */

	attached = acpi_ns_get_attached_object(node);
	if (!attached) {
		return_VALUE(ACPI_TYPE_ANY);
	}

	/* Report the type of the stored object */

	return_VALUE(attached->type);
}
#endif
| gpl-2.0 |
utilite-computer/linux-kernel | drivers/acpi/acpica/exconvrt.c | 2138 | 17865 | /******************************************************************************
*
* Module Name: exconvrt - Object conversion routines
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exconvrt")
/* Local prototypes */
static u32
acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
/*******************************************************************************
*
* FUNCTION: acpi_ex_convert_to_integer
*
* PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the new Integer object is returned
* flags - Used for string conversion
*
* RETURN: Status
*
* DESCRIPTION: Convert an ACPI Object to an integer.
*
******************************************************************************/
/*
 * Convert an Integer, Buffer, or String operand to a new Integer
 * object. Integers pass through unchanged (no new object is created).
 * Strings are parsed per the flags (hex/decimal rules); buffers are
 * consumed as raw little-endian bytes, up to the integer width.
 */
acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
			   union acpi_operand_object **result_desc, u32 flags)
{
	union acpi_operand_object *return_desc;
	u8 *pointer;
	u64 result;
	u32 i;
	u32 count;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);

	switch (obj_desc->common.type) {
	case ACPI_TYPE_INTEGER:

		/* No conversion necessary - return the source object itself */

		*result_desc = obj_desc;
		return_ACPI_STATUS(AE_OK);

	case ACPI_TYPE_BUFFER:
	case ACPI_TYPE_STRING:

		/* Note: Takes advantage of common buffer/string fields */

		pointer = obj_desc->buffer.pointer;
		count = obj_desc->buffer.length;
		break;

	default:
		return_ACPI_STATUS(AE_TYPE);
	}

	/*
	 * Convert the buffer/string to an integer. Note that both buffers and
	 * strings are treated as raw data - we don't convert ascii to hex for
	 * strings.
	 *
	 * There are two terminating conditions for the loop:
	 * 1) The size of an integer has been reached, or
	 * 2) The end of the buffer or string has been reached
	 */
	result = 0;

	/* String conversion is different than Buffer conversion */

	switch (obj_desc->common.type) {
	case ACPI_TYPE_STRING:
		/*
		 * Convert string to an integer - for most cases, the string must be
		 * hexadecimal as per the ACPI specification. The only exception (as
		 * of ACPI 3.0) is that the to_integer() operator allows both decimal
		 * and hexadecimal strings (hex prefixed with "0x").
		 */
		status = acpi_ut_strtoul64((char *)pointer, flags, &result);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
		break;

	case ACPI_TYPE_BUFFER:

		/* Check for zero-length buffer */

		if (!count) {
			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
		}

		/* Transfer no more than an integer's worth of data */

		if (count > acpi_gbl_integer_byte_width) {
			count = acpi_gbl_integer_byte_width;
		}

		/*
		 * Convert buffer to an integer - we simply grab enough raw data
		 * from the buffer to fill an integer
		 */
		for (i = 0; i < count; i++) {
			/*
			 * Get next byte and shift it into the Result.
			 * Little endian is used, meaning that the first byte of the buffer
			 * is the LSB of the integer
			 */
			result |= (((u64) pointer[i]) << (i * 8));
		}
		break;

	default:

		/* No other types can get here */

		break;
	}

	/* Create a new integer holding the converted value */

	return_desc = acpi_ut_create_integer_object(result);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
			  ACPI_FORMAT_UINT64(result)));

	/* Save the Result, truncated to 32 bits if the DSDT is 32-bit */

	(void)acpi_ex_truncate_for32bit_table(return_desc);
	*result_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_convert_to_buffer
*
* PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the new buffer object is returned
*
* RETURN: Status
*
* DESCRIPTION: Convert an ACPI Object to a Buffer
*
******************************************************************************/
/*
 * Convert an Integer, Buffer, or String operand to a Buffer object.
 * Buffers pass through unchanged; integers are copied LSB-first;
 * strings are copied including their null terminator.
 */
acpi_status
acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
			  union acpi_operand_object **result_desc)
{
	union acpi_operand_object *buffer_obj;
	u8 *dest;

	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);

	switch (obj_desc->common.type) {
	case ACPI_TYPE_BUFFER:

		/* Already a buffer - hand the source object back unchanged */

		*result_desc = obj_desc;
		return_ACPI_STATUS(AE_OK);

	case ACPI_TYPE_INTEGER:

		/* New buffer sized to hold exactly one integer */

		buffer_obj =
		    acpi_ut_create_buffer_object(acpi_gbl_integer_byte_width);
		if (!buffer_obj) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* The integer is stored LSB-first (little endian) */

		dest = buffer_obj->buffer.pointer;
		ACPI_MEMCPY(dest,
			    &obj_desc->integer.value,
			    acpi_gbl_integer_byte_width);
		break;

	case ACPI_TYPE_STRING:
		/*
		 * New buffer sized to the string length plus one, so the
		 * null terminator is carried over. The ACPI spec is unclear
		 * here, but existing ASL/AML code depends on the null being
		 * transferred to the new buffer.
		 */
		buffer_obj = acpi_ut_create_buffer_object((acpi_size)
							  obj_desc->string.
							  length + 1);
		if (!buffer_obj) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Copy the string body into the buffer */

		dest = buffer_obj->buffer.pointer;
		ACPI_STRNCPY((char *)dest, (char *)obj_desc->string.pointer,
			     obj_desc->string.length);
		break;

	default:
		return_ACPI_STATUS(AE_TYPE);
	}

	/* The new buffer now contains valid data */

	buffer_obj->common.flags |= AOPOBJ_DATA_VALID;
	*result_desc = buffer_obj;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_convert_to_ascii
*
* PARAMETERS: integer - Value to be converted
* base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
* string - Where the string is returned
* data_width - Size of data item to be converted, in bytes
*
* RETURN: Actual string length
*
* DESCRIPTION: Convert an ACPI Integer to a hex or decimal string
*
******************************************************************************/
/*
 * Render a 64-bit integer as decimal (base 10) or hex (base 16) ASCII
 * into the caller's buffer. Leading zeros are suppressed for decimal;
 * hex always emits two characters per data byte. The string is null
 * terminated; the returned length excludes the terminator. Returns 0
 * for an unsupported base.
 */
static u32
acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
{
	u64 digit;
	u32 i;
	u32 j;
	u32 length = 0;
	u32 hex_digits;
	u32 max_decimal_digits;
	u32 remainder;
	u8 leading_zero;

	ACPI_FUNCTION_ENTRY();

	switch (base) {
	case 10:

		/* Max number of decimal digits depends on the data width */

		switch (data_width) {
		case 1:
			max_decimal_digits = ACPI_MAX8_DECIMAL_DIGITS;
			break;

		case 4:
			max_decimal_digits = ACPI_MAX32_DECIMAL_DIGITS;
			break;

		case 8:
		default:
			max_decimal_digits = ACPI_MAX64_DECIMAL_DIGITS;
			break;
		}

		leading_zero = TRUE;	/* Suppress leading zeros */
		remainder = 0;

		for (i = max_decimal_digits; i > 0; i--) {
			/*
			 * Divide by the i-th power of 10 via repeated short
			 * division; the final remainder is this position's
			 * digit (most significant digit first).
			 */
			digit = integer;
			for (j = 0; j < i; j++) {
				(void)acpi_ut_short_divide(digit, 10, &digit,
							   &remainder);
			}

			/* First nonzero digit ends the leading-zero run */

			if (remainder != 0) {
				leading_zero = FALSE;
			}

			if (!leading_zero) {
				string[length] =
				    (u8) (ACPI_ASCII_ZERO + remainder);
				length++;
			}
		}
		break;

	case 16:

		/* Two ASCII hex characters per data byte */

		hex_digits = ACPI_MUL_2(data_width);
		for (i = 0, j = (hex_digits - 1); i < hex_digits; i++, j--) {

			/* Emit the most significant nibble first */

			string[length] =
			    (u8) acpi_ut_hex_to_ascii_char(integer,
							   ACPI_MUL_4(j));
			length++;
		}
		break;

	default:
		return (0);
	}

	/*
	 * A value of zero has every digit suppressed above, so emit a
	 * single "0" in that case.
	 */
	if (!length) {
		string[0] = ACPI_ASCII_ZERO;
		length = 1;
	}

	/* Null terminate; returned length excludes the terminator */

	string[length] = 0;
	return ((u32) length);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_convert_to_string
*
* PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the string object is returned
* type - String flags (base and conversion type)
*
* RETURN: Status
*
* DESCRIPTION: Convert an ACPI Object to a string
*
******************************************************************************/
/*
 * Convert an Integer, Buffer, or String operand to a String object.
 * Strings pass through unchanged. Integers become one decimal or hex
 * number per the type flags; buffers become a list of per-byte values
 * separated by commas (explicit conversion) or spaces (implicit hex).
 */
acpi_status
acpi_ex_convert_to_string(union acpi_operand_object *obj_desc,
			  union acpi_operand_object **result_desc, u32 type)
{
	union acpi_operand_object *return_desc;
	u8 *new_buf;
	u32 i;
	u32 string_length = 0;
	u16 base = 16;
	u8 separator = ',';

	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);

	switch (obj_desc->common.type) {
	case ACPI_TYPE_STRING:

		/* No conversion necessary - return the source object itself */

		*result_desc = obj_desc;
		return_ACPI_STATUS(AE_OK);

	case ACPI_TYPE_INTEGER:

		switch (type) {
		case ACPI_EXPLICIT_CONVERT_DECIMAL:

			/* Make room for maximum decimal number */

			string_length = ACPI_MAX_DECIMAL_DIGITS;
			base = 10;
			break;

		default:

			/* Two hex string characters for each integer byte */

			string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
			break;
		}

		/*
		 * Create a new String
		 * Need enough space for one ASCII integer (plus null terminator)
		 */
		return_desc =
		    acpi_ut_create_string_object((acpi_size) string_length);
		if (!return_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		new_buf = return_desc->buffer.pointer;

		/* Convert integer to string */

		string_length =
		    acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
					     new_buf,
					     acpi_gbl_integer_byte_width);

		/* Null terminate at the correct place */

		return_desc->string.length = string_length;
		new_buf[string_length] = 0;
		break;

	case ACPI_TYPE_BUFFER:

		/* Setup string length, base, and separator */

		switch (type) {
		case ACPI_EXPLICIT_CONVERT_DECIMAL:	/* Used by to_decimal_string */
			/*
			 * From ACPI: "If Data is a buffer, it is converted to a string of
			 * decimal values separated by commas."
			 */
			base = 10;

			/*
			 * Calculate the final string length. Individual string values
			 * are variable length (include separator for each):
			 * 1-3 digit characters plus one separator per byte.
			 */
			for (i = 0; i < obj_desc->buffer.length; i++) {
				if (obj_desc->buffer.pointer[i] >= 100) {
					string_length += 4;
				} else if (obj_desc->buffer.pointer[i] >= 10) {
					string_length += 3;
				} else {
					string_length += 2;
				}
			}
			break;

		case ACPI_IMPLICIT_CONVERT_HEX:
			/*
			 * From the ACPI spec:
			 *"The entire contents of the buffer are converted to a string of
			 * two-character hexadecimal numbers, each separated by a space."
			 */
			separator = ' ';
			string_length = (obj_desc->buffer.length * 3);
			break;

		case ACPI_EXPLICIT_CONVERT_HEX:	/* Used by to_hex_string */
			/*
			 * From ACPI: "If Data is a buffer, it is converted to a string of
			 * hexadecimal values separated by commas."
			 */
			string_length = (obj_desc->buffer.length * 3);
			break;

		default:
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		/*
		 * Create a new string object and string buffer
		 * (-1 because of extra separator included in string_length from above)
		 * Allow creation of zero-length strings from zero-length buffers.
		 */
		if (string_length) {
			string_length--;
		}

		return_desc =
		    acpi_ut_create_string_object((acpi_size) string_length);
		if (!return_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		new_buf = return_desc->buffer.pointer;

		/*
		 * Convert buffer bytes to hex or decimal values
		 * (separated by commas or spaces)
		 */
		for (i = 0; i < obj_desc->buffer.length; i++) {
			new_buf += acpi_ex_convert_to_ascii((u64) obj_desc->
							    buffer.pointer[i],
							    base, new_buf, 1);
			*new_buf++ = separator;	/* each separated by a comma or space */
		}

		/*
		 * Null terminate the string
		 * (overwrites final comma/space from above)
		 */
		if (obj_desc->buffer.length) {
			new_buf--;
		}
		*new_buf = 0;
		break;

	default:
		return_ACPI_STATUS(AE_TYPE);
	}

	*result_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_convert_to_target_type
*
* PARAMETERS: destination_type - Current type of the destination
* source_desc - Source object to be converted.
* result_desc - Where the converted object is returned
* walk_state - Current method state
*
* RETURN: Status
*
* DESCRIPTION: Implements "implicit conversion" rules for storing an object.
*
******************************************************************************/
/*
 * Apply the ACPI "implicit conversion" rules before a store: convert
 * source_desc to the destination's type when the store target requires
 * it. By default *result_desc is the unconverted source. A failed
 * type match (AE_TYPE) is deliberately downgraded to AE_OK at the end:
 * the store then simply overwrites the target with the new type.
 */
acpi_status
acpi_ex_convert_to_target_type(acpi_object_type destination_type,
			       union acpi_operand_object *source_desc,
			       union acpi_operand_object **result_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_convert_to_target_type);

	/* Default behavior: pass the source through unconverted */

	*result_desc = source_desc;

	/*
	 * If required by the target,
	 * perform implicit conversion on the source before we store it.
	 * The conversion policy depends on the runtime argument type of
	 * the current opcode's target operand.
	 */
	switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
	case ARGI_SIMPLE_TARGET:
	case ARGI_FIXED_TARGET:
	case ARGI_INTEGER_REF:	/* Handles Increment, Decrement cases */

		switch (destination_type) {
		case ACPI_TYPE_LOCAL_REGION_FIELD:
			/*
			 * Named field can always handle conversions
			 */
			break;

		default:

			/* No conversion allowed for these types */

			if (destination_type != source_desc->common.type) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "Explicit operator, will store (%s) over existing type (%s)\n",
						  acpi_ut_get_object_type_name
						  (source_desc),
						  acpi_ut_get_type_name
						  (destination_type)));
				status = AE_TYPE;
			}
		}
		break;

	case ARGI_TARGETREF:

		switch (destination_type) {
		case ACPI_TYPE_INTEGER:
		case ACPI_TYPE_BUFFER_FIELD:
		case ACPI_TYPE_LOCAL_BANK_FIELD:
		case ACPI_TYPE_LOCAL_INDEX_FIELD:
			/*
			 * These types require an Integer operand. We can convert
			 * a Buffer or a String to an Integer if necessary.
			 */
			status =
			    acpi_ex_convert_to_integer(source_desc, result_desc,
						       16);
			break;

		case ACPI_TYPE_STRING:
			/*
			 * The operand must be a String. We can convert an
			 * Integer or Buffer if necessary
			 */
			status =
			    acpi_ex_convert_to_string(source_desc, result_desc,
						      ACPI_IMPLICIT_CONVERT_HEX);
			break;

		case ACPI_TYPE_BUFFER:
			/*
			 * The operand must be a Buffer. We can convert an
			 * Integer or String if necessary
			 */
			status =
			    acpi_ex_convert_to_buffer(source_desc, result_desc);
			break;

		default:
			ACPI_ERROR((AE_INFO,
				    "Bad destination type during conversion: 0x%X",
				    destination_type));
			status = AE_AML_INTERNAL;
			break;
		}
		break;

	case ARGI_REFERENCE:
		/*
		 * create_xxxx_field cases - we are storing the field object into the name
		 */
		break;

	default:
		ACPI_ERROR((AE_INFO,
			    "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
			    GET_CURRENT_ARG_TYPE(walk_state->op_info->
						 runtime_args),
			    walk_state->opcode,
			    acpi_ut_get_type_name(destination_type)));
		status = AE_AML_INTERNAL;
	}

	/*
	 * Source-to-Target conversion semantics:
	 *
	 * If conversion to the target type cannot be performed, then simply
	 * overwrite the target with the new object and type.
	 */
	if (status == AE_TYPE) {
		status = AE_OK;
	}

	return_ACPI_STATUS(status);
}
| gpl-2.0 |
slayher/android_kernel_samsung_zerofltetmo | fs/btrfs/check-integrity.c | 2138 | 105157 | /*
* Copyright (C) STRATO AG 2011. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
/*
* This module can be used to catch cases when the btrfs kernel
* code executes write requests to the disk that bring the file
* system in an inconsistent state. In such a state, a power-loss
* or kernel panic event would cause that the data on disk is
* lost or at least damaged.
*
* Code is added that examines all block write requests during
* runtime (including writes of the super block). Three rules
* are verified and an error is printed on violation of the
* rules:
* 1. It is not allowed to write a disk block which is
* currently referenced by the super block (either directly
* or indirectly).
* 2. When a super block is written, it is verified that all
* referenced (directly or indirectly) blocks fulfill the
* following requirements:
* 2a. All referenced blocks have either been present when
* the file system was mounted, (i.e., they have been
* referenced by the super block) or they have been
* written since then and the write completion callback
* was called and no write error was indicated and a
* FLUSH request to the device where these blocks are
* located was received and completed.
* 2b. All referenced blocks need to have a generation
* number which is equal to the parent's number.
*
* One issue that was found using this module was that the log
* tree on disk became temporarily corrupted because disk blocks
* that had been in use for the log tree had been freed and
* reused too early, while being referenced by the written super
* block.
*
* The search term in the kernel log that can be used to filter
* on the existence of detected integrity issues is
* "btrfs: attempt".
*
* The integrity check is enabled via mount options. These
* mount options are only supported if the integrity check
* tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
*
* Example #1, apply integrity checks to all metadata:
* mount /dev/sdb1 /mnt -o check_int
*
* Example #2, apply integrity checks to all metadata and
* to data extents:
* mount /dev/sdb1 /mnt -o check_int_data
*
* Example #3, apply integrity checks to all metadata and dump
* the tree that the super block references to kernel messages
* each time after a super block was written:
* mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
*
* If the integrity check tool is included and activated in
* the mount options, plenty of kernel memory is used, and
* plenty of additional CPU cycles are spent. Enabling this
* functionality is not intended for normal use. In most
* cases, unless you are a btrfs developer who needs to verify
* the integrity of (super)-block write requests, do not
* enable the config option BTRFS_FS_CHECK_INTEGRITY to
* include and compile the integrity check tool.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/crc32c.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "extent_io.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "check-integrity.h"
#include "rcu-string.h"
#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters,
* excluding " [...]" */
#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
/*
* The definition of the bitmask fields for the print_mask.
* They are specified with the mount option check_integrity_print_mask.
*/
#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001
#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002
#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004
#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010
#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020
#define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040
#define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080
#define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100
#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200
#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
struct btrfsic_dev_state;
struct btrfsic_state;
/*
 * Per-disk-block state tracked by the integrity checker. One instance
 * exists per (bdev, dev_bytenr) pair; blocks live on a global list and
 * in a hashtable (see btrfsic_block_hashtable_add/lookup below).
 */
struct btrfsic_block {
	u32 magic_num;		/* only used for debug purposes */
	unsigned int is_metadata:1;	/* if it is meta-data, not data-data */
	unsigned int is_superblock:1;	/* if it is one of the superblocks */
	unsigned int is_iodone:1;	/* if is done by lower subsystem */
	unsigned int iodone_w_error:1;	/* error was indicated to endio */
	unsigned int never_written:1;	/* block was added because it was
					 * referenced, not because it was
					 * written */
	unsigned int mirror_num;	/* large enough to hold
					 * BTRFS_SUPER_MIRROR_MAX */
	struct btrfsic_dev_state *dev_state;
	u64 dev_bytenr;		/* key, physical byte num on disk */
	u64 logical_bytenr;	/* logical byte num on disk */
	u64 generation;		/* tree-block generation; set to
				 * BTRFSIC_GENERATION_UNKNOWN until read from
				 * the block header */
	struct btrfs_disk_key disk_key;	/* extra info to print in case of
					 * issues, will not always be correct */
	struct list_head collision_resolving_node;	/* list node */
	struct list_head all_blocks_node;	/* list node */

	/* the following two lists contain block_link items */
	struct list_head ref_to_list;	/* list */
	struct list_head ref_from_list;	/* list */
	struct btrfsic_block *next_in_same_bio;
	/* saved bi_private/endio of the intercepted bio or bh so that the
	 * original completion can be restored (NOTE(review): restoration
	 * happens outside this chunk — verify in the endio handlers) */
	void *orig_bio_bh_private;
	union {
		bio_end_io_t *bio;
		bh_end_io_t *bh;
	} orig_bio_bh_end_io;
	int submit_bio_bh_rw;
	u64 flush_gen; /* only valid if !never_written */
};
/*
* Elements of this type are allocated dynamically and required because
 * each block object can refer to and can be referenced from multiple blocks.
 * The key to look them up in the hashtable is the dev_bytenr of
 * the block referred to plus the one from the block referred from.
* The fact that they are searchable via a hashtable and that a
* ref_cnt is maintained is not required for the btrfs integrity
* check algorithm itself, it is only used to make the output more
* beautiful in case that an error is detected (an error is defined
* as a write operation to a block while that block is still referenced).
*/
/*
 * A directed reference edge between two btrfsic_blocks, hashed by the
 * dev_bytenr/bdev of both endpoints (see the comment above).
 */
struct btrfsic_block_link {
	u32 magic_num;		/* only used for debug purposes */
	u32 ref_cnt;		/* starts at 1 (see btrfsic_block_link_init) */
	struct list_head node_ref_to;	/* list node */
	struct list_head node_ref_from;	/* list node */
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block *block_ref_to;
	struct btrfsic_block *block_ref_from;
	u64 parent_generation;	/* generation the parent recorded for the
				 * referenced block */
};
/*
 * Per-block-device state, looked up by bdev pointer in
 * btrfsic_dev_state_hashtable.
 */
struct btrfsic_dev_state {
	u32 magic_num;		/* only used for debug purposes */
	struct block_device *bdev;
	struct btrfsic_state *state;
	struct list_head collision_resolving_node;	/* list node */
	/* placeholder block used for FLUSH requests; marked iodone at init */
	struct btrfsic_block dummy_block_for_bio_bh_flush;
	u64 last_flush_gen;
	char name[BDEVNAME_SIZE];
};
/* Open hash tables; buckets are list heads chained through each element's
 * collision_resolving_node. Sizes are powers of two so the hash can be
 * masked with (SIZE - 1). */
struct btrfsic_block_hashtable {
	struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
};

struct btrfsic_block_link_hashtable {
	struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
};

struct btrfsic_dev_state_hashtable {
	struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
};

/* Describes the mapped data of one block while it is being processed. */
struct btrfsic_block_data_ctx {
	u64 start;		/* virtual bytenr */
	u64 dev_bytenr;		/* physical bytenr on device */
	u32 len;
	struct btrfsic_dev_state *dev;
	char **datav;		/* one mapped-data pointer per page (see
				 * btrfsic_read_from_block_data) */
	struct page **pagev;
	void *mem_to_free;	/* presumably freed by
				 * btrfsic_release_block_ctx — not visible in
				 * this chunk, verify there */
};
/* This structure is used to implement recursion without occupying
* any stack space, refer to btrfsic_process_metablock() */
/* This structure is used to implement recursion without occupying
 * any stack space, refer to btrfsic_process_metablock() */
struct btrfsic_stack_frame {
	u32 magic;		/* BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER */
	u32 nr;			/* number of items in the current node/leaf */
	int error;
	int i;			/* current item index; -1 before first item */
	int limit_nesting;	/* remaining recursion depth allowance */
	int num_copies;		/* mirrors of the current child block */
	int mirror_num;		/* mirror currently being followed */
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx *block_ctx;
	struct btrfsic_block *next_block;
	struct btrfsic_block_data_ctx next_block_ctx;
	struct btrfs_header *hdr;	/* header of the block this frame walks */
	struct btrfsic_stack_frame *prev;	/* parent frame, NULL at root */
};
/* Some state per mounted filesystem */
struct btrfsic_state {
	u32 print_mask;		/* BTRFSIC_PRINT_MASK_* verbosity bits */
	int include_extent_data;	/* also check data extents, not only
					 * metadata */
	int csum_size;		/* from btrfs_super_csum_size() */
	struct list_head all_blocks_list;
	struct btrfsic_block_hashtable block_hashtable;
	struct btrfsic_block_link_hashtable block_link_hashtable;
	struct btrfs_root *root;
	u64 max_superblock_generation;	/* highest generation seen so far */
	struct btrfsic_block *latest_superblock;	/* superblock with that
							 * generation */
	u32 metablock_size;	/* expected nodesize/leafsize */
	u32 datablock_size;	/* expected sectorsize */
};
static void btrfsic_block_init(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_alloc(void);
static void btrfsic_block_free(struct btrfsic_block *b);
static void btrfsic_block_link_init(struct btrfsic_block_link *n);
static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
static void btrfsic_block_link_free(struct btrfsic_block_link *n);
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_hashtable_lookup(
struct block_device *bdev,
u64 dev_bytenr,
struct btrfsic_block_hashtable *h);
static void btrfsic_block_link_hashtable_init(
struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_add(
struct btrfsic_block_link *l,
struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
struct block_device *bdev_ref_to,
u64 dev_bytenr_ref_to,
struct block_device *bdev_ref_from,
u64 dev_bytenr_ref_from,
struct btrfsic_block_link_hashtable *h);
static void btrfsic_dev_state_hashtable_init(
struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_add(
struct btrfsic_dev_state *ds,
struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
struct block_device *bdev,
struct btrfsic_dev_state_hashtable *h);
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
static int btrfsic_process_superblock(struct btrfsic_state *state,
struct btrfs_fs_devices *fs_devices);
static int btrfsic_process_metablock(struct btrfsic_state *state,
struct btrfsic_block *block,
struct btrfsic_block_data_ctx *block_ctx,
int limit_nesting, int force_iodone_flag);
static void btrfsic_read_from_block_data(
struct btrfsic_block_data_ctx *block_ctx,
void *dst, u32 offset, size_t len);
static int btrfsic_create_link_to_next_block(
struct btrfsic_state *state,
struct btrfsic_block *block,
struct btrfsic_block_data_ctx
*block_ctx, u64 next_bytenr,
int limit_nesting,
struct btrfsic_block_data_ctx *next_block_ctx,
struct btrfsic_block **next_blockp,
int force_iodone_flag,
int *num_copiesp, int *mirror_nump,
struct btrfs_disk_key *disk_key,
u64 parent_generation);
static int btrfsic_handle_extent_data(struct btrfsic_state *state,
struct btrfsic_block *block,
struct btrfsic_block_data_ctx *block_ctx,
u32 item_offset, int force_iodone_flag);
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfsic_block_data_ctx *block_ctx_out,
int mirror_num);
static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
u32 len, struct block_device *bdev,
struct btrfsic_block_data_ctx *block_ctx_out);
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx);
static void btrfsic_dump_database(struct btrfsic_state *state);
static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages);
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
u64 dev_bytenr, char **mapped_datav,
unsigned int num_pages,
struct bio *bio, int *bio_is_patched,
struct buffer_head *bh,
int submit_bio_bh_rw);
static int btrfsic_process_written_superblock(
struct btrfsic_state *state,
struct btrfsic_block *const block,
struct btrfs_super_block *const super_hdr);
static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
const struct btrfsic_block *block,
int recursion_level);
static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
struct btrfsic_block *const block,
int recursion_level);
static void btrfsic_print_add_link(const struct btrfsic_state *state,
const struct btrfsic_block_link *l);
static void btrfsic_print_rem_link(const struct btrfsic_state *state,
const struct btrfsic_block_link *l);
static char btrfsic_get_block_type(const struct btrfsic_state *state,
const struct btrfsic_block *block);
static void btrfsic_dump_tree(const struct btrfsic_state *state);
static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
const struct btrfsic_block *block,
int indent_level);
static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
struct btrfsic_state *state,
struct btrfsic_block_data_ctx *next_block_ctx,
struct btrfsic_block *next_block,
struct btrfsic_block *from_block,
u64 parent_generation);
static struct btrfsic_block *btrfsic_block_lookup_or_add(
struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx,
const char *additional_string,
int is_metadata,
int is_iodone,
int never_written,
int mirror_num,
int *was_created);
static int btrfsic_process_superblock_dev_mirror(
struct btrfsic_state *state,
struct btrfsic_dev_state *dev_state,
struct btrfs_device *device,
int superblock_mirror_num,
struct btrfsic_dev_state **selected_dev_state,
struct btrfs_super_block *selected_super);
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
struct block_device *bdev);
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
u64 bytenr,
struct btrfsic_dev_state *dev_state,
u64 dev_bytenr);
static struct mutex btrfsic_mutex;
static int btrfsic_is_initialized;
static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
/*
 * Bring a btrfsic_block into its pristine state: debug magic set,
 * no location, no status bits, and all list nodes self-linked.
 */
static void btrfsic_block_init(struct btrfsic_block *b)
{
	b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;

	/* no on-disk location or identity known yet */
	b->dev_state = NULL;
	b->dev_bytenr = 0;
	b->logical_bytenr = 0;
	b->generation = BTRFSIC_GENERATION_UNKNOWN;
	b->disk_key.objectid = 0;
	b->disk_key.type = 0;
	b->disk_key.offset = 0;

	/* all status flags cleared */
	b->is_metadata = 0;
	b->is_superblock = 0;
	b->is_iodone = 0;
	b->iodone_w_error = 0;
	b->never_written = 0;
	b->mirror_num = 0;

	/* no intercepted bio/bh state */
	b->next_in_same_bio = NULL;
	b->orig_bio_bh_private = NULL;
	b->orig_bio_bh_end_io.bio = NULL;
	b->submit_bio_bh_rw = 0;
	b->flush_gen = 0;

	/* not yet a member of any hashtable or list */
	INIT_LIST_HEAD(&b->collision_resolving_node);
	INIT_LIST_HEAD(&b->all_blocks_node);
	INIT_LIST_HEAD(&b->ref_to_list);
	INIT_LIST_HEAD(&b->ref_from_list);
}
/* Allocate and initialize a block object; returns NULL on OOM. */
static struct btrfsic_block *btrfsic_block_alloc(void)
{
	struct btrfsic_block *b = kzalloc(sizeof(*b), GFP_NOFS);

	if (b)
		btrfsic_block_init(b);

	return b;
}
/* Free a block object; a non-NULL pointer must carry the debug magic. */
static void btrfsic_block_free(struct btrfsic_block *b)
{
	BUG_ON(NULL != b && BTRFSIC_BLOCK_MAGIC_NUMBER != b->magic_num);
	kfree(b);
}
/*
 * Initialize a reference edge: refcount of one, unlinked list nodes,
 * and no endpoints attached yet.
 */
static void btrfsic_block_link_init(struct btrfsic_block_link *l)
{
	l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
	l->ref_cnt = 1;
	l->block_ref_to = NULL;
	l->block_ref_from = NULL;
	INIT_LIST_HEAD(&l->node_ref_to);
	INIT_LIST_HEAD(&l->node_ref_from);
	INIT_LIST_HEAD(&l->collision_resolving_node);
}
/* Allocate and initialize a block link; returns NULL on OOM. */
static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
{
	struct btrfsic_block_link *l = kzalloc(sizeof(*l), GFP_NOFS);

	if (l)
		btrfsic_block_link_init(l);

	return l;
}
/* Free a block link; a non-NULL pointer must carry the debug magic. */
static void btrfsic_block_link_free(struct btrfsic_block_link *l)
{
	BUG_ON(NULL != l && BTRFSIC_BLOCK_LINK_MAGIC_NUMBER != l->magic_num);
	kfree(l);
}
/*
 * Initialize per-device state. The embedded dummy block used for FLUSH
 * tracking starts out "iodone" and points back at its device state.
 */
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
{
	ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
	ds->bdev = NULL;
	ds->state = NULL;
	ds->name[0] = '\0';
	ds->last_flush_gen = 0;
	INIT_LIST_HEAD(&ds->collision_resolving_node);

	btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
	ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
	ds->dummy_block_for_bio_bh_flush.dev_state = ds;
}
/* Allocate and initialize a device state object; returns NULL on OOM. */
static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
{
	struct btrfsic_dev_state *ds = kzalloc(sizeof(*ds), GFP_NOFS);

	if (ds)
		btrfsic_dev_state_init(ds);

	return ds;
}
/* Free a device state object; a non-NULL pointer must carry the magic. */
static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
{
	BUG_ON(NULL != ds &&
	       BTRFSIC_DEV2STATE_MAGIC_NUMBER != ds->magic_num);
	kfree(ds);
}
/* Turn every bucket of the block hashtable into an empty list. */
static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
{
	int bucket;

	for (bucket = 0; bucket < BTRFSIC_BLOCK_HASHTABLE_SIZE; bucket++)
		INIT_LIST_HEAD(&h->table[bucket]);
}
/* Insert a block into its bucket; hash mixes dev_bytenr and bdev. */
static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h)
{
	unsigned int hashval = (unsigned int)(b->dev_bytenr >> 16);

	hashval ^= (unsigned int)((uintptr_t)b->dev_state->bdev);
	hashval &= BTRFSIC_BLOCK_HASHTABLE_SIZE - 1;

	list_add(&b->collision_resolving_node, &h->table[hashval]);
}
/* Unlink a block from whichever hashtable bucket it is on. */
static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
{
	list_del(&b->collision_resolving_node);
}
/*
 * Find the block for (bdev, dev_bytenr) in the hashtable, or NULL if it
 * has not been added.
 */
static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h)
{
	struct btrfsic_block *b;
	unsigned int hashval = (unsigned int)(dev_bytenr >> 16);

	hashval ^= (unsigned int)((uintptr_t)bdev);
	hashval &= BTRFSIC_BLOCK_HASHTABLE_SIZE - 1;

	list_for_each_entry(b, &h->table[hashval], collision_resolving_node) {
		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
			return b;
	}

	return NULL;
}
/* Turn every bucket of the block-link hashtable into an empty list. */
static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h)
{
	int bucket;

	for (bucket = 0; bucket < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; bucket++)
		INIT_LIST_HEAD(&h->table[bucket]);
}
/*
 * Insert a reference edge into the link hashtable; the hash mixes the
 * dev_bytenr and bdev of both endpoints.
 *
 * Fix: the original computed the hash value (which dereferences
 * l->block_ref_to and l->block_ref_from) in the initializer, BEFORE the
 * BUG_ON NULL checks ran — a NULL endpoint would oops on the dereference
 * instead of hitting the intended BUG_ON. Validate first, hash second.
 */
static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h)
{
	unsigned int hashval;

	BUG_ON(NULL == l->block_ref_to);
	BUG_ON(NULL == l->block_ref_from);
	hashval =
	    (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
	     ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
	    & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
	list_add(&l->collision_resolving_node, h->table + hashval);
}
/* Unlink a reference edge from its hashtable bucket. */
static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
{
	list_del(&l->collision_resolving_node);
}
/*
 * Find the edge connecting (bdev_ref_from, dev_bytenr_ref_from) to
 * (bdev_ref_to, dev_bytenr_ref_to), or NULL if no such link exists.
 */
static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h)
{
	struct btrfsic_block_link *l;
	unsigned int hashval = (unsigned int)(dev_bytenr_ref_to >> 16);

	hashval ^= (unsigned int)(dev_bytenr_ref_from >> 16);
	hashval ^= (unsigned int)((uintptr_t)bdev_ref_to);
	hashval ^= (unsigned int)((uintptr_t)bdev_ref_from);
	hashval &= BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1;

	list_for_each_entry(l, &h->table[hashval], collision_resolving_node) {
		BUG_ON(NULL == l->block_ref_to);
		BUG_ON(NULL == l->block_ref_from);
		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
		    l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
		    l->block_ref_from->dev_state->bdev == bdev_ref_from &&
		    l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
			return l;
	}

	return NULL;
}
/* Turn every bucket of the device-state hashtable into an empty list. */
static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h)
{
	int bucket;

	for (bucket = 0; bucket < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; bucket++)
		INIT_LIST_HEAD(&h->table[bucket]);
}
/* Insert a device state, hashed by its bdev pointer. */
static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h)
{
	unsigned int hashval = (unsigned int)((uintptr_t)ds->bdev);

	hashval &= BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1;
	list_add(&ds->collision_resolving_node, &h->table[hashval]);
}
/* Unlink a device state from its hashtable bucket. */
static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
{
	list_del(&ds->collision_resolving_node);
}
static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
struct block_device *bdev,
struct btrfsic_dev_state_hashtable *h)
{
const unsigned int hashval =
(((unsigned int)((uintptr_t)bdev)) &
(BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
struct list_head *elem;
list_for_each(elem, h->table + hashval) {
struct btrfsic_dev_state *const ds =
list_entry(elem, struct btrfsic_dev_state,
collision_resolving_node);
if (ds->bdev == bdev)
return ds;
}
return NULL;
}
/*
 * Scan all devices for superblocks, pick the one with the highest
 * generation, then walk the root, chunk and log trees it references to
 * build the initial block database.
 *
 * Returns 0 on success, a negative value on failure (the convention here
 * is -1 for internal errors, or whatever the first dev-mirror pass
 * returned).
 */
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices)
{
	int ret = 0;
	struct btrfs_super_block *selected_super;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfsic_dev_state *selected_dev_state = NULL;
	int pass;

	BUG_ON(NULL == state);
	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
	if (NULL == selected_super) {
		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
		return -1;
	}

	/* read every superblock mirror of every device; the one with the
	 * highest generation is copied into selected_super by
	 * btrfsic_process_superblock_dev_mirror() */
	list_for_each_entry(device, dev_head, dev_list) {
		int i;
		struct btrfsic_dev_state *dev_state;

		if (!device->bdev || !device->name)
			continue;
		dev_state = btrfsic_dev_state_lookup(device->bdev);
		BUG_ON(NULL == dev_state);
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			ret = btrfsic_process_superblock_dev_mirror(
					state, dev_state, device, i,
					&selected_dev_state, selected_super);
			/* only a failure on the primary mirror (i == 0)
			 * is fatal; secondary mirrors may be absent */
			if (0 != ret && 0 == i) {
				kfree(selected_super);
				return ret;
			}
		}
	}

	if (NULL == state->latest_superblock) {
		printk(KERN_INFO "btrfsic: no superblock found!\n");
		kfree(selected_super);
		return -1;
	}

	state->csum_size = btrfs_super_csum_size(selected_super);

	/* pass 0: root tree, pass 1: chunk tree, pass 2: log tree */
	for (pass = 0; pass < 3; pass++) {
		int num_copies;
		int mirror_num;
		u64 next_bytenr;

		switch (pass) {
		case 0:
			next_bytenr = btrfs_super_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 1:
			next_bytenr = btrfs_super_chunk_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 2:
			next_bytenr = btrfs_super_log_root(selected_super);
			/* a zero log root means no log tree exists */
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);

		/* process every mirror of the tree root block */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			ret = btrfsic_map_block(state, next_bytenr,
						state->metablock_size,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO "btrfsic:"
				       " btrfsic_map_block(root @%llu,"
				       " mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				kfree(selected_super);
				return -1;
			}

			/* the block and its link from the superblock were
			 * already created by the dev-mirror pass above */
			next_block = btrfsic_block_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					&state->block_hashtable);
			BUG_ON(NULL == next_block);

			l = btrfsic_block_link_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					state->latest_superblock->dev_state->
					bdev,
					state->latest_superblock->dev_bytenr,
					&state->block_link_hashtable);
			BUG_ON(NULL == l);

			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
			/* btrfsic_read_block() returns the byte count read;
			 * less than one page means failure */
			if (ret < (int)PAGE_CACHE_SIZE) {
				printk(KERN_INFO
				       "btrfsic: read @logical %llu failed!\n",
				       (unsigned long long)
				       tmp_next_block_ctx.start);
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				kfree(selected_super);
				return -1;
			}

			ret = btrfsic_process_metablock(state,
							next_block,
							&tmp_next_block_ctx,
							BTRFS_MAX_LEVEL + 3, 1);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
		}
	}

	kfree(selected_super);
	return ret;
}
/*
 * Read and validate one superblock mirror of one device. A valid mirror
 * is registered as a btrfsic_block, copied into *selected_super if its
 * generation is the highest seen so far, and links from it to the root,
 * chunk and log tree blocks it references are created.
 *
 * Returns 0 on success or when the mirror is simply not a matching
 * superblock (stale/foreign data is not an error), -1 on real failures
 * (mirror offset beyond the device, read error, allocation failure).
 */
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super)
{
	struct btrfs_super_block *super_tmp;
	u64 dev_bytenr;
	struct buffer_head *bh;
	struct btrfsic_block *superblock_tmp;
	int pass;
	struct block_device *const superblock_bdev = device->bdev;

	/* super block bytenr is always the unmapped device bytenr */
	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
	if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
		return -1;
	/* read via buffer head in 4096-byte units; the superblock may sit
	 * at a non-zero offset inside the buffer */
	bh = __bread(superblock_bdev, dev_bytenr / 4096,
		     BTRFS_SUPER_INFO_SIZE);
	if (NULL == bh)
		return -1;
	super_tmp = (struct btrfs_super_block *)
	    (bh->b_data + (dev_bytenr & 4095));

	/* reject anything that is not a superblock belonging to this
	 * device and filesystem geometry — not an error, just skip it */
	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
	    super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
	    btrfs_super_leafsize(super_tmp) != state->metablock_size ||
	    btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
		brelse(bh);
		return 0;
	}

	superblock_tmp =
	    btrfsic_block_hashtable_lookup(superblock_bdev,
					   dev_bytenr,
					   &state->block_hashtable);
	if (NULL == superblock_tmp) {
		superblock_tmp = btrfsic_block_alloc();
		if (NULL == superblock_tmp) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			brelse(bh);
			return -1;
		}
		/* for superblock, only the dev_bytenr makes sense */
		superblock_tmp->dev_bytenr = dev_bytenr;
		superblock_tmp->dev_state = dev_state;
		superblock_tmp->logical_bytenr = dev_bytenr;
		superblock_tmp->generation = btrfs_super_generation(super_tmp);
		superblock_tmp->is_metadata = 1;
		superblock_tmp->is_superblock = 1;
		superblock_tmp->is_iodone = 1;
		superblock_tmp->never_written = 0;
		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)"
				     " @%llu (%s/%llu/%d)\n",
				     superblock_bdev,
				     rcu_str_deref(device->name),
				     (unsigned long long)dev_bytenr,
				     dev_state->name,
				     (unsigned long long)dev_bytenr,
				     superblock_mirror_num);
		list_add(&superblock_tmp->all_blocks_node,
			 &state->all_blocks_list);
		btrfsic_block_hashtable_add(superblock_tmp,
					    &state->block_hashtable);
	}

	/* select the one with the highest generation field */
	if (btrfs_super_generation(super_tmp) >
	    state->max_superblock_generation ||
	    0 == state->max_superblock_generation) {
		memcpy(selected_super, super_tmp, sizeof(*selected_super));
		*selected_dev_state = dev_state;
		state->max_superblock_generation =
		    btrfs_super_generation(super_tmp);
		state->latest_superblock = superblock_tmp;
	}

	/* create links superblock -> {root, chunk, log} tree blocks,
	 * one per mirror of each referenced block */
	for (pass = 0; pass < 3; pass++) {
		u64 next_bytenr;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;
		switch (pass) {
		case 0:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "initial root ";
			next_bytenr = btrfs_super_root(super_tmp);
			break;
		case 1:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "initial chunk ";
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			/* a zero log root means no log tree exists */
			if (0 == next_bytenr)
				continue;
			break;
		}

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr,
					      state->metablock_size,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				printk(KERN_INFO "btrfsic: btrfsic_map_block("
				       "bytenr @%llu, mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				brelse(bh);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				brelse(bh);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			/* real generation is learned when the block's header
			 * is read in btrfsic_process_metablock() */
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				brelse(bh);
				return -1;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

	brelse(bh);
	return 0;
}
/*
 * Allocate a stack frame for the iterative tree walk in
 * btrfsic_process_metablock(); NULL (with a log line) on OOM.
 */
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
{
	struct btrfsic_stack_frame *sf = kzalloc(sizeof(*sf), GFP_NOFS);

	if (sf)
		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
	else
		printk(KERN_INFO "btrfsic: alloc memory failed!\n");

	return sf;
}
/* Free a stack frame; a non-NULL pointer must carry the debug magic. */
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
{
	BUG_ON(NULL != sf &&
	       BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER != sf->magic);
	kfree(sf);
}
/*
 * Walk a metadata tree starting at first_block, recording a block link
 * for every child block reached via root items (in leaves) or key
 * pointers (in nodes), and handling extent-data items when data checking
 * is enabled.
 *
 * Recursion is implemented with an explicit chain of heap-allocated
 * btrfsic_stack_frames (prev pointers) so the kernel stack stays flat;
 * only the frame for the initial block lives on the stack and its
 * block_ctx is released by the caller.
 *
 * Returns 0 on success or the first error propagated up the frame chain.
 */
static int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		int first_limit_nesting, int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;
	struct btrfs_header *const first_hdr =
		(struct btrfs_header *)first_block_ctx->datav[0];

	BUG_ON(!first_hdr);
	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;	/* -1 means "header not yet examined" */
	sf->limit_nesting = first_limit_nesting;
	sf->block = first_block;
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		/* level 0: this block is a leaf */
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = le32_to_cpu(leafhdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "leaf %llu items %d generation %llu"
				       " owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       sf->nr,
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.owner));
		}

continue_with_current_leaf_stack_frame:
		/* advance to the next item once all mirrors of the current
		 * child have been visited */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item disk_item;
			u32 disk_item_offset =
			    (uintptr_t)(leafhdr->items + sf->i) -
			    (uintptr_t)leafhdr;
			struct btrfs_disk_key *disk_key;
			u8 type;
			u32 item_offset;
			u32 item_size;

			/* bounds-check before copying the item header out
			 * of the (page-split) block data */
			if (disk_item_offset + sizeof(struct btrfs_item) >
			    sf->block_ctx->len) {
leaf_item_out_of_bounce_error:
				printk(KERN_INFO
				       "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
				       sf->block_ctx->start,
				       sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(sf->block_ctx,
						     &disk_item,
						     disk_item_offset,
						     sizeof(struct btrfs_item));
			item_offset = le32_to_cpu(disk_item.offset);
			item_size = le32_to_cpu(disk_item.size);
			disk_key = &disk_item.key;
			type = disk_key->type;

			if (BTRFS_ROOT_ITEM_KEY == type) {
				struct btrfs_root_item root_item;
				u32 root_item_offset;
				u64 next_bytenr;

				root_item_offset = item_offset +
					offsetof(struct btrfs_leaf, items);
				if (root_item_offset + item_size >
				    sf->block_ctx->len)
					goto leaf_item_out_of_bounce_error;
				btrfsic_read_from_block_data(
					sf->block_ctx, &root_item,
					root_item_offset,
					item_size);
				next_bytenr = le64_to_cpu(root_item.bytenr);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						le64_to_cpu(root_item.
						generation));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					/* descend: push a new frame for the
					 * child block */
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.datav[0];

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						btrfsic_release_block_ctx(
								&sf->
								next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		/* level > 0: this block is an interior node */
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = le32_to_cpu(nodehdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "node %llu level %d items %d"
				       " generation %llu owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       nodehdr->header.level, sf->nr,
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.owner));
		}

continue_with_current_node_stack_frame:
		/* advance to the next key pointer once all mirrors of the
		 * current child have been visited */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr key_ptr;
			u32 key_ptr_offset;
			u64 next_bytenr;

			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
			    (uintptr_t)nodehdr;
			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
			    sf->block_ctx->len) {
				printk(KERN_INFO
				       "btrfsic: node item out of bounce at logical %llu, dev %s\n",
				       sf->block_ctx->start,
				       sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(
				sf->block_ctx, &key_ptr, key_ptr_offset,
				sizeof(struct btrfs_key_ptr));
			next_bytenr = le64_to_cpu(key_ptr.blockptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&key_ptr.key,
					le64_to_cpu(key_ptr.generation));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				/* descend: push a new frame for the child */
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.datav[0];

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack)
					goto one_stack_frame_backwards;

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	/* this frame is exhausted (or failed); pop back to the parent,
	 * propagating any error */
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}
/*
 * Copy @len bytes starting at logical offset @offset of the block
 * described by @block_ctx into @dstv.
 *
 * The block's data lives in a vector of individually mapped pages
 * (block_ctx->datav) and the block itself may start at a non
 * page-aligned logical address, so the in-page offset of the block
 * start is folded into the page index / in-page offset computation.
 */
static void btrfsic_read_from_block_data(
	struct btrfsic_block_data_ctx *block_ctx,
	void *dstv, u32 offset, size_t len)
{
	char *out = (char *)dstv;
	size_t start_in_page = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long page_idx = (start_in_page + offset) >> PAGE_CACHE_SHIFT;
	size_t in_page_off = (start_in_page + offset) &
			     ((unsigned long)PAGE_CACHE_SIZE - 1);

	WARN_ON(offset + len > block_ctx->len);

	for (; len > 0; page_idx++, in_page_off = 0) {
		size_t chunk = min(len,
				   ((size_t)PAGE_CACHE_SIZE - in_page_off));

		/* must never index past the mapped page vector */
		BUG_ON(page_idx >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
				   PAGE_CACHE_SHIFT);
		memcpy(out, block_ctx->datav[page_idx] + in_page_off, chunk);
		out += chunk;
		len -= chunk;
	}
}
/*
 * Record a reference ("link") from the tree block @block to the child
 * block at logical address @next_bytenr and, if the nesting limit
 * allows and the link was newly created, read the child's data so the
 * caller can descend into it.
 *
 * *num_copiesp / *mirror_nump implement the caller's iteration over
 * all mirrors of the child: num_copies is looked up lazily on the
 * first call (when *num_copiesp == 0) and *mirror_nump is advanced at
 * the end of each successful call.
 *
 * On return, *next_blockp is either NULL (mirrors exhausted, link
 * already known, nesting limit reached, or error) or points to the
 * child block whose data has been read into *next_block_ctx; in the
 * latter case the caller owns the block ctx and must release it.
 *
 * Returns 0 on success, -1 on failure.
 */
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		/* first mirror: find out how many mirrors exist at all */
		*num_copiesp =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		printk(KERN_INFO
		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
		       *mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				state->metablock_size,
				next_block_ctx, *mirror_nump);
	if (ret) {
		printk(KERN_INFO
		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
		       (unsigned long long)next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}
	if (block_was_created) {
		/* freshly added block: no link can exist for it yet */
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		if (next_block->logical_bytenr != next_bytenr &&
		    !(!next_block->is_metadata &&
		      0 == next_block->logical_bytenr)) {
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c,"
			       " bytenr mismatch (!= stored %llu).\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block),
			       (unsigned long long)next_block->logical_bytenr);
		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c.\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block));
		next_block->logical_bytenr = next_bytenr;

		next_block->mirror_num = *mirror_nump;
		/* a link from this parent may already be recorded */
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		/* first reference from this parent: create the link */
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			/* leaf of the scan: just bump the refcount */
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		/* new link and descent allowed: read the child's data */
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)next_block_ctx->len) {
			printk(KERN_INFO
			       "btrfsic: read block @logical %llu failed!\n",
			       (unsigned long long)next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}
/*
 * Handle one EXTENT_DATA item found in a leaf at byte offset
 * @item_offset of the leaf's item area: for a regular, non-hole
 * extent, create (or refresh) a link from the leaf's block @block to
 * every mirror of every data block covered by the extent.
 *
 * Inline extents and holes (disk_bytenr == 0) carry no separate disk
 * blocks and are ignored.
 *
 * Returns 0 on success (including ignored extents), -1 on failure.
 */
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	int ret;
	struct btrfs_file_extent_item file_extent_item;
	u64 file_extent_item_offset;
	u64 next_bytenr;
	u64 num_bytes;
	u64 generation;
	struct btrfsic_block_link *l;

	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
				  item_offset;
	if (file_extent_item_offset +
	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
	    block_ctx->len) {
		printk(KERN_INFO
		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
		       block_ctx->start, block_ctx->dev->name);
		return -1;
	}

	/* first read only up to disk_num_bytes: enough to decide whether
	 * this extent references separate disk blocks at all */
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
		file_extent_item_offset,
		offsetof(struct btrfs_file_extent_item, disk_num_bytes));
	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
	    ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr)) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
			printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
			       file_extent_item.type,
			       (unsigned long long)
			       le64_to_cpu(file_extent_item.disk_bytenr));
		return 0;
	}

	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
	    block_ctx->len) {
		printk(KERN_INFO
		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
		       block_ctx->start, block_ctx->dev->name);
		return -1;
	}
	/* now read the whole item */
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
				     file_extent_item_offset,
				     sizeof(struct btrfs_file_extent_item));
	next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
		      le64_to_cpu(file_extent_item.offset);
	num_bytes = le64_to_cpu(file_extent_item.num_bytes);
	/* note: 'generation' was previously assigned twice in a row here;
	 * the redundant duplicate assignment has been removed */
	generation = le64_to_cpu(file_extent_item.generation);
	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
		       " offset = %llu, num_bytes = %llu\n",
		       file_extent_item.type,
		       (unsigned long long)
		       le64_to_cpu(file_extent_item.disk_bytenr),
		       (unsigned long long)le64_to_cpu(file_extent_item.offset),
		       (unsigned long long)num_bytes);
	/* walk the extent one datablock at a time, linking every mirror */
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		if (num_bytes > state->datablock_size)
			chunk_len = state->datablock_size;
		else
			chunk_len = num_bytes;
		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->datablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "btrfsic_handle_extent_data("
				       "mirror_num=%d)\n", mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				printk(KERN_INFO
				       "\tdisk_bytenr = %llu, num_bytes %u\n",
				       (unsigned long long)next_bytenr,
				       chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				if (next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					printk(KERN_INFO
					       "Referenced block"
					       " @%llu (%s/%llu/%d)"
					       " found in hash table, D,"
					       " bytenr mismatch"
					       " (!= stored %llu).\n",
					       (unsigned long long)next_bytenr,
					       next_block_ctx.dev->name,
					       (unsigned long long)
					       next_block_ctx.dev_bytenr,
					       mirror_num,
					       (unsigned long long)
					       next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}
/*
 * Resolve the logical address @bytenr (mirror @mirror_num) to a
 * physical device location and fill @block_ctx_out accordingly.
 * No page data is read here; datav/pagev/mem_to_free stay cleared.
 *
 * Returns 0 on success, a negative errno on failure (in which case
 * @block_ctx_out is zeroed).
 */
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num)
{
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	u64 length = len;
	int ret;

	ret = btrfs_map_block(state->root->fs_info, READ,
			      bytenr, &length, &bbio, mirror_num);
	if (ret) {
		/* mapping failed: hand back an empty context */
		block_ctx_out->start = 0;
		block_ctx_out->dev_bytenr = 0;
		block_ctx_out->len = 0;
		block_ctx_out->dev = NULL;
		block_ctx_out->datav = NULL;
		block_ctx_out->pagev = NULL;
		block_ctx_out->mem_to_free = NULL;
		return ret;
	}

	dev = bbio->stripes[0].dev;
	block_ctx_out->dev = btrfsic_dev_state_lookup(dev->bdev);
	block_ctx_out->dev_bytenr = bbio->stripes[0].physical;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;
	kfree(bbio);

	if (NULL == block_ctx_out->dev) {
		/* device is not watched by btrfsic */
		ret = -ENXIO;
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
	}
	return ret;
}
/*
 * Build a block data context for a superblock location.  Superblocks
 * live at fixed per-device offsets, so logical and device byte
 * numbers are identical and no chunk mapping is needed.
 *
 * Returns 0 on success, -ENXIO when @bdev is not a watched device.
 */
static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
				  u32 len, struct block_device *bdev,
				  struct btrfsic_block_data_ctx *block_ctx_out)
{
	block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
	block_ctx_out->dev_bytenr = bytenr;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;

	if (NULL == block_ctx_out->dev) {
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
		return -ENXIO;
	}
	return 0;
}
/*
 * Release the page data attached to a block data context by
 * btrfsic_read_block(): unmap and free every page and then free the
 * combined datav/pagev allocation.  A context without attached data
 * (mem_to_free == NULL) is a no-op.
 */
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
	unsigned int page_count;
	unsigned int idx;

	if (!block_ctx->mem_to_free)
		return;

	BUG_ON(!block_ctx->datav);
	BUG_ON(!block_ctx->pagev);
	page_count = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
		     PAGE_CACHE_SHIFT;
	for (idx = 0; idx < page_count; idx++) {
		if (block_ctx->datav[idx]) {
			kunmap(block_ctx->pagev[idx]);
			block_ctx->datav[idx] = NULL;
		}
		if (block_ctx->pagev[idx]) {
			__free_page(block_ctx->pagev[idx]);
			block_ctx->pagev[idx] = NULL;
		}
	}
	kfree(block_ctx->mem_to_free);
	block_ctx->mem_to_free = NULL;
	block_ctx->pagev = NULL;
	block_ctx->datav = NULL;
}
static int btrfsic_read_block(struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx)
{
unsigned int num_pages;
unsigned int i;
u64 dev_bytenr;
int ret;
BUG_ON(block_ctx->datav);
BUG_ON(block_ctx->pagev);
BUG_ON(block_ctx->mem_to_free);
if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: read_block() with unaligned bytenr %llu\n",
(unsigned long long)block_ctx->dev_bytenr);
return -1;
}
num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
sizeof(*block_ctx->pagev)) *
num_pages, GFP_NOFS);
if (!block_ctx->mem_to_free)
return -1;
block_ctx->datav = block_ctx->mem_to_free;
block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
for (i = 0; i < num_pages; i++) {
block_ctx->pagev[i] = alloc_page(GFP_NOFS);
if (!block_ctx->pagev[i])
return -1;
}
dev_bytenr = block_ctx->dev_bytenr;
for (i = 0; i < num_pages;) {
struct bio *bio;
unsigned int j;
DECLARE_COMPLETION_ONSTACK(complete);
bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) {
printk(KERN_INFO
"btrfsic: bio_alloc() for %u pages failed!\n",
num_pages - i);
return -1;
}
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_sector = dev_bytenr >> 9;
bio->bi_end_io = btrfsic_complete_bio_end_io;
bio->bi_private = &complete;
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
PAGE_CACHE_SIZE, 0);
if (PAGE_CACHE_SIZE != ret)
break;
}
if (j == i) {
printk(KERN_INFO
"btrfsic: error, failed to add a single page!\n");
return -1;
}
submit_bio(READ, bio);
/* this will also unplug the queue */
wait_for_completion(&complete);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_INFO
"btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name);
bio_put(bio);
return -1;
}
bio_put(bio);
dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
i = j;
}
for (i = 0; i < num_pages; i++) {
block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
if (!block_ctx->datav[i]) {
printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
block_ctx->dev->name);
return -1;
}
}
return block_ctx->len;
}
/* bio completion callback: wake the waiter in btrfsic_read_block() */
static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
{
	struct completion *done = bio->bi_private;

	complete(done);
}
/*
 * Debugging aid: print every block the integrity checker knows about,
 * together with all outgoing ("refers to") and incoming ("is ref
 * from") links of each block.  Output goes to the kernel log.
 */
static void btrfsic_dump_database(struct btrfsic_state *state)
{
	struct list_head *elem_all;

	BUG_ON(NULL == state);
	printk(KERN_INFO "all_blocks_list:\n");
	list_for_each(elem_all, &state->all_blocks_list) {
		const struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *elem_ref_from;

		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
		       btrfsic_get_block_type(state, b_all),
		       (unsigned long long)b_all->logical_bytenr,
		       b_all->dev_state->name,
		       (unsigned long long)b_all->dev_bytenr,
		       b_all->mirror_num);

		/* outgoing links: blocks this one references */
		list_for_each(elem_ref_to, &b_all->ref_to_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " refers %u* to"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
		}

		/* incoming links: blocks that reference this one */
		list_for_each(elem_ref_from, &b_all->ref_from_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_from,
				       struct btrfsic_block_link,
				       node_ref_from);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " is ref %u* from"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_from),
			       (unsigned long long)
			       l->block_ref_from->logical_bytenr,
			       l->block_ref_from->dev_state->name,
			       (unsigned long long)
			       l->block_ref_from->dev_bytenr,
			       l->block_ref_from->mirror_num);
		}

		printk(KERN_INFO "\n");
	}
}
/*
* Test whether the disk block contains a tree block (leaf or node)
* (note that this test fails for the super block)
*/
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages)
{
struct btrfs_header *h;
u8 csum[BTRFS_CSUM_SIZE];
u32 crc = ~(u32)0;
unsigned int i;
if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
return 1; /* not metadata */
num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
h = (struct btrfs_header *)datav[0];
if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
return 1;
for (i = 0; i < num_pages; i++) {
u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
size_t sublen = i ? PAGE_CACHE_SIZE :
(PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
crc = crc32c(crc, data, sublen);
}
btrfs_csum_final(crc, csum);
if (memcmp(csum, h->csum, state->csum_size))
return 1;
return 0; /* is metadata */
}
/*
 * Called for every write that hits a btrfsic-watched device.  Works
 * through the written buffer one block at a time (the "goto again"
 * loop): classifies each portion as superblock / metadata / data,
 * looks it up in the block hash table, checks overwrite rules
 * (blocks still referenced by the latest superblock, blocks whose
 * previous IO has not completed), rebuilds the block's outgoing
 * reference links from the newly written content, and patches the
 * bio/bh end_io handler so completion of this write is tracked.
 *
 * @bio_is_patched lets several blocks written by the same bio share
 * one end_io patch: the first block hooks bi_end_io and later blocks
 * are chained via next_in_same_bio.
 */
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr, char **mapped_datav,
					  unsigned int num_pages,
					  struct bio *bio, int *bio_is_patched,
					  struct buffer_head *bh,
					  int submit_bio_bh_rw)
{
	int is_metadata;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx block_ctx;
	int ret;
	struct btrfsic_state *state = dev_state->state;
	struct block_device *bdev = dev_state->bdev;
	unsigned int processed_len;

	if (NULL != bio_is_patched)
		*bio_is_patched = 0;

/* one recognized block is consumed from the buffer per iteration */
again:
	if (num_pages == 0)
		return;

	processed_len = 0;
	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
						      num_pages));

	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
					       &state->block_hashtable);
	if (NULL != block) {
		/* the written location is a block we already track */
		u64 bytenr = 0;
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;

		if (block->is_superblock) {
			bytenr = le64_to_cpu(((struct btrfs_super_block *)
					      mapped_datav[0])->bytenr);
			if (num_pages * PAGE_CACHE_SIZE <
			    BTRFS_SUPER_INFO_SIZE) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
				return;
			}
			is_metadata = 1;
			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
			processed_len = BTRFS_SUPER_INFO_SIZE;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
				printk(KERN_INFO
				       "[before new superblock is written]:\n");
				btrfsic_dump_tree_sub(state, block, 0);
			}
		}
		if (is_metadata) {
			if (!block->is_superblock) {
				if (num_pages * PAGE_CACHE_SIZE <
				    state->metablock_size) {
					printk(KERN_INFO
					       "btrfsic: cannot work with too short bios!\n");
					return;
				}
				processed_len = state->metablock_size;
				bytenr = le64_to_cpu(((struct btrfs_header *)
						      mapped_datav[0])->bytenr);
				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
							       dev_state,
							       dev_bytenr);
			}
			if (block->logical_bytenr != bytenr) {
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c,"
				       " bytenr mismatch"
				       " (!= stored %llu).\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block),
				       (unsigned long long)
				       block->logical_bytenr);
				block->logical_bytenr = bytenr;
			} else if (state->print_mask &
				   BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
		} else {
			if (num_pages * PAGE_CACHE_SIZE <
			    state->datablock_size) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
				return;
			}
			processed_len = state->datablock_size;
			bytenr = block->logical_bytenr;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
		}

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "ref_to_list: %cE, ref_from_list: %cE\n",
			       list_empty(&block->ref_to_list) ? ' ' : '!',
			       list_empty(&block->ref_from_list) ? ' ' : '!');
		/* overwriting a block still reachable from the most recent
		 * superblock would corrupt the committed tree */
		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), old(gen=%llu,"
			       " objectid=%llu, type=%d, offset=%llu),"
			       " new(gen=%llu),"
			       " which is referenced by most recent superblock"
			       " (superblockgen=%llu)!\n",
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       block->mirror_num,
			       (unsigned long long)block->generation,
			       (unsigned long long)
			       le64_to_cpu(block->disk_key.objectid),
			       block->disk_key.type,
			       (unsigned long long)
			       le64_to_cpu(block->disk_key.offset),
			       (unsigned long long)
			       le64_to_cpu(((struct btrfs_header *)
					    mapped_datav[0])->generation),
			       (unsigned long long)
			       state->max_superblock_generation);
			btrfsic_dump_tree(state);
		}

		if (!block->is_iodone && !block->never_written) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
			       " which is not yet iodone!\n",
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       block->mirror_num,
			       (unsigned long long)block->generation,
			       (unsigned long long)
			       le64_to_cpu(((struct btrfs_header *)
					    mapped_datav[0])->generation));
			/* it would not be safe to go on */
			btrfsic_dump_tree(state);
			goto continue_loop;
		}

		/*
		 * Clear all references of this block. Do not free
		 * the block itself even if is not referenced anymore
		 * because it still carries valueable information
		 * like whether it was ever written and IO completed.
		 */
		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &block->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt) {
				list_del(&l->node_ref_to);
				list_del(&l->node_ref_from);
				btrfsic_block_link_hashtable_remove(l);
				btrfsic_block_link_free(l);
			}
		}

		if (block->is_superblock)
			ret = btrfsic_map_superblock(state, bytenr,
						     processed_len,
						     bdev, &block_ctx);
		else
			ret = btrfsic_map_block(state, bytenr, processed_len,
						&block_ctx, 0);
		if (ret) {
			printk(KERN_INFO
			       "btrfsic: btrfsic_map_block(root @%llu)"
			       " failed!\n", (unsigned long long)bytenr);
			goto continue_loop;
		}
		block_ctx.datav = mapped_datav;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		if (is_metadata || state->include_extent_data) {
			block->never_written = 0;
			block->iodone_w_error = 0;
			if (NULL != bio) {
				block->is_iodone = 0;
				BUG_ON(NULL == bio_is_patched);
				if (!*bio_is_patched) {
					/* first block of this bio: hook the
					 * end_io handler */
					block->orig_bio_bh_private =
					    bio->bi_private;
					block->orig_bio_bh_end_io.bio =
					    bio->bi_end_io;
					block->next_in_same_bio = NULL;
					bio->bi_private = block;
					bio->bi_end_io = btrfsic_bio_end_io;
					*bio_is_patched = 1;
				} else {
					/* further blocks: chain onto the
					 * block already hooked in */
					struct btrfsic_block *chained_block =
					    (struct btrfsic_block *)
					    bio->bi_private;

					BUG_ON(NULL == chained_block);
					block->orig_bio_bh_private =
					    chained_block->orig_bio_bh_private;
					block->orig_bio_bh_end_io.bio =
					    chained_block->orig_bio_bh_end_io.
					    bio;
					block->next_in_same_bio = chained_block;
					bio->bi_private = block;
				}
			} else if (NULL != bh) {
				block->is_iodone = 0;
				block->orig_bio_bh_private = bh->b_private;
				block->orig_bio_bh_end_io.bh = bh->b_end_io;
				block->next_in_same_bio = NULL;
				bh->b_private = block;
				bh->b_end_io = btrfsic_bh_end_io;
			} else {
				block->is_iodone = 1;
				block->orig_bio_bh_private = NULL;
				block->orig_bio_bh_end_io.bio = NULL;
				block->next_in_same_bio = NULL;
			}
		}

		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (is_metadata) {
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
				BUG_ON(PAGE_CACHE_SIZE !=
				       BTRFS_SUPER_INFO_SIZE);
				ret = btrfsic_process_written_superblock(
						state,
						block,
						(struct btrfs_super_block *)
						mapped_datav[0]);
				if (state->print_mask &
				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
					printk(KERN_INFO
					"[after new superblock is written]:\n");
					btrfsic_dump_tree_sub(state, block, 0);
				}
			} else {
				block->mirror_num = 0;	/* unknown */
				ret = btrfsic_process_metablock(
						state,
						block,
						&block_ctx,
						0, 0);
			}
			if (ret)
				printk(KERN_INFO
				       "btrfsic: btrfsic_process_metablock"
				       "(root @%llu) failed!\n",
				       (unsigned long long)dev_bytenr);
		} else {
			block->is_metadata = 0;
			block->mirror_num = 0;	/* unknown */
			block->generation = BTRFSIC_GENERATION_UNKNOWN;
			if (!state->include_extent_data
			    && list_empty(&block->ref_from_list)) {
				/*
				 * disk block is overwritten with extent
				 * data (not meta data) and we are configured
				 * to not include extent data: take the
				 * chance and free the block's memory
				 */
				btrfsic_block_hashtable_remove(block);
				list_del(&block->all_blocks_node);
				btrfsic_block_free(block);
			}
		}
		btrfsic_release_block_ctx(&block_ctx);
	} else {
		/* block has not been found in hash table */
		u64 bytenr;

		if (!is_metadata) {
			processed_len = state->datablock_size;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "Written block (%s/%llu/?)"
				       " !found in hash table, D.\n",
				       dev_state->name,
				       (unsigned long long)dev_bytenr);
			if (!state->include_extent_data) {
				/* ignore that written D block */
				goto continue_loop;
			}

			/* this is getting ugly for the
			 * include_extent_data case... */
			bytenr = 0;	/* unknown */
			block_ctx.start = bytenr;
			block_ctx.len = processed_len;
			block_ctx.mem_to_free = NULL;
			block_ctx.pagev = NULL;
		} else {
			processed_len = state->metablock_size;
			bytenr = le64_to_cpu(((struct btrfs_header *)
					      mapped_datav[0])->bytenr);
			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
						       dev_bytenr);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/?)"
				       " !found in hash table, M.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr);

			ret = btrfsic_map_block(state, bytenr, processed_len,
						&block_ctx, 0);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(root @%llu)"
				       " failed!\n",
				       (unsigned long long)dev_bytenr);
				goto continue_loop;
			}
		}
		block_ctx.datav = mapped_datav;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		/* create and track a new block for this write */
		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(&block_ctx);
			goto continue_loop;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = dev_bytenr;
		block->logical_bytenr = bytenr;
		block->is_metadata = is_metadata;
		block->never_written = 0;
		block->iodone_w_error = 0;
		block->mirror_num = 0;	/* unknown */
		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (NULL != bio) {
			block->is_iodone = 0;
			BUG_ON(NULL == bio_is_patched);
			if (!*bio_is_patched) {
				block->orig_bio_bh_private = bio->bi_private;
				block->orig_bio_bh_end_io.bio = bio->bi_end_io;
				block->next_in_same_bio = NULL;
				bio->bi_private = block;
				bio->bi_end_io = btrfsic_bio_end_io;
				*bio_is_patched = 1;
			} else {
				struct btrfsic_block *chained_block =
				    (struct btrfsic_block *)
				    bio->bi_private;

				BUG_ON(NULL == chained_block);
				block->orig_bio_bh_private =
				    chained_block->orig_bio_bh_private;
				block->orig_bio_bh_end_io.bio =
				    chained_block->orig_bio_bh_end_io.bio;
				block->next_in_same_bio = chained_block;
				bio->bi_private = block;
			}
		} else if (NULL != bh) {
			block->is_iodone = 0;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		} else {
			block->is_iodone = 1;
			block->orig_bio_bh_private = NULL;
			block->orig_bio_bh_end_io.bio = NULL;
			block->next_in_same_bio = NULL;
		}
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New written %c-block @%llu (%s/%llu/%d)\n",
			       is_metadata ? 'M' : 'D',
			       (unsigned long long)block->logical_bytenr,
			       block->dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);

		if (is_metadata) {
			ret = btrfsic_process_metablock(state, block,
							&block_ctx, 0, 0);
			if (ret)
				printk(KERN_INFO
				       "btrfsic: process_metablock(root @%llu)"
				       " failed!\n",
				       (unsigned long long)dev_bytenr);
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

/* advance past the block just processed and handle the remainder */
continue_loop:
	BUG_ON(!processed_len);
	dev_bytenr += processed_len;
	mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
	num_pages -= processed_len >> PAGE_CACHE_SHIFT;
	goto again;
}
/*
 * Replacement bio end_io handler installed by
 * btrfsic_process_written_block().  Restores the bio's original
 * private data and end_io handler, then walks the chain of btrfsic
 * blocks submitted within this bio (linked via next_in_same_bio),
 * records the IO result, updates the flush generation for
 * FLUSH/FUA requests and marks each block done, and finally invokes
 * the original end_io handler.
 */
static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
	int iodone_w_error;

	/* mutex is not held! This is not safe if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bio_error_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	/* restore the original handler before releasing any block */
	bp->bi_private = block->orig_bio_bh_private;
	bp->bi_end_io = block->orig_bio_bh_end_io.bio;

	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
			       bio_error_status,
			       btrfsic_get_block_type(dev_state->state, block),
			       (unsigned long long)block->logical_bytenr,
			       dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num);
		/* fetch the next link before the block may be released */
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		if (block->submit_bio_bh_rw & REQ_FLUSH) {
			/* a completed FLUSH starts a new flush generation */
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				printk(KERN_INFO
				       "bio_end_io() new %s flush_gen=%llu\n",
				       dev_state->name,
				       (unsigned long long)
				       dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw & REQ_FUA)
			block->flush_gen = 0; /* FUA completed means block is
					       * on disk */
		block->is_iodone = 1; /* for FLUSH, this releases the block */
		block = next_block;
	} while (NULL != block);

	bp->bi_end_io(bp, bio_error_status);
}
/*
 * Replacement buffer_head end_io handler installed by
 * btrfsic_process_written_block().  Records the IO result on the one
 * block the bh carried, updates the flush generation for FLUSH/FUA,
 * restores the original b_private/b_end_io and then calls the
 * original handler.  Unlike bios, a bh carries exactly one block, so
 * there is no chain to walk.
 */
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
	int iodone_w_error = !uptodate;
	struct btrfsic_dev_state *dev_state;

	BUG_ON(NULL == block);
	dev_state = block->dev_state;
	if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
		printk(KERN_INFO
		       "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
		       iodone_w_error,
		       btrfsic_get_block_type(dev_state->state, block),
		       (unsigned long long)block->logical_bytenr,
		       block->dev_state->name,
		       (unsigned long long)block->dev_bytenr,
		       block->mirror_num);

	block->iodone_w_error = iodone_w_error;
	if (block->submit_bio_bh_rw & REQ_FLUSH) {
		/* a completed FLUSH starts a new flush generation */
		dev_state->last_flush_gen++;
		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bh_end_io() new %s flush_gen=%llu\n",
			       dev_state->name,
			       (unsigned long long)dev_state->last_flush_gen);
	}
	if (block->submit_bio_bh_rw & REQ_FUA)
		block->flush_gen = 0; /* FUA completed means block is on disk */

	/* restore the original handler before releasing the block */
	bh->b_private = block->orig_bio_bh_private;
	bh->b_end_io = block->orig_bio_bh_end_io.bh;
	block->is_iodone = 1; /* for FLUSH, this releases the block */
	bh->b_end_io(bh, uptodate);
}
/*
 * Handle a superblock that has just been submitted for writing.
 *
 * Records the superblock's generation (keeping track of the newest one
 * seen so far in state->max_superblock_generation and
 * state->latest_superblock) and adds linkage entries from the
 * superblock to the root, chunk and log trees it points to, so that a
 * later btrfsic_check_all_ref_blocks() pass can verify that everything
 * the superblock references is safely on disk.
 *
 * Returns 0 on success, -1 on mapping or allocation failure.
 */
static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const superblock,
		struct btrfs_super_block *const super_hdr)
{
	int pass;
	superblock->generation = btrfs_super_generation(super_hdr);
	/* Only a generation newer than all previously seen superblocks
	 * (or the very first one) becomes the "latest" superblock. */
	if (!(superblock->generation > state->max_superblock_generation ||
	      0 == state->max_superblock_generation)) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: superblock @%llu (%s/%llu/%d)"
			       " with old gen %llu <= %llu\n",
			       (unsigned long long)superblock->logical_bytenr,
			       superblock->dev_state->name,
			       (unsigned long long)superblock->dev_bytenr,
			       superblock->mirror_num,
			       (unsigned long long)
			       btrfs_super_generation(super_hdr),
			       (unsigned long long)
			       state->max_superblock_generation);
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: got new superblock @%llu (%s/%llu/%d)"
			       " with new gen %llu > %llu\n",
			       (unsigned long long)superblock->logical_bytenr,
			       superblock->dev_state->name,
			       (unsigned long long)superblock->dev_bytenr,
			       superblock->mirror_num,
			       (unsigned long long)
			       btrfs_super_generation(super_hdr),
			       (unsigned long long)
			       state->max_superblock_generation);
		state->max_superblock_generation =
		    btrfs_super_generation(super_hdr);
		state->latest_superblock = superblock;
	}
	/* One pass per tree the superblock points at:
	 * 0 = root tree, 1 = chunk tree, 2 = log tree (may be absent). */
	for (pass = 0; pass < 3; pass++) {
		int ret;
		u64 next_bytenr;
		struct btrfsic_block *next_block;
		struct btrfsic_block_data_ctx tmp_next_block_ctx;
		struct btrfsic_block_link *l;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;
		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;
		switch (pass) {
		case 0:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "root ";
			next_bytenr = btrfs_super_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 1:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "chunk ";
			next_bytenr = btrfs_super_chunk_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 2:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
			additional_string = "log ";
			next_bytenr = btrfs_super_log_root(super_hdr);
			/* no log tree in use; nothing to link for this pass */
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		}
		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, BTRFS_SUPER_INFO_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		/* link the superblock to every mirror copy of the tree root */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			int was_created;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "btrfsic_process_written_superblock("
				       "mirror_num=%d)\n", mirror_num);
			ret = btrfsic_map_block(state, next_bytenr,
						BTRFS_SUPER_INFO_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				return -1;
			}
			next_block = btrfsic_block_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					additional_string,
					1, 0, 1,
					mirror_num,
					&was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				return -1;
			}
			next_block->disk_key = tmp_disk_key;
			/* freshly added blocks have no known generation yet */
			if (was_created)
				next_block->generation =
				    BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					next_block,
					superblock,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l)
				return -1;
		}
	}
	/* verify everything reachable from this superblock is on disk */
	if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) {
		WARN_ON(1);
		btrfsic_dump_tree(state);
	}
	return 0;
}
/*
 * Verify that everything reachable from @block (normally the superblock
 * that is about to be written) may safely be referenced from disk.
 *
 * Recursively walks the ref_to_list linkage and complains about any
 * referenced block that was never written, is still in flight (not
 * iodone), completed with a write error, carries a generation that does
 * not match what the referencing block recorded, or has not yet been
 * flushed out of the device's volatile write cache.
 *
 * The recursion depth is bounded by 3 + BTRFS_MAX_LEVEL; anything deeper
 * is treated as a (harmless) cycle left over from freed and reused
 * blocks, see the comment below.
 *
 * Returns 0 if all references check out, -1 otherwise.
 */
static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
					struct btrfsic_block *const block,
					int recursion_level)
{
	struct list_head *elem_ref_to;
	int ret = 0;

	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/*
		 * Note that this situation can happen and does not
		 * indicate an error in regular cases. It happens
		 * when disk blocks are freed and later reused.
		 * The check-integrity module is not aware of any
		 * block free operations, it just recognizes block
		 * write operations. Therefore it keeps the linkage
		 * information for a block until a block is
		 * rewritten. This can temporarily cause incorrect
		 * and even circular linkage informations. This
		 * causes no harm unless such blocks are referenced
		 * by the most recent super block.
		 */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "btrfsic: abort cyclic linkage (case 1).\n");
		return ret;
	}

	/*
	 * This algorithm is recursive because the amount of used stack
	 * space is very small and the max recursion depth is limited.
	 */
	list_for_each(elem_ref_to, &block->ref_to_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_to, struct btrfsic_block_link,
			       node_ref_to);

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "rl=%d, %c @%llu (%s/%llu/%d)"
			       " %u* refers to %c @%llu (%s/%llu/%d)\n",
			       recursion_level,
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)block->logical_bytenr,
			       block->dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
		if (l->block_ref_to->never_written) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is never written!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (!l->block_ref_to->is_iodone) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is not yet iodone!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->block_ref_to->iodone_w_error) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which has write error!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->parent_generation !=
			   l->block_ref_to->generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->parent_generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->block_ref_to->generation) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " with generation %llu !="
			       " parent generation %llu!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num,
			       (unsigned long long)l->block_ref_to->generation,
			       (unsigned long long)l->parent_generation);
			ret = -1;
		} else if (l->block_ref_to->flush_gen >
			   l->block_ref_to->dev_state->last_flush_gen) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is not flushed out of disk's write cache"
			       " (block flush_gen=%llu,"
			       " dev->flush_gen=%llu)!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num,
			       /*
			        * Fix: report the referenced block's
			        * flush_gen. The old code printed
			        * block->flush_gen (the parent's), which
			        * did not match the comparison above nor
			        * the "block flush_gen=" message text.
			        */
			       (unsigned long long)
			       l->block_ref_to->flush_gen,
			       (unsigned long long)
			       l->block_ref_to->dev_state->last_flush_gen);
			ret = -1;
		} else if (-1 == btrfsic_check_all_ref_blocks(state,
							      l->block_ref_to,
							      recursion_level +
							      1)) {
			ret = -1;
		}
	}
	return ret;
}
/*
 * Test whether @block is (transitively) referenced by the most recently
 * written superblock (state->latest_superblock).
 *
 * Walks the reverse linkage (ref_from_list) recursively. Recursion is
 * cut off at 3 + BTRFS_MAX_LEVEL to cope with stale circular linkage
 * left over from freed and reused blocks.
 *
 * Returns 1 if the latest superblock references @block, 0 otherwise.
 */
static int btrfsic_is_block_ref_by_superblock(
		const struct btrfsic_state *state,
		const struct btrfsic_block *block,
		int recursion_level)
{
	struct list_head *elem_ref_from;
	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/* refer to comment at "abort cyclic linkage (case 1)" */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "btrfsic: abort cyclic linkage (case 2).\n");
		return 0;
	}
	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	list_for_each(elem_ref_from, &block->ref_from_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_from, struct btrfsic_block_link,
			       node_ref_from);
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "rl=%d, %c @%llu (%s/%llu/%d)"
			       " is ref %u* from %c @%llu (%s/%llu/%d)\n",
			       recursion_level,
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)block->logical_bytenr,
			       block->dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_from),
			       (unsigned long long)
			       l->block_ref_from->logical_bytenr,
			       l->block_ref_from->dev_state->name,
			       (unsigned long long)
			       l->block_ref_from->dev_bytenr,
			       l->block_ref_from->mirror_num);
		/* direct reference from the latest superblock? (compare by
		 * device byte offset + block device, not by pointer) */
		if (l->block_ref_from->is_superblock &&
		    state->latest_superblock->dev_bytenr ==
		    l->block_ref_from->dev_bytenr &&
		    state->latest_superblock->dev_state->bdev ==
		    l->block_ref_from->dev_state->bdev)
			return 1;
		/* otherwise recurse up the referencing chain */
		else if (btrfsic_is_block_ref_by_superblock(state,
							    l->block_ref_from,
							    recursion_level +
							    1))
			return 1;
	}
	return 0;
}
/*
 * Log (verbose mode) that a linkage entry from one block to another was
 * added or had its reference count bumped.
 */
static void btrfsic_print_add_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	printk(KERN_INFO
	       "Add %u* link from %c @%llu (%s/%llu/%d)"
	       " to %c @%llu (%s/%llu/%d).\n",
	       l->ref_cnt,
	       btrfsic_get_block_type(state, l->block_ref_from),
	       (unsigned long long)l->block_ref_from->logical_bytenr,
	       l->block_ref_from->dev_state->name,
	       (unsigned long long)l->block_ref_from->dev_bytenr,
	       l->block_ref_from->mirror_num,
	       btrfsic_get_block_type(state, l->block_ref_to),
	       (unsigned long long)l->block_ref_to->logical_bytenr,
	       l->block_ref_to->dev_state->name,
	       (unsigned long long)l->block_ref_to->dev_bytenr,
	       l->block_ref_to->mirror_num);
}
/*
 * Log (verbose mode) that a linkage entry's reference count was dropped
 * (counterpart of btrfsic_print_add_link()).
 */
static void btrfsic_print_rem_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	printk(KERN_INFO
	       "Rem %u* link from %c @%llu (%s/%llu/%d)"
	       " to %c @%llu (%s/%llu/%d).\n",
	       l->ref_cnt,
	       btrfsic_get_block_type(state, l->block_ref_from),
	       (unsigned long long)l->block_ref_from->logical_bytenr,
	       l->block_ref_from->dev_state->name,
	       (unsigned long long)l->block_ref_from->dev_bytenr,
	       l->block_ref_from->mirror_num,
	       btrfsic_get_block_type(state, l->block_ref_to),
	       (unsigned long long)l->block_ref_to->logical_bytenr,
	       l->block_ref_to->dev_state->name,
	       (unsigned long long)l->block_ref_to->dev_bytenr,
	       l->block_ref_to->mirror_num);
}
/*
 * Classify a block for log output:
 *   'S' - the most recently written superblock,
 *   's' - any other superblock copy,
 *   'M' - metadata block,
 *   'D' - data block.
 */
static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block)
{
	if (!block->is_superblock)
		return block->is_metadata ? 'M' : 'D';

	/* superblock identity = (block device, device byte offset) */
	if (state->latest_superblock->dev_bytenr == block->dev_bytenr &&
	    state->latest_superblock->dev_state->bdev ==
	    block->dev_state->bdev)
		return 'S';

	return 's';
}
/*
 * Dump the whole linkage tree, starting at the latest superblock.
 * Convenience wrapper around btrfsic_dump_tree_sub().
 */
static void btrfsic_dump_tree(const struct btrfsic_state *state)
{
	btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
}
/*
 * Recursively dump the linkage tree rooted at @block with ASCII
 * indentation, giving every block as "<type>-<bytenr>(<dev>/<bytenr>/<mirror>)".
 * Output is truncated with "[...]" once the indentation exceeds
 * BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL, and mirrors > 1 are skipped unless
 * BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS is set.
 *
 * NOTE(review): buf is static, so this is not reentrant; callers appear
 * to hold btrfsic_mutex, which would serialize access — confirm.
 */
static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level)
{
	struct list_head *elem_ref_to;
	int indent_add;
	static char buf[80];
	int cursor_position;
	/*
	 * Should better fill an on-stack buffer with a complete line and
	 * dump it at once when it is time to print a newline character.
	 */
	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
			     btrfsic_get_block_type(state, block),
			     (unsigned long long)block->logical_bytenr,
			     block->dev_state->name,
			     (unsigned long long)block->dev_bytenr,
			     block->mirror_num);
	if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
		printk("[...]\n");
		return;
	}
	/*
	 * Fix: print buf via a constant "%s" format. Passing buf directly
	 * as the format string would misinterpret any '%' in the device
	 * name (format-string hazard).
	 */
	printk("%s", buf);
	indent_level += indent_add;
	if (list_empty(&block->ref_to_list)) {
		printk("\n");
		return;
	}
	if (block->mirror_num > 1 &&
	    !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
		printk(" [...]\n");
		return;
	}
	cursor_position = indent_level;
	list_for_each(elem_ref_to, &block->ref_to_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_to, struct btrfsic_block_link,
			       node_ref_to);
		/* re-align follow-up children under the parent's column */
		while (cursor_position < indent_level) {
			printk(" ");
			cursor_position++;
		}
		if (l->ref_cnt > 1)
			indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
		else
			indent_add = sprintf(buf, " --> ");
		if (indent_level + indent_add >
		    BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
			printk("[...]\n");
			cursor_position = 0;
			continue;
		}
		printk("%s", buf);	/* see format-string note above */
		btrfsic_dump_tree_sub(state, l->block_ref_to,
				      indent_level + indent_add);
		cursor_position = 0;
	}
}
/*
 * Find the linkage entry from @from_block to the block described by
 * @next_block_ctx, or create it if it does not exist yet.
 *
 * An existing link gets its reference count bumped and its
 * parent_generation refreshed; a new link starts with ref_cnt == 1 and
 * is inserted into both blocks' lists and the link hashtable.
 *
 * Returns the link, or NULL on allocation failure.
 */
static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block *next_block,
		struct btrfsic_block *from_block,
		u64 parent_generation)
{
	struct btrfsic_block_link *l;
	/* links are keyed by the (bdev, bytenr) pairs of both endpoints */
	l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
						next_block_ctx->dev_bytenr,
						from_block->dev_state->bdev,
						from_block->dev_bytenr,
						&state->block_link_hashtable);
	if (NULL == l) {
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO
			       "btrfsic: error, kmalloc" " failed!\n");
			return NULL;
		}
		l->block_ref_to = next_block;
		l->block_ref_from = from_block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);
		/* register the link in both directions and in the hashtable */
		list_add(&l->node_ref_to, &from_block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);
		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		l->ref_cnt++;
		l->parent_generation = parent_generation;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);
	}
	return l;
}
/*
 * Find the btrfsic_block tracking the on-disk location described by
 * @block_ctx, or create and register a new one with the given initial
 * flags.
 *
 * @additional_string: prefix for the verbose "New ...-block" log line.
 * @was_created: if non-NULL, set to 1 when a new block was allocated,
 *               0 when an existing one was found.
 *
 * Returns the block, or NULL on allocation / dev_state lookup failure.
 */
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created)
{
	struct btrfsic_block *block;
	/* blocks are keyed by (block device, device byte offset) */
	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
					       block_ctx->dev_bytenr,
					       &state->block_hashtable);
	if (NULL == block) {
		struct btrfsic_dev_state *dev_state;
		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			return NULL;
		}
		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
		if (NULL == dev_state) {
			printk(KERN_INFO
			       "btrfsic: error, lookup dev_state failed!\n");
			btrfsic_block_free(block);
			return NULL;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = block_ctx->dev_bytenr;
		block->logical_bytenr = block_ctx->start;
		block->is_metadata = is_metadata;
		block->is_iodone = is_iodone;
		block->never_written = never_written;
		block->mirror_num = mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New %s%c-block @%llu (%s/%llu/%d)\n",
			       additional_string,
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)block->logical_bytenr,
			       dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);
		if (NULL != was_created)
			*was_created = 1;
	} else {
		if (NULL != was_created)
			*was_created = 0;
	}
	return block;
}
/*
 * Sanity-check that a logical @bytenr maps (via some mirror) to the
 * physical (@dev_state, @dev_bytenr) location an M-block write was
 * actually submitted to.
 *
 * If no mirror matches, complain loudly, dump what each mirror maps to,
 * and WARN_ON(1). Pure diagnostic — no return value, no state change.
 */
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr)
{
	int num_copies;
	int mirror_num;
	int ret;
	struct btrfsic_block_data_ctx block_ctx;
	int match = 0;
	num_copies = btrfs_num_copies(state->root->fs_info,
				      bytenr, state->metablock_size);
	/* try every mirror until one maps to the submitted location */
	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
					&block_ctx, mirror_num);
		if (ret) {
			printk(KERN_INFO "btrfsic:"
			       " btrfsic_map_block(logical @%llu,"
			       " mirror %d) failed!\n",
			       (unsigned long long)bytenr, mirror_num);
			continue;
		}
		if (dev_state->bdev == block_ctx.dev->bdev &&
		    dev_bytenr == block_ctx.dev_bytenr) {
			match++;
			btrfsic_release_block_ctx(&block_ctx);
			break;
		}
		btrfsic_release_block_ctx(&block_ctx);
	}
	if (!match) {
		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
		       " phys_bytenr=%llu)!\n",
		       (unsigned long long)bytenr, dev_state->name,
		       (unsigned long long)dev_bytenr);
		/* dump all mirror mappings to aid debugging the mismatch */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			ret = btrfsic_map_block(state, bytenr,
						state->metablock_size,
						&block_ctx, mirror_num);
			if (ret)
				continue;
			printk(KERN_INFO "Read logical bytenr @%llu maps to"
			       " (%s/%llu/%d)\n",
			       (unsigned long long)bytenr,
			       block_ctx.dev->name,
			       (unsigned long long)block_ctx.dev_bytenr,
			       mirror_num);
		}
		WARN_ON(1);
	}
}
/*
 * Map a block device to its check-integrity device state.
 * Returns NULL when the device is not (yet) registered.
 */
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev)
{
	return btrfsic_dev_state_hashtable_lookup(
			bdev, &btrfsic_dev_state_hashtable);
}
/*
 * Wrapper around submit_bh() that feeds writes through the
 * check-integrity machinery before passing them on.
 *
 * Data writes are handed to btrfsic_process_written_block(); pure FLUSH
 * requests temporarily hijack the bh's end_io/private so the flush
 * completion can bump the device's last_flush_gen (see
 * btrfsic_bh_end_io). Reads and unknown devices pass straight through.
 */
int btrfsic_submit_bh(int rw, struct buffer_head *bh)
{
	struct btrfsic_dev_state *dev_state;
	if (!btrfsic_is_initialized)
		return submit_bh(rw, bh);
	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bh() might also be called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bh->b_bdev);
	/* Only called to write the superblock (incl. FLUSH/FUA) */
	if (NULL != dev_state &&
	    (rw & WRITE) && bh->b_size > 0) {
		u64 dev_bytenr;
		/* bh block size is 4096 here (superblock writes) —
		 * convert block number to a device byte offset */
		dev_bytenr = 4096 * bh->b_blocknr;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
			       " size=%lu, data=%p, bdev=%p)\n",
			       rw, (unsigned long)bh->b_blocknr,
			       (unsigned long long)dev_bytenr,
			       (unsigned long)bh->b_size, bh->b_data,
			       bh->b_bdev);
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      &bh->b_data, 1, NULL,
					      NULL, bh, rw);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
			       rw, bh->b_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			/* only one in-flight flush is tracked per device;
			 * further flushes are intentionally ignored */
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bh(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;
			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			/* stash original completion, restored in
			 * btrfsic_bh_end_io() */
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);
	return submit_bh(rw, bh);
}
/*
 * Wrapper around submit_bio() that feeds writes through the
 * check-integrity machinery before passing them on.
 *
 * For data writes, every bio_vec page is kmapped, handed as a batch to
 * btrfsic_process_written_block(), and unmapped again. Pure FLUSH
 * requests hijack the bio's end_io/private so the flush completion can
 * bump the device's last_flush_gen. The bio is always submitted in the
 * end, even when the bookkeeping had to be skipped (kmalloc/kmap
 * failure jumps to the "leave" label).
 */
void btrfsic_submit_bio(int rw, struct bio *bio)
{
	struct btrfsic_dev_state *dev_state;
	if (!btrfsic_is_initialized) {
		submit_bio(rw, bio);
		return;
	}
	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bio() is also called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
	if (NULL != dev_state &&
	    (rw & WRITE) && NULL != bio->bi_io_vec) {
		unsigned int i;
		u64 dev_bytenr;
		int bio_is_patched;
		char **mapped_datav;
		/* bi_sector is in 512-byte units */
		dev_bytenr = 512 * bio->bi_sector;
		bio_is_patched = 0;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x, bi_vcnt=%u,"
			       " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
			       rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
			       (unsigned long long)dev_bytenr,
			       bio->bi_bdev);
		mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
				       GFP_NOFS);
		if (!mapped_datav)
			goto leave;
		/* map all payload pages; the checker needs CPU addresses */
		for (i = 0; i < bio->bi_vcnt; i++) {
			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
			if (!mapped_datav[i]) {
				/* unwind the mappings done so far */
				while (i > 0) {
					i--;
					kunmap(bio->bi_io_vec[i].bv_page);
				}
				kfree(mapped_datav);
				goto leave;
			}
			if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			     BTRFSIC_PRINT_MASK_VERBOSE) ==
			    (dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "#%u: page=%p, len=%u, offset=%u\n",
				       i, bio->bi_io_vec[i].bv_page,
				       bio->bi_io_vec[i].bv_len,
				       bio->bi_io_vec[i].bv_offset);
		}
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      mapped_datav, bio->bi_vcnt,
					      bio, &bio_is_patched,
					      NULL, rw);
		/* i == bi_vcnt here; unmap everything again */
		while (i > 0) {
			i--;
			kunmap(bio->bi_io_vec[i].bv_page);
		}
		kfree(mapped_datav);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
			       rw, bio->bi_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			/* only one in-flight flush is tracked per device;
			 * further flushes are intentionally ignored */
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bio(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;
			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			/* stash original completion, restored in
			 * btrfsic_bio_end_io() */
			block->orig_bio_bh_private = bio->bi_private;
			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
			block->next_in_same_bio = NULL;
			bio->bi_private = block;
			bio->bi_end_io = btrfsic_bio_end_io;
		}
	}
leave:
	mutex_unlock(&btrfsic_mutex);
	submit_bio(rw, bio);
}
/*
 * Set up check-integrity state for a btrfs mount.
 *
 * Rejects configurations the checker cannot handle (node/leaf size
 * mismatch, sizes not page-multiples), allocates the global per-fs
 * state, registers a btrfsic_dev_state for every open device, and scans
 * the existing superblocks via btrfsic_process_superblock().
 *
 * Returns 0 on success, -1 (or the superblock-scan error) on failure.
 */
int btrfsic_mount(struct btrfs_root *root,
		  struct btrfs_fs_devices *fs_devices,
		  int including_extent_data, u32 print_mask)
{
	int ret;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	if (root->nodesize != root->leafsize) {
		printk(KERN_INFO
		       "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
		       root->nodesize, root->leafsize);
		return -1;
	}
	if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
		       root->nodesize, (unsigned long)PAGE_CACHE_SIZE);
		return -1;
	}
	if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
		       root->leafsize, (unsigned long)PAGE_CACHE_SIZE);
		return -1;
	}
	if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
		       root->sectorsize, (unsigned long)PAGE_CACHE_SIZE);
		return -1;
	}
	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (NULL == state) {
		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
		return -1;
	}
	/* one-time global init, shared by all mounted filesystems */
	if (!btrfsic_is_initialized) {
		mutex_init(&btrfsic_mutex);
		btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
		btrfsic_is_initialized = 1;
	}
	mutex_lock(&btrfsic_mutex);
	state->root = root;
	state->print_mask = print_mask;
	state->include_extent_data = including_extent_data;
	state->csum_size = 0;
	state->metablock_size = root->nodesize;
	state->datablock_size = root->sectorsize;
	INIT_LIST_HEAD(&state->all_blocks_list);
	btrfsic_block_hashtable_init(&state->block_hashtable);
	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
	state->max_superblock_generation = 0;
	state->latest_superblock = NULL;
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		char *p;
		if (!device->bdev || !device->name)
			continue;
		ds = btrfsic_dev_state_alloc();
		if (NULL == ds) {
			printk(KERN_INFO
			       "btrfs check-integrity: kmalloc() failed!\n");
			/* NOTE(review): this path appears to leak 'state'
			 * and any already-registered dev states — confirm
			 * and clean up as in the process_superblock error
			 * path below. */
			mutex_unlock(&btrfsic_mutex);
			return -1;
		}
		ds->bdev = device->bdev;
		ds->state = state;
		bdevname(ds->bdev, ds->name);
		ds->name[BDEVNAME_SIZE - 1] = '\0';
		/* advance p to the trailing NUL... */
		for (p = ds->name; *p != '\0'; p++);
		/* ...then walk back to keep only the last path component */
		while (p > ds->name && *p != '/')
			p--;
		if (*p == '/')
			p++;
		strlcpy(ds->name, p, sizeof(ds->name));
		btrfsic_dev_state_hashtable_add(ds,
						&btrfsic_dev_state_hashtable);
	}
	ret = btrfsic_process_superblock(state, fs_devices);
	if (0 != ret) {
		mutex_unlock(&btrfsic_mutex);
		/* btrfsic_unmount() tears down the dev states and state */
		btrfsic_unmount(root, fs_devices);
		return ret;
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
		btrfsic_dump_database(state);
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
		btrfsic_dump_tree(state);
	mutex_unlock(&btrfsic_mutex);
	return 0;
}
/*
 * Tear down the check-integrity state when a filesystem is unmounted.
 *
 * Removes and frees the btrfsic_dev_state of every device belonging to
 * @fs_devices (recovering the shared state pointer from them), then
 * frees all tracked blocks and linkage entries. Blocks that are still
 * in flight (not iodone) are reported and intentionally leaked rather
 * than freed under an active I/O completion.
 */
void btrfsic_unmount(struct btrfs_root *root,
		     struct btrfs_fs_devices *fs_devices)
{
	struct list_head *elem_all;
	struct list_head *tmp_all;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	if (!btrfsic_is_initialized)
		return;
	mutex_lock(&btrfsic_mutex);
	state = NULL;
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		if (!device->bdev || !device->name)
			continue;
		ds = btrfsic_dev_state_hashtable_lookup(
				device->bdev,
				&btrfsic_dev_state_hashtable);
		if (NULL != ds) {
			/* every ds of this fs points at the same state */
			state = ds->state;
			btrfsic_dev_state_hashtable_remove(ds);
			btrfsic_dev_state_free(ds);
		}
	}
	if (NULL == state) {
		printk(KERN_INFO
		       "btrfsic: error, cannot find state information"
		       " on umount!\n");
		mutex_unlock(&btrfsic_mutex);
		return;
	}
	/*
	 * Don't care about keeping the lists' state up to date,
	 * just free all memory that was allocated dynamically.
	 * Free the blocks and the block_links.
	 */
	list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
		struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;
		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &b_all->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt)
				btrfsic_block_link_free(l);
		}
		/* in-flight blocks must not be freed here; just report them */
		if (b_all->is_iodone || b_all->never_written)
			btrfsic_block_free(b_all);
		else
			printk(KERN_INFO "btrfs: attempt to free %c-block"
			       " @%llu (%s/%llu/%d) on umount which is"
			       " not yet iodone!\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num);
	}
	mutex_unlock(&btrfsic_mutex);
	kfree(state);
}
| gpl-2.0 |
erikcas/android_kernel_caf_msm8x26 | drivers/video/fb_defio.c | 2906 | 6515 | /*
* linux/drivers/video/fb_defio.c
*
* Copyright (C) 2006 Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>
/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>
/*
 * Resolve byte offset @offs within the framebuffer to its struct page.
 * vmalloc-backed framebuffers need vmalloc_to_page(); physically
 * contiguous ones are translated from smem_start via pfn_to_page().
 */
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *addr = (void __force *)info->screen_base + offs;

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
}
/* this is to find and return the vmalloc-ed fb pages */
/*
 * Fault handler for mmap'ed framebuffers using deferred I/O: returns
 * the framebuffer page backing the faulting address (with an extra
 * reference) so the VM can map it, or VM_FAULT_SIGBUS when the fault
 * is outside the framebuffer or the page cannot be resolved.
 */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;
	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	/* page_mkclean() later needs a mapping to walk the rmap */
	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");
	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;
	vmf->page = page;
	return 0;
}
/*
 * fsync() for framebuffer files using deferred I/O: write back the
 * mapping range, then cancel any pending deferred-I/O work and re-queue
 * it to run immediately so dirty framebuffer pages reach the driver.
 *
 * Returns 0 on success or a negative errno from the writeback.
 */
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;
	mutex_lock(&inode->i_mutex);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);
	/*
	 * Run it immediately.
	 *
	 * Fix: do not propagate schedule_delayed_work()'s return value.
	 * It returns %true (1) when the work was queued — i.e. on the
	 * normal success path — but ->fsync must return 0 on success or
	 * a negative errno, so the old "return err;" made fsync() report
	 * a bogus failure of 1 to its callers.
	 */
	schedule_delayed_work(&info->deferred_work, 0);
	mutex_unlock(&inode->i_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
/* vm_ops->page_mkwrite handler */
/*
 * Called on the first write to a clean mmap'ed framebuffer page:
 * records the page on fbdefio->pagelist (kept sorted by page->index)
 * and schedules the deferred-I/O worker that will later mkclean the
 * pages and invoke the driver callback.
 *
 * Returns VM_FAULT_LOCKED; the page is handed back locked so the PTE
 * is marked dirty before page_mkclean() can run (see comment below).
 */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;
	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */
	file_update_time(vma->vm_file);
	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);
	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);
	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);
	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original ps's pte is marked
		writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}
	/* insert before 'cur', i.e. at the sorted position */
	list_add_tail(&page->lru, &cur->lru);
page_already_added:
	mutex_unlock(&fbdefio->lock);
	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}
/* VM operations installed on deferred-I/O framebuffer mappings */
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};
/*
 * Minimal set_page_dirty for framebuffer pages: only flip the dirty
 * flag, without the radix-tree / inode-list bookkeeping the generic
 * implementation does (framebuffer pages are not pagecache pages).
 */
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}
/* address_space ops installed on the fb file mapping (see
 * fb_deferred_io_open) so dirtying fb pages stays lightweight */
static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};
/*
 * fb_ops->fb_mmap replacement installed by fb_deferred_io_init():
 * wires the VMA to the deferred-I/O fault/mkwrite handlers instead of
 * directly remapping the framebuffer. Always succeeds.
 */
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	/* VM_IO only for real hardware framebuffers, not virtual ones */
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}
/* workqueue callback */
/*
 * Deferred-I/O worker: write-protect (mkclean) every page collected on
 * fbdefio->pagelist so the next userspace write faults again, hand the
 * list to the driver's deferred_io() callback, then empty the list.
 * Runs fbdefio->delay after the first write of a cycle.
 */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}
	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);
	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
/*
 * Enable deferred I/O on @info: initialize the lock/worker/pagelist and
 * take over fb_mmap. The driver must have set info->fbdefio (with its
 * deferred_io callback) before calling this; a zero delay defaults to
 * one second (HZ).
 */
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
/*
 * Per-open hook: install the deferred-I/O address_space operations on
 * the fb file's mapping (lightweight set_page_dirty, see
 * fb_deferred_io_aops).
 */
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
/*
 * Disable deferred I/O on @info: stop the worker, detach the mapping
 * pointer fb_deferred_io_fault() installed on every framebuffer page,
 * and undo the fb_mmap takeover. Counterpart of fb_deferred_io_init().
 */
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;
	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);
	/* clear out the mapping that we setup */
	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}
	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
piccolo-dev/aquaris-M5 | drivers/s390/scsi/zfcp_aux.c | 3162 | 15645 | /*
* zfcp device driver
*
* Module interface and handling of zfcp data structures.
*
* Copyright IBM Corp. 2002, 2013
*/
/*
* Driver authors:
* Martin Peschke (originator of the driver)
* Raimund Schroeder
* Aron Zeh
* Wolfgang Taphorn
* Stefan Bader
* Heiko Carstens (kernel 2.6 port of the driver)
* Andreas Herrmann
* Maxim Shchetynin
* Volker Sameske
* Ralph Wuerthner
* Michael Loehr
* Swen Schillig
* Christof Schmitt
* Martin Petermann
* Sven Schuetz
* Steffen Maier
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");
static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
/*
 * Create a slab cache whose objects are aligned to the next power of two
 * of their size, as required for buffers exchanged with the hardware.
 */
static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
						      unsigned long size)
{
	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
/*
 * Bring the adapter identified by @busid online and add the unit given
 * by @wwpn/@lun. Used only to honor the "device=" module/boot parameter.
 * All references taken along the way are dropped via the goto chain.
 */
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
	struct ccw_device *cdev;
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;

	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
	if (!cdev)
		return;

	if (ccw_device_set_online(cdev))
		goto out_ccw_device;

	adapter = zfcp_ccw_adapter_by_cdev(cdev);
	if (!adapter)
		goto out_ccw_device;

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		goto out_port;
	/* make sure rport registration finished before adding the unit */
	flush_work(&port->rport_work);

	zfcp_unit_add(port, lun);
	put_device(&port->dev); /* ref taken by zfcp_get_port_by_wwpn */

out_port:
	zfcp_ccw_adapter_put(adapter);
out_ccw_device:
	put_device(&cdev->dev);
	return;
}
/*
 * Parse the "device=busid,wwpn,lun" parameter string and configure the
 * initial device. On any parse error the original string is reported.
 */
static void __init zfcp_init_device_setup(char *devstr)
{
	char *token;
	char *str, *str_saved;
	char busid[ZFCP_BUS_ID_SIZE];
	u64 wwpn, lun;

	/* duplicate devstr and keep the original for sysfs presentation*/
	str_saved = kstrdup(devstr, GFP_KERNEL);
	str = str_saved;
	if (!str)
		return;

	token = strsep(&str, ",");
	/* the length check also guarantees strncpy NUL-terminates busid */
	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
		goto err_out;
	strncpy(busid, token, ZFCP_BUS_ID_SIZE);

	token = strsep(&str, ",");
	if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
		goto err_out;

	token = strsep(&str, ",");
	if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
		goto err_out;

	kfree(str_saved);
	zfcp_init_device_configure(busid, wwpn, lun);
	return;

err_out:
	kfree(str_saved);
	pr_err("%s is not a valid SCSI device\n", devstr);
}
/*
 * Module entry point: create the slab caches, attach the FC transport
 * template, register the cfdc misc device and the ccw driver. On any
 * failure everything set up so far is torn down in reverse order.
 */
static int __init zfcp_module_init(void)
{
	int retval = -ENOMEM;

	/* QTCBs go to the hardware, hence the hw-aligned cache */
	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
						  sizeof(struct fsf_qtcb));
	if (!zfcp_fsf_qtcb_cache)
		goto out_qtcb_cache;

	zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
						sizeof(struct zfcp_fc_req));
	if (!zfcp_fc_req_cache)
		goto out_fc_cache;

	zfcp_scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_scsi_transport_template)
		goto out_transport;
	/* reserve per-device driver data in every scsi_device */
	scsi_transport_reserve_device(zfcp_scsi_transport_template,
				      sizeof(struct zfcp_scsi_dev));

	retval = misc_register(&zfcp_cfdc_misc);
	if (retval) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	retval = ccw_driver_register(&zfcp_ccw_driver);
	if (retval) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	/* honor the "device=" parameter, if any was given */
	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_fc_req_cache);
out_fc_cache:
	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
	return retval;
}

module_init(zfcp_module_init);
/* Module exit: undo zfcp_module_init() in reverse order. */
static void __exit zfcp_module_exit(void)
{
	ccw_driver_unregister(&zfcp_ccw_driver);
	misc_deregister(&zfcp_cfdc_misc);
	fc_release_transport(zfcp_scsi_transport_template);
	kmem_cache_destroy(zfcp_fc_req_cache);
	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}

module_exit(zfcp_module_exit);
/**
 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
 * @adapter: pointer to adapter to search for port
 * @wwpn: wwpn to search for
 *
 * Walks the adapter's port list under the read lock; on a match a device
 * reference is taken before returning.
 *
 * Returns: pointer to zfcp_port or NULL
 */
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
					u64 wwpn)
{
	struct zfcp_port *port, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if (port->wwpn != wwpn)
			continue;
		/* ref acquisition fails if the port is being released */
		if (get_device(&port->dev))
			found = port;
		break;
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);

	return found;
}
/*
 * Create the emergency mempools that let the adapter make progress under
 * memory pressure: error recovery, nameserver (GID_PN) lookups, SCSI I/O
 * and abort requests, and unsolicited status reads.
 *
 * Returns 0 on success, -ENOMEM if any pool cannot be created. On failure
 * the caller cleans up via zfcp_free_low_mem_buffers(), which tolerates a
 * partially created pool set.
 */
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
	adapter->pool.erp_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.erp_req)
		return -ENOMEM;

	adapter->pool.gid_pn_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.gid_pn_req)
		return -ENOMEM;

	adapter->pool.scsi_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.scsi_req)
		return -ENOMEM;

	adapter->pool.scsi_abort =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.scsi_abort)
		return -ENOMEM;

	adapter->pool.status_read_req =
		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
					    sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.status_read_req)
		return -ENOMEM;

	adapter->pool.qtcb_pool =
		mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
	if (!adapter->pool.qtcb_pool)
		return -ENOMEM;

	/* each status read buffer must fit into one page */
	BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
	adapter->pool.sr_data =
		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
	if (!adapter->pool.sr_data)
		return -ENOMEM;

	adapter->pool.gid_pn =
		mempool_create_slab_pool(1, zfcp_fc_req_cache);
	if (!adapter->pool.gid_pn)
		return -ENOMEM;

	return 0;
}
/*
 * Destroy the adapter's emergency mempools; counterpart to
 * zfcp_allocate_low_mem_buffers(). Safe to call on a partially created
 * pool set, hence the NULL checks before each mempool_destroy().
 */
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
	if (adapter->pool.erp_req)
		mempool_destroy(adapter->pool.erp_req);
	/* fix: gid_pn_req was allocated but never destroyed (mempool leak) */
	if (adapter->pool.gid_pn_req)
		mempool_destroy(adapter->pool.gid_pn_req);
	if (adapter->pool.scsi_req)
		mempool_destroy(adapter->pool.scsi_req);
	if (adapter->pool.scsi_abort)
		mempool_destroy(adapter->pool.scsi_abort);
	if (adapter->pool.qtcb_pool)
		mempool_destroy(adapter->pool.qtcb_pool);
	if (adapter->pool.status_read_req)
		mempool_destroy(adapter->pool.status_read_req);
	if (adapter->pool.sr_data)
		mempool_destroy(adapter->pool.sr_data);
	if (adapter->pool.gid_pn)
		mempool_destroy(adapter->pool.gid_pn);
}
/**
 * zfcp_status_read_refill - refill the long running status_read_requests
 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
 *
 * Posts one status-read FSF request per missing buffer. Should posting
 * fail while stat_read_buf_num (16) or more buffers are still missing,
 * the adapter is reopened.
 *
 * Returns: 0 on success, 1 otherwise
 */
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
	while (atomic_read(&adapter->stat_miss) > 0) {
		if (zfcp_fsf_status_read(adapter->qdio)) {
			/* posting failed; give up, possibly via recovery */
			if (atomic_read(&adapter->stat_miss) >=
			    adapter->stat_read_buf_num) {
				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
				return 1;
			}
			break;
		}
		atomic_dec(&adapter->stat_miss);
	}
	return 0;
}
/* Work item wrapper: refill status reads from the adapter's stat_work. */
static void _zfcp_status_read_scheduler(struct work_struct *work)
{
	zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
					     stat_work));
}
/* Service-level callback: print the adapter's microcode (LIC) level. */
static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
{
	struct zfcp_adapter *adapter =
		container_of(sl, struct zfcp_adapter, service_level);

	seq_printf(m, "zfcp: %s microcode level %x\n",
		   dev_name(&adapter->ccw_device->dev),
		   adapter->fsf_lic_version);
}
/*
 * Create the adapter's private single-threaded work queue, named after
 * the ccw bus id. Returns 0 on success, -ENOMEM otherwise.
 */
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
	char wq_name[TASK_COMM_LEN];

	snprintf(wq_name, sizeof(wq_name), "zfcp_q_%s",
		 dev_name(&adapter->ccw_device->dev));
	adapter->work_queue = create_singlethread_workqueue(wq_name);

	return adapter->work_queue ? 0 : -ENOMEM;
}
/*
 * Destroy the adapter work queue, if one exists, and clear the pointer
 * so a repeated call is harmless.
 */
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
{
	if (adapter->work_queue)
		destroy_workqueue(adapter->work_queue);
	adapter->work_queue = NULL;
}
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct cc_device
 *
 * Returns: struct zfcp_adapter*
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up and sysfs entries created.
 * On failure an ERR_PTR is returned; zfcp_adapter_unregister() copes with
 * the partially initialized adapter.
 */
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/* hold a ccw device reference for the adapter's lifetime */
	if (!get_device(&ccw_device->dev))
		return ERR_PTR(-ENODEV);

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter) {
		put_device(&ccw_device->dev);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&adapter->ref);

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);

	if (zfcp_qdio_setup(adapter))
		goto failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed;

	adapter->req_list = zfcp_reqlist_alloc();
	if (!adapter->req_list)
		goto failed;

	if (zfcp_dbf_adapter_register(adapter))
		goto failed;

	if (zfcp_setup_adapter_work_queue(adapter))
		goto failed;

	if (zfcp_fc_gs_setup(adapter))
		goto failed;

	rwlock_init(&adapter->port_list_lock);
	INIT_LIST_HEAD(&adapter->port_list);

	INIT_LIST_HEAD(&adapter->events.list);
	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
	spin_lock_init(&adapter->events.list_lock);

	init_waitqueue_head(&adapter->erp_ready_wq);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	if (zfcp_erp_thread_setup(adapter))
		goto failed;

	adapter->service_level.seq_print = zfcp_print_sl;

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto failed;

	/* report size limit per scatter-gather segment */
	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;

	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;

	/* zfcp_scsi_adapter_register returns 0 on success */
	if (!zfcp_scsi_adapter_register(adapter))
		return adapter;

failed:
	zfcp_adapter_unregister(adapter);
	return ERR_PTR(-ENOMEM);
}
/*
 * Tear down an adapter: stop all asynchronous work first, then unwind
 * registrations in reverse setup order. The final kref put triggers
 * zfcp_adapter_release() which frees the remaining resources.
 */
void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct ccw_device *cdev = adapter->ccw_device;

	/* quiesce async activity before dismantling anything */
	cancel_work_sync(&adapter->scan_work);
	cancel_work_sync(&adapter->stat_work);
	cancel_work_sync(&adapter->ns_up_work);
	zfcp_destroy_adapter_work_queue(adapter);

	zfcp_fc_wka_ports_force_offline(adapter->gs);
	zfcp_scsi_adapter_unregister(adapter);
	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);

	zfcp_erp_thread_kill(adapter);
	zfcp_dbf_adapter_unregister(adapter);
	zfcp_qdio_destroy(adapter->qdio);

	zfcp_ccw_adapter_put(adapter); /* final put to release */
}
/**
 * zfcp_adapter_release - remove the adapter from the resource list
 * @ref: pointer to struct kref embedded in the adapter
 * locks: adapter list write lock is assumed to be held by caller
 *
 * kref release callback: frees everything still owned by the adapter and
 * drops the ccw device reference taken in zfcp_adapter_enqueue().
 */
void zfcp_adapter_release(struct kref *ref)
{
	struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
						    ref);
	struct ccw_device *cdev = adapter->ccw_device;

	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
	zfcp_fc_gs_destroy(adapter);
	zfcp_free_low_mem_buffers(adapter);
	kfree(adapter->req_list);
	kfree(adapter->fc_stats);
	kfree(adapter->stats_reset_data);
	kfree(adapter);
	put_device(&cdev->dev);
}
/**
 * zfcp_device_unregister - remove port, unit from system
 * @dev: reference to device which is to be removed
 * @grp: related reference to attribute group
 *
 * Helper function to unregister port, unit from system: removes the
 * sysfs attribute group first, then unregisters the device itself.
 */
void zfcp_device_unregister(struct device *dev,
			    const struct attribute_group *grp)
{
	sysfs_remove_group(&dev->kobj, grp);
	device_unregister(dev);
}
/* Device release callback: drop the adapter ref and free the port. */
static void zfcp_port_release(struct device *dev)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);

	zfcp_ccw_adapter_put(port->adapter);
	kfree(port);
}
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup. Takes an adapter kref that is released by
 * zfcp_port_release() (or on the error paths below).
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				    u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	kref_get(&adapter->ref);

	/* reject duplicates */
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);
	atomic_set(&port->units, 0);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.release = zfcp_port_release;

	/* before device_register the port is freed directly ... */
	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	/* ... afterwards only via put_device -> zfcp_port_release */
	if (device_register(&port->dev)) {
		put_device(&port->dev);
		goto err_out;
	}

	if (sysfs_create_group(&port->dev.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->dev);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
/**
 * zfcp_sg_free_table - free memory used by scatterlists
 * @sg: pointer to scatterlist
 * @count: number of scatterlist which are to be free'ed
 * the scatterlist are expected to reference pages always
 */
void zfcp_sg_free_table(struct scatterlist *sg, int count)
{
	int i;

	/* a NULL scatterlist means there is nothing to release */
	for (i = 0; i < count && sg; i++, sg++)
		free_page((unsigned long) sg_virt(sg));
}
/**
* zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
* @sg: pointer to struct scatterlist
* @count: number of scatterlists which should be assigned with buffers
* of size page
*
* Returns: 0 on success, -ENOMEM otherwise
*/
int zfcp_sg_setup_table(struct scatterlist *sg, int count)
{
void *addr;
int i;
sg_init_table(sg, count);
for (i = 0; i < count; i++, sg++) {
addr = (void *) get_zeroed_page(GFP_KERNEL);
if (!addr) {
zfcp_sg_free_table(sg, i);
return -ENOMEM;
}
sg_set_buf(sg, addr, PAGE_SIZE);
}
return 0;
}
| gpl-2.0 |
jfdsmabalot/kernel_mako | drivers/hid/hid-lgff.c | 3930 | 5021 | /*
* Force feedback support for hid-compliant for some of the devices from
* Logitech, namely:
* - WingMan Cordless RumblePad
* - WingMan Force 3D
*
* Copyright (c) 2002-2004 Johann Deneux
* Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so by
* e-mail - mail your message to <johann.deneux@it.uu.se>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include "usbhid/usbhid.h"
#include "hid-lg.h"
/* Maps a USB vendor/product id to the force-feedback bits it supports. */
struct dev_type {
	u16 idVendor;
	u16 idProduct;
	const signed short *ff;	/* -1 terminated list of FF_* capabilities */
};

static const signed short ff_rumble[] = {
	FF_RUMBLE,
	-1
};

static const signed short ff_joystick[] = {
	FF_CONSTANT,
	-1
};

static const signed short ff_joystick_ac[] = {
	FF_CONSTANT,
	FF_AUTOCENTER,
	-1
};

/* Known Logitech devices; anything else defaults to ff_joystick. */
static const struct dev_type devices[] = {
	{ 0x046d, 0xc211, ff_rumble },
	{ 0x046d, 0xc219, ff_rumble },
	{ 0x046d, 0xc283, ff_joystick },
	{ 0x046d, 0xc286, ff_joystick_ac },
	{ 0x046d, 0xc287, ff_joystick_ac },
	{ 0x046d, 0xc293, ff_joystick },
	{ 0x046d, 0xc295, ff_joystick },
};
/*
 * hid_lgff_play - memless force-feedback "play" callback
 * @dev: input device the effect is played on
 * @data: unused callback cookie
 * @effect: effect to render; only FF_CONSTANT and FF_RUMBLE are handled
 *
 * Fills the device's first output report with a 4-byte vendor command and
 * submits it. Always returns 0.
 */
static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
	int x, y;
	unsigned int left, right;

/* clamp to the 0..0xff range of a report byte; do-while(0) keeps the two
 * statements grouped even when used inside an unbraced conditional */
#define CLAMP(x) do { if (x < 0) x = 0; if (x > 0xff) x = 0xff; } while (0)

	switch (effect->type) {
	case FF_CONSTANT:
		/* NOTE(review): reads the ramp union member for FF_CONSTANT;
		 * kept as-is since the wire protocol depends on it */
		x = effect->u.ramp.start_level + 0x7f;	/* 0x7f is center */
		y = effect->u.ramp.end_level + 0x7f;
		CLAMP(x);
		CLAMP(y);
		report->field[0]->value[0] = 0x51;
		report->field[0]->value[1] = 0x08;
		report->field[0]->value[2] = x;
		report->field[0]->value[3] = y;
		dbg_hid("(x, y)=(%04x, %04x)\n", x, y);
		usbhid_submit_report(hid, report, USB_DIR_OUT);
		break;

	case FF_RUMBLE:
		right = effect->u.rumble.strong_magnitude;
		left = effect->u.rumble.weak_magnitude;
		/* scale the 16-bit magnitudes down to one byte each */
		right = right * 0xff / 0xffff;
		left = left * 0xff / 0xffff;
		CLAMP(left);
		CLAMP(right);
		report->field[0]->value[0] = 0x42;
		report->field[0]->value[1] = 0x00;
		report->field[0]->value[2] = left;
		report->field[0]->value[3] = right;
		dbg_hid("(left, right)=(%04x, %04x)\n", left, right);
		usbhid_submit_report(hid, report, USB_DIR_OUT);
		break;
	}
#undef CLAMP
	return 0;
}
/*
 * hid_lgff_set_autocenter - program the autocentering spring strength
 * @dev: input device
 * @magnitude: 0..0xffff strength; only the top 4 bits are used
 */
static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
	__s32 *value = report->field[0]->value;

	/* reduce the 16-bit magnitude to the device's 4-bit strength */
	magnitude = (magnitude >> 12) & 0xf;
	*value++ = 0xfe;
	*value++ = 0x0d;
	*value++ = magnitude;   /* clockwise strength */
	*value++ = magnitude;   /* counter-clockwise strength */
	*value++ = 0x80;
	*value++ = 0x00;
	*value = 0x00;
	usbhid_submit_report(hid, report, USB_DIR_OUT);
}
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct hid_report *report;
struct hid_field *field;
const signed short *ff_bits = ff_joystick;
int error;
int i;
/* Find the report to use */
if (list_empty(report_list)) {
hid_err(hid, "No output report found\n");
return -1;
}
/* Check that the report looks ok */
report = list_entry(report_list->next, struct hid_report, list);
field = report->field[0];
if (!field) {
hid_err(hid, "NULL field\n");
return -1;
}
for (i = 0; i < ARRAY_SIZE(devices); i++) {
if (dev->id.vendor == devices[i].idVendor &&
dev->id.product == devices[i].idProduct) {
ff_bits = devices[i].ff;
break;
}
}
for (i = 0; ff_bits[i] >= 0; i++)
set_bit(ff_bits[i], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lgff_play);
if (error)
return error;
if ( test_bit(FF_AUTOCENTER, dev->ffbit) )
dev->ff->set_autocenter = hid_lgff_set_autocenter;
pr_info("Force feedback for Logitech force feedback devices by Johann Deneux <johann.deneux@it.uu.se>\n");
return 0;
}
| gpl-2.0 |
davidftv/rk3x_kernel_3.0.36 | drivers/hwmon/acpi_power_meter.c | 4186 | 24367 | /*
* A hwmon driver for ACPI 4.0 power meters
* Copyright (C) 2009 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#define ACPI_POWER_METER_NAME "power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
#define NUM_SENSORS 17
#define POWER_METER_CAN_MEASURE (1 << 0)
#define POWER_METER_CAN_TRIP (1 << 1)
#define POWER_METER_CAN_CAP (1 << 2)
#define POWER_METER_CAN_NOTIFY (1 << 3)
#define POWER_METER_IS_BATTERY (1 << 8)
#define UNKNOWN_HYSTERESIS 0xFFFFFFFF
#define METER_NOTIFY_CONFIG 0x80
#define METER_NOTIFY_TRIP 0x81
#define METER_NOTIFY_CAP 0x82
#define METER_NOTIFY_CAPPING 0x83
#define METER_NOTIFY_INTERVAL 0x84
#define POWER_AVERAGE_NAME "power1_average"
#define POWER_CAP_NAME "power1_cap"
#define POWER_AVG_INTERVAL_NAME "power1_average_interval"
#define POWER_ALARM_NAME "power1_alarm"
static int cap_in_hardware;
static int force_cap_on;
/* Capping is available when hardware reports it or the user forces it. */
static int can_cap_in_hardware(void)
{
	if (force_cap_on)
		return 1;
	return cap_in_hardware ? 1 : 0;
}
static const struct acpi_device_id power_meter_ids[] = {
{"ACPI000D", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, power_meter_ids);
/* Static capabilities reported by the meter's _PMC object. */
struct acpi_power_meter_capabilities {
	u64 flags;		/* POWER_METER_CAN_* / IS_BATTERY bits */
	u64 units;
	u64 type;
	u64 accuracy;		/* in thousandths of a percent */
	u64 sampling_time;	/* in milliseconds */
	u64 min_avg_interval;
	u64 max_avg_interval;
	u64 hysteresis;		/* UNKNOWN_HYSTERESIS if not reported */
	u64 configurable_cap;
	u64 min_cap;
	u64 max_cap;
};

/* Per-device state; all cached readings are guarded by @lock. */
struct acpi_power_meter_resource {
	struct acpi_device	*acpi_dev;
	acpi_bus_id		name;
	struct mutex		lock;
	struct device		*hwmon_dev;
	struct acpi_power_meter_capabilities	caps;
	acpi_string		model_number;
	acpi_string		serial_number;
	acpi_string		oem_info;
	u64		power;		/* last _PMM reading, in milliwatts */
	u64		cap;		/* last _GHL reading, in milliwatts */
	u64		avg_interval;	/* last _GAI reading */
	int		sensors_valid;
	unsigned long	sensors_last_updated;
	struct sensor_device_attribute	sensors[NUM_SENSORS];
	int		num_sensors;
	int		trip[2];	/* stored as min, max (ACPI wants max, min) */
	int		num_domain_devices;
	struct acpi_device	**domain_devices;
	struct kobject	*holders_dir;
};

/* Template for a read-only hwmon attribute. */
struct ro_sensor_template {
	char *label;
	ssize_t (*show)(struct device *dev,
			struct device_attribute *devattr,
			char *buf);
	int index;
};

/* Template for a read-write hwmon attribute. */
struct rw_sensor_template {
	char *label;
	ssize_t (*show)(struct device *dev,
			struct device_attribute *devattr,
			char *buf);
	ssize_t (*set)(struct device *dev,
		       struct device_attribute *devattr,
		       const char *buf, size_t count);
	int index;
};
/* Averaging interval */
/* Refresh the cached averaging interval from _GAI; caller holds the lock. */
static int update_avg_interval(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GAI",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GAI"));
		return -ENODEV;
	}

	resource->avg_interval = data;
	return 0;
}
/* sysfs read of power1_average_interval: refresh from firmware, then print. */
static ssize_t show_avg_interval(struct device *dev,
				 struct device_attribute *devattr,
				 char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_avg_interval(resource);
	mutex_unlock(&resource->lock);

	return sprintf(buf, "%llu\n", resource->avg_interval);
}
/*
 * sysfs write of power1_average_interval: validate against the meter's
 * advertised range and program it via _PAI.
 */
static ssize_t set_avg_interval(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
	struct acpi_object_list args = { 1, &arg0 };
	int res;
	unsigned long temp;
	unsigned long long data;
	acpi_status status;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	if (temp > resource->caps.max_avg_interval ||
	    temp < resource->caps.min_avg_interval)
		return -EINVAL;
	arg0.integer.value = temp;

	mutex_lock(&resource->lock);
	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
				       &args, &data);
	if (!ACPI_FAILURE(status))
		resource->avg_interval = temp;	/* keep cache in sync */
	mutex_unlock(&resource->lock);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PAI"));
		return -EINVAL;
	}

	/* _PAI returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return count;
}
/* Cap functions */
/* Refresh the cached hardware limit (cap) from _GHL; caller holds the lock. */
static int update_cap(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GHL",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GHL"));
		return -ENODEV;
	}

	resource->cap = data;
	return 0;
}
/* sysfs read of power1_cap: refresh and report in microwatts (hwmon unit). */
static ssize_t show_cap(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_cap(resource);
	mutex_unlock(&resource->lock);

	/* firmware reports milliwatts; hwmon expects microwatts */
	return sprintf(buf, "%llu\n", resource->cap * 1000);
}
/*
 * sysfs write of power1_cap: convert microwatts to milliwatts, validate
 * against the meter's cap range and program it via _SHL.
 */
static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
	struct acpi_object_list args = { 1, &arg0 };
	int res;
	unsigned long temp;
	unsigned long long data;
	acpi_status status;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	temp /= 1000;	/* microwatts -> milliwatts */
	if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
		return -EINVAL;
	arg0.integer.value = temp;

	mutex_lock(&resource->lock);
	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
				       &args, &data);
	if (!ACPI_FAILURE(status))
		resource->cap = temp;	/* keep cache in sync */
	mutex_unlock(&resource->lock);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SHL"));
		return -EINVAL;
	}

	/* _SHL returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return count;
}
/* Power meter trip points */
/*
 * Push both trip points to the firmware via _PTP. Called with the
 * resource lock held; a no-op until both trip points have been set.
 */
static int set_acpi_trip(struct acpi_power_meter_resource *resource)
{
	union acpi_object arg_objs[] = {
		{ACPI_TYPE_INTEGER},
		{ACPI_TYPE_INTEGER}
	};
	struct acpi_object_list args = { 2, arg_objs };
	unsigned long long data;
	acpi_status status;

	/* Both trip levels must be set */
	if (resource->trip[0] < 0 || resource->trip[1] < 0)
		return 0;

	/* This driver stores min, max; ACPI wants max, min. */
	arg_objs[0].integer.value = resource->trip[1];
	arg_objs[1].integer.value = resource->trip[0];

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PTP",
				       &args, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTP"));
		return -EINVAL;
	}

	/* _PTP returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return 0;
}
/*
 * sysfs write of a trip point (power1_average_min/max): store the value
 * in milliwatts and push both trip points to firmware.
 *
 * Fix: temp was 'unsigned long' parsed with strict_strtoul, making the
 * 'temp < 0' rejection dead code. Parse signed so negative input is
 * actually rejected with -EINVAL.
 */
static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	int res;
	long temp;

	res = strict_strtol(buf, 10, &temp);
	if (res)
		return res;

	temp /= 1000;	/* microwatts -> milliwatts */
	if (temp < 0)
		return -EINVAL;

	mutex_lock(&resource->lock);
	resource->trip[attr->index - 7] = temp;
	res = set_acpi_trip(resource);
	mutex_unlock(&resource->lock);

	if (res)
		return res;

	return count;
}
/* Power meter */
/*
 * Refresh the cached power reading from _PMM, rate-limited by the
 * meter's sampling time. Caller holds the resource lock.
 */
static int update_meter(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;
	unsigned long local_jiffies = jiffies;

	/* reuse the cached value while it is still fresh */
	if (time_before(local_jiffies, resource->sensors_last_updated +
			msecs_to_jiffies(resource->caps.sampling_time)) &&
			resource->sensors_valid)
		return 0;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PMM",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMM"));
		return -ENODEV;
	}

	resource->power = data;
	resource->sensors_valid = 1;
	resource->sensors_last_updated = jiffies;
	return 0;
}
/* sysfs read of power1_average: refresh and report in microwatts. */
static ssize_t show_power(struct device *dev,
			  struct device_attribute *devattr,
			  char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_meter(resource);
	mutex_unlock(&resource->lock);

	/* firmware reports milliwatts; hwmon expects microwatts */
	return sprintf(buf, "%llu\n", resource->power * 1000);
}
/* Miscellaneous */
/*
 * Show one of the meter's identification strings, selected by the
 * attribute index: 0 = model number, 1 = serial number, 2 = OEM info.
 * Any other index is a driver bug.
 */
static ssize_t show_str(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	acpi_string val;

	switch (attr->index) {
	case 0:
		val = resource->model_number;
		break;
	case 1:
		val = resource->serial_number;
		break;
	case 2:
		val = resource->oem_info;
		break;
	default:
		BUG();	/* template tables only use indices 0-2 */
	}

	return sprintf(buf, "%s\n", val);
}
/*
 * Show a numeric property selected by the attribute index:
 * 0/1 = min/max averaging interval, 2/3 = min/max cap (uW),
 * 4 = hysteresis (uW), 5 = is-battery flag, 6 = alarm (power > cap),
 * 7/8 = trip points (uW). Any other index is a driver bug.
 */
static ssize_t show_val(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	u64 val = 0;

	switch (attr->index) {
	case 0:
		val = resource->caps.min_avg_interval;
		break;
	case 1:
		val = resource->caps.max_avg_interval;
		break;
	case 2:
		val = resource->caps.min_cap * 1000;	/* mW -> uW */
		break;
	case 3:
		val = resource->caps.max_cap * 1000;	/* mW -> uW */
		break;
	case 4:
		if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS)
			return sprintf(buf, "unknown\n");

		val = resource->caps.hysteresis * 1000;	/* mW -> uW */
		break;
	case 5:
		if (resource->caps.flags & POWER_METER_IS_BATTERY)
			val = 1;
		else
			val = 0;
		break;
	case 6:
		/* NOTE(review): compares cached power/cap without a refresh;
		 * values may be stale until power1_average/cap are read */
		if (resource->power > resource->cap)
			val = 1;
		else
			val = 0;
		break;
	case 7:
	case 8:
		if (resource->trip[attr->index - 7] < 0)
			return sprintf(buf, "unknown\n");

		val = resource->trip[attr->index - 7] * 1000;	/* mW -> uW */
		break;
	default:
		BUG();	/* template tables only use indices 0-8 */
	}

	return sprintf(buf, "%llu\n", val);
}
/* Report the meter's accuracy; firmware stores it in 1/1000 percent. */
static ssize_t show_accuracy(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	unsigned int milli_pct = resource->caps.accuracy;

	return sprintf(buf, "%u.%u%%\n", milli_pct / 1000, milli_pct % 1000);
}
/* hwmon "name" attribute: fixed driver name. */
static ssize_t show_name(struct device *dev,
			 struct device_attribute *devattr,
			 char *buf)
{
	return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
}
/* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */
/* Always-present read-only attributes. */
static struct ro_sensor_template meter_ro_attrs[] = {
	{POWER_AVERAGE_NAME, show_power, 0},
	{"power1_accuracy", show_accuracy, 0},
	{"power1_average_interval_min", show_val, 0},
	{"power1_average_interval_max", show_val, 1},
	{"power1_is_battery", show_val, 5},
	{NULL, NULL, 0},
};

/* Always-present read-write attributes. */
static struct rw_sensor_template meter_rw_attrs[] = {
	{POWER_AVG_INTERVAL_NAME, show_avg_interval, set_avg_interval, 0},
	{NULL, NULL, NULL, 0},
};

/* Extra attributes when the meter supports capping at all. */
static struct ro_sensor_template misc_cap_attrs[] = {
	{"power1_cap_min", show_val, 2},
	{"power1_cap_max", show_val, 3},
	{"power1_cap_hyst", show_val, 4},
	{POWER_ALARM_NAME, show_val, 6},
	{NULL, NULL, 0},
};

/* Cap attribute when the cap is fixed (read-only). */
static struct ro_sensor_template ro_cap_attrs[] = {
	{POWER_CAP_NAME, show_cap, 0},
	{NULL, NULL, 0},
};

/* Cap attribute when the cap is configurable (read-write). */
static struct rw_sensor_template rw_cap_attrs[] = {
	{POWER_CAP_NAME, show_cap, set_cap, 0},
	{NULL, NULL, NULL, 0},
};

/* Trip-point attributes (indices 7 and 8 map into resource->trip[]). */
static struct rw_sensor_template trip_attrs[] = {
	{"power1_average_min", show_val, set_trip, 7},
	{"power1_average_max", show_val, set_trip, 8},
	{NULL, NULL, NULL, 0},
};

/* Identification attributes. */
static struct ro_sensor_template misc_attrs[] = {
	{"name", show_name, 0},
	{"power1_model_number", show_str, 0},
	{"power1_oem_info", show_str, 2},
	{"power1_serial_number", show_str, 1},
	{NULL, NULL, 0},
};
/* Read power domain data */
/*
 * Undo read_domain_devices(): remove the "measures" symlinks, drop the
 * device references and free the device array.
 */
static void remove_domain_devices(struct acpi_power_meter_resource *resource)
{
	int i;

	if (!resource->num_domain_devices)
		return;

	for (i = 0; i < resource->num_domain_devices; i++) {
		struct acpi_device *obj = resource->domain_devices[i];

		/* slots for unresolved references stay NULL */
		if (!obj)
			continue;

		sysfs_remove_link(resource->holders_dir,
				  kobject_name(&obj->dev.kobj));
		put_device(&obj->dev);
	}

	kfree(resource->domain_devices);
	kobject_put(resource->holders_dir);
	resource->num_domain_devices = 0;
}
/*
 * read_domain_devices - enumerate the devices this meter measures
 *
 * Evaluates the ACPI _PMD (Power Metered Devices) method.  For every
 * valid device reference in the returned package, a reference is taken
 * on the ACPI device and a sysfs symlink to it is created under the
 * "measures" kobject directory.
 *
 * Returns 0 on success (including an empty _PMD package) or a negative
 * errno.  On success the caller must eventually call
 * remove_domain_devices() to release the references.
 */
static int read_domain_devices(struct acpi_power_meter_resource *resource)
{
	int res = 0;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pss;
	acpi_status status;

	status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMD", NULL,
				      &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMD"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss ||
	    pss->type != ACPI_TYPE_PACKAGE) {
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Invalid _PMD data\n");
		res = -EFAULT;
		goto end;
	}

	/* An empty package simply means no metered devices are listed. */
	if (!pss->package.count)
		goto end;

	resource->domain_devices = kzalloc(sizeof(struct acpi_device *) *
					   pss->package.count, GFP_KERNEL);
	if (!resource->domain_devices) {
		res = -ENOMEM;
		goto end;
	}

	resource->holders_dir = kobject_create_and_add("measures",
					&resource->acpi_dev->dev.kobj);
	if (!resource->holders_dir) {
		res = -ENOMEM;
		goto exit_free;
	}

	resource->num_domain_devices = pss->package.count;

	for (i = 0; i < pss->package.count; i++) {
		struct acpi_device *obj;
		union acpi_object *element = &(pss->package.elements[i]);

		/* Refuse non-references */
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			continue;

		/* Create a symlink to domain objects */
		resource->domain_devices[i] = NULL;
		status = acpi_bus_get_device(element->reference.handle,
					     &resource->domain_devices[i]);
		if (ACPI_FAILURE(status))
			continue;

		obj = resource->domain_devices[i];
		/* Hold a reference for as long as the symlink exists. */
		get_device(&obj->dev);

		res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj,
					kobject_name(&obj->dev.kobj));
		if (res) {
			/* Link failed: drop the reference and clear the slot. */
			put_device(&obj->dev);
			resource->domain_devices[i] = NULL;
		}
	}

	/* Per-entry link failures are not fatal for the whole scan. */
	res = 0;
	goto end;

exit_free:
	kfree(resource->domain_devices);
end:
	kfree(buffer.pointer);
	return res;
}
/* Registration and deregistration */
/*
 * register_ro_attrs - create read-only sysfs files from a template list
 *
 * Walks the NULL-terminated template array, filling the next free slot
 * in resource->sensors for each entry and creating its device file.
 * Stops at the first failure; successfully created files are left in
 * place for remove_attrs() to clean up.  Returns 0 or a negative errno.
 */
static int register_ro_attrs(struct acpi_power_meter_resource *resource,
			     struct ro_sensor_template *ro)
{
	struct device *dev = &resource->acpi_dev->dev;
	struct sensor_device_attribute *attr;
	int err = 0;

	for (; ro->label; ro++) {
		attr = &resource->sensors[resource->num_sensors];
		attr->dev_attr.attr.name = ro->label;
		attr->dev_attr.attr.mode = S_IRUGO;
		attr->dev_attr.show = ro->show;
		attr->index = ro->index;

		err = device_create_file(dev, &attr->dev_attr);
		if (err) {
			/* NULL name marks the slot unused for remove_attrs(). */
			attr->dev_attr.attr.name = NULL;
			break;
		}
		resource->num_sensors++;
	}

	return err;
}
/*
 * register_rw_attrs - create read-write sysfs files from a template list
 *
 * Same contract as register_ro_attrs(), but entries carry both show and
 * set callbacks and the files are created mode S_IRUGO | S_IWUSR.
 * Returns 0 or a negative errno from device_create_file().
 */
static int register_rw_attrs(struct acpi_power_meter_resource *resource,
			     struct rw_sensor_template *rw)
{
	struct device *dev = &resource->acpi_dev->dev;
	struct sensor_device_attribute *sensors =
		&resource->sensors[resource->num_sensors];
	int res = 0;

	while (rw->label) {
		sensors->dev_attr.attr.name = rw->label;
		sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
		sensors->dev_attr.show = rw->show;
		sensors->dev_attr.store = rw->set;
		sensors->index = rw->index;

		res = device_create_file(dev, &sensors->dev_attr);
		if (res) {
			/* NULL name marks the slot unused for remove_attrs(). */
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		resource->num_sensors++;
		rw++;
	}

error:
	return res;
}
/*
 * remove_attrs - tear down every sysfs file registered by setup_attrs()
 *
 * Also releases the metered-domain devices, since setup_attrs() is what
 * acquired them.  Slots with a NULL attr name were never registered and
 * are skipped.
 */
static void remove_attrs(struct acpi_power_meter_resource *resource)
{
	int i;

	for (i = 0; i < resource->num_sensors; i++) {
		if (!resource->sensors[i].dev_attr.attr.name)
			continue;
		device_remove_file(&resource->acpi_dev->dev,
				   &resource->sensors[i].dev_attr);
	}

	remove_domain_devices(resource);

	resource->num_sensors = 0;
}
/*
 * setup_attrs - register all sysfs attributes appropriate for this meter
 *
 * Registers attribute groups according to the capability flags read from
 * _PMC: measurement attrs, cap attrs (skipped entirely when software
 * capping is unsafe and not explicitly forced), trip attrs, and the
 * always-present misc attrs.  On any failure everything registered so
 * far is removed.  Returns 0 or a negative errno.
 */
static int setup_attrs(struct acpi_power_meter_resource *resource)
{
	int res = 0;

	res = read_domain_devices(resource);
	if (res)
		return res;

	if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
		res = register_ro_attrs(resource, meter_ro_attrs);
		if (res)
			goto error;
		res = register_rw_attrs(resource, meter_rw_attrs);
		if (res)
			goto error;
	}

	if (resource->caps.flags & POWER_METER_CAN_CAP) {
		if (!can_cap_in_hardware()) {
			/* Jumping past the cap block skips all cap attrs. */
			dev_err(&resource->acpi_dev->dev,
				"Ignoring unsafe software power cap!\n");
			goto skip_unsafe_cap;
		}

		if (resource->caps.configurable_cap) {
			res = register_rw_attrs(resource, rw_cap_attrs);
			if (res)
				goto error;
		} else {
			res = register_ro_attrs(resource, ro_cap_attrs);
			if (res)
				goto error;
		}
		res = register_ro_attrs(resource, misc_cap_attrs);
		if (res)
			goto error;
	}
skip_unsafe_cap:

	if (resource->caps.flags & POWER_METER_CAN_TRIP) {
		res = register_rw_attrs(resource, trip_attrs);
		if (res)
			goto error;
	}

	res = register_ro_attrs(resource, misc_attrs);
	if (res)
		goto error;

	return res;
error:
	remove_attrs(resource);
	return res;
}
static void free_capabilities(struct acpi_power_meter_resource *resource)
{
acpi_string *str;
int i;
str = &resource->model_number;
for (i = 0; i < 3; i++, str++)
kfree(*str);
}
/*
 * read_capabilities - parse the ACPI _PMC (Power Meter Capabilities) package
 *
 * _PMC must be a 14-element package: 11 integers (extracted in one go
 * into resource->caps via acpi_extract_package) followed by 3 strings
 * (model number, serial number, OEM info), which are duplicated into
 * the resource.
 *
 * Returns 0 on success or a negative errno.  On failure any partially
 * allocated strings are freed and their pointers reset to NULL, so a
 * later free_capabilities() call cannot double-free them.
 */
static int read_capabilities(struct acpi_power_meter_resource *resource)
{
	int res = 0;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer state = { 0, NULL };
	struct acpi_buffer format = { sizeof("NNNNNNNNNNN"), "NNNNNNNNNNN" };
	union acpi_object *pss;
	acpi_string *str;
	acpi_status status;

	status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMC", NULL,
				      &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMC"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss ||
	    pss->type != ACPI_TYPE_PACKAGE ||
	    pss->package.count != 14) {
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Invalid _PMC data\n");
		res = -EFAULT;
		goto end;
	}

	/* Grab all the integer data at once */
	state.length = sizeof(struct acpi_power_meter_capabilities);
	state.pointer = &resource->caps;

	status = acpi_extract_package(pss, &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Invalid data"));
		res = -EFAULT;
		goto end;
	}

	/* Only units value 0 (Watts) is defined/supported. */
	if (resource->caps.units) {
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Unknown units %llu.\n",
			resource->caps.units);
		res = -EINVAL;
		goto end;
	}

	/* Grab the string data: elements 11..13 of the package. */
	str = &resource->model_number;

	for (i = 11; i < 14; i++) {
		union acpi_object *element = &(pss->package.elements[i]);

		if (element->type != ACPI_TYPE_STRING) {
			res = -EINVAL;
			goto error;
		}

		*str = kzalloc(sizeof(u8) * (element->string.length + 1),
			       GFP_KERNEL);
		if (!*str) {
			res = -ENOMEM;
			goto error;
		}

		/* Buffer is zeroed, so the copy stays NUL-terminated. */
		strncpy(*str, element->string.pointer, element->string.length);
		str++;
	}

	dev_info(&resource->acpi_dev->dev, "Found ACPI power meter.\n");
	goto end;
error:
	/*
	 * Free whatever was allocated and clear the pointers so a later
	 * free_capabilities() does not free them again.
	 */
	str = &resource->model_number;
	for (i = 0; i < 3; i++, str++) {
		kfree(*str);
		*str = NULL;
	}
end:
	kfree(buffer.pointer);
	return res;
}
/* Handle ACPI event notifications */
/*
 * acpi_power_meter_notify - handle ACPI notifications from the meter
 *
 * Dispatches on the notification code: re-reads capabilities and
 * rebuilds the attribute set on configuration changes, refreshes cached
 * values and pokes sysfs watchers on trip/cap/interval events, and logs
 * capping-in-progress events.  Every event is forwarded to userspace as
 * a netlink event after the resource lock is dropped.
 */
static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
{
	struct acpi_power_meter_resource *resource;
	int res;

	if (!device || !acpi_driver_data(device))
		return;
	resource = acpi_driver_data(device);

	mutex_lock(&resource->lock);
	switch (event) {
	case METER_NOTIFY_CONFIG:
		/* Capabilities changed: re-read _PMC and rebuild sysfs. */
		free_capabilities(resource);
		res = read_capabilities(resource);
		if (res)
			break;

		remove_attrs(resource);
		setup_attrs(resource);
		break;
	case METER_NOTIFY_TRIP:
		sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
		update_meter(resource);
		break;
	case METER_NOTIFY_CAP:
		sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
		update_cap(resource);
		break;
	case METER_NOTIFY_INTERVAL:
		sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
		update_avg_interval(resource);
		break;
	case METER_NOTIFY_CAPPING:
		sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
		dev_info(&device->dev, "Capping in progress.\n");
		break;
	default:
		/*
		 * NOTE(review): BUG() on an unrecognized firmware event is
		 * harsh — buggy firmware can crash the kernel here; consider
		 * a warning instead.
		 */
		BUG();
	}
	mutex_unlock(&resource->lock);

	acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
					dev_name(&device->dev), event, 0);
}
/*
 * acpi_power_meter_add - ACPI driver .add callback
 *
 * Allocates and initializes the per-meter resource, reads the _PMC
 * capabilities, registers the sysfs attributes, and registers with the
 * hwmon subsystem.  Returns 0 on success or a negative errno, undoing
 * all prior steps on failure.
 */
static int acpi_power_meter_add(struct acpi_device *device)
{
	int res;
	struct acpi_power_meter_resource *resource;

	if (!device)
		return -EINVAL;

	resource = kzalloc(sizeof(struct acpi_power_meter_resource),
			   GFP_KERNEL);
	if (!resource)
		return -ENOMEM;

	resource->sensors_valid = 0;
	resource->acpi_dev = device;
	mutex_init(&resource->lock);
	strcpy(acpi_device_name(device), ACPI_POWER_METER_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_POWER_METER_CLASS);
	device->driver_data = resource;

	/* Harmless on the freshly zeroed resource (kfree(NULL) is a no-op). */
	free_capabilities(resource);
	res = read_capabilities(resource);
	if (res)
		goto exit_free;

	/* -1 means "trip point not configured yet". */
	resource->trip[0] = resource->trip[1] = -1;

	res = setup_attrs(resource);
	if (res)
		goto exit_free;

	resource->hwmon_dev = hwmon_device_register(&device->dev);
	if (IS_ERR(resource->hwmon_dev)) {
		res = PTR_ERR(resource->hwmon_dev);
		goto exit_remove;
	}

	res = 0;
	goto exit;

exit_remove:
	remove_attrs(resource);
exit_free:
	kfree(resource);
exit:
	return res;
}
/*
 * acpi_power_meter_remove - ACPI driver .remove callback
 *
 * Unregisters from hwmon, frees the capability strings, removes all
 * sysfs attributes (and domain-device references), and frees the
 * per-meter resource.
 */
static int acpi_power_meter_remove(struct acpi_device *device, int type)
{
	struct acpi_power_meter_resource *resource;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	resource = acpi_driver_data(device);
	hwmon_device_unregister(resource->hwmon_dev);

	free_capabilities(resource);
	remove_attrs(resource);

	kfree(resource);
	return 0;
}
/*
 * acpi_power_meter_resume - ACPI driver .resume callback
 *
 * Re-reads the meter capabilities after resume, since firmware may have
 * reconfigured the meter while suspended.
 *
 * NOTE(review): the read_capabilities() return value is discarded and 0
 * is always returned — presumably a deliberate best-effort so resume
 * never fails, but worth confirming.
 */
static int acpi_power_meter_resume(struct acpi_device *device)
{
	struct acpi_power_meter_resource *resource;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	resource = acpi_driver_data(device);
	free_capabilities(resource);
	read_capabilities(resource);

	return 0;
}
static struct acpi_driver acpi_power_meter_driver = {
.name = "power_meter",
.class = ACPI_POWER_METER_CLASS,
.ids = power_meter_ids,
.ops = {
.add = acpi_power_meter_add,
.remove = acpi_power_meter_remove,
.resume = acpi_power_meter_resume,
.notify = acpi_power_meter_notify,
},
};
/* Module init/exit routines */
static int __init enable_cap_knobs(const struct dmi_system_id *d)
{
cap_in_hardware = 1;
return 0;
}
/*
 * Systems known to expose safe hardware power capping; matching entries
 * run enable_cap_knobs() at init time.
 */
static struct dmi_system_id __initdata pm_dmi_table[] = {
	{
		.callback = enable_cap_knobs,
		.ident = "IBM Active Energy Manager",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM")
		},
	},
	{}	/* terminator */
};
/*
 * acpi_power_meter_init - module init
 *
 * Bails out when ACPI is disabled, applies DMI quirks (hardware-cap
 * whitelist), and registers the ACPI driver.  Returns 0 on success or a
 * negative errno.
 *
 * Fix: propagate the actual error from acpi_bus_register_driver()
 * instead of collapsing every failure to -ENODEV.
 */
static int __init acpi_power_meter_init(void)
{
	int result;

	if (acpi_disabled)
		return -ENODEV;

	dmi_check_system(pm_dmi_table);

	result = acpi_bus_register_driver(&acpi_power_meter_driver);
	if (result < 0)
		return result;

	return 0;
}
static void __exit acpi_power_meter_exit(void)
{
acpi_bus_unregister_driver(&acpi_power_meter_driver);
}
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("ACPI 4.0 power meter driver");
MODULE_LICENSE("GPL");
module_param(force_cap_on, bool, 0644);
MODULE_PARM_DESC(force_cap_on, "Enable power cap even it is unsafe to do so.");
module_init(acpi_power_meter_init);
module_exit(acpi_power_meter_exit);
| gpl-2.0 |
anoane/HTC_Ville-4.3_Sense | arch/x86/pci/pcbios.c | 4954 | 11174 | /*
* BIOS32 and PCI BIOS handling.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/pci_x86.h>
#include <asm/pci-functions.h>
#include <asm/cacheflush.h>
/* BIOS32 signature: "_32_" */
#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
/* PCI signature: "PCI " */
#define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
/* PCI service signature: "$PCI" */
#define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
/* PCI BIOS hardware mechanism flags */
#define PCIBIOS_HW_TYPE1 0x01
#define PCIBIOS_HW_TYPE2 0x02
#define PCIBIOS_HW_TYPE1_SPEC 0x10
#define PCIBIOS_HW_TYPE2_SPEC 0x20
int pcibios_enabled;
/* According to the BIOS specification at:
* http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could
* restrict the x zone to some pages and make it ro. But this may be
* broken on some bios, complex to handle with static_protections.
* We could make the 0xe0000-0x100000 range rox, but this can break
* some ISA mapping.
*
* So we let's an rw and x hole when pcibios is used. This shouldn't
* happen for modern system with mmconfig, and if you don't want it
* you could disable pcibios...
*/
/*
 * set_bios_x - make the BIOS ROM area executable so we can lcall into it
 *
 * Marks the driver active and flips the BIOS range's page permissions
 * to executable.  Warns when NX is actually in effect, since this opens
 * an rw+x hole (see the comment block above).
 */
static inline void set_bios_x(void)
{
	pcibios_enabled = 1;
	set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
}
/*
* This is the standard structure used to identify the entry point
* to the BIOS32 Service Directory, as documented in
* Standard BIOS 32-bit Service Directory Proposal
* Revision 0.4 May 24, 1993
* Phoenix Technologies Ltd.
* Norwood, MA
* and the PCI BIOS specification.
*/
union bios32 {
struct {
unsigned long signature; /* _32_ */
unsigned long entry; /* 32 bit physical address */
unsigned char revision; /* Revision level, 0 */
unsigned char length; /* Length in paragraphs should be 01 */
unsigned char checksum; /* All bytes must add up to zero */
unsigned char reserved[5]; /* Must be zero */
} fields;
char chars[16];
};
/*
* Physical address of the service directory. I don't know if we're
* allowed to have more than one of these or not, so just in case
* we'll make pcibios_present() take a memory start parameter and store
* the array there.
*/
static struct {
unsigned long address;
unsigned short segment;
} bios32_indirect = { 0, __KERNEL_CS };
/*
* Returns the entry point for the given service, NULL on error
*/
/*
 * bios32_service - look up a service in the BIOS32 Service Directory
 *
 * Far-calls the BIOS32 entry point (via bios32_indirect) with the
 * requested service signature in %eax.  Per the BIOS32 spec the call
 * returns a status in %al, the service's base address in %ebx, its
 * length in %ecx and the entry offset in %edx.
 *
 * Returns the physical entry address (base + offset) for the service,
 * or 0 if the service is absent or the BIOS misbehaves.
 */
static unsigned long bios32_service(unsigned long service)
{
	unsigned char return_code;	/* %al */
	unsigned long address;		/* %ebx */
	unsigned long length;		/* %ecx */
	unsigned long entry;		/* %edx */
	unsigned long flags;

	/* BIOS calls are not reentrant: keep interrupts off around them. */
	local_irq_save(flags);
	__asm__("lcall *(%%edi); cld"
		: "=a" (return_code),
		  "=b" (address),
		  "=c" (length),
		  "=d" (entry)
		: "0" (service),
		  "1" (0),
		  "D" (&bios32_indirect));
	local_irq_restore(flags);

	switch (return_code) {
	case 0:
		return address + entry;
	case 0x80:	/* Not present */
		printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
		return 0;
	default: /* Shouldn't happen */
		printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
			service, return_code);
		return 0;
	}
}
static struct {
unsigned long address;
unsigned short segment;
} pci_indirect = { 0, __KERNEL_CS };
static int pci_bios_present;
/*
 * check_pcibios - probe for the PCI BIOS via the BIOS32 service directory
 *
 * Looks up the "$PCI" service, stores its entry point in pci_indirect,
 * and issues PCI_BIOS_PRESENT to validate the "PCI " signature and read
 * the version, hardware mechanism flags and last bus number.  When
 * CONFIG_PCI_DIRECT is set, config mechanisms the BIOS says are absent
 * are masked out of pci_probe.
 *
 * Returns 1 when a working PCI BIOS was found, 0 otherwise.
 */
static int __devinit check_pcibios(void)
{
	u32 signature, eax, ebx, ecx;
	u8 status, major_ver, minor_ver, hw_mech;
	unsigned long flags, pcibios_entry;

	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
		pci_indirect.address = pcibios_entry + PAGE_OFFSET;

		local_irq_save(flags);
		/* On carry (failure) %ah keeps the error code; else clear it. */
		__asm__(
			"lcall *(%%edi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=d" (signature),
			  "=a" (eax),
			  "=b" (ebx),
			  "=c" (ecx)
			: "1" (PCIBIOS_PCI_BIOS_PRESENT),
			  "D" (&pci_indirect)
			: "memory");
		local_irq_restore(flags);

		status = (eax >> 8) & 0xff;
		hw_mech = eax & 0xff;
		major_ver = (ebx >> 8) & 0xff;
		minor_ver = ebx & 0xff;
		if (pcibios_last_bus < 0)
			pcibios_last_bus = ecx & 0xff;
		DBG("PCI: BIOS probe returned s=%02x hw=%02x ver=%02x.%02x l=%02x\n",
			status, hw_mech, major_ver, minor_ver, pcibios_last_bus);
		if (status || signature != PCI_SIGNATURE) {
			printk (KERN_ERR "PCI: BIOS BUG #%x[%08x] found\n",
				status, signature);
			return 0;
		}
		printk(KERN_INFO "PCI: PCI BIOS revision %x.%02x entry at 0x%lx, last bus=%d\n",
			major_ver, minor_ver, pcibios_entry, pcibios_last_bus);
#ifdef CONFIG_PCI_DIRECT
		/* Trim probe methods to what the BIOS claims to support. */
		if (!(hw_mech & PCIBIOS_HW_TYPE1))
			pci_probe &= ~PCI_PROBE_CONF1;
		if (!(hw_mech & PCIBIOS_HW_TYPE2))
			pci_probe &= ~PCI_PROBE_CONF2;
#endif
		return 1;
	}
	return 0;
}
/*
 * pci_bios_read - read PCI config space through the PCI BIOS
 * @seg:   PCI segment (must be 0; BIOS interface has no segment notion)
 * @bus:   bus number (0-255)
 * @devfn: device/function (0-255)
 * @reg:   config register offset (0-255)
 * @len:   access width in bytes: 1, 2 or 4
 * @value: out parameter for the value read
 *
 * Serializes on pci_config_lock and issues the appropriate
 * READ_CONFIG_{BYTE,WORD,DWORD} BIOS call.  Returns the BIOS status
 * byte (%ah), i.e. 0 on success.
 */
static int pci_bios_read(unsigned int seg, unsigned int bus,
			 unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long result = 0;
	unsigned long flags;
	/* BIOS calling convention: %bh = bus, %bl = devfn. */
	unsigned long bx = (bus << 8) | devfn;

	WARN_ON(seg);
	if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
		return -EINVAL;

	raw_spin_lock_irqsave(&pci_config_lock, flags);

	switch (len) {
	case 1:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=c" (*value),
			  "=a" (result)
			: "1" (PCIBIOS_READ_CONFIG_BYTE),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		/*
		 * Zero-extend the result beyond 8 bits, do not trust the
		 * BIOS having done it:
		 */
		*value &= 0xff;
		break;
	case 2:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=c" (*value),
			  "=a" (result)
			: "1" (PCIBIOS_READ_CONFIG_WORD),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		/*
		 * Zero-extend the result beyond 16 bits, do not trust the
		 * BIOS having done it:
		 */
		*value &= 0xffff;
		break;
	case 4:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=c" (*value),
			  "=a" (result)
			: "1" (PCIBIOS_READ_CONFIG_DWORD),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		break;
	}

	raw_spin_unlock_irqrestore(&pci_config_lock, flags);

	/* Extract the BIOS status byte from %ah. */
	return (int)((result & 0xff00) >> 8);
}
/*
 * pci_bios_write - write PCI config space through the PCI BIOS
 * @seg:   PCI segment (must be 0)
 * @bus:   bus number (0-255)
 * @devfn: device/function (0-255)
 * @reg:   config register offset (0-255)
 * @len:   access width in bytes: 1, 2 or 4
 * @value: value to write
 *
 * Counterpart of pci_bios_read(); issues WRITE_CONFIG_{BYTE,WORD,DWORD}
 * under pci_config_lock.  Returns the BIOS status byte (0 on success).
 */
static int pci_bios_write(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 value)
{
	unsigned long result = 0;
	unsigned long flags;
	/* BIOS calling convention: %bh = bus, %bl = devfn. */
	unsigned long bx = (bus << 8) | devfn;

	WARN_ON(seg);
	if ((bus > 255) || (devfn > 255) || (reg > 255))
		return -EINVAL;

	raw_spin_lock_irqsave(&pci_config_lock, flags);

	switch (len) {
	case 1:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=a" (result)
			: "0" (PCIBIOS_WRITE_CONFIG_BYTE),
			  "c" (value),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		break;
	case 2:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=a" (result)
			: "0" (PCIBIOS_WRITE_CONFIG_WORD),
			  "c" (value),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		break;
	case 4:
		__asm__("lcall *(%%esi); cld\n\t"
			"jc 1f\n\t"
			"xor %%ah, %%ah\n"
			"1:"
			: "=a" (result)
			: "0" (PCIBIOS_WRITE_CONFIG_DWORD),
			  "c" (value),
			  "b" (bx),
			  "D" ((long)reg),
			  "S" (&pci_indirect));
		break;
	}

	raw_spin_unlock_irqrestore(&pci_config_lock, flags);

	/* Extract the BIOS status byte from %ah. */
	return (int)((result & 0xff00) >> 8);
}
/*
* Function table for BIOS32 access
*/
static const struct pci_raw_ops pci_bios_access = {
.read = pci_bios_read,
.write = pci_bios_write
};
/*
* Try to find PCI BIOS.
*/
/*
 * pci_find_bios - scan 0xe0000-0xffff0 for a BIOS32 service directory
 *
 * Walks the range on 16-byte boundaries looking for a "_32_" structure
 * with a valid checksum and revision 0, then probes for the PCI BIOS
 * service.  Returns &pci_bios_access when a usable PCI BIOS is found,
 * NULL otherwise.
 */
static const struct pci_raw_ops * __devinit pci_find_bios(void)
{
	union bios32 *check;
	unsigned char sum;
	int i, length;

	/*
	 * Follow the standard procedure for locating the BIOS32 Service
	 * directory by scanning the permissible address range from
	 * 0xe0000 through 0xfffff for a valid BIOS32 structure.
	 */

	for (check = (union bios32 *) __va(0xe0000);
	     check <= (union bios32 *) __va(0xffff0);
	     ++check) {
		long sig;
		/* Fault-safe read: BIOS area may not be fully mapped. */
		if (probe_kernel_address(&check->fields.signature, sig))
			continue;

		if (check->fields.signature != BIOS32_SIGNATURE)
			continue;
		length = check->fields.length * 16;
		if (!length)
			continue;
		/* All bytes of the structure must sum to zero (mod 256). */
		sum = 0;
		for (i = 0; i < length ; ++i)
			sum += check->chars[i];
		if (sum != 0)
			continue;
		if (check->fields.revision != 0) {
			printk("PCI: unsupported BIOS32 revision %d at 0x%p\n",
				check->fields.revision, check);
			continue;
		}
		DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
		if (check->fields.entry >= 0x100000) {
			/* Entry must lie in the first megabyte to lcall it. */
			printk("PCI: BIOS32 entry (0x%p) in high memory, "
					"cannot use.\n", check);
			return NULL;
		} else {
			unsigned long bios32_entry = check->fields.entry;
			DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
					bios32_entry);
			bios32_indirect.address = bios32_entry + PAGE_OFFSET;
			set_bios_x();
			if (check_pcibios())
				return &pci_bios_access;
		}
		break;	/* Hopefully more than one BIOS32 cannot happen... */
	}

	return NULL;
}
/*
* BIOS Functions for IRQ Routing
*/
struct irq_routing_options {
u16 size;
struct irq_info *table;
u16 segment;
} __attribute__((packed));
/*
 * pcibios_get_irq_routing_table - fetch the IRQ routing table via PCI BIOS
 *
 * Issues PCIBIOS_GET_ROUTING_OPTIONS with a page-sized scratch buffer
 * for the slot entries, then copies the result into a freshly allocated
 * struct irq_routing_table (header + slot data).
 *
 * Returns the table (caller frees with kfree()) or NULL when no PCI
 * BIOS is present, the BIOS call fails, or allocation fails.
 *
 * Cleanup: use kzalloc() instead of kmalloc() + a header-only memset(),
 * so the whole allocation (including any padding) starts zeroed.
 */
struct irq_routing_table * pcibios_get_irq_routing_table(void)
{
	struct irq_routing_options opt;
	struct irq_routing_table *rt = NULL;
	int ret, map;
	unsigned long page;

	if (!pci_bios_present)
		return NULL;
	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return NULL;
	opt.table = (struct irq_info *) page;
	opt.size = PAGE_SIZE;
	opt.segment = __KERNEL_DS;

	DBG("PCI: Fetching IRQ routing table... ");
	/* The BIOS expects %es = %ds for the buffer descriptor. */
	__asm__("push %%es\n\t"
		"push %%ds\n\t"
		"pop %%es\n\t"
		"lcall *(%%esi); cld\n\t"
		"pop %%es\n\t"
		"jc 1f\n\t"
		"xor %%ah, %%ah\n"
		"1:"
		: "=a" (ret),
		  "=b" (map),
		  "=m" (opt)
		: "0" (PCIBIOS_GET_ROUTING_OPTIONS),
		  "1" (0),
		  "D" ((long) &opt),
		  "S" (&pci_indirect),
		  "m" (opt)
		: "memory");
	DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
	if (ret & 0xff00)
		printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
	else if (opt.size) {
		rt = kzalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
		if (rt) {
			rt->size = opt.size + sizeof(struct irq_routing_table);
			rt->exclusive_irqs = map;
			memcpy(rt->slots, (void *) page, opt.size);
			printk(KERN_INFO "PCI: Using BIOS Interrupt Routing Table\n");
		}
	}
	free_page(page);
	return rt;
}
EXPORT_SYMBOL(pcibios_get_irq_routing_table);
/*
 * pcibios_set_irq_routing - route an interrupt line via the PCI BIOS
 * @dev: PCI device to route
 * @pin: interrupt pin, 0-based (BIOS wants INTA#=0x0a, hence +10)
 * @irq: IRQ number to assign
 *
 * Returns 1 on success, 0 on failure (non-zero BIOS status in %ah).
 */
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
{
	int ret;

	__asm__("lcall *(%%esi); cld\n\t"
		"jc 1f\n\t"
		"xor %%ah, %%ah\n"
		"1:"
		: "=a" (ret)
		: "0" (PCIBIOS_SET_PCI_HW_INT),
		  "b" ((dev->bus->number << 8) | dev->devfn),
		  "c" ((irq << 8) | (pin + 10)),
		  "S" (&pci_indirect));
	return !(ret & 0xff00);
}
EXPORT_SYMBOL(pcibios_set_irq_routing);
/*
 * pci_pcbios_init - install the PCI BIOS as the raw config accessor
 *
 * Only when pci=bios probing is allowed and a working PCI BIOS is
 * located; on success raw_pci_ops points at pci_bios_access and
 * pci_bios_present enables pcibios_get_irq_routing_table().
 */
void __init pci_pcbios_init(void)
{
	if ((pci_probe & PCI_PROBE_BIOS)
		&& ((raw_pci_ops = pci_find_bios()))) {
		pci_bios_present = 1;
	}
}
| gpl-2.0 |
friedrich420/HTC-ONE-M7-AEL-Kernel-5.0.2 | drivers/input/touchscreen/max11801_ts.c | 4954 | 7516 | /*
* Driver for MAXI MAX11801 - A Resistive touch screen controller with
* i2c interface
*
* Copyright (C) 2011 Freescale Semiconductor, Inc.
* Author: Zhang Jiejing <jiejing.zhang@freescale.com>
*
* Based on mcs5000_ts.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* This driver aims to support the series of MAXI touch chips max11801
* through max11803. The main difference between these 4 chips can be
* found in the table below:
* -----------------------------------------------------
* | CHIP | AUTO MODE SUPPORT(FIFO) | INTERFACE |
* |----------------------------------------------------|
* | max11800 | YES | SPI |
* | max11801 | YES | I2C |
* | max11802 | NO | SPI |
* | max11803 | NO | I2C |
* ------------------------------------------------------
*
* Currently, this driver only supports max11801.
*
* Data Sheet:
* http://www.maxim-ic.com/datasheet/index.mvp/id/5943
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/bitops.h>
/* Register Address define */
#define GENERNAL_STATUS_REG 0x00
#define GENERNAL_CONF_REG 0x01
#define MESURE_RES_CONF_REG 0x02
#define MESURE_AVER_CONF_REG 0x03
#define ADC_SAMPLE_TIME_CONF_REG 0x04
#define PANEL_SETUPTIME_CONF_REG 0x05
#define DELAY_CONVERSION_CONF_REG 0x06
#define TOUCH_DETECT_PULLUP_CONF_REG 0x07
#define AUTO_MODE_TIME_CONF_REG 0x08 /* only for max11800/max11801 */
#define APERTURE_CONF_REG 0x09 /* only for max11800/max11801 */
#define AUX_MESURE_CONF_REG 0x0a
#define OP_MODE_CONF_REG 0x0b
/* FIFO is found only in max11800 and max11801 */
#define FIFO_RD_CMD (0x50 << 1)
#define MAX11801_FIFO_INT (1 << 2)
#define MAX11801_FIFO_OVERFLOW (1 << 3)
#define XY_BUFSIZE 4
#define XY_BUF_OFFSET 4
#define MAX11801_MAX_X 0xfff
#define MAX11801_MAX_Y 0xfff
#define MEASURE_TAG_OFFSET 2
#define MEASURE_TAG_MASK (3 << MEASURE_TAG_OFFSET)
#define EVENT_TAG_OFFSET 0
#define EVENT_TAG_MASK (3 << EVENT_TAG_OFFSET)
#define MEASURE_X_TAG (0 << MEASURE_TAG_OFFSET)
#define MEASURE_Y_TAG (1 << MEASURE_TAG_OFFSET)
/* These are the state of touch event state machine */
enum {
EVENT_INIT,
EVENT_MIDDLE,
EVENT_RELEASE,
EVENT_FIFO_END
};
struct max11801_data {
struct i2c_client *client;
struct input_dev *input_dev;
};
/*
 * read_register - read one MAX11801 register over SMBus
 *
 * Register addresses are shifted left one bit because the chip ignores
 * the LSB of the address byte.  Returns the register value (or a
 * negative errno truncated to u8 on I2C failure).
 */
static u8 read_register(struct i2c_client *client, int addr)
{
	/* XXX: The chip ignores LSB of register address */
	return i2c_smbus_read_byte_data(client, addr << 1);
}
/*
 * max11801_write_reg - write one MAX11801 register over SMBus
 *
 * Same LSB-ignored address encoding as read_register().  Returns 0 or a
 * negative errno from the SMBus layer.
 */
static int max11801_write_reg(struct i2c_client *client, int addr, int data)
{
	/* XXX: The chip ignores LSB of register address */
	return i2c_smbus_write_byte_data(client, addr << 1, data);
}
/*
 * max11801_ts_interrupt - threaded IRQ handler: drain the touch FIFO
 *
 * On a FIFO interrupt/overflow, reads a 4-byte FIFO record holding one
 * X and one Y measurement.  Each 2-byte pair packs 12 bits of position
 * (8 high bits in byte 0, 4 low bits in the top nibble of byte 1) plus
 * measurement and event tags in byte 1's low nibble.  Reports
 * position + BTN_TOUCH on INIT/MIDDLE events and a release on RELEASE.
 */
static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
{
	struct max11801_data *data = dev_id;
	struct i2c_client *client = data->client;
	int status, i, ret;
	u8 buf[XY_BUFSIZE];
	int x = -1;
	int y = -1;

	status = read_register(data->client, GENERNAL_STATUS_REG);

	if (status & (MAX11801_FIFO_INT | MAX11801_FIFO_OVERFLOW)) {
		/* Second read acknowledges/refreshes the status. */
		status = read_register(data->client, GENERNAL_STATUS_REG);

		ret = i2c_smbus_read_i2c_block_data(client, FIFO_RD_CMD,
						    XY_BUFSIZE, buf);

		/*
		 * We should get 4 bytes buffer that contains X,Y
		 * and event tag
		 */
		if (ret < XY_BUFSIZE)
			goto out;

		for (i = 0; i < XY_BUFSIZE; i += XY_BUFSIZE / 2) {
			if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_X_TAG)
				x = (buf[i] << XY_BUF_OFFSET) +
				    (buf[i + 1] >> XY_BUF_OFFSET);
			else if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_Y_TAG)
				y = (buf[i] << XY_BUF_OFFSET) +
				    (buf[i + 1] >> XY_BUF_OFFSET);
		}

		/* X and Y must belong to the same touch event. */
		if ((buf[1] & EVENT_TAG_MASK) != (buf[3] & EVENT_TAG_MASK))
			goto out;

		switch (buf[1] & EVENT_TAG_MASK) {
		case EVENT_INIT:
			/* fall through */
		case EVENT_MIDDLE:
			input_report_abs(data->input_dev, ABS_X, x);
			input_report_abs(data->input_dev, ABS_Y, y);
			input_event(data->input_dev, EV_KEY, BTN_TOUCH, 1);
			input_sync(data->input_dev);
			break;
		case EVENT_RELEASE:
			input_event(data->input_dev, EV_KEY, BTN_TOUCH, 0);
			input_sync(data->input_dev);
			break;
		case EVENT_FIFO_END:
			break;
		}
	}
out:
	return IRQ_HANDLED;
}
/*
 * max11801_ts_phy_init - program the controller's measurement parameters
 *
 * Writes the fixed configuration used by this driver (averaging, setup
 * and pull-up timing, automode scan period, aperture) and finally
 * powers the chip up in automatic mode.
 */
static void __devinit max11801_ts_phy_init(struct max11801_data *data)
{
	struct i2c_client *client = data->client;

	/* Average X,Y, take 16 samples, average eight media sample */
	max11801_write_reg(client, MESURE_AVER_CONF_REG, 0xff);
	/* X,Y panel setup time set to 20us */
	max11801_write_reg(client, PANEL_SETUPTIME_CONF_REG, 0x11);
	/* Rough pullup time (2uS), Fine pullup time (10us) */
	max11801_write_reg(client, TOUCH_DETECT_PULLUP_CONF_REG, 0x10);
	/* Auto mode init period = 5ms , scan period = 5ms*/
	max11801_write_reg(client, AUTO_MODE_TIME_CONF_REG, 0xaa);
	/* Aperture X,Y set to +- 4LSB */
	max11801_write_reg(client, APERTURE_CONF_REG, 0x33);
	/* Enable Power, enable Automode, enable Aperture, enable Average X,Y */
	max11801_write_reg(client, OP_MODE_CONF_REG, 0x36);
}
/*
 * max11801_ts_probe - I2C probe: allocate, configure and register the device
 *
 * Allocates driver state and an input device, programs the chip,
 * requests the threaded IRQ (level-low, oneshot), and registers the
 * input device.  Returns 0 or a negative errno, releasing everything
 * acquired so far on failure.
 */
static int __devinit max11801_ts_probe(struct i2c_client *client,
				       const struct i2c_device_id *id)
{
	struct max11801_data *data;
	struct input_dev *input_dev;
	int error;

	data = kzalloc(sizeof(struct max11801_data), GFP_KERNEL);
	input_dev = input_allocate_device();
	/* Either allocation failing sends both to the common cleanup. */
	if (!data || !input_dev) {
		dev_err(&client->dev, "Failed to allocate memory\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	data->client = client;
	data->input_dev = input_dev;

	input_dev->name = "max11801_ts";
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = &client->dev;

	/* Single-touch absolute device with a touch button. */
	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(BTN_TOUCH, input_dev->keybit);
	input_set_abs_params(input_dev, ABS_X, 0, MAX11801_MAX_X, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, MAX11801_MAX_Y, 0, 0);
	input_set_drvdata(input_dev, data);

	max11801_ts_phy_init(data);

	error = request_threaded_irq(client->irq, NULL, max11801_ts_interrupt,
				     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				     "max11801_ts", data);
	if (error) {
		dev_err(&client->dev, "Failed to register interrupt\n");
		goto err_free_mem;
	}

	error = input_register_device(data->input_dev);
	if (error)
		goto err_free_irq;

	i2c_set_clientdata(client, data);
	return 0;

err_free_irq:
	free_irq(client->irq, data);
err_free_mem:
	input_free_device(input_dev);
	kfree(data);
	return error;
}
/*
 * max11801_ts_remove - I2C remove: release IRQ, input device and state
 *
 * free_irq() first so the handler cannot run against a freed input
 * device; input_unregister_device() also frees the input_dev.
 */
static __devexit int max11801_ts_remove(struct i2c_client *client)
{
	struct max11801_data *data = i2c_get_clientdata(client);

	free_irq(client->irq, data);
	input_unregister_device(data->input_dev);
	kfree(data);

	return 0;
}
static const struct i2c_device_id max11801_ts_id[] = {
{"max11801", 0},
{ }
};
MODULE_DEVICE_TABLE(i2c, max11801_ts_id);
static struct i2c_driver max11801_ts_driver = {
.driver = {
.name = "max11801_ts",
.owner = THIS_MODULE,
},
.id_table = max11801_ts_id,
.probe = max11801_ts_probe,
.remove = __devexit_p(max11801_ts_remove),
};
module_i2c_driver(max11801_ts_driver);
MODULE_AUTHOR("Zhang Jiejing <jiejing.zhang@freescale.com>");
MODULE_DESCRIPTION("Touchscreen driver for MAXI MAX11801 controller");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zarboz/Ville-Z.238 | drivers/staging/comedi/drivers/pcl725.c | 8282 | 2677 | /*
* comedi/drivers/pcl725.c
* Driver for PCL725 and clones
* David A. Schleef
*/
/*
Driver: pcl725
Description: Advantech PCL-725 (& compatibles)
Author: ds
Status: unknown
Devices: [Advantech] PCL-725 (pcl725)
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#define PCL725_SIZE 2
#define PCL725_DO 0
#define PCL725_DI 1
static int pcl725_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pcl725_detach(struct comedi_device *dev);
static struct comedi_driver driver_pcl725 = {
.driver_name = "pcl725",
.module = THIS_MODULE,
.attach = pcl725_attach,
.detach = pcl725_detach,
};
static int __init driver_pcl725_init_module(void)
{
return comedi_driver_register(&driver_pcl725);
}
static void __exit driver_pcl725_cleanup_module(void)
{
comedi_driver_unregister(&driver_pcl725);
}
module_init(driver_pcl725_init_module);
module_exit(driver_pcl725_cleanup_module);
/*
 * pcl725_do_insn - INSN_BITS handler for the digital output subdevice
 *
 * data[0] is the mask of channels to update, data[1] the new bit
 * values.  The shadow state is updated and written to the DO port only
 * when the mask is non-empty; data[1] returns the current state.
 */
static int pcl725_do_insn(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask = data[0];
	unsigned int bits = data[1];

	if (insn->n != 2)
		return -EINVAL;

	if (mask) {
		s->state = (s->state & ~mask) | (mask & bits);
		outb(s->state, dev->iobase + PCL725_DO);
	}

	data[1] = s->state;

	return 2;
}
/*
 * pcl725_di_insn - INSN_BITS handler for the digital input subdevice
 *
 * Reads all 8 input channels from the DI port in one inb(); the sampled
 * bits are returned in data[1].
 */
static int pcl725_di_insn(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n != 2)
		return -EINVAL;

	data[1] = inb(dev->iobase + PCL725_DI);

	return 2;
}
static int pcl725_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase;
iobase = it->options[0];
printk(KERN_INFO "comedi%d: pcl725: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, PCL725_SIZE, "pcl725")) {
printk("I/O port conflict\n");
return -EIO;
}
dev->board_name = "pcl725";
dev->iobase = iobase;
dev->irq = 0;
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
s = dev->subdevices + 0;
/* do */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->maxdata = 1;
s->n_chan = 8;
s->insn_bits = pcl725_do_insn;
s->range_table = &range_digital;
s = dev->subdevices + 1;
/* di */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->maxdata = 1;
s->n_chan = 8;
s->insn_bits = pcl725_di_insn;
s->range_table = &range_digital;
printk(KERN_INFO "\n");
return 0;
}
/*
 * pcl725_detach - comedi detach: release the I/O region
 *
 * Also called by the comedi core after a failed attach, hence the
 * iobase check before releasing.
 */
static int pcl725_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: pcl725: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, PCL725_SIZE);

	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Evervolv/android_kernel_lge_mako | drivers/staging/comedi/drivers/pcl726.c | 8282 | 10231 | /*
comedi/drivers/pcl726.c
hardware driver for Advantech cards:
card: PCL-726, PCL-727, PCL-728
driver: pcl726, pcl727, pcl728
and for ADLink cards:
card: ACL-6126, ACL-6128
driver: acl6126, acl6128
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: pcl726
Description: Advantech PCL-726 & compatibles
Author: ds
Status: untested
Devices: [Advantech] PCL-726 (pcl726), PCL-727 (pcl727), PCL-728 (pcl728),
[ADLink] ACL-6126 (acl6126), ACL-6128 (acl6128)
Interrupts are not supported.
Options for PCL-726:
[0] - IO Base
[2]...[7] - D/A output range for channel 1-6:
0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
4: 4-20mA, 5: unknown (external reference)
Options for PCL-727:
[0] - IO Base
[2]...[13] - D/A output range for channel 1-12:
0: 0-5V, 1: 0-10V, 2: +/-5V,
3: 4-20mA
Options for PCL-728 and ACL-6128:
[0] - IO Base
[2], [3] - D/A output range for channel 1 and 2:
0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
4: 4-20mA, 5: 0-20mA
Options for ACL-6126:
[0] - IO Base
[1] - IRQ (0=disable, 3, 5, 6, 7, 9, 10, 11, 12, 15) (currently ignored)
[2]...[7] - D/A output range for channel 1-6:
0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V,
4: 4-20mA
*/
/*
Thanks to Circuit Specialists for having programming info (!) on
their web page. (http://www.cir.com/)
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#undef ACL6126_IRQ /* no interrupt support (yet) */
#define PCL726_SIZE 16
#define PCL727_SIZE 32
#define PCL728_SIZE 8
#define PCL726_DAC0_HI 0
#define PCL726_DAC0_LO 1
#define PCL726_DO_HI 12
#define PCL726_DO_LO 13
#define PCL726_DI_HI 14
#define PCL726_DI_LO 15
#define PCL727_DO_HI 24
#define PCL727_DO_LO 25
#define PCL727_DI_HI 0
#define PCL727_DI_LO 1
static const struct comedi_lrange range_4_20mA = { 1, {RANGE_mA(4, 20)} };
static const struct comedi_lrange range_0_20mA = { 1, {RANGE_mA(0, 20)} };
static const struct comedi_lrange *const rangelist_726[] = {
&range_unipolar5, &range_unipolar10,
&range_bipolar5, &range_bipolar10,
&range_4_20mA, &range_unknown
};
static const struct comedi_lrange *const rangelist_727[] = {
&range_unipolar5, &range_unipolar10,
&range_bipolar5,
&range_4_20mA
};
static const struct comedi_lrange *const rangelist_728[] = {
&range_unipolar5, &range_unipolar10,
&range_bipolar5, &range_bipolar10,
&range_4_20mA, &range_0_20mA
};
static int pcl726_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pcl726_detach(struct comedi_device *dev);
/*
 * Static per-board description; an entry from boardtypes[] is selected
 * by the comedi core and reached through the this_board macro.
 */
struct pcl726_board {
	const char *name;	/* driver name */
	int n_aochan;		/* num of D/A chans */
	int num_of_ranges;	/* num of ranges */
	unsigned int IRQbits;	/* allowed interrupts */
	unsigned int io_range;	/* len of IO space */
	char have_dio;		/* 1=card have DI/DO ports */
	int di_hi;		/* ports for DI/DO operations */
	int di_lo;
	int do_hi;
	int do_lo;
	const struct comedi_lrange *const *range_type_list;
	/* list of supported ranges */
};
/*
 * One entry per supported card; the comedi core matches these by name
 * (see .board_name/.num_names in driver_pcl726 below this table).
 */
static const struct pcl726_board boardtypes[] = {
	{"pcl726", 6, 6, 0x0000, PCL726_SIZE, 1,
	 PCL726_DI_HI, PCL726_DI_LO, PCL726_DO_HI, PCL726_DO_LO,
	 &rangelist_726[0],},
	{"pcl727", 12, 4, 0x0000, PCL727_SIZE, 1,
	 PCL727_DI_HI, PCL727_DI_LO, PCL727_DO_HI, PCL727_DO_LO,
	 &rangelist_727[0],},
	{"pcl728", 2, 6, 0x0000, PCL728_SIZE, 0,
	 0, 0, 0, 0,
	 &rangelist_728[0],},
	{"acl6126", 6, 5, 0x96e8, PCL726_SIZE, 1,
	 PCL726_DI_HI, PCL726_DI_LO, PCL726_DO_HI, PCL726_DO_LO,
	 &rangelist_726[0],},
	{"acl6128", 2, 6, 0x0000, PCL728_SIZE, 0,
	 0, 0, 0, 0,
	 &rangelist_728[0],},
};
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl726_board))
#define this_board ((const struct pcl726_board *)dev->board_ptr)
/* Comedi driver descriptor; board lookup walks boardtypes[] by name. */
static struct comedi_driver driver_pcl726 = {
	.driver_name = "pcl726",
	.module = THIS_MODULE,
	.attach = pcl726_attach,
	.detach = pcl726_detach,
	.board_name = &boardtypes[0].name,
	.num_names = n_boardtypes,
	.offset = sizeof(struct pcl726_board),
};
/* Standard module plumbing: register/unregister the comedi driver. */
static int __init driver_pcl726_init_module(void)
{
	return comedi_driver_register(&driver_pcl726);
}

static void __exit driver_pcl726_cleanup_module(void)
{
	comedi_driver_unregister(&driver_pcl726);
}

module_init(driver_pcl726_init_module);
module_exit(driver_pcl726_cleanup_module);
/* Per-device state, reached through the devpriv macro (dev->private). */
struct pcl726_private {
	int bipolar[12];	/* 1 if the channel's range is bipolar */
	const struct comedi_lrange *rangelist[12];	/* per-channel range */
	unsigned int ao_readback[12];	/* last value written to each DAC */
};
#define devpriv ((struct pcl726_private *)dev->private)
static int pcl726_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int i;

	/* Write every sample in the instruction to the channel's DAC pair. */
	for (i = 0; i < insn->n; i++) {
		unsigned int val = data[i];
		int lsb = val & 0xff;
		int msb = (val >> 8) & 0xf;

		/* Bipolar ranges flip the sign bit of the high nibble. */
		if (devpriv->bipolar[chan])
			msb ^= 0x8;
		/*
		 * The programming info did not say which order to write
		 * the two bytes; swap the next two outb() calls if you
		 * see glitches.
		 */
		outb(msb, dev->iobase + PCL726_DAC0_HI + 2 * chan);
		outb(lsb, dev->iobase + PCL726_DAC0_LO + 2 * chan);
		devpriv->ao_readback[chan] = val;
	}

	return i;
}
static int pcl726_ao_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int i;

	/* The hardware is write-only; return the cached last value. */
	for (i = 0; i < insn->n; i++)
		data[i] = devpriv->ao_readback[chan];

	return i;
}
static int pcl726_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	/* INSN_BITS always carries a (mask, bits) pair. */
	if (insn->n != 2)
		return -EINVAL;

	/* Read all 16 digital inputs: low-byte port plus high-byte port. */
	data[1] = inb(dev->iobase + this_board->di_lo) |
	    (inb(dev->iobase + this_board->di_hi) << 8);

	return 2;
}
static int pcl726_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask;
	unsigned int bits;

	if (insn->n != 2)
		return -EINVAL;

	mask = data[0];
	bits = data[1];

	/* Update the cached output state for the requested bits only. */
	if (mask) {
		s->state &= ~mask;
		s->state |= mask & bits;
	}
	/* Push the affected byte(s) out to the hardware ports. */
	if (bits & 0x00ff)
		outb(s->state & 0xff, dev->iobase + this_board->do_lo);
	if (bits & 0xff00)
		outb(s->state >> 8, dev->iobase + this_board->do_hi);

	data[1] = s->state;

	return 2;
}
static int pcl726_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase;
unsigned int iorange;
int ret, i;
#ifdef ACL6126_IRQ
unsigned int irq;
#endif
iobase = it->options[0];
iorange = this_board->io_range;
printk(KERN_WARNING "comedi%d: pcl726: board=%s, 0x%03lx ", dev->minor,
this_board->name, iobase);
if (!request_region(iobase, iorange, "pcl726")) {
printk(KERN_WARNING "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
dev->board_name = this_board->name;
ret = alloc_private(dev, sizeof(struct pcl726_private));
if (ret < 0)
return -ENOMEM;
for (i = 0; i < 12; i++) {
devpriv->bipolar[i] = 0;
devpriv->rangelist[i] = &range_unknown;
}
#ifdef ACL6126_IRQ
irq = 0;
if (boardtypes[board].IRQbits != 0) { /* board support IRQ */
irq = it->options[1];
devpriv->first_chan = 2;
if (irq) { /* we want to use IRQ */
if (((1 << irq) & boardtypes[board].IRQbits) == 0) {
printk(KERN_WARNING
", IRQ %d is out of allowed range,"
" DISABLING IT", irq);
irq = 0; /* Bad IRQ */
} else {
if (request_irq(irq, interrupt_pcl818, 0,
"pcl726", dev)) {
printk(KERN_WARNING
", unable to allocate IRQ %d,"
" DISABLING IT", irq);
irq = 0; /* Can't use IRQ */
} else {
printk(", irq=%d", irq);
}
}
}
}
dev->irq = irq;
#endif
printk("\n");
ret = alloc_subdevices(dev, 3);
if (ret < 0)
return ret;
s = dev->subdevices + 0;
/* ao */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = this_board->n_aochan;
s->maxdata = 0xfff;
s->len_chanlist = 1;
s->insn_write = pcl726_ao_insn;
s->insn_read = pcl726_ao_insn_read;
s->range_table_list = devpriv->rangelist;
for (i = 0; i < this_board->n_aochan; i++) {
int j;
j = it->options[2 + 1];
if ((j < 0) || (j >= this_board->num_of_ranges)) {
printk
("Invalid range for channel %d! Must be 0<=%d<%d\n",
i, j, this_board->num_of_ranges - 1);
j = 0;
}
devpriv->rangelist[i] = this_board->range_type_list[j];
if (devpriv->rangelist[i]->range[0].min ==
-devpriv->rangelist[i]->range[0].max)
devpriv->bipolar[i] = 1; /* bipolar range */
}
s = dev->subdevices + 1;
/* di */
if (!this_board->have_dio) {
s->type = COMEDI_SUBD_UNUSED;
} else {
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 16;
s->maxdata = 1;
s->len_chanlist = 1;
s->insn_bits = pcl726_di_insn_bits;
s->range_table = &range_digital;
}
s = dev->subdevices + 2;
/* do */
if (!this_board->have_dio) {
s->type = COMEDI_SUBD_UNUSED;
} else {
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = 16;
s->maxdata = 1;
s->len_chanlist = 1;
s->insn_bits = pcl726_do_insn_bits;
s->range_table = &range_digital;
}
return 0;
}
/*
 * Detach handler; also invoked by the comedi core after a partial
 * attach failure, hence the iobase check before releasing the region.
 */
static int pcl726_detach(struct comedi_device *dev)
{
	/* printk("comedi%d: pcl726: remove\n",dev->minor); */
#ifdef ACL6126_IRQ
	if (dev->irq)
		free_irq(dev->irq, dev);
#endif
	if (dev->iobase)
		release_region(dev->iobase, this_board->io_range);

	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Cyantist-Arcane9/msm-3.10 | drivers/cpufreq/mperf.c | 11610 | 1495 | #include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include "mperf.h"
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* Called via smp_call_function_single(), on the target CPU */
static void read_measured_perf_ctrs(void *_cur)
{
	/* Snapshot the APERF/MPERF counter pair into the caller's buffer. */
	struct aperfmperf *am = _cur;

	get_aperfmperf(am);
}
/*
* Return the measured active (C0) frequency on this CPU since last call
* to this function.
* Input: cpu number
* Return: Average CPU frequency in terms of max frequency (zero on error)
*
* We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
* over a period of time, while CPU is in C0 state.
* IA32_MPERF counts at the rate of max advertised frequency
* IA32_APERF counts at the rate of actual CPU frequency
* Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
* no meaning should be associated with absolute values of these MSRs.
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
				       unsigned int cpu)
{
	struct aperfmperf perf;
	unsigned long ratio;
	unsigned int retval;

	/* Sample the counters on the target CPU; 0 on cross-call failure. */
	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
		return 0;

	/* Ratio of deltas since the previous call, then roll the snapshot. */
	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
	per_cpu(acfreq_old_perf, cpu) = perf;

	/* Scale max_freq by the fixed-point ratio (APERFMPERF_SHIFT bits). */
	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;

	return retval;
}
EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
MODULE_LICENSE("GPL");
| gpl-2.0 |
chillwater/Padfone-A66-Jelly-Bean | drivers/cpufreq/mperf.c | 11610 | 1495 | #include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include "mperf.h"
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* Called via smp_call_function_single(), on the target CPU */
static void read_measured_perf_ctrs(void *_cur)
{
	/* Snapshot the APERF/MPERF counter pair into the caller's buffer. */
	struct aperfmperf *am = _cur;

	get_aperfmperf(am);
}
/*
* Return the measured active (C0) frequency on this CPU since last call
* to this function.
* Input: cpu number
* Return: Average CPU frequency in terms of max frequency (zero on error)
*
* We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
* over a period of time, while CPU is in C0 state.
* IA32_MPERF counts at the rate of max advertised frequency
* IA32_APERF counts at the rate of actual CPU frequency
* Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
* no meaning should be associated with absolute values of these MSRs.
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
				       unsigned int cpu)
{
	struct aperfmperf perf;
	unsigned long ratio;
	unsigned int retval;

	/* Sample the counters on the target CPU; 0 on cross-call failure. */
	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
		return 0;

	/* Ratio of deltas since the previous call, then roll the snapshot. */
	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
	per_cpu(acfreq_old_perf, cpu) = perf;

	/* Scale max_freq by the fixed-point ratio (APERFMPERF_SHIFT bits). */
	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;

	return retval;
}
EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
MODULE_LICENSE("GPL");
| gpl-2.0 |
falstaff84/u-boot | drivers/net/smc911x.c | 91 | 7298 | /*
* SMSC LAN9[12]1[567] Network driver
*
* (c) 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include "smc911x.h"
u32 pkt_data_pull(struct eth_device *dev, u32 addr) \
__attribute__ ((weak, alias ("smc911x_reg_read")));
void pkt_data_push(struct eth_device *dev, u32 addr, u32 val) \
__attribute__ ((weak, alias ("smc911x_reg_write")));
static void smc911x_handle_mac_address(struct eth_device *dev)
{
	uchar *mac = dev->enetaddr;
	unsigned long lo, hi;

	/* Pack the 6-byte station address into the two MAC address CSRs. */
	lo = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	hi = mac[4] | (mac[5] << 8);
	smc911x_set_mac_csr(dev, ADDRL, lo);
	smc911x_set_mac_csr(dev, ADDRH, hi);

	printf(DRIVERNAME ": MAC %pM\n", mac);
}
/*
 * Read one PHY register through the MII management interface.
 * NOTE(review): the busy-wait loops have no timeout; this spins forever
 * if the PHY never clears MII_ACC_MII_BUSY — confirm acceptable here.
 */
static int smc911x_eth_phy_read(struct eth_device *dev,
				u8 phy, u8 reg, u16 *val)
{
	/* Wait for any previous MII transaction to finish. */
	while (smc911x_get_mac_csr(dev, MII_ACC) & MII_ACC_MII_BUSY)
		;

	/* Start the read: PHY address in bits [15:11], register in [10:6]. */
	smc911x_set_mac_csr(dev, MII_ACC, phy << 11 | reg << 6 |
				MII_ACC_MII_BUSY);

	/* Wait for completion, then fetch the data register. */
	while (smc911x_get_mac_csr(dev, MII_ACC) & MII_ACC_MII_BUSY)
		;

	*val = smc911x_get_mac_csr(dev, MII_DATA);

	return 0;
}
/*
 * Write one PHY register through the MII management interface.
 * NOTE(review): same untimed busy-wait caveat as smc911x_eth_phy_read().
 */
static int smc911x_eth_phy_write(struct eth_device *dev,
				 u8 phy, u8 reg, u16 val)
{
	/* Wait for any previous MII transaction to finish. */
	while (smc911x_get_mac_csr(dev, MII_ACC) & MII_ACC_MII_BUSY)
		;

	/* Data must be loaded before the command word triggers the write. */
	smc911x_set_mac_csr(dev, MII_DATA, val);
	smc911x_set_mac_csr(dev, MII_ACC,
		phy << 11 | reg << 6 | MII_ACC_MII_BUSY | MII_ACC_MII_WRITE);

	/* Wait for the write to complete. */
	while (smc911x_get_mac_csr(dev, MII_ACC) & MII_ACC_MII_BUSY)
		;

	return 0;
}
/* Pulse the internal PHY reset bit via the power-management register. */
static int smc911x_phy_reset(struct eth_device *dev)
{
	u32 reg;

	reg = smc911x_reg_read(dev, PMT_CTRL);
	/* Mask off non-control bits before setting the reset request;
	 * presumably these are reserved/self-clearing — TODO confirm
	 * against the LAN911x datasheet. */
	reg &= ~0xfffff030;
	reg |= PMT_CTRL_PHY_RST;
	smc911x_reg_write(dev, PMT_CTRL, reg);

	mdelay(100);	/* give the PHY time to come out of reset */

	return 0;
}
/*
 * Reset the PHY and run autonegotiation, polling link status for up
 * to ~5 seconds.  On timeout only a message is printed; the caller
 * continues regardless.
 */
static void smc911x_phy_configure(struct eth_device *dev)
{
	int timeout;
	u16 status;

	smc911x_phy_reset(dev);

	/* Software-reset the PHY itself, then advertise 10/100 modes
	 * (0x01e1) and restart autonegotiation. */
	smc911x_eth_phy_write(dev, 1, MII_BMCR, BMCR_RESET);
	mdelay(1);
	smc911x_eth_phy_write(dev, 1, MII_ADVERTISE, 0x01e1);
	smc911x_eth_phy_write(dev, 1, MII_BMCR, BMCR_ANENABLE |
				BMCR_ANRESTART);

	/* Poll BMSR once per millisecond until link-up or timeout. */
	timeout = 5000;
	do {
		mdelay(1);
		if ((timeout--) == 0)
			goto err_out;

		if (smc911x_eth_phy_read(dev, 1, MII_BMSR, &status) != 0)
			goto err_out;
	} while (!(status & BMSR_LSTATUS));

	printf(DRIVERNAME ": phy initialized\n");

	return;

err_out:
	printf(DRIVERNAME ": autonegotiation timed out\n");
}
/* Enable the TX/RX data paths in both the FIFO logic and the MAC. */
static void smc911x_enable(struct eth_device *dev)
{
	/* Enable TX */
	smc911x_reg_write(dev, HW_CFG, 8 << 16 | HW_CFG_SF);

	smc911x_reg_write(dev, GPT_CFG, GPT_CFG_TIMER_EN | 10000);

	smc911x_reg_write(dev, TX_CFG, TX_CFG_TX_ON);

	/* no padding to start of packets */
	smc911x_reg_write(dev, RX_CFG, 0);

	/* MAC-level TX/RX enable; heartbeat check disabled. */
	smc911x_set_mac_csr(dev, MAC_CR, MAC_CR_TXEN | MAC_CR_RXEN |
				MAC_CR_HBDIS);
}
/*
 * eth_device .init hook: full bring-up sequence — chip reset, PHY
 * autonegotiation, MAC address programming, then enable TX/RX.
 * Always returns 0.
 */
static int smc911x_init(struct eth_device *dev, bd_t * bd)
{
	struct chip_id *id = dev->priv;

	printf(DRIVERNAME ": detected %s controller\n", id->name);

	smc911x_reset(dev);

	/* Configure the PHY, initialize the link state */
	smc911x_phy_configure(dev);

	smc911x_handle_mac_address(dev);

	/* Turn on Tx + Rx */
	smc911x_enable(dev);

	return 0;
}
/*
 * eth_device .send hook: push one frame through the TX data FIFO and
 * wait synchronously for completion.  Returns 0 on success, -1 if the
 * TX status word reports an error.  The two leading FIFO writes are
 * the TX command words (A: segment flags + buffer length, B: packet
 * length/tag) and must precede the payload.
 */
static int smc911x_send(struct eth_device *dev, void *packet, int length)
{
	u32 *data = (u32*)packet;
	u32 tmplen;
	u32 status;

	smc911x_reg_write(dev, TX_DATA_FIFO, TX_CMD_A_INT_FIRST_SEG |
				TX_CMD_A_INT_LAST_SEG | length);
	smc911x_reg_write(dev, TX_DATA_FIFO, length);

	/* Push the payload as 32-bit words, rounding up the tail. */
	tmplen = (length + 3) / 4;

	while (tmplen--)
		pkt_data_push(dev, TX_DATA_FIFO, *data++);

	/* wait for transmission */
	while (!((smc911x_reg_read(dev, TX_FIFO_INF) &
					TX_FIFO_INF_TSUSED) >> 16));

	/* get status. Ignore 'no carrier' error, it has no meaning for
	 * full duplex operation
	 */
	status = smc911x_reg_read(dev, TX_STATUS_FIFO) &
			(TX_STS_LOC | TX_STS_LATE_COLL | TX_STS_MANY_COLL |
			TX_STS_MANY_DEFER | TX_STS_UNDERRUN);

	if (!status)
		return 0;

	printf(DRIVERNAME ": failed to send packet: %s%s%s%s%s\n",
		status & TX_STS_LOC ? "TX_STS_LOC " : "",
		status & TX_STS_LATE_COLL ? "TX_STS_LATE_COLL " : "",
		status & TX_STS_MANY_COLL ? "TX_STS_MANY_COLL " : "",
		status & TX_STS_MANY_DEFER ? "TX_STS_MANY_DEFER " : "",
		status & TX_STS_UNDERRUN ? "TX_STS_UNDERRUN" : "");

	return -1;
}
/* eth_device .halt hook: a full chip reset stops all traffic. */
static void smc911x_halt(struct eth_device *dev)
{
	smc911x_reset(dev);
}
/*
 * eth_device .recv hook: drain at most one frame from the RX FIFO into
 * NetRxPackets[0] and hand it to the network stack.  Frames whose
 * status word flags an error (RX_STS_ES) are read out but dropped.
 * Always returns 0.
 */
static int smc911x_rx(struct eth_device *dev)
{
	u32 *data = (u32 *)NetRxPackets[0];
	u32 pktlen, tmplen;
	u32 status;

	/* Upper half of RX_FIFO_INF counts queued status words. */
	if ((smc911x_reg_read(dev, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED) >> 16) {
		status = smc911x_reg_read(dev, RX_STATUS_FIFO);
		pktlen = (status & RX_STS_PKT_LEN) >> 16;

		smc911x_reg_write(dev, RX_CFG, 0);

		/* Pull the frame as 32-bit words, rounding up the tail. */
		tmplen = (pktlen + 3) / 4;
		while (tmplen--)
			*data++ = pkt_data_pull(dev, RX_DATA_FIFO);

		if (status & RX_STS_ES)
			printf(DRIVERNAME
				": dropped bad packet. Status: 0x%08x\n",
				status);
		else
			NetReceive(NetRxPackets[0], pktlen);
	}

	return 0;
}
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/* miiphy-framework wrapper around smc911x_eth_phy_read() */
static int smc911x_miiphy_read(const char *devname, u8 phy, u8 reg, u16 *val)
{
	struct eth_device *ethdev = eth_get_dev_by_name(devname);

	/* Unknown device name: report failure to the mii command. */
	if (!ethdev)
		return -1;

	return smc911x_eth_phy_read(ethdev, phy, reg, val);
}
/* miiphy-framework wrapper around smc911x_eth_phy_write() */
static int smc911x_miiphy_write(const char *devname, u8 phy, u8 reg, u16 val)
{
	struct eth_device *ethdev = eth_get_dev_by_name(devname);

	/* Unknown device name: report failure to the mii command. */
	if (!ethdev)
		return -1;

	return smc911x_eth_phy_write(ethdev, phy, reg, val);
}
#endif
/*
 * Probe for a LAN911x at base_addr and register it with the network
 * core.  Returns the number of devices registered (1 on success, 0 if
 * no chip was detected) or -1 on allocation failure.
 */
int smc911x_initialize(u8 dev_num, int base_addr)
{
	struct eth_device *netdev;
	unsigned long lo, hi;

	netdev = malloc(sizeof(*netdev));
	if (!netdev)
		return -1;
	memset(netdev, 0, sizeof(*netdev));

	netdev->iobase = base_addr;

	/* Try to detect chip. Will fail if not present. */
	if (smc911x_detect_chip(netdev)) {
		free(netdev);
		return 0;
	}

	/* A blank EEPROM reads back as all-ones; only accept a real one. */
	hi = smc911x_get_mac_csr(netdev, ADDRH);
	lo = smc911x_get_mac_csr(netdev, ADDRL);
	if (!(lo == 0xffffffff && hi == 0x0000ffff)) {
		/* address is obtained from optional eeprom */
		netdev->enetaddr[0] = lo;
		netdev->enetaddr[1] = lo >> 8;
		netdev->enetaddr[2] = lo >> 16;
		netdev->enetaddr[3] = lo >> 24;
		netdev->enetaddr[4] = hi;
		netdev->enetaddr[5] = hi >> 8;
	}

	netdev->init = smc911x_init;
	netdev->halt = smc911x_halt;
	netdev->send = smc911x_send;
	netdev->recv = smc911x_rx;
	sprintf(netdev->name, "%s-%hu", DRIVERNAME, dev_num);

	eth_register(netdev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	miiphy_register(netdev->name, smc911x_miiphy_read,
			smc911x_miiphy_write);
#endif
	return 1;
}
| gpl-2.0 |
CSE3320/kernel-code | .backup_do_not_remove/drivers/watchdog/cadence_wdt.c | 91 | 12005 | // SPDX-License-Identifier: GPL-2.0+
/*
* Cadence WDT driver - Used by Xilinx Zynq
*
* Copyright (C) 2010 - 2014 Xilinx, Inc.
*
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#define CDNS_WDT_DEFAULT_TIMEOUT 10
/* Supports 1 - 516 sec */
#define CDNS_WDT_MIN_TIMEOUT 1
#define CDNS_WDT_MAX_TIMEOUT 516
/* Restart key */
#define CDNS_WDT_RESTART_KEY 0x00001999
/* Counter register access key */
#define CDNS_WDT_REGISTER_ACCESS_KEY 0x00920000
/* Counter value divisor */
#define CDNS_WDT_COUNTER_VALUE_DIVISOR 0x1000
/* Clock prescaler value and selection */
#define CDNS_WDT_PRESCALE_64 64
#define CDNS_WDT_PRESCALE_512 512
#define CDNS_WDT_PRESCALE_4096 4096
#define CDNS_WDT_PRESCALE_SELECT_64 1
#define CDNS_WDT_PRESCALE_SELECT_512 2
#define CDNS_WDT_PRESCALE_SELECT_4096 3
/* Input clock frequency */
#define CDNS_WDT_CLK_10MHZ 10000000
#define CDNS_WDT_CLK_75MHZ 75000000
/* Counter maximum value */
#define CDNS_WDT_COUNTER_MAX 0xFFF
static int wdt_timeout;
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_timeout, int, 0644);
MODULE_PARM_DESC(wdt_timeout,
"Watchdog time in seconds. (default="
__MODULE_STRING(CDNS_WDT_DEFAULT_TIMEOUT) ")");
module_param(nowayout, int, 0644);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/**
* struct cdns_wdt - Watchdog device structure
* @regs: baseaddress of device
* @rst: reset flag
* @clk: struct clk * of a clock source
* @prescaler: for saving prescaler value
* @ctrl_clksel: counter clock prescaler selection
* @io_lock: spinlock for IO register access
* @cdns_wdt_device: watchdog device structure
*
* Structure containing parameters specific to cadence watchdog.
*/
struct cdns_wdt {
void __iomem *regs;
bool rst;
struct clk *clk;
u32 prescaler;
u32 ctrl_clksel;
spinlock_t io_lock;
struct watchdog_device cdns_wdt_device;
};
/* Write access to Registers (relaxed: no ordering barrier implied). */
static inline void cdns_wdt_writereg(struct cdns_wdt *wdt, u32 offset, u32 val)
{
	writel_relaxed(val, wdt->regs + offset);
}
/*************************Register Map**************************************/
/* Register Offsets for the WDT */
#define CDNS_WDT_ZMR_OFFSET 0x0 /* Zero Mode Register */
#define CDNS_WDT_CCR_OFFSET 0x4 /* Counter Control Register */
#define CDNS_WDT_RESTART_OFFSET 0x8 /* Restart Register */
#define CDNS_WDT_SR_OFFSET 0xC /* Status Register */
/*
* Zero Mode Register - This register controls how the time out is indicated
* and also contains the access code to allow writes to the register (0xABC).
*/
#define CDNS_WDT_ZMR_WDEN_MASK 0x00000001 /* Enable the WDT */
#define CDNS_WDT_ZMR_RSTEN_MASK 0x00000002 /* Enable the reset output */
#define CDNS_WDT_ZMR_IRQEN_MASK 0x00000004 /* Enable IRQ output */
#define CDNS_WDT_ZMR_RSTLEN_16 0x00000030 /* Reset pulse of 16 pclk cycles */
#define CDNS_WDT_ZMR_ZKEY_VAL 0x00ABC000 /* Access key, 0xABC << 12 */
/*
* Counter Control register - This register controls how fast the timer runs
* and the reset value and also contains the access code to allow writes to
* the register.
*/
#define CDNS_WDT_CCR_CRV_MASK 0x00003FFC /* Counter reset value */
/**
* cdns_wdt_stop - Stop the watchdog.
*
* @wdd: watchdog device
*
* Read the contents of the ZMR register, clear the WDEN bit
* in the register and set the access key for successful write.
*
* Return: always 0
*/
static int cdns_wdt_stop(struct watchdog_device *wdd)
{
struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
spin_lock(&wdt->io_lock);
cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
CDNS_WDT_ZMR_ZKEY_VAL & (~CDNS_WDT_ZMR_WDEN_MASK));
spin_unlock(&wdt->io_lock);
return 0;
}
/**
* cdns_wdt_reload - Reload the watchdog timer (i.e. pat the watchdog).
*
* @wdd: watchdog device
*
* Write the restart key value (0x00001999) to the restart register.
*
* Return: always 0
*/
static int cdns_wdt_reload(struct watchdog_device *wdd)
{
	struct cdns_wdt *priv = watchdog_get_drvdata(wdd);

	/* Writing the magic key to the restart register pets the dog. */
	spin_lock(&priv->io_lock);
	cdns_wdt_writereg(priv, CDNS_WDT_RESTART_OFFSET,
			  CDNS_WDT_RESTART_KEY);
	spin_unlock(&priv->io_lock);

	return 0;
}
/**
* cdns_wdt_start - Enable and start the watchdog.
*
* @wdd: watchdog device
*
* The counter value is calculated according to the formula:
* calculated count = (timeout * clock) / prescaler + 1.
* The calculated count is divided by 0x1000 to obtain the field value
* to write to counter control register.
* Clears the contents of prescaler and counter reset value. Sets the
* prescaler to 4096 and the calculated count and access key
* to write to CCR Register.
* Sets the WDT (WDEN bit) and either the Reset signal(RSTEN bit)
* or Interrupt signal(IRQEN) with a specified cycles and the access
* key to write to ZMR Register.
*
* Return: always 0
*/
static int cdns_wdt_start(struct watchdog_device *wdd)
{
	struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
	unsigned int data = 0;
	unsigned short count;
	unsigned long clock_f = clk_get_rate(wdt->clk);

	/*
	 * Counter value divisor to obtain the value of
	 * counter reset to be written to control register.
	 */
	count = (wdd->timeout * (clock_f / wdt->prescaler)) /
		CDNS_WDT_COUNTER_VALUE_DIVISOR + 1;

	/* Clamp to the 12-bit hardware maximum. */
	if (count > CDNS_WDT_COUNTER_MAX)
		count = CDNS_WDT_COUNTER_MAX;

	spin_lock(&wdt->io_lock);
	/* Unlock ZMR with the access key before reprogramming. */
	cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
			  CDNS_WDT_ZMR_ZKEY_VAL);

	/* Shift into the CRV field (bits 13:2 of CCR). */
	count = (count << 2) & CDNS_WDT_CCR_CRV_MASK;

	/* Write counter access key first to be able write to register */
	data = count | CDNS_WDT_REGISTER_ACCESS_KEY | wdt->ctrl_clksel;
	cdns_wdt_writereg(wdt, CDNS_WDT_CCR_OFFSET, data);
	data = CDNS_WDT_ZMR_WDEN_MASK | CDNS_WDT_ZMR_RSTLEN_16 |
	       CDNS_WDT_ZMR_ZKEY_VAL;

	/* Reset on timeout if specified in device tree. */
	if (wdt->rst) {
		data |= CDNS_WDT_ZMR_RSTEN_MASK;
		data &= ~CDNS_WDT_ZMR_IRQEN_MASK;
	} else {
		data &= ~CDNS_WDT_ZMR_RSTEN_MASK;
		data |= CDNS_WDT_ZMR_IRQEN_MASK;
	}

	cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, data);
	/* Restart the counter so the new period takes effect immediately. */
	cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET,
			  CDNS_WDT_RESTART_KEY);
	spin_unlock(&wdt->io_lock);

	return 0;
}
/**
* cdns_wdt_settimeout - Set a new timeout value for the watchdog device.
*
* @wdd: watchdog device
* @new_time: new timeout value that needs to be set
* Return: 0 on success
*
* Update the watchdog_device timeout with new value which is used when
* cdns_wdt_start is called.
*/
static int cdns_wdt_settimeout(struct watchdog_device *wdd,
			       unsigned int new_time)
{
	/* Store the new timeout and reprogram the hardware right away. */
	wdd->timeout = new_time;

	return cdns_wdt_start(wdd);
}
/**
* cdns_wdt_irq_handler - Notifies of watchdog timeout.
*
* @irq: interrupt number
* @dev_id: pointer to a platform device structure
* Return: IRQ_HANDLED
*
* The handler is invoked when the watchdog times out and a
* reset on timeout has not been enabled.
*/
static irqreturn_t cdns_wdt_irq_handler(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;

	/* Informational only: no hardware state is cleared here. */
	dev_info(&pdev->dev,
		 "Watchdog timed out. Internal reset not enabled\n");

	return IRQ_HANDLED;
}
/*
* Info structure used to indicate the features supported by the device
* to the upper layers. This is defined in watchdog.h header file.
*/
static const struct watchdog_info cdns_wdt_info = {
.identity = "cdns_wdt watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
/* Watchdog Core Ops */
static const struct watchdog_ops cdns_wdt_ops = {
.owner = THIS_MODULE,
.start = cdns_wdt_start,
.stop = cdns_wdt_stop,
.ping = cdns_wdt_reload,
.set_timeout = cdns_wdt_settimeout,
};
/* devm_add_action_or_reset() callback: undo clk_prepare_enable(). */
static void cdns_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}
/************************Platform Operations*****************************/
/**
* cdns_wdt_probe - Probe call for the device.
*
* @pdev: handle to the platform device structure.
* Return: 0 on success, negative error otherwise.
*
* It does all the memory allocation and registration for the device.
*/
/**
 * cdns_wdt_probe - Probe call for the device.
 *
 * @pdev: handle to the platform device structure.
 * Return: 0 on success, negative error otherwise.
 *
 * It does all the memory allocation and registration for the device.
 * All resources are devm-managed, so error paths can simply return.
 */
static int cdns_wdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret, irq;
	unsigned long clock_f;
	struct cdns_wdt *wdt;
	struct watchdog_device *cdns_wdt_device;

	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	cdns_wdt_device = &wdt->cdns_wdt_device;
	cdns_wdt_device->info = &cdns_wdt_info;
	cdns_wdt_device->ops = &cdns_wdt_ops;
	cdns_wdt_device->timeout = CDNS_WDT_DEFAULT_TIMEOUT;
	cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT;
	cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT;

	wdt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(wdt->regs))
		return PTR_ERR(wdt->regs);

	/* Register the interrupt (only needed when not resetting on
	 * timeout; with reset-on-timeout the SoC reboots instead). */
	wdt->rst = of_property_read_bool(dev->of_node, "reset-on-timeout");
	irq = platform_get_irq(pdev, 0);
	if (!wdt->rst && irq >= 0) {
		ret = devm_request_irq(dev, irq, cdns_wdt_irq_handler, 0,
				       pdev->name, pdev);
		if (ret) {
			dev_err(dev,
				"cannot register interrupt handler err=%d\n",
				ret);
			return ret;
		}
	}

	/* Initialize the members of cdns_wdt structure */
	cdns_wdt_device->parent = dev;

	watchdog_init_timeout(cdns_wdt_device, wdt_timeout, dev);
	watchdog_set_nowayout(cdns_wdt_device, nowayout);
	watchdog_set_drvdata(cdns_wdt_device, wdt);

	wdt->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(wdt->clk)) {
		ret = PTR_ERR(wdt->clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "input clock not found\n");
		return ret;
	}

	ret = clk_prepare_enable(wdt->clk);
	if (ret) {
		dev_err(dev, "unable to enable clock\n");
		return ret;
	}
	ret = devm_add_action_or_reset(dev, cdns_clk_disable_unprepare,
				       wdt->clk);
	if (ret)
		return ret;

	/* Pick the prescaler so the 12-bit counter covers the timeout. */
	clock_f = clk_get_rate(wdt->clk);
	if (clock_f <= CDNS_WDT_CLK_75MHZ) {
		wdt->prescaler = CDNS_WDT_PRESCALE_512;
		wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_512;
	} else {
		wdt->prescaler = CDNS_WDT_PRESCALE_4096;
		wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_4096;
	}

	spin_lock_init(&wdt->io_lock);

	/*
	 * FIX: watchdog_stop_on_reboot() was previously called twice
	 * (once earlier in probe and once here); a single call is
	 * sufficient to register the reboot notifier behavior.
	 */
	watchdog_stop_on_reboot(cdns_wdt_device);
	watchdog_stop_on_unregister(cdns_wdt_device);
	ret = devm_watchdog_register_device(dev, cdns_wdt_device);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, wdt);

	dev_info(dev, "Xilinx Watchdog Timer with timeout %ds%s\n",
		 cdns_wdt_device->timeout, nowayout ? ", nowayout" : "");

	return 0;
}
/**
* cdns_wdt_suspend - Stop the device.
*
* @dev: handle to the device structure.
* Return: 0 always.
*/
/*
 * cdns_wdt_suspend - Stop the device.
 *
 * @dev: handle to the device structure.
 * Return: 0 always.
 */
static int __maybe_unused cdns_wdt_suspend(struct device *dev)
{
	struct cdns_wdt *wdt = dev_get_drvdata(dev);

	/* Nothing to do unless the watchdog is actually running. */
	if (!watchdog_active(&wdt->cdns_wdt_device))
		return 0;

	cdns_wdt_stop(&wdt->cdns_wdt_device);
	clk_disable_unprepare(wdt->clk);

	return 0;
}
/**
* cdns_wdt_resume - Resume the device.
*
* @dev: handle to the device structure.
* Return: 0 on success, errno otherwise.
*/
/*
 * cdns_wdt_resume - Resume the device.
 *
 * @dev: handle to the device structure.
 * Return: 0 on success, errno otherwise.
 */
static int __maybe_unused cdns_wdt_resume(struct device *dev)
{
	struct cdns_wdt *wdt = dev_get_drvdata(dev);
	int ret;

	/* Only restart hardware that was running before suspend. */
	if (!watchdog_active(&wdt->cdns_wdt_device))
		return 0;

	ret = clk_prepare_enable(wdt->clk);
	if (ret) {
		dev_err(dev, "unable to enable clock\n");
		return ret;
	}
	cdns_wdt_start(&wdt->cdns_wdt_device);

	return 0;
}
static SIMPLE_DEV_PM_OPS(cdns_wdt_pm_ops, cdns_wdt_suspend, cdns_wdt_resume);
static const struct of_device_id cdns_wdt_of_match[] = {
{ .compatible = "cdns,wdt-r1p2", },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, cdns_wdt_of_match);
/* Driver Structure */
static struct platform_driver cdns_wdt_driver = {
.probe = cdns_wdt_probe,
.driver = {
.name = "cdns-wdt",
.of_match_table = cdns_wdt_of_match,
.pm = &cdns_wdt_pm_ops,
},
};
module_platform_driver(cdns_wdt_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Watchdog driver for Cadence WDT");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bftg/gcc-5.3.0 | gcc/testsuite/gcc.target/i386/avx512bw-vpmulhw-1.c | 91 | 1621 | /* { dg-do compile } */
/* { dg-options "-mavx512bw -mavx512vl -O2" } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpmulhw\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m512i x, y, z;
volatile __m256i xq, yq, zq;
volatile __m128i xw, yw, zw;
/* Exercise every vpmulhw variant counted by the dg-final scans above. */
void extern
avx512bw_test (void)
{
  /* 512-bit forms: plain, write-masked and zero-masked.  */
  x = _mm512_mulhi_epi16 (y, z);
  x = _mm512_mask_mulhi_epi16 (x, 2, y, z);
  x = _mm512_maskz_mulhi_epi16 (2, y, z);
  /* 256-bit AVX512VL forms.  */
  xq = _mm256_mask_mulhi_epi16 (xq, 2, yq, zq);
  xq = _mm256_maskz_mulhi_epi16 (2, yq, zq);
  /* 128-bit AVX512VL forms.  */
  xw = _mm_mask_mulhi_epi16 (xw, 2, yw, zw);
  xw = _mm_maskz_mulhi_epi16 (2, yw, zw);
}
| gpl-2.0 |
aospan/linux-stable-netup-universal-dvb-1.4 | drivers/video/backlight/adp8870_bl.c | 347 | 26790 | /*
* Backlight driver for Analog Devices ADP8870 Backlight Devices
*
* Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/i2c/adp8870.h>
#define ADP8870_EXT_FEATURES
#define ADP8870_USE_LEDS
#define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */
#define ADP8870_MDCR 0x01 /* Device mode and status */
#define ADP8870_INT_STAT 0x02 /* Interrupts status */
#define ADP8870_INT_EN 0x03 /* Interrupts enable */
#define ADP8870_CFGR 0x04 /* Configuration register */
#define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */
#define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */
#define ADP8870_BLOFF 0x07 /* Backlight off timeout */
#define ADP8870_BLDIM 0x08 /* Backlight dim timeout */
#define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */
#define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */
#define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */
#define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */
#define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */
#define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */
#define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */
#define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */
#define ADP8870_BLDM4 0x11 /* Backlight (Brightness Level 4-indoor) dim current */
#define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */
#define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */
#define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */
#define ADP8870_ISCC 0x1B /* Independent sink current control register */
#define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */
#define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */
#define ADP8870_ISCF 0x1E /* Independent sink current fade register */
#define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */
#define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */
#define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */
#define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */
#define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */
#define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */
#define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
#define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */
#define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */
#define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
#define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */
#define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */
#define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */
#define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */
#define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */
#define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */
#define ADP8870_L2TRP 0x32 /* L2 comparator reference */
#define ADP8870_L2HYS 0x33 /* L2 hysteresis */
#define ADP8870_L3TRP 0x34 /* L3 comparator reference */
#define ADP8870_L3HYS 0x35 /* L3 hysteresis */
#define ADP8870_L4TRP 0x36 /* L4 comparator reference */
#define ADP8870_L4HYS 0x37 /* L4 hysteresis */
#define ADP8870_L5TRP 0x38 /* L5 comparator reference */
#define ADP8870_L5HYS 0x39 /* L5 hysteresis */
#define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */
#define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */
#define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */
#define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */
#define ADP8870_MANUFID 0x3 /* Analog Devices AD8870 Manufacturer and device ID */
#define ADP8870_DEVID(x) ((x) & 0xF)
#define ADP8870_MANID(x) ((x) >> 4)
/* MDCR Device mode and status */
#define D7ALSEN (1 << 7)
#define INT_CFG (1 << 6)
#define NSTBY (1 << 5)
#define DIM_EN (1 << 4)
#define GDWN_DIS (1 << 3)
#define SIS_EN (1 << 2)
#define CMP_AUTOEN (1 << 1)
#define BLEN (1 << 0)
/* ADP8870_ALS1_EN Main ALS comparator level enable */
#define L5_EN (1 << 3)
#define L4_EN (1 << 2)
#define L3_EN (1 << 1)
#define L2_EN (1 << 0)
#define CFGR_BLV_SHIFT 3
#define CFGR_BLV_MASK 0x7
#define ADP8870_FLAG_LED_MASK 0xFF
#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
/* Per-device driver state; stored as I2C clientdata in adp8870_probe(). */
struct adp8870_bl {
	struct i2c_client *client;
	struct backlight_device *bl;
	struct adp8870_led *led;	/* LED array allocated in adp8870_led_probe() */
	struct adp8870_backlight_platform_data *pdata;
	struct mutex lock;		/* serializes register read-modify-write cycles */
	unsigned long cached_daylight_max;	/* BLMX1 value restored when ALS auto mode re-enabled */
	int id;
	int revid;			/* chip revision from ADP8870_MFDVID; rev0 needs GDWN_DIS */
	int current_brightness;
};
/* Per-LED state for the independent-sink LED class devices. */
struct adp8870_led {
	struct led_classdev cdev;
	struct work_struct work;	/* defers I2C writes out of brightness_set context */
	struct i2c_client *client;
	enum led_brightness new_brightness;	/* value pending for adp8870_led_work() */
	int id;				/* 1-based sink number (1..7) */
	int flags;			/* platform flags shifted down by FLAG_OFFT_SHIFT */
};
/*
 * adp8870_read() - read one chip register over SMBus.
 * On success the byte is stored in *val and 0 is returned;
 * otherwise the negative errno from the bus transfer is returned.
 */
static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
{
	int ret = i2c_smbus_read_byte_data(client, reg);

	if (ret < 0) {
		dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
		return ret;
	}

	*val = ret;

	return 0;
}
/*
 * adp8870_write() - write one chip register over SMBus.
 * Returns 0 on success or the negative errno from the bus transfer.
 */
static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
{
	int ret;

	ret = i2c_smbus_write_byte_data(client, reg, val);
	if (ret)
		dev_err(&client->dev, "failed to write\n");

	return ret;
}
/*
 * adp8870_set_bits() - OR @bit_mask into register @reg under the device lock.
 * Read-modify-write; the write is skipped when all requested bits are
 * already set. Returns 0 on success or a negative errno.
 *
 * Fix: the address-of expression had been corrupted to the mojibake
 * "®_val" (an HTML-entity-decoded "&reg"); restored to "&reg_val".
 */
static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
{
	struct adp8870_bl *data = i2c_get_clientdata(client);
	uint8_t reg_val;
	int ret;

	mutex_lock(&data->lock);
	ret = adp8870_read(client, reg, &reg_val);
	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
		reg_val |= bit_mask;
		ret = adp8870_write(client, reg, reg_val);
	}
	mutex_unlock(&data->lock);

	return ret;
}
/*
 * adp8870_clr_bits() - clear @bit_mask in register @reg under the device lock.
 * Read-modify-write; the write is skipped when none of the bits are set.
 * Returns 0 on success or a negative errno.
 *
 * Fix: restored "&reg_val" from the mojibake "®_val".
 */
static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
{
	struct adp8870_bl *data = i2c_get_clientdata(client);
	uint8_t reg_val;
	int ret;

	mutex_lock(&data->lock);
	ret = adp8870_read(client, reg, &reg_val);
	if (!ret && (reg_val & bit_mask)) {
		reg_val &= ~bit_mask;
		ret = adp8870_write(client, reg, reg_val);
	}
	mutex_unlock(&data->lock);

	return ret;
}
/*
* Independent sink / LED
*/
#if defined(ADP8870_USE_LEDS)
/* Deferred I2C write scheduled by adp8870_led_set(). LED ids are 1-based,
 * so sink register is ADP8870_ISC1 + id - 1. The brightness is shifted
 * right by one — presumably the ISC current field is 7 bits wide; TODO
 * confirm against the ADP8870 datasheet. */
static void adp8870_led_work(struct work_struct *work)
{
	struct adp8870_led *led = container_of(work, struct adp8870_led, work);
	adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
		led->new_brightness >> 1);
}
/*
 * adp8870_led_set() - LED class brightness_set callback.
 * Records the requested level and defers the register write to a
 * workqueue, since I2C transfers can sleep and this callback may run
 * in atomic context.
 */
static void adp8870_led_set(struct led_classdev *led_cdev,
			    enum led_brightness value)
{
	struct adp8870_led *led =
		container_of(led_cdev, struct adp8870_led, cdev);

	led->new_brightness = value;
	schedule_work(&led->work);
}
/* Program one independent-sink LED: zero its current, enable the sink in
 * ISCC, and set its on-time bits. LEDs 5-7 live in ISCT1, LEDs 1-4 in
 * ISCT2, two flag bits per LED. Returns 0 or a negative errno. */
static int adp8870_led_setup(struct adp8870_led *led)
{
	struct i2c_client *client = led->client;
	int ret = 0;

	/* Start from zero current for this sink. */
	ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
	if (ret)
		return ret;

	/* Enable the sink (bit position is id - 1). */
	ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
	if (ret)
		return ret;

	/* On-time flags: ids 5-7 -> ISCT1, ids 1-4 -> ISCT2. */
	if (led->id > 4)
		ret = adp8870_set_bits(client, ADP8870_ISCT1,
			(led->flags & 0x3) << ((led->id - 5) * 2));
	else
		ret = adp8870_set_bits(client, ADP8870_ISCT2,
			(led->flags & 0x3) << ((led->id - 1) * 2));

	return ret;
}
/* Register all platform-described LEDs as LED class devices.
 * Validates each LED id (1..7) and rejects sinks already claimed by the
 * backlight. On failure, unwinds every previously registered classdev.
 * Returns 0 or a negative errno. */
static int adp8870_led_probe(struct i2c_client *client)
{
	struct adp8870_backlight_platform_data *pdata =
		dev_get_platdata(&client->dev);
	struct adp8870_bl *data = i2c_get_clientdata(client);
	struct adp8870_led *led, *led_dat;
	struct led_info *cur_led;
	int ret, i;

	led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
				GFP_KERNEL);
	if (led == NULL)
		return -ENOMEM;

	ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
	if (ret)
		return ret;

	ret = adp8870_write(client, ADP8870_ISCT1,
			(pdata->led_on_time & 0x3) << 6);
	if (ret)
		return ret;

	ret = adp8870_write(client, ADP8870_ISCF,
			FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
	if (ret)
		return ret;

	for (i = 0; i < pdata->num_leds; ++i) {
		cur_led = &pdata->leds[i];
		led_dat = &led[i];
		led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
		if (led_dat->id > 7 || led_dat->id < 1) {
			dev_err(&client->dev, "Invalid LED ID %d\n",
				led_dat->id);
			ret = -EINVAL;
			goto err;
		}
		if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
			dev_err(&client->dev, "LED %d used by Backlight\n",
				led_dat->id);
			ret = -EBUSY;
			goto err;
		}
		led_dat->cdev.name = cur_led->name;
		led_dat->cdev.default_trigger = cur_led->default_trigger;
		led_dat->cdev.brightness_set = adp8870_led_set;
		led_dat->cdev.brightness = LED_OFF;
		led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
		led_dat->client = client;
		led_dat->new_brightness = LED_OFF;
		INIT_WORK(&led_dat->work, adp8870_led_work);
		ret = led_classdev_register(&client->dev, &led_dat->cdev);
		if (ret) {
			dev_err(&client->dev, "failed to register LED %d\n",
				led_dat->id);
			goto err;
		}
		ret = adp8870_led_setup(led_dat);
		if (ret) {
			dev_err(&client->dev, "failed to write\n");
			/* classdev i is registered; bump i so the unwind
			 * loop below unregisters it too. */
			i++;
			goto err;
		}
	}
	data->led = led;
	return 0;

 err:
	/* Unwind: entries [0, i-1] hold registered classdevs. */
	for (i = i - 1; i >= 0; --i) {
		led_classdev_unregister(&led[i].cdev);
		cancel_work_sync(&led[i].work);
	}
	return ret;
}
static int adp8870_led_remove(struct i2c_client *client)
{
struct adp8870_backlight_platform_data *pdata =
dev_get_platdata(&client->dev);
struct adp8870_bl *data = i2c_get_clientdata(client);
int i;
for (i = 0; i < pdata->num_leds; i++) {
led_classdev_unregister(&data->led[i].cdev);
cancel_work_sync(&data->led[i].work);
}
return 0;
}
#else
/* LED class support compiled out: probe/remove become successful no-ops. */
static int adp8870_led_probe(struct i2c_client *client)
{
	return 0;
}
static int adp8870_led_remove(struct i2c_client *client)
{
	return 0;
}
#endif
/* Apply a backlight brightness. With the ambient-light sensor enabled,
 * a mid-range value disables ALS auto-adjust and writes BLMX1 directly,
 * while 0 or max re-enables auto-adjust after restoring the cached
 * daylight maximum. Also toggles DIM_EN on off<->on transitions.
 * The write order matters (value before mode bit); do not reorder. */
static int adp8870_bl_set(struct backlight_device *bl, int brightness)
{
	struct adp8870_bl *data = bl_get_data(bl);
	struct i2c_client *client = data->client;
	int ret = 0;

	if (data->pdata->en_ambl_sens) {
		if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
			/* Disable Ambient Light auto adjust */
			ret = adp8870_clr_bits(client, ADP8870_MDCR,
				CMP_AUTOEN);
			if (ret)
				return ret;
			ret = adp8870_write(client, ADP8870_BLMX1, brightness);
			if (ret)
				return ret;
		} else {
			/*
			 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
			 * restore daylight l1 sysfs brightness
			 */
			ret = adp8870_write(client, ADP8870_BLMX1,
					 data->cached_daylight_max);
			if (ret)
				return ret;

			ret = adp8870_set_bits(client, ADP8870_MDCR,
					 CMP_AUTOEN);
			if (ret)
				return ret;
		}
	} else {
		ret = adp8870_write(client, ADP8870_BLMX1, brightness);
		if (ret)
			return ret;
	}

	/* Enable dimming when turning off, disable it when turning on. */
	if (data->current_brightness && brightness == 0)
		ret = adp8870_set_bits(client,
			ADP8870_MDCR, DIM_EN);
	else if (data->current_brightness == 0 && brightness)
		ret = adp8870_clr_bits(client,
			ADP8870_MDCR, DIM_EN);

	if (!ret)
		data->current_brightness = brightness;

	return ret;
}
/*
 * adp8870_bl_update_status() - backlight core callback.
 * Forces brightness to zero while the framebuffer is blanked or
 * powered down, then delegates to adp8870_bl_set().
 */
static int adp8870_bl_update_status(struct backlight_device *bl)
{
	int brightness = bl->props.brightness;

	if (bl->props.power != FB_BLANK_UNBLANK ||
	    bl->props.fb_blank != FB_BLANK_UNBLANK)
		brightness = 0;

	return adp8870_bl_set(bl, brightness);
}
static int adp8870_bl_get_brightness(struct backlight_device *bl)
{
struct adp8870_bl *data = bl_get_data(bl);
return data->current_brightness;
}
/* Backlight core operations for this device. */
static const struct backlight_ops adp8870_bl_ops = {
	.update_status	= adp8870_bl_update_status,
	.get_brightness	= adp8870_bl_get_brightness,
};
static int adp8870_bl_setup(struct backlight_device *bl)
{
struct adp8870_bl *data = bl_get_data(bl);
struct i2c_client *client = data->client;
struct adp8870_backlight_platform_data *pdata = data->pdata;
int ret = 0;
ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
if (ret)
return ret;
if (pdata->en_ambl_sens) {
data->cached_daylight_max = pdata->l1_daylight_max;
ret = adp8870_write(client, ADP8870_BLMX2,
pdata->l2_bright_max);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLDM2,
pdata->l2_bright_dim);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLMX3,
pdata->l3_office_max);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLDM3,
pdata->l3_office_dim);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLMX4,
pdata->l4_indoor_max);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLDM4,
pdata->l4_indor_dim);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLMX5,
pdata->l5_dark_max);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLDM5,
pdata->l5_dark_dim);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
L3_EN | L2_EN);
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_CMP_CTL,
ALS_CMPR_CFG_VAL(pdata->abml_filt));
if (ret)
return ret;
}
ret = adp8870_write(client, ADP8870_CFGR,
BL_CFGR_VAL(pdata->bl_fade_law, 0));
if (ret)
return ret;
ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
pdata->bl_fade_out));
if (ret)
return ret;
/*
* ADP8870 Rev0 requires GDWN_DIS bit set
*/
ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
(data->revid == 0 ? GDWN_DIS : 0));
return ret;
}
static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
int error;
uint8_t reg_val;
mutex_lock(&data->lock);
error = adp8870_read(data->client, reg, ®_val);
mutex_unlock(&data->lock);
if (error < 0)
return error;
return sprintf(buf, "%u\n", reg_val);
}
/* Common sysfs helper: parse a decimal value and write it to @reg.
 * Returns @count on (apparent) success or a negative errno from parsing.
 * NOTE(review): the adp8870_write() status is ignored, so an I2C failure
 * is reported to userspace as success — confirm whether intentional. */
static ssize_t adp8870_store(struct device *dev, const char *buf,
			 size_t count, int reg)
{
	struct adp8870_bl *data = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&data->lock);
	adp8870_write(data->client, reg, val);
	mutex_unlock(&data->lock);

	return count;
}
/*
 * sysfs attributes for the per-zone brightness registers.
 * Each zone (l1 daylight .. l5 dark) exposes a maximum-current (BLMXn)
 * and a dim-current (BLDMn) file; all are thin wrappers around
 * adp8870_show()/adp8870_store(). l1_daylight_max additionally updates
 * cached_daylight_max, which adp8870_bl_set() restores when re-enabling
 * ALS auto-adjust.
 */
static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLMX5);
}
static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLMX5);
}
static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
			adp8870_bl_l5_dark_max_store);
static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLMX4);
}
static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLMX4);
}
static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
			adp8870_bl_l4_indoor_max_store);
static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLMX3);
}
static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLMX3);
}
static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
			adp8870_bl_l3_office_max_store);
static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLMX2);
}
static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLMX2);
}
static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
			adp8870_bl_l2_bright_max_store);
static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLMX1);
}
/* Also caches the value so ALS auto-adjust can restore it later. */
static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t count)
{
	struct adp8870_bl *data = dev_get_drvdata(dev);
	int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
	if (ret)
		return ret;
	return adp8870_store(dev, buf, count, ADP8870_BLMX1);
}
static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
			adp8870_bl_l1_daylight_max_store);
static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLDM5);
}
static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLDM5);
}
static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
			adp8870_bl_l5_dark_dim_store);
static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLDM4);
}
static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLDM4);
}
static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
			adp8870_bl_l4_indoor_dim_store);
static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLDM3);
}
static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLDM3);
}
static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
			adp8870_bl_l3_office_dim_store);
static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLDM2);
}
static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLDM2);
}
static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
			adp8870_bl_l2_bright_dim_store);
static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return adp8870_show(dev, buf, ADP8870_BLDM1);
}
static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp8870_store(dev, buf, count, ADP8870_BLDM1);
}
static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
			adp8870_bl_l1_daylight_dim_store);
#ifdef ADP8870_EXT_FEATURES
static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
int error;
uint8_t reg_val;
uint16_t ret_val;
mutex_lock(&data->lock);
error = adp8870_read(data->client, ADP8870_PH1LEVL, ®_val);
if (error < 0) {
mutex_unlock(&data->lock);
return error;
}
ret_val = reg_val;
error = adp8870_read(data->client, ADP8870_PH1LEVH, ®_val);
mutex_unlock(&data->lock);
if (error < 0)
return error;
/* Return 13-bit conversion value for the first light sensor */
ret_val += (reg_val & 0x1F) << 8;
return sprintf(buf, "%u\n", ret_val);
}
static DEVICE_ATTR(ambient_light_level, 0444,
adp8870_bl_ambient_light_level_show, NULL);
static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
int error;
uint8_t reg_val;
mutex_lock(&data->lock);
error = adp8870_read(data->client, ADP8870_CFGR, ®_val);
mutex_unlock(&data->lock);
if (error < 0)
return error;
return sprintf(buf, "%u\n",
((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
}
/*
 * Select the ambient light zone. Writing 0 re-enables automatic sensing
 * (CMP_AUTOEN); 1..5 disables it and forces the BLV field of CFGR to the
 * requested zone. Out-of-range values are silently ignored.
 *
 * Fix: restored "&reg_val" from the mojibake "®_val".
 * NOTE(review): the set/clr_bits and write statuses are ignored — a bus
 * error is reported as success; confirm whether intentional.
 */
static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adp8870_bl *data = dev_get_drvdata(dev);
	unsigned long val;
	uint8_t reg_val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val == 0) {
		/* Enable automatic ambient light sensing */
		adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
	} else if ((val > 0) && (val < 6)) {
		/* Disable automatic ambient light sensing */
		adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);

		/* Set user supplied ambient light zone */
		mutex_lock(&data->lock);
		ret = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
		if (!ret) {
			reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
			reg_val |= (val - 1) << CFGR_BLV_SHIFT;
			adp8870_write(data->client, ADP8870_CFGR, reg_val);
		}
		mutex_unlock(&data->lock);
	}

	return count;
}
static DEVICE_ATTR(ambient_light_zone, 0664,
adp8870_bl_ambient_light_zone_show,
adp8870_bl_ambient_light_zone_store);
#endif
/* Attribute group registered only when the ambient light sensor is
 * enabled (see adp8870_probe()). */
static struct attribute *adp8870_bl_attributes[] = {
	&dev_attr_l5_dark_max.attr,
	&dev_attr_l5_dark_dim.attr,
	&dev_attr_l4_indoor_max.attr,
	&dev_attr_l4_indoor_dim.attr,
	&dev_attr_l3_office_max.attr,
	&dev_attr_l3_office_dim.attr,
	&dev_attr_l2_bright_max.attr,
	&dev_attr_l2_bright_dim.attr,
	&dev_attr_l1_daylight_max.attr,
	&dev_attr_l1_daylight_dim.attr,
#ifdef ADP8870_EXT_FEATURES
	&dev_attr_ambient_light_level.attr,
	&dev_attr_ambient_light_zone.attr,
#endif
	NULL
};

static const struct attribute_group adp8870_bl_attr_group = {
	.attrs = adp8870_bl_attributes,
};
/*
 * adp8870_probe() - I2C probe: verify the chip ID, allocate driver state,
 * register the backlight device (and optional sysfs group and LEDs),
 * then program the hardware via adp8870_bl_setup().
 *
 * Fix: restored "&reg_val" from the mojibake "®_val".
 */
static int adp8870_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct backlight_properties props;
	struct backlight_device *bl;
	struct adp8870_bl *data;
	struct adp8870_backlight_platform_data *pdata =
		dev_get_platdata(&client->dev);
	uint8_t reg_val;
	int ret;

	if (!i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
		return -EIO;
	}

	if (!pdata) {
		dev_err(&client->dev, "no platform data?\n");
		return -EINVAL;
	}

	/* Confirm we are talking to an ADP8870 before touching anything. */
	ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
	if (ret < 0)
		return -EIO;

	if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
		dev_err(&client->dev, "failed to probe\n");
		return -ENODEV;
	}

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->revid = ADP8870_DEVID(reg_val);
	data->client = client;
	data->pdata = pdata;
	data->id = id->driver_data;
	data->current_brightness = 0;
	i2c_set_clientdata(client, data);

	mutex_init(&data->lock);

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
	bl = devm_backlight_device_register(&client->dev,
					dev_driver_string(&client->dev),
					&client->dev, data, &adp8870_bl_ops,
					&props);
	if (IS_ERR(bl)) {
		dev_err(&client->dev, "failed to register backlight\n");
		return PTR_ERR(bl);
	}

	data->bl = bl;

	if (pdata->en_ambl_sens) {
		ret = sysfs_create_group(&bl->dev.kobj,
			&adp8870_bl_attr_group);
		if (ret) {
			dev_err(&client->dev, "failed to register sysfs\n");
			return ret;
		}
	}

	ret = adp8870_bl_setup(bl);
	if (ret) {
		ret = -EIO;
		goto out;
	}

	backlight_update_status(bl);

	dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);

	/* LED registration failure is non-fatal by design. */
	if (pdata->num_leds)
		adp8870_led_probe(client);

	return 0;

out:
	if (data->pdata->en_ambl_sens)
		sysfs_remove_group(&data->bl->dev.kobj,
			&adp8870_bl_attr_group);

	return ret;
}
/* I2C remove: put the chip in standby (clear NSTBY), then tear down the
 * LED class devices and the optional sysfs group. The backlight device
 * itself is devm-managed and needs no explicit unregister. */
static int adp8870_remove(struct i2c_client *client)
{
	struct adp8870_bl *data = i2c_get_clientdata(client);

	adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);

	if (data->led)
		adp8870_led_remove(client);

	if (data->pdata->en_ambl_sens)
		sysfs_remove_group(&data->bl->dev.kobj,
			&adp8870_bl_attr_group);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep: enter chip standby by clearing NSTBY. */
static int adp8870_i2c_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);

	return 0;
}

/* System resume: leave standby and re-enable the backlight (NSTBY | BLEN). */
static int adp8870_i2c_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(adp8870_i2c_pm_ops, adp8870_i2c_suspend,
adp8870_i2c_resume);
/* Supported I2C device ids and driver registration glue. */
static const struct i2c_device_id adp8870_id[] = {
	{ "adp8870", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adp8870_id);

static struct i2c_driver adp8870_driver = {
	.driver = {
		.name	= KBUILD_MODNAME,
		.pm	= &adp8870_i2c_pm_ops,
	},
	.probe    = adp8870_probe,
	.remove   = adp8870_remove,
	.id_table = adp8870_id,
};
module_i2c_driver(adp8870_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP8870 Backlight driver");
| gpl-2.0 |
penhoi/linux-3.13.11 | drivers/staging/comedi/drivers/pcmda12.c | 603 | 4879 | /*
* pcmda12.c
* Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: pcmda12
* Description: A driver for the Winsystems PCM-D/A-12
* Devices: (Winsystems) PCM-D/A-12 [pcmda12]
* Author: Calin Culianu <calin@ajvar.org>
* Updated: Fri, 13 Jan 2006 12:01:01 -0500
* Status: works
*
* A driver for the relatively straightforward-to-program PCM-D/A-12.
* This board doesn't support commands, and the only way to set its
* analog output range is to jumper the board. As such,
* comedi_data_write() ignores the range value specified.
*
* The board uses 16 consecutive I/O addresses starting at the I/O port
* base address. Each address corresponds to the LSB then MSB of a
* particular channel from 0-7.
*
* Note that the board is not ISA-PNP capable and thus needs the I/O
* port comedi_config parameter.
*
* Note that passing a nonzero value as the second config option will
* enable "simultaneous xfer" mode for this board, in which AO writes
* will not take effect until a subsequent read of any AO channel. This
* is so that one can speed up programming by preloading all AO registers
* with values before simultaneously setting them to take effect with one
* read command.
*
* Configuration Options:
* [0] - I/O port base address
* [1] - Do Simultaneous Xfer (see description)
*/
#include <linux/module.h>
#include "../comedidev.h"
/* AI range is not configurable, it's set by jumpers on the board */
/* Three jumper-selectable output ranges: 0-5V, 0-10V, +/-5V. The driver
 * cannot detect which one is jumpered; range selection here is advisory. */
static const struct comedi_lrange pcmda12_ranges = {
	3, {
		UNI_RANGE(5),
		UNI_RANGE(10),
		BIP_RANGE(5)
	}
};
/* Per-board state. */
struct pcmda12_private {
	unsigned int ao_readback[8];	/* last written value per channel (no HW readback) */
	int simultaneous_xfer_mode;	/* nonzero: writes latch only on a subsequent read */
};
/* Write AO samples: LSB then MSB to the channel's register pair. In
 * normal mode a dummy read of the register latches the value; in
 * simultaneous-xfer mode the value stays preloaded until a later read
 * (see pcmda12_ao_insn_read()). Returns the number of samples written. */
static int pcmda12_ao_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	struct pcmda12_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int val = devpriv->ao_readback[chan];
	unsigned long ioreg = dev->iobase + (chan * 2);
	int i;

	for (i = 0; i < insn->n; ++i) {
		val = data[i];
		outb(val & 0xff, ioreg);
		outb((val >> 8) & 0xff, ioreg + 1);

		/*
		 * Initiate transfer if not in simultaneaous xfer
		 * mode by reading one of the AO registers.
		 */
		if (!devpriv->simultaneous_xfer_mode)
			inb(ioreg);
	}
	devpriv->ao_readback[chan] = val;

	return insn->n;
}
/*
 * pcmda12_ao_insn_read() - return the cached last-written value for the
 * requested channel (the hardware has no readback path). In
 * simultaneous-xfer mode the initial dummy inb() latches all preloaded
 * AO values into the outputs.
 */
static int pcmda12_ao_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	struct pcmda12_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	int left = insn->n;

	/* Reading any AO register updates all outputs at once. */
	if (devpriv->simultaneous_xfer_mode)
		inb(dev->iobase);

	while (left--)
		*data++ = devpriv->ao_readback[chan];

	return insn->n;
}
/*
 * pcmda12_ao_reset() - zero every AO channel (LSB then MSB registers)
 * and latch the zeros by reading one of the AO registers.
 */
static void pcmda12_ao_reset(struct comedi_device *dev,
			     struct comedi_subdevice *s)
{
	unsigned long ioreg = dev->iobase;
	int chan;

	for (chan = 0; chan < s->n_chan; ++chan, ioreg += 2) {
		outb(0, ioreg);
		outb(0, ioreg + 1);
	}

	/* Initiate transfer by reading one of the AO registers. */
	inb(dev->iobase);
}
/* Legacy attach: claim the 16-byte I/O region, allocate private state,
 * configure the single 8-channel 12-bit AO subdevice, and zero all
 * outputs. Option [1] selects simultaneous-xfer mode. */
static int pcmda12_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct pcmda12_private *devpriv;
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	devpriv->simultaneous_xfer_mode = it->options[1];

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	s->type		= COMEDI_SUBD_AO;
	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE;
	s->n_chan	= 8;
	s->maxdata	= 0x0fff;	/* 12-bit DAC */
	s->range_table	= &pcmda12_ranges;
	s->insn_write	= pcmda12_ao_insn_write;
	s->insn_read	= pcmda12_ao_insn_read;

	pcmda12_ao_reset(dev, s);

	return 0;
}
/* Comedi driver registration; detach is the generic legacy region release. */
static struct comedi_driver pcmda12_driver = {
	.driver_name	= "pcmda12",
	.module		= THIS_MODULE,
	.attach		= pcmda12_attach,
	.detach		= comedi_legacy_detach,
};
module_comedi_driver(pcmda12_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Cold-D/linux | drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c | 859 | 18614 | /*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
* Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-i2c.h"
#include "mxl111sf.h"
/* SW-I2C ----------------------------------------------------------------- */
#define SW_I2C_ADDR 0x1a
#define SW_I2C_EN 0x02
#define SW_SCL_OUT 0x04
#define SW_SDA_OUT 0x08
#define SW_SDA_IN 0x04
#define SW_I2C_BUSY_ADDR 0x2f
#define SW_I2C_BUSY 0x02
/* Bit-bang one byte MSB-first on the software I2C port and sample the
 * slave's ACK. Each register write drives SCL/SDA GPIO levels; the exact
 * sequence (data, clock high, clock low) is hardware-timed and must not
 * be reordered. Returns 0 on ACK, -EIO on NAK, or a register I/O error. */
static int mxl111sf_i2c_bitbang_sendbyte(struct mxl111sf_state *state,
					 u8 byte)
{
	int i, ret;
	u8 data = 0;

	mxl_i2c("(0x%02x)", byte);

	ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
	if (mxl_fail(ret))
		goto fail;

	for (i = 0; i < 8; i++) {
		/* Drive SDA with the current bit, pulse SCL high then low. */
		data = (byte & (0x80 >> i)) ? SW_SDA_OUT : 0;

		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN | data);
		if (mxl_fail(ret))
			goto fail;

		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN | data | SW_SCL_OUT);
		if (mxl_fail(ret))
			goto fail;

		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN | data);
		if (mxl_fail(ret))
			goto fail;
	}

	/* last bit was 0 so we need to release SDA */
	if (!(byte & 1)) {
		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN | SW_SDA_OUT);
		if (mxl_fail(ret))
			goto fail;
	}

	/* CLK high for ACK readback */
	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
	if (mxl_fail(ret))
		goto fail;

	/* drop the CLK after getting ACK, SDA will go high right away */
	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN | SW_SDA_OUT);
	if (mxl_fail(ret))
		goto fail;

	/* SDA still high during the ACK slot means the slave NAK'd. */
	if (data & SW_SDA_IN)
		ret = -EIO;
fail:
	return ret;
}
/* Bit-bang-read one byte MSB-first from the software I2C port: release
 * SDA, then for each bit raise SCL, sample SDA via the busy/status
 * register, and drop SCL. Stores the byte in *pbyte; returns 0 or a
 * register I/O error. */
static int mxl111sf_i2c_bitbang_recvbyte(struct mxl111sf_state *state,
					 u8 *pbyte)
{
	int i, ret;
	u8 byte = 0;
	u8 data = 0;

	mxl_i2c("()");

	*pbyte = 0;

	/* Release SDA so the slave can drive it. */
	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN | SW_SDA_OUT);
	if (mxl_fail(ret))
		goto fail;

	for (i = 0; i < 8; i++) {
		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN |
					 SW_SCL_OUT | SW_SDA_OUT);
		if (mxl_fail(ret))
			goto fail;

		ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &data);
		if (mxl_fail(ret))
			goto fail;

		if (data & SW_SDA_IN)
			byte |= (0x80 >> i);

		ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
					 0x10 | SW_I2C_EN | SW_SDA_OUT);
		if (mxl_fail(ret))
			goto fail;
	}
	*pbyte = byte;
fail:
	return ret;
}
static int mxl111sf_i2c_start(struct mxl111sf_state *state)
{
int ret;
mxl_i2c("()");
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SCL_OUT);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN); /* start */
mxl_fail(ret);
fail:
return ret;
}
static int mxl111sf_i2c_stop(struct mxl111sf_state *state)
{
int ret;
mxl_i2c("()");
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN); /* stop */
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SCL_OUT);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_SCL_OUT | SW_SDA_OUT);
mxl_fail(ret);
fail:
return ret;
}
/*
 * ACK a byte received from the slave: drive SDA low, pulse SCL once,
 * then release SDA again.
 */
static int mxl111sf_i2c_ack(struct mxl111sf_state *state)
{
	int ret;
	u8 b = 0;

	mxl_i2c("()");

	/* NOTE(review): 'b' is read but never used afterwards; the read
	 * appears to act only as an error check — confirm */
	ret = mxl111sf_read_reg(state, SW_I2C_BUSY_ADDR, &b);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN);
	if (mxl_fail(ret))
		goto fail;

	/* pull SDA low */
	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN | SW_SCL_OUT);
	if (mxl_fail(ret))
		goto fail;

	/* release SDA for the next bit */
	ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
				 0x10 | SW_I2C_EN | SW_SDA_OUT);
	mxl_fail(ret);
fail:
	return ret;
}
static int mxl111sf_i2c_nack(struct mxl111sf_state *state)
{
int ret;
mxl_i2c("()");
/* SDA high to signal last byte read from slave */
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SCL_OUT | SW_SDA_OUT);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_write_reg(state, SW_I2C_ADDR,
0x10 | SW_I2C_EN | SW_SDA_OUT);
mxl_fail(ret);
fail:
return ret;
}
/* ------------------------------------------------------------------------ */
/*
 * Perform one i2c_msg via the software bit-bang engine: START, address
 * byte (R/W bit chosen from msg->flags), the data bytes, then STOP.
 * On a byte-level failure the bus is STOPped before the error returns.
 */
static int mxl111sf_i2c_sw_xfer_msg(struct mxl111sf_state *state,
				    struct i2c_msg *msg)
{
	int i, ret;

	mxl_i2c("()");

	if (msg->flags & I2C_M_RD) {

		ret = mxl111sf_i2c_start(state);
		if (mxl_fail(ret))
			goto fail;

		/* address byte with the read bit set */
		ret = mxl111sf_i2c_bitbang_sendbyte(state,
						    (msg->addr << 1) | 0x01);
		if (mxl_fail(ret)) {
			mxl111sf_i2c_stop(state);
			goto fail;
		}

		for (i = 0; i < msg->len; i++) {
			ret = mxl111sf_i2c_bitbang_recvbyte(state,
							    &msg->buf[i]);
			if (mxl_fail(ret)) {
				mxl111sf_i2c_stop(state);
				goto fail;
			}
			/* ACK every byte except the last; the last byte
			 * is NACKed below to terminate the read */
			if (i < msg->len - 1)
				mxl111sf_i2c_ack(state);
		}

		mxl111sf_i2c_nack(state);

		ret = mxl111sf_i2c_stop(state);
		if (mxl_fail(ret))
			goto fail;

	} else {

		ret = mxl111sf_i2c_start(state);
		if (mxl_fail(ret))
			goto fail;

		/* address byte with the write bit (0) */
		ret = mxl111sf_i2c_bitbang_sendbyte(state,
						    (msg->addr << 1) & 0xfe);
		if (mxl_fail(ret)) {
			mxl111sf_i2c_stop(state);
			goto fail;
		}

		for (i = 0; i < msg->len; i++) {
			ret = mxl111sf_i2c_bitbang_sendbyte(state,
							    msg->buf[i]);
			if (mxl_fail(ret)) {
				mxl111sf_i2c_stop(state);
				goto fail;
			}
		}

		/* FIXME: we only want to do this on the last transaction */
		mxl111sf_i2c_stop(state);
	}
fail:
	return ret;
}
/* HW-I2C ----------------------------------------------------------------- */
#define USB_WRITE_I2C_CMD 0x99
#define USB_READ_I2C_CMD 0xdd
#define USB_END_I2C_CMD 0xfe
#define USB_WRITE_I2C_CMD_LEN 26
#define USB_READ_I2C_CMD_LEN 24
#define I2C_MUX_REG 0x30
#define I2C_CONTROL_REG 0x00
#define I2C_SLAVE_ADDR_REG 0x08
#define I2C_DATA_REG 0x0c
#define I2C_INT_STATUS_REG 0x10
/*
 * Ship a 26-byte I2C command buffer to the device: wdata[0] carries the
 * USB command id, the remaining 25 bytes are the command payload.
 * 'index' is currently unused but kept for interface compatibility.
 */
static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
				  u8 index, u8 *wdata)
{
	int ret;

	ret = mxl111sf_ctrl_msg(state->d, wdata[0], &wdata[1], 25, NULL, 0);
	mxl_fail(ret);

	return ret;
}
/*
 * Send a 26-byte I2C command buffer and read back a 24-byte reply into
 * rdata.  'index' is currently unused but kept for interface
 * compatibility.
 */
static int mxl111sf_i2c_get_data(struct mxl111sf_state *state,
				 u8 index, u8 *wdata, u8 *rdata)
{
	int ret;

	ret = mxl111sf_ctrl_msg(state->d, wdata[0], &wdata[1], 25, rdata, 24);
	mxl_fail(ret);

	return ret;
}
/*
 * Read the hardware I2C interrupt status register and report whether a
 * NACK/error is pending.  Returns 1 on error, 0 otherwise.
 *
 * Fix: the original only initialized buf[0..5] but
 * mxl111sf_i2c_get_data() transmits 25 bytes from buf[1], so bytes
 * 6..25 of kernel stack memory were sent to the device uninitialized.
 * Pad the whole buffer with USB_END_I2C_CMD, matching what
 * mxl111sf_i2c_hw_xfer_msg() already does; parsing stops at the first
 * end marker, so behavior toward the firmware is unchanged.
 */
static u8 mxl111sf_i2c_check_status(struct mxl111sf_state *state)
{
	u8 status = 0;
	u8 buf[26];
	int i;

	mxl_i2c_adv("()");

	/* pad with end-of-command markers; no uninitialized stack bytes */
	for (i = 0; i < 26; i++)
		buf[i] = USB_END_I2C_CMD;

	buf[0] = USB_READ_I2C_CMD;
	buf[1] = 0x00;

	/* one read slot: the I2C interrupt status register */
	buf[2] = I2C_INT_STATUS_REG;
	buf[3] = 0x00;
	buf[4] = 0x00;

	mxl111sf_i2c_get_data(state, 0, buf, buf);

	/* bit 2 of the returned status byte flags a NACK / error */
	if (buf[1] & 0x04)
		status = 1;

	return status;
}
/*
 * Check whether the hardware I2C read FIFO has data available.
 * Returns 1 when data is ready, 0 otherwise.
 *
 * Fix: the original only initialized buf[0..8] but
 * mxl111sf_i2c_get_data() transmits 25 bytes from buf[1], so bytes
 * 9..25 of kernel stack memory were sent to the device uninitialized.
 * Pad the whole buffer with USB_END_I2C_CMD (same padding scheme as
 * mxl111sf_i2c_hw_xfer_msg()); firmware parsing stops at the first end
 * marker, so device-visible behavior is unchanged.
 */
static u8 mxl111sf_i2c_check_fifo(struct mxl111sf_state *state)
{
	u8 status = 0;
	u8 buf[26];
	int i;

	mxl_i2c("()");

	/* pad with end-of-command markers; no uninitialized stack bytes */
	for (i = 0; i < 26; i++)
		buf[i] = USB_END_I2C_CMD;

	buf[0] = USB_READ_I2C_CMD;
	buf[1] = 0x00;

	/* two read slots: the I2C mux register and the int status reg */
	buf[2] = I2C_MUX_REG;
	buf[3] = 0x00;
	buf[4] = 0x00;

	buf[5] = I2C_INT_STATUS_REG;
	buf[6] = 0x00;
	buf[7] = 0x00;

	mxl111sf_i2c_get_data(state, 0, buf, buf);

	/* bit 3 of the first result byte: FIFO has data */
	if (0x08 == (buf[1] & 0x08))
		status = 1;

	if ((buf[5] & 0x02) == 0x02)
		mxl_i2c("(buf[5] & 0x02) == 0x02"); /* FIXME */

	return status;
}
/*
 * After an "i2c fifo empty" condition, poll until the FIFO refills and
 * re-issue read requests for the remaining 'count' bytes into rbuf.
 * Returns 1 on success, 0 on NACK/error.  Note: 'i' is reused as the
 * poll counter and then as two independent loop indices.
 */
static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
				  u8 count, u8 *rbuf)
{
	u8 i2c_w_data[26];
	u8 i2c_r_data[24];
	u8 i = 0;
	u8 fifo_status = 0;
	int status = 0;

	mxl_i2c("read %d bytes", count);

	/* poll the FIFO-ready flag at most 5 times */
	while ((fifo_status == 0) && (i++ < 5))
		fifo_status = mxl111sf_i2c_check_fifo(state);

	/* build a read command: USB read id, then pad with end markers
	 * (0xFE) and fill one 3-byte DATA-register slot per byte wanted */
	i2c_w_data[0] = 0xDD;
	i2c_w_data[1] = 0x00;

	for (i = 2; i < 26; i++)
		i2c_w_data[i] = 0xFE;

	for (i = 0; i < count; i++) {
		i2c_w_data[2+(i*3)] = 0x0C;
		i2c_w_data[3+(i*3)] = 0x00;
		i2c_w_data[4+(i*3)] = 0x00;
	}
	mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);

	/* Check for I2C NACK status */
	if (mxl111sf_i2c_check_status(state) == 1) {
		mxl_i2c("error!");
	} else {
		/* every result slot yields a data byte and a status byte */
		for (i = 0; i < count; i++) {
			rbuf[i] = i2c_r_data[(i*3)+1];
			mxl_i2c("%02x\t %02x",
				i2c_r_data[(i*3)+1],
				i2c_r_data[(i*3)+2]);
		}
		status = 1;
	}

	return status;
}
#define HWI2C400 1
/*
 * Perform a single i2c_msg using the MxL111SF's hardware I2C engine.
 *
 * The engine is driven by 26-byte USB command buffers: byte 0 is the
 * USB command id, then (register, value, pad) triplets terminated by
 * USB_END_I2C_CMD.  Transfers are chunked in blocks of 8 data bytes
 * (the engine's FIFO depth), with a partial trailing block handled
 * separately.  On a NACK the control register is set up for a STOP and
 * we jump to 'exit', which always stops the bus and disables the I2C
 * mux before returning.  Register order within each command sequence
 * is significant; do not reorder.
 */
static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
				    struct i2c_msg *msg)
{
	int i, k, ret = 0;
	u16 index = 0;
	u8 buf[26];
	u8 i2c_r_data[24];
	u16 block_len;
	u16 left_over_len;
	u8 rd_status[8];
	u8 ret_status;
	u8 readbuff[26];

	mxl_i2c("addr: 0x%02x, read buff len: %d, write buff len: %d",
		msg->addr, (msg->flags & I2C_M_RD) ? msg->len : 0,
		(!(msg->flags & I2C_M_RD)) ? msg->len : 0);

	/* pre-fill with end-of-command markers */
	for (index = 0; index < 26; index++)
		buf[index] = USB_END_I2C_CMD;

	/* command to indicate data payload is destined for I2C interface */
	buf[0] = USB_WRITE_I2C_CMD;
	buf[1] = 0x00;

	/* enable I2C interface */
	buf[2] = I2C_MUX_REG;
	buf[3] = 0x80;
	buf[4] = 0x00;

	/* enable I2C interface */
	buf[5] = I2C_MUX_REG;
	buf[6] = 0x81;
	buf[7] = 0x00;

	/* set Timeout register on I2C interface */
	buf[8] = 0x14;
	buf[9] = 0xff;
	buf[10] = 0x00;
#if 0
	/* enable Interrupts on I2C interface */
	buf[8] = 0x24;
	buf[9] = 0xF7;
	buf[10] = 0x00;
#endif
	buf[11] = 0x24;
	buf[12] = 0xF7;
	buf[13] = 0x00;

	ret = mxl111sf_i2c_send_data(state, 0, buf);

	/* write data on I2C bus */
	if (!(msg->flags & I2C_M_RD) && (msg->len > 0)) {
		mxl_i2c("%d\t%02x", msg->len, msg->buf[0]);

		/* control register on I2C interface to initialize I2C bus */
		buf[2] = I2C_CONTROL_REG;
		buf[3] = 0x5E;
		buf[4] = (HWI2C400) ? 0x03 : 0x0D;

		/* I2C Slave device Address */
		buf[5] = I2C_SLAVE_ADDR_REG;
		buf[6] = (msg->addr);
		buf[7] = 0x00;
		buf[8] = USB_END_I2C_CMD;
		ret = mxl111sf_i2c_send_data(state, 0, buf);

		/* check for slave device status */
		if (mxl111sf_i2c_check_status(state) == 1) {
			mxl_i2c("NACK writing slave address %02x",
				msg->addr);
			/* if NACK, stop I2C bus and exit */
			buf[2] = I2C_CONTROL_REG;
			buf[3] = 0x4E;
			buf[4] = (HWI2C400) ? 0x03 : 0x0D;
			ret = -EIO;
			goto exit;
		}

		/* I2C interface can do I2C operations in block of 8 bytes of
		   I2C data. calculation to figure out number of blocks of i2c
		   data required to program */
		block_len = (msg->len / 8);
		left_over_len = (msg->len % 8);
		index = 0;

		mxl_i2c("block_len %d, left_over_len %d",
			block_len, left_over_len);

		for (index = 0; index < block_len; index++) {
			for (i = 0; i < 8; i++) {
				/* write data on I2C interface */
				buf[2+(i*3)] = I2C_DATA_REG;
				buf[3+(i*3)] = msg->buf[(index*8)+i];
				buf[4+(i*3)] = 0x00;
			}
			ret = mxl111sf_i2c_send_data(state, 0, buf);

			/* check for I2C NACK status */
			if (mxl111sf_i2c_check_status(state) == 1) {
				mxl_i2c("NACK writing slave address %02x",
					msg->addr);
				/* if NACK, stop I2C bus and exit */
				buf[2] = I2C_CONTROL_REG;
				buf[3] = 0x4E;
				buf[4] = (HWI2C400) ? 0x03 : 0x0D;
				ret = -EIO;
				goto exit;
			}
		}

		/* partial trailing block, rebuilt in a freshly padded buf */
		if (left_over_len) {
			for (k = 0; k < 26; k++)
				buf[k] = USB_END_I2C_CMD;

			buf[0] = 0x99;
			buf[1] = 0x00;

			for (i = 0; i < left_over_len; i++) {
				buf[2+(i*3)] = I2C_DATA_REG;
				buf[3+(i*3)] = msg->buf[(index*8)+i];
				mxl_i2c("index = %d %d data %d",
					index, i, msg->buf[(index*8)+i]);
				buf[4+(i*3)] = 0x00;
			}
			ret = mxl111sf_i2c_send_data(state, 0, buf);

			/* check for I2C NACK status */
			if (mxl111sf_i2c_check_status(state) == 1) {
				mxl_i2c("NACK writing slave address %02x",
					msg->addr);
				/* if NACK, stop I2C bus and exit */
				buf[2] = I2C_CONTROL_REG;
				buf[3] = 0x4E;
				buf[4] = (HWI2C400) ? 0x03 : 0x0D;
				ret = -EIO;
				goto exit;
			}
		}

		/* issue I2C STOP after write */
		buf[2] = I2C_CONTROL_REG;
		buf[3] = 0x4E;
		buf[4] = (HWI2C400) ? 0x03 : 0x0D;
	}

	/* read data from I2C bus */
	if ((msg->flags & I2C_M_RD) && (msg->len > 0)) {
		mxl_i2c("read buf len %d", msg->len);

		/* command to indicate data payload is
		   destined for I2C interface */
		buf[2] = I2C_CONTROL_REG;
		buf[3] = 0xDF;
		buf[4] = (HWI2C400) ? 0x03 : 0x0D;

		/* I2C xfer length */
		buf[5] = 0x14;
		buf[6] = (msg->len & 0xFF);
		buf[7] = 0;

		/* I2C slave device Address */
		buf[8] = I2C_SLAVE_ADDR_REG;
		buf[9] = msg->addr;
		buf[10] = 0x00;
		buf[11] = USB_END_I2C_CMD;
		ret = mxl111sf_i2c_send_data(state, 0, buf);

		/* check for I2C NACK status */
		if (mxl111sf_i2c_check_status(state) == 1) {
			mxl_i2c("NACK reading slave address %02x",
				msg->addr);
			/* if NACK, stop I2C bus and exit */
			buf[2] = I2C_CONTROL_REG;
			buf[3] = 0xC7;
			buf[4] = (HWI2C400) ? 0x03 : 0x0D;
			ret = -EIO;
			goto exit;
		}

		/* I2C interface can do I2C operations in block of 8 bytes of
		   I2C data. calculation to figure out number of blocks of
		   i2c data required to program */
		block_len = ((msg->len) / 8);
		left_over_len = ((msg->len) % 8);
		index = 0;

		mxl_i2c("block_len %d, left_over_len %d",
			block_len, left_over_len);

		/* command to read data from I2C interface */
		buf[0] = USB_READ_I2C_CMD;
		buf[1] = 0x00;

		for (index = 0; index < block_len; index++) {
			/* setup I2C read request packet on I2C interface */
			for (i = 0; i < 8; i++) {
				buf[2+(i*3)] = I2C_DATA_REG;
				buf[3+(i*3)] = 0x00;
				buf[4+(i*3)] = 0x00;
			}
			ret = mxl111sf_i2c_get_data(state, 0, buf, i2c_r_data);

			/* check for I2C NACK status */
			if (mxl111sf_i2c_check_status(state) == 1) {
				mxl_i2c("NACK reading slave address %02x",
					msg->addr);
				/* if NACK, stop I2C bus and exit */
				buf[2] = I2C_CONTROL_REG;
				buf[3] = 0xC7;
				buf[4] = (HWI2C400) ? 0x03 : 0x0D;
				ret = -EIO;
				goto exit;
			}

			/* copy data from i2c data payload to read buffer;
			 * per-byte status 0x04 means "fifo empty", in which
			 * case the remainder of the block is fetched via
			 * mxl111sf_i2c_readagain() */
			for (i = 0; i < 8; i++) {
				rd_status[i] = i2c_r_data[(i*3)+2];

				if (rd_status[i] == 0x04) {
					if (i < 7) {
						mxl_i2c("i2c fifo empty!"
							" @ %d", i);
						msg->buf[(index*8)+i] =
							i2c_r_data[(i*3)+1];
						/* read again */
						ret_status =
							mxl111sf_i2c_readagain(
								state, 8-(i+1),
								readbuff);
						if (ret_status == 1) {
							for (k = 0;
							     k < 8-(i+1);
							     k++) {
								msg->buf[(index*8)+(k+i+1)] =
									readbuff[k];
								mxl_i2c("read data: %02x\t %02x",
									msg->buf[(index*8)+(k+i)],
									(index*8)+(k+i));
								mxl_i2c("read data: %02x\t %02x",
									msg->buf[(index*8)+(k+i+1)],
									readbuff[k]);
							}
							goto stop_copy;
						} else {
							mxl_i2c("readagain "
								"ERROR!");
						}
					} else {
						msg->buf[(index*8)+i] =
							i2c_r_data[(i*3)+1];
					}
				} else {
					msg->buf[(index*8)+i] =
						i2c_r_data[(i*3)+1];
				}
			}
stop_copy:
			;
		}

		/* partial trailing block of the read */
		if (left_over_len) {
			for (k = 0; k < 26; k++)
				buf[k] = USB_END_I2C_CMD;

			buf[0] = 0xDD;
			buf[1] = 0x00;

			for (i = 0; i < left_over_len; i++) {
				buf[2+(i*3)] = I2C_DATA_REG;
				buf[3+(i*3)] = 0x00;
				buf[4+(i*3)] = 0x00;
			}
			ret = mxl111sf_i2c_get_data(state, 0, buf,
						    i2c_r_data);

			/* check for I2C NACK status */
			if (mxl111sf_i2c_check_status(state) == 1) {
				mxl_i2c("NACK reading slave address %02x",
					msg->addr);
				/* if NACK, stop I2C bus and exit */
				buf[2] = I2C_CONTROL_REG;
				buf[3] = 0xC7;
				buf[4] = (HWI2C400) ? 0x03 : 0x0D;
				ret = -EIO;
				goto exit;
			}

			for (i = 0; i < left_over_len; i++) {
				msg->buf[(block_len*8)+i] =
					i2c_r_data[(i*3)+1];
				mxl_i2c("read data: %02x\t %02x",
					i2c_r_data[(i*3)+1],
					i2c_r_data[(i*3)+2]);
			}
		}

		/* indicate I2C interface to issue NACK
		   after next I2C read op */
		buf[0] = USB_WRITE_I2C_CMD;
		buf[1] = 0x00;

		/* control register */
		buf[2] = I2C_CONTROL_REG;
		buf[3] = 0x17;
		buf[4] = (HWI2C400) ? 0x03 : 0x0D;

		buf[5] = USB_END_I2C_CMD;
		ret = mxl111sf_i2c_send_data(state, 0, buf);

		/* control register */
		buf[2] = I2C_CONTROL_REG;
		buf[3] = 0xC7;
		buf[4] = (HWI2C400) ? 0x03 : 0x0D;
	}
exit:
	/* STOP and disable I2C MUX; runs on both success and error paths.
	 * 'buf' still carries the control-register setup from above. */
	buf[0] = USB_WRITE_I2C_CMD;
	buf[1] = 0x00;

	/* de-initilize I2C BUS */
	buf[5] = USB_END_I2C_CMD;
	mxl111sf_i2c_send_data(state, 0, buf);

	/* Control Register */
	buf[2] = I2C_CONTROL_REG;
	buf[3] = 0xDF;
	buf[4] = 0x03;

	/* disable I2C interface */
	buf[5] = I2C_MUX_REG;
	buf[6] = 0x00;
	buf[7] = 0x00;

	/* de-initilize I2C BUS */
	buf[8] = USB_END_I2C_CMD;
	mxl111sf_i2c_send_data(state, 0, buf);

	/* disable I2C interface */
	buf[2] = I2C_MUX_REG;
	buf[3] = 0x81;
	buf[4] = 0x00;

	/* disable I2C interface */
	buf[5] = I2C_MUX_REG;
	buf[6] = 0x00;
	buf[7] = 0x00;

	/* disable I2C interface */
	buf[8] = I2C_MUX_REG;
	buf[9] = 0x00;
	buf[10] = 0x00;

	buf[11] = USB_END_I2C_CMD;
	mxl111sf_i2c_send_data(state, 0, buf);

	return ret;
}
/* ------------------------------------------------------------------------ */
/*
 * I2C adapter .master_xfer handler.  Dispatches each message to the
 * hardware I2C engine (chip revisions newer than MXL111SF_V6) or to
 * the software bit-bang implementation, stopping at the first failure.
 * Returns the number of messages transferred on full success,
 * -EREMOTEIO if any message failed, or -EAGAIN if the mutex lock was
 * interrupted.
 */
int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
		      struct i2c_msg msg[], int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	struct mxl111sf_state *state = d->priv;
	int hwi2c = (state->chip_rev > MXL111SF_V6);
	int i, ret;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		ret = (hwi2c) ?
			mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) :
			mxl111sf_i2c_sw_xfer_msg(state, &msg[i]);
		if (mxl_fail(ret)) {
			mxl_debug_adv("failed with error %d on i2c "
				      "transaction %d of %d, %sing %d bytes "
				      "to/from 0x%02x", ret, i+1, num,
				      (msg[i].flags & I2C_M_RD) ?
				      "read" : "writ",
				      msg[i].len, msg[i].addr);
			break;
		}
	}

	mutex_unlock(&d->i2c_mutex);

	return i == num ? num : -EREMOTEIO;
}
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
akca/android_kernel_xiaomi_msm8996 | drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c | 859 | 15025 | /*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
* Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-demod.h"
#include "mxl111sf-reg.h"
/* debug */
static int mxl111sf_demod_debug;
module_param_named(debug, mxl111sf_demod_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
#define mxl_dbg(fmt, arg...) \
if (mxl111sf_demod_debug) \
mxl_printk(KERN_DEBUG, fmt, ##arg)
/* ------------------------------------------------------------------------ */
/* Per-frontend private state for the MxL111SF demod. */
struct mxl111sf_demod_state {
	struct mxl111sf_state *mxl_state;	/* bridge driver state */
	struct mxl111sf_demod_config *cfg;	/* register-access callbacks */
	struct dvb_frontend fe;			/* embedded DVB frontend */
};
/* ------------------------------------------------------------------------ */
/*
 * Read demod register 'addr' into *data via the bridge-supplied
 * callback; a missing callback yields -EINVAL.
 */
static int mxl111sf_demod_read_reg(struct mxl111sf_demod_state *state,
				   u8 addr, u8 *data)
{
	if (!state->cfg->read_reg)
		return -EINVAL;
	return state->cfg->read_reg(state->mxl_state, addr, data);
}
/*
 * Write 'data' to demod register 'addr' via the bridge-supplied
 * callback; a missing callback yields -EINVAL.
 */
static int mxl111sf_demod_write_reg(struct mxl111sf_demod_state *state,
				    u8 addr, u8 data)
{
	if (!state->cfg->write_reg)
		return -EINVAL;
	return state->cfg->write_reg(state->mxl_state, addr, data);
}
/*
 * Apply a zero-terminated list of register patches via the
 * bridge-supplied callback; a missing callback yields -EINVAL.
 */
static
int mxl111sf_demod_program_regs(struct mxl111sf_demod_state *state,
				struct mxl111sf_reg_ctrl_info *ctrl_reg_info)
{
	if (!state->cfg->program_regs)
		return -EINVAL;
	return state->cfg->program_regs(state->mxl_state, ctrl_reg_info);
}
/* ------------------------------------------------------------------------ */
/* TPS */
/*
 * Decode the TPS code-rate field.
 * bit<2:0> - 000:1/2, 001:2/3, 010:3/4, 011:5/6, 100:7/8.
 * Out-of-range values leave *code_rate untouched (same as before).
 */
static
int mxl1x1sf_demod_get_tps_code_rate(struct mxl111sf_demod_state *state,
				     fe_code_rate_t *code_rate)
{
	static const fe_code_rate_t rate_tbl[] = {
		FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8,
	};
	u8 val;
	int ret = mxl111sf_demod_read_reg(state, V6_CODE_RATE_TPS_REG, &val);

	if (mxl_fail(ret))
		return ret;

	val &= V6_CODE_RATE_TPS_MASK;
	if (val < sizeof(rate_tbl) / sizeof(rate_tbl[0]))
		*code_rate = rate_tbl[val];

	return ret;
}
/*
 * Decode the TPS constellation field: 00:QPSK, 01:16QAM, 10:64QAM.
 * Out-of-range values leave *modulation untouched (same as before).
 */
static
int mxl1x1sf_demod_get_tps_modulation(struct mxl111sf_demod_state *state,
				      fe_modulation_t *modulation)
{
	static const fe_modulation_t mod_tbl[] = { QPSK, QAM_16, QAM_64 };
	u8 val;
	int ret = mxl111sf_demod_read_reg(state, V6_MODORDER_TPS_REG, &val);

	if (mxl_fail(ret))
		return ret;

	val = (val & V6_PARAM_CONSTELLATION_MASK) >> 4;
	if (val < sizeof(mod_tbl) / sizeof(mod_tbl[0]))
		*modulation = mod_tbl[val];

	return ret;
}
/*
 * Decode the TPS FFT mode field: 00:2K, 01:8K, 10:4K.
 * Out-of-range values leave *fft_mode untouched (same as before).
 */
static
int mxl1x1sf_demod_get_tps_guard_fft_mode(struct mxl111sf_demod_state *state,
					  fe_transmit_mode_t *fft_mode)
{
	static const fe_transmit_mode_t fft_tbl[] = {
		TRANSMISSION_MODE_2K,
		TRANSMISSION_MODE_8K,
		TRANSMISSION_MODE_4K,
	};
	u8 val;
	int ret = mxl111sf_demod_read_reg(state, V6_MODE_TPS_REG, &val);

	if (mxl_fail(ret))
		return ret;

	val = (val & V6_PARAM_FFT_MODE_MASK) >> 2;
	if (val < sizeof(fft_tbl) / sizeof(fft_tbl[0]))
		*fft_mode = fft_tbl[val];

	return ret;
}
/*
 * Decode the TPS guard-interval field: 00:1/32, 01:1/16, 10:1/8, 11:1/4.
 * All four encodings are covered, so *guard is always written on a
 * successful register read.
 */
static
int mxl1x1sf_demod_get_tps_guard_interval(struct mxl111sf_demod_state *state,
					  fe_guard_interval_t *guard)
{
	static const fe_guard_interval_t gi_tbl[] = {
		GUARD_INTERVAL_1_32,
		GUARD_INTERVAL_1_16,
		GUARD_INTERVAL_1_8,
		GUARD_INTERVAL_1_4,
	};
	u8 val;
	int ret = mxl111sf_demod_read_reg(state, V6_CP_TPS_REG, &val);

	if (mxl_fail(ret))
		return ret;

	*guard = gi_tbl[(val & V6_PARAM_GI_MASK) >> 4];

	return ret;
}
/*
 * Decode the TPS hierarchy field.
 * bit<6:4> - 000:Non hierarchy, 001:1, 010:2, 011:4.
 * Out-of-range values leave *hierarchy untouched (same as before).
 */
static
int mxl1x1sf_demod_get_tps_hierarchy(struct mxl111sf_demod_state *state,
				     fe_hierarchy_t *hierarchy)
{
	static const fe_hierarchy_t hier_tbl[] = {
		HIERARCHY_NONE, HIERARCHY_1, HIERARCHY_2, HIERARCHY_4,
	};
	u8 val;
	int ret = mxl111sf_demod_read_reg(state, V6_TPS_HIERACHY_REG, &val);

	if (mxl_fail(ret))
		return ret;

	val = (val & V6_TPS_HIERARCHY_INFO_MASK) >> 6;
	if (val < sizeof(hier_tbl) / sizeof(hier_tbl[0]))
		*hierarchy = hier_tbl[val];

	return ret;
}
/* ------------------------------------------------------------------------ */
/* LOCKS */
/* Report the sync-lock bit (0/1) from the sync lock register. */
static
int mxl1x1sf_demod_get_sync_lock_status(struct mxl111sf_demod_state *state,
					int *sync_lock)
{
	u8 val = 0;
	int ret = mxl111sf_demod_read_reg(state, V6_SYNC_LOCK_REG, &val);

	/* only update the output when the register read succeeded */
	if (!mxl_fail(ret))
		*sync_lock = (val & SYNC_LOCK_MASK) >> 4;
	return ret;
}
/* Report the Reed-Solomon lock bit (0/1). */
static
int mxl1x1sf_demod_get_rs_lock_status(struct mxl111sf_demod_state *state,
				      int *rs_lock)
{
	u8 val = 0;
	int ret = mxl111sf_demod_read_reg(state, V6_RS_LOCK_DET_REG, &val);

	/* only update the output when the register read succeeded */
	if (!mxl_fail(ret))
		*rs_lock = (val & RS_LOCK_DET_MASK) >> 3;
	return ret;
}
/* Report the TPS lock bit (0/1). */
static
int mxl1x1sf_demod_get_tps_lock_status(struct mxl111sf_demod_state *state,
				       int *tps_lock)
{
	u8 val = 0;
	int ret = mxl111sf_demod_read_reg(state, V6_TPS_LOCK_REG, &val);

	/* only update the output when the register read succeeded */
	if (!mxl_fail(ret))
		*tps_lock = (val & V6_PARAM_TPS_LOCK_MASK) >> 6;
	return ret;
}
/* Report the FEC lock bit (0/1) from the IRQ status register. */
static
int mxl1x1sf_demod_get_fec_lock_status(struct mxl111sf_demod_state *state,
				       int *fec_lock)
{
	u8 val = 0;
	int ret = mxl111sf_demod_read_reg(state, V6_IRQ_STATUS_REG, &val);

	/* only update the output when the register read succeeded */
	if (!mxl_fail(ret))
		*fec_lock = (val & IRQ_MASK_FEC_LOCK) >> 4;
	return ret;
}
#if 0
/* Compiled out: CP (cyclic-prefix) lock readback, kept for reference. */
static
int mxl1x1sf_demod_get_cp_lock_status(struct mxl111sf_demod_state *state,
				      int *cp_lock)
{
	u8 val = 0;
	int ret = mxl111sf_demod_read_reg(state, V6_CP_LOCK_DET_REG, &val);
	if (mxl_fail(ret))
		goto fail;
	*cp_lock = (val & V6_CP_LOCK_DET_MASK) >> 2;
fail:
	return ret;
}
#endif
/* Clear all pending demod interrupt status bits (write 0xff to 0x0e). */
static int mxl1x1sf_demod_reset_irq_status(struct mxl111sf_demod_state *state)
{
	return mxl111sf_demod_write_reg(state, 0x0e, 0xff);
}
/* ------------------------------------------------------------------------ */
/*
 * .set_frontend: tune the attached tuner (if any), apply the PHY PLL
 * patch register sequence, then clear pending IRQ status.  The
 * msleep()s between steps give the hardware time to settle.
 */
static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	int ret = 0;

	/* {addr, mask, value} triples, zero-terminated; pages are
	 * switched to 1 and back to 0 around the PLL register writes */
	struct mxl111sf_reg_ctrl_info phy_pll_patch[] = {
		{0x00, 0xff, 0x01}, /* change page to 1 */
		{0x40, 0xff, 0x05},
		{0x40, 0xff, 0x01},
		{0x41, 0xff, 0xca},
		{0x41, 0xff, 0xc0},
		{0x00, 0xff, 0x00}, /* change page to 0 */
		{0, 0, 0}
	};

	mxl_dbg("()");

	if (fe->ops.tuner_ops.set_params) {
		ret = fe->ops.tuner_ops.set_params(fe);
		if (mxl_fail(ret))
			goto fail;
		msleep(50);
	}
	ret = mxl111sf_demod_program_regs(state, phy_pll_patch);
	mxl_fail(ret);
	msleep(50);
	ret = mxl1x1sf_demod_reset_irq_status(state);
	mxl_fail(ret);
	msleep(100);
fail:
	return ret;
}
/* ------------------------------------------------------------------------ */
#if 0
/* Compiled out: helper to reset the TS packet error counter. */
/* resets TS Packet error count */
/* After setting 7th bit of V5_PER_COUNT_RESET_REG, it should be reset to 0. */
static
int mxl1x1sf_demod_reset_packet_error_count(struct mxl111sf_demod_state *state)
{
	struct mxl111sf_reg_ctrl_info reset_per_count[] = {
		{0x20, 0x01, 0x01},
		{0x20, 0x01, 0x00},
		{0, 0, 0}
	};
	return mxl111sf_demod_program_regs(state, reset_per_count);
}
#endif
/* returns TS Packet error count */
/* PER Count = FEC_PER_COUNT * (2 ** (FEC_PER_SCALE * 4)) */
/*
 * .read_ucblocks: report the TS packet error count as
 * FEC_PER_COUNT * 2^(4 * FEC_PER_SCALE).  *ucblocks is zeroed up front
 * so it stays defined on a register-read failure.
 */
static int mxl111sf_demod_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	u32 fec_per_count, fec_per_scale;
	u8 val;
	int ret;

	*ucblocks = 0;

	/* FEC_PER_COUNT Register */
	ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_COUNT_REG, &val);
	if (mxl_fail(ret))
		goto fail;

	fec_per_count = val;

	/* FEC_PER_SCALE Register */
	ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_SCALE_REG, &val);
	if (mxl_fail(ret))
		goto fail;

	/* NOTE(review): assumes the masked value * 4 stays below 32 so the
	 * shift below cannot overflow — confirm against
	 * V6_FEC_PER_SCALE_MASK in mxl111sf-reg.h */
	val &= V6_FEC_PER_SCALE_MASK;
	val *= 4;

	fec_per_scale = 1 << val;

	fec_per_count *= fec_per_scale;

	*ucblocks = fec_per_count;
fail:
	return ret;
}
#ifdef MXL111SF_DEMOD_ENABLE_CALCULATIONS
/* FIXME: leaving this enabled breaks the build on some architectures,
* and we shouldn't have any floating point math in the kernel, anyway.
*
* These macros need to be re-written, but it's harmless to simply
* return zero for now. */
#define CALCULATE_BER(avg_errors, count) \
((u32)(avg_errors * 4)/(count*64*188*8))
#define CALCULATE_SNR(data) \
((u32)((10 * (u32)data / 64) - 2.5))
#else
#define CALCULATE_BER(avg_errors, count) 0
#define CALCULATE_SNR(data) 0
#endif
/*
 * .read_ber: read the averaged RS error counters and the accumulate
 * count.  Note that CALCULATE_BER evaluates to 0 unless
 * MXL111SF_DEMOD_ENABLE_CALCULATIONS is defined (see the macros above),
 * so *ber is normally reported as 0.
 */
static int mxl111sf_demod_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	u8 val1, val2, val3;
	int ret;

	*ber = 0;

	ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_LSB_REG, &val1);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_MSB_REG, &val2);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl111sf_demod_read_reg(state, V6_N_ACCUMULATE_REG, &val3);
	if (mxl_fail(ret))
		goto fail;

	*ber = CALCULATE_BER((val1 | (val2 << 8)), val3);
fail:
	return ret;
}
/*
 * Read the raw 10-bit SNR value (LSB register plus low 2 bits of the
 * MSB register) into *snr.  CALCULATE_SNR is stubbed to 0 unless
 * MXL111SF_DEMOD_ENABLE_CALCULATIONS is defined.  *snr is zeroed first
 * so it stays defined on failure.
 */
static int mxl111sf_demod_calc_snr(struct mxl111sf_demod_state *state,
				   u16 *snr)
{
	u8 val1, val2;
	int ret;

	*snr = 0;

	ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_LSB_REG, &val1);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_MSB_REG, &val2);
	if (mxl_fail(ret))
		goto fail;

	*snr = CALCULATE_SNR(val1 | ((val2 & 0x03) << 8));
fail:
	return ret;
}
/*
 * .read_snr: fetch the raw SNR reading and scale it to 0.1 dB units.
 * On failure the error propagates and *snr is left as calc_snr set it.
 */
static int mxl111sf_demod_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	int ret = mxl111sf_demod_calc_snr(state, snr);

	if (!mxl_fail(ret))
		*snr /= 10; /* 0.1 dB */
	return ret;
}
/*
 * .read_status: aggregate the RS, TPS, sync and FEC lock indicators
 * into fe_status_t flags.  FE_HAS_LOCK requires RS + TPS + sync lock;
 * FEC lock maps to FE_HAS_VITERBI (noted below as possibly unreliable).
 */
static int mxl111sf_demod_read_status(struct dvb_frontend *fe,
				      fe_status_t *status)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	int ret, locked, cr_lock, sync_lock, fec_lock;

	*status = 0;

	ret = mxl1x1sf_demod_get_rs_lock_status(state, &locked);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl1x1sf_demod_get_tps_lock_status(state, &cr_lock);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl1x1sf_demod_get_sync_lock_status(state, &sync_lock);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl1x1sf_demod_get_fec_lock_status(state, &fec_lock);
	if (mxl_fail(ret))
		goto fail;

	if (locked)
		*status |= FE_HAS_SIGNAL;
	if (cr_lock)
		*status |= FE_HAS_CARRIER;
	if (sync_lock)
		*status |= FE_HAS_SYNC;
	if (fec_lock) /* false positives? */
		*status |= FE_HAS_VITERBI;

	if ((locked) && (cr_lock) && (sync_lock))
		*status |= FE_HAS_LOCK;
fail:
	return ret;
}
/*
 * .read_signal_strength: derive a 0..65535 strength estimate from the
 * raw SNR reading, with a per-constellation scale factor.
 *
 * Fix: the original ignored the return values of calc_snr() and
 * get_tps_modulation(); if the modulation read failed, the local
 * 'modulation' variable was left uninitialized and then used in the
 * switch.  Propagate register-read errors instead.
 */
static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe,
					       u16 *signal_strength)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	fe_modulation_t modulation;
	u16 snr;
	int ret;

	ret = mxl111sf_demod_calc_snr(state, &snr);
	if (ret < 0)
		return ret;
	ret = mxl1x1sf_demod_get_tps_modulation(state, &modulation);
	if (ret < 0)
		return ret;

	switch (modulation) {
	case QPSK:
		*signal_strength = (snr >= 1300) ?
			min(65535, snr * 44) : snr * 38;
		break;
	case QAM_16:
		*signal_strength = (snr >= 1500) ?
			min(65535, snr * 38) : snr * 33;
		break;
	case QAM_64:
		*signal_strength = (snr >= 2000) ?
			min(65535, snr * 29) : snr * 25;
		break;
	default:
		*signal_strength = 0;
		return -EINVAL;
	}

	return 0;
}
/*
 * .get_frontend: populate the property cache from the tuner (bandwidth,
 * frequency) and from the demod's decoded TPS parameters.  Errors from
 * the individual getters are ignored here; fields that could not be
 * read keep their previous cache values.
 */
static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct mxl111sf_demod_state *state = fe->demodulator_priv;

	mxl_dbg("()");
#if 0
	p->inversion = /* FIXME */ ? INVERSION_ON : INVERSION_OFF;
#endif
	if (fe->ops.tuner_ops.get_bandwidth)
		fe->ops.tuner_ops.get_bandwidth(fe, &p->bandwidth_hz);
	if (fe->ops.tuner_ops.get_frequency)
		fe->ops.tuner_ops.get_frequency(fe, &p->frequency);
	/* NOTE(review): HP and LP code rates both come from the same
	 * getter/register, so they always report the same value — confirm
	 * whether the LP rate has its own register */
	mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_HP);
	mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_LP);
	mxl1x1sf_demod_get_tps_modulation(state, &p->modulation);
	mxl1x1sf_demod_get_tps_guard_fft_mode(state,
					      &p->transmission_mode);
	mxl1x1sf_demod_get_tps_guard_interval(state,
					      &p->guard_interval);
	mxl1x1sf_demod_get_tps_hierarchy(state,
					 &p->hierarchy);
	return 0;
}
/* .get_tune_settings: advertise a minimum 1000 ms delay between tuning
 * attempts to the DVB core's retune logic. */
static
int mxl111sf_demod_get_tune_settings(struct dvb_frontend *fe,
				     struct dvb_frontend_tune_settings *tune)
{
	tune->min_delay_ms = 1000;
	return 0;
}
/* .release: free the private state allocated by mxl111sf_demod_attach()
 * and clear the frontend's back-pointer to it. */
static void mxl111sf_demod_release(struct dvb_frontend *fe)
{
	struct mxl111sf_demod_state *state = fe->demodulator_priv;
	mxl_dbg("()");
	kfree(state);
	fe->demodulator_priv = NULL;
}
/* DVB frontend operations table for the MxL111SF DVB-T demodulator.
 * Copied into each frontend instance by mxl111sf_demod_attach(). */
static struct dvb_frontend_ops mxl111sf_demod_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name = "MaxLinear MxL111SF DVB-T demodulator",
		.frequency_min = 177000000,
		.frequency_max = 858000000,
		.frequency_stepsize = 166666,
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
			FE_CAN_QAM_AUTO |
			FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER
	},
	.release = mxl111sf_demod_release,
#if 0
	.init = mxl111sf_init,
	.i2c_gate_ctrl = mxl111sf_i2c_gate_ctrl,
#endif
	.set_frontend = mxl111sf_demod_set_frontend,
	.get_frontend = mxl111sf_demod_get_frontend,
	.get_tune_settings = mxl111sf_demod_get_tune_settings,
	.read_status = mxl111sf_demod_read_status,
	.read_signal_strength = mxl111sf_demod_read_signal_strength,
	.read_ber = mxl111sf_demod_read_ber,
	.read_snr = mxl111sf_demod_read_snr,
	.read_ucblocks = mxl111sf_demod_read_ucblocks,
};
/*
 * Allocate and initialize a demod frontend instance bound to the given
 * bridge state and config.  Returns the embedded dvb_frontend, or NULL
 * on allocation failure.  The state is freed by .release.
 */
struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
					   struct mxl111sf_demod_config *cfg)
{
	struct mxl111sf_demod_state *state;

	mxl_dbg("()");

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->mxl_state = mxl_state;
	state->cfg = cfg;

	memcpy(&state->fe.ops, &mxl111sf_demod_ops, sizeof(state->fe.ops));
	state->fe.demodulator_priv = state;

	return &state->fe;
}
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
bndmag/linux | sound/soc/atmel/atmel-pcm-dma.c | 1115 | 4361 | /*
* atmel-pcm-dma.c -- ALSA PCM DMA support for the Atmel SoC.
*
* Copyright (C) 2012 Atmel
*
* Author: Bo Shen <voice.shen@atmel.com>
*
* Based on atmel-pcm by:
* Sedji Gaouaou <sedji.gaouaou@atmel.com>
* Copyright 2008 Atmel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel-ssc.h>
#include <linux/platform_data/dma-atmel.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "atmel-pcm.h"
/*--------------------------------------------------------------------------*\
* Hardware definition
\*--------------------------------------------------------------------------*/
/* PCM capabilities advertised to ALSA for the DMA-driven SSC stream. */
static const struct snd_pcm_hardware atmel_pcm_dma_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_RESUME |
				  SNDRV_PCM_INFO_PAUSE,
	.period_bytes_min	= 256,		/* lighting DMA overhead */
	.period_bytes_max	= 2 * 0xffff,	/* if 2 bytes format */
	.periods_min		= 8,
	.periods_max		= 1024,		/* no limit */
	.buffer_bytes_max	= 512 * 1024,
};
/**
* atmel_pcm_dma_irq: SSC interrupt handler for DMAENGINE enabled SSC
*
* We use DMAENGINE to send/receive data to/from SSC so this ISR is only to
* check if any overrun occured.
*/
static void atmel_pcm_dma_irq(u32 ssc_sr,
	struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct atmel_pcm_dma_params *prtd;

	prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	/* only react to the error (xrun) bit for this stream direction */
	if (ssc_sr & prtd->mask->ssc_error) {
		if (snd_pcm_running(substream))
			pr_warn("atmel-pcm: buffer %s on %s (SSC_SR=%#x)\n",
				substream->stream == SNDRV_PCM_STREAM_PLAYBACK
				? "underrun" : "overrun", prtd->name,
				ssc_sr);

		/* stop RX and capture: will be enabled again at restart */
		ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
		snd_pcm_stop_xrun(substream);

		/* now drain RHR and read status to remove xrun condition */
		ssc_readx(prtd->ssc->regs, SSC_RHR);
		ssc_readx(prtd->ssc->regs, SSC_SR);
	}
}
/*
 * .prepare_slave_config callback: derive the dmaengine slave config
 * from hw_params, then point it at the SSC transmit/receive holding
 * registers.  Both directions are filled in; the dmaengine PCM core
 * uses whichever matches the substream.  Also installs the xrun IRQ
 * handler on the DAI's DMA params.
 */
static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct atmel_pcm_dma_params *prtd;
	struct ssc_device *ssc;
	int ret;

	prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	ssc = prtd->ssc;

	ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
	if (ret) {
		pr_err("atmel-pcm: hwparams to dma slave configure failed\n");
		return ret;
	}

	/* playback writes THR, capture reads RHR; one word per request */
	slave_config->dst_addr = ssc->phybase + SSC_THR;
	slave_config->dst_maxburst = 1;

	slave_config->src_addr = ssc->phybase + SSC_RHR;
	slave_config->src_maxburst = 1;

	prtd->dma_intr_handler = atmel_pcm_dma_irq;

	return 0;
}
/* dmaengine PCM glue: our slave-config hook, HW caps and buffer prealloc */
static const struct snd_dmaengine_pcm_config atmel_dmaengine_pcm_config = {
	.prepare_slave_config = atmel_pcm_configure_dma,
	.pcm_hardware = &atmel_pcm_dma_hardware,
	.prealloc_buffer_size = 64 * 1024,
};
/* Register the dmaengine PCM platform for @dev using our static config. */
int atmel_pcm_dma_platform_register(struct device *dev)
{
	return snd_dmaengine_pcm_register(dev, &atmel_dmaengine_pcm_config, 0);
}
EXPORT_SYMBOL(atmel_pcm_dma_platform_register);
/* Counterpart of atmel_pcm_dma_platform_register(). */
void atmel_pcm_dma_platform_unregister(struct device *dev)
{
	snd_dmaengine_pcm_unregister(dev);
}
EXPORT_SYMBOL(atmel_pcm_dma_platform_unregister);
MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
MODULE_DESCRIPTION("Atmel DMA based PCM module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Radium-Devices/Radium_shamu | drivers/acpi/acpica/uttrack.c | 2139 | 20188 | /******************************************************************************
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
/*
* These procedures are used for tracking memory leaks in the subsystem, and
* they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set.
*
* Each memory allocation is tracked via a doubly linked list. Each
* element contains the caller's component, module name, function name, and
* line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call
* acpi_ut_track_allocation to add an element to the list; deletion
* occurs in the body of acpi_ut_free.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("uttrack")
/* Local prototypes */
static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
acpi_debug_mem_block
*allocation);
static acpi_status
acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
acpi_size size,
u8 alloc_type,
u32 component, const char *module, u32 line);
static acpi_status
acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
u32 component, const char *module, u32 line);
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_list
*
 * PARAMETERS:  list_name           - Ascii name for the list
* object_size - Size of each cached object
* return_cache - Where the new cache object is returned
*
* RETURN: Status
*
 * DESCRIPTION: Create a local memory list for tracking purposes
*
******************************************************************************/
acpi_status
acpi_ut_create_list(char *list_name,
		    u16 object_size, struct acpi_memory_list **return_cache)
{
	struct acpi_memory_list *cache;

	cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
	if (!cache) {
		return (AE_NO_MEMORY);
	}

	/* Zero the whole descriptor, then fill in the caller's identity */

	ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));

	cache->list_name = list_name;	/* Pointer is stored, not copied */
	cache->object_size = object_size;

	*return_cache = cache;
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_allocate_and_track
*
* PARAMETERS: size - Size of the allocation
* component - Component type of caller
* module - Source file name of caller
* line - Line number of caller
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
* DESCRIPTION: The subsystem's equivalent of malloc.
*
******************************************************************************/
void *acpi_ut_allocate_and_track(acpi_size size,
				 u32 component, const char *module, u32 line)
{
	struct acpi_debug_mem_block *allocation;
	acpi_status status;

	/* Allocate caller's size plus room for the debug tracking header */

	allocation =
	    acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
			     component, module, line);
	if (!allocation) {
		return (NULL);
	}

	status = acpi_ut_track_allocation(allocation, size,
					  ACPI_MEM_MALLOC, component, module,
					  line);
	if (ACPI_FAILURE(status)) {
		acpi_os_free(allocation);
		return (NULL);
	}

	/* Update global statistics; sizes exclude the debug header */

	acpi_gbl_global_list->total_allocated++;
	acpi_gbl_global_list->total_size += (u32)size;
	acpi_gbl_global_list->current_total_size += (u32)size;
	if (acpi_gbl_global_list->current_total_size >
	    acpi_gbl_global_list->max_occupied) {
		acpi_gbl_global_list->max_occupied =
		    acpi_gbl_global_list->current_total_size;
	}

	/* Hand back the user area, just past the debug header */

	return ((void *)&allocation->user_space);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_allocate_zeroed_and_track
*
* PARAMETERS: size - Size of the allocation
* component - Component type of caller
* module - Source file name of caller
* line - Line number of caller
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
* DESCRIPTION: Subsystem equivalent of calloc.
*
******************************************************************************/
void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
					u32 component,
					const char *module, u32 line)
{
	struct acpi_debug_mem_block *allocation;
	acpi_status status;

	/* Allocate caller's size plus room for the debug tracking header */

	allocation =
	    acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
				    component, module, line);
	if (!allocation) {

		/* Report allocation error */

		ACPI_ERROR((module, line,
			    "Could not allocate size %u", (u32)size));
		return (NULL);
	}

	status = acpi_ut_track_allocation(allocation, size,
					  ACPI_MEM_CALLOC, component, module,
					  line);
	if (ACPI_FAILURE(status)) {
		acpi_os_free(allocation);
		return (NULL);
	}

	/* Update global statistics; sizes exclude the debug header */

	acpi_gbl_global_list->total_allocated++;
	acpi_gbl_global_list->total_size += (u32)size;
	acpi_gbl_global_list->current_total_size += (u32)size;
	if (acpi_gbl_global_list->current_total_size >
	    acpi_gbl_global_list->max_occupied) {
		acpi_gbl_global_list->max_occupied =
		    acpi_gbl_global_list->current_total_size;
	}

	/* Hand back the user area, just past the debug header */

	return ((void *)&allocation->user_space);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_free_and_track
*
* PARAMETERS: allocation - Address of the memory to deallocate
* component - Component type of caller
* module - Source file name of caller
* line - Line number of caller
*
* RETURN: None
*
* DESCRIPTION: Frees the memory at Allocation
*
******************************************************************************/
void
acpi_ut_free_and_track(void *allocation,
		       u32 component, const char *module, u32 line)
{
	struct acpi_debug_mem_block *debug_block;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ut_free, allocation);

	if (NULL == allocation) {
		ACPI_ERROR((module, line, "Attempt to delete a NULL address"));
		return_VOID;
	}

	/* Step back from the user pointer to the debug header */

	debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
				    (((char *)allocation) -
				     sizeof(struct acpi_debug_mem_header)));

	acpi_gbl_global_list->total_freed++;
	acpi_gbl_global_list->current_total_size -= debug_block->size;

	status = acpi_ut_remove_allocation(debug_block,
					   component, module, line);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
	}

	/* Block is released even if it could not be unlinked from the list */

	acpi_os_free(debug_block);

	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_find_allocation
*
* PARAMETERS: allocation - Address of allocated memory
*
* RETURN: Three cases:
* 1) List is empty, NULL is returned.
* 2) Element was found. Returns Allocation parameter.
* 3) Element was not found. Returns position where it should be
* inserted into the list.
*
* DESCRIPTION: Searches for an element in the global allocation tracking list.
* If the element is not found, returns the location within the
* list where the element should be inserted.
*
* Note: The list is ordered by larger-to-smaller addresses.
*
* This global list is used to detect memory leaks in ACPICA as
* well as other issues such as an attempt to release the same
* internal object more than once. Although expensive as far
* as cpu time, this list is much more helpful for finding these
* types of issues than using memory leak detectors outside of
* the ACPICA code.
*
******************************************************************************/
static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
							    acpi_debug_mem_block
							    *allocation)
{
	struct acpi_debug_mem_block *element;

	element = acpi_gbl_global_list->list_head;
	if (!element) {
		return (NULL);
	}

	/*
	 * Search for the address.
	 *
	 * Note: List is ordered by larger-to-smaller addresses, on the
	 * assumption that a new allocation usually has a larger address
	 * than previous allocations.
	 *
	 * NOTE(review): relational comparison of pointers into distinct
	 * allocations is implementation-defined in ISO C; this relies on
	 * the flat address spaces ACPICA runs on.
	 */
	while (element > allocation) {

		/* Check for end-of-list */

		if (!element->next) {
			return (element);
		}

		element = element->next;
	}

	if (element == allocation) {
		return (element);
	}

	/* Not found: return the element to insert the new block after */

	return (element->previous);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_track_allocation
*
* PARAMETERS: allocation - Address of allocated memory
* size - Size of the allocation
* alloc_type - MEM_MALLOC or MEM_CALLOC
* component - Component type of caller
* module - Source file name of caller
* line - Line number of caller
*
* RETURN: Status
*
* DESCRIPTION: Inserts an element into the global allocation tracking list.
*
******************************************************************************/
static acpi_status
acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
			 acpi_size size,
			 u8 alloc_type,
			 u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	struct acpi_debug_mem_block *element;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation);

	if (acpi_gbl_disable_mem_tracking) {
		return_ACPI_STATUS(AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Search the global list for this address to make sure it is not
	 * already present. This will catch several kinds of problems.
	 */
	element = acpi_ut_find_allocation(allocation);
	if (element == allocation) {
		ACPI_ERROR((AE_INFO,
			    "UtTrackAllocation: Allocation (%p) already present in global list!",
			    allocation));
		/* Exits with AE_OK; the duplicate is reported but not inserted */
		goto unlock_and_exit;
	}

	/* Fill in the instance data */

	allocation->size = (u32)size;
	allocation->alloc_type = alloc_type;
	allocation->component = component;
	allocation->line = line;

	ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
	allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;	/* Force NUL termination */

	if (!element) {

		/* Insert at list head */

		if (mem_list->list_head) {
			((struct acpi_debug_mem_block *)(mem_list->list_head))->
			    previous = allocation;
		}

		allocation->next = mem_list->list_head;
		allocation->previous = NULL;

		mem_list->list_head = allocation;
	} else {

		/* Insert after element (keeps larger-to-smaller address order) */

		allocation->next = element->next;
		allocation->previous = element;

		if (element->next) {
			(element->next)->previous = allocation;
		}

		element->next = allocation;
	}

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_remove_allocation
*
* PARAMETERS: allocation - Address of allocated memory
* component - Component type of caller
* module - Source file name of caller
* line - Line number of caller
*
* RETURN: Status
*
* DESCRIPTION: Deletes an element from the global allocation tracking list.
*
******************************************************************************/
static acpi_status
acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
			  u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	acpi_status status;

	ACPI_FUNCTION_NAME(ut_remove_allocation);

	if (acpi_gbl_disable_mem_tracking) {
		return (AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	if (NULL == mem_list->list_head) {

		/* No allocations! */

		ACPI_ERROR((module, line,
			    "Empty allocation list, nothing to free!"));

		return (AE_OK);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Unlink the block from the doubly linked list */

	if (allocation->previous) {
		(allocation->previous)->next = allocation->next;
	} else {
		mem_list->list_head = allocation->next;
	}

	if (allocation->next) {
		(allocation->next)->previous = allocation->previous;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n",
			  &allocation->user_space, allocation->size));

	/* Mark the segment as deleted (0xEA poison helps spot use-after-free) */

	ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);

	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_dump_allocation_info
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Print some info about the outstanding allocations.
*
******************************************************************************/
/* Currently a stub: the statistics dump below is historical, disabled code. */
void acpi_ut_dump_allocation_info(void)
{
	/*
	   struct acpi_memory_list *mem_list;
	 */

	ACPI_FUNCTION_TRACE(ut_dump_allocation_info);

	/*
	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Current allocations",
	   mem_list->current_count,
	   ROUND_UP_TO_1K (mem_list->current_size)));

	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
	   mem_list->max_concurrent_count,
	   ROUND_UP_TO_1K (mem_list->max_concurrent_size)));

	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
	   running_object_count,
	   ROUND_UP_TO_1K (running_object_size)));

	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
	   running_alloc_count,
	   ROUND_UP_TO_1K (running_alloc_size)));

	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Current Nodes",
	   acpi_gbl_current_node_count,
	   ROUND_UP_TO_1K (acpi_gbl_current_node_size)));

	   ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
	   ("%30s: %4d (%3d Kb)\n", "Max Nodes",
	   acpi_gbl_max_concurrent_node_count,
	   ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
	   sizeof (struct acpi_namespace_node)))));
	 */
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_dump_allocations
*
* PARAMETERS: component - Component(s) to dump info for.
* module - Module to dump info for. NULL means all.
*
* RETURN: None
*
* DESCRIPTION: Print a list of all outstanding allocations.
*
******************************************************************************/
void acpi_ut_dump_allocations(u32 component, const char *module)
{
	struct acpi_debug_mem_block *element;
	union acpi_descriptor *descriptor;
	u32 num_outstanding = 0;
	u8 descriptor_type;

	ACPI_FUNCTION_TRACE(ut_dump_allocations);

	if (acpi_gbl_disable_mem_tracking) {
		return_VOID;
	}

	/*
	 * Walk the allocation list.
	 */
	if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
		return_VOID;
	}

	element = acpi_gbl_global_list->list_head;
	while (element) {
		/* Filter by caller's component mask and optional module name */
		if ((element->component & component) &&
		    ((module == NULL)
		     || (0 == ACPI_STRCMP(module, element->module)))) {
			descriptor =
			    ACPI_CAST_PTR(union acpi_descriptor,
					  &element->user_space);

			if (element->size <
			    sizeof(struct acpi_common_descriptor)) {
				acpi_os_printf("%p Length 0x%04X %9.9s-%u "
					       "[Not a Descriptor - too small]\n",
					       descriptor, element->size,
					       element->module, element->line);
			} else {
				/* Ignore allocated objects that are in a cache */

				if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
				    ACPI_DESC_TYPE_CACHED) {
					acpi_os_printf
					    ("%p Length 0x%04X %9.9s-%u [%s] ",
					     descriptor, element->size,
					     element->module, element->line,
					     acpi_ut_get_descriptor_name
					     (descriptor));

					/* Validate the descriptor type using Type field and length */

					descriptor_type = 0;	/* Not a valid descriptor type */

					switch (ACPI_GET_DESCRIPTOR_TYPE
						(descriptor)) {
					case ACPI_DESC_TYPE_OPERAND:
						if (element->size ==
						    sizeof(union
							   acpi_operand_object))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_OPERAND;
						}
						break;

					case ACPI_DESC_TYPE_PARSER:
						if (element->size ==
						    sizeof(union
							   acpi_parse_object)) {
							descriptor_type =
							    ACPI_DESC_TYPE_PARSER;
						}
						break;

					case ACPI_DESC_TYPE_NAMED:
						if (element->size ==
						    sizeof(struct
							   acpi_namespace_node))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_NAMED;
						}
						break;

					default:
						break;
					}

					/* Display additional info for the major descriptor types */

					switch (descriptor_type) {
					case ACPI_DESC_TYPE_OPERAND:
						acpi_os_printf
						    ("%12.12s RefCount 0x%04X\n",
						     acpi_ut_get_type_name
						     (descriptor->object.common.
						      type),
						     descriptor->object.common.
						     reference_count);
						break;

					case ACPI_DESC_TYPE_PARSER:
						acpi_os_printf
						    ("AmlOpcode 0x%04hX\n",
						     descriptor->op.asl.
						     aml_opcode);
						break;

					case ACPI_DESC_TYPE_NAMED:
						acpi_os_printf("%4.4s\n",
							       acpi_ut_get_node_name
							       (&descriptor->
								node));
						break;

					default:
						acpi_os_printf("\n");
						break;
					}
				}
			}
			num_outstanding++;
		}
		element = element->next;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);

	/* Print summary */

	if (!num_outstanding) {
		ACPI_INFO((AE_INFO, "No outstanding allocations"));
	} else {
		ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
			    num_outstanding, num_outstanding));
	}
	return_VOID;
}
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
| gpl-2.0 |
Zenfone2-development/android_kernel_asus_moorefield | drivers/video/sis/sis_main.c | 2395 | 187918 | /*
* SiS 300/540/630[S]/730[S],
* SiS 315[E|PRO]/550/[M]65x/[M]66x[F|M|G]X/[M]74x[GX]/330/[M]76x[GX],
* XGI V3XT/V5/V8, Z7
* frame buffer driver for Linux kernels >= 2.4.14 and >=2.6.3
*
* Copyright (C) 2001-2005 Thomas Winischhofer, Vienna, Austria.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the named License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*
* Author: Thomas Winischhofer <thomas@winischhofer.net>
*
* Author of (practically wiped) code base:
* SiS (www.sis.com)
* Copyright (C) 1999 Silicon Integrated Systems, Inc.
*
* See http://www.winischhofer.net/ for more information and updates
*
* Originally based on the VBE 2.0 compliant graphic boards framebuffer driver,
* which is (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/screen_info.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/selection.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "sis.h"
#include "sis_main.h"
#if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315)
#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set
#warning sisfb will not work!
#endif
static void sisfb_handle_command(struct sis_video_info *ivideo,
struct sisfb_cmd *sisfb_command);
/* ------------------ Internal helper routines ----------------- */
/*
 * Reset all module/boot parameters to their defaults.
 * Convention in this driver: -1 means "auto-detect", 0xff means
 * "not set by the user", 0 means "off/unset".
 */
static void __init
sisfb_setdefaultparms(void)
{
	sisfb_off		= 0;
	sisfb_parm_mem		= 0;
	sisfb_accel		= -1;
	sisfb_ypan		= -1;
	sisfb_max		= -1;
	sisfb_userom		= -1;
	sisfb_useoem		= -1;
	sisfb_mode_idx		= -1;
	sisfb_parm_rate		= -1;
	sisfb_crt1off		= 0;
	sisfb_forcecrt1		= -1;
	sisfb_crt2type		= -1;
	sisfb_crt2flags		= 0;
	sisfb_pdc		= 0xff;
	sisfb_pdca		= 0xff;
	sisfb_scalelcd		= -1;
	sisfb_specialtiming	= CUT_NONE;
	sisfb_lvdshl		= -1;
	sisfb_dstn		= 0;
	sisfb_fstn		= 0;
	sisfb_tvplug		= -1;
	sisfb_tvstd		= -1;
	sisfb_tvxposoffset	= 0;
	sisfb_tvyposoffset	= 0;
	sisfb_nocrt2rate	= 0;
#if !defined(__i386__) && !defined(__x86_64__)
	sisfb_resetcard		= 0;
	sisfb_videoram		= 0;
#endif
}
/* ------------- Parameter parsing -------------- */
/*
 * Look up a VESA mode number in sisbios_mode[] and set sisfb_mode_idx.
 * Falls back to DEFAULT_MODE for vesamode == 0.
 */
static void sisfb_search_vesamode(unsigned int vesamode, bool quiet)
{
	int i = 0, j = 0;

	/* We don't know the hardware specs yet and there is no ivideo */

	if(vesamode == 0) {
		if(!quiet)
			printk(KERN_ERR "sisfb: Invalid mode. Using default.\n");

		sisfb_mode_idx = DEFAULT_MODE;
		return;
	}

	vesamode &= 0x1dff;  /* Clean VESA mode number from other flags */

	/* i is post-incremented in the condition, so the matched entry
	 * is sisbios_mode[i-1]; "continue" re-evaluates the condition and
	 * thereby advances i past a rejected entry.
	 */
	while(sisbios_mode[i++].mode_no[0] != 0) {
		if( (sisbios_mode[i-1].vesa_mode_no_1 == vesamode) ||
		    (sisbios_mode[i-1].vesa_mode_no_2 == vesamode) ) {
			/* Mode numbers 0x50/0x56/0x53 vs 0x5a/0x5b select
			 * between FSTN and non-FSTN table variants —
			 * presumably; see the sisbios_mode table to confirm.
			 */
			if(sisfb_fstn) {
				if(sisbios_mode[i-1].mode_no[1] == 0x50 ||
				   sisbios_mode[i-1].mode_no[1] == 0x56 ||
				   sisbios_mode[i-1].mode_no[1] == 0x53)
					continue;
			} else {
				if(sisbios_mode[i-1].mode_no[1] == 0x5a ||
				   sisbios_mode[i-1].mode_no[1] == 0x5b)
					continue;
			}
			sisfb_mode_idx = i - 1;
			j = 1;
			break;
		}
	}
	if((!j) && !quiet)
		printk(KERN_ERR "sisfb: Invalid VESA mode 0x%x'\n", vesamode);
}
/*
 * Parse a user-supplied mode string ("1024x768x16", "1024 768 16 75",
 * a plain VESA number, ...) and set sisfb_mode_idx (and possibly
 * sisfb_parm_rate) accordingly.
 */
static void sisfb_search_mode(char *name, bool quiet)
{
	unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0;
	int i = 0;
	char strbuf[16], strbuf1[20];
	char *nameptr = name;

	/* We don't know the hardware specs yet and there is no ivideo */

	if(name == NULL) {
		if(!quiet)
			printk(KERN_ERR "sisfb: Internal error, using default mode.\n");

		sisfb_mode_idx = DEFAULT_MODE;
		return;
	}

	if(!strnicmp(name, sisbios_mode[MODE_INDEX_NONE].name, strlen(name))) {
		if(!quiet)
			printk(KERN_ERR "sisfb: Mode 'none' not supported anymore. Using default.\n");

		sisfb_mode_idx = DEFAULT_MODE;
		return;
	}

	/* strbuf1 is 20 bytes, so only copy names of length <= 19 */
	if(strlen(name) <= 19) {
		strcpy(strbuf1, name);
		/* Replace every non-digit by a space so sscanf can split it */
		for(i = 0; i < strlen(strbuf1); i++) {
			if(strbuf1[i] < '0' || strbuf1[i] > '9') strbuf1[i] = ' ';
		}

		/* This does some fuzzy mode naming detection */
		if(sscanf(strbuf1, "%u %u %u %u", &xres, &yres, &depth, &rate) == 4) {
			/* Heuristic: swap if depth/rate appear reversed */
			if((rate <= 32) || (depth > 32)) {
				j = rate; rate = depth; depth = j;
			}
			sprintf(strbuf, "%ux%ux%u", xres, yres, depth);
			nameptr = strbuf;
			sisfb_parm_rate = rate;
		} else if(sscanf(strbuf1, "%u %u %u", &xres, &yres, &depth) == 3) {
			sprintf(strbuf, "%ux%ux%u", xres, yres, depth);
			nameptr = strbuf;
		} else {
			xres = 0;
			if((sscanf(strbuf1, "%u %u", &xres, &yres) == 2) && (xres != 0)) {
				sprintf(strbuf, "%ux%ux8", xres, yres);
				nameptr = strbuf;
			} else {
				/* No geometry found: treat as a VESA mode number */
				sisfb_search_vesamode(simple_strtoul(name, NULL, 0), quiet);
				return;
			}
		}
	}

	/* Look up the (possibly rewritten) mode name; i-1 indexing and the
	 * FSTN filtering mirror sisfb_search_vesamode() above.
	 */
	i = 0; j = 0;
	while(sisbios_mode[i].mode_no[0] != 0) {
		if(!strnicmp(nameptr, sisbios_mode[i++].name, strlen(nameptr))) {
			if(sisfb_fstn) {
				if(sisbios_mode[i-1].mode_no[1] == 0x50 ||
				   sisbios_mode[i-1].mode_no[1] == 0x56 ||
				   sisbios_mode[i-1].mode_no[1] == 0x53)
					continue;
			} else {
				if(sisbios_mode[i-1].mode_no[1] == 0x5a ||
				   sisbios_mode[i-1].mode_no[1] == 0x5b)
					continue;
			}
			sisfb_mode_idx = i - 1;
			j = 1;
			break;
		}
	}
	if((!j) && !quiet)
		printk(KERN_ERR "sisfb: Invalid mode '%s'\n", nameptr);
}
#ifndef MODULE
/*
 * Built-in (non-module) only: adopt the VESA framebuffer mode the kernel
 * was booted with (vga= parameter) as the default mode, if it is sane.
 */
static void sisfb_get_vga_mode_from_kernel(void)
{
#ifdef CONFIG_X86
	char mymode[32];
	int mydepth = screen_info.lfb_depth;

	if(screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) return;

	/* Accept only plausible geometry/depth ranges */
	if( (screen_info.lfb_width >= 320) && (screen_info.lfb_width <= 2048) &&
	    (screen_info.lfb_height >= 200) && (screen_info.lfb_height <= 1536) &&
	    (mydepth >= 8) && (mydepth <= 32) ) {

		if(mydepth == 24) mydepth = 32;  /* 24bpp is handled as 32bpp */

		sprintf(mymode, "%ux%ux%u", screen_info.lfb_width,
					screen_info.lfb_height,
					mydepth);

		printk(KERN_DEBUG
			"sisfb: Using vga mode %s pre-set by kernel as default\n",
			mymode);

		sisfb_search_mode(mymode, true);
	}
#endif
	return;
}
#endif
/*
 * Map a CRT2 type name from the command line onto the sis_crt2type[]
 * table, setting sisfb_crt2type/tvplug/flags and the derived DSTN/FSTN
 * booleans. Complains if no entry matched.
 */
static void __init
sisfb_search_crt2type(const char *name)
{
	int idx;

	/* We don't know the hardware specs yet and there is no ivideo */

	if(name == NULL)
		return;

	for(idx = 0; sis_crt2type[idx].type_no != -1; idx++) {
		if(!strnicmp(name, sis_crt2type[idx].name,
				strlen(sis_crt2type[idx].name))) {
			sisfb_crt2type = sis_crt2type[idx].type_no;
			sisfb_tvplug = sis_crt2type[idx].tvplug_no;
			sisfb_crt2flags = sis_crt2type[idx].flags;
			break;
		}
	}

	sisfb_dstn = (sisfb_crt2flags & FL_550_DSTN) ? 1 : 0;
	sisfb_fstn = (sisfb_crt2flags & FL_550_FSTN) ? 1 : 0;

	/* Still -1 (the boot-time default) means nothing matched */
	if(sisfb_crt2type < 0)
		printk(KERN_ERR "sisfb: Invalid CRT2 type: %s\n", name);
}
/*
 * Map a TV standard name from the command line onto the sis_tvtype[]
 * table and record the matching type number in sisfb_tvstd.
 */
static void __init
sisfb_search_tvstd(const char *name)
{
	int idx;

	/* We don't know the hardware specs yet and there is no ivideo */

	if(name == NULL)
		return;

	for(idx = 0; sis_tvtype[idx].type_no != -1; idx++) {
		if(!strnicmp(name, sis_tvtype[idx].name,
				strlen(sis_tvtype[idx].name))) {
			sisfb_tvstd = sis_tvtype[idx].type_no;
			break;
		}
	}
}
/*
 * Handle the "specialtiming" parameter: either disable special timings
 * ("none") or force the quirk entry from mycustomttable[] whose option
 * name matches. Prints the list of valid names on a bad value.
 */
static void __init
sisfb_search_specialtiming(const char *name)
{
	int i = 0;
	bool found = false;

	/* We don't know the hardware specs yet and there is no ivideo */

	if(name == NULL)
		return;

	if(!strnicmp(name, "none", 4)) {
		sisfb_specialtiming = CUT_FORCENONE;
		printk(KERN_DEBUG "sisfb: Special timing disabled\n");
	} else {
		while(mycustomttable[i].chipID != 0) {
			if(!strnicmp(name,mycustomttable[i].optionName,
			    strlen(mycustomttable[i].optionName))) {
				sisfb_specialtiming = mycustomttable[i].SpecialID;
				found = true;
				printk(KERN_INFO "sisfb: Special timing for %s %s forced (\"%s\")\n",
					mycustomttable[i].vendorName,
					mycustomttable[i].cardName,
					mycustomttable[i].optionName);
				break;
			}
			i++;
		}
		if(!found) {
			/* Unknown name: list every supported option */
			printk(KERN_WARNING "sisfb: Invalid SpecialTiming parameter, valid are:");
			printk(KERN_WARNING "\t\"none\" (to disable special timings)\n");
			i = 0;
			while(mycustomttable[i].chipID != 0) {
				printk(KERN_WARNING "\t\"%s\" (for %s %s)\n",
					mycustomttable[i].optionName,
					mycustomttable[i].vendorName,
					mycustomttable[i].cardName);
				i++;
			}
		}
	}
}
/* ----------- Various detection routines ----------- */
/*
 * Identify known quirky cards by matching chip ID, PCI subsystem IDs and
 * (when a ROM is present) BIOS version/date strings, ROM checksum and
 * specific ROM byte "footprints" against mycustomttable[]; on a match,
 * record the quirk in ivideo->SiS_Pr.SiS_CustomT.
 */
static void sisfb_detect_custom_timing(struct sis_video_info *ivideo)
{
	unsigned char *biosver = NULL;
	unsigned char *biosdate = NULL;
	bool footprint;
	u32 chksum = 0;
	int i, j;

	if(ivideo->SiS_Pr.UseROM) {
		biosver = ivideo->SiS_Pr.VirtualRomBase + 0x06;
		biosdate = ivideo->SiS_Pr.VirtualRomBase + 0x2c;
		/* Checksum over the first 32K of the video BIOS image */
		for(i = 0; i < 32768; i++)
			chksum += ivideo->SiS_Pr.VirtualRomBase[i];
	}

	i = 0;
	do {
		/* Empty-string/zero table fields act as wildcards; ROM-based
		 * criteria only apply when a ROM was found.
		 */
		if( (mycustomttable[i].chipID == ivideo->chip) &&
		    ((!strlen(mycustomttable[i].biosversion)) ||
		     (ivideo->SiS_Pr.UseROM &&
		      (!strncmp(mycustomttable[i].biosversion, biosver,
				strlen(mycustomttable[i].biosversion))))) &&
		    ((!strlen(mycustomttable[i].biosdate)) ||
		     (ivideo->SiS_Pr.UseROM &&
		      (!strncmp(mycustomttable[i].biosdate, biosdate,
				strlen(mycustomttable[i].biosdate))))) &&
		    ((!mycustomttable[i].bioschksum) ||
		     (ivideo->SiS_Pr.UseROM &&
		      (mycustomttable[i].bioschksum == chksum))) &&
		    (mycustomttable[i].pcisubsysvendor == ivideo->subsysvendor) &&
		    (mycustomttable[i].pcisubsyscard == ivideo->subsysdevice) ) {
			/* All footprint bytes must match in the ROM image */
			footprint = true;
			for(j = 0; j < 5; j++) {
				if(mycustomttable[i].biosFootprintAddr[j]) {
					if(ivideo->SiS_Pr.UseROM) {
						if(ivideo->SiS_Pr.VirtualRomBase[mycustomttable[i].biosFootprintAddr[j]] !=
							mycustomttable[i].biosFootprintData[j]) {
							footprint = false;
						}
					} else
						footprint = false;
				}
			}
			if(footprint) {
				ivideo->SiS_Pr.SiS_CustomT = mycustomttable[i].SpecialID;
				printk(KERN_DEBUG "sisfb: Identified [%s %s], special timing applies\n",
					mycustomttable[i].vendorName,
					mycustomttable[i].cardName);
				printk(KERN_DEBUG "sisfb: [specialtiming parameter name: %s]\n",
					mycustomttable[i].optionName);
				break;
			}
		}
		i++;
	} while(mycustomttable[i].chipID);
}
/*
 * Parse a raw 128-byte EDID 1.x block and fill in the monitor's sync
 * ranges (hmin/hmax in kHz, vmin/vmax in Hz) and maximum dot clock.
 * Returns true if usable range data was extracted.
 */
static bool sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
{
	int i, j, xres, yres, refresh, index;
	u32 emodes;

	/* Fixed EDID header: 00 ff ff ff ff ff ff 00 */
	if(buffer[0] != 0x00 || buffer[1] != 0xff ||
	   buffer[2] != 0xff || buffer[3] != 0xff ||
	   buffer[4] != 0xff || buffer[5] != 0xff ||
	   buffer[6] != 0xff || buffer[7] != 0x00) {
		printk(KERN_DEBUG "sisfb: Bad EDID header\n");
		return false;
	}

	if(buffer[0x12] != 0x01) {
		printk(KERN_INFO "sisfb: EDID version %d not supported\n",
			buffer[0x12]);
		return false;
	}

	monitor->feature = buffer[0x18];

	if(!(buffer[0x14] & 0x80)) {
		if(!(buffer[0x14] & 0x08)) {
			printk(KERN_INFO
				"sisfb: WARNING: Monitor does not support separate syncs\n");
		}
	}

	if(buffer[0x13] >= 0x01) {
	   /* EDID V1 rev 1 and 2: Search for monitor descriptor
	    * to extract ranges
	    */
	    /* Walk the four 18-byte descriptor slots for a range-limits
	     * descriptor (tag 0x00 00 00 fd 00).
	     */
	    j = 0x36;
	    for(i=0; i<4; i++) {
	       if(buffer[j]     == 0x00 && buffer[j + 1] == 0x00 &&
		  buffer[j + 2] == 0x00 && buffer[j + 3] == 0xfd &&
		  buffer[j + 4] == 0x00) {
		  monitor->hmin = buffer[j + 7];
		  monitor->hmax = buffer[j + 8];
		  monitor->vmin = buffer[j + 5];
		  monitor->vmax = buffer[j + 6];
		  monitor->dclockmax = buffer[j + 9] * 10 * 1000;
		  monitor->datavalid = true;
		  break;
	       }
	       j += 18;
	    }
	}

	if(!monitor->datavalid) {
	   /* Otherwise: Get a range from the list of supported
	    * Estabished Timings. This is not entirely accurate,
	    * because fixed frequency monitors are not supported
	    * that way.
	    */
	   monitor->hmin = 65535; monitor->hmax = 0;
	   monitor->vmin = 65535; monitor->vmax = 0;
	   monitor->dclockmax = 0;
	   /* Established timings bitmap, bytes 0x23..0x25 */
	   emodes = buffer[0x23] | (buffer[0x24] << 8) | (buffer[0x25] << 16);
	   for(i = 0; i < 13; i++) {
	      if(emodes & sisfb_ddcsmodes[i].mask) {
		 if(monitor->hmin > sisfb_ddcsmodes[i].h) monitor->hmin = sisfb_ddcsmodes[i].h;
		 if(monitor->hmax < sisfb_ddcsmodes[i].h) monitor->hmax = sisfb_ddcsmodes[i].h + 1;
		 if(monitor->vmin > sisfb_ddcsmodes[i].v) monitor->vmin = sisfb_ddcsmodes[i].v;
		 if(monitor->vmax < sisfb_ddcsmodes[i].v) monitor->vmax = sisfb_ddcsmodes[i].v;
		 if(monitor->dclockmax < sisfb_ddcsmodes[i].d) monitor->dclockmax = sisfb_ddcsmodes[i].d;
	      }
	   }
	   /* Standard timings, eight 2-byte entries starting at 0x26 */
	   index = 0x26;
	   for(i = 0; i < 8; i++) {
	      xres = (buffer[index] + 31) * 8;
	      switch(buffer[index + 1] & 0xc0) {
		 case 0xc0: yres = (xres * 9) / 16; break;
		 case 0x80: yres = (xres * 4) /  5; break;
		 case 0x40: yres = (xres * 3) /  4; break;
		 default:   yres = xres;            break;
	      }
	      refresh = (buffer[index + 1] & 0x3f) + 60;
	      if((xres >= 640) && (yres >= 480)) {
		 for(j = 0; j < 8; j++) {
		    if((xres == sisfb_ddcfmodes[j].x) &&
		       (yres == sisfb_ddcfmodes[j].y) &&
		       (refresh == sisfb_ddcfmodes[j].v)) {
		      if(monitor->hmin > sisfb_ddcfmodes[j].h) monitor->hmin = sisfb_ddcfmodes[j].h;
		      if(monitor->hmax < sisfb_ddcfmodes[j].h) monitor->hmax = sisfb_ddcfmodes[j].h + 1;
		      /* NOTE(review): the v/d updates below index
		       * sisfb_ddcsmodes[j] although the match was made
		       * against sisfb_ddcfmodes[j] — looks like a
		       * copy-paste slip (present upstream too); confirm
		       * against the two mode tables before changing.
		       */
		      if(monitor->vmin > sisfb_ddcsmodes[j].v) monitor->vmin = sisfb_ddcsmodes[j].v;
		      if(monitor->vmax < sisfb_ddcsmodes[j].v) monitor->vmax = sisfb_ddcsmodes[j].v;
		      if(monitor->dclockmax < sisfb_ddcsmodes[j].d) monitor->dclockmax = sisfb_ddcsmodes[j].d;
		    }
		 }
	      }
	      index += 2;
	   }
	   if((monitor->hmin <= monitor->hmax) && (monitor->vmin <= monitor->vmax)) {
	      monitor->datavalid = true;
	   }
	}

	return monitor->datavalid;
}
/*
 * sisfb_handle_ddc - probe DDC on a CRT output and interpret its EDID.
 * @ivideo:  driver instance data
 * @monitor: filled with the monitor's frequency ranges on success
 * @crtno:   0 for CRT1, 1 for CRT2
 *
 * On any failure monitor->datavalid stays false; progress and results
 * are reported via printk only.
 */
static void sisfb_handle_ddc(struct sis_video_info *ivideo,
		struct sisfb_monitor *monitor, int crtno)
{
	unsigned short temp, i, realcrtno = crtno;
	unsigned char buffer[256];

	monitor->datavalid = false;

	if(crtno) {
		/* Map CRT2 to the DDC channel of the attached device type */
		if(ivideo->vbflags & CRT2_LCD) realcrtno = 1;
		else if(ivideo->vbflags & CRT2_VGA) realcrtno = 2;
		else return;
	}

	/* Do not probe CRT1 when it has been switched off */
	if((ivideo->sisfb_crt1off) && (!crtno))
		return;

	/* First call (op 0): query which DDC levels the display supports */
	temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
				realcrtno, 0, &buffer[0], ivideo->vbflags2);
	if((!temp) || (temp == 0xffff)) {
		printk(KERN_INFO "sisfb: CRT%d DDC probing failed\n", crtno + 1);
		return;
	} else {
		printk(KERN_INFO "sisfb: CRT%d DDC supported\n", crtno + 1);
		printk(KERN_INFO "sisfb: CRT%d DDC level: %s%s%s%s\n",
			crtno + 1,
			(temp & 0x1a) ? "" : "[none of the supported]",
			(temp & 0x02) ? "2 " : "",
			(temp & 0x08) ? "D&P" : "",
			(temp & 0x10) ? "FPDI-2" : "");
		if(temp & 0x02) {
			i = 3;  /* Number of retrys */
			do {
				/* Second call (op 1): read the EDID block */
				temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
						ivideo->sisvga_engine, realcrtno, 1,
						&buffer[0], ivideo->vbflags2);
			} while((temp) && i--);
			if(!temp) {
				if(sisfb_interpret_edid(monitor, &buffer[0])) {
					printk(KERN_INFO "sisfb: Monitor range H %d-%dKHz, V %d-%dHz, Max. dotclock %dMHz\n",
						monitor->hmin, monitor->hmax, monitor->vmin, monitor->vmax,
						monitor->dclockmax / 1000);
				} else {
					printk(KERN_INFO "sisfb: CRT%d DDC EDID corrupt\n", crtno + 1);
				}
			} else {
				printk(KERN_INFO "sisfb: CRT%d DDC reading failed\n", crtno + 1);
			}
		} else {
			printk(KERN_INFO "sisfb: VESA D&P and FPDI-2 not supported yet\n");
		}
	}
}
/* -------------- Mode validation --------------- */
/*
 * sisfb_verify_rate - check a mode/rate combination against the
 * monitor limits read from EDID.
 * @monitor:  limits obtained by sisfb_handle_ddc()
 * @mode_idx: index into sisbios_mode[], or < 0 for "no mode"
 * @rate_idx: rate index passed through to sisfb_gettotalfrommode()
 * @rate:     vertical refresh rate in Hz
 *
 * Returns true when the mode is acceptable.  All comparisons allow a
 * small amount of slack (+/-1 Hz/kHz, +1 MHz dotclock) to tolerate
 * rounding in the EDID data and our own calculations.
 */
static bool
sisfb_verify_rate(struct sis_video_info *ivideo, struct sisfb_monitor *monitor,
		int mode_idx, int rate_idx, int rate)
{
	int htotal, vtotal;
	unsigned int dclock, hsync;

	/* Without EDID data we cannot reject anything */
	if(!monitor->datavalid)
		return true;

	if(mode_idx < 0)
		return false;

	/* Skip for 320x200, 320x240, 640x400 */
	switch(sisbios_mode[mode_idx].mode_no[ivideo->mni]) {
	case 0x59:
	case 0x41:
	case 0x4f:
	case 0x50:
	case 0x56:
	case 0x53:
	case 0x2f:
	case 0x5d:
	case 0x5e:
		return true;
#ifdef CONFIG_FB_SIS_315
	case 0x5a:
	case 0x5b:
		/* Exempt only on 315 series; otherwise fall out of the
		 * switch and run the normal range checks below. */
		if(ivideo->sisvga_engine == SIS_315_VGA) return true;
#endif
	}

	/* Vertical refresh range check */
	if(rate < (monitor->vmin - 1))
		return false;
	if(rate > (monitor->vmax + 1))
		return false;

	if(sisfb_gettotalfrommode(&ivideo->SiS_Pr,
				  sisbios_mode[mode_idx].mode_no[ivideo->mni],
				  &htotal, &vtotal, rate_idx)) {
		/* Derive dotclock (kHz) and hsync (kHz) from the totals */
		dclock = (htotal * vtotal * rate) / 1000;
		if(dclock > (monitor->dclockmax + 1000))
			return false;
		hsync = dclock / htotal;
		if(hsync < (monitor->hmin - 1))
			return false;
		if(hsync > (monitor->hmax + 1))
			return false;
	} else {
		/* Timing data for this mode not available */
		return false;
	}

	return true;
}
/*
 * sisfb_validate_mode - check whether sisbios_mode[myindex] can be
 * driven with the current chip and the display(s) selected in @vbflags.
 *
 * Returns the (possibly adjusted) mode index, or -1 when the mode is
 * not usable.  For FSTN panels the index may be rewritten to one of
 * the special 320x240 FSTN modes.
 */
static int
sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
{
	u16 xres=0, yres, myres;

	/* Reject modes not flagged for this chip generation */
#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		if(!(sisbios_mode[myindex].chipset & MD_SIS300))
			return -1;
	}
#endif
#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		if(!(sisbios_mode[myindex].chipset & MD_SIS315))
			return -1;
	}
#endif

	myres = sisbios_mode[myindex].yres;

	switch(vbflags & VB_DISPTYPE_DISP2) {

	case CRT2_LCD:
		xres = ivideo->lcdxres; yres = ivideo->lcdyres;

		/* Except for two special panels, the mode must not exceed
		 * the panel's native resolution. */
		if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
		   (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
			if(sisbios_mode[myindex].xres > xres)
				return -1;
			if(myres > yres)
				return -1;
		}

		/* FSTN panels use dedicated 320x240 mode entries */
		if(ivideo->sisfb_fstn) {
			if(sisbios_mode[myindex].xres == 320) {
				if(myres == 240) {
					switch(sisbios_mode[myindex].mode_no[1]) {
					case 0x50: myindex = MODE_FSTN_8; break;
					case 0x56: myindex = MODE_FSTN_16; break;
					case 0x53: return -1;
					}
				}
			}
		}

		/* BIOS mode IDs below 0x14 are not usable for LCD */
		if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
				     sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
				     ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
			return -1;
		}
		break;

	case CRT2_TV:
		if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
				    sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
			return -1;
		}
		break;

	case CRT2_VGA:
		if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
				      sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
			return -1;
		}
		break;
	}

	return myindex;
}
/*
 * sisfb_search_refresh_rate - find the rate index for a desired refresh
 * rate in the sisfb_vrate[] table.
 * @rate:     desired vertical refresh rate in Hz
 * @mode_idx: index into sisbios_mode[] (provides xres/yres to match)
 *
 * Sets and returns ivideo->rate_idx (0 if the rate is unsupported for
 * this resolution).  May adjust ivideo->refresh_rate up to 3 Hz up or
 * 2 Hz down to the nearest table entry.  Relies on sisfb_vrate[] being
 * sorted by xres, and per resolution by ascending refresh rate.
 */
static u8
sisfb_search_refresh_rate(struct sis_video_info *ivideo, unsigned int rate, int mode_idx)
{
	int i = 0;
	u16 xres = sisbios_mode[mode_idx].xres;
	u16 yres = sisbios_mode[mode_idx].yres;

	ivideo->rate_idx = 0;
	while((sisfb_vrate[i].idx != 0) && (sisfb_vrate[i].xres <= xres)) {
		if((sisfb_vrate[i].xres == xres) && (sisfb_vrate[i].yres == yres)) {
			if(sisfb_vrate[i].refresh == rate) {
				/* Exact match */
				ivideo->rate_idx = sisfb_vrate[i].idx;
				break;
			} else if(sisfb_vrate[i].refresh > rate) {
				/* Passed the desired rate: round up (<=3 Hz)
				 * or fall back to the previous entry (<=2 Hz
				 * down; idx != 1 guards the i-1 access). */
				if((sisfb_vrate[i].refresh - rate) <= 3) {
					DPRINTK("sisfb: Adjusting rate from %d up to %d\n",
						rate, sisfb_vrate[i].refresh);
					ivideo->rate_idx = sisfb_vrate[i].idx;
					ivideo->refresh_rate = sisfb_vrate[i].refresh;
				} else if((sisfb_vrate[i].idx != 1) &&
					  ((rate - sisfb_vrate[i-1].refresh) <= 2)) {
					DPRINTK("sisfb: Adjusting rate from %d down to %d\n",
						rate, sisfb_vrate[i-1].refresh);
					ivideo->rate_idx = sisfb_vrate[i-1].idx;
					ivideo->refresh_rate = sisfb_vrate[i-1].refresh;
				}
				break;
			} else if((rate - sisfb_vrate[i].refresh) <= 2) {
				/* Slightly above this entry: round down */
				DPRINTK("sisfb: Adjusting rate from %d down to %d\n",
					rate, sisfb_vrate[i].refresh);
				ivideo->rate_idx = sisfb_vrate[i].idx;
				break;
			}
		}
		i++;
	}

	if(ivideo->rate_idx > 0) {
		return ivideo->rate_idx;
	} else {
		printk(KERN_INFO "sisfb: Unsupported rate %d for %dx%d\n",
			rate, xres, yres);
		return 0;
	}
}
/*
 * sisfb_bridgeisslave - report whether the video bridge operates in
 * slave mode.  Evaluates the engine-specific bits of Part1 register
 * 0x00; without a video bridge the answer is always false.
 */
static bool
sisfb_bridgeisslave(struct sis_video_info *ivideo)
{
	unsigned char p1;

	if(!(ivideo->vbflags2 & VB2_VIDEOBRIDGE))
		return false;

	p1 = SiS_GetReg(SISPART1, 0x00);

	if(ivideo->sisvga_engine == SIS_300_VGA)
		return (p1 & 0xa0) == 0x20;
	if(ivideo->sisvga_engine == SIS_315_VGA)
		return (p1 & 0x50) == 0x10;

	return false;
}
/*
 * sisfballowretracecrt1 - decide whether polling the CRT1 retrace
 * status is meaningful: CR17 bit 7 must be set and SR1F bits 6/7
 * must both be clear.
 */
static bool
sisfballowretracecrt1(struct sis_video_info *ivideo)
{
	if(!(SiS_GetReg(SISCR, 0x17) & 0x80))
		return false;

	return (SiS_GetReg(SISSR, 0x1f) & 0xc0) == 0;
}
/*
 * sisfbcheckvretracecrt1 - sample the CRT1 vertical retrace status
 * (input status bit 0x08).  False when retrace polling is disallowed.
 */
static bool
sisfbcheckvretracecrt1(struct sis_video_info *ivideo)
{
	if(!sisfballowretracecrt1(ivideo))
		return false;

	return (SiS_GetRegByte(SISINPSTAT) & 0x08) != 0;
}
/*
 * sisfbwaitretracecrt1 - busy-wait for one full CRT1 vertical retrace
 * (bit 0x08 of the input status register to go high, then low again).
 * Each phase is bounded by a watchdog counter so we never spin forever
 * if the status bit does not toggle.
 */
static void
sisfbwaitretracecrt1(struct sis_video_info *ivideo)
{
	int watchdog;

	if(!sisfballowretracecrt1(ivideo))
		return;

	/* Wait for retrace to start ... */
	watchdog = 65536;
	while((!(SiS_GetRegByte(SISINPSTAT) & 0x08)) && --watchdog);
	/* ... and then to finish */
	watchdog = 65536;
	while((SiS_GetRegByte(SISINPSTAT) & 0x08) && --watchdog);
}
/*
 * sisfbcheckvretracecrt2 - sample the CRT2 vertical retrace status
 * bit (0x02) from the engine-specific Part1 status register.
 */
static bool
sisfbcheckvretracecrt2(struct sis_video_info *ivideo)
{
	unsigned char reg;

	switch(ivideo->sisvga_engine) {
	case SIS_300_VGA:
		reg = 0x25;
		break;
	case SIS_315_VGA:
		reg = 0x30;
		break;
	default:
		return false;
	}

	return (SiS_GetReg(SISPART1, reg) & 0x02) != 0;
}
/*
 * sisfb_CheckVBRetrace - report vertical retrace for the active
 * display path: CRT2 when a second display is driven and the bridge
 * is not in slave mode, CRT1 otherwise.
 */
static bool
sisfb_CheckVBRetrace(struct sis_video_info *ivideo)
{
	if((ivideo->currentvbflags & VB_DISPTYPE_DISP2) &&
	   !sisfb_bridgeisslave(ivideo))
		return sisfbcheckvretracecrt2(ivideo);

	return sisfbcheckvretracecrt1(ivideo);
}
/*
 * sisfb_setupvbblankflags - assemble FB_VBLANK_* flags plus the current
 * vertical/horizontal counters for the FBIOGET_VBLANK ioctl.
 * @vcount/@hcount: output; zeroed first, filled when counters are
 *                  available for the active display path.
 *
 * Uses the bridge (Part1) registers when CRT2 is active and the bridge
 * is not in slave mode, otherwise the CRT1 CRTC registers.
 */
static u32
sisfb_setupvbblankflags(struct sis_video_info *ivideo, u32 *vcount, u32 *hcount)
{
	u8 idx, reg1, reg2, reg3, reg4;
	u32 ret = 0;

	(*vcount) = (*hcount) = 0;

	if((ivideo->currentvbflags & VB_DISPTYPE_DISP2) && (!(sisfb_bridgeisslave(ivideo)))) {
		ret |= (FB_VBLANK_HAVE_VSYNC  |
			FB_VBLANK_HAVE_HBLANK |
			FB_VBLANK_HAVE_VBLANK |
			FB_VBLANK_HAVE_VCOUNT |
			FB_VBLANK_HAVE_HCOUNT);
		/* Engine-specific base index of the Part1 status block */
		switch(ivideo->sisvga_engine) {
		case SIS_300_VGA: idx = 0x25; break;
		default:
		case SIS_315_VGA: idx = 0x30; break;
		}
		reg1 = SiS_GetReg(SISPART1, (idx+0)); /* 30 */
		reg2 = SiS_GetReg(SISPART1, (idx+1)); /* 31 */
		reg3 = SiS_GetReg(SISPART1, (idx+2)); /* 32 */
		reg4 = SiS_GetReg(SISPART1, (idx+3)); /* 33 */
		if(reg1 & 0x01) ret |= FB_VBLANK_VBLANKING;
		if(reg1 & 0x02) ret |= FB_VBLANK_VSYNCING;
		if(reg4 & 0x80) ret |= FB_VBLANK_HBLANKING;
		/* reg4 carries the high bits of both counters */
		(*vcount) = reg3 | ((reg4 & 0x70) << 4);
		(*hcount) = reg2 | ((reg4 & 0x0f) << 8);
	} else if(sisfballowretracecrt1(ivideo)) {
		ret |= (FB_VBLANK_HAVE_VSYNC  |
			FB_VBLANK_HAVE_VBLANK |
			FB_VBLANK_HAVE_VCOUNT |
			FB_VBLANK_HAVE_HCOUNT);
		reg1 = SiS_GetRegByte(SISINPSTAT);
		if(reg1 & 0x08) ret |= FB_VBLANK_VSYNCING;
		if(reg1 & 0x01) ret |= FB_VBLANK_VBLANKING;
		/* NOTE(review): CR 0x20 is read and the value discarded;
		 * presumably this read latches the counters read from
		 * CR 0x1b-0x1d below — kept exactly as-is, confirm
		 * against chip documentation before changing. */
		reg1 = SiS_GetReg(SISCR, 0x20);
		reg1 = SiS_GetReg(SISCR, 0x1b);
		reg2 = SiS_GetReg(SISCR, 0x1c);
		reg3 = SiS_GetReg(SISCR, 0x1d);
		(*vcount) = reg2 | ((reg3 & 0x07) << 8);
		(*hcount) = (reg1 | ((reg3 & 0x10) << 4)) << 3;
	}

	return ret;
}
/*
 * sisfb_myblank - implement fb_blank for all chip/bridge combinations.
 * @blank: one of the FB_BLANK_* levels.
 *
 * Translates the blank level into register values for CRT1 (SR01/SR1F/
 * CR63), the bridge (Part1 0x13 / Part2 0x00) and a backlight on/off
 * decision for LVDS/Chrontel panels, then applies whatever subset is
 * relevant for the currently driven outputs.  Returns 0 on success,
 * 1 for an unknown blank level.
 */
static int
sisfb_myblank(struct sis_video_info *ivideo, int blank)
{
	u8 sr01, sr11, sr1f, cr63=0, p2_0, p1_13;
	bool backlight = true;

	/* Per-level register values; unknown levels are rejected */
	switch(blank) {
	case FB_BLANK_UNBLANK:	/* on */
		sr01  = 0x00;
		sr11  = 0x00;
		sr1f  = 0x00;
		cr63  = 0x00;
		p2_0  = 0x20;
		p1_13 = 0x00;
		backlight = true;
		break;
	case FB_BLANK_NORMAL:	/* blank */
		sr01  = 0x20;
		sr11  = 0x00;
		sr1f  = 0x00;
		cr63  = 0x00;
		p2_0  = 0x20;
		p1_13 = 0x00;
		backlight = true;
		break;
	case FB_BLANK_VSYNC_SUSPEND:	/* no vsync */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0x80;
		cr63  = 0x40;
		p2_0  = 0x40;
		p1_13 = 0x80;
		backlight = false;
		break;
	case FB_BLANK_HSYNC_SUSPEND:	/* no hsync */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0x40;
		cr63  = 0x40;
		p2_0  = 0x80;
		p1_13 = 0x40;
		backlight = false;
		break;
	case FB_BLANK_POWERDOWN:	/* off */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0xc0;
		cr63  = 0x40;
		p2_0  = 0xc0;
		p1_13 = 0xc0;
		backlight = false;
		break;
	default:
		return 1;
	}

	/* CRT1: only touch it when the monitor's DPMS features allow it
	 * (or when we have no monitor data at all) */
	if(ivideo->currentvbflags & VB_DISPTYPE_CRT1) {

		if( (!ivideo->sisfb_thismonitor.datavalid) ||
		    ((ivideo->sisfb_thismonitor.datavalid) &&
		     (ivideo->sisfb_thismonitor.feature & 0xe0))) {

			if(ivideo->sisvga_engine == SIS_315_VGA) {
				SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xbf, cr63);
			}

			if(!(sisfb_bridgeisslave(ivideo))) {
				SiS_SetRegANDOR(SISSR, 0x01, ~0x20, sr01);
				SiS_SetRegANDOR(SISSR, 0x1f, 0x3f, sr1f);
			}
		}

	}

	if(ivideo->currentvbflags & CRT2_LCD) {

		/* Panel backlight control depends on the bridge type */
		if(ivideo->vbflags2 & VB2_SISLVDSBRIDGE) {
			if(backlight) {
				SiS_SiS30xBLOn(&ivideo->SiS_Pr);
			} else {
				SiS_SiS30xBLOff(&ivideo->SiS_Pr);
			}
		} else if(ivideo->sisvga_engine == SIS_315_VGA) {
#ifdef CONFIG_FB_SIS_315
			if(ivideo->vbflags2 & VB2_CHRONTEL) {
				if(backlight) {
					SiS_Chrontel701xBLOn(&ivideo->SiS_Pr);
				} else {
					SiS_Chrontel701xBLOff(&ivideo->SiS_Pr);
				}
			}
#endif
		}

		if(((ivideo->sisvga_engine == SIS_300_VGA) &&
		    (ivideo->vbflags2 & (VB2_301|VB2_30xBDH|VB2_LVDS))) ||
		   ((ivideo->sisvga_engine == SIS_315_VGA) &&
		    ((ivideo->vbflags2 & (VB2_LVDS | VB2_CHRONTEL)) == VB2_LVDS))) {
			SiS_SetRegANDOR(SISSR, 0x11, ~0x0c, sr11);
		}

		/* Bridge blanking register differs by engine */
		if(ivideo->sisvga_engine == SIS_300_VGA) {
			if((ivideo->vbflags2 & VB2_30xB) &&
			   (!(ivideo->vbflags2 & VB2_30xBDH))) {
				SiS_SetRegANDOR(SISPART1, 0x13, 0x3f, p1_13);
			}
		} else if(ivideo->sisvga_engine == SIS_315_VGA) {
			if((ivideo->vbflags2 & VB2_30xB) &&
			   (!(ivideo->vbflags2 & VB2_30xBDH))) {
				SiS_SetRegANDOR(SISPART2, 0x00, 0x1f, p2_0);
			}
		}

	} else if(ivideo->currentvbflags & CRT2_VGA) {

		if(ivideo->vbflags2 & VB2_30xB) {
			SiS_SetRegANDOR(SISPART2, 0x00, 0x1f, p2_0);
		}

	}

	return 0;
}
/* ------------- Callbacks from init.c/init301.c -------------- */
#ifdef CONFIG_FB_SIS_300
/* Callback for init.c: read a dword from the northbridge's PCI
 * config space.  Returns 0 when the read fails. */
unsigned int
sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u32 result = 0;

	pci_read_config_dword(ivideo->nbridge, reg, &result);

	return (unsigned int)result;
}
/* Callback for init.c: write a dword into the northbridge's PCI
 * config space. */
void
sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, unsigned int val)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;

	pci_write_config_dword(ivideo->nbridge, reg, (u32)val);
}
/* Callback for init.c: read a dword from the LPC device's PCI
 * config space; returns 0 when no LPC device was detected. */
unsigned int
sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u32 result = 0;

	if(!ivideo->lpcdev)
		return 0;

	pci_read_config_dword(ivideo->lpcdev, reg, &result);

	return (unsigned int)result;
}
#endif
#ifdef CONFIG_FB_SIS_315
/* Callback for init.c (315 series): write a byte into the
 * northbridge's PCI config space. */
void
sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, unsigned char val)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;

	pci_write_config_byte(ivideo->nbridge, reg, (u8)val);
}
/* Callback for init.c (315 series): read a word from the LPC device's
 * PCI config space; returns 0 when no LPC device was detected. */
unsigned int
sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u16 result = 0;

	if(!ivideo->lpcdev)
		return 0;

	pci_read_config_word(ivideo->lpcdev, reg, &result);

	return (unsigned int)result;
}
#endif
/* ----------- FBDev related routines for all series ----------- */
/* Colormap length for a given depth: the full 256-entry hardware
 * palette at 8bpp, 16 pseudo-palette entries for truecolor modes. */
static int
sisfb_get_cmap_len(const struct fb_var_screeninfo *var)
{
	if(var->bits_per_pixel == 8)
		return 256;

	return 16;
}
/*
 * sisfb_set_vparms - derive 2D engine color setup (DstColor,
 * SiS310_AccelDepth) and colormap length from the current depth.
 * Unsupported depths disable acceleration.
 */
static void
sisfb_set_vparms(struct sis_video_info *ivideo)
{
	if(ivideo->video_bpp == 8) {
		ivideo->DstColor = 0x0000;
		ivideo->SiS310_AccelDepth = 0x00000000;
		ivideo->video_cmap_len = 256;
	} else if(ivideo->video_bpp == 16) {
		ivideo->DstColor = 0x8000;
		ivideo->SiS310_AccelDepth = 0x00010000;
		ivideo->video_cmap_len = 16;
	} else if(ivideo->video_bpp == 32) {
		ivideo->DstColor = 0xC000;
		ivideo->SiS310_AccelDepth = 0x00020000;
		ivideo->video_cmap_len = 16;
	} else {
		ivideo->video_cmap_len = 16;
		printk(KERN_ERR "sisfb: Unsupported depth %d", ivideo->video_bpp);
		ivideo->accel = 0;
	}
}
/*
 * sisfb_calc_maxyres - number of full scanlines that fit into the
 * usable framebuffer memory for the given virtual width and depth,
 * capped at 32767 (signed 16-bit limit used by the timing code).
 *
 * Guards against a zero line length (degenerate xres_virtual or a
 * depth below 8 bpp), which previously caused a division by zero.
 */
static int
sisfb_calc_maxyres(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	unsigned int linelen = var->xres_virtual * (var->bits_per_pixel >> 3);
	int maxyres;

	if(!linelen)
		return 0;

	maxyres = ivideo->sisfb_mem / linelen;
	if(maxyres > 32767) maxyres = 32767;

	return maxyres;
}
/*
 * sisfb_calc_pitch - compute the framebuffer line length and the CRT1
 * scanline pitch.  Interlaced modes double the CRT1 pitch, except
 * when the panel is driven via LCDA.
 */
static void
sisfb_calc_pitch(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	ivideo->video_linelength = var->xres_virtual * (var->bits_per_pixel >> 3);
	ivideo->scrnpitchCRT1 = ivideo->video_linelength;

	if(!(ivideo->currentvbflags & CRT1_LCDA) &&
	   ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED))
		ivideo->scrnpitchCRT1 <<= 1;
}
/*
 * sisfb_set_pitch - program the scanline pitch (in units of 8 bytes)
 * into the CRT1 CRTC and the CRT2 bridge registers, honouring bridge
 * slave mode.
 */
static void
sisfb_set_pitch(struct sis_video_info *ivideo)
{
	bool isslavemode = false;
	unsigned short HDisplay1 = ivideo->scrnpitchCRT1 >> 3;
	unsigned short HDisplay2 = ivideo->video_linelength >> 3;

	if(sisfb_bridgeisslave(ivideo)) isslavemode = true;

	/* We need to set pitch for CRT1 if bridge is in slave mode, too */
	if((ivideo->currentvbflags & VB_DISPTYPE_DISP1) || (isslavemode)) {
		SiS_SetReg(SISCR, 0x13, (HDisplay1 & 0xFF));
		SiS_SetRegANDOR(SISSR, 0x0E, 0xF0, (HDisplay1 >> 8));
	}

	/* We must not set the pitch for CRT2 if bridge is in slave mode */
	if((ivideo->currentvbflags & VB_DISPTYPE_DISP2) && (!isslavemode)) {
		/* Unlock the CRT2 register set before writing */
		SiS_SetRegOR(SISPART1, ivideo->CRT2_write_enable, 0x01);
		SiS_SetReg(SISPART1, 0x07, (HDisplay2 & 0xFF));
		SiS_SetRegANDOR(SISPART1, 0x09, 0xF0, (HDisplay2 >> 8));
	}
}
/*
 * sisfb_bpp_to_var - fill the RGB(A) bitfield layout in @var for the
 * selected depth and refresh the colormap length.
 */
static void
sisfb_bpp_to_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	ivideo->video_cmap_len = sisfb_get_cmap_len(var);

	switch(var->bits_per_pixel) {
	case 8:
		/* Pseudocolor: all channels come from the 8-bit palette */
		var->red.offset = 0;
		var->green.offset = 0;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		break;
	case 16:
		/* RGB565 */
		var->red.offset = 11;	var->red.length = 5;
		var->green.offset = 5;	var->green.length = 6;
		var->blue.offset = 0;	var->blue.length = 5;
		var->transp.offset = 0;	var->transp.length = 0;
		break;
	case 32:
		/* ARGB8888 */
		var->red.offset = 16;	var->red.length = 8;
		var->green.offset = 8;	var->green.length = 8;
		var->blue.offset = 0;	var->blue.length = 8;
		var->transp.offset = 24;	var->transp.length = 8;
		break;
	}
}
/*
 * sisfb_set_mode - program video mode ivideo->mode_no into the
 * hardware via the BIOS-emulation layer (SiSSetMode), bracketed by
 * the driver's pre/post mode hooks.
 * @clrscrn: requested screen clearing; currently unused because the
 *           no-clear flag (0x80) is always set — fbcon clears anyway.
 *
 * Returns 0 on success, -EINVAL when SiSSetMode rejects the mode.
 */
static int
sisfb_set_mode(struct sis_video_info *ivideo, int clrscrn)
{
	unsigned short modeno = ivideo->mode_no;

	/* >=2.6.12's fbcon clears the screen anyway */
	modeno |= 0x80;

	/* Unlock the extended registers before touching the hardware */
	SiS_SetReg(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);

	sisfb_pre_setmode(ivideo);

	if(!SiSSetMode(&ivideo->SiS_Pr, modeno)) {
		printk(KERN_ERR "sisfb: Setting mode[0x%x] failed\n", ivideo->mode_no);
		return -EINVAL;
	}

	/* SiSSetMode may re-lock the registers; unlock again */
	SiS_SetReg(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);

	sisfb_post_setmode(ivideo);

	return 0;
}
/*
 * sisfb_do_set_var - apply a fb_var_screeninfo: find a matching entry
 * in sisbios_mode[], validate it, pick a refresh rate, and — when
 * @isactive — actually program the hardware and update the driver's
 * current-mode bookkeeping.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, or the error
 * from sisfb_set_mode().  On mode-lookup failure the previous mode
 * index is restored.
 */
static int
sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
	unsigned int htotal = 0, vtotal = 0;
	unsigned int drate = 0, hrate = 0;
	int found_mode = 0, ret;
	int old_mode;
	u32 pixclock;

	htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len;
	vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
	pixclock = var->pixclock;

	/* Total vertical lines, adjusted for the scan mode.
	 * NOTE(review): the NONINTERLACED branch doubles vtotal just like
	 * the INTERLACED one — kept as-is, the refresh-rate formula below
	 * (hrate * 2 / vtotal) compensates for the factor of two. */
	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
		vtotal += var->yres;
		vtotal <<= 1;
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
		vtotal += var->yres;
		vtotal <<= 2;
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
		vtotal += var->yres;
		vtotal <<= 1;
	} else	vtotal += var->yres;

	if(!(htotal) || !(vtotal)) {
		DPRINTK("sisfb: Invalid 'var' information\n");
		return -EINVAL;
	}

	/* Derive the refresh rate from pixclock and the totals;
	 * fall back to 60 Hz when no clock is given */
	if(pixclock && htotal && vtotal) {
		drate = 1000000000 / pixclock;
		hrate = (drate * 1000) / htotal;
		ivideo->refresh_rate = (unsigned int) (hrate * 2 / vtotal);
	} else {
		ivideo->refresh_rate = 60;
	}

	old_mode = ivideo->sisfb_mode_idx;
	ivideo->sisfb_mode_idx = 0;

	/* Linear search; sisbios_mode[] is ordered by ascending xres */
	while( (sisbios_mode[ivideo->sisfb_mode_idx].mode_no[0] != 0) &&
	       (sisbios_mode[ivideo->sisfb_mode_idx].xres <= var->xres) ) {
		if( (sisbios_mode[ivideo->sisfb_mode_idx].xres == var->xres) &&
		    (sisbios_mode[ivideo->sisfb_mode_idx].yres == var->yres) &&
		    (sisbios_mode[ivideo->sisfb_mode_idx].bpp == var->bits_per_pixel)) {
			ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];
			found_mode = 1;
			break;
		}
		ivideo->sisfb_mode_idx++;
	}

	if(found_mode) {
		ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo,
				ivideo->sisfb_mode_idx, ivideo->currentvbflags);
	} else {
		ivideo->sisfb_mode_idx = -1;
	}

	if(ivideo->sisfb_mode_idx < 0) {
		printk(KERN_ERR "sisfb: Mode %dx%dx%d not supported\n", var->xres,
		       var->yres, var->bits_per_pixel);
		/* Roll back to the previously active mode */
		ivideo->sisfb_mode_idx = old_mode;
		return -EINVAL;
	}

	ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];

	/* If the desired rate is unsupported, use the mode's default */
	if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) {
		ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx;
		ivideo->refresh_rate = 60;
	}

	if(isactive) {
		/* If acceleration to be used? Need to know
		 * before pre/post_set_mode()
		 */
		ivideo->accel = 0;
#if defined(FBINFO_HWACCEL_DISABLED) && defined(FBINFO_HWACCEL_XPAN)
#ifdef STUPID_ACCELF_TEXT_SHIT
		if(var->accel_flags & FB_ACCELF_TEXT) {
			info->flags &= ~FBINFO_HWACCEL_DISABLED;
		} else {
			info->flags |= FBINFO_HWACCEL_DISABLED;
		}
#endif
		if(!(info->flags & FBINFO_HWACCEL_DISABLED)) ivideo->accel = -1;
#else
		if(var->accel_flags & FB_ACCELF_TEXT) ivideo->accel = -1;
#endif

		if((ret = sisfb_set_mode(ivideo, 1))) {
			return ret;
		}

		/* Record the now-active mode parameters */
		ivideo->video_bpp    = sisbios_mode[ivideo->sisfb_mode_idx].bpp;
		ivideo->video_width  = sisbios_mode[ivideo->sisfb_mode_idx].xres;
		ivideo->video_height = sisbios_mode[ivideo->sisfb_mode_idx].yres;

		sisfb_calc_pitch(ivideo, var);
		sisfb_set_pitch(ivideo);

		sisfb_set_vparms(ivideo);

		ivideo->current_width = ivideo->video_width;
		ivideo->current_height = ivideo->video_height;
		ivideo->current_bpp = ivideo->video_bpp;
		ivideo->current_htotal = htotal;
		ivideo->current_vtotal = vtotal;
		ivideo->current_linelength = ivideo->video_linelength;
		ivideo->current_pixclock = var->pixclock;
		ivideo->current_refresh_rate = ivideo->refresh_rate;
		/* Remember the rate so it can be restored on depth changes */
		ivideo->sisfb_lastrates[ivideo->mode_no] = ivideo->refresh_rate;
	}

	return 0;
}
/*
 * sisfb_set_base_CRT1 - program the CRT1 display start address.
 * @base: start address; the registers written below take it split
 *        into CR0D (low), CR0C (mid), SR0D (high) and — on 315
 *        series — bit 24 in SR37 bit 0.
 */
static void
sisfb_set_base_CRT1(struct sis_video_info *ivideo, unsigned int base)
{
	/* Unlock the extended registers first */
	SiS_SetReg(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);

	SiS_SetReg(SISCR, 0x0D, base & 0xFF);
	SiS_SetReg(SISCR, 0x0C, (base >> 8) & 0xFF);
	SiS_SetReg(SISSR, 0x0D, (base >> 16) & 0xFF);
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		SiS_SetRegANDOR(SISSR, 0x37, 0xFE, (base >> 24) & 0x01);
	}
}
/*
 * sisfb_set_base_CRT2 - program the CRT2 (bridge) display start
 * address, split into Part1 0x06/0x05/0x04 plus — on 315 series —
 * bit 24 in Part1 0x02 bit 7.  No-op when no second display is
 * currently driven.
 */
static void
sisfb_set_base_CRT2(struct sis_video_info *ivideo, unsigned int base)
{
	if(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
		/* Unlock the CRT2 register set before writing */
		SiS_SetRegOR(SISPART1, ivideo->CRT2_write_enable, 0x01);
		SiS_SetReg(SISPART1, 0x06, (base & 0xFF));
		SiS_SetReg(SISPART1, 0x05, ((base >> 8) & 0xFF));
		SiS_SetReg(SISPART1, 0x04, ((base >> 16) & 0xFF));
		if(ivideo->sisvga_engine == SIS_315_VGA) {
			SiS_SetRegANDOR(SISPART1, 0x02, 0x7F, ((base >> 24) & 0x01) << 7);
		}
	}
}
/*
 * sisfb_pan_var - compute the new display start from the pan offsets
 * and program it into both CRT1 and CRT2.
 *
 * The offset is first calculated in pixels, then scaled down to the
 * hardware's dword-based addressing depending on the depth.
 */
static int
sisfb_pan_var(struct sis_video_info *ivideo, struct fb_info *info,
	struct fb_var_screeninfo *var)
{
	ivideo->current_base = var->yoffset * info->var.xres_virtual
			     + var->xoffset;

	/* Pixels -> dwords: 32bpp needs no shift, 16bpp one,
	 * 8bpp (and anything else) two. */
	if(info->var.bits_per_pixel == 16)
		ivideo->current_base >>= 1;
	else if(info->var.bits_per_pixel != 32)
		ivideo->current_base >>= 2;

	ivideo->current_base += (ivideo->video_offset >> 2);

	sisfb_set_base_CRT1(ivideo, ivideo->current_base);
	sisfb_set_base_CRT2(ivideo, ivideo->current_base);

	return 0;
}
/* fb_open hook: nothing to set up per opener. */
static int
sisfb_open(struct fb_info *info, int user)
{
	return 0;
}
/* fb_release hook: nothing to tear down per opener. */
static int
sisfb_release(struct fb_info *info, int user)
{
	return 0;
}
/*
 * sisfb_setcolreg - fb_setcolreg hook.  At 8bpp the color is written
 * into the hardware DAC(s); at 16/32bpp the first 16 entries go into
 * the software pseudo-palette used by the drawing code.
 *
 * Color components arrive as 16-bit values per fbdev convention and
 * are shifted down to the width the target needs.  Returns 1 for an
 * out-of-range register index, 0 otherwise.
 */
static int
sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
		unsigned transp, struct fb_info *info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;

	if(regno >= sisfb_get_cmap_len(&info->var))
		return 1;

	switch(info->var.bits_per_pixel) {
	case 8:
		/* Primary DAC takes 6-bit components (>> 10) */
		SiS_SetRegByte(SISDACA, regno);
		SiS_SetRegByte(SISDACD, (red >> 10));
		SiS_SetRegByte(SISDACD, (green >> 10));
		SiS_SetRegByte(SISDACD, (blue >> 10));
		if(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
			/* Secondary (CRT2) DAC takes 8-bit components */
			SiS_SetRegByte(SISDAC2A, regno);
			SiS_SetRegByte(SISDAC2D, (red >> 8));
			SiS_SetRegByte(SISDAC2D, (green >> 8));
			SiS_SetRegByte(SISDAC2D, (blue >> 8));
		}
		break;
	case 16:
		if (regno >= 16)
			break;

		/* Pack into RGB565 */
		((u32 *)(info->pseudo_palette))[regno] =
				(red & 0xf800)          |
				((green & 0xfc00) >> 5) |
				((blue & 0xf800) >> 11);
		break;
	case 32:
		if (regno >= 16)
			break;

		/* Pack into 0RGB with 8 bits per component */
		red >>= 8;
		green >>= 8;
		blue >>= 8;
		((u32 *)(info->pseudo_palette))[regno] =
				(red << 16) | (green << 8) | (blue);
		break;
	}

	return 0;
}
/*
 * sisfb_set_par - fb_set_par hook: apply info->var to the hardware
 * and refresh the fixed screen information afterwards.
 */
static int
sisfb_set_par(struct fb_info *info)
{
	int err = sisfb_do_set_var(&info->var, 1, info);

	if(err)
		return err;

	sisfb_get_fix(&info->fix, -1, info);

	return 0;
}
/*
 * sisfb_check_var - fb_check_var hook: adjust @var to the nearest
 * mode the hardware and the attached display(s) can do, without
 * touching the hardware.
 *
 * Steps: compute totals, find (or approximate) a sisbios_mode[] entry,
 * determine a refresh rate (several fallbacks below), optionally
 * recalculate the timing/clock fields, verify against monitor limits,
 * then normalize the bitfields, virtual resolution and pan offsets.
 * Returns 0 on success, -EINVAL when no usable mode exists.
 */
static int
sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
	unsigned int htotal = 0, vtotal = 0, myrateindex = 0;
	unsigned int drate = 0, hrate = 0, maxyres;
	int found_mode = 0;
	int refresh_rate, search_idx, tidx;
	bool recalc_clock = false;
	u32 pixclock;

	htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len;
	vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
	pixclock = var->pixclock;

	/* Total vertical lines, adjusted for scan mode (see do_set_var;
	 * the factor of two is compensated in the rate formula below) */
	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
		vtotal += var->yres;
		vtotal <<= 1;
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
		vtotal += var->yres;
		vtotal <<= 2;
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
		vtotal += var->yres;
		vtotal <<= 1;
	} else
		vtotal += var->yres;

	if(!(htotal) || !(vtotal)) {
		SISFAIL("sisfb: no valid timing data");
	}

	/* Pass 1: exact resolution/depth match */
	search_idx = 0;
	while( (sisbios_mode[search_idx].mode_no[0] != 0) &&
	       (sisbios_mode[search_idx].xres <= var->xres) ) {
		if( (sisbios_mode[search_idx].xres == var->xres) &&
		    (sisbios_mode[search_idx].yres == var->yres) &&
		    (sisbios_mode[search_idx].bpp == var->bits_per_pixel)) {
			if((tidx = sisfb_validate_mode(ivideo, search_idx,
						ivideo->currentvbflags)) > 0) {
				found_mode = 1;
				search_idx = tidx;
				break;
			}
		}
		search_idx++;
	}

	if(!found_mode) {
		/* Pass 2: smallest mode that encloses the request */
		search_idx = 0;
		while(sisbios_mode[search_idx].mode_no[0] != 0) {
			if( (var->xres <= sisbios_mode[search_idx].xres) &&
			    (var->yres <= sisbios_mode[search_idx].yres) &&
			    (var->bits_per_pixel == sisbios_mode[search_idx].bpp) ) {
				if((tidx = sisfb_validate_mode(ivideo,search_idx,
						ivideo->currentvbflags)) > 0) {
					found_mode = 1;
					search_idx = tidx;
					break;
				}
			}
			search_idx++;
		}
		if(found_mode) {
			printk(KERN_DEBUG
				"sisfb: Adapted from %dx%dx%d to %dx%dx%d\n",
				var->xres, var->yres, var->bits_per_pixel,
				sisbios_mode[search_idx].xres,
				sisbios_mode[search_idx].yres,
				var->bits_per_pixel);
			var->xres = sisbios_mode[search_idx].xres;
			var->yres = sisbios_mode[search_idx].yres;
		} else {
			printk(KERN_ERR
				"sisfb: Failed to find supported mode near %dx%dx%d\n",
				var->xres, var->yres, var->bits_per_pixel);
			return -EINVAL;
		}
	}

	/* Choose a refresh rate; each branch explains its fallback */
	if( ((ivideo->vbflags2 & VB2_LVDS) ||
	     ((ivideo->vbflags2 & VB2_30xBDH) && (ivideo->currentvbflags & CRT2_LCD))) &&
	    (var->bits_per_pixel == 8) ) {
		/* Slave modes on LVDS and 301B-DH */
		refresh_rate = 60;
		recalc_clock = true;
	} else if( (ivideo->current_htotal == htotal) &&
		   (ivideo->current_vtotal == vtotal) &&
		   (ivideo->current_pixclock == pixclock) ) {
		/* x=x & y=y & c=c -> assume depth change */
		drate = 1000000000 / pixclock;
		hrate = (drate * 1000) / htotal;
		refresh_rate = (unsigned int) (hrate * 2 / vtotal);
	} else if( ( (ivideo->current_htotal != htotal) ||
		     (ivideo->current_vtotal != vtotal) ) &&
		   (ivideo->current_pixclock == var->pixclock) ) {
		/* x!=x | y!=y & c=c -> invalid pixclock */
		if(ivideo->sisfb_lastrates[sisbios_mode[search_idx].mode_no[ivideo->mni]]) {
			refresh_rate =
				ivideo->sisfb_lastrates[sisbios_mode[search_idx].mode_no[ivideo->mni]];
		} else if(ivideo->sisfb_parm_rate != -1) {
			/* Sic, sisfb_parm_rate - want to know originally desired rate here */
			refresh_rate = ivideo->sisfb_parm_rate;
		} else {
			refresh_rate = 60;
		}
		recalc_clock = true;
	} else if((pixclock) && (htotal) && (vtotal)) {
		/* Fully specified timing: derive the rate from it */
		drate = 1000000000 / pixclock;
		hrate = (drate * 1000) / htotal;
		refresh_rate = (unsigned int) (hrate * 2 / vtotal);
	} else if(ivideo->current_refresh_rate) {
		/* Keep the currently active rate */
		refresh_rate = ivideo->current_refresh_rate;
		recalc_clock = true;
	} else {
		refresh_rate = 60;
		recalc_clock = true;
	}

	myrateindex = sisfb_search_refresh_rate(ivideo, refresh_rate, search_idx);

	/* Eventually recalculate timing and clock */
	if(recalc_clock) {
		if(!myrateindex) myrateindex = sisbios_mode[search_idx].rate_idx;
		var->pixclock = (u32) (1000000000 / sisfb_mode_rate_to_dclock(&ivideo->SiS_Pr,
						sisbios_mode[search_idx].mode_no[ivideo->mni],
						myrateindex));
		sisfb_mode_rate_to_ddata(&ivideo->SiS_Pr,
					 sisbios_mode[search_idx].mode_no[ivideo->mni],
					 myrateindex, var);
		if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
			var->pixclock <<= 1;
		}
	}

	/* Warn (but do not fail) when outside the monitor's EDID limits */
	if(ivideo->sisfb_thismonitor.datavalid) {
		if(!sisfb_verify_rate(ivideo, &ivideo->sisfb_thismonitor, search_idx,
				      myrateindex, refresh_rate)) {
			printk(KERN_INFO
				"sisfb: WARNING: Refresh rate exceeds monitor specs!\n");
		}
	}

	/* Adapt RGB settings */
	sisfb_bpp_to_var(ivideo, var);

	/* Sanity check for offsets */
	/* NOTE(review): xoffset/yoffset are unsigned in fb_var_screeninfo,
	 * so these two checks can never trigger — kept as-is. */
	if(var->xoffset < 0) var->xoffset = 0;
	if(var->yoffset < 0) var->yoffset = 0;

	if(var->xres > var->xres_virtual)
		var->xres_virtual = var->xres;

	if(ivideo->sisfb_ypan) {
		/* Panning enabled: virtual height is bounded (or forced,
		 * with sisfb_max) to what fits into video memory */
		maxyres = sisfb_calc_maxyres(ivideo, var);
		if(ivideo->sisfb_max) {
			var->yres_virtual = maxyres;
		} else {
			if(var->yres_virtual > maxyres) {
				var->yres_virtual = maxyres;
			}
		}
		if(var->yres_virtual <= var->yres) {
			var->yres_virtual = var->yres;
		}
	} else {
		/* No panning: virtual == visible, offsets pinned to 0 */
		if(var->yres != var->yres_virtual) {
			var->yres_virtual = var->yres;
		}
		var->xoffset = 0;
		var->yoffset = 0;
	}

	/* Truncate offsets to maximum if too high */
	if(var->xoffset > var->xres_virtual - var->xres) {
		var->xoffset = var->xres_virtual - var->xres - 1;
	}

	if(var->yoffset > var->yres_virtual - var->yres) {
		var->yoffset = var->yres_virtual - var->yres - 1;
	}

	/* Set everything else to 0 */
	var->red.msb_right =
		var->green.msb_right =
		var->blue.msb_right =
		var->transp.offset =
		var->transp.length =
		var->transp.msb_right = 0;

	return 0;
}
/*
 * sisfb_pan_display - fb_pan_display hook: validate the requested
 * offsets, program the new display base, and record the offsets in
 * info->var on success.  Y-wrap panning is not supported.
 */
static int
sisfb_pan_display(struct fb_var_screeninfo *var, struct fb_info* info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
	int err;

	if (var->vmode & FB_VMODE_YWRAP)
		return -EINVAL;

	/* The visible area must stay inside the virtual screen */
	if (var->xoffset + info->var.xres > info->var.xres_virtual ||
	    var->yoffset + info->var.yres > info->var.yres_virtual)
		return -EINVAL;

	err = sisfb_pan_var(ivideo, info, var);
	if (err < 0)
		return err;

	info->var.xoffset = var->xoffset;
	info->var.yoffset = var->yoffset;

	return 0;
}
/* fb_blank hook: forward to the hardware-specific blanking helper. */
static int
sisfb_blank(int blank, struct fb_info *info)
{
	return sisfb_myblank((struct sis_video_info *)info->par, blank);
}
/* ----------- FBDev related routines for all series ---------- */
/*
 * sisfb_ioctl - driver-private ioctls plus FBIOGET_VBLANK.
 *
 * The *_OLD command numbers are deprecated aliases that log a warning
 * (rate-limited via warncount) and then deliberately fall through to
 * the current handler.  FBIO_ALLOC/FBIO_FREE manage offscreen video
 * memory and require CAP_SYS_RAWIO.
 */
static int	sisfb_ioctl(struct fb_info *info, unsigned int cmd,
			    unsigned long arg)
{
	struct sis_video_info	*ivideo = (struct sis_video_info *)info->par;
	struct sis_memreq	sismemreq;
	struct fb_vblank	sisvbblank;
	u32			gpu32 = 0;
#ifndef __user
#define __user
#endif
	u32 __user *argp = (u32 __user *)arg;

	switch(cmd) {
	   case FBIO_ALLOC:
		if(!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if(copy_from_user(&sismemreq, (void __user *)arg, sizeof(sismemreq)))
			return -EFAULT;

		sis_malloc(&sismemreq);

		/* Hand the result back; on copy failure release the
		 * just-made allocation so nothing leaks */
		if(copy_to_user((void __user *)arg, &sismemreq, sizeof(sismemreq))) {
			sis_free((u32)sismemreq.offset);
			return -EFAULT;
		}
		break;

	   case FBIO_FREE:
		if(!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if(get_user(gpu32, argp))
			return -EFAULT;

		sis_free(gpu32);
		break;

	   case FBIOGET_VBLANK:
		memset(&sisvbblank, 0, sizeof(struct fb_vblank));

		sisvbblank.count = 0;
		sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);

		if(copy_to_user((void __user *)arg, &sisvbblank, sizeof(sisvbblank)))
			return -EFAULT;

		break;

	   case SISFB_GET_INFO_SIZE:
		return put_user(sizeof(struct sisfb_info), argp);

	   case SISFB_GET_INFO_OLD:
		if(ivideo->warncount++ < 10)
			printk(KERN_INFO
				"sisfb: Deprecated ioctl call received - update your application!\n");
		/* fall through */
	   case SISFB_GET_INFO:  /* For communication with X driver */
		/* Snapshot the complete driver state into the info block */
		ivideo->sisfb_infoblock.sisfb_id         = SISFB_ID;
		ivideo->sisfb_infoblock.sisfb_version    = VER_MAJOR;
		ivideo->sisfb_infoblock.sisfb_revision   = VER_MINOR;
		ivideo->sisfb_infoblock.sisfb_patchlevel = VER_LEVEL;
		ivideo->sisfb_infoblock.chip_id = ivideo->chip_id;
		ivideo->sisfb_infoblock.sisfb_pci_vendor = ivideo->chip_vendor;
		ivideo->sisfb_infoblock.memory = ivideo->video_size / 1024;
		ivideo->sisfb_infoblock.heapstart = ivideo->heapstart / 1024;
		if(ivideo->modechanged) {
			ivideo->sisfb_infoblock.fbvidmode = ivideo->mode_no;
		} else {
			ivideo->sisfb_infoblock.fbvidmode = ivideo->modeprechange;
		}
		ivideo->sisfb_infoblock.sisfb_caps = ivideo->caps;
		ivideo->sisfb_infoblock.sisfb_tqlen = ivideo->cmdQueueSize / 1024;
		ivideo->sisfb_infoblock.sisfb_pcibus = ivideo->pcibus;
		ivideo->sisfb_infoblock.sisfb_pcislot = ivideo->pcislot;
		ivideo->sisfb_infoblock.sisfb_pcifunc = ivideo->pcifunc;
		ivideo->sisfb_infoblock.sisfb_lcdpdc = ivideo->detectedpdc;
		ivideo->sisfb_infoblock.sisfb_lcdpdca = ivideo->detectedpdca;
		ivideo->sisfb_infoblock.sisfb_lcda = ivideo->detectedlcda;
		ivideo->sisfb_infoblock.sisfb_vbflags = ivideo->vbflags;
		ivideo->sisfb_infoblock.sisfb_currentvbflags = ivideo->currentvbflags;
		ivideo->sisfb_infoblock.sisfb_scalelcd = ivideo->SiS_Pr.UsePanelScaler;
		ivideo->sisfb_infoblock.sisfb_specialtiming = ivideo->SiS_Pr.SiS_CustomT;
		ivideo->sisfb_infoblock.sisfb_haveemi = ivideo->SiS_Pr.HaveEMI ? 1 : 0;
		ivideo->sisfb_infoblock.sisfb_haveemilcd = ivideo->SiS_Pr.HaveEMILCD ? 1 : 0;
		ivideo->sisfb_infoblock.sisfb_emi30 = ivideo->SiS_Pr.EMI_30;
		ivideo->sisfb_infoblock.sisfb_emi31 = ivideo->SiS_Pr.EMI_31;
		ivideo->sisfb_infoblock.sisfb_emi32 = ivideo->SiS_Pr.EMI_32;
		ivideo->sisfb_infoblock.sisfb_emi33 = ivideo->SiS_Pr.EMI_33;
		ivideo->sisfb_infoblock.sisfb_tvxpos = (u16)(ivideo->tvxpos + 32);
		ivideo->sisfb_infoblock.sisfb_tvypos = (u16)(ivideo->tvypos + 32);
		ivideo->sisfb_infoblock.sisfb_heapsize = ivideo->sisfb_heap_size / 1024;
		ivideo->sisfb_infoblock.sisfb_videooffset = ivideo->video_offset;
		ivideo->sisfb_infoblock.sisfb_curfstn = ivideo->curFSTN;
		ivideo->sisfb_infoblock.sisfb_curdstn = ivideo->curDSTN;
		ivideo->sisfb_infoblock.sisfb_vbflags2 = ivideo->vbflags2;
		ivideo->sisfb_infoblock.sisfb_can_post = ivideo->sisfb_can_post ? 1 : 0;
		ivideo->sisfb_infoblock.sisfb_card_posted = ivideo->sisfb_card_posted ? 1 : 0;
		ivideo->sisfb_infoblock.sisfb_was_boot_device = ivideo->sisfb_was_boot_device ? 1 : 0;

		if(copy_to_user((void __user *)arg, &ivideo->sisfb_infoblock,
						sizeof(ivideo->sisfb_infoblock)))
			return -EFAULT;

	        break;

	   case SISFB_GET_VBRSTATUS_OLD:
		if(ivideo->warncount++ < 10)
			printk(KERN_INFO
				"sisfb: Deprecated ioctl call received - update your application!\n");
		/* fall through */
	   case SISFB_GET_VBRSTATUS:
		if(sisfb_CheckVBRetrace(ivideo))
			return put_user((u32)1, argp);
		else
			return put_user((u32)0, argp);

	   case SISFB_GET_AUTOMAXIMIZE_OLD:
		if(ivideo->warncount++ < 10)
			printk(KERN_INFO
				"sisfb: Deprecated ioctl call received - update your application!\n");
		/* fall through */
	   case SISFB_GET_AUTOMAXIMIZE:
		if(ivideo->sisfb_max)
			return put_user((u32)1, argp);
		else
			return put_user((u32)0, argp);

	   case SISFB_SET_AUTOMAXIMIZE_OLD:
		if(ivideo->warncount++ < 10)
			printk(KERN_INFO
				"sisfb: Deprecated ioctl call received - update your application!\n");
		/* fall through */
	   case SISFB_SET_AUTOMAXIMIZE:
		if(get_user(gpu32, argp))
			return -EFAULT;

		ivideo->sisfb_max = (gpu32) ? 1 : 0;
		break;

	   case SISFB_SET_TVPOSOFFSET:
		if(get_user(gpu32, argp))
			return -EFAULT;

		/* Packed as (x << 16) | y, each biased by +32 */
		sisfb_set_TVxposoffset(ivideo, ((int)(gpu32 >> 16)) - 32);
		sisfb_set_TVyposoffset(ivideo, ((int)(gpu32 & 0xffff)) - 32);
		break;

	   case SISFB_GET_TVPOSOFFSET:
		return put_user((u32)(((ivideo->tvxpos+32)<<16)|((ivideo->tvypos+32)&0xffff)),
							argp);

	   case SISFB_COMMAND:
		if(copy_from_user(&ivideo->sisfb_command, (void __user *)arg,
							sizeof(struct sisfb_cmd)))
			return -EFAULT;

		sisfb_handle_command(ivideo, &ivideo->sisfb_command);

		/* The command struct doubles as the result buffer */
		if(copy_to_user((void __user *)arg, &ivideo->sisfb_command,
							sizeof(struct sisfb_cmd)))
			return -EFAULT;

		break;

	   case SISFB_SET_LOCK:
		if(get_user(gpu32, argp))
			return -EFAULT;

		ivideo->sisfblocked = (gpu32) ? 1 : 0;
		break;

	   default:
#ifdef SIS_NEW_CONFIG_COMPAT
		return -ENOIOCTLCMD;
#else
		return -EINVAL;
#endif
	}
	return 0;
}
static int
sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
{
struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
fix->xpanstep = 1;
fix->ypanstep = (ivideo->sisfb_ypan) ? 1 : 0;
fix->ywrapstep = 0;
fix->line_length = ivideo->video_linelength;
fix->mmio_start = ivideo->mmio_base;
fix->mmio_len = ivideo->mmio_size;
if(ivideo->sisvga_engine == SIS_300_VGA) {
fix->accel = FB_ACCEL_SIS_GLAMOUR;
} else if((ivideo->chip == SIS_330) ||
(ivideo->chip == SIS_760) ||
(ivideo->chip == SIS_761)) {
fix->accel = FB_ACCEL_SIS_XABRE;
} else if(ivideo->chip == XGI_20) {
fix->accel = FB_ACCEL_XGI_VOLARI_Z;
} else if(ivideo->chip >= XGI_40) {
fix->accel = FB_ACCEL_XGI_VOLARI_V;
} else {
fix->accel = FB_ACCEL_SIS_GLAMOUR_2;
}
return 0;
}
/* ---------------- fb_ops structures ----------------- */
/* fbdev entry points for this driver; hooked up by register_framebuffer().
 * Fill/copy/sync use the SiS 2D engine (fbcon_sis_*); imageblit falls
 * back to the generic cfb implementation. */
static struct fb_ops sisfb_ops = {
	.owner          = THIS_MODULE,
	.fb_open        = sisfb_open,
	.fb_release     = sisfb_release,
	.fb_check_var   = sisfb_check_var,
	.fb_set_par     = sisfb_set_par,
	.fb_setcolreg   = sisfb_setcolreg,
	.fb_pan_display = sisfb_pan_display,
	.fb_blank       = sisfb_blank,
	.fb_fillrect    = fbcon_sis_fillrect,
	.fb_copyarea    = fbcon_sis_copyarea,
	.fb_imageblit   = cfb_imageblit,
	.fb_sync        = fbcon_sis_sync,
#ifdef SIS_NEW_CONFIG_COMPAT
	/* sisfb_ioctl handles both native and compat paths */
	.fb_compat_ioctl= sisfb_ioctl,
#endif
	.fb_ioctl       = sisfb_ioctl
};
/* ---------------- Chip generation dependent routines ---------------- */
/*
 * Look up the host (north) bridge PCI device belonging to an
 * integrated SiS VGA chip. Each base chip ID maps to a small run of
 * candidate bridge device IDs in nbridgeids[]; the first one found
 * on the bus wins.
 *
 * Returns a referenced pci_dev (caller owns the reference) or NULL
 * if the chip has no known bridge / none is present.
 */
static struct pci_dev *sisfb_get_northbridge(int basechipid)
{
	static const unsigned short nbridgeids[] = {
		PCI_DEVICE_ID_SI_540,	/* for SiS 540 VGA */
		PCI_DEVICE_ID_SI_630,	/* for SiS 630/730 VGA */
		PCI_DEVICE_ID_SI_730,
		PCI_DEVICE_ID_SI_550,	/* for SiS 550 VGA */
		PCI_DEVICE_ID_SI_650,	/* for SiS 650/651/740 VGA */
		PCI_DEVICE_ID_SI_651,
		PCI_DEVICE_ID_SI_740,
		PCI_DEVICE_ID_SI_661,	/* for SiS 661/741/660/760/761 VGA */
		PCI_DEVICE_ID_SI_741,
		PCI_DEVICE_ID_SI_660,
		PCI_DEVICE_ID_SI_760,
		PCI_DEVICE_ID_SI_761
	};
	struct pci_dev *pdev = NULL;
	int first, count, i;

	/* Map the VGA chip onto its candidate range in nbridgeids[]. */
	switch(basechipid) {
#ifdef CONFIG_FB_SIS_300
	case SIS_540: first = 0; count = 1; break;
	case SIS_630: first = 1; count = 2; break;
#endif
#ifdef CONFIG_FB_SIS_315
	case SIS_550: first = 3; count = 1; break;
	case SIS_650: first = 4; count = 3; break;
	case SIS_660: first = 7; count = 5; break;
#endif
	default:
		return NULL;
	}

	/* Probe each candidate ID; stop at the first device present. */
	for(i = 0; i < count; i++) {
		pdev = pci_get_device(PCI_VENDOR_ID_SI,
				nbridgeids[first + i], NULL);
		if(pdev)
			break;
	}

	return pdev;
}
/*
 * Determine the amount of video RAM for the detected chip and store
 * it in ivideo->video_size (bytes). On SiS 660/760/761 the memory may
 * be split between UMA (shared system RAM) and a local frame buffer;
 * those portions are reported in ivideo->UMAsize / ivideo->LFBsize.
 *
 * Returns 0 on success, -1 for unknown chips or a missing north
 * bridge on integrated 300-series chipsets.
 */
static int sisfb_get_dram_size(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	u8 reg;
#endif

	ivideo->video_size = 0;
	ivideo->UMAsize = ivideo->LFBsize = 0;

	switch(ivideo->chip) {
#ifdef CONFIG_FB_SIS_300
	case SIS_300:
		/* Discrete 300: size encoded in SR14 bits 5:0, in MB - 1 */
		reg = SiS_GetReg(SISSR, 0x14);
		ivideo->video_size = ((reg & 0x3F) + 1) << 20;
		break;
	case SIS_540:
	case SIS_630:
	case SIS_730:
		if(!ivideo->nbridge)
			return -1;
		/* Integrated chipsets: shared-memory size lives in the
		 * north bridge's PCI config space at offset 0x63.
		 * BUGFIX: "&reg" had been mangled into a single "(R)"
		 * character (HTML-entity corruption), which cannot compile. */
		pci_read_config_byte(ivideo->nbridge, 0x63, &reg);
		ivideo->video_size = 1 << (((reg & 0x70) >> 4) + 21);
		break;
#endif
#ifdef CONFIG_FB_SIS_315
	case SIS_315H:
	case SIS_315PRO:
	case SIS_315:
		reg = SiS_GetReg(SISSR, 0x14);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		/* Bits 3:2 scale the base size (bus-width configuration) */
		switch((reg >> 2) & 0x03) {
		case 0x01:
		case 0x03:
			ivideo->video_size <<= 1;
			break;
		case 0x02:
			ivideo->video_size += (ivideo->video_size/2);
		}
		break;
	case SIS_330:
		reg = SiS_GetReg(SISSR, 0x14);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		if(reg & 0x0c) ivideo->video_size <<= 1;
		break;
	case SIS_550:
	case SIS_650:
	case SIS_740:
		/* Size in SR14 bits 5:0, in units of 4MB */
		reg = SiS_GetReg(SISSR, 0x14);
		ivideo->video_size = (((reg & 0x3f) + 1) << 2) << 20;
		break;
	case SIS_661:
	case SIS_741:
		reg = SiS_GetReg(SISCR, 0x79);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		break;
	case SIS_660:
	case SIS_760:
	case SIS_761:
		/* UMA portion from CR79, optional LFB portion from CR78 */
		reg = SiS_GetReg(SISCR, 0x79);
		reg = (reg & 0xf0) >> 4;
		if(reg)	{
			ivideo->video_size = (1 << reg) << 20;
			ivideo->UMAsize = ivideo->video_size;
		}
		reg = SiS_GetReg(SISCR, 0x78);
		reg &= 0x30;
		if(reg) {
			if(reg == 0x10) {
				ivideo->LFBsize = (32 << 20);
			} else {
				ivideo->LFBsize = (64 << 20);
			}
			ivideo->video_size += ivideo->LFBsize;
		}
		break;
	case SIS_340:
	case XGI_20:
	case XGI_40:
		reg = SiS_GetReg(SISSR, 0x14);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		if(ivideo->chip != XGI_20) {
			reg = (reg & 0x0c) >> 2;
			/* XGI rev 2: only bit 0 of the width field is valid */
			if(ivideo->revision_id == 2) {
				if(reg & 0x01) reg = 0x02;
				else	       reg = 0x00;
			}
			if(reg == 0x02)		ivideo->video_size <<= 1;
			else if(reg == 0x03)	ivideo->video_size <<= 2;
		}
		break;
#endif
	default:
		return -1;
	}
	return 0;
}
/* -------------- video bridge device detection --------------- */
/*
 * Read the BIOS connection-status scratch register (CR32) to learn
 * which outputs (CRT1, TV, LCD, secondary VGA) are attached, sanitize
 * the user-supplied tvplug/tvstd/forcecrt1 parameters against the
 * detected hardware, and derive a default TV standard if none is set.
 */
static void sisfb_detect_VB_connect(struct sis_video_info *ivideo)
{
	u8 cr32, temp;

	/* No CRT2 on XGI Z7 */
	if(ivideo->chip == XGI_20) {
		ivideo->sisfb_crt1off = 0;
		return;
	}

#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		temp = SiS_GetReg(SISSR, 0x17);
		if((temp & 0x0F) && (ivideo->chip != SIS_300)) {
			/* PAL/NTSC is stored on SR16 on such machines */
			if(!(ivideo->vbflags & (TV_PAL | TV_NTSC | TV_PALM | TV_PALN))) {
				temp = SiS_GetReg(SISSR, 0x16);
				if(temp & 0x20)
					ivideo->vbflags |= TV_PAL;
				else
					ivideo->vbflags |= TV_NTSC;
			}
		}
	}
#endif

	cr32 = SiS_GetReg(SISCR, 0x32);

	if(cr32 & SIS_CRT1) {
		ivideo->sisfb_crt1off = 0;
	} else {
		/* No CRT1: switch it off only if some other output is present */
		ivideo->sisfb_crt1off = (cr32 & 0xDF) ? 1 : 0;
	}

	ivideo->vbflags &= ~(CRT2_TV | CRT2_LCD | CRT2_VGA);

	if(cr32 & SIS_VB_TV)   ivideo->vbflags |= CRT2_TV;
	if(cr32 & SIS_VB_LCD)  ivideo->vbflags |= CRT2_LCD;
	if(cr32 & SIS_VB_CRT2) ivideo->vbflags |= CRT2_VGA;

	/* Check given parms for hardware compatibility.
	 * (Cannot do this in the search_xx routines since we don't
	 * know what hardware we are running on then)
	 */

	/* DSTN/FSTN parameters only apply to the SiS550 */
	if(ivideo->chip != SIS_550) {
	   ivideo->sisfb_dstn = ivideo->sisfb_fstn = 0;
	}

	/* YPbPr needs a 315-series engine and a YPbPr-capable bridge */
	if(ivideo->sisfb_tvplug != -1) {
	   if( (ivideo->sisvga_engine != SIS_315_VGA) ||
	       (!(ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) ) {
	      if(ivideo->sisfb_tvplug & TV_YPBPR) {
		 ivideo->sisfb_tvplug = -1;
		 printk(KERN_ERR "sisfb: YPbPr not supported\n");
	      }
	   }
	}
	/* HiVision needs a 315-series engine and a HiVision-capable bridge */
	if(ivideo->sisfb_tvplug != -1) {
	   if( (ivideo->sisvga_engine != SIS_315_VGA) ||
	       (!(ivideo->vbflags2 & VB2_SISHIVISIONBRIDGE)) ) {
	      if(ivideo->sisfb_tvplug & TV_HIVISION) {
		 ivideo->sisfb_tvplug = -1;
		 printk(KERN_ERR "sisfb: HiVision not supported\n");
	      }
	   }
	}
	/* PALM/PALN/NTSCJ need a SiS bridge or a 315-series Chrontel */
	if(ivideo->sisfb_tvstd != -1) {
	   if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) &&
	       (!((ivideo->sisvga_engine == SIS_315_VGA) &&
			(ivideo->vbflags2 & VB2_CHRONTEL))) ) {
	      if(ivideo->sisfb_tvstd & (TV_PALM | TV_PALN | TV_NTSCJ)) {
		 ivideo->sisfb_tvstd = -1;
		 printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n");
	      }
	   }
	}

	/* Detect/set TV plug & type */
	if(ivideo->sisfb_tvplug != -1) {
		ivideo->vbflags |= ivideo->sisfb_tvplug;
	} else {
		if(cr32 & SIS_VB_YPBPR)		ivideo->vbflags |= (TV_YPBPR|TV_YPBPR525I); /* default: 480i */
		else if(cr32 & SIS_VB_HIVISION)	ivideo->vbflags |= TV_HIVISION;
		else if(cr32 & SIS_VB_SCART)	ivideo->vbflags |= TV_SCART;
		else {
			if(cr32 & SIS_VB_SVIDEO)    ivideo->vbflags |= TV_SVIDEO;
			if(cr32 & SIS_VB_COMPOSITE) ivideo->vbflags |= TV_AVIDEO;
		}
	}

	/* Derive the TV standard (only relevant for SVIDEO/AVIDEO/SCART) */
	if(!(ivideo->vbflags & (TV_YPBPR | TV_HIVISION))) {
	   if(ivideo->sisfb_tvstd != -1) {
	      /* User-given standard overrides anything detected */
	      ivideo->vbflags &= ~(TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ);
	      ivideo->vbflags |= ivideo->sisfb_tvstd;
	   }
	   if(ivideo->vbflags & TV_SCART) {
	      /* SCART is always PAL */
	      ivideo->vbflags &= ~(TV_NTSC | TV_PALM | TV_PALN | TV_NTSCJ);
	      ivideo->vbflags |= TV_PAL;
	   }
	   if(!(ivideo->vbflags & (TV_PAL | TV_NTSC | TV_PALM | TV_PALN | TV_NTSCJ))) {
	      /* Still undecided: read the BIOS-stored PAL/NTSC bit.
	       * Location depends on chip generation (SR38 vs CR79). */
	      if(ivideo->sisvga_engine == SIS_300_VGA) {
		 temp = SiS_GetReg(SISSR, 0x38);
		 if(temp & 0x01) ivideo->vbflags |= TV_PAL;
		 else		 ivideo->vbflags |= TV_NTSC;
	      } else if((ivideo->chip <= SIS_315PRO) || (ivideo->chip >= SIS_330)) {
		 temp = SiS_GetReg(SISSR, 0x38);
		 if(temp & 0x01) ivideo->vbflags |= TV_PAL;
		 else		 ivideo->vbflags |= TV_NTSC;
	      } else {
		 temp = SiS_GetReg(SISCR, 0x79);
		 if(temp & 0x20) ivideo->vbflags |= TV_PAL;
		 else		 ivideo->vbflags |= TV_NTSC;
	      }
	   }
	}

	/* Copy forceCRT1 option to CRT1off if option is given */
	if(ivideo->sisfb_forcecrt1 != -1) {
	   ivideo->sisfb_crt1off = (ivideo->sisfb_forcecrt1) ? 0 : 1;
	}
}
/* ------------------ Sensing routines ------------------ */
/*
 * Probe for DDC1: sample the DDC1 data bit repeatedly and report
 * whether it toggles within 49 reads after the initial sample.
 * A toggling bit means a DDC1-capable monitor is clocking data out.
 */
static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
{
	unsigned short first;
	int budget = 48;

	first = SiS_ReadDDC1Bit(&ivideo->SiS_Pr);
	for (;;) {
		if (SiS_ReadDDC1Bit(&ivideo->SiS_Pr) != first)
			break;			/* bit toggled: DDC1 alive */
		if (budget-- == 0)
			break;			/* exhausted: budget ends at -1 */
	}
	/* budget == -1 only when the bit never changed */
	return (budget != -1);
}
/*
 * Detect whether a monitor is attached to CRT1 and record the result
 * in CR32 bit 5. Temporarily forces CRT1 on (SR1F/CR17, plus CR63 on
 * 315-series), senses via the DAC load line (SiS330+) or DDC/DDC1,
 * then restores the saved register state.
 */
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
	bool mustwait = false;
	u8  sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
	u8  cr63=0;
#endif
	u16 temp = 0xffff;	/* 0xffff = "not sensed yet" */
	int i;

	/* Save SR1F and un-gate DPMS / clock while sensing */
	sr1F = SiS_GetReg(SISSR, 0x1F);
	SiS_SetRegOR(SISSR, 0x1F, 0x04);
	SiS_SetRegAND(SISSR, 0x1F, 0x3F);
	if(sr1F & 0xc0) mustwait = true;

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		/* Save CR63 bit 6 and clear it (CRT1 gating on 315) */
		cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
		cr63 &= 0x40;
		SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
	}
#endif

	cr17 = SiS_GetReg(SISCR, 0x17);
	cr17 &= 0x80;
	if(!cr17) {
		/* CRT1 was off: switch it on and restart sync */
		SiS_SetRegOR(SISCR, 0x17, 0x80);
		mustwait = true;
		SiS_SetReg(SISSR, 0x00, 0x01);
		SiS_SetReg(SISSR, 0x00, 0x03);
	}

	if(mustwait) {
		/* Give the CRT time to stabilize before sensing */
		for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
	}

#ifdef CONFIG_FB_SIS_315
	if(ivideo->chip >= SIS_330) {
		/* Hardware DAC load sensing via CR57/CR53 */
		SiS_SetRegAND(SISCR, 0x32, ~0x20);
		if(ivideo->chip >= SIS_340) {
			SiS_SetReg(SISCR, 0x57, 0x4a);
		} else {
			SiS_SetReg(SISCR, 0x57, 0x5f);
		}
		SiS_SetRegOR(SISCR, 0x53, 0x02);
		/* NOTE(review): these "while ... break" loops test the
		 * retrace bit at most once; presumably they were meant to
		 * wait for the vertical retrace edge - confirm against the
		 * vendor driver before "fixing". */
		while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)    break;
		while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
		if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
		SiS_SetRegAND(SISCR, 0x53, 0xfd);
		SiS_SetRegAND(SISCR, 0x57, 0x00);
	}
#endif

	if(temp == 0xffff) {
		/* No hardware sense result: try DDC2 (3 attempts), then DDC1 */
		i = 3;
		do {
			temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
				ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
		} while(((temp == 0) || (temp == 0xffff)) && i--);

		if((temp == 0) || (temp == 0xffff)) {
			if(sisfb_test_DDC1(ivideo)) temp = 1;
		}
	}

	if((temp) && (temp != 0xffff)) {
		/* Monitor found: set the "CRT1 connected" scratch bit */
		SiS_SetRegOR(SISCR, 0x32, 0x20);
	}

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
	}
#endif

	/* Restore the saved CRT1/DPMS state */
	SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);

	SiS_SetReg(SISSR, 0x1F, sr1F);
}
/* Determine and detect attached devices on SiS30x */

/*
 * Detect a digital flat panel on a TMDS-capable SiS30x bridge by
 * reading its EDID over DDC, and program the panel type into CR36/CR37
 * if the BIOS has not already done so (CR32 bit 3 set).
 * Only panels 1024x768, 1280x1024 and (on 301C) 1600x1200 are handled.
 */
static void SiS_SenseLCD(struct sis_video_info *ivideo)
{
	unsigned char buffer[256];
	unsigned short temp, realcrtno, i;
	u8 reg, cr37 = 0, paneltype = 0;
	u16 xres, yres;

	ivideo->SiS_Pr.PanelSelfDetected = false;

	/* LCD detection only for TMDS bridges */
	if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
		return;
	if(ivideo->vbflags2 & VB2_30xBDH)
		return;

	/* If LCD already set up by BIOS, skip it */
	reg = SiS_GetReg(SISCR, 0x32);
	if(reg & 0x08)
		return;

	realcrtno = 1;
	if(ivideo->SiS_Pr.DDCPortMixup)
		realcrtno = 0;

	/* Check DDC capabilities (bit 1 = DDC2 supported) */
	temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
				realcrtno, 0, &buffer[0], ivideo->vbflags2);
	if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
		return;

	/* Read DDC data */
	i = 3;  /* Number of retrys */
	do {
		temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
				ivideo->sisvga_engine, realcrtno, 1,
				&buffer[0], ivideo->vbflags2);
	} while((temp) && i--);

	if(temp)
		return;

	/* No digital device */
	if(!(buffer[0x14] & 0x80))
		return;

	/* First detailed timing preferred timing? */
	if(!(buffer[0x18] & 0x02))
		return;

	/* Native resolution from the first EDID detailed timing block */
	xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
	yres = buffer[0x3b] | ((buffer[0x3d] & 0xf0) << 4);

	/* Map resolution to the SiS panel-type index for CR36 */
	switch(xres) {
		case 1024:
			if(yres == 768)
				paneltype = 0x02;
			break;
		case 1280:
			if(yres == 1024)
				paneltype = 0x03;
			break;
		case 1600:
			if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
				paneltype = 0x0b;
			break;
	}

	if(!paneltype)
		return;

	/* CR37: DDC-capable flag plus sync-polarity bits from the EDID */
	if(buffer[0x23])
		cr37 |= 0x10;

	if((buffer[0x47] & 0x18) == 0x18)
		cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
	else
		cr37 |= 0xc0;

	SiS_SetReg(SISCR, 0x36, paneltype);
	cr37 &= 0xf1;
	SiS_SetRegANDOR(SISCR, 0x37, 0x0c, cr37);
	/* Mark "LCD connected" in the BIOS scratch register */
	SiS_SetRegOR(SISCR, 0x32, 0x08);

	ivideo->SiS_Pr.PanelSelfDetected = true;
}
/*
 * Run one load-sense operation on the SiS30x bridge.
 * @type: sense value; low byte -> Part4 0x11, high byte ORed into
 *        the low bits of the test pattern written to Part4 0x10.
 * @test: low byte combined into Part4 0x10; high byte (masked to
 *        0x7f) is the expected result pattern in Part4 0x03.
 *
 * Each round makes 3 probes; a round is retried (up to 10 times)
 * while exactly 1 of 3 probes succeeded (ambiguous). Returns the
 * probe-success count of the final round (0 = nothing detected).
 */
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
	int temp, mytest, result, i, j;

	for(j = 0; j < 10; j++) {
		result = 0;
		for(i = 0; i < 3; i++) {
			mytest = test;
			SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
			temp = (type >> 8) | (mytest & 0x00ff);
			SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
			SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
			mytest >>= 8;
			mytest &= 0x7f;
			/* Compare sensed result (Part4 0x03, inverted
			 * low bits) against the expected pattern */
			temp = SiS_GetReg(SISPART4, 0x03);
			temp ^= 0x0e;
			temp &= mytest;
			if(temp == mytest) result++;
#if 1
			/* Reset the sense lines between probes */
			SiS_SetReg(SISPART4, 0x11, 0x00);
			SiS_SetRegAND(SISPART4, 0x10, 0xe0);
			SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
		}
		if((result == 0) || (result >= 2)) break;
	}
	return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
{
u8 backupP4_0d,backupP2_00,backupP2_4d,backupSR_1e,biosflag=0;
u16 svhs=0, svhs_c=0;
u16 cvbs=0, cvbs_c=0;
u16 vga2=0, vga2_c=0;
int myflag, result;
char stdstr[] = "sisfb: Detected";
char tvstr[] = "TV connected to";
if(ivideo->vbflags2 & VB2_301) {
svhs = 0x00b9; cvbs = 0x00b3; vga2 = 0x00d1;
myflag = SiS_GetReg(SISPART4, 0x01);
if(myflag & 0x04) {
svhs = 0x00dd; cvbs = 0x00ee; vga2 = 0x00fd;
}
} else if(ivideo->vbflags2 & (VB2_301B | VB2_302B)) {
svhs = 0x016b; cvbs = 0x0174; vga2 = 0x0190;
} else if(ivideo->vbflags2 & (VB2_301LV | VB2_302LV)) {
svhs = 0x0200; cvbs = 0x0100;
} else if(ivideo->vbflags2 & (VB2_301C | VB2_302ELV | VB2_307T | VB2_307LV)) {
svhs = 0x016b; cvbs = 0x0110; vga2 = 0x0190;
} else
return;
vga2_c = 0x0e08; svhs_c = 0x0404; cvbs_c = 0x0804;
if(ivideo->vbflags & (VB2_301LV|VB2_302LV|VB2_302ELV|VB2_307LV)) {
svhs_c = 0x0408; cvbs_c = 0x0808;
}
biosflag = 2;
if(ivideo->haveXGIROM) {
biosflag = ivideo->bios_abase[0x58] & 0x03;
} else if(ivideo->newrom) {
if(ivideo->bios_abase[0x5d] & 0x04) biosflag |= 0x01;
} else if(ivideo->sisvga_engine == SIS_300_VGA) {
if(ivideo->bios_abase) {
biosflag = ivideo->bios_abase[0xfe] & 0x03;
}
}
if(ivideo->chip == SIS_300) {
myflag = SiS_GetReg(SISSR, 0x3b);
if(!(myflag & 0x01)) vga2 = vga2_c = 0;
}
if(!(ivideo->vbflags2 & VB2_SISVGA2BRIDGE)) {
vga2 = vga2_c = 0;
}
backupSR_1e = SiS_GetReg(SISSR, 0x1e);
SiS_SetRegOR(SISSR, 0x1e, 0x20);
backupP4_0d = SiS_GetReg(SISPART4, 0x0d);
if(ivideo->vbflags2 & VB2_30xC) {
SiS_SetRegANDOR(SISPART4, 0x0d, ~0x07, 0x01);
} else {
SiS_SetRegOR(SISPART4, 0x0d, 0x04);
}
SiS_DDC2Delay(&ivideo->SiS_Pr, 0x2000);
backupP2_00 = SiS_GetReg(SISPART2, 0x00);
SiS_SetReg(SISPART2, 0x00, ((backupP2_00 | 0x1c) & 0xfc));
backupP2_4d = SiS_GetReg(SISPART2, 0x4d);
if(ivideo->vbflags2 & VB2_SISYPBPRBRIDGE) {
SiS_SetReg(SISPART2, 0x4d, (backupP2_4d & ~0x10));
}
if(!(ivideo->vbflags2 & VB2_30xCLV)) {
SISDoSense(ivideo, 0, 0);
}
SiS_SetRegAND(SISCR, 0x32, ~0x14);
if(vga2_c || vga2) {
if(SISDoSense(ivideo, vga2, vga2_c)) {
if(biosflag & 0x01) {
printk(KERN_INFO "%s %s SCART output\n", stdstr, tvstr);
SiS_SetRegOR(SISCR, 0x32, 0x04);
} else {
printk(KERN_INFO "%s secondary VGA connection\n", stdstr);
SiS_SetRegOR(SISCR, 0x32, 0x10);
}
}
}
SiS_SetRegAND(SISCR, 0x32, 0x3f);
if(ivideo->vbflags2 & VB2_30xCLV) {
SiS_SetRegOR(SISPART4, 0x0d, 0x04);
}
if((ivideo->sisvga_engine == SIS_315_VGA) && (ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) {
SiS_SetReg(SISPART2, 0x4d, (backupP2_4d | 0x10));
SiS_DDC2Delay(&ivideo->SiS_Pr, 0x2000);
if((result = SISDoSense(ivideo, svhs, 0x0604))) {
if((result = SISDoSense(ivideo, cvbs, 0x0804))) {
printk(KERN_INFO "%s %s YPbPr component output\n", stdstr, tvstr);
SiS_SetRegOR(SISCR, 0x32, 0x80);
}
}
SiS_SetReg(SISPART2, 0x4d, backupP2_4d);
}
SiS_SetRegAND(SISCR, 0x32, ~0x03);
if(!(ivideo->vbflags & TV_YPBPR)) {
if((result = SISDoSense(ivideo, svhs, svhs_c))) {
printk(KERN_INFO "%s %s SVIDEO output\n", stdstr, tvstr);
SiS_SetRegOR(SISCR, 0x32, 0x02);
}
if((biosflag & 0x02) || (!result)) {
if(SISDoSense(ivideo, cvbs, cvbs_c)) {
printk(KERN_INFO "%s %s COMPOSITE output\n", stdstr, tvstr);
SiS_SetRegOR(SISCR, 0x32, 0x01);
}
}
}
SISDoSense(ivideo, 0, 0);
SiS_SetReg(SISPART2, 0x00, backupP2_00);
SiS_SetReg(SISPART4, 0x0d, backupP4_0d);
SiS_SetReg(SISSR, 0x1e, backupSR_1e);
if(ivideo->vbflags2 & VB2_30xCLV) {
biosflag = SiS_GetReg(SISPART2, 0x00);
if(biosflag & 0x20) {
for(myflag = 2; myflag > 0; myflag--) {
biosflag ^= 0x20;
SiS_SetReg(SISPART2, 0x00, biosflag);
}
}
}
SiS_SetReg(SISPART2, 0x00, backupP2_00);
}
/* Determine and detect attached TV's on Chrontel */

/*
 * Sense TV outputs on a Chrontel encoder: CH700x (300 series, three
 * majority-voted probes via register 0x10) or CH701x (315 series,
 * single probe via registers 0x49/0x20). Sets TV_SVIDEO/TV_AVIDEO in
 * vbflags and the corresponding CR32 scratch bits.
 */
static void SiS_SenseCh(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	u8 temp1, temp2;
	char stdstr[] = "sisfb: Chrontel: Detected TV connected to";
#endif
#ifdef CONFIG_FB_SIS_300
	unsigned char test[3];
	int i;
#endif

	if(ivideo->chip < SIS_315H) {

#ifdef CONFIG_FB_SIS_300
		ivideo->SiS_Pr.SiS_IF_DEF_CH70xx = 1;		/* Chrontel 700x */
		SiS_SetChrontelGPIO(&ivideo->SiS_Pr, 0x9c);	/* Set general purpose IO for Chrontel communication */
		SiS_DDC2Delay(&ivideo->SiS_Pr, 1000);
		temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x25);
		/* See Chrontel TB31 for explanation */
		temp2 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0e);
		if(((temp2 & 0x07) == 0x01) || (temp2 & 0x04)) {
			SiS_SetCH700x(&ivideo->SiS_Pr, 0x0e, 0x0b);
			SiS_DDC2Delay(&ivideo->SiS_Pr, 300);
		}
		temp2 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x25);
		if(temp2 != temp1) temp1 = temp2;

		if((temp1 >= 0x22) && (temp1 <= 0x50)) {

			/* Read power status */
			temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0e);
			if((temp1 & 0x03) != 0x03) {
				/* Power all outputs */
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x0e,0x0b);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 300);
			}

			/* Sense connected TV devices */
			/* Probe three times and majority-vote the results
			 * (0x02 = S-Video, 0x01 = composite, 0 = none) */
			for(i = 0; i < 3; i++) {
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x10, 0x01);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x10, 0x00);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
				temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x10);
				if(!(temp1 & 0x08))       test[i] = 0x02;
				else if(!(temp1 & 0x02))  test[i] = 0x01;
				else                      test[i] = 0;
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
			}

			if(test[0] == test[1])      temp1 = test[0];
			else if(test[0] == test[2]) temp1 = test[0];
			else if(test[1] == test[2]) temp1 = test[1];
			else {
				printk(KERN_INFO
					"sisfb: TV detection unreliable - test results varied\n");
				temp1 = test[2];
			}

			if(temp1 == 0x02) {
				printk(KERN_INFO "%s SVIDEO output\n", stdstr);
				ivideo->vbflags |= TV_SVIDEO;
				SiS_SetRegOR(SISCR, 0x32, 0x02);
				SiS_SetRegAND(SISCR, 0x32, ~0x05);
			} else if (temp1 == 0x01) {
				printk(KERN_INFO "%s CVBS output\n", stdstr);
				ivideo->vbflags |= TV_AVIDEO;
				SiS_SetRegOR(SISCR, 0x32, 0x01);
				SiS_SetRegAND(SISCR, 0x32, ~0x06);
			} else {
				/* Nothing attached: power down the encoder */
				SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x0e, 0x01, 0xF8);
				SiS_SetRegAND(SISCR, 0x32, ~0x07);
			}
		} else if(temp1 == 0) {
			SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x0e, 0x01, 0xF8);
			SiS_SetRegAND(SISCR, 0x32, ~0x07);
		}
		/* Set general purpose IO for Chrontel communication */
		SiS_SetChrontelGPIO(&ivideo->SiS_Pr, 0x00);
#endif

	} else {

#ifdef CONFIG_FB_SIS_315
		ivideo->SiS_Pr.SiS_IF_DEF_CH70xx = 2;		/* Chrontel 7019 */

		/* Trigger a sense cycle: pulse bit 0 of register 0x20,
		 * then read back the result while 0x49 is forced to 0x20 */
		temp1 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x49);
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x49, 0x20);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x20);
		temp2 |= 0x01;
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x20, temp2);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 ^= 0x01;
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x20, temp2);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x20);
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x49, temp1);

		/* Decode: 0x01 = CVBS, 0x02 = S-Video, 0x04 = SCART (both) */
		temp1 = 0;
		if(temp2 & 0x02) temp1 |= 0x01;
		if(temp2 & 0x10) temp1 |= 0x01;
		if(temp2 & 0x04) temp1 |= 0x02;
		if( (temp1 & 0x01) && (temp1 & 0x02) ) temp1 = 0x04;

		switch(temp1) {
		case 0x01:
			printk(KERN_INFO "%s CVBS output\n", stdstr);
			ivideo->vbflags |= TV_AVIDEO;
			SiS_SetRegOR(SISCR, 0x32, 0x01);
			SiS_SetRegAND(SISCR, 0x32, ~0x06);
			break;
		case 0x02:
			printk(KERN_INFO "%s SVIDEO output\n", stdstr);
			ivideo->vbflags |= TV_SVIDEO;
			SiS_SetRegOR(SISCR, 0x32, 0x02);
			SiS_SetRegAND(SISCR, 0x32, ~0x05);
			break;
		case 0x04:
			printk(KERN_INFO "%s SCART output\n", stdstr);
			SiS_SetRegOR(SISCR, 0x32, 0x04);
			SiS_SetRegAND(SISCR, 0x32, ~0x03);
			break;
		default:
			SiS_SetRegAND(SISCR, 0x32, ~0x07);
		}
#endif
	}
}
/*
 * Identify the attached video bridge or transmitter: first the SiS30x
 * family from the Part4 chip-ID registers, then (if none) LVDS /
 * Chrontel / Conexant parts from the CR37/CR38 scratch registers.
 * Sets both the deprecated vbflags VB_* bits and the vbflags2 VB2_*
 * bits, then runs the appropriate sensing routines.
 */
static void sisfb_get_VB_type(struct sis_video_info *ivideo)
{
	char stdstr[]    = "sisfb: Detected";
	char bridgestr[] = "video bridge";
	u8 vb_chipid;
	u8 reg;

	/* No CRT2 on XGI Z7 */
	if(ivideo->chip == XGI_20)
		return;

	vb_chipid = SiS_GetReg(SISPART4, 0x00);
	switch(vb_chipid) {
	case 0x01:
		/* Part4 0x01 holds the bridge revision; revision ranges
		 * distinguish 301 / 301B(-DH) / 301C / 301LV / 302LV */
		reg = SiS_GetReg(SISPART4, 0x01);
		if(reg < 0xb0) {
			ivideo->vbflags |= VB_301;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301;
			printk(KERN_INFO "%s SiS301 %s\n", stdstr, bridgestr);
		} else if(reg < 0xc0) {
			ivideo->vbflags |= VB_301B;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301B;
			reg = SiS_GetReg(SISPART4, 0x23);
			if(!(reg & 0x02)) {
			   ivideo->vbflags |= VB_30xBDH;	/* Deprecated */
			   ivideo->vbflags2 |= VB2_30xBDH;
			   printk(KERN_INFO "%s SiS301B-DH %s\n", stdstr, bridgestr);
			} else {
			   printk(KERN_INFO "%s SiS301B %s\n", stdstr, bridgestr);
			}
		} else if(reg < 0xd0) {
			ivideo->vbflags |= VB_301C;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301C;
			printk(KERN_INFO "%s SiS301C %s\n", stdstr, bridgestr);
		} else if(reg < 0xe0) {
			ivideo->vbflags |= VB_301LV;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301LV;
			printk(KERN_INFO "%s SiS301LV %s\n", stdstr, bridgestr);
		} else if(reg <= 0xe1) {
			reg = SiS_GetReg(SISPART4, 0x39);
			if(reg == 0xff) {
			   ivideo->vbflags |= VB_302LV;	/* Deprecated */
			   ivideo->vbflags2 |= VB2_302LV;
			   printk(KERN_INFO "%s SiS302LV %s\n", stdstr, bridgestr);
			} else {
			   ivideo->vbflags |= VB_301C;	/* Deprecated */
			   ivideo->vbflags2 |= VB2_301C;
			   printk(KERN_INFO "%s SiS301C(P4) %s\n", stdstr, bridgestr);
#if 0
			   ivideo->vbflags |= VB_302ELV;	/* Deprecated */
			   ivideo->vbflags2 |= VB2_302ELV;
			   printk(KERN_INFO "%s SiS302ELV %s\n", stdstr, bridgestr);
#endif
			}
		}
		break;
	case 0x02:
		ivideo->vbflags |= VB_302B;	/* Deprecated */
		ivideo->vbflags2 |= VB2_302B;
		printk(KERN_INFO "%s SiS302B %s\n", stdstr, bridgestr);
		break;
	}

	/* No SiS bridge found: check for LVDS/Chrontel/Conexant parts
	 * via the BIOS scratch registers (location varies by chip) */
	if((!(ivideo->vbflags2 & VB2_VIDEOBRIDGE)) && (ivideo->chip != SIS_300)) {
		reg = SiS_GetReg(SISCR, 0x37);
		reg &= SIS_EXTERNAL_CHIP_MASK;
		reg >>= 1;
		if(ivideo->sisvga_engine == SIS_300_VGA) {
#ifdef CONFIG_FB_SIS_300
			switch(reg) {
			   case SIS_EXTERNAL_CHIP_LVDS:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			   case SIS_EXTERNAL_CHIP_TRUMPION:
				ivideo->vbflags |= (VB_LVDS | VB_TRUMPION);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_TRUMPION);
				break;
			   case SIS_EXTERNAL_CHIP_CHRONTEL:
				ivideo->vbflags |= VB_CHRONTEL;	/* Deprecated */
				ivideo->vbflags2 |= VB2_CHRONTEL;
				break;
			   case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 1;
#endif
		} else if(ivideo->chip < SIS_661) {
#ifdef CONFIG_FB_SIS_315
			switch (reg) {
			   case SIS310_EXTERNAL_CHIP_LVDS:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			   case SIS310_EXTERNAL_CHIP_LVDS_CHRONTEL:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 2;
#endif
		} else if(ivideo->chip >= SIS_661) {
#ifdef CONFIG_FB_SIS_315
			/* 661 and later encode the transmitter in CR38 bits 7:5 */
			reg = SiS_GetReg(SISCR, 0x38);
			reg >>= 5;
			switch(reg) {
			   case 0x02:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			   case 0x03:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			   case 0x04:
				ivideo->vbflags |= (VB_LVDS | VB_CONEXANT);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CONEXANT);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 2;
#endif
		}
		if(ivideo->vbflags2 & VB2_LVDS) {
		   printk(KERN_INFO "%s LVDS transmitter\n", stdstr);
		}
		if((ivideo->sisvga_engine == SIS_300_VGA) && (ivideo->vbflags2 & VB2_TRUMPION)) {
		   printk(KERN_INFO "%s Trumpion Zurac LCD scaler\n", stdstr);
		}
		if(ivideo->vbflags2 & VB2_CHRONTEL) {
		   printk(KERN_INFO "%s Chrontel TV encoder\n", stdstr);
		}
		if((ivideo->chip >= SIS_661) && (ivideo->vbflags2 & VB2_CONEXANT)) {
		   printk(KERN_INFO "%s Conexant external device\n", stdstr);
		}
	}

	/* Sense attached devices on whatever bridge we found */
	if(ivideo->vbflags2 & VB2_SISBRIDGE) {
		SiS_SenseLCD(ivideo);
		SiS_Sense30x(ivideo);
	} else if(ivideo->vbflags2 & VB2_CHRONTEL) {
		SiS_SenseCh(ivideo);
	}
}
/* ---------- Engine initialization routines ------------ */
/*
 * Initialize the 2D engine's command queue at the top of video RAM:
 * the turbo queue on 300-series, the MMIO command queue on 315-series
 * and XGI chips. Sets the matching *_CMD_QUEUE_CAP bit in ivideo->caps
 * and marks the engine usable (ivideo->engineok).
 */
static void
sisfb_engine_init(struct sis_video_info *ivideo)
{

	/* Initialize command queue (we use MMIO only) */

	/* BEFORE THIS IS CALLED, THE ENGINES *MUST* BE SYNC'ED */

	ivideo->caps &= ~(TURBO_QUEUE_CAP    |
			  MMIO_CMD_QUEUE_CAP |
			  VM_CMD_QUEUE_CAP   |
			  AGP_CMD_QUEUE_CAP);

#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		u32 tqueue_pos;
		u8 tq_state;

		/* Queue sits at the end of VRAM; position in 64K units */
		tqueue_pos = (ivideo->video_size - ivideo->cmdQueueSize) / (64 * 1024);

		tq_state = SiS_GetReg(SISSR, IND_SIS_TURBOQUEUE_SET);
		tq_state |= 0xf0;
		tq_state &= 0xfc;
		tq_state |= (u8)(tqueue_pos >> 8);
		SiS_SetReg(SISSR, IND_SIS_TURBOQUEUE_SET, tq_state);

		SiS_SetReg(SISSR, IND_SIS_TURBOQUEUE_ADR, (u8)(tqueue_pos & 0xff));

		ivideo->caps |= TURBO_QUEUE_CAP;
	}
#endif

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		u32 tempq = 0, templ;
		u8  temp;

		/* Encode the configured queue size for the SET register */
		if(ivideo->chip == XGI_20) {
			switch(ivideo->cmdQueueSize) {
			case (64 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_Z7_64k;
				break;
			case (128 * 1024):
			default:
				temp = SIS_CMD_QUEUE_SIZE_Z7_128k;
			}
		} else {
			switch(ivideo->cmdQueueSize) {
			case (4 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_4M;
				break;
			case (2 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_2M;
				break;
			case (1 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_1M;
				break;
			default:
			case (512 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_512k;
			}
		}

		SiS_SetReg(SISSR, IND_SIS_CMDQUEUE_THRESHOLD, COMMAND_QUEUE_THRESHOLD);
		SiS_SetReg(SISSR, IND_SIS_CMDQUEUE_SET, SIS_CMD_QUEUE_RESET);

		if((ivideo->chip >= XGI_40) && ivideo->modechanged) {
			/* Must disable dual pipe on XGI_40. Can't do
			 * this in MMIO mode, because it requires
			 * setting/clearing a bit in the MMIO fire trigger
			 * register.
			 */
			if(!((templ = MMIO_IN32(ivideo->mmio_vbase, 0x8240)) & (1 << 10))) {

				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, 0);

				SiS_SetReg(SISSR, IND_SIS_CMDQUEUE_SET, (temp | SIS_VRAM_CMDQUEUE_ENABLE));

				tempq = MMIO_IN32(ivideo->mmio_vbase, Q_READ_PTR);
				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, tempq);

				tempq = (u32)(ivideo->video_size - ivideo->cmdQueueSize);
				MMIO_OUT32(ivideo->mmio_vbase, Q_BASE_ADDR, tempq);

				/* Submit one VRAM-queued command that sets
				 * bit 10 of MMIO register 0x8240, then sync */
				writel(0x16800000 + 0x8240, ivideo->video_vbase + tempq);
				writel(templ | (1 << 10), ivideo->video_vbase + tempq + 4);
				writel(0x168F0000, ivideo->video_vbase + tempq + 8);
				writel(0x168F0000, ivideo->video_vbase + tempq + 12);

				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, (tempq + 16));

				sisfb_syncaccel(ivideo);

				SiS_SetReg(SISSR, IND_SIS_CMDQUEUE_SET, SIS_CMD_QUEUE_RESET);

			}
		}

		/* Switch to the MMIO command queue, located at the top of VRAM */
		tempq = MMIO_IN32(ivideo->mmio_vbase, MMIO_QUEUE_READPORT);
		MMIO_OUT32(ivideo->mmio_vbase, MMIO_QUEUE_WRITEPORT, tempq);

		temp |= (SIS_MMIO_CMD_ENABLE | SIS_CMD_AUTO_CORR);
		SiS_SetReg(SISSR, IND_SIS_CMDQUEUE_SET, temp);

		tempq = (u32)(ivideo->video_size - ivideo->cmdQueueSize);
		MMIO_OUT32(ivideo->mmio_vbase, MMIO_QUEUE_PHYBASE, tempq);

		ivideo->caps |= MMIO_CMD_QUEUE_CAP;
	}
#endif

	ivideo->engineok = 1;
}
/*
 * Translate the BIOS panel ID (CR36 low nibble) into a CRT2LCDType
 * using the per-generation panel tables, then fill in the panel's
 * native resolution and default mode index. Falls back to 1024x768
 * RGB18 for unknown/broken BIOS IDs, and applies custom-panel
 * overrides (Barco, 848/856 panels) on 300-series hardware.
 */
static void sisfb_detect_lcd_type(struct sis_video_info *ivideo)
{
	u8 reg;
	int i;

	reg = SiS_GetReg(SISCR, 0x36);
	reg &= 0x0f;
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		ivideo->CRT2LCDType = sis300paneltype[reg];
	} else if(ivideo->chip >= SIS_661) {
		ivideo->CRT2LCDType = sis661paneltype[reg];
	} else {
		ivideo->CRT2LCDType = sis310paneltype[reg];
		if((ivideo->chip == SIS_550) && (sisfb_fstn)) {
			/* FSTN option forces the 320x240 FSTN panel type */
			if((ivideo->CRT2LCDType != LCD_320x240_2) &&
			   (ivideo->CRT2LCDType != LCD_320x240_3)) {
				ivideo->CRT2LCDType = LCD_320x240;
			}
		}
	}

	if(ivideo->CRT2LCDType == LCD_UNKNOWN) {
		/* For broken BIOSes: Assume 1024x768, RGB18 */
		ivideo->CRT2LCDType = LCD_1024x768;
		SiS_SetRegANDOR(SISCR, 0x36, 0xf0, 0x02);
		SiS_SetRegANDOR(SISCR, 0x37, 0xee, 0x01);
		printk(KERN_DEBUG "sisfb: Invalid panel ID (%02x), assuming 1024x768, RGB18\n", reg);
	}

	/* Look up native resolution and default mode for this panel */
	for(i = 0; i < SIS_LCD_NUMBER; i++) {
		if(ivideo->CRT2LCDType == sis_lcd_data[i].lcdtype) {
			ivideo->lcdxres = sis_lcd_data[i].xres;
			ivideo->lcdyres = sis_lcd_data[i].yres;
			ivideo->lcddefmodeidx = sis_lcd_data[i].default_mode_idx;
			break;
		}
	}

#ifdef CONFIG_FB_SIS_300
	/* Custom panels override the table values */
	if(ivideo->SiS_Pr.SiS_CustomT == CUT_BARCO1366) {
		ivideo->lcdxres = 1360; ivideo->lcdyres = 1024;
		ivideo->lcddefmodeidx = DEFAULT_MODE_1360;
	} else if(ivideo->SiS_Pr.SiS_CustomT == CUT_PANEL848) {
		ivideo->lcdxres = 848; ivideo->lcdyres = 480;
		ivideo->lcddefmodeidx = DEFAULT_MODE_848;
	} else if(ivideo->SiS_Pr.SiS_CustomT == CUT_PANEL856) {
		ivideo->lcdxres = 856; ivideo->lcdyres = 480;
		ivideo->lcddefmodeidx = DEFAULT_MODE_856;
	}
#endif

	printk(KERN_DEBUG "sisfb: Detected %dx%d flat panel\n",
			ivideo->lcdxres, ivideo->lcdyres);
}
/*
 * Record the BIOS-programmed panel delay compensation (PDC) - and, on
 * 315-series, the PDC for LCD-via-CRT1 (PDCA) plus the bridge's EMI
 * values - so they can be re-used when the driver programs modes.
 * User-given PDC/PDCA module parameters override detected values.
 */
static void sisfb_save_pdc_emi(struct sis_video_info *ivideo)
{
#ifdef CONFIG_FB_SIS_300
	/* Save the current PanelDelayCompensation if the LCD is currently used */
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		if(ivideo->vbflags2 & (VB2_LVDS | VB2_30xBDH)) {
			int tmp;
			tmp = SiS_GetReg(SISCR, 0x30);
			if(tmp & 0x20) {
				/* Currently on LCD? If yes, read current pdc */
				ivideo->detectedpdc = SiS_GetReg(SISPART1, 0x13);
				ivideo->detectedpdc &= 0x3c;
				if(ivideo->SiS_Pr.PDC == -1) {
					/* Let option override detection */
					ivideo->SiS_Pr.PDC = ivideo->detectedpdc;
				}
				printk(KERN_INFO "sisfb: Detected LCD PDC 0x%02x\n",
					ivideo->detectedpdc);
			}
			if((ivideo->SiS_Pr.PDC != -1) &&
			   (ivideo->SiS_Pr.PDC != ivideo->detectedpdc)) {
				printk(KERN_INFO "sisfb: Using LCD PDC 0x%02x\n",
					ivideo->SiS_Pr.PDC);
			}
		}
	}
#endif

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {

		/* Try to find about LCDA */
		if(ivideo->vbflags2 & VB2_SISLCDABRIDGE) {
			int tmp;
			tmp = SiS_GetReg(SISPART1, 0x13);
			if(tmp & 0x04) {
				ivideo->SiS_Pr.SiS_UseLCDA = true;
				ivideo->detectedlcda = 0x03;
			}
		}

		/* Save PDC */
		if(ivideo->vbflags2 & VB2_SISLVDSBRIDGE) {
			int tmp;
			tmp = SiS_GetReg(SISCR, 0x30);
			if((tmp & 0x20) || (ivideo->detectedlcda != 0xff)) {
				/* Currently on LCD? If yes, read current pdc */
				/* PDC is split: 4 bits in P1 0x2D plus one bit
				 * each in P1 0x35 (CRT2) and P1 0x20 (CRT1) */
				u8 pdc;
				pdc = SiS_GetReg(SISPART1, 0x2D);
				ivideo->detectedpdc  = (pdc & 0x0f) << 1;
				ivideo->detectedpdca = (pdc & 0xf0) >> 3;
				pdc = SiS_GetReg(SISPART1, 0x35);
				ivideo->detectedpdc |= ((pdc >> 7) & 0x01);
				pdc = SiS_GetReg(SISPART1, 0x20);
				ivideo->detectedpdca |= ((pdc >> 6) & 0x01);
				if(ivideo->newrom) {
					/* New ROM invalidates other PDC resp. */
					if(ivideo->detectedlcda != 0xff) {
						ivideo->detectedpdc = 0xff;
					} else {
						ivideo->detectedpdca = 0xff;
					}
				}
				if(ivideo->SiS_Pr.PDC == -1) {
					if(ivideo->detectedpdc != 0xff) {
						ivideo->SiS_Pr.PDC = ivideo->detectedpdc;
					}
				}
				if(ivideo->SiS_Pr.PDCA == -1) {
					if(ivideo->detectedpdca != 0xff) {
						ivideo->SiS_Pr.PDCA = ivideo->detectedpdca;
					}
				}
				if(ivideo->detectedpdc != 0xff) {
					printk(KERN_INFO
						"sisfb: Detected LCD PDC 0x%02x (for LCD=CRT2)\n",
						ivideo->detectedpdc);
				}
				if(ivideo->detectedpdca != 0xff) {
					printk(KERN_INFO
						"sisfb: Detected LCD PDC1 0x%02x (for LCD=CRT1)\n",
						ivideo->detectedpdca);
				}
			}

			/* Save EMI */
			if(ivideo->vbflags2 & VB2_SISEMIBRIDGE) {
				ivideo->SiS_Pr.EMI_30 = SiS_GetReg(SISPART4, 0x30);
				ivideo->SiS_Pr.EMI_31 = SiS_GetReg(SISPART4, 0x31);
				ivideo->SiS_Pr.EMI_32 = SiS_GetReg(SISPART4, 0x32);
				ivideo->SiS_Pr.EMI_33 = SiS_GetReg(SISPART4, 0x33);
				ivideo->SiS_Pr.HaveEMI = true;
				/* tmp still holds CR30 from the PDC block above */
				if((tmp & 0x20) || (ivideo->detectedlcda != 0xff)) {
					ivideo->SiS_Pr.HaveEMILCD = true;
				}
			}
		}

		/* Let user override detected PDCs (all bridges) */
		if(ivideo->vbflags2 & VB2_30xBLV) {
			if((ivideo->SiS_Pr.PDC != -1) &&
			   (ivideo->SiS_Pr.PDC != ivideo->detectedpdc)) {
				printk(KERN_INFO "sisfb: Using LCD PDC 0x%02x (for LCD=CRT2)\n",
					ivideo->SiS_Pr.PDC);
			}
			if((ivideo->SiS_Pr.PDCA != -1) &&
			   (ivideo->SiS_Pr.PDCA != ivideo->detectedpdca)) {
				printk(KERN_INFO "sisfb: Using LCD PDC1 0x%02x (for LCD=CRT1)\n",
					ivideo->SiS_Pr.PDCA);
			}
		}
	}
#endif
}
/* -------------------- Memory manager routines ---------------------- */
/*
 * sisfb_getheapstart - compute the byte offset where the fb memory heap begins
 *
 * Layout (normal case):
 *	CCCCCCCCDDDDDDDDDDDDDDDDDDDDDDDDDDDDHHHHQQQQQQQQQQ
 *	C = console, D = heap, H = HWCursor, Q = cmd-queue
 *
 * On 76x in UMA+LFB mode:
 *	DDDDDDDDDDDCCCCCCCCCCCCCCCCCCCCCCCCHHHHQQQQQQQQQQQ
 * where the heap is the entire UMA area, eventually reaching into the
 * LFB area if the given "mem" parameter exceeds the UMA size.
 *
 * The start is basically given by the "mem" module/kernel parameter;
 * maximum is videosize - cmd_queue - hwcursor (a heap of size 0),
 * default is a chip-dependent value (SIS 300: depends on videosize;
 * SiS 315/330/340/XGI: 32k below the maximum).
 */
static u32 sisfb_getheapstart(struct sis_video_info *ivideo)
{
	u32 requested = ivideo->sisfb_parm_mem * 1024;
	u32 limit = ivideo->video_size - ivideo->hwcursor_size - ivideo->cmdQueueSize;
	u32 fallback;

	if (ivideo->sisvga_engine == SIS_300_VGA) {
		if (ivideo->video_size > 0x1000000)
			fallback = 0xc00000;
		else if (ivideo->video_size > 0x800000)
			fallback = 0x800000;
		else
			fallback = 0x400000;
	} else if (ivideo->UMAsize && ivideo->LFBsize) {
		/* UMA+LFB: heap starts at offset 0 */
		requested = fallback = 0;
	} else {
		/* 315 and later: default 32k below the maximum offset */
		fallback = limit - 0x8000;
	}

	/* Use default for secondary card for now (FIXME) */
	if (!requested || requested > limit || ivideo->cardnumber != 0)
		requested = fallback;

	return requested;
}
/*
 * sisfb_getheapsize - compute the size in bytes of the fb memory heap
 *
 * Also records the heap offset (video_offset, UMA+LFB case only) and the
 * amount of memory left for the console/framebuffer (sisfb_mem).
 */
static u32 sisfb_getheapsize(struct sis_video_info *ivideo)
{
	u32 avail = ivideo->video_size - ivideo->hwcursor_size - ivideo->cmdQueueSize;
	u32 heapsize;

	if (ivideo->UMAsize && ivideo->LFBsize) {
		u32 parm = ivideo->sisfb_parm_mem * 1024;

		/* Unusable "mem" parameter: heap covers the whole UMA area */
		if (!parm || parm > avail || (avail - parm) < ivideo->UMAsize) {
			heapsize = ivideo->UMAsize;
			avail -= ivideo->UMAsize;
		} else {
			heapsize = avail - parm;
			avail = parm;
		}
		ivideo->video_offset = heapsize;
		ivideo->sisfb_mem = avail;
	} else {
		/* Normal layout: heap runs from heapstart up to cursor/queue */
		heapsize = avail - ivideo->heapstart;
		ivideo->sisfb_mem = ivideo->heapstart;
	}

	return heapsize;
}
/*
 * sisfb_heap_init - set up the video RAM heap for one card
 *
 * Validates the user-supplied "mem" parameter, computes heap start/size,
 * and initializes the doubly-linked free/used lists with one free node
 * covering the entire heap. Returns 0 on success, 1 if the initial node
 * could not be allocated.
 */
static int sisfb_heap_init(struct sis_video_info *ivideo)
{
struct SIS_OH *poh;
ivideo->video_offset = 0;
/* Reject a "mem" parameter below 2MB or beyond the video RAM size */
if(ivideo->sisfb_parm_mem) {
if( (ivideo->sisfb_parm_mem < (2 * 1024 * 1024)) ||
(ivideo->sisfb_parm_mem > ivideo->video_size) ) {
ivideo->sisfb_parm_mem = 0;
}
}
ivideo->heapstart = sisfb_getheapstart(ivideo);
ivideo->sisfb_heap_size = sisfb_getheapsize(ivideo);
ivideo->sisfb_heap_start = ivideo->video_vbase + ivideo->heapstart;
ivideo->sisfb_heap_end = ivideo->sisfb_heap_start + ivideo->sisfb_heap_size;
printk(KERN_INFO "sisfb: Memory heap starting at %dK, size %dK\n",
(int)(ivideo->heapstart / 1024), (int)(ivideo->sisfb_heap_size / 1024));
ivideo->sisfb_heap.vinfo = ivideo;
ivideo->sisfb_heap.poha_chain = NULL;
ivideo->sisfb_heap.poh_freelist = NULL;
/* One node describing the whole heap as a single free block */
poh = sisfb_poh_new_node(&ivideo->sisfb_heap);
if(poh == NULL)
return 1;
/* Link it into the circular free list anchored at oh_free */
poh->poh_next = &ivideo->sisfb_heap.oh_free;
poh->poh_prev = &ivideo->sisfb_heap.oh_free;
poh->size = ivideo->sisfb_heap_size;
poh->offset = ivideo->heapstart;
ivideo->sisfb_heap.oh_free.poh_next = poh;
ivideo->sisfb_heap.oh_free.poh_prev = poh;
ivideo->sisfb_heap.oh_free.size = 0;
ivideo->sisfb_heap.max_freesize = poh->size;
/* Used list starts empty (self-linked sentinel) */
ivideo->sisfb_heap.oh_used.poh_next = &ivideo->sisfb_heap.oh_used;
ivideo->sisfb_heap.oh_used.poh_prev = &ivideo->sisfb_heap.oh_used;
ivideo->sisfb_heap.oh_used.size = SENTINEL;
if(ivideo->cardnumber == 0) {
/* For the first card, make this heap the "global" one
* for old DRM (which could handle only one card)
*/
sisfb_heap = &ivideo->sisfb_heap;
}
return 0;
}
/*
 * sisfb_poh_new_node - take a node descriptor from the heap's freelist
 *
 * If the freelist is empty, allocates a new SIS_OHALLOC slab, chains it
 * into poha_chain (for later bulk freeing), links the slab's embedded
 * SIS_OH array into a singly-linked freelist, and then pops one node.
 * Returns NULL only if kmalloc fails.
 */
static struct SIS_OH *
sisfb_poh_new_node(struct SIS_HEAP *memheap)
{
struct SIS_OHALLOC *poha;
struct SIS_OH *poh;
unsigned long cOhs;
int i;
if(memheap->poh_freelist == NULL) {
poha = kmalloc(SIS_OH_ALLOC_SIZE, GFP_KERNEL);
if(!poha)
return NULL;
poha->poha_chain = poha->poha_next; /* no-op placeholder? see note below */
poha->poha_next = memheap->poha_chain;
memheap->poha_chain = poha;
/* +1: aoh[] already contains one element inside sizeof(SIS_OHALLOC) */
cOhs = (SIS_OH_ALLOC_SIZE - sizeof(struct SIS_OHALLOC)) / sizeof(struct SIS_OH) + 1;
poh = &poha->aoh[0];
/* String the slab's nodes together into a freelist */
for(i = cOhs - 1; i != 0; i--) {
poh->poh_next = poh + 1;
poh = poh + 1;
}
poh->poh_next = NULL;
memheap->poh_freelist = &poha->aoh[0];
}
poh = memheap->poh_freelist;
memheap->poh_freelist = poh->poh_next;
return poh;
}
/*
 * sisfb_poh_allocate - first-fit allocation from the heap's free list
 *
 * Walks the circular free list for the first block with size >= @size.
 * An exactly-matching block is moved wholesale to the used list; a larger
 * block is split: a fresh node describes the allocated front part, and
 * the free block shrinks in place. Returns the used-list node, or NULL
 * if no block is large enough (or a node descriptor cannot be obtained).
 */
static struct SIS_OH *
sisfb_poh_allocate(struct SIS_HEAP *memheap, u32 size)
{
struct SIS_OH *pohThis;
struct SIS_OH *pohRoot;
int bAllocated = 0;
/* Fast reject: total free space is tracked in max_freesize */
if(size > memheap->max_freesize) {
DPRINTK("sisfb: Can't allocate %dk video memory\n",
(unsigned int) size / 1024);
return NULL;
}
pohThis = memheap->oh_free.poh_next;
while(pohThis != &memheap->oh_free) {
if(size <= pohThis->size) {
bAllocated = 1;
break;
}
pohThis = pohThis->poh_next;
}
if(!bAllocated) {
DPRINTK("sisfb: Can't allocate %dk video memory\n",
(unsigned int) size / 1024);
return NULL;
}
if(size == pohThis->size) {
/* Exact fit: unlink from the free list, reuse the node */
pohRoot = pohThis;
sisfb_delete_node(pohThis);
} else {
/* Split: new node takes the front, free block keeps the tail */
pohRoot = sisfb_poh_new_node(memheap);
if(pohRoot == NULL)
return NULL;
pohRoot->offset = pohThis->offset;
pohRoot->size = size;
pohThis->offset += size;
pohThis->size -= size;
}
memheap->max_freesize -= size;
pohThis = &memheap->oh_used;
sisfb_insert_node(pohThis, pohRoot);
return pohRoot;
}
/* Unlink @poh from whatever doubly-linked list it is on. */
static void
sisfb_delete_node(struct SIS_OH *poh)
{
	struct SIS_OH *prev = poh->poh_prev;
	struct SIS_OH *next = poh->poh_next;

	prev->poh_next = next;
	next->poh_prev = prev;
}
/* Insert @poh into the list immediately after the anchor @pohList. */
static void
sisfb_insert_node(struct SIS_OH *pohList, struct SIS_OH *poh)
{
	struct SIS_OH *after = pohList->poh_next;

	poh->poh_prev = pohList;
	poh->poh_next = after;
	pohList->poh_next = poh;
	after->poh_prev = poh;
}
/*
 * sisfb_poh_free - return a block to the free list, coalescing neighbors
 *
 * Finds the used-list node whose offset equals @base, removes it, and
 * merges it with adjacent free blocks (below, above, or both) when their
 * ranges touch. Returns the resulting free-list node, or NULL if @base
 * does not match any outstanding allocation.
 */
static struct SIS_OH *
sisfb_poh_free(struct SIS_HEAP *memheap, u32 base)
{
struct SIS_OH *pohThis;
struct SIS_OH *poh_freed;
struct SIS_OH *poh_prev;
struct SIS_OH *poh_next;
u32 ulUpper;
u32 ulLower;
int foundNode = 0;
/* Locate the allocation by its start offset */
poh_freed = memheap->oh_used.poh_next;
while(poh_freed != &memheap->oh_used) {
if(poh_freed->offset == base) {
foundNode = 1;
break;
}
poh_freed = poh_freed->poh_next;
}
if(!foundNode)
return NULL;
memheap->max_freesize += poh_freed->size;
poh_prev = poh_next = NULL;
ulUpper = poh_freed->offset + poh_freed->size;
ulLower = poh_freed->offset;
/* Scan the free list for blocks bordering the freed range */
pohThis = memheap->oh_free.poh_next;
while(pohThis != &memheap->oh_free) {
if(pohThis->offset == ulUpper) {
poh_next = pohThis;
} else if((pohThis->offset + pohThis->size) == ulLower) {
poh_prev = pohThis;
}
pohThis = pohThis->poh_next;
}
sisfb_delete_node(poh_freed);
/* Free block both below and above: fold all three into poh_prev */
if(poh_prev && poh_next) {
poh_prev->size += (poh_freed->size + poh_next->size);
sisfb_delete_node(poh_next);
sisfb_free_node(memheap, poh_freed);
sisfb_free_node(memheap, poh_next);
return poh_prev;
}
/* Free block directly below: extend it upward */
if(poh_prev) {
poh_prev->size += poh_freed->size;
sisfb_free_node(memheap, poh_freed);
return poh_prev;
}
/* Free block directly above: extend it downward */
if(poh_next) {
poh_next->size += poh_freed->size;
poh_next->offset = poh_freed->offset;
sisfb_free_node(memheap, poh_freed);
return poh_next;
}
/* No neighbors: the freed node becomes a new free-list entry */
sisfb_insert_node(&memheap->oh_free, poh_freed);
return poh_freed;
}
/* Push an unused node descriptor back onto the heap's node freelist. */
static void
sisfb_free_node(struct SIS_HEAP *memheap, struct SIS_OH *poh)
{
	if (!poh)
		return;

	poh->poh_next = memheap->poh_freelist;
	memheap->poh_freelist = poh;
}
/*
 * Allocate video RAM from a card's heap and fill in the request.
 * On failure (bad/missing heap or no space) offset and size are zeroed.
 */
static void
sis_int_malloc(struct sis_video_info *ivideo, struct sis_memreq *req)
{
	struct SIS_OH *node = NULL;

	if (ivideo && ivideo->sisfb_id == SISFB_ID && !ivideo->havenoheap)
		node = sisfb_poh_allocate(&ivideo->sisfb_heap, (u32)req->size);

	if (node) {
		req->offset = node->offset;
		req->size = node->size;
		DPRINTK("sisfb: Video RAM allocation succeeded: 0x%lx\n",
			(node->offset + ivideo->video_vbase));
	} else {
		req->offset = req->size = 0;
		DPRINTK("sisfb: Video RAM allocation failed\n");
	}
}
/*
 * sis_malloc - legacy single-card entry point for old DRM
 *
 * Fix: the global sisfb_heap is only set in sisfb_heap_init() for card 0;
 * if no card was successfully initialized it is still NULL and the old
 * code dereferenced it unconditionally (NULL pointer oops). Fail the
 * request gracefully instead.
 */
void
sis_malloc(struct sis_memreq *req)
{
	struct sis_video_info *ivideo;

	if (!sisfb_heap) {
		/* No heap was ever initialized */
		req->offset = req->size = 0;
		return;
	}

	ivideo = sisfb_heap->vinfo;

	if (&ivideo->sisfb_heap == sisfb_heap)
		sis_int_malloc(ivideo, req);
	else
		req->offset = req->size = 0;
}
/* Per-device allocation entry point used by newer DRM. */
void
sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req)
{
	sis_int_malloc(pci_get_drvdata(pdev), req);
}
/* sis_free: u32 because "base" is offset inside video ram, can never be >4GB */
static void
sis_int_free(struct sis_video_info *ivideo, u32 base)
{
	/* Silently ignore calls for cards without a usable heap */
	if (ivideo && ivideo->sisfb_id == SISFB_ID && !ivideo->havenoheap) {
		if (sisfb_poh_free(&ivideo->sisfb_heap, base) == NULL) {
			DPRINTK("sisfb: sisfb_poh_free() failed at base 0x%x\n",
				(unsigned int) base);
		}
	}
}
/*
 * sis_free - legacy single-card free entry point for old DRM
 *
 * Fix: guard against a NULL global sisfb_heap (set only when card 0's
 * heap initialized successfully); the old code dereferenced it blindly.
 */
void
sis_free(u32 base)
{
	if (!sisfb_heap)
		return;

	sis_int_free(sisfb_heap->vinfo, base);
}
/* Per-device free entry point used by newer DRM. */
void
sis_free_new(struct pci_dev *pdev, u32 base)
{
	sis_int_free(pci_get_drvdata(pdev), base);
}
/* --------------------- SetMode routines ------------------------- */
/*
 * sisfb_check_engine_and_sync - wait for 2D/3D engines if they are active
 *
 * Reads the MMIO/module-enable registers directly rather than trusting
 * ivideo->accel (which may already have been changed by the caller) and
 * issues an engine sync when the engines are enabled.
 */
static void
sisfb_check_engine_and_sync(struct sis_video_info *ivideo)
{
u8 cr30, cr31;
/* Check if MMIO and engines are enabled,
* and sync in case they are. Can't use
* ivideo->accel here, as this might have
* been changed before this is called.
*/
cr30 = SiS_GetReg(SISSR, IND_SIS_PCI_ADDRESS_SET);
cr31 = SiS_GetReg(SISSR, IND_SIS_MODULE_ENABLE);
/* MMIO and 2D/3D engine enabled? */
if((cr30 & SIS_MEM_MAP_IO_ENABLE) && (cr31 & 0x42)) {
#ifdef CONFIG_FB_SIS_300
if(ivideo->sisvga_engine == SIS_300_VGA) {
/* Don't care about TurboQueue. It's
* enough to know that the engines
* are enabled
*/
sisfb_syncaccel(ivideo);
}
#endif
#ifdef CONFIG_FB_SIS_315
if(ivideo->sisvga_engine == SIS_315_VGA) {
/* Check that any queue mode is
* enabled, and that the queue
* is not in the state of "reset"
*/
cr30 = SiS_GetReg(SISSR, 0x26);
if((cr30 & 0xe0) && (!(cr30 & 0x01))) {
sisfb_syncaccel(ivideo);
}
}
#endif
}
}
/*
 * sisfb_pre_setmode - program CRT2/bridge registers before a mode switch
 *
 * Builds CR30/31/33/35/38 from the requested vbflags (TV standard,
 * LCD, secondary VGA, or CRT2 off), updates currentvbflags accordingly,
 * and writes the registers. Register/bit usage differs between the
 * 300 and 315/330/340 series and pre/post-661 chips, hence the #ifdef
 * blocks and the tvregnum indirection. Finishes by syncing the engines.
 */
static void
sisfb_pre_setmode(struct sis_video_info *ivideo)
{
u8 cr30 = 0, cr31 = 0, cr33 = 0, cr35 = 0, cr38 = 0;
int tvregnum = 0;
/* Keep only bridge type and DISP2 selection; output bits are rebuilt */
ivideo->currentvbflags &= (VB_VIDEOBRIDGE | VB_DISPTYPE_DISP2);
SiS_SetReg(SISSR, 0x05, 0x86);
cr31 = SiS_GetReg(SISCR, 0x31);
cr31 &= ~0x60;
cr31 |= 0x04;
cr33 = ivideo->rate_idx & 0x0F;
#ifdef CONFIG_FB_SIS_315
if(ivideo->sisvga_engine == SIS_315_VGA) {
if(ivideo->chip >= SIS_661) {
cr38 = SiS_GetReg(SISCR, 0x38);
cr38 &= ~0x07; /* Clear LCDA/DualEdge and YPbPr bits */
} else {
tvregnum = 0x38;
cr38 = SiS_GetReg(SISCR, tvregnum);
cr38 &= ~0x3b; /* Clear LCDA/DualEdge and YPbPr bits */
}
}
#endif
#ifdef CONFIG_FB_SIS_300
if(ivideo->sisvga_engine == SIS_300_VGA) {
tvregnum = 0x35;
cr38 = SiS_GetReg(SISCR, tvregnum);
}
#endif
SiS_SetEnableDstn(&ivideo->SiS_Pr, false);
SiS_SetEnableFstn(&ivideo->SiS_Pr, false);
ivideo->curFSTN = ivideo->curDSTN = 0;
switch(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
case CRT2_TV:
cr38 &= ~0xc0; /* Clear PAL-M / PAL-N bits */
if((ivideo->vbflags & TV_YPBPR) && (ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) {
#ifdef CONFIG_FB_SIS_315
/* YPbPr: encoding of the sub-standard differs pre/post 661 */
if(ivideo->chip >= SIS_661) {
cr38 |= 0x04;
if(ivideo->vbflags & TV_YPBPR525P) cr35 |= 0x20;
else if(ivideo->vbflags & TV_YPBPR750P) cr35 |= 0x40;
else if(ivideo->vbflags & TV_YPBPR1080I) cr35 |= 0x60;
cr30 |= SIS_SIMULTANEOUS_VIEW_ENABLE;
cr35 &= ~0x01;
ivideo->currentvbflags |= (TV_YPBPR | (ivideo->vbflags & TV_YPBPRALL));
} else if(ivideo->sisvga_engine == SIS_315_VGA) {
cr30 |= (0x80 | SIS_SIMULTANEOUS_VIEW_ENABLE);
cr38 |= 0x08;
if(ivideo->vbflags & TV_YPBPR525P) cr38 |= 0x10;
else if(ivideo->vbflags & TV_YPBPR750P) cr38 |= 0x20;
else if(ivideo->vbflags & TV_YPBPR1080I) cr38 |= 0x30;
cr31 &= ~0x01;
ivideo->currentvbflags |= (TV_YPBPR | (ivideo->vbflags & TV_YPBPRALL));
}
#endif
} else if((ivideo->vbflags & TV_HIVISION) &&
(ivideo->vbflags2 & VB2_SISHIVISIONBRIDGE)) {
if(ivideo->chip >= SIS_661) {
cr38 |= 0x04;
cr35 |= 0x60;
} else {
cr30 |= 0x80;
}
cr30 |= SIS_SIMULTANEOUS_VIEW_ENABLE;
cr31 |= 0x01;
cr35 |= 0x01;
ivideo->currentvbflags |= TV_HIVISION;
} else if(ivideo->vbflags & TV_SCART) {
cr30 = (SIS_VB_OUTPUT_SCART | SIS_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= 0x01;
cr35 |= 0x01;
ivideo->currentvbflags |= TV_SCART;
} else {
if(ivideo->vbflags & TV_SVIDEO) {
cr30 = (SIS_VB_OUTPUT_SVIDEO | SIS_SIMULTANEOUS_VIEW_ENABLE);
ivideo->currentvbflags |= TV_SVIDEO;
}
if(ivideo->vbflags & TV_AVIDEO) {
cr30 = (SIS_VB_OUTPUT_COMPOSITE | SIS_SIMULTANEOUS_VIEW_ENABLE);
ivideo->currentvbflags |= TV_AVIDEO;
}
}
cr31 |= SIS_DRIVER_MODE;
/* Composite/S-Video: select PAL/NTSC variant */
if(ivideo->vbflags & (TV_AVIDEO | TV_SVIDEO)) {
if(ivideo->vbflags & TV_PAL) {
cr31 |= 0x01; cr35 |= 0x01;
ivideo->currentvbflags |= TV_PAL;
if(ivideo->vbflags & TV_PALM) {
cr38 |= 0x40; cr35 |= 0x04;
ivideo->currentvbflags |= TV_PALM;
} else if(ivideo->vbflags & TV_PALN) {
cr38 |= 0x80; cr35 |= 0x08;
ivideo->currentvbflags |= TV_PALN;
}
} else {
cr31 &= ~0x01; cr35 &= ~0x01;
ivideo->currentvbflags |= TV_NTSC;
if(ivideo->vbflags & TV_NTSCJ) {
cr38 |= 0x40; cr35 |= 0x02;
ivideo->currentvbflags |= TV_NTSCJ;
}
}
}
break;
case CRT2_LCD:
cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= SIS_DRIVER_MODE;
SiS_SetEnableDstn(&ivideo->SiS_Pr, ivideo->sisfb_dstn);
SiS_SetEnableFstn(&ivideo->SiS_Pr, ivideo->sisfb_fstn);
ivideo->curFSTN = ivideo->sisfb_fstn;
ivideo->curDSTN = ivideo->sisfb_dstn;
break;
case CRT2_VGA:
cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= SIS_DRIVER_MODE;
if(ivideo->sisfb_nocrt2rate) {
cr33 |= (sisbios_mode[ivideo->sisfb_mode_idx].rate_idx << 4);
} else {
cr33 |= ((ivideo->rate_idx & 0x0F) << 4);
}
break;
default: /* disable CRT2 */
cr30 = 0x00;
cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
}
SiS_SetReg(SISCR, 0x30, cr30);
SiS_SetReg(SISCR, 0x33, cr33);
if(ivideo->chip >= SIS_661) {
#ifdef CONFIG_FB_SIS_315
cr31 &= ~0x01; /* Clear PAL flag (now in CR35) */
SiS_SetRegANDOR(SISCR, 0x35, ~0x10, cr35); /* Leave overscan bit alone */
cr38 &= 0x07; /* Use only LCDA and HiVision/YPbPr bits */
SiS_SetRegANDOR(SISCR, 0x38, 0xf8, cr38);
#endif
} else if(ivideo->chip != SIS_300) {
SiS_SetReg(SISCR, tvregnum, cr38);
}
SiS_SetReg(SISCR, 0x31, cr31);
ivideo->SiS_Pr.SiS_UseOEM = ivideo->sisfb_useoem;
sisfb_check_engine_and_sync(ivideo);
}
/* Fix SR11 for 661 and later */
#ifdef CONFIG_FB_SIS_315
/*
 * sisfb_fixup_SR11 - clean up power-management bits in SR11 (661+)
 *
 * If SR11 bit 5 is set, bumps SR3E (wraps at 0xff) and re-reads SR11;
 * then clears the upper nibble of SR11 if any of those bits are set.
 */
static void
sisfb_fixup_SR11(struct sis_video_info *ivideo)
{
u8 tmpreg;
if(ivideo->chip >= SIS_661) {
tmpreg = SiS_GetReg(SISSR, 0x11);
if(tmpreg & 0x20) {
tmpreg = SiS_GetReg(SISSR, 0x3e);
tmpreg = (tmpreg + 1) & 0xff;
SiS_SetReg(SISSR, 0x3e, tmpreg);
tmpreg = SiS_GetReg(SISSR, 0x11);
}
if(tmpreg & 0xf0) {
SiS_SetRegAND(SISSR, 0x11, 0x0f);
}
}
}
#endif
/*
 * sisfb_set_TVxposoffset - shift the TV picture horizontally by @val
 *
 * @val is clamped to [-32, 32] and remembered in tvxpos. The actual
 * register programming only happens after a mode has been set and when
 * CRT2 is a TV: via CH700x registers on Chrontel type-1 encoders, or
 * via Part2 registers (12-bit values split across two registers) on
 * SiS bridges.
 */
static void
sisfb_set_TVxposoffset(struct sis_video_info *ivideo, int val)
{
if(val > 32) val = 32;
if(val < -32) val = -32;
ivideo->tvxpos = val;
if(ivideo->sisfblocked) return;
if(!ivideo->modechanged) return;
if(ivideo->currentvbflags & CRT2_TV) {
if(ivideo->vbflags2 & VB2_CHRONTEL) {
int x = ivideo->tvx;
switch(ivideo->chronteltype) {
case 1:
x += val;
if(x < 0) x = 0;
SiS_SetReg(SISSR, 0x05, 0x86);
SiS_SetCH700x(&ivideo->SiS_Pr, 0x0a, (x & 0xff));
SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x08, ((x & 0x0100) >> 7), 0xFD);
break;
case 2:
/* Not supported by hardware */
break;
}
} else if(ivideo->vbflags2 & VB2_SISBRIDGE) {
u8 p2_1f,p2_20,p2_2b,p2_42,p2_43;
unsigned short temp;
/* Start from the values saved at mode-set time */
p2_1f = ivideo->p2_1f;
p2_20 = ivideo->p2_20;
p2_2b = ivideo->p2_2b;
p2_42 = ivideo->p2_42;
p2_43 = ivideo->p2_43;
/* 12-bit value: low byte in 0x1f, high nibble in 0x20[7:4] */
temp = p2_1f | ((p2_20 & 0xf0) << 4);
temp += (val * 2);
p2_1f = temp & 0xff;
p2_20 = (temp & 0xf00) >> 4;
p2_2b = ((p2_2b & 0x0f) + (val * 2)) & 0x0f;
/* Same 12-bit split for 0x43 / 0x42[7:4] */
temp = p2_43 | ((p2_42 & 0xf0) << 4);
temp += (val * 2);
p2_43 = temp & 0xff;
p2_42 = (temp & 0xf00) >> 4;
SiS_SetReg(SISPART2, 0x1f, p2_1f);
SiS_SetRegANDOR(SISPART2, 0x20, 0x0F, p2_20);
SiS_SetRegANDOR(SISPART2, 0x2b, 0xF0, p2_2b);
SiS_SetRegANDOR(SISPART2, 0x42, 0x0F, p2_42);
SiS_SetReg(SISPART2, 0x43, p2_43);
}
}
}
/*
 * sisfb_set_TVyposoffset - shift the TV picture vertically by @val
 *
 * @val is clamped to [-32, 32] and remembered in tvypos. Programming
 * only happens after a mode has been set and when CRT2 is a TV: via
 * CH700x registers on Chrontel type-1 encoders, or Part2 0x01/0x02 on
 * SiS bridges.
 *
 * Fix: p2_01/p2_02 were declared plain "char". The arithmetic and the
 * "<= 0" underflow correction below require signed behavior, but the
 * signedness of plain char is implementation-defined (unsigned on e.g.
 * ARM and PowerPC), which silently breaks the adjustment there. Use
 * "signed char" explicitly.
 */
static void
sisfb_set_TVyposoffset(struct sis_video_info *ivideo, int val)
{
	if(val > 32) val = 32;
	if(val < -32) val = -32;
	ivideo->tvypos = val;
	if(ivideo->sisfblocked) return;
	if(!ivideo->modechanged) return;
	if(ivideo->currentvbflags & CRT2_TV) {
		if(ivideo->vbflags2 & VB2_CHRONTEL) {
			int y = ivideo->tvy;
			switch(ivideo->chronteltype) {
			case 1:
				y -= val;
				if(y < 0) y = 0;
				SiS_SetReg(SISSR, 0x05, 0x86);
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x0b, (y & 0xff));
				SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x08,
						((y & 0x0100) >> 8), 0xFE);
				break;
			case 2:
				/* Not supported by hardware */
				break;
			}
		} else if(ivideo->vbflags2 & VB2_SISBRIDGE) {
			signed char p2_01, p2_02;	/* must be signed, see above */
			val /= 2;
			p2_01 = ivideo->p2_01;
			p2_02 = ivideo->p2_02;
			p2_01 += val;
			p2_02 += val;
			if(!(ivideo->currentvbflags & (TV_HIVISION | TV_YPBPR))) {
				/* Keep both values positive (not valid for HiVision/YPbPr) */
				while((p2_01 <= 0) || (p2_02 <= 0)) {
					p2_01 += 2;
					p2_02 += 2;
				}
			}
			SiS_SetReg(SISPART2, 0x01, p2_01);
			SiS_SetReg(SISPART2, 0x02, p2_02);
		}
	}
}
/*
 * sisfb_post_setmode - finish a mode switch
 *
 * Marks the mode as changed, switches CRT1 on/off as requested (unless
 * the bridge is in slave mode), updates currentvbflags (single/mirror
 * mode), saves TV position registers for later offset adjustment,
 * re-applies stored TV x/y offsets, syncs and re-initializes the
 * acceleration engines.
 */
static void
sisfb_post_setmode(struct sis_video_info *ivideo)
{
bool crt1isoff = false;
bool doit = true;
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
u8 reg;
#endif
#ifdef CONFIG_FB_SIS_315
u8 reg1;
#endif
SiS_SetReg(SISSR, 0x05, 0x86);
#ifdef CONFIG_FB_SIS_315
sisfb_fixup_SR11(ivideo);
#endif
/* Now we actually HAVE changed the display mode */
ivideo->modechanged = 1;
/* We can't switch off CRT1 if bridge is in slave mode */
if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) {
if(sisfb_bridgeisslave(ivideo)) doit = false;
} else
ivideo->sisfb_crt1off = 0;
#ifdef CONFIG_FB_SIS_300
if(ivideo->sisvga_engine == SIS_300_VGA) {
/* 300 series: CRT1 on/off via CR17 bit 7 */
if((ivideo->sisfb_crt1off) && (doit)) {
crt1isoff = true;
reg = 0x00;
} else {
crt1isoff = false;
reg = 0x80;
}
SiS_SetRegANDOR(SISCR, 0x17, 0x7f, reg);
}
#endif
#ifdef CONFIG_FB_SIS_315
if(ivideo->sisvga_engine == SIS_315_VGA) {
/* 315 series: CRT1 off via CR63 bit 6 plus SR1F DPMS bits */
if((ivideo->sisfb_crt1off) && (doit)) {
crt1isoff = true;
reg = 0x40;
reg1 = 0xc0;
} else {
crt1isoff = false;
reg = 0x00;
reg1 = 0x00;
}
SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, ~0x40, reg);
SiS_SetRegANDOR(SISSR, 0x1f, 0x3f, reg1);
}
#endif
if(crt1isoff) {
ivideo->currentvbflags &= ~VB_DISPTYPE_CRT1;
ivideo->currentvbflags |= VB_SINGLE_MODE;
} else {
ivideo->currentvbflags |= VB_DISPTYPE_CRT1;
if(ivideo->currentvbflags & VB_DISPTYPE_CRT2) {
ivideo->currentvbflags |= VB_MIRROR_MODE;
} else {
ivideo->currentvbflags |= VB_SINGLE_MODE;
}
}
SiS_SetRegAND(SISSR, IND_SIS_RAMDAC_CONTROL, ~0x04);
if(ivideo->currentvbflags & CRT2_TV) {
/* Remember TV position registers so the offset setters can
* later adjust relative to these mode-set-time values
*/
if(ivideo->vbflags2 & VB2_SISBRIDGE) {
ivideo->p2_1f = SiS_GetReg(SISPART2, 0x1f);
ivideo->p2_20 = SiS_GetReg(SISPART2, 0x20);
ivideo->p2_2b = SiS_GetReg(SISPART2, 0x2b);
ivideo->p2_42 = SiS_GetReg(SISPART2, 0x42);
ivideo->p2_43 = SiS_GetReg(SISPART2, 0x43);
ivideo->p2_01 = SiS_GetReg(SISPART2, 0x01);
ivideo->p2_02 = SiS_GetReg(SISPART2, 0x02);
} else if(ivideo->vbflags2 & VB2_CHRONTEL) {
if(ivideo->chronteltype == 1) {
ivideo->tvx = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0a);
ivideo->tvx |= (((SiS_GetCH700x(&ivideo->SiS_Pr, 0x08) & 0x02) >> 1) << 8);
ivideo->tvy = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0b);
ivideo->tvy |= ((SiS_GetCH700x(&ivideo->SiS_Pr, 0x08) & 0x01) << 8);
}
}
}
if(ivideo->tvxpos) {
sisfb_set_TVxposoffset(ivideo, ivideo->tvxpos);
}
if(ivideo->tvypos) {
sisfb_set_TVyposoffset(ivideo, ivideo->tvypos);
}
/* Eventually sync engines */
sisfb_check_engine_and_sync(ivideo);
/* (Re-)Initialize chip engines */
if(ivideo->accel) {
sisfb_engine_init(ivideo);
} else {
ivideo->engineok = 0;
}
}
/*
 * Re-program the current display mode and restore pitch and both CRT
 * display bases. Returns 1 if setting the mode failed, 0 otherwise.
 */
static int
sisfb_reset_mode(struct sis_video_info *ivideo)
{
	if (sisfb_set_mode(ivideo, 0))
		return 1;

	sisfb_set_pitch(ivideo);
	sisfb_set_base_CRT1(ivideo, ivideo->current_base);
	sisfb_set_base_CRT2(ivideo, ivideo->current_base);

	return 0;
}
/*
 * sisfb_handle_command - dispatch a SISFB_CMD_* ioctl-style command
 *
 * Results and error codes go into sisfb_command->sisfb_result[]; result[0]
 * always carries a SISFB_CMD_ERR_* status.
 */
static void
sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_command)
{
int mycrt1off;
switch(sisfb_command->sisfb_cmd) {
case SISFB_CMD_GETVBFLAGS:
/* Report current vbflags; only valid once a mode was set */
if(!ivideo->modechanged) {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_EARLY;
} else {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
sisfb_command->sisfb_result[1] = ivideo->currentvbflags;
sisfb_command->sisfb_result[2] = ivideo->vbflags2;
}
break;
case SISFB_CMD_SWITCHCRT1:
/* arg[0]: 0 = off, 1 = on, 99 = query */
if(!ivideo->modechanged) {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_EARLY;
} else if(sisfb_command->sisfb_arg[0] == 99) {
/* Query */
sisfb_command->sisfb_result[1] = ivideo->sisfb_crt1off ? 0 : 1;
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
} else if(ivideo->sisfblocked) {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_LOCKED;
} else if((!(ivideo->currentvbflags & CRT2_ENABLE)) &&
(sisfb_command->sisfb_arg[0] == 0)) {
/* Refuse to switch off CRT1 when it is the only output */
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_NOCRT2;
} else {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
mycrt1off = sisfb_command->sisfb_arg[0] ? 0 : 1;
/* Only reset the mode if the CRT1 state actually changes */
if( ((ivideo->currentvbflags & VB_DISPTYPE_CRT1) && mycrt1off) ||
((!(ivideo->currentvbflags & VB_DISPTYPE_CRT1)) && !mycrt1off) ) {
ivideo->sisfb_crt1off = mycrt1off;
if(sisfb_reset_mode(ivideo)) {
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OTHER;
}
}
sisfb_command->sisfb_result[1] = ivideo->sisfb_crt1off ? 0 : 1;
}
break;
/* more to come */
default:
sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_UNKNOWN;
printk(KERN_ERR "sisfb: Unknown command 0x%x\n",
sisfb_command->sisfb_cmd);
}
}
#ifndef MODULE
/*
 * sisfb_setup - parse the "video=sisfb:..." kernel command line options
 *
 * Options are comma-separated; string comparison is case-insensitive and
 * prefix-based (strnicmp). A bare number is treated as a mode. Always
 * returns 0.
 */
static int __init sisfb_setup(char *options)
{
char *this_opt;
sisfb_setdefaultparms();
if(!options || !(*options))
return 0;
while((this_opt = strsep(&options, ",")) != NULL) {
if(!(*this_opt)) continue;
if(!strnicmp(this_opt, "off", 3)) {
sisfb_off = 1;
} else if(!strnicmp(this_opt, "forcecrt2type:", 14)) {
/* Need to check crt2 type first for fstn/dstn */
sisfb_search_crt2type(this_opt + 14);
} else if(!strnicmp(this_opt, "tvmode:",7)) {
sisfb_search_tvstd(this_opt + 7);
} else if(!strnicmp(this_opt, "tvstandard:",11)) {
sisfb_search_tvstd(this_opt + 11);
} else if(!strnicmp(this_opt, "mode:", 5)) {
sisfb_search_mode(this_opt + 5, false);
} else if(!strnicmp(this_opt, "vesa:", 5)) {
sisfb_search_vesamode(simple_strtoul(this_opt + 5, NULL, 0), false);
} else if(!strnicmp(this_opt, "rate:", 5)) {
sisfb_parm_rate = simple_strtoul(this_opt + 5, NULL, 0);
} else if(!strnicmp(this_opt, "forcecrt1:", 10)) {
sisfb_forcecrt1 = (int)simple_strtoul(this_opt + 10, NULL, 0);
} else if(!strnicmp(this_opt, "mem:",4)) {
sisfb_parm_mem = simple_strtoul(this_opt + 4, NULL, 0);
} else if(!strnicmp(this_opt, "pdc:", 4)) {
sisfb_pdc = simple_strtoul(this_opt + 4, NULL, 0);
} else if(!strnicmp(this_opt, "pdc1:", 5)) {
sisfb_pdca = simple_strtoul(this_opt + 5, NULL, 0);
} else if(!strnicmp(this_opt, "noaccel", 7)) {
sisfb_accel = 0;
} else if(!strnicmp(this_opt, "accel", 5)) {
sisfb_accel = -1;
} else if(!strnicmp(this_opt, "noypan", 6)) {
sisfb_ypan = 0;
} else if(!strnicmp(this_opt, "ypan", 4)) {
sisfb_ypan = -1;
} else if(!strnicmp(this_opt, "nomax", 5)) {
sisfb_max = 0;
} else if(!strnicmp(this_opt, "max", 3)) {
sisfb_max = -1;
} else if(!strnicmp(this_opt, "userom:", 7)) {
sisfb_userom = (int)simple_strtoul(this_opt + 7, NULL, 0);
} else if(!strnicmp(this_opt, "useoem:", 7)) {
sisfb_useoem = (int)simple_strtoul(this_opt + 7, NULL, 0);
} else if(!strnicmp(this_opt, "nocrt2rate", 10)) {
sisfb_nocrt2rate = 1;
} else if(!strnicmp(this_opt, "scalelcd:", 9)) {
unsigned long temp = 2;
temp = simple_strtoul(this_opt + 9, NULL, 0);
if((temp == 0) || (temp == 1)) {
/* Option is "scale", variable is "don't scale": invert */
sisfb_scalelcd = temp ^ 1;
}
} else if(!strnicmp(this_opt, "tvxposoffset:", 13)) {
int temp = 0;
temp = (int)simple_strtol(this_opt + 13, NULL, 0);
if((temp >= -32) && (temp <= 32)) {
sisfb_tvxposoffset = temp;
}
} else if(!strnicmp(this_opt, "tvyposoffset:", 13)) {
int temp = 0;
temp = (int)simple_strtol(this_opt + 13, NULL, 0);
if((temp >= -32) && (temp <= 32)) {
sisfb_tvyposoffset = temp;
}
} else if(!strnicmp(this_opt, "specialtiming:", 14)) {
sisfb_search_specialtiming(this_opt + 14);
} else if(!strnicmp(this_opt, "lvdshl:", 7)) {
int temp = 4;
temp = simple_strtoul(this_opt + 7, NULL, 0);
if((temp >= 0) && (temp <= 3)) {
sisfb_lvdshl = temp;
}
} else if(this_opt[0] >= '0' && this_opt[0] <= '9') {
/* A bare number is interpreted as a mode */
sisfb_search_mode(this_opt, true);
#if !defined(__i386__) && !defined(__x86_64__)
} else if(!strnicmp(this_opt, "resetcard", 9)) {
sisfb_resetcard = 1;
} else if(!strnicmp(this_opt, "videoram:", 9)) {
sisfb_videoram = simple_strtoul(this_opt + 9, NULL, 0);
#endif
} else {
printk(KERN_INFO "sisfb: Invalid option %s\n", this_opt);
}
}
return 0;
}
#endif
/*
 * Verify that a mapped expansion ROM belongs to the probed chip:
 * checks the 0x55AA ROM signature, the "PCIR" PCI data structure
 * signature, and the vendor/device ids. Returns 1 on match, 0 otherwise.
 */
static int sisfb_check_rom(void __iomem *rom_base,
			   struct sis_video_info *ivideo)
{
	void __iomem *pcir;
	int offset;

	/* Expansion ROM header signature */
	if (readb(rom_base) != 0x55 || readb(rom_base + 1) != 0xaa)
		return 0;

	/* Little-endian pointer to the PCI data structure */
	offset = readb(rom_base + 0x18) | (readb(rom_base + 0x19) << 8);
	if (offset > (0x10000 - 8))
		return 0;

	pcir = rom_base + offset;

	/* "PCIR" signature of the PCI data structure */
	if (readb(pcir) != 'P' || readb(pcir + 1) != 'C' ||
	    readb(pcir + 2) != 'I' || readb(pcir + 3) != 'R')
		return 0;

	/* Vendor and device id must match the probed chip */
	if ((readb(pcir + 4) | (readb(pcir + 5) << 8)) != ivideo->chip_vendor)
		return 0;
	if ((readb(pcir + 6) | (readb(pcir + 7) << 8)) != ivideo->chip_id)
		return 0;

	return 1;
}
/*
 * sisfb_find_rom - locate and copy the card's video BIOS
 *
 * Tries the PCI ROM resource first (skipped on integrated chipsets,
 * which have no ROM); on x86 it falls back to scanning the legacy
 * C0000-EFFFF BIOS window. Returns a vmalloc'ed 64K copy of the ROM
 * (caller must vfree), or NULL if none was found.
 */
static unsigned char *sisfb_find_rom(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
void __iomem *rom_base;
unsigned char *myrombase = NULL;
size_t romsize;
/* First, try the official pci ROM functions (except
* on integrated chipsets which have no ROM).
*/
if(!ivideo->nbridge) {
if((rom_base = pci_map_rom(pdev, &romsize))) {
if(sisfb_check_rom(rom_base, ivideo)) {
if((myrombase = vmalloc(65536))) {
memcpy_fromio(myrombase, rom_base,
(romsize > 65536) ? 65536 : romsize);
}
}
pci_unmap_rom(pdev, rom_base);
}
}
if(myrombase) return myrombase;
/* Otherwise do it the conventional way. */
#if defined(__i386__) || defined(__x86_64__)
{
u32 temp;
/* Scan the legacy BIOS area in 4K steps */
for (temp = 0x000c0000; temp < 0x000f0000; temp += 0x00001000) {
rom_base = ioremap(temp, 65536);
if (!rom_base)
continue;
if (!sisfb_check_rom(rom_base, ivideo)) {
iounmap(rom_base);
continue;
}
if ((myrombase = vmalloc(65536)))
memcpy_fromio(myrombase, rom_base, 65536);
iounmap(rom_base);
break;
}
}
#endif
return myrombase;
}
/*
 * sisfb_post_map_vram - map video RAM for RAM size detection
 *
 * Tries to ioremap *mapsize bytes; on failure halves the size until the
 * mapping succeeds or drops below @min MB. On success video_vbase is set
 * and *mapsize reflects the actually mapped size; on total failure
 * video_vbase stays NULL (caller must check).
 */
static void sisfb_post_map_vram(struct sis_video_info *ivideo,
unsigned int *mapsize, unsigned int min)
{
if (*mapsize < (min << 20))
return;
ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize));
if(!ivideo->video_vbase) {
printk(KERN_ERR
"sisfb: Unable to map maximum video RAM for size detection\n");
(*mapsize) >>= 1;
while((!(ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize))))) {
(*mapsize) >>= 1;
if((*mapsize) < (min << 20))
break;
}
if(ivideo->video_vbase) {
printk(KERN_ERR
"sisfb: Video RAM size detection limited to %dMB\n",
(int)((*mapsize) >> 20));
}
}
}
#ifdef CONFIG_FB_SIS_300
/*
 * sisfb_post_300_buswidth - probe the memory bus width on SiS 300 POST
 *
 * Performs test writes/reads to the framebuffer while toggling SR3C,
 * then writes a 16-byte pattern and reads back to classify the bus.
 * Returns 4 (channel A, 128bit), 2 (channel B, 64bit) or 1 (32bit).
 */
static int sisfb_post_300_buswidth(struct sis_video_info *ivideo)
{
void __iomem *FBAddress = ivideo->video_vbase;
unsigned short temp;
unsigned char reg;
int i, j;
SiS_SetRegAND(SISSR, 0x15, 0xFB);
SiS_SetRegOR(SISSR, 0x15, 0x04);
SiS_SetReg(SISSR, 0x13, 0x00);
SiS_SetReg(SISSR, 0x14, 0xBF);
for(i = 0; i < 2; i++) {
temp = 0x1234;
for(j = 0; j < 4; j++) {
writew(temp, FBAddress);
if(readw(FBAddress) == temp)
break;
SiS_SetRegOR(SISSR, 0x3c, 0x01);
/* Dummy reads of SR05 act as settle delays after toggling SR3C */
reg = SiS_GetReg(SISSR, 0x05);
reg = SiS_GetReg(SISSR, 0x05);
SiS_SetRegAND(SISSR, 0x3c, 0xfe);
reg = SiS_GetReg(SISSR, 0x05);
reg = SiS_GetReg(SISSR, 0x05);
temp++;
}
}
/* Write a known pattern across 16 bytes and check what sticks */
writel(0x01234567L, FBAddress);
writel(0x456789ABL, (FBAddress + 4));
writel(0x89ABCDEFL, (FBAddress + 8));
writel(0xCDEF0123L, (FBAddress + 12));
reg = SiS_GetReg(SISSR, 0x3b);
if(reg & 0x01) {
if(readl((FBAddress + 12)) == 0xCDEF0123L)
return 4; /* Channel A 128bit */
}
if(readl((FBAddress + 4)) == 0x456789ABL)
return 2; /* Channel B 64bit */
return 1; /* 32bit */
}
/*
 * DRAM configuration candidates probed by sisfb_post_300_rwtest(), in
 * decreasing-capacity order. Column usage (as read by the rwtest code):
 * [0] address pin count contribution, [1] page-size exponent (page
 * capacity = (1 << [1]) * buswidth * 4), [2] bank/rank factor,
 * [3] capacity factor (rank capacity = buswidth * [3]; also feeds SR14),
 * [4] value programmed into SR13.
 * NOTE(review): column meanings inferred from their use in
 * sisfb_post_300_rwtest() — confirm against SiS 300 datasheet.
 */
static const unsigned short SiS_DRAMType[17][5] = {
{0x0C,0x0A,0x02,0x40,0x39},
{0x0D,0x0A,0x01,0x40,0x48},
{0x0C,0x09,0x02,0x20,0x35},
{0x0D,0x09,0x01,0x20,0x44},
{0x0C,0x08,0x02,0x10,0x31},
{0x0D,0x08,0x01,0x10,0x40},
{0x0C,0x0A,0x01,0x20,0x34},
{0x0C,0x09,0x01,0x08,0x32},
{0x0B,0x08,0x02,0x08,0x21},
{0x0C,0x08,0x01,0x08,0x30},
{0x0A,0x08,0x02,0x04,0x11},
{0x0B,0x0A,0x01,0x10,0x28},
{0x09,0x08,0x02,0x02,0x01},
{0x0B,0x09,0x01,0x08,0x24},
{0x0B,0x08,0x01,0x04,0x20},
{0x0A,0x08,0x01,0x02,0x10},
{0x09,0x08,0x01,0x01,0x00}
};
/*
 * sisfb_post_300_rwtest - try DRAM configurations by write/read-back test
 *
 * For each candidate in SiS_DRAMType matching the pseudo rank capacity
 * and address pin count, programs SR13/SR14 accordingly, writes marker
 * words at addresses that would alias for a wrong configuration, and
 * reads the first one back. Returns 1 as soon as a configuration passes
 * (SR13/SR14 are left programmed), 0 if none does.
 */
static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
int buswidth, int PseudoRankCapacity,
int PseudoAdrPinCount, unsigned int mapsize)
{
void __iomem *FBAddr = ivideo->video_vbase;
unsigned short sr14;
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
if(RankCapacity != PseudoRankCapacity)
continue;
if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
if(iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
}
PageCapacity = (1 << SiS_DRAMType[k][1]) * buswidth * 4;
PhysicalAdrHigh = BankNumHigh;
PhysicalAdrHalfPage = (PageCapacity / 2 + PhysicalAdrHigh) % PageCapacity;
PhysicalAdrOtherPage = PageCapacity * SiS_DRAMType[k][2] + PhysicalAdrHigh;
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
if(buswidth == 4) sr14 |= 0x80;
else if(buswidth == 2) sr14 |= 0x40;
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
/* Skip candidates whose test addresses lie beyond the mapping */
if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
(BankNumMid + PhysicalAdrHigh >= mapsize) ||
(BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
(BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
writew(((unsigned short)PhysicalAdrHigh),
(FBAddr + BankNumHigh + PhysicalAdrHigh));
writew(((unsigned short)BankNumMid),
(FBAddr + BankNumMid + PhysicalAdrHigh));
writew(((unsigned short)PhysicalAdrHalfPage),
(FBAddr + BankNumHigh + PhysicalAdrHalfPage));
writew(((unsigned short)PhysicalAdrOtherPage),
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
return 0;
}
/*
 * sisfb_post_300_ramsize - determine the installed RAM configuration
 *
 * Iterates candidate rank capacities (64 down to 1, powers of two) and
 * iteration counts (4 down to 1), handing each plausible combination to
 * sisfb_post_300_rwtest(), which programs SR13/SR14 when a combination
 * passes. Stops at the first success.
 */
static void sisfb_post_300_ramsize(struct pci_dev *pdev, unsigned int mapsize)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	int rankcap, iteration;
	int buswidth = sisfb_post_300_buswidth(ivideo);

	for (rankcap = 64; rankcap >= 1; rankcap >>= 1) {
		for (iteration = 4; iteration >= 1; iteration--) {
			if (rankcap * iteration > 64)
				continue;
			if (sisfb_post_300_rwtest(ivideo, iteration, buswidth,
						  rankcap, 15 - iteration,
						  mapsize))
				return;
		}
	}
}
/*
 * sisfb_post_sis300 - software POST for SiS 300 series cards
 *
 * Brings up a card whose BIOS did not run: programs clock, DAC, RAM
 * timing and bridge registers (taking values from the video BIOS image
 * when one is available, otherwise using defaults), detects RAM size,
 * senses CRT1, and sets the default mode 0x2e without clearing the
 * screen.
 */
static void sisfb_post_sis300(struct pci_dev *pdev)
{
struct sis_video_info *ivideo = pci_get_drvdata(pdev);
unsigned char *bios = ivideo->SiS_Pr.VirtualRomBase;
u8 reg, v1, v2, v3, v4, v5, v6, v7, v8;
u16 index, rindex, memtype = 0;
unsigned int mapsize;
if(!ivideo->SiS_Pr.UseROM)
bios = NULL;
SiS_SetReg(SISSR, 0x05, 0x86);
/* Determine memory type, from BIOS byte 0x52 or from SR3A */
if(bios) {
if(bios[0x52] & 0x80) {
memtype = bios[0x52];
} else {
memtype = SiS_GetReg(SISSR, 0x3a);
}
memtype &= 0x07;
}
/* MCLK/ECLK setup (SR28-2A / SR2E-30) */
v3 = 0x80; v6 = 0x80;
if(ivideo->revision_id <= 0x13) {
v1 = 0x44; v2 = 0x42;
v4 = 0x44; v5 = 0x42;
} else {
v1 = 0x68; v2 = 0x43; /* Assume 125Mhz MCLK */
v4 = 0x68; v5 = 0x43; /* Assume 125Mhz ECLK */
if(bios) {
index = memtype * 5;
rindex = index + 0x54;
v1 = bios[rindex++];
v2 = bios[rindex++];
v3 = bios[rindex++];
rindex = index + 0x7c;
v4 = bios[rindex++];
v5 = bios[rindex++];
v6 = bios[rindex++];
}
}
SiS_SetReg(SISSR, 0x28, v1);
SiS_SetReg(SISSR, 0x29, v2);
SiS_SetReg(SISSR, 0x2a, v3);
SiS_SetReg(SISSR, 0x2e, v4);
SiS_SetReg(SISSR, 0x2f, v5);
SiS_SetReg(SISSR, 0x30, v6);
v1 = 0x10;
if(bios)
v1 = bios[0xa4];
SiS_SetReg(SISSR, 0x07, v1); /* DAC speed */
SiS_SetReg(SISSR, 0x11, 0x0f); /* DDC, power save */
/* RAM timing registers SR15-1C, per memory type from the BIOS table */
v1 = 0x01; v2 = 0x43; v3 = 0x1e; v4 = 0x2a;
v5 = 0x06; v6 = 0x00; v7 = 0x00; v8 = 0x00;
if(bios) {
memtype += 0xa5;
v1 = bios[memtype];
v2 = bios[memtype + 8];
v3 = bios[memtype + 16];
v4 = bios[memtype + 24];
v5 = bios[memtype + 32];
v6 = bios[memtype + 40];
v7 = bios[memtype + 48];
v8 = bios[memtype + 56];
}
if(ivideo->revision_id >= 0x80)
v3 &= 0xfd;
SiS_SetReg(SISSR, 0x15, v1); /* Ram type (assuming 0, BIOS 0xa5 step 8) */
SiS_SetReg(SISSR, 0x16, v2);
SiS_SetReg(SISSR, 0x17, v3);
SiS_SetReg(SISSR, 0x18, v4);
SiS_SetReg(SISSR, 0x19, v5);
SiS_SetReg(SISSR, 0x1a, v6);
SiS_SetReg(SISSR, 0x1b, v7);
SiS_SetReg(SISSR, 0x1c, v8); /* ---- */
SiS_SetRegAND(SISSR, 0x15, 0xfb);
SiS_SetRegOR(SISSR, 0x15, 0x04);
if(bios) {
if(bios[0x53] & 0x02) {
SiS_SetRegOR(SISSR, 0x19, 0x20);
}
}
v1 = 0x04; /* DAC pedestal (BIOS 0xe5) */
if(ivideo->revision_id >= 0x80)
v1 |= 0x01;
SiS_SetReg(SISSR, 0x1f, v1);
SiS_SetReg(SISSR, 0x20, 0xa4); /* linear & relocated io & disable a0000 */
v1 = 0xf6; v2 = 0x0d; v3 = 0x00;
if(bios) {
v1 = bios[0xe8];
v2 = bios[0xe9];
v3 = bios[0xea];
}
SiS_SetReg(SISSR, 0x23, v1);
SiS_SetReg(SISSR, 0x24, v2);
SiS_SetReg(SISSR, 0x25, v3);
SiS_SetReg(SISSR, 0x21, 0x84);
SiS_SetReg(SISSR, 0x22, 0x00);
SiS_SetReg(SISCR, 0x37, 0x00);
SiS_SetRegOR(SISPART1, 0x24, 0x01); /* unlock crt2 */
SiS_SetReg(SISPART1, 0x00, 0x00);
v1 = 0x40; v2 = 0x11;
if(bios) {
v1 = bios[0xec];
v2 = bios[0xeb];
}
SiS_SetReg(SISPART1, 0x02, v1);
if(ivideo->revision_id >= 0x80)
v2 &= ~0x01;
/* Bridge present (Part4 reg 0 reads 1 or 2)? Then init bridge regs */
reg = SiS_GetReg(SISPART4, 0x00);
if((reg == 1) || (reg == 2)) {
SiS_SetReg(SISCR, 0x37, 0x02);
SiS_SetReg(SISPART2, 0x00, 0x1c);
v4 = 0x00; v5 = 0x00; v6 = 0x10;
if(ivideo->SiS_Pr.UseROM) {
v4 = bios[0xf5];
v5 = bios[0xf6];
v6 = bios[0xf7];
}
SiS_SetReg(SISPART4, 0x0d, v4);
SiS_SetReg(SISPART4, 0x0e, v5);
SiS_SetReg(SISPART4, 0x10, v6);
SiS_SetReg(SISPART4, 0x0f, 0x3f);
reg = SiS_GetReg(SISPART4, 0x01);
if(reg >= 0xb0) {
reg = SiS_GetReg(SISPART4, 0x23);
reg &= 0x20;
reg <<= 1;
SiS_SetReg(SISPART4, 0x23, reg);
}
} else {
v2 &= ~0x10;
}
SiS_SetReg(SISSR, 0x32, v2);
SiS_SetRegAND(SISPART1, 0x24, 0xfe); /* Lock CRT2 */
reg = SiS_GetReg(SISSR, 0x16);
reg &= 0xc3;
SiS_SetReg(SISCR, 0x35, reg);
SiS_SetReg(SISCR, 0x83, 0x00);
#if !defined(__i386__) && !defined(__x86_64__)
/* Non-x86: honor an explicit videoram= parameter instead of probing */
if(sisfb_videoram) {
SiS_SetReg(SISSR, 0x13, 0x28); /* ? */
reg = ((sisfb_videoram >> 10) - 1) | 0x40;
SiS_SetReg(SISSR, 0x14, reg);
} else {
#endif
/* Need to map max FB size for finding out about RAM size */
mapsize = ivideo->video_size;
sisfb_post_map_vram(ivideo, &mapsize, 4);
if(ivideo->video_vbase) {
sisfb_post_300_ramsize(pdev, mapsize);
iounmap(ivideo->video_vbase);
} else {
printk(KERN_DEBUG
"sisfb: Failed to map memory for size detection, assuming 8MB\n");
SiS_SetReg(SISSR, 0x13, 0x28); /* ? */
SiS_SetReg(SISSR, 0x14, 0x47); /* 8MB, 64bit default */
}
#if !defined(__i386__) && !defined(__x86_64__)
}
#endif
/* Bus type setup (SR21/22): from BIOS, or PCI vs. AGP from SR3A */
if(bios) {
v1 = bios[0xe6];
v2 = bios[0xe7];
} else {
reg = SiS_GetReg(SISSR, 0x3a);
if((reg & 0x30) == 0x30) {
v1 = 0x04; /* PCI */
v2 = 0x92;
} else {
v1 = 0x14; /* AGP */
v2 = 0xb2;
}
}
SiS_SetReg(SISSR, 0x21, v1);
SiS_SetReg(SISSR, 0x22, v2);
/* Sense CRT1 */
sisfb_sense_crt1(ivideo);
/* Set default mode, don't clear screen */
ivideo->SiS_Pr.SiS_UseOEM = false;
SiS_SetEnableDstn(&ivideo->SiS_Pr, false);
SiS_SetEnableFstn(&ivideo->SiS_Pr, false);
ivideo->curFSTN = ivideo->curDSTN = 0;
ivideo->SiS_Pr.VideoMemorySize = 8 << 20;
SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80);
SiS_SetReg(SISSR, 0x05, 0x86);
/* Display off */
SiS_SetRegOR(SISSR, 0x01, 0x20);
/* Save mode number in CR34 */
SiS_SetReg(SISCR, 0x34, 0x2e);
/* Let everyone know what the current mode is */
ivideo->modeprechange = 0x2e;
}
#endif
#ifdef CONFIG_FB_SIS_315
#if 0
/* POST routine for SiS 315/330 chips - intentionally empty placeholder
 * (the enclosing "#if 0" compiles it out until it is implemented). */
static void sisfb_post_sis315330(struct pci_dev *pdev)
{
	/* TODO */
}
#endif
/* True (non-zero) when the card was identified as an XGI Z9 (XG21). */
static inline int sisfb_xgi_is21(struct sis_video_info *ivideo)
{
	return (ivideo->chip_real_id == XGI_21) ? 1 : 0;
}
/*
 * Crude busy-wait used during POST: burn time by repeatedly reading
 * SR05. The slow legacy register-read cycle itself provides the delay;
 * the value read is deliberately discarded. "delay" is in arbitrary
 * units (delay * 360 port reads), not any fixed time unit.
 */
static void sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay)
{
	unsigned int i;
	u8 reg;

	for(i = 0; i <= (delay * 10 * 36); i++) {
		/* Dummy read: only the bus access time matters. */
		reg = SiS_GetReg(SISSR, 0x05);
		reg++;	/* value intentionally unused */
	}
}
/*
 * Scan all PCI host bridges in the system and report whether one of
 * them was made by the given vendor. Returns 1 if found, 0 otherwise.
 * (ivideo and mypdev are currently unused; kept for the call sites.)
 */
static int sisfb_find_host_bridge(struct sis_video_info *ivideo,
				  struct pci_dev *mypdev,
				  unsigned short pcivendor)
{
	struct pci_dev *pdev = NULL;

	while((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST, pdev)) != NULL) {
		if(pdev->vendor == pcivendor) {
			/* Found one - drop the reference and report success */
			pci_dev_put(pdev);
			return 1;
		}
	}

	return 0;
}
/*
 * Address-wrap read/write test used for RAM size detection.
 *
 * Tags every power-of-two offset between 2^starta and 2^enda (that fits
 * inside the mapping) with its own offset value, then verifies each tag.
 * If the tag at offset 0 got overwritten, the address lines wrapped and
 * the RAM is smaller than assumed. Returns 1 if all tags survive, else 0.
 */
static int sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
			unsigned int enda, unsigned int mapsize)
{
	unsigned int offs;
	int bit;

	writel(0, ivideo->video_vbase);

	for(bit = starta; bit <= enda; bit++) {
		offs = 1 << bit;
		if(offs < mapsize)
			writel(offs, ivideo->video_vbase + offs);
	}

	sisfb_post_xgi_delay(ivideo, 150);

	/* Offset 0 clobbered -> addresses wrapped around */
	if(readl(ivideo->video_vbase) != 0)
		return 0;

	for(bit = starta; bit <= enda; bit++) {
		offs = 1 << bit;
		if(offs >= mapsize)
			return 0;
		if(readl(ivideo->video_vbase + offs) != offs)
			return 0;
	}

	return 1;
}
/*
 * sisfb_post_xgi_ramsize - probe the amount of video RAM on XGI cards
 *
 * Determines bus width and channel configuration by programming candidate
 * settings into SR13/SR14 and verifying each with an address-wrap r/w test
 * of the mapped framebuffer, then walks a per-chip table of rank sizes the
 * same way. The exact write/test order is a hardware probe sequence - do
 * not reorder.
 *
 * Returns 0 on success, -EIO if no rank configuration passed the test,
 * -ENOMEM if the framebuffer could not be mapped.
 */
static int sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
{
	unsigned int buswidth, ranksize, channelab, mapsize;
	int i, j, k, l, status;
	u8 reg, sr14;
	/* Per-candidate: SR "sub"?, ?, ?, rank size (MB), SR13 value.
	 * NOTE(review): only columns 3 and 4 are used below; the meaning
	 * of columns 0-2 is not visible here - confirm against XGI docs. */
	static const u8 dramsr13[12 * 5] = {
		0x02, 0x0e, 0x0b, 0x80, 0x5d,
		0x02, 0x0e, 0x0a, 0x40, 0x59,
		0x02, 0x0d, 0x0b, 0x40, 0x4d,
		0x02, 0x0e, 0x09, 0x20, 0x55,
		0x02, 0x0d, 0x0a, 0x20, 0x49,
		0x02, 0x0c, 0x0b, 0x20, 0x3d,
		0x02, 0x0e, 0x08, 0x10, 0x51,
		0x02, 0x0d, 0x09, 0x10, 0x45,
		0x02, 0x0c, 0x0a, 0x10, 0x39,
		0x02, 0x0d, 0x08, 0x08, 0x41,
		0x02, 0x0c, 0x09, 0x08, 0x35,
		0x02, 0x0c, 0x08, 0x04, 0x31
	};
	/* Smaller candidate table for non-XGI_20 chips */
	static const u8 dramsr13_4[4 * 5] = {
		0x02, 0x0d, 0x09, 0x40, 0x45,
		0x02, 0x0c, 0x09, 0x20, 0x35,
		0x02, 0x0c, 0x08, 0x10, 0x31,
		0x02, 0x0b, 0x08, 0x08, 0x21
	};

	/* Enable linear mode, disable 0xa0000 address decoding */
	/* We disable a0000 address decoding, because
	 * - if running on x86, if the card is disabled, it means
	 *   that another card is in the system. We don't want
	 *   to interphere with that primary card's textmode.
	 * - if running on non-x86, there usually is no VGA window
	 *   at a0000.
	 */
	SiS_SetRegOR(SISSR, 0x20, (0x80 | 0x04));

	/* Need to map max FB size for finding out about RAM size */
	mapsize = ivideo->video_size;
	sisfb_post_map_vram(ivideo, &mapsize, 32);

	if(!ivideo->video_vbase) {
		printk(KERN_ERR "sisfb: Unable to detect RAM size. Setting default.\n");
		SiS_SetReg(SISSR, 0x13, 0x35);
		SiS_SetReg(SISSR, 0x14, 0x41);
		/* TODO */
		return -ENOMEM;
	}

	/* Non-interleaving */
	SiS_SetReg(SISSR, 0x15, 0x00);
	/* No tiling */
	SiS_SetReg(SISSR, 0x1c, 0x00);

	if(ivideo->chip == XGI_20) {

		channelab = 1;
		reg = SiS_GetReg(SISCR, 0x97);
		if(!(reg & 0x01)) {	/* Single 32/16 */
			/* Try 32-bit bus first, fall back to 16-bit */
			buswidth = 32;
			SiS_SetReg(SISSR, 0x13, 0xb1);
			SiS_SetReg(SISSR, 0x14, 0x52);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x02;
			if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
				goto bail_out;

			SiS_SetReg(SISSR, 0x13, 0x31);
			SiS_SetReg(SISSR, 0x14, 0x42);
			sisfb_post_xgi_delay(ivideo, 1);
			if(sisfb_post_xgi_rwtest(ivideo, 23, 23, mapsize))
				goto bail_out;

			buswidth = 16;
			SiS_SetReg(SISSR, 0x13, 0xb1);
			SiS_SetReg(SISSR, 0x14, 0x41);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x01;
			if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
				goto bail_out;
			else
				SiS_SetReg(SISSR, 0x13, 0x31);
		} else {	/* Dual 16/8 */
			/* Try 16-bit bus first, fall back to 8-bit */
			buswidth = 16;
			SiS_SetReg(SISSR, 0x13, 0xb1);
			SiS_SetReg(SISSR, 0x14, 0x41);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x01;
			if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
				goto bail_out;

			SiS_SetReg(SISSR, 0x13, 0x31);
			SiS_SetReg(SISSR, 0x14, 0x31);
			sisfb_post_xgi_delay(ivideo, 1);
			if(sisfb_post_xgi_rwtest(ivideo, 22, 22, mapsize))
				goto bail_out;

			buswidth = 8;
			SiS_SetReg(SISSR, 0x13, 0xb1);
			SiS_SetReg(SISSR, 0x14, 0x30);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x00;
			if(sisfb_post_xgi_rwtest(ivideo, 21, 22, mapsize))
				goto bail_out;
			else
				SiS_SetReg(SISSR, 0x13, 0x31);
		}

	} else {	/* XGI_40 */

		reg = SiS_GetReg(SISCR, 0x97);
		if(!(reg & 0x10)) {
			reg = SiS_GetReg(SISSR, 0x39);
			reg >>= 1;
		}

		if(reg & 0x01) {	/* DDRII */
			buswidth = 32;
			if(ivideo->revision_id == 2) {
				/* Rev 2: try two channels, then one */
				channelab = 2;
				SiS_SetReg(SISSR, 0x13, 0xa1);
				SiS_SetReg(SISSR, 0x14, 0x44);
				sr14 = 0x04;
				sisfb_post_xgi_delay(ivideo, 1);
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				SiS_SetReg(SISSR, 0x13, 0x21);
				SiS_SetReg(SISSR, 0x14, 0x34);
				if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
					goto bail_out;

				channelab = 1;
				SiS_SetReg(SISSR, 0x13, 0xa1);
				SiS_SetReg(SISSR, 0x14, 0x40);
				sr14 = 0x00;
				if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
					goto bail_out;

				SiS_SetReg(SISSR, 0x13, 0x21);
				SiS_SetReg(SISSR, 0x14, 0x30);
			} else {
				/* Other revs: try three channels, then two */
				channelab = 3;
				SiS_SetReg(SISSR, 0x13, 0xa1);
				SiS_SetReg(SISSR, 0x14, 0x4c);
				sr14 = 0x0c;
				sisfb_post_xgi_delay(ivideo, 1);
				if(sisfb_post_xgi_rwtest(ivideo, 23, 25, mapsize))
					goto bail_out;

				channelab = 2;
				SiS_SetReg(SISSR, 0x14, 0x48);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x08;
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				SiS_SetReg(SISSR, 0x13, 0x21);
				SiS_SetReg(SISSR, 0x14, 0x3c);
				sr14 = 0x0c;

				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize)) {
					channelab = 3;
				} else {
					channelab = 2;
					SiS_SetReg(SISSR, 0x14, 0x38);
					sr14 = 0x08;
				}
			}
			sisfb_post_xgi_delay(ivideo, 1);

		} else {	/* DDR */

			buswidth = 64;
			if(ivideo->revision_id == 2) {
				channelab = 1;
				SiS_SetReg(SISSR, 0x13, 0xa1);
				SiS_SetReg(SISSR, 0x14, 0x52);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x02;
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				SiS_SetReg(SISSR, 0x13, 0x21);
				SiS_SetReg(SISSR, 0x14, 0x42);
			} else {
				channelab = 2;
				SiS_SetReg(SISSR, 0x13, 0xa1);
				SiS_SetReg(SISSR, 0x14, 0x5a);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x0a;
				if(sisfb_post_xgi_rwtest(ivideo, 24, 25, mapsize))
					goto bail_out;

				SiS_SetReg(SISSR, 0x13, 0x21);
				SiS_SetReg(SISSR, 0x14, 0x4a);
			}
			sisfb_post_xgi_delay(ivideo, 1);

		}
	}

bail_out:
	/* Latch the winning channel config into SR14's low nibble */
	SiS_SetRegANDOR(SISSR, 0x14, 0xf0, sr14);
	sisfb_post_xgi_delay(ivideo, 1);

	/* Now walk the rank-size table for this chip until one verifies */
	j = (ivideo->chip == XGI_20) ? 5 : 9;
	k = (ivideo->chip == XGI_20) ? 12 : 4;

	status = -EIO;

	for(i = 0; i < k; i++) {

		reg = (ivideo->chip == XGI_20) ?
				dramsr13[(i * 5) + 4] : dramsr13_4[(i * 5) + 4];
		SiS_SetRegANDOR(SISSR, 0x13, 0x80, reg);
		sisfb_post_xgi_delay(ivideo, 50);

		ranksize = (ivideo->chip == XGI_20) ?
				dramsr13[(i * 5) + 3] : dramsr13_4[(i * 5) + 3];

		reg = SiS_GetReg(SISSR, 0x13);
		if(reg & 0x80) ranksize <<= 1;	/* two ranks */

		/* Scale by bus width */
		if(ivideo->chip == XGI_20) {
			if(buswidth == 16)      ranksize <<= 1;
			else if(buswidth == 32) ranksize <<= 2;
		} else {
			if(buswidth == 64)      ranksize <<= 1;
		}

		reg = 0;
		l = channelab;
		if(l == 3) l = 4;
		/* Encode log2(ranksize) into SR14 bits 7:4, max 256MB total */
		if((ranksize * l) <= 256) {
			while((ranksize >>= 1)) reg += 0x10;
		}
		if(!reg) continue;

		SiS_SetRegANDOR(SISSR, 0x14, 0x0f, (reg & 0xf0));
		sisfb_post_xgi_delay(ivideo, 1);

		if (sisfb_post_xgi_rwtest(ivideo, j, ((reg >> 4) + channelab - 2 + 20), mapsize)) {
			status = 0;
			break;
		}
	}

	iounmap(ivideo->video_vbase);

	return status;
}
/*
 * Program the two XGI clock register triplets (SR28-SR2A and SR2E-SR30)
 * from the BIOS ROM if present, otherwise from built-in defaults.
 * Each triplet is followed by three POST delays to let the PLLs settle.
 * The regb parameter is currently forced to 0 (see "!" below).
 */
static void sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
{
	static const u8 cs90[8 * 3] = {
		0x16, 0x01, 0x01,
		0x3e, 0x03, 0x01,
		0x7c, 0x08, 0x01,
		0x79, 0x06, 0x01,
		0x29, 0x01, 0x81,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01
	};
	static const u8 csb8[8 * 3] = {
		0x5c, 0x23, 0x01,
		0x29, 0x01, 0x01,
		0x7c, 0x08, 0x01,
		0x79, 0x06, 0x01,
		0x29, 0x01, 0x81,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01
	};
	const u8 *src;
	int idx;

	regb = 0;	/* ! */
	idx = regb * 3;

	/* First triplet: SR28/29/2A, ROM offset 0x90 or table cs90 */
	src = ivideo->haveXGIROM ? &ivideo->bios_abase[0x90 + idx] : &cs90[idx];
	SiS_SetReg(SISSR, 0x28, src[0]);
	SiS_SetReg(SISSR, 0x29, src[1]);
	SiS_SetReg(SISSR, 0x2a, src[2]);

	sisfb_post_xgi_delay(ivideo, 0x43);
	sisfb_post_xgi_delay(ivideo, 0x43);
	sisfb_post_xgi_delay(ivideo, 0x43);

	/* Second triplet: SR2E/2F/30, ROM offset 0xb8 or table csb8 */
	src = ivideo->haveXGIROM ? &ivideo->bios_abase[0xb8 + idx] : &csb8[idx];
	SiS_SetReg(SISSR, 0x2e, src[0]);
	SiS_SetReg(SISSR, 0x2f, src[1]);
	SiS_SetReg(SISSR, 0x30, src[2]);

	sisfb_post_xgi_delay(ivideo, 0x43);
	sisfb_post_xgi_delay(ivideo, 0x43);
	sisfb_post_xgi_delay(ivideo, 0x43);
}
/*
 * DDR2 mode-register-set (MRS) initialization for XGI chips other than
 * the XG21. The SR18/SR19 pairs hold the command data and the SR16
 * toggles (0x00 then 0x80, or 0x04 then 0x84) latch each command into
 * the DRAM - this is a hardware command sequence, do not reorder.
 */
static void sisfb_post_xgi_ddr2_mrs_default(struct sis_video_info *ivideo,
					    u8 regb)
{
	unsigned char *bios = ivideo->bios_abase;
	u8 v1;

	SiS_SetReg(SISSR, 0x28, 0x64);
	SiS_SetReg(SISSR, 0x29, 0x63);
	sisfb_post_xgi_delay(ivideo, 15);
	SiS_SetReg(SISSR, 0x18, 0x00);
	SiS_SetReg(SISSR, 0x19, 0x20);
	SiS_SetReg(SISSR, 0x16, 0x00);
	SiS_SetReg(SISSR, 0x16, 0x80);
	SiS_SetReg(SISSR, 0x18, 0xc5);
	SiS_SetReg(SISSR, 0x19, 0x23);
	SiS_SetReg(SISSR, 0x16, 0x00);
	SiS_SetReg(SISSR, 0x16, 0x80);
	sisfb_post_xgi_delay(ivideo, 1);
	SiS_SetReg(SISCR, 0x97, 0x11);
	sisfb_post_xgi_setclocks(ivideo, regb);
	sisfb_post_xgi_delay(ivideo, 0x46);
	/* Repeat the 0xc5/0x23 command after the clocks are set */
	SiS_SetReg(SISSR, 0x18, 0xc5);
	SiS_SetReg(SISSR, 0x19, 0x23);
	SiS_SetReg(SISSR, 0x16, 0x00);
	SiS_SetReg(SISSR, 0x16, 0x80);
	sisfb_post_xgi_delay(ivideo, 1);
	/* Pulse SR1B - presumably a DRAM reset/precharge strobe; confirm */
	SiS_SetReg(SISSR, 0x1b, 0x04);
	sisfb_post_xgi_delay(ivideo, 1);
	SiS_SetReg(SISSR, 0x1b, 0x00);
	sisfb_post_xgi_delay(ivideo, 1);
	/* Final MRS value: from ROM offset 0xf0 if available, else 0x31 */
	v1 = 0x31;
	if (ivideo->haveXGIROM) {
		v1 = bios[0xf0];
	}
	SiS_SetReg(SISSR, 0x18, v1);
	SiS_SetReg(SISSR, 0x19, 0x06);
	SiS_SetReg(SISSR, 0x16, 0x04);
	SiS_SetReg(SISSR, 0x16, 0x84);
	sisfb_post_xgi_delay(ivideo, 1);
}
/*
 * DDR2 mode-register-set (MRS) initialization for the XG21 (XGI Z9).
 * Issues the extended mode registers EMRS2/EMRS3/EMRS1 followed by MRS1
 * (with DLL-reset bit, then without). Each SR18/SR19 pair is latched by
 * the SR16 0x05/0x85 toggle - hardware command sequence, do not reorder.
 */
static void sisfb_post_xgi_ddr2_mrs_xg21(struct sis_video_info *ivideo)
{
	sisfb_post_xgi_setclocks(ivideo, 1);

	SiS_SetReg(SISCR, 0x97, 0x11);
	sisfb_post_xgi_delay(ivideo, 0x46);

	SiS_SetReg(SISSR, 0x18, 0x00);	/* EMRS2 */
	SiS_SetReg(SISSR, 0x19, 0x80);
	SiS_SetReg(SISSR, 0x16, 0x05);
	SiS_SetReg(SISSR, 0x16, 0x85);

	SiS_SetReg(SISSR, 0x18, 0x00);	/* EMRS3 */
	SiS_SetReg(SISSR, 0x19, 0xc0);
	SiS_SetReg(SISSR, 0x16, 0x05);
	SiS_SetReg(SISSR, 0x16, 0x85);

	SiS_SetReg(SISSR, 0x18, 0x00);	/* EMRS1 */
	SiS_SetReg(SISSR, 0x19, 0x40);
	SiS_SetReg(SISSR, 0x16, 0x05);
	SiS_SetReg(SISSR, 0x16, 0x85);

	SiS_SetReg(SISSR, 0x18, 0x42);	/* MRS1 */
	SiS_SetReg(SISSR, 0x19, 0x02);
	SiS_SetReg(SISSR, 0x16, 0x05);
	SiS_SetReg(SISSR, 0x16, 0x85);
	sisfb_post_xgi_delay(ivideo, 1);

	/* Pulse SR1B between the two MRS1 commands */
	SiS_SetReg(SISSR, 0x1b, 0x04);
	sisfb_post_xgi_delay(ivideo, 1);
	SiS_SetReg(SISSR, 0x1b, 0x00);
	sisfb_post_xgi_delay(ivideo, 1);

	SiS_SetReg(SISSR, 0x18, 0x42);	/* MRS1 */
	SiS_SetReg(SISSR, 0x19, 0x00);
	SiS_SetReg(SISSR, 0x16, 0x05);
	SiS_SetReg(SISSR, 0x16, 0x85);
	sisfb_post_xgi_delay(ivideo, 1);
}
/*
 * DDR2 DRAM setup: program the CR82/CR85/CR86 timing registers (from
 * BIOS ROM when present, else from the built-in cs158/cs160/cs168
 * tables, indexed by RAM type "regb"), then run the MRS sequence
 * appropriate for the chip.
 */
static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
{
	unsigned char *bios = ivideo->bios_abase;
	static const u8 cs158[8] = {
		0x88, 0xaa, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs160[8] = {
		0x44, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs168[8] = {
		0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	u8 reg;
	u8 v1;
	u8 v2;
	u8 v3;

	SiS_SetReg(SISCR, 0xb0, 0x80); /* DDR2 dual frequency mode */
	SiS_SetReg(SISCR, 0x82, 0x77);
	/* Write-readback pairs on CR86/CR85: the read-back values are
	 * discarded - presumably required latch/settling cycles; confirm. */
	SiS_SetReg(SISCR, 0x86, 0x00);
	reg = SiS_GetReg(SISCR, 0x86);
	SiS_SetReg(SISCR, 0x86, 0x88);
	reg = SiS_GetReg(SISCR, 0x86);
	v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
	if (ivideo->haveXGIROM) {
		v1 = bios[regb + 0x168];
		v2 = bios[regb + 0x160];
		v3 = bios[regb + 0x158];
	}
	SiS_SetReg(SISCR, 0x86, v1);
	SiS_SetReg(SISCR, 0x82, 0x77);
	SiS_SetReg(SISCR, 0x85, 0x00);
	reg = SiS_GetReg(SISCR, 0x85);
	SiS_SetReg(SISCR, 0x85, 0x88);
	reg = SiS_GetReg(SISCR, 0x85);
	SiS_SetReg(SISCR, 0x85, v2);
	SiS_SetReg(SISCR, 0x82, v3);
	SiS_SetReg(SISCR, 0x98, 0x01);
	SiS_SetReg(SISCR, 0x9a, 0x02);

	if (sisfb_xgi_is21(ivideo))
		sisfb_post_xgi_ddr2_mrs_xg21(ivideo);
	else
		sisfb_post_xgi_ddr2_mrs_default(ivideo, regb);
}
/*
 * Determine the RAM type (0 = DDR1, 1 = DDR2) of an XGI card.
 *
 * If the BIOS ROM flags its value as final (bit 7 of ROM byte 0x62),
 * that value is used; otherwise the type is read from hardware straps:
 * a GPIO pin on the XG21, CR97 on the XGI_20, SR39/SR3A elsewhere.
 * The result is masked to 3 bits.
 */
static u8 sisfb_post_xgi_ramtype(struct sis_video_info *ivideo)
{
	unsigned char *bios = ivideo->bios_abase;
	u8 ramtype = 0x00;
	u8 cr97val = 0x10;
	u8 tmp;

	if (ivideo->haveXGIROM) {
		ramtype = bios[0x62];
		cr97val = bios[0x1d2];
	}

	if (ramtype & 0x80)		/* ROM value is authoritative */
		return ramtype & 0x07;

	if (sisfb_xgi_is21(ivideo)) {
		SiS_SetRegAND(SISCR, 0xb4, 0xfd); /* GPIO control */
		SiS_SetRegOR(SISCR, 0x4a, 0x80);  /* GPIOH EN */
		tmp = SiS_GetReg(SISCR, 0x48);
		SiS_SetRegOR(SISCR, 0xb4, 0x02);
		ramtype = tmp & 0x01;             /* GPIOH */
	} else if (ivideo->chip == XGI_20) {
		SiS_SetReg(SISCR, 0x97, cr97val);
		tmp = SiS_GetReg(SISCR, 0x97);
		if (tmp & 0x10)
			ramtype = (tmp & 0x01) << 1;
	} else {
		tmp = SiS_GetReg(SISSR, 0x39);
		ramtype = tmp & 0x02;
		if (!ramtype) {
			tmp = SiS_GetReg(SISSR, 0x3a);
			ramtype = (tmp >> 1) & 0x01;
		}
	}

	return ramtype & 0x07;
}
/*
 * sisfb_post_xgi - POST (cold-initialize) an XGI V3XT/V5/V8/Z7/Z9 card
 *
 * Runs the full BIOS-equivalent power-on sequence: unlocks the extended
 * registers, programs default/ROM register sets, detects the host
 * bridge / bus type, detects and initializes the DRAM (type, timing,
 * size), senses CRT1 and sets the default text mode 0x2e.
 *
 * Returns 1 on success, 0 on failure (extended registers could not be
 * unlocked, RAM size detection failed, or the card has no power).
 *
 * Fixes applied: the address-of operator in the two
 * pci_read_config_dword() calls had been corrupted to a "registered"
 * sign; restored "&regd".
 */
static int sisfb_post_xgi(struct pci_dev *pdev)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	unsigned char *bios = ivideo->bios_abase;
	struct pci_dev *mypdev = NULL;
	const u8 *ptr, *ptr2;
	u8 v1, v2, v3, v4, v5, reg, ramtype;
	u32 rega, regb, regd;
	int i, j, k, index;
	/* Fallback tables used when no XGI BIOS ROM is present; the 8-wide
	 * tables are indexed by RAM type (regb). */
	static const u8 cs78[3] = { 0xf6, 0x0d, 0x00 };
	static const u8 cs76[2] = { 0xa3, 0xfb };
	static const u8 cs7b[3] = { 0xc0, 0x11, 0x00 };
	static const u8 cs158[8] = {
		0x88, 0xaa, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs160[8] = {
		0x44, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs168[8] = {
		0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs128[3 * 8] = {
		0x90, 0x28, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x77, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x77, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs148[2 * 8] = {
		0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs31a[8 * 4] = {
		0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
		0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs33a[8 * 4] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs45a[8 * 2] = {
		0x00, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs170[7 * 8] = {
		0x54, 0x32, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x54, 0x43, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x0a, 0x05, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x44, 0x34, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x10, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x11, 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs1a8[3 * 8] = {
		0xf0, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x05, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	static const u8 cs100[2 * 8] = {
		0xc4, 0x04, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
		0xc4, 0x04, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	/* VGA enable */
	reg = SiS_GetRegByte(SISVGAENABLE) | 0x01;
	SiS_SetRegByte(SISVGAENABLE, reg);

	/* Misc */
	reg = SiS_GetRegByte(SISMISCR) | 0x01;
	SiS_SetRegByte(SISMISCW, reg);

	/* Unlock SR */
	SiS_SetReg(SISSR, 0x05, 0x86);
	reg = SiS_GetReg(SISSR, 0x05);
	if(reg != 0xa1)
		return 0;

	/* Clear some regs */
	for(i = 0; i < 0x22; i++) {
		if(0x06 + i == 0x20) continue;	/* keep SR20 (linear mode bits) */
		SiS_SetReg(SISSR, 0x06 + i, 0x00);
	}
	for(i = 0; i < 0x0b; i++) {
		SiS_SetReg(SISSR, 0x31 + i, 0x00);
	}
	for(i = 0; i < 0x10; i++) {
		SiS_SetReg(SISCR, 0x30 + i, 0x00);
	}

	/* SR23-25 from ROM offset 0x78 or defaults */
	ptr = cs78;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x78];
	}
	for(i = 0; i < 3; i++) {
		SiS_SetReg(SISSR, 0x23 + i, ptr[i]);
	}

	/* SR21-22 from ROM offset 0x76 or defaults */
	ptr = cs76;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x76];
	}
	for(i = 0; i < 2; i++) {
		SiS_SetReg(SISSR, 0x21 + i, ptr[i]);
	}

	v1 = 0x18; v2 = 0x00;
	if(ivideo->haveXGIROM) {
		v1 = bios[0x74];
		v2 = bios[0x75];
	}
	SiS_SetReg(SISSR, 0x07, v1);
	SiS_SetReg(SISSR, 0x11, 0x0f);
	SiS_SetReg(SISSR, 0x1f, v2);
	/* PCI linear mode, RelIO enabled, A0000 decoding disabled */
	SiS_SetReg(SISSR, 0x20, 0x80 | 0x20 | 0x04);
	SiS_SetReg(SISSR, 0x27, 0x74);

	/* SR31-33 from ROM offset 0x7b or defaults */
	ptr = cs7b;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x7b];
	}
	for(i = 0; i < 3; i++) {
		SiS_SetReg(SISSR, 0x31 + i, ptr[i]);
	}

	if(ivideo->chip == XGI_40) {
		if(ivideo->revision_id == 2) {
			SiS_SetRegANDOR(SISSR, 0x3b, 0x3f, 0xc0);
		}
		SiS_SetReg(SISCR, 0x7d, 0xfe);
		SiS_SetReg(SISCR, 0x7e, 0x0f);
	}
	if(ivideo->revision_id == 0) {	/* 40 *and* 20? */
		SiS_SetRegAND(SISCR, 0x58, 0xd7);
		reg = SiS_GetReg(SISCR, 0xcb);
		if(reg & 0x20) {
			SiS_SetRegANDOR(SISCR, 0x58, 0xd7, (reg & 0x10) ? 0x08 : 0x20); /* =0x28 Z7 ? */
		}
	}

	reg = (ivideo->chip == XGI_40) ? 0x20 : 0x00;
	SiS_SetRegANDOR(SISCR, 0x38, 0x1f, reg);

	if(ivideo->chip == XGI_20) {
		SiS_SetReg(SISSR, 0x36, 0x70);
	} else {
		/* Chips with CRT2/video support: init video, capture and
		 * bridge registers, then detect the host bridge type. */
		SiS_SetReg(SISVID, 0x00, 0x86);
		SiS_SetReg(SISVID, 0x32, 0x00);
		SiS_SetReg(SISVID, 0x30, 0x00);
		SiS_SetReg(SISVID, 0x32, 0x01);
		SiS_SetReg(SISVID, 0x30, 0x00);
		SiS_SetRegAND(SISVID, 0x2f, 0xdf);
		SiS_SetRegAND(SISCAP, 0x00, 0x3f);

		SiS_SetReg(SISPART1, 0x2f, 0x01);
		SiS_SetReg(SISPART1, 0x00, 0x00);
		SiS_SetReg(SISPART1, 0x02, bios[0x7e]);
		SiS_SetReg(SISPART1, 0x2e, 0x08);
		SiS_SetRegAND(SISPART1, 0x35, 0x7f);
		SiS_SetRegAND(SISPART1, 0x50, 0xfe);

		reg = SiS_GetReg(SISPART4, 0x00);
		if(reg == 1 || reg == 2) {
			SiS_SetReg(SISPART2, 0x00, 0x1c);
			SiS_SetReg(SISPART4, 0x0d, bios[0x7f]);
			SiS_SetReg(SISPART4, 0x0e, bios[0x80]);
			SiS_SetReg(SISPART4, 0x10, bios[0x81]);
			SiS_SetRegAND(SISPART4, 0x0f, 0x3f);

			reg = SiS_GetReg(SISPART4, 0x01);
			if((reg & 0xf0) >= 0xb0) {
				reg = SiS_GetReg(SISPART4, 0x23);
				if(reg & 0x20) reg |= 0x40;
				SiS_SetReg(SISPART4, 0x23, reg);
				reg = (reg & 0x20) ? 0x02 : 0x00;
				SiS_SetRegANDOR(SISPART1, 0x1e, 0xfd, reg);
			}
		}

		v1 = bios[0x77];

		reg = SiS_GetReg(SISSR, 0x3b);
		if(reg & 0x02) {
			reg = SiS_GetReg(SISSR, 0x3a);
			v2 = (reg & 0x30) >> 3;
			if(!(v2 & 0x04)) v2 ^= 0x02;
			reg = SiS_GetReg(SISSR, 0x39);
			if(reg & 0x80) v2 |= 0x80;
			v2 |= 0x01;

			/* Adjust bus settings by the host bridge vendor/device */
			if((mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0730, NULL))) {
				pci_dev_put(mypdev);
				if(((v2 & 0x06) == 2) || ((v2 & 0x06) == 4))
					v2 &= 0xf9;
				v2 |= 0x08;
				v1 &= 0xfe;
			} else {
				mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0735, NULL);
				if(!mypdev)
					mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0645, NULL);
				if(!mypdev)
					mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0650, NULL);
				if(mypdev) {
					/* FIX: "&regd" restored (was corrupted) */
					pci_read_config_dword(mypdev, 0x94, &regd);
					regd &= 0xfffffeff;
					pci_write_config_dword(mypdev, 0x94, regd);
					v1 &= 0xfe;
					pci_dev_put(mypdev);
				} else if(sisfb_find_host_bridge(ivideo, pdev, PCI_VENDOR_ID_SI)) {
					v1 &= 0xfe;
				} else if(sisfb_find_host_bridge(ivideo, pdev, 0x1106) ||
					  sisfb_find_host_bridge(ivideo, pdev, 0x1022) ||
					  sisfb_find_host_bridge(ivideo, pdev, 0x700e) ||
					  sisfb_find_host_bridge(ivideo, pdev, 0x10de)) {
					if((v2 & 0x06) == 4)
						v2 ^= 0x06;
					v2 |= 0x08;
				}
			}
			SiS_SetRegANDOR(SISCR, 0x5f, 0xf0, v2);
		}
		SiS_SetReg(SISSR, 0x22, v1);

		if(ivideo->revision_id == 2) {
			v1 = SiS_GetReg(SISSR, 0x3b);
			v2 = SiS_GetReg(SISSR, 0x3a);
			regd = bios[0x90 + 3] | (bios[0x90 + 4] << 8);
			if( (!(v1 & 0x02)) && (v2 & 0x30) && (regd < 0xcf) )
				SiS_SetRegANDOR(SISCR, 0x5f, 0xf1, 0x01);

			if((mypdev = pci_get_device(0x10de, 0x01e0, NULL))) {
				/* TODO: set CR5f &0xf1 | 0x01 for version 6570
				 * of nforce 2 ROM
				 */
				if(0)
					SiS_SetRegANDOR(SISCR, 0x5f, 0xf1, 0x01);
				pci_dev_put(mypdev);
			}
		}

		v1 = 0x30;
		reg = SiS_GetReg(SISSR, 0x3b);
		v2 = SiS_GetReg(SISCR, 0x5f);
		if((!(reg & 0x02)) && (v2 & 0x0e))
			v1 |= 0x08;
		SiS_SetReg(SISSR, 0x27, v1);

		if(bios[0x64] & 0x01) {
			SiS_SetRegANDOR(SISCR, 0x5f, 0xf0, bios[0x64]);
		}

		v1 = bios[0x4f7];
		/* FIX: "&regd" restored (was corrupted) */
		pci_read_config_dword(pdev, 0x50, &regd);
		regd = (regd >> 20) & 0x0f;
		if(regd == 1) {
			v1 &= 0xfc;
			SiS_SetRegOR(SISCR, 0x5f, 0x08);
		}
		SiS_SetReg(SISCR, 0x48, v1);

		SiS_SetRegANDOR(SISCR, 0x47, 0x04, bios[0x4f6] & 0xfb);
		SiS_SetRegANDOR(SISCR, 0x49, 0xf0, bios[0x4f8] & 0x0f);
		SiS_SetRegANDOR(SISCR, 0x4a, 0x60, bios[0x4f9] & 0x9f);
		SiS_SetRegANDOR(SISCR, 0x4b, 0x08, bios[0x4fa] & 0xf7);
		SiS_SetRegANDOR(SISCR, 0x4c, 0x80, bios[0x4fb] & 0x7f);
		SiS_SetReg(SISCR, 0x70, bios[0x4fc]);
		SiS_SetRegANDOR(SISCR, 0x71, 0xf0, bios[0x4fd] & 0x0f);
		SiS_SetReg(SISCR, 0x74, 0xd0);
		SiS_SetRegANDOR(SISCR, 0x74, 0xcf, bios[0x4fe] & 0x30);
		SiS_SetRegANDOR(SISCR, 0x75, 0xe0, bios[0x4ff] & 0x1f);
		SiS_SetRegANDOR(SISCR, 0x76, 0xe0, bios[0x500] & 0x1f);
		v1 = bios[0x501];
		if((mypdev = pci_get_device(0x8086, 0x2530, NULL))) {
			v1 = 0xf0;
			pci_dev_put(mypdev);
		}
		SiS_SetReg(SISCR, 0x77, v1);
	}

	/* RAM type:
	 *
	 * 0 == DDR1, 1 == DDR2, 2..7 == reserved?
	 *
	 * The code seems to written so that regb should equal ramtype,
	 * however, so far it has been hardcoded to 0. Enable other values only
	 * on XGI Z9, as it passes the POST, and add a warning for others.
	 */
	ramtype = sisfb_post_xgi_ramtype(ivideo);
	if (!sisfb_xgi_is21(ivideo) && ramtype) {
		dev_warn(&pdev->dev,
			 "RAM type something else than expected: %d\n",
			 ramtype);
		regb = 0;
	} else {
		regb = ramtype;
	}

	v1 = 0xff;
	if(ivideo->haveXGIROM) {
		v1 = bios[0x140 + regb];
	}
	SiS_SetReg(SISCR, 0x6d, v1);

	/* CR68-6A from cs128 / ROM 0x128, indexed by RAM type */
	ptr = cs128;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x128];
	}
	for(i = 0, j = 0; i < 3; i++, j += 8) {
		SiS_SetReg(SISCR, 0x68 + i, ptr[j + regb]);
	}

	/* Shift 32-bit delay patterns, 2 bits at a time, into CR6B/CR6E.
	 * Each write is followed by two dummy reads (settling cycles). */
	ptr  = cs31a;
	ptr2 = cs33a;
	if(ivideo->haveXGIROM) {
		index = (ivideo->chip == XGI_20) ? 0x31a : 0x3a6;
		ptr  = (const u8 *)&bios[index];
		ptr2 = (const u8 *)&bios[index + 0x20];
	}
	for(i = 0; i < 2; i++) {
		if(i == 0) {
			regd = le32_to_cpu(((u32 *)ptr)[regb]);
			rega = 0x6b;
		} else {
			regd = le32_to_cpu(((u32 *)ptr2)[regb]);
			rega = 0x6e;
		}
		reg = 0x00;
		for(j = 0; j < 16; j++) {
			reg &= 0xf3;
			if(regd & 0x01) reg |= 0x04;
			if(regd & 0x02) reg |= 0x08;
			regd >>= 2;
			SiS_SetReg(SISCR, rega, reg);
			reg = SiS_GetReg(SISCR, rega);
			reg = SiS_GetReg(SISCR, rega);
			reg += 0x10;
		}
	}

	SiS_SetRegAND(SISCR, 0x6e, 0xfc);

	/* Same 2-bit shift scheme into CR6F, four banks selected via CR6E */
	ptr = NULL;
	if(ivideo->haveXGIROM) {
		index = (ivideo->chip == XGI_20) ? 0x35a : 0x3e6;
		ptr = (const u8 *)&bios[index];
	}
	for(i = 0; i < 4; i++) {
		SiS_SetRegANDOR(SISCR, 0x6e, 0xfc, i);
		reg = 0x00;
		for(j = 0; j < 2; j++) {
			regd = 0;
			if(ptr) {
				regd = le32_to_cpu(((u32 *)ptr)[regb * 8]);
				ptr += 4;
			}
			/* reg = 0x00; */
			for(k = 0; k < 16; k++) {
				reg &= 0xfc;
				if(regd & 0x01) reg |= 0x01;
				if(regd & 0x02) reg |= 0x02;
				regd >>= 2;
				SiS_SetReg(SISCR, 0x6f, reg);
				reg = SiS_GetReg(SISCR, 0x6f);
				reg = SiS_GetReg(SISCR, 0x6f);
				reg += 0x08;
			}
		}
	}

	/* CR80-81 from cs148 / ROM 0x148 */
	ptr = cs148;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x148];
	}
	for(i = 0, j = 0; i < 2; i++, j += 8) {
		SiS_SetReg(SISCR, 0x80 + i, ptr[j + regb]);
	}

	SiS_SetRegAND(SISCR, 0x89, 0x8f);

	/* 10-bit pattern (5 x 2 bits) shifted into CR89 */
	ptr = cs45a;
	if(ivideo->haveXGIROM) {
		index = (ivideo->chip == XGI_20) ? 0x45a : 0x4e6;
		ptr = (const u8 *)&bios[index];
	}
	regd = le16_to_cpu(((const u16 *)ptr)[regb]);
	reg = 0x80;
	for(i = 0; i < 5; i++) {
		reg &= 0xfc;
		if(regd & 0x01) reg |= 0x01;
		if(regd & 0x02) reg |= 0x02;
		regd >>= 2;
		SiS_SetReg(SISCR, 0x89, reg);
		reg = SiS_GetReg(SISCR, 0x89);
		reg = SiS_GetReg(SISCR, 0x89);
		reg += 0x10;
	}

	v1 = 0xb5; v2 = 0x20; v3 = 0xf0; v4 = 0x13;
	if(ivideo->haveXGIROM) {
		v1 = bios[0x118 + regb];
		v2 = bios[0xf8 + regb];
		v3 = bios[0x120 + regb];
		v4 = bios[0x1ca];
	}
	SiS_SetReg(SISCR, 0x45, v1 & 0x0f);
	SiS_SetReg(SISCR, 0x99, (v1 >> 4) & 0x07);
	SiS_SetRegOR(SISCR, 0x40, v1 & 0x80);
	SiS_SetReg(SISCR, 0x41, v2);

	/* CR90-96 from cs170 / ROM 0x170 */
	ptr = cs170;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x170];
	}
	for(i = 0, j = 0; i < 7; i++, j += 8) {
		SiS_SetReg(SISCR, 0x90 + i, ptr[j + regb]);
	}

	SiS_SetReg(SISCR, 0x59, v3);

	/* CRc3-c5 from cs1a8 / ROM 0x1a8 */
	ptr = cs1a8;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x1a8];
	}
	for(i = 0, j = 0; i < 3; i++, j += 8) {
		SiS_SetReg(SISCR, 0xc3 + i, ptr[j + regb]);
	}

	/* CR8a-8b from cs100 / ROM 0x100 */
	ptr = cs100;
	if(ivideo->haveXGIROM) {
		ptr = (const u8 *)&bios[0x100];
	}
	for(i = 0, j = 0; i < 2; i++, j += 8) {
		SiS_SetReg(SISCR, 0x8a + i, ptr[j + regb]);
	}

	SiS_SetReg(SISCR, 0xcf, v4);

	SiS_SetReg(SISCR, 0x83, 0x09);
	SiS_SetReg(SISCR, 0x87, 0x00);

	if(ivideo->chip == XGI_40) {
		if( (ivideo->revision_id == 1) ||
		    (ivideo->revision_id == 2) ) {
			SiS_SetReg(SISCR, 0x8c, 0x87);
		}
	}

	if (regb == 1)
		SiS_SetReg(SISSR, 0x17, 0x80);		/* DDR2 */
	else
		SiS_SetReg(SISSR, 0x17, 0x00);		/* DDR1 */
	SiS_SetReg(SISSR, 0x1a, 0x87);

	if(ivideo->chip == XGI_20) {
		SiS_SetReg(SISSR, 0x15, 0x00);
		SiS_SetReg(SISSR, 0x1c, 0x00);
	}

	/* DRAM initialization, per RAM type */
	switch(ramtype) {
	case 0:
		sisfb_post_xgi_setclocks(ivideo, regb);
		if((ivideo->chip == XGI_20) ||
		   (ivideo->revision_id == 1)   ||
		   (ivideo->revision_id == 2)) {
			v1 = cs158[regb]; v2 = cs160[regb]; v3 = cs168[regb];
			if(ivideo->haveXGIROM) {
				v1 = bios[regb + 0x158];
				v2 = bios[regb + 0x160];
				v3 = bios[regb + 0x168];
			}
			SiS_SetReg(SISCR, 0x82, v1);
			SiS_SetReg(SISCR, 0x85, v2);
			SiS_SetReg(SISCR, 0x86, v3);
		} else {
			SiS_SetReg(SISCR, 0x82, 0x88);
			SiS_SetReg(SISCR, 0x86, 0x00);
			reg = SiS_GetReg(SISCR, 0x86);
			SiS_SetReg(SISCR, 0x86, 0x88);
			reg = SiS_GetReg(SISCR, 0x86);
			SiS_SetReg(SISCR, 0x86, bios[regb + 0x168]);
			SiS_SetReg(SISCR, 0x82, 0x77);
			SiS_SetReg(SISCR, 0x85, 0x00);
			reg = SiS_GetReg(SISCR, 0x85);
			SiS_SetReg(SISCR, 0x85, 0x88);
			reg = SiS_GetReg(SISCR, 0x85);
			SiS_SetReg(SISCR, 0x85, bios[regb + 0x160]);
			SiS_SetReg(SISCR, 0x82, bios[regb + 0x158]);
		}
		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISCR, 0x97, 0x00);
		}
		SiS_SetReg(SISCR, 0x98, 0x01);
		SiS_SetReg(SISCR, 0x9a, 0x02);

		SiS_SetReg(SISSR, 0x18, 0x01);
		if((ivideo->chip == XGI_20) ||
		   (ivideo->revision_id == 2)) {
			SiS_SetReg(SISSR, 0x19, 0x40);
		} else {
			SiS_SetReg(SISSR, 0x19, 0x20);
		}
		SiS_SetReg(SISSR, 0x16, 0x00);
		SiS_SetReg(SISSR, 0x16, 0x80);
		if((ivideo->chip == XGI_20) || (bios[0x1cb] != 0x0c)) {
			sisfb_post_xgi_delay(ivideo, 0x43);
			sisfb_post_xgi_delay(ivideo, 0x43);
			sisfb_post_xgi_delay(ivideo, 0x43);
			SiS_SetReg(SISSR, 0x18, 0x00);
			if((ivideo->chip == XGI_20) ||
			   (ivideo->revision_id == 2)) {
				SiS_SetReg(SISSR, 0x19, 0x40);
			} else {
				SiS_SetReg(SISSR, 0x19, 0x20);
			}
		} else if((ivideo->chip == XGI_40) && (bios[0x1cb] == 0x0c)) {
			/* SiS_SetReg(SISSR, 0x16, 0x0c); */ /* ? */
		}
		SiS_SetReg(SISSR, 0x16, 0x00);
		SiS_SetReg(SISSR, 0x16, 0x80);
		sisfb_post_xgi_delay(ivideo, 4);
		v1 = 0x31; v2 = 0x03; v3 = 0x83; v4 = 0x03; v5 = 0x83;
		if(ivideo->haveXGIROM) {
			v1 = bios[0xf0];
			index = (ivideo->chip == XGI_20) ? 0x4b2 : 0x53e;
			v2 = bios[index];
			v3 = bios[index + 1];
			v4 = bios[index + 2];
			v5 = bios[index + 3];
		}
		SiS_SetReg(SISSR, 0x18, v1);
		SiS_SetReg(SISSR, 0x19, ((ivideo->chip == XGI_20) ? 0x02 : 0x01));
		SiS_SetReg(SISSR, 0x16, v2);
		SiS_SetReg(SISSR, 0x16, v3);
		sisfb_post_xgi_delay(ivideo, 0x43);
		SiS_SetReg(SISSR, 0x1b, 0x03);
		sisfb_post_xgi_delay(ivideo, 0x22);
		SiS_SetReg(SISSR, 0x18, v1);
		SiS_SetReg(SISSR, 0x19, 0x00);
		SiS_SetReg(SISSR, 0x16, v4);
		SiS_SetReg(SISSR, 0x16, v5);
		SiS_SetReg(SISSR, 0x1b, 0x00);
		break;
	case 1:
		sisfb_post_xgi_ddr2(ivideo, regb);
		break;
	default:
		sisfb_post_xgi_setclocks(ivideo, regb);
		if((ivideo->chip == XGI_40) &&
		   ((ivideo->revision_id == 1) ||
		    (ivideo->revision_id == 2))) {
			SiS_SetReg(SISCR, 0x82, bios[regb + 0x158]);
			SiS_SetReg(SISCR, 0x85, bios[regb + 0x160]);
			SiS_SetReg(SISCR, 0x86, bios[regb + 0x168]);
		} else {
			SiS_SetReg(SISCR, 0x82, 0x88);
			SiS_SetReg(SISCR, 0x86, 0x00);
			reg = SiS_GetReg(SISCR, 0x86);
			SiS_SetReg(SISCR, 0x86, 0x88);
			SiS_SetReg(SISCR, 0x82, 0x77);
			SiS_SetReg(SISCR, 0x85, 0x00);
			reg = SiS_GetReg(SISCR, 0x85);
			SiS_SetReg(SISCR, 0x85, 0x88);
			reg = SiS_GetReg(SISCR, 0x85);
			v1 = cs160[regb]; v2 = cs158[regb];
			if(ivideo->haveXGIROM) {
				v1 = bios[regb + 0x160];
				v2 = bios[regb + 0x158];
			}
			SiS_SetReg(SISCR, 0x85, v1);
			SiS_SetReg(SISCR, 0x82, v2);
		}
		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISCR, 0x97, 0x11);
		}
		if((ivideo->chip == XGI_40) && (ivideo->revision_id == 2)) {
			SiS_SetReg(SISCR, 0x98, 0x01);
		} else {
			SiS_SetReg(SISCR, 0x98, 0x03);
		}
		SiS_SetReg(SISCR, 0x9a, 0x02);

		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISSR, 0x18, 0x01);
		} else {
			SiS_SetReg(SISSR, 0x18, 0x00);
		}
		SiS_SetReg(SISSR, 0x19, 0x40);
		SiS_SetReg(SISSR, 0x16, 0x00);
		SiS_SetReg(SISSR, 0x16, 0x80);
		if((ivideo->chip == XGI_40) && (bios[0x1cb] != 0x0c)) {
			sisfb_post_xgi_delay(ivideo, 0x43);
			sisfb_post_xgi_delay(ivideo, 0x43);
			sisfb_post_xgi_delay(ivideo, 0x43);
			SiS_SetReg(SISSR, 0x18, 0x00);
			SiS_SetReg(SISSR, 0x19, 0x40);
			SiS_SetReg(SISSR, 0x16, 0x00);
			SiS_SetReg(SISSR, 0x16, 0x80);
		}
		sisfb_post_xgi_delay(ivideo, 4);
		v1 = 0x31;
		if(ivideo->haveXGIROM) {
			v1 = bios[0xf0];
		}
		SiS_SetReg(SISSR, 0x18, v1);
		SiS_SetReg(SISSR, 0x19, 0x01);
		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISSR, 0x16, bios[0x53e]);
			SiS_SetReg(SISSR, 0x16, bios[0x53f]);
		} else {
			SiS_SetReg(SISSR, 0x16, 0x05);
			SiS_SetReg(SISSR, 0x16, 0x85);
		}
		sisfb_post_xgi_delay(ivideo, 0x43);
		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISSR, 0x1b, 0x01);
		} else {
			SiS_SetReg(SISSR, 0x1b, 0x03);
		}
		sisfb_post_xgi_delay(ivideo, 0x22);
		SiS_SetReg(SISSR, 0x18, v1);
		SiS_SetReg(SISSR, 0x19, 0x00);
		if(ivideo->chip == XGI_40) {
			SiS_SetReg(SISSR, 0x16, bios[0x540]);
			SiS_SetReg(SISSR, 0x16, bios[0x541]);
		} else {
			SiS_SetReg(SISSR, 0x16, 0x05);
			SiS_SetReg(SISSR, 0x16, 0x85);
		}
		SiS_SetReg(SISSR, 0x1b, 0x00);
	}

	regb = 0;	/* ! */
	v1 = 0x03;
	if(ivideo->haveXGIROM) {
		v1 = bios[0x110 + regb];
	}
	SiS_SetReg(SISSR, 0x1b, v1);

	/* RAM size */
	v1 = 0x00; v2 = 0x00;
	if(ivideo->haveXGIROM) {
		v1 = bios[0x62];
		v2 = bios[0x63];
	}
	regb = 0;	/* ! */
	regd = 1 << regb;
	if((v1 & 0x40) && (v2 & regd) && ivideo->haveXGIROM) {
		/* ROM provides SR13/SR14 directly */
		SiS_SetReg(SISSR, 0x13, bios[regb + 0xe0]);
		SiS_SetReg(SISSR, 0x14, bios[regb + 0xe0 + 8]);
	} else {
		int err;

		/* Set default mode, don't clear screen */
		ivideo->SiS_Pr.SiS_UseOEM = false;
		SiS_SetEnableDstn(&ivideo->SiS_Pr, false);
		SiS_SetEnableFstn(&ivideo->SiS_Pr, false);
		ivideo->curFSTN = ivideo->curDSTN = 0;
		ivideo->SiS_Pr.VideoMemorySize = 8 << 20;
		SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80);

		SiS_SetReg(SISSR, 0x05, 0x86);

		/* Disable read-cache */
		SiS_SetRegAND(SISSR, 0x21, 0xdf);
		err = sisfb_post_xgi_ramsize(ivideo);
		/* Enable read-cache */
		SiS_SetRegOR(SISSR, 0x21, 0x20);

		if (err) {
			dev_err(&pdev->dev,
				"%s: RAM size detection failed: %d\n",
				__func__, err);
			return 0;
		}
	}

#if 0
	printk(KERN_DEBUG "-----------------\n");
	for(i = 0; i < 0xff; i++) {
		reg = SiS_GetReg(SISCR, i);
		printk(KERN_DEBUG "CR%02x(%x) = 0x%02x\n", i, SISCR, reg);
	}
	for(i = 0; i < 0x40; i++) {
		reg = SiS_GetReg(SISSR, i);
		printk(KERN_DEBUG "SR%02x(%x) = 0x%02x\n", i, SISSR, reg);
	}
	printk(KERN_DEBUG "-----------------\n");
#endif

	/* Sense CRT1 */
	if(ivideo->chip == XGI_20) {
		SiS_SetRegOR(SISCR, 0x32, 0x20);
	} else {
		reg = SiS_GetReg(SISPART4, 0x00);
		if((reg == 1) || (reg == 2)) {
			sisfb_sense_crt1(ivideo);
		} else {
			SiS_SetRegOR(SISCR, 0x32, 0x20);
		}
	}

	/* Set default mode, don't clear screen */
	ivideo->SiS_Pr.SiS_UseOEM = false;
	SiS_SetEnableDstn(&ivideo->SiS_Pr, false);
	SiS_SetEnableFstn(&ivideo->SiS_Pr, false);
	ivideo->curFSTN = ivideo->curDSTN = 0;
	SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80);

	SiS_SetReg(SISSR, 0x05, 0x86);

	/* Display off */
	SiS_SetRegOR(SISSR, 0x01, 0x20);

	/* Save mode number in CR34 */
	SiS_SetReg(SISCR, 0x34, 0x2e);

	/* Let everyone know what the current mode is */
	ivideo->modeprechange = 0x2e;

	if(ivideo->chip == XGI_40) {
		reg = SiS_GetReg(SISCR, 0xca);
		v1 = SiS_GetReg(SISCR, 0xcc);
		if((reg & 0x10) && (!(v1 & 0x04))) {
			printk(KERN_ERR
				"sisfb: Please connect power to the card.\n");
			return 0;
		}
	}

	return 1;
}
#endif
/*
 * sisfb_probe - bring up one SiS/XGI graphics adapter
 * @pdev: the PCI device
 * @ent:  matching entry of sisfb_pci_table; ent->driver_data indexes
 *        sisfb_chip_info[]
 *
 * Performs the complete initialization: chip identification, optional
 * card POST, VRAM sizing, resource reservation and mapping, CRT1/CRT2
 * output detection, default mode selection and framebuffer registration.
 *
 * Returns 0 on success (also when mode "none" was requested and only
 * partial setup is done) or a negative errno on failure.
 */
static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct sisfb_chip_info *chipinfo = &sisfb_chip_info[ent->driver_data];
	struct sis_video_info *ivideo = NULL;
	struct fb_info *sis_fb_info = NULL;
	u16 reg16;
	u8 reg;
	int i, ret;

	if(sisfb_off)
		return -ENXIO;

	sis_fb_info = framebuffer_alloc(sizeof(*ivideo), &pdev->dev);
	if(!sis_fb_info)
		return -ENOMEM;

	ivideo = (struct sis_video_info *)sis_fb_info->par;
	ivideo->memyselfandi = sis_fb_info;

	ivideo->sisfb_id = SISFB_ID;

	/* Count the previously probed cards to get our card number */
	if(card_list == NULL) {
		ivideo->cardnumber = 0;
	} else {
		struct sis_video_info *countvideo = card_list;
		ivideo->cardnumber = 1;
		while((countvideo = countvideo->next) != NULL)
			ivideo->cardnumber++;
	}

	/* NOTE(review): strncpy() will not NUL-terminate if chip_name is
	 * 30 chars or longer -- assumed shorter; confirm against the
	 * sisfb_chip_info table. */
	strncpy(ivideo->myid, chipinfo->chip_name, 30);

	ivideo->warncount = 0;
	ivideo->chip_id = pdev->device;
	ivideo->chip_vendor = pdev->vendor;
	ivideo->revision_id = pdev->revision;
	ivideo->SiS_Pr.ChipRevision = ivideo->revision_id;

	/* Bit 0 of PCI_COMMAND (I/O space enable) tells us whether the
	 * BIOS enabled the VGA part of the card.
	 * (Fixed: "&reg16" had been mangled by a bad HTML-entity
	 * conversion, which did not compile.) */
	pci_read_config_word(pdev, PCI_COMMAND, &reg16);
	ivideo->sisvga_enabled = reg16 & 0x01;

	ivideo->pcibus = pdev->bus->number;
	ivideo->pcislot = PCI_SLOT(pdev->devfn);
	ivideo->pcifunc = PCI_FUNC(pdev->devfn);
	ivideo->subsysvendor = pdev->subsystem_vendor;
	ivideo->subsysdevice = pdev->subsystem_device;

#ifndef MODULE
	if(sisfb_mode_idx == -1) {
		sisfb_get_vga_mode_from_kernel();
	}
#endif

	ivideo->chip = chipinfo->chip;
	ivideo->chip_real_id = chipinfo->chip;
	ivideo->sisvga_engine = chipinfo->vgaengine;
	ivideo->hwcursor_size = chipinfo->hwcursor_size;
	ivideo->CRT2_write_enable = chipinfo->CRT2_write_enable;
	ivideo->mni = chipinfo->mni;

	/* 0xff = "not detected (yet)" */
	ivideo->detectedpdc = 0xff;
	ivideo->detectedpdca = 0xff;
	ivideo->detectedlcda = 0xff;

	ivideo->sisfb_thismonitor.datavalid = false;

	ivideo->current_base = 0;

	ivideo->engineok = 0;

	ivideo->sisfb_was_boot_device = 0;

	if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) {
		if(ivideo->sisvga_enabled)
			ivideo->sisfb_was_boot_device = 1;
		else {
			printk(KERN_DEBUG "sisfb: PCI device is disabled, "
				"but marked as boot video device ???\n");
			printk(KERN_DEBUG "sisfb: I will not accept this "
				"as the primary VGA device\n");
		}
	}

	/* Snapshot the module/option globals into this card's state */
	ivideo->sisfb_parm_mem = sisfb_parm_mem;
	ivideo->sisfb_accel = sisfb_accel;
	ivideo->sisfb_ypan = sisfb_ypan;
	ivideo->sisfb_max = sisfb_max;
	ivideo->sisfb_userom = sisfb_userom;
	ivideo->sisfb_useoem = sisfb_useoem;
	ivideo->sisfb_mode_idx = sisfb_mode_idx;
	ivideo->sisfb_parm_rate = sisfb_parm_rate;
	ivideo->sisfb_crt1off = sisfb_crt1off;
	ivideo->sisfb_forcecrt1 = sisfb_forcecrt1;
	ivideo->sisfb_crt2type = sisfb_crt2type;
	ivideo->sisfb_crt2flags = sisfb_crt2flags;
	/* pdc(a), scalelcd, special timing, lvdshl handled below */
	ivideo->sisfb_dstn = sisfb_dstn;
	ivideo->sisfb_fstn = sisfb_fstn;
	ivideo->sisfb_tvplug = sisfb_tvplug;
	ivideo->sisfb_tvstd = sisfb_tvstd;
	ivideo->tvxpos = sisfb_tvxposoffset;
	ivideo->tvypos = sisfb_tvyposoffset;
	ivideo->sisfb_nocrt2rate = sisfb_nocrt2rate;

	ivideo->refresh_rate = 0;
	if(ivideo->sisfb_parm_rate != -1) {
		ivideo->refresh_rate = ivideo->sisfb_parm_rate;
	}

	ivideo->SiS_Pr.UsePanelScaler = sisfb_scalelcd;
	ivideo->SiS_Pr.CenterScreen = -1;
	ivideo->SiS_Pr.SiS_CustomT = sisfb_specialtiming;
	ivideo->SiS_Pr.LVDSHL = sisfb_lvdshl;

	ivideo->SiS_Pr.SiS_Backup70xx = 0xff;
	ivideo->SiS_Pr.SiS_CHOverScan = -1;
	ivideo->SiS_Pr.SiS_ChSW = false;
	ivideo->SiS_Pr.SiS_UseLCDA = false;
	ivideo->SiS_Pr.HaveEMI = false;
	ivideo->SiS_Pr.HaveEMILCD = false;
	ivideo->SiS_Pr.OverruleEMI = false;
	ivideo->SiS_Pr.SiS_SensibleSR11 = false;
	ivideo->SiS_Pr.SiS_MyCR63 = 0x63;
	ivideo->SiS_Pr.PDC = -1;
	ivideo->SiS_Pr.PDCA = -1;
	ivideo->SiS_Pr.DDCPortMixup = false;
#ifdef CONFIG_FB_SIS_315
	if(ivideo->chip >= SIS_330) {
		ivideo->SiS_Pr.SiS_MyCR63 = 0x53;
		if(ivideo->chip >= SIS_661) {
			ivideo->SiS_Pr.SiS_SensibleSR11 = true;
		}
	}
#endif

	memcpy(&ivideo->default_var, &my_default_var, sizeof(my_default_var));

	pci_set_drvdata(pdev, ivideo);

	/* Patch special cases: refine the chip ID via the northbridge */
	if((ivideo->nbridge = sisfb_get_northbridge(ivideo->chip))) {
		switch(ivideo->nbridge->device) {
#ifdef CONFIG_FB_SIS_300
		case PCI_DEVICE_ID_SI_730:
			ivideo->chip = SIS_730;
			strcpy(ivideo->myid, "SiS 730");
			break;
#endif
#ifdef CONFIG_FB_SIS_315
		case PCI_DEVICE_ID_SI_651:
			/* ivideo->chip is ok */
			strcpy(ivideo->myid, "SiS 651");
			break;
		case PCI_DEVICE_ID_SI_740:
			ivideo->chip = SIS_740;
			strcpy(ivideo->myid, "SiS 740");
			break;
		case PCI_DEVICE_ID_SI_661:
			ivideo->chip = SIS_661;
			strcpy(ivideo->myid, "SiS 661");
			break;
		case PCI_DEVICE_ID_SI_741:
			ivideo->chip = SIS_741;
			strcpy(ivideo->myid, "SiS 741");
			break;
		case PCI_DEVICE_ID_SI_760:
			ivideo->chip = SIS_760;
			strcpy(ivideo->myid, "SiS 760");
			break;
		case PCI_DEVICE_ID_SI_761:
			ivideo->chip = SIS_761;
			strcpy(ivideo->myid, "SiS 761");
			break;
#endif
		default:
			break;
		}
	}

	ivideo->SiS_Pr.ChipType = ivideo->chip;

	ivideo->SiS_Pr.ivideo = (void *)ivideo;

#ifdef CONFIG_FB_SIS_315
	if((ivideo->SiS_Pr.ChipType == SIS_315PRO) ||
	   (ivideo->SiS_Pr.ChipType == SIS_315)) {
		ivideo->SiS_Pr.ChipType = SIS_315H;
	}
#endif

	if(!ivideo->sisvga_enabled) {
		if(pci_enable_device(pdev)) {
			if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
			pci_set_drvdata(pdev, NULL);
			framebuffer_release(sis_fb_info);
			return -EIO;
		}
	}

	/* BAR0 = video RAM, BAR1 = MMIO, BAR2 = relocated VGA I/O ports */
	ivideo->video_base = pci_resource_start(pdev, 0);
	ivideo->video_size = pci_resource_len(pdev, 0);
	ivideo->mmio_base = pci_resource_start(pdev, 1);
	ivideo->mmio_size = pci_resource_len(pdev, 1);
	ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30;
	ivideo->SiS_Pr.IOAddress = ivideo->vga_base = ivideo->SiS_Pr.RelIO;

	SiSRegInit(&ivideo->SiS_Pr, ivideo->SiS_Pr.IOAddress);

#ifdef CONFIG_FB_SIS_300
	/* Find PCI systems for Chrontel/GPIO communication setup */
	if(ivideo->chip == SIS_630) {
		i = 0;
		do {
			if(mychswtable[i].subsysVendor == ivideo->subsysvendor &&
			   mychswtable[i].subsysCard == ivideo->subsysdevice) {
				ivideo->SiS_Pr.SiS_ChSW = true;
				printk(KERN_DEBUG "sisfb: Identified [%s %s] "
					"requiring Chrontel/GPIO setup\n",
					mychswtable[i].vendorName,
					mychswtable[i].cardName);
				ivideo->lpcdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, NULL);
				break;
			}
			i++;
		} while(mychswtable[i].subsysVendor != 0);
	}
#endif

#ifdef CONFIG_FB_SIS_315
	if((ivideo->chip == SIS_760) && (ivideo->nbridge)) {
		ivideo->lpcdev = pci_get_slot(ivideo->nbridge->bus, (2 << 3));
	}
#endif

	SiS_SetReg(SISSR, 0x05, 0x86);

	if( (!ivideo->sisvga_enabled)
#if !defined(__i386__) && !defined(__x86_64__)
	    || (sisfb_resetcard)
#endif
	  ) {
		for(i = 0x30; i <= 0x3f; i++) {
			SiS_SetReg(SISCR, i, 0x00);
		}
	}

	/* Find out about current video mode */
	ivideo->modeprechange = 0x03;
	reg = SiS_GetReg(SISCR, 0x34);
	if(reg & 0x7f) {
		ivideo->modeprechange = reg & 0x7f;
	} else if(ivideo->sisvga_enabled) {
#if defined(__i386__) || defined(__x86_64__)
		/* Read the BIOS Data Area mode byte (0x400 + 0x49) */
		unsigned char __iomem *tt = ioremap(0x400, 0x100);
		if(tt) {
			ivideo->modeprechange = readb(tt + 0x49);
			iounmap(tt);
		}
#endif
	}

	/* Search and copy ROM image */
	ivideo->bios_abase = NULL;
	ivideo->SiS_Pr.VirtualRomBase = NULL;
	ivideo->SiS_Pr.UseROM = false;
	ivideo->haveXGIROM = ivideo->SiS_Pr.SiS_XGIROM = false;
	if(ivideo->sisfb_userom) {
		ivideo->SiS_Pr.VirtualRomBase = sisfb_find_rom(pdev);
		ivideo->bios_abase = ivideo->SiS_Pr.VirtualRomBase;
		ivideo->SiS_Pr.UseROM = (bool)(ivideo->SiS_Pr.VirtualRomBase);
		printk(KERN_INFO "sisfb: Video ROM %sfound\n",
			ivideo->SiS_Pr.UseROM ? "" : "not ");
		if((ivideo->SiS_Pr.UseROM) && (ivideo->chip >= XGI_20)) {
			ivideo->SiS_Pr.UseROM = false;
			ivideo->haveXGIROM = ivideo->SiS_Pr.SiS_XGIROM = true;
			if( (ivideo->revision_id == 2) &&
			    (!(ivideo->bios_abase[0x1d1] & 0x01)) ) {
				ivideo->SiS_Pr.DDCPortMixup = true;
			}
		}
	} else {
		printk(KERN_INFO "sisfb: Video ROM usage disabled\n");
	}

	/* Find systems for special custom timing */
	if(ivideo->SiS_Pr.SiS_CustomT == CUT_NONE) {
		sisfb_detect_custom_timing(ivideo);
	}

#ifdef CONFIG_FB_SIS_315
	if (ivideo->chip == XGI_20) {
		/* Check if our Z7 chip is actually Z9 */
		SiS_SetRegOR(SISCR, 0x4a, 0x40);	/* GPIOG EN */
		reg = SiS_GetReg(SISCR, 0x48);
		if (reg & 0x02) {			/* GPIOG */
			ivideo->chip_real_id = XGI_21;
			dev_info(&pdev->dev, "Z9 detected\n");
		}
	}
#endif

	/* POST card in case this has not been done by the BIOS */
	if( (!ivideo->sisvga_enabled)
#if !defined(__i386__) && !defined(__x86_64__)
	    || (sisfb_resetcard)
#endif
	  ) {
#ifdef CONFIG_FB_SIS_300
		if(ivideo->sisvga_engine == SIS_300_VGA) {
			if(ivideo->chip == SIS_300) {
				sisfb_post_sis300(pdev);
				ivideo->sisfb_can_post = 1;
			}
		}
#endif

#ifdef CONFIG_FB_SIS_315
		if(ivideo->sisvga_engine == SIS_315_VGA) {
			int result = 1;
			/* if((ivideo->chip == SIS_315H) ||
			   (ivideo->chip == SIS_315) ||
			   (ivideo->chip == SIS_315PRO) ||
			   (ivideo->chip == SIS_330)) {
				sisfb_post_sis315330(pdev);
			} else */ if(ivideo->chip == XGI_20) {
				result = sisfb_post_xgi(pdev);
				ivideo->sisfb_can_post = 1;
			} else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
				result = sisfb_post_xgi(pdev);
				ivideo->sisfb_can_post = 1;
			} else {
				printk(KERN_INFO "sisfb: Card is not "
					"POSTed and sisfb can't do this either.\n");
			}
			if(!result) {
				printk(KERN_ERR "sisfb: Failed to POST card\n");
				ret = -ENODEV;
				goto error_3;
			}
		}
#endif
	}

	ivideo->sisfb_card_posted = 1;

	/* Find out about RAM size */
	if(sisfb_get_dram_size(ivideo)) {
		printk(KERN_INFO "sisfb: Fatal error: Unable to determine VRAM size.\n");
		ret = -ENODEV;
		goto error_3;
	}

	/* Enable PCI addressing and MMIO */
	if((ivideo->sisfb_mode_idx < 0) ||
	   ((sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]) != 0xFF)) {
		/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
		SiS_SetRegOR(SISSR, IND_SIS_PCI_ADDRESS_SET, (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
		/* Enable 2D accelerator engine */
		SiS_SetRegOR(SISSR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
	}

	if(sisfb_pdc != 0xff) {
		if(ivideo->sisvga_engine == SIS_300_VGA)
			sisfb_pdc &= 0x3c;
		else
			sisfb_pdc &= 0x1f;
		ivideo->SiS_Pr.PDC = sisfb_pdc;
	}

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		if(sisfb_pdca != 0xff)
			ivideo->SiS_Pr.PDCA = sisfb_pdca & 0x1f;
	}
#endif

	if(!request_mem_region(ivideo->video_base, ivideo->video_size, "sisfb FB")) {
		printk(KERN_ERR "sisfb: Fatal error: Unable to reserve %dMB framebuffer memory\n",
				(int)(ivideo->video_size >> 20));
		printk(KERN_ERR "sisfb: Is there another framebuffer driver active?\n");
		ret = -ENODEV;
		goto error_3;
	}

	if(!request_mem_region(ivideo->mmio_base, ivideo->mmio_size, "sisfb MMIO")) {
		printk(KERN_ERR "sisfb: Fatal error: Unable to reserve MMIO region\n");
		ret = -ENODEV;
		goto error_2;
	}

	ivideo->video_vbase = ioremap(ivideo->video_base, ivideo->video_size);
	ivideo->SiS_Pr.VideoMemoryAddress = ivideo->video_vbase;
	if(!ivideo->video_vbase) {
		printk(KERN_ERR "sisfb: Fatal error: Unable to map framebuffer memory\n");
		ret = -ENODEV;
		goto error_1;
	}

	ivideo->mmio_vbase = ioremap(ivideo->mmio_base, ivideo->mmio_size);
	if(!ivideo->mmio_vbase) {
		printk(KERN_ERR "sisfb: Fatal error: Unable to map MMIO region\n");
		ret = -ENODEV;
		/* The labels below double as the common unwind path for
		 * every failure after the corresponding resource was
		 * acquired; falling through releases everything. */
error_0:	iounmap(ivideo->video_vbase);
error_1:	release_mem_region(ivideo->video_base, ivideo->video_size);
error_2:	release_mem_region(ivideo->mmio_base, ivideo->mmio_size);
error_3:	vfree(ivideo->bios_abase);
		if(ivideo->lpcdev)
			pci_dev_put(ivideo->lpcdev);
		if(ivideo->nbridge)
			pci_dev_put(ivideo->nbridge);
		pci_set_drvdata(pdev, NULL);
		if(!ivideo->sisvga_enabled)
			pci_disable_device(pdev);
		framebuffer_release(sis_fb_info);
		return ret;
	}

	printk(KERN_INFO "sisfb: Video RAM at 0x%lx, mapped to 0x%lx, size %ldk\n",
		ivideo->video_base, (unsigned long)ivideo->video_vbase, ivideo->video_size / 1024);

	if(ivideo->video_offset) {
		printk(KERN_INFO "sisfb: Viewport offset %ldk\n",
			ivideo->video_offset / 1024);
	}

	printk(KERN_INFO "sisfb: MMIO at 0x%lx, mapped to 0x%lx, size %ldk\n",
		ivideo->mmio_base, (unsigned long)ivideo->mmio_vbase, ivideo->mmio_size / 1024);

	/* Determine the size of the command queue */
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		ivideo->cmdQueueSize = TURBO_QUEUE_AREA_SIZE;
	} else {
		if(ivideo->chip == XGI_20) {
			ivideo->cmdQueueSize = COMMAND_QUEUE_AREA_SIZE_Z7;
		} else {
			ivideo->cmdQueueSize = COMMAND_QUEUE_AREA_SIZE;
		}
	}

	/* Engines are no longer initialized here; this is
	 * now done after the first mode-switch (if the
	 * submitted var has its acceleration flags set).
	 */

	/* Calculate the base of the (unused) hw cursor */
	ivideo->hwcursor_vbase = ivideo->video_vbase
				 + ivideo->video_size
				 - ivideo->cmdQueueSize
				 - ivideo->hwcursor_size;
	ivideo->caps |= HW_CURSOR_CAP;

	/* Initialize offscreen memory manager */
	if((ivideo->havenoheap = sisfb_heap_init(ivideo))) {
		printk(KERN_WARNING "sisfb: Failed to initialize offscreen memory heap\n");
	}

	/* Used for clearing the screen only, therefore respect our mem limit */
	ivideo->SiS_Pr.VideoMemoryAddress += ivideo->video_offset;
	ivideo->SiS_Pr.VideoMemorySize = ivideo->sisfb_mem;

	ivideo->mtrr = -1;

	ivideo->vbflags = 0;
	ivideo->lcddefmodeidx = DEFAULT_LCDMODE;
	ivideo->tvdefmodeidx = DEFAULT_TVMODE;
	ivideo->defmodeidx = DEFAULT_MODE;

	ivideo->newrom = 0;
	if(ivideo->chip < XGI_20) {
		if(ivideo->bios_abase) {
			ivideo->newrom = SiSDetermineROMLayout661(&ivideo->SiS_Pr);
		}
	}

	/* Everything below is skipped for mode "none" (mode_no == 0xFF) */
	if((ivideo->sisfb_mode_idx < 0) ||
	   ((sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]) != 0xFF)) {

		sisfb_sense_crt1(ivideo);

		sisfb_get_VB_type(ivideo);

		if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) {
			sisfb_detect_VB_connect(ivideo);
		}

		ivideo->currentvbflags = ivideo->vbflags & (VB_VIDEOBRIDGE | TV_STANDARD);

		/* Decide on which CRT2 device to use */
		if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) {
			if(ivideo->sisfb_crt2type != -1) {
				if((ivideo->sisfb_crt2type == CRT2_LCD) &&
				   (ivideo->vbflags & CRT2_LCD)) {
					ivideo->currentvbflags |= CRT2_LCD;
				} else if(ivideo->sisfb_crt2type != CRT2_LCD) {
					ivideo->currentvbflags |= ivideo->sisfb_crt2type;
				}
			} else {
				/* Chrontel 700x TV detection often unreliable, therefore
				 * use a different default order on such machines
				 */
				if((ivideo->sisvga_engine == SIS_300_VGA) &&
				   (ivideo->vbflags2 & VB2_CHRONTEL)) {
					if(ivideo->vbflags & CRT2_LCD)
						ivideo->currentvbflags |= CRT2_LCD;
					else if(ivideo->vbflags & CRT2_TV)
						ivideo->currentvbflags |= CRT2_TV;
					else if(ivideo->vbflags & CRT2_VGA)
						ivideo->currentvbflags |= CRT2_VGA;
				} else {
					if(ivideo->vbflags & CRT2_TV)
						ivideo->currentvbflags |= CRT2_TV;
					else if(ivideo->vbflags & CRT2_LCD)
						ivideo->currentvbflags |= CRT2_LCD;
					else if(ivideo->vbflags & CRT2_VGA)
						ivideo->currentvbflags |= CRT2_VGA;
				}
			}
		}

		if(ivideo->vbflags & CRT2_LCD) {
			sisfb_detect_lcd_type(ivideo);
		}

		sisfb_save_pdc_emi(ivideo);

		if(!ivideo->sisfb_crt1off) {
			sisfb_handle_ddc(ivideo, &ivideo->sisfb_thismonitor, 0);
		} else {
			if((ivideo->vbflags2 & VB2_SISTMDSBRIDGE) &&
			   (ivideo->vbflags & (CRT2_VGA | CRT2_LCD))) {
				sisfb_handle_ddc(ivideo, &ivideo->sisfb_thismonitor, 1);
			}
		}

		if(ivideo->sisfb_mode_idx >= 0) {
			int bu = ivideo->sisfb_mode_idx;
			ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo,
					ivideo->sisfb_mode_idx, ivideo->currentvbflags);
			if(bu != ivideo->sisfb_mode_idx) {
				printk(KERN_ERR "Mode %dx%dx%d failed validation\n",
					sisbios_mode[bu].xres,
					sisbios_mode[bu].yres,
					sisbios_mode[bu].bpp);
			}
		}

		if(ivideo->sisfb_mode_idx < 0) {
			switch(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
			case CRT2_LCD:
				ivideo->sisfb_mode_idx = ivideo->lcddefmodeidx;
				break;
			case CRT2_TV:
				ivideo->sisfb_mode_idx = ivideo->tvdefmodeidx;
				break;
			default:
				ivideo->sisfb_mode_idx = ivideo->defmodeidx;
				break;
			}
		}

		ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];

		if(ivideo->refresh_rate != 0) {
			sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate,
						ivideo->sisfb_mode_idx);
		}

		if(ivideo->rate_idx == 0) {
			ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx;
			ivideo->refresh_rate = 60;
		}

		if(ivideo->sisfb_thismonitor.datavalid) {
			if(!sisfb_verify_rate(ivideo, &ivideo->sisfb_thismonitor,
						ivideo->sisfb_mode_idx,
						ivideo->rate_idx,
						ivideo->refresh_rate)) {
				printk(KERN_INFO "sisfb: WARNING: Refresh rate "
							"exceeds monitor specs!\n");
			}
		}

		ivideo->video_bpp = sisbios_mode[ivideo->sisfb_mode_idx].bpp;
		ivideo->video_width = sisbios_mode[ivideo->sisfb_mode_idx].xres;
		ivideo->video_height = sisbios_mode[ivideo->sisfb_mode_idx].yres;

		sisfb_set_vparms(ivideo);

		printk(KERN_INFO "sisfb: Default mode is %dx%dx%d (%dHz)\n",
			ivideo->video_width, ivideo->video_height, ivideo->video_bpp,
			ivideo->refresh_rate);

		/* Set up the default var according to chosen default display mode */
		ivideo->default_var.xres = ivideo->default_var.xres_virtual = ivideo->video_width;
		ivideo->default_var.yres = ivideo->default_var.yres_virtual = ivideo->video_height;
		ivideo->default_var.bits_per_pixel = ivideo->video_bpp;

		sisfb_bpp_to_var(ivideo, &ivideo->default_var);

		ivideo->default_var.pixclock = (u32) (1000000000 /
			sisfb_mode_rate_to_dclock(&ivideo->SiS_Pr, ivideo->mode_no, ivideo->rate_idx));

		if(sisfb_mode_rate_to_ddata(&ivideo->SiS_Pr, ivideo->mode_no,
						ivideo->rate_idx, &ivideo->default_var)) {
			if((ivideo->default_var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
				ivideo->default_var.pixclock <<= 1;
			}
		}

		if(ivideo->sisfb_ypan) {
			/* Maximize regardless of sisfb_max at startup */
			ivideo->default_var.yres_virtual =
				sisfb_calc_maxyres(ivideo, &ivideo->default_var);
			if(ivideo->default_var.yres_virtual < ivideo->default_var.yres) {
				ivideo->default_var.yres_virtual = ivideo->default_var.yres;
			}
		}

		sisfb_calc_pitch(ivideo, &ivideo->default_var);

		ivideo->accel = 0;
		if(ivideo->sisfb_accel) {
			ivideo->accel = -1;
#ifdef STUPID_ACCELF_TEXT_SHIT
			ivideo->default_var.accel_flags |= FB_ACCELF_TEXT;
#endif
		}
		sisfb_initaccel(ivideo);

#if defined(FBINFO_HWACCEL_DISABLED) && defined(FBINFO_HWACCEL_XPAN)
		sis_fb_info->flags = FBINFO_DEFAULT |
				     FBINFO_HWACCEL_YPAN |
				     FBINFO_HWACCEL_XPAN |
				     FBINFO_HWACCEL_COPYAREA |
				     FBINFO_HWACCEL_FILLRECT |
				     ((ivideo->accel) ? 0 : FBINFO_HWACCEL_DISABLED);
#else
		sis_fb_info->flags = FBINFO_FLAG_DEFAULT;
#endif
		sis_fb_info->var = ivideo->default_var;
		sis_fb_info->fix = ivideo->sisfb_fix;
		sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset;
		sis_fb_info->fbops = &sisfb_ops;
		sis_fb_info->pseudo_palette = ivideo->pseudo_palette;

		fb_alloc_cmap(&sis_fb_info->cmap, 256, 0);

		printk(KERN_DEBUG "sisfb: Initial vbflags 0x%x\n", (int)ivideo->vbflags);

#ifdef CONFIG_MTRR
		ivideo->mtrr = mtrr_add(ivideo->video_base, ivideo->video_size,
					MTRR_TYPE_WRCOMB, 1);
		if(ivideo->mtrr < 0) {
			printk(KERN_DEBUG "sisfb: Failed to add MTRRs\n");
		}
#endif

		if(register_framebuffer(sis_fb_info) < 0) {
			printk(KERN_ERR "sisfb: Fatal error: Failed to register framebuffer\n");
			ret = -EINVAL;
			iounmap(ivideo->mmio_vbase);
			goto error_0;
		}

		ivideo->registered = 1;

		/* Enlist us */
		ivideo->next = card_list;
		card_list = ivideo;

		printk(KERN_INFO "sisfb: 2D acceleration is %s, y-panning %s\n",
			ivideo->sisfb_accel ? "enabled" : "disabled",
			ivideo->sisfb_ypan ?
				(ivideo->sisfb_max ? "enabled (auto-max)" :
						"enabled (no auto-max)") :
				"disabled");

		printk(KERN_INFO "fb%d: %s frame buffer device version %d.%d.%d\n",
			sis_fb_info->node, ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);

		printk(KERN_INFO "sisfb: Copyright (C) 2001-2005 Thomas Winischhofer\n");

	}	/* if mode = "none" */

	return 0;
}
/*****************************************************/
/* PCI DEVICE HANDLING */
/*****************************************************/
/*
 * sisfb_remove - tear down one adapter on driver unbind
 * @pdev: the PCI device being removed
 *
 * Reverses sisfb_probe(): unmaps and releases the memory regions,
 * frees the ROM copy, drops the bridge/LPC device references, deletes
 * the MTRR entry and unregisters the framebuffer.
 *
 * NOTE(review): the mappings and regions are released while the
 * framebuffer may still be registered (unregistration happens last);
 * this mirrors the historical ordering of this driver -- confirm
 * before reordering.
 *
 * Fixed: removed a stray ';' after the closing brace (an empty
 * file-scope declaration, rejected by -pedantic builds).
 */
static void sisfb_remove(struct pci_dev *pdev)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	struct fb_info *sis_fb_info = ivideo->memyselfandi;
	int registered = ivideo->registered;
	int modechanged = ivideo->modechanged;

	/* Unmap */
	iounmap(ivideo->mmio_vbase);
	iounmap(ivideo->video_vbase);

	/* Release mem regions */
	release_mem_region(ivideo->video_base, ivideo->video_size);
	release_mem_region(ivideo->mmio_base, ivideo->mmio_size);

	vfree(ivideo->bios_abase);

	if(ivideo->lpcdev)
		pci_dev_put(ivideo->lpcdev);

	if(ivideo->nbridge)
		pci_dev_put(ivideo->nbridge);

#ifdef CONFIG_MTRR
	/* Release MTRR region */
	if(ivideo->mtrr >= 0)
		mtrr_del(ivideo->mtrr, ivideo->video_base, ivideo->video_size);
#endif

	pci_set_drvdata(pdev, NULL);

	/* If device was disabled when starting, disable
	 * it when quitting.
	 */
	if(!ivideo->sisvga_enabled)
		pci_disable_device(pdev);

	/* Unregister the framebuffer */
	if(ivideo->registered) {
		unregister_framebuffer(sis_fb_info);
		framebuffer_release(sis_fb_info);
	}

	/* OK, our ivideo is gone for good from here. */

	/* TODO: Restore the initial mode
	 * This sounds easy but is as good as impossible
	 * on many machines with SiS chip and video bridge
	 * since text modes are always set up differently
	 * from machine to machine. Depends on the type
	 * of integration between chipset and bridge.
	 */
	if(registered && modechanged)
		printk(KERN_INFO
			"sisfb: Restoring of text mode not supported yet\n");
}
/* PCI driver glue: binds the device IDs in sisfb_pci_table to
 * sisfb_probe()/sisfb_remove(). */
static struct pci_driver sisfb_driver = {
	.name		= "sisfb",
	.id_table	= sisfb_pci_table,
	.probe		= sisfb_probe,
	.remove		= sisfb_remove,
};
/*
 * Common driver registration.  For the built-in (non-modular) build,
 * first fetch and parse the "video=sisfb:..." kernel command-line
 * options; -ENODEV from fb_get_options() means another driver owns
 * the display.  Returns the result of pci_register_driver().
 */
static int __init sisfb_init(void)
{
#ifndef MODULE
	char *options = NULL;

	if(fb_get_options("sisfb", &options))
		return -ENODEV;

	sisfb_setup(options);
#endif
	return pci_register_driver(&sisfb_driver);
}
#ifndef MODULE
/* Built-in only: the modular build registers via sisfb_init_module()
 * further below instead. */
module_init(sisfb_init);
#endif
/*****************************************************/
/* MODULE */
/*****************************************************/
#ifdef MODULE

/*
 * Module parameters.  These mirror the "video=sisfb:..." option string
 * of the built-in driver; sisfb_init_module() copies them into the
 * shared sisfb_* globals.  -1 / NULL / 0 means "not given, keep the
 * default".
 */
static char *mode = NULL;		/* mode as "XxYxDepth" (or VESA number) string */
static int vesa = -1;			/* mode as VESA number */
static unsigned int rate = 0;		/* CRT1 vertical refresh rate in Hz */
static unsigned int crt1off = 1;	/* legacy switch: 0 turns CRT1 off */
static unsigned int mem = 0;		/* start of the video RAM heap in KB */
static char *forcecrt2type = NULL;	/* override CRT2 detection (LCD/TV/VGA/NONE/...) */
static int forcecrt1 = -1;		/* override CRT1 detection (1=on, 0=off) */
static int pdc = -1;			/* LCD panel delay compensation */
static int pdc1 = -1;			/* PDC for LCD-via-CRT1 (315 series) */
static int noaccel = -1;		/* 1 = disable 2D acceleration */
static int noypan = -1;			/* 1 = disable y-panning */
static int nomax = -1;			/* 1 = don't maximize virtual Y size */
static int userom = -1;			/* 0 = don't use the video ROM */
static int useoem = -1;			/* 0 = don't use OEM data */
static char *tvstandard = NULL;		/* pal/ntsc/palm/paln */
static int nocrt2rate = 0;		/* 1 = default refresh rate for CRT2 VGA */
static int scalelcd = -1;		/* 1 = scale image to panel resolution */
static char *specialtiming = NULL;	/* special timing identifier, see docs */
static int lvdshl = -1;			/* LVDS highlight setting, see docs */
static int tvxposoffset = 0, tvyposoffset = 0;	/* TV output position tweaks */
#if !defined(__i386__) && !defined(__x86_64__)
static int resetcard = 0;		/* 1 = POST the card (non-x86 only) */
static int videoram = 0;		/* video RAM size in KB (non-x86 only) */
#endif
/*
 * Modular entry point: translate the insmod parameters into the shared
 * sisfb_* option globals, then run the common sisfb_init().  Tri-state
 * parameters (-1 = "not given") only override their default when the
 * user actually supplied 0 or 1.
 */
static int __init sisfb_init_module(void)
{
	sisfb_setdefaultparms();

	if (rate)
		sisfb_parm_rate = rate;

	/* The option stores the inverted sense of "scalelcd" */
	if (scalelcd == 0 || scalelcd == 1)
		sisfb_scalelcd = !scalelcd;

	/* Need to check crt2 type first for fstn/dstn */
	if (forcecrt2type)
		sisfb_search_crt2type(forcecrt2type);

	if (tvstandard)
		sisfb_search_tvstd(tvstandard);

	if (mode)
		sisfb_search_mode(mode, false);
	else if (vesa != -1)
		sisfb_search_vesamode(vesa, false);

	/* crt1off is the legacy switch; an explicit forcecrt1 wins */
	sisfb_crt1off = !crt1off;
	sisfb_forcecrt1 = forcecrt1;
	if (forcecrt1 == 0 || forcecrt1 == 1)
		sisfb_crt1off = !forcecrt1;

	if (noaccel == 0 || noaccel == 1)
		sisfb_accel = !noaccel;

	if (noypan == 0 || noypan == 1)
		sisfb_ypan = !noypan;

	if (nomax == 0 || nomax == 1)
		sisfb_max = !nomax;

	if (mem)
		sisfb_parm_mem = mem;

	if (userom != -1)
		sisfb_userom = userom;

	if (useoem != -1)
		sisfb_useoem = useoem;

	if (pdc != -1)
		sisfb_pdc = pdc & 0x7f;

	if (pdc1 != -1)
		sisfb_pdca = pdc1 & 0x1f;

	sisfb_nocrt2rate = nocrt2rate;

	if (specialtiming)
		sisfb_search_specialtiming(specialtiming);

	if (lvdshl >= 0 && lvdshl <= 3)
		sisfb_lvdshl = lvdshl;

	sisfb_tvxposoffset = tvxposoffset;
	sisfb_tvyposoffset = tvyposoffset;

#if !defined(__i386__) && !defined(__x86_64__)
	sisfb_resetcard = resetcard ? 1 : 0;
	if (videoram)
		sisfb_videoram = videoram;
#endif

	return sisfb_init();
}
/* Module unload: detach from the PCI core (this invokes sisfb_remove()
 * for every bound device), then log the unload. */
static void __exit sisfb_remove_module(void)
{
	pci_unregister_driver(&sisfb_driver);
	printk(KERN_DEBUG "sisfb: Module unloaded\n");
}
/* Modular build: register/unregister through the wrappers above. */
module_init(sisfb_init_module);
module_exit(sisfb_remove_module);

MODULE_DESCRIPTION("SiS 300/540/630/730/315/55x/65x/661/74x/330/76x/34x, XGI V3XT/V5/V8/Z7 framebuffer device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Winischhofer <thomas@winischhofer.net>, Others");
/* Expose the parameters to insmod/modprobe; permission 0 means they
 * are not visible or writable through sysfs after load. */
module_param(mem, int, 0);
module_param(noaccel, int, 0);
module_param(noypan, int, 0);
module_param(nomax, int, 0);
module_param(userom, int, 0);
module_param(useoem, int, 0);
module_param(mode, charp, 0);
module_param(vesa, int, 0);
module_param(rate, int, 0);
module_param(forcecrt1, int, 0);
module_param(forcecrt2type, charp, 0);
module_param(scalelcd, int, 0);
module_param(pdc, int, 0);
module_param(pdc1, int, 0);
module_param(specialtiming, charp, 0);
module_param(lvdshl, int, 0);
module_param(tvstandard, charp, 0);
module_param(tvxposoffset, int, 0);
module_param(tvyposoffset, int, 0);
module_param(nocrt2rate, int, 0);
#if !defined(__i386__) && !defined(__x86_64__)
module_param(resetcard, int, 0);
module_param(videoram, int, 0);
#endif
/* modinfo help texts; the strings are user-visible and kept verbatim. */
MODULE_PARM_DESC(mem,
	"\nDetermines the beginning of the video memory heap in KB. This heap is used\n"
	  "for video RAM management for eg. DRM/DRI. On 300 series, the default depends\n"
	  "on the amount of video RAM available. If 8MB of video RAM or less is available,\n"
	  "the heap starts at 4096KB, if between 8 and 16MB are available at 8192KB,\n"
	  "otherwise at 12288KB. On 315/330/340 series, the heap size is 32KB by default.\n"
	  "The value is to be specified without 'KB'.\n");

MODULE_PARM_DESC(noaccel,
	"\nIf set to anything other than 0, 2D acceleration will be disabled.\n"
	  "(default: 0)\n");

MODULE_PARM_DESC(noypan,
	"\nIf set to anything other than 0, y-panning will be disabled and scrolling\n"
	  "will be performed by redrawing the screen. (default: 0)\n");

MODULE_PARM_DESC(nomax,
	"\nIf y-panning is enabled, sisfb will by default use the entire available video\n"
	  "memory for the virtual screen in order to optimize scrolling performance. If\n"
	  "this is set to anything other than 0, sisfb will not do this and thereby \n"
	  "enable the user to positively specify a virtual Y size of the screen using\n"
	  "fbset. (default: 0)\n");

MODULE_PARM_DESC(mode,
	"\nSelects the desired default display mode in the format XxYxDepth,\n"
	 "eg. 1024x768x16. Other formats supported include XxY-Depth and\n"
	 "XxY-Depth@Rate. If the parameter is only one (decimal or hexadecimal)\n"
	 "number, it will be interpreted as a VESA mode number. (default: 800x600x8)\n");

MODULE_PARM_DESC(vesa,
	"\nSelects the desired default display mode by VESA defined mode number, eg.\n"
	 "0x117 (default: 0x0103)\n");

MODULE_PARM_DESC(rate,
	"\nSelects the desired vertical refresh rate for CRT1 (external VGA) in Hz.\n"
	  "If the mode is specified in the format XxY-Depth@Rate, this parameter\n"
	  "will be ignored (default: 60)\n");

MODULE_PARM_DESC(forcecrt1,
	"\nNormally, the driver autodetects whether or not CRT1 (external VGA) is \n"
	  "connected. With this option, the detection can be overridden (1=CRT1 ON,\n"
	  "0=CRT1 OFF) (default: [autodetected])\n");

MODULE_PARM_DESC(forcecrt2type,
	"\nIf this option is omitted, the driver autodetects CRT2 output devices, such as\n"
	  "LCD, TV or secondary VGA. With this option, this autodetection can be\n"
	  "overridden. Possible parameters are LCD, TV, VGA or NONE. NONE disables CRT2.\n"
	  "On systems with a SiS video bridge, parameters SVIDEO, COMPOSITE or SCART can\n"
	  "be used instead of TV to override the TV detection. Furthermore, on systems\n"
	  "with a SiS video bridge, SVIDEO+COMPOSITE, HIVISION, YPBPR480I, YPBPR480P,\n"
	  "YPBPR720P and YPBPR1080I are understood. However, whether or not these work\n"
	  "depends on the very hardware in use. (default: [autodetected])\n");

MODULE_PARM_DESC(scalelcd,
	"\nSetting this to 1 will force the driver to scale the LCD image to the panel's\n"
	  "native resolution. Setting it to 0 will disable scaling; LVDS panels will\n"
	  "show black bars around the image, TMDS panels will probably do the scaling\n"
	  "themselves. Default: 1 on LVDS panels, 0 on TMDS panels\n");

MODULE_PARM_DESC(pdc,
	"\nThis is for manually selecting the LCD panel delay compensation. The driver\n"
	  "should detect this correctly in most cases; however, sometimes this is not\n"
	  "possible. If you see 'small waves' on the LCD, try setting this to 4, 32 or 24\n"
	  "on a 300 series chipset; 6 on other chipsets. If the problem persists, try\n"
	  "other values (on 300 series: between 4 and 60 in steps of 4; otherwise: any\n"
	  "value from 0 to 31). (default: autodetected, if LCD is active during start)\n");

#ifdef CONFIG_FB_SIS_315
MODULE_PARM_DESC(pdc1,
	"\nThis is same as pdc, but for LCD-via CRT1. Hence, this is for the 315/330/340\n"
	  "series only. (default: autodetected if LCD is in LCD-via-CRT1 mode during\n"
	  "startup) - Note: currently, this has no effect because LCD-via-CRT1 is not\n"
	  "implemented yet.\n");
#endif

MODULE_PARM_DESC(specialtiming,
	"\nPlease refer to documentation for more information on this option.\n");

MODULE_PARM_DESC(lvdshl,
	"\nPlease refer to documentation for more information on this option.\n");

MODULE_PARM_DESC(tvstandard,
	"\nThis allows overriding the BIOS default for the TV standard. Valid choices are\n"
	  "pal, ntsc, palm and paln. (default: [auto; pal or ntsc only])\n");

MODULE_PARM_DESC(tvxposoffset,
	"\nRelocate TV output horizontally. Possible parameters: -32 through 32.\n"
	  "Default: 0\n");

MODULE_PARM_DESC(tvyposoffset,
	"\nRelocate TV output vertically. Possible parameters: -32 through 32.\n"
	  "Default: 0\n");

MODULE_PARM_DESC(nocrt2rate,
	"\nSetting this to 1 will force the driver to use the default refresh rate for\n"
	  "CRT2 if CRT2 type is VGA. (default: 0, use same rate as CRT1)\n");

#if !defined(__i386__) && !defined(__x86_64__)
#ifdef CONFIG_FB_SIS_300
MODULE_PARM_DESC(resetcard,
	"\nSet this to 1 in order to reset (POST) the card on non-x86 machines where\n"
	  "the BIOS did not POST the card (only supported for SiS 300/305 and XGI cards\n"
	  "currently). Default: 0\n");

MODULE_PARM_DESC(videoram,
	"\nSet this to the amount of video RAM (in kilobyte) the card has. Required on\n"
	  "some non-x86 architectures where the memory auto detection fails. Only\n"
	  "relevant if resetcard is set, too. SiS300/305 only. Default: [auto-detect]\n");
#endif
#endif
#endif /* /MODULE */
/* Exported entry points for out-of-file users (functions defined
 * elsewhere in this driver). _GPL only for new symbols. */
EXPORT_SYMBOL(sis_malloc);
EXPORT_SYMBOL(sis_free);
EXPORT_SYMBOL_GPL(sis_malloc_new);
EXPORT_SYMBOL_GPL(sis_free_new);
| gpl-2.0 |
DESHONOR/android_kernel_huawei_msm8916 | drivers/video/pmag-ba-fb.c | 2395 | 6638 | /*
* linux/drivers/video/pmag-ba-fb.c
*
* PMAG-BA TURBOchannel Color Frame Buffer (CFB) card support,
* derived from:
* "HP300 Topcat framebuffer support (derived from macfb of all things)
* Phil Blundell <philb@gnu.org> 1998", the original code can be
* found in the file hpfb.c in the same directory.
*
* Based on digital document:
* "PMAG-BA TURBOchannel Color Frame Buffer
* Functional Specification", Revision 1.2, August 27, 1990
*
* DECstation related code Copyright (C) 1999, 2000, 2001 by
* Michael Engel <engel@unix-ag.org>,
* Karsten Merker <merker@linuxtag.org> and
* Harald Koerfgen.
* Copyright (c) 2005, 2006 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/tc.h>
#include <linux/types.h>
#include <asm/io.h>
#include <video/pmag-ba-fb.h>
/* Driver-private state: base addresses of the card's register space
 * and of the Bt459 RAMDAC register file.  The DAC registers sit on
 * 32-bit slots, hence the u32 pointer -- see dac_read()/dac_write(). */
struct pmagbafb_par {
	volatile void __iomem *mmio;	/* mapped register space (fix.mmio_start) */
	volatile u32 __iomem *dac;	/* Bt459 DAC registers */
};
/*
 * The single supported video mode: 1024x864 at 8 bits per pixel,
 * sync-on-green, non-interlaced.
 */
static struct fb_var_screeninfo pmagbafb_defined = {
	.xres		= 1024,
	.yres		= 864,
	.xres_virtual	= 1024,
	.yres_virtual	= 864,
	.bits_per_pixel	= 8,
	.red.length	= 8,
	.green.length	= 8,
	.blue.length	= 8,
	.activate	= FB_ACTIVATE_NOW,
	.height		= -1,		/* physical dimensions unknown */
	.width		= -1,
	.accel_flags	= FB_ACCEL_NONE,
	.pixclock	= 14452,	/* picoseconds per pixel */
	.left_margin	= 116,
	.right_margin	= 12,
	.upper_margin	= 34,
	.lower_margin	= 12,
	.hsync_len	= 128,
	.vsync_len	= 3,
	.sync		= FB_SYNC_ON_GREEN,
	.vmode		= FB_VMODE_NONINTERLACED,
};
/* Fixed parameters: 1MB packed-pixel pseudocolor framebuffer with
 * 1024-byte lines; the MMIO window excludes the Bt459 register area. */
static struct fb_fix_screeninfo pmagbafb_fix = {
	.id		= "PMAG-BA",
	.smem_len	= (1024 * 1024),
	.type		= FB_TYPE_PACKED_PIXELS,
	.visual		= FB_VISUAL_PSEUDOCOLOR,
	.line_length	= 1024,
	.mmio_len	= PMAG_BA_SIZE - PMAG_BA_BT459,
};
/* Write one byte to a Bt459 DAC register; byte offsets are spaced on
 * 32-bit slots, so the register number is scaled down by four. */
static inline void dac_write(struct pmagbafb_par *par, unsigned int reg, u8 v)
{
	volatile u32 __iomem *slot = par->dac + (reg >> 2);

	writeb(v, slot);
}
static inline u8 dac_read(struct pmagbafb_par *par, unsigned int reg)
{
return readb(par->dac + reg / 4);
}
/*
 * Set one palette entry.
 *
 * Returns 1 for an out-of-range entry index, 0 on success.  The cmap
 * components are truncated from 16 to 8 bits for the hardware.  The
 * entry is selected through the Bt459 ADDR_LO/ADDR_HI register pair,
 * then red, green and blue are streamed into the colormap register;
 * the memory barriers keep these MMIO writes in program order.
 */
static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
			      unsigned int green, unsigned int blue,
			      unsigned int transp, struct fb_info *info)
{
	struct pmagbafb_par *par = info->par;

	if (regno >= info->cmap.len)
		return 1;

	red   >>= 8;	/* The cmap fields are 16 bits    */
	green >>= 8;	/* wide, but the hardware colormap */
	blue  >>= 8;	/* registers are only 8 bits wide */

	mb();
	dac_write(par, BT459_ADDR_LO, regno);
	dac_write(par, BT459_ADDR_HI, 0x00);
	wmb();
	dac_write(par, BT459_CMAP, red);
	wmb();
	dac_write(par, BT459_CMAP, green);
	wmb();
	dac_write(par, BT459_CMAP, blue);

	return 0;
}
/* Frame buffer operations; drawing is done by the generic cfb_* helpers. */
static struct fb_ops pmagbafb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= pmagbafb_setcolreg,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};
/*
 * Turn the hardware cursor off.
 */
static void __init pmagbafb_erase_cursor(struct fb_info *info)
{
	struct pmagbafb_par *par = info->par;

	/* select Bt459 register 0x0300 and zero it to disable the cursor */
	mb();
	dac_write(par, BT459_ADDR_LO, 0x00);
	dac_write(par, BT459_ADDR_HI, 0x03);
	wmb();
	dac_write(par, BT459_DATA, 0x00);
}
/*
 * Bind to a PMAG-BA option card: allocate the fb_info, map the MMIO
 * and frame buffer apertures, quiesce the RAMDAC cursor and register
 * the frame buffer.  Resources are unwound in reverse order on error.
 */
static int pmagbafb_probe(struct device *dev)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	resource_size_t start, len;
	struct fb_info *info;
	struct pmagbafb_par *par;
	int err;

	info = framebuffer_alloc(sizeof(struct pmagbafb_par), dev);
	if (!info) {
		printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev));
		return -ENOMEM;
	}

	par = info->par;
	dev_set_drvdata(dev, info);

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		printk(KERN_ERR "%s: Cannot allocate color map\n",
		       dev_name(dev));
		err = -ENOMEM;
		goto err_alloc;
	}

	info->fbops = &pmagbafb_ops;
	info->fix = pmagbafb_fix;
	info->var = pmagbafb_defined;
	info->flags = FBINFO_DEFAULT;

	/* Request the I/O MEM resource.  */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(dev))) {
		printk(KERN_ERR "%s: Cannot reserve FB region\n",
		       dev_name(dev));
		err = -EBUSY;
		goto err_cmap;
	}

	/* MMIO mapping setup.  */
	info->fix.mmio_start = start;
	par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
	if (!par->mmio) {
		printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
		err = -ENOMEM;
		goto err_resource;
	}
	/* the Bt459 RAMDAC lives at a fixed offset in the register window */
	par->dac = par->mmio + PMAG_BA_BT459;

	/* Frame buffer mapping setup.  */
	info->fix.smem_start = start + PMAG_BA_FBMEM;
	info->screen_base = ioremap_nocache(info->fix.smem_start,
					    info->fix.smem_len);
	if (!info->screen_base) {
		printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
		err = -ENOMEM;
		goto err_mmio_map;
	}
	info->screen_size = info->fix.smem_len;

	pmagbafb_erase_cursor(info);

	err = register_framebuffer(info);
	if (err < 0) {
		printk(KERN_ERR "%s: Cannot register framebuffer\n",
		       dev_name(dev));
		goto err_smem_map;
	}

	/* hold a device reference for the lifetime of the frame buffer */
	get_device(dev);

	pr_info("fb%d: %s frame buffer device at %s\n",
		info->node, info->fix.id, dev_name(dev));

	return 0;

err_smem_map:
	iounmap(info->screen_base);

err_mmio_map:
	iounmap(par->mmio);

err_resource:
	release_mem_region(start, len);

err_cmap:
	fb_dealloc_cmap(&info->cmap);

err_alloc:
	framebuffer_release(info);
	return err;
}
/* Unbind from the card: reverse of pmagbafb_probe(). */
static int __exit pmagbafb_remove(struct device *dev)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	struct fb_info *info = dev_get_drvdata(dev);
	struct pmagbafb_par *par = info->par;
	resource_size_t start, len;

	put_device(dev);	/* drop the reference taken in probe */
	unregister_framebuffer(info);
	iounmap(info->screen_base);
	iounmap(par->mmio);
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);
	fb_dealloc_cmap(&info->cmap);
	framebuffer_release(info);
	return 0;
}
/*
 * TURBOchannel device identification: vendor and board name strings
 * matched against the option ROM.
 */
static const struct tc_device_id pmagbafb_tc_table[] = {
	{ "DEC     ", "PMAG-BA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, pmagbafb_tc_table);
/* TURBOchannel driver glue; remove is compiled out for built-in kernels. */
static struct tc_driver pmagbafb_driver = {
	.id_table	= pmagbafb_tc_table,
	.driver		= {
		.name	= "pmagbafb",
		.bus	= &tc_bus_type,
		.probe	= pmagbafb_probe,
		.remove	= __exit_p(pmagbafb_remove),
	},
};
/* Module entry: register the TURBOchannel driver. */
static int __init pmagbafb_init(void)
{
#ifndef MODULE
	/* built-in only: honour "video=pmagbafb:off" on the command line */
	if (fb_get_options("pmagbafb", NULL))
		return -ENXIO;
#endif
	return tc_register_driver(&pmagbafb_driver);
}
/* Module exit: unregister the TURBOchannel driver. */
static void __exit pmagbafb_exit(void)
{
	tc_unregister_driver(&pmagbafb_driver);
}
module_init(pmagbafb_init);
module_exit(pmagbafb_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ayushtyagi28/android_kernel_cyanogen_msm8994 | arch/frv/mb93090-mb00/pci-irq.c | 2651 | 1730 | /* pci-irq.c: PCI IRQ routing on the FRV motherboard
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* derived from: arch/i386/kernel/pci-irq.c: (c) 1999--2000 Martin Mares <mj@suse.cz>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include "pci-frv.h"
/*
* DEVICE DEVNO INT#A INT#B INT#C INT#D
* ======= ======= ======= ======= ======= =======
* MB86943 0 fpga.10 - - -
* RTL8029 16 fpga.12 - - -
* SLOT 1 19 fpga.6 fpga.5 fpga.4 fpga.3
* SLOT 2 18 fpga.5 fpga.4 fpga.3 fpga.6
* SLOT 3 17 fpga.4 fpga.3 fpga.6 fpga.5
*
*/
/* IRQ routing indexed by bus-0 device number and interrupt pin (INT#A..D);
 * see the table in the comment above.  Unlisted slots route to IRQ 0. */
static const uint8_t __initconst pci_bus0_irq_routing[32][4] = {
	[0 ] = { IRQ_FPGA_MB86943_PCI_INTA },
	[16] = { IRQ_FPGA_RTL8029_INTA },
	[17] = { IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB },
	[18] = { IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA },
	[19] = { IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD },
};
/* No IRQ router setup is needed on this board. */
void __init pcibios_irq_init(void)
{
}
/*
 * Assign each PCI device its IRQ from the static bus-0 routing table
 * and write it back into PCI_INTERRUPT_LINE in config space.
 */
void __init pcibios_fixup_irqs(void)
{
	struct pci_dev *dev = NULL;
	uint8_t line, pin;

	for_each_pci_dev(dev) {
		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
		if (pin) {
			/* pin is 1-based (1 == INT#A) */
			dev->irq = pci_bus0_irq_routing[PCI_SLOT(dev->devfn)][pin - 1];
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
		}
		/* read back PCI_INTERRUPT_LINE; the value is not used */
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
	}
}
/* No ISA IRQ sharing penalties apply on this platform. */
void __init pcibios_penalize_isa_irq(int irq)
{
}
/* Propagate the kernel-assigned IRQ into the device's config space. */
void pcibios_enable_irq(struct pci_dev *dev)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
}
| gpl-2.0 |
lnfamous/Kernel_Htc_Pico_CyanogenMod9 | sound/core/pcm_compat.c | 7259 | 15394 | /*
* 32bit -> 64bit ioctl wrapper for PCM API
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file included from pcm_native.c */
#include <linux/compat.h>
#include <linux/slab.h>
/*
 * Compat SNDRV_PCM_IOCTL_DELAY: the native handler fills a 64-bit frame
 * count which is stored back into the caller's 32-bit slot.
 */
static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
				      s32 __user *src)
{
	snd_pcm_sframes_t delay;
	mm_segment_t fs;
	int err;

	/* snd_pcm_delay() writes through what it treats as a user pointer;
	 * widen the address limit around the call so a kernel buffer works */
	fs = snd_enter_user();
	err = snd_pcm_delay(substream, &delay);
	snd_leave_user(fs);
	if (err < 0)
		return err;
	if (put_user(delay, src))
		return -EFAULT;
	return err;
}
/*
 * Compat SNDRV_PCM_IOCTL_REWIND: frame count is 32-bit in the compat ABI.
 * The result (frames actually rewound, or a negative error) is written
 * back in place.
 */
static int snd_pcm_ioctl_rewind_compat(struct snd_pcm_substream *substream,
				       u32 __user *src)
{
	snd_pcm_uframes_t frames;
	int err;

	if (get_user(frames, src))
		return -EFAULT;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_playback_rewind(substream, frames);
	else
		err = snd_pcm_capture_rewind(substream, frames);
	if (put_user(err, src))
		return -EFAULT;
	return err < 0 ? err : 0;
}
/*
 * Compat SNDRV_PCM_IOCTL_FORWARD: read a 32-bit frame count, advance the
 * stream, and write the result (frames forwarded or error) back in place.
 */
static int snd_pcm_ioctl_forward_compat(struct snd_pcm_substream *substream,
					u32 __user *src)
{
	snd_pcm_uframes_t count;
	int res;

	if (get_user(count, src))
		return -EFAULT;
	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		res = snd_pcm_capture_forward(substream, count);
	else
		res = snd_pcm_playback_forward(substream, count);
	if (put_user(res, src))
		return -EFAULT;
	return res < 0 ? res : 0;
}
/* 32-bit layout of struct snd_pcm_hw_params; identical to the native one
 * except for the u32 fifo_size, so the compat handler copies it wholesale
 * and fixes up fifo_size separately. */
struct snd_pcm_hw_params32 {
	u32 flags;
	struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; /* this must be identical */
	struct snd_mask mres[5];	/* reserved masks */
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1];
	struct snd_interval ires[9];	/* reserved intervals */
	u32 rmask;
	u32 cmask;
	u32 info;
	u32 msbits;
	u32 rate_num;
	u32 rate_den;
	u32 fifo_size;
	unsigned char reserved[64];
};
/* 32-bit layout of struct snd_pcm_sw_params: all frame counts shrink to
 * u32, so the boundary must be recalculated for 32-bit user space. */
struct snd_pcm_sw_params32 {
	s32 tstamp_mode;
	u32 period_step;
	u32 sleep_min;
	u32 avail_min;
	u32 xfer_align;
	u32 start_threshold;
	u32 stop_threshold;
	u32 silence_threshold;
	u32 silence_size;
	u32 boundary;
	unsigned char reserved[64];
};
/* Recalculate the boundary so it fits within 32 bits: the largest
 * power-of-two multiple of the buffer size not exceeding 0x7fffffff. */
static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t b;

	if (!runtime->buffer_size)
		return 0;
	for (b = runtime->buffer_size;
	     b * 2 <= 0x7fffffffUL - runtime->buffer_size;
	     b *= 2)
		;
	return b;
}
/*
 * Compat SNDRV_PCM_IOCTL_SW_PARAMS: convert the 32-bit request to the
 * native layout, apply it, and report the recalculated 32-bit boundary
 * back to user space.
 *
 * Fix: the text contained the mojibake "¶ms" (an HTML-entity-mangled
 * "&params"), which does not compile; the address-of expressions are
 * restored in memset() and the snd_pcm_sw_params() call.
 */
static int snd_pcm_ioctl_sw_params_compat(struct snd_pcm_substream *substream,
					  struct snd_pcm_sw_params32 __user *src)
{
	struct snd_pcm_sw_params params;
	snd_pcm_uframes_t boundary;
	int err;

	memset(&params, 0, sizeof(params));
	if (get_user(params.tstamp_mode, &src->tstamp_mode) ||
	    get_user(params.period_step, &src->period_step) ||
	    get_user(params.sleep_min, &src->sleep_min) ||
	    get_user(params.avail_min, &src->avail_min) ||
	    get_user(params.xfer_align, &src->xfer_align) ||
	    get_user(params.start_threshold, &src->start_threshold) ||
	    get_user(params.stop_threshold, &src->stop_threshold) ||
	    get_user(params.silence_threshold, &src->silence_threshold) ||
	    get_user(params.silence_size, &src->silence_size))
		return -EFAULT;
	/*
	 * Check silence_size parameter.  Since we have a 64-bit boundary,
	 * silence_size must be compared with the 32-bit boundary.
	 */
	boundary = recalculate_boundary(substream->runtime);
	if (boundary && params.silence_size >= boundary)
		params.silence_size = substream->runtime->boundary;
	err = snd_pcm_sw_params(substream, &params);
	if (err < 0)
		return err;
	/* tell user space the boundary that matches its 32-bit view */
	if (boundary && put_user(boundary, &src->boundary))
		return -EFAULT;
	return err;
}
/* 32-bit layout of struct snd_pcm_channel_info (offset shrinks to u32). */
struct snd_pcm_channel_info32 {
	u32 channel;
	u32 offset;
	u32 first;
	u32 step;
};
/*
 * Compat SNDRV_PCM_IOCTL_CHANNEL_INFO: copy the request in, run the
 * native handler, and copy the (narrowed) result back field by field.
 */
static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream,
					     struct snd_pcm_channel_info32 __user *src)
{
	struct snd_pcm_channel_info info;
	int err;

	if (get_user(info.channel, &src->channel) ||
	    get_user(info.offset, &src->offset) ||
	    get_user(info.first, &src->first) ||
	    get_user(info.step, &src->step))
		return -EFAULT;
	err = snd_pcm_channel_info(substream, &info);
	if (err < 0)
		return err;
	if (put_user(info.channel, &src->channel) ||
	    put_user(info.offset, &src->offset) ||
	    put_user(info.first, &src->first) ||
	    put_user(info.step, &src->step))
		return -EFAULT;
	return err;
}
/* 32-bit layout of struct snd_pcm_status: compat timespecs and u32 frame
 * counters; packed to match the 32-bit user-space layout exactly. */
struct snd_pcm_status32 {
	s32 state;
	struct compat_timespec trigger_tstamp;
	struct compat_timespec tstamp;
	u32 appl_ptr;
	u32 hw_ptr;
	s32 delay;
	u32 avail;
	u32 avail_max;
	u32 overrange;
	s32 suspended_state;
	unsigned char reserved[60];
} __attribute__((packed));
/*
 * Compat SNDRV_PCM_IOCTL_STATUS: fill a native status record and store
 * it field by field into the packed 32-bit user structure.
 */
static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
				      struct snd_pcm_status32 __user *src)
{
	struct snd_pcm_status status;
	int err;

	err = snd_pcm_status(substream, &status);
	if (err < 0)
		return err;

	if (put_user(status.state, &src->state) ||
	    put_user(status.trigger_tstamp.tv_sec, &src->trigger_tstamp.tv_sec) ||
	    put_user(status.trigger_tstamp.tv_nsec, &src->trigger_tstamp.tv_nsec) ||
	    put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
	    put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
	    put_user(status.appl_ptr, &src->appl_ptr) ||
	    put_user(status.hw_ptr, &src->hw_ptr) ||
	    put_user(status.delay, &src->delay) ||
	    put_user(status.avail, &src->avail) ||
	    put_user(status.avail_max, &src->avail_max) ||
	    put_user(status.overrange, &src->overrange) ||
	    put_user(status.suspended_state, &src->suspended_state))
		return -EFAULT;

	return err;
}
/* both for HW_PARAMS and HW_REFINE */
/*
 * Compat HW_PARAMS/HW_REFINE: the 32-bit and native structures differ
 * only in fifo_size, so the struct is copied wholesale and fifo_size is
 * stored back separately.  After a successful HW_PARAMS the runtime
 * boundary is recalculated for the 32-bit view.
 */
static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
					  int refine,
					  struct snd_pcm_hw_params32 __user *data32)
{
	struct snd_pcm_hw_params *data;
	struct snd_pcm_runtime *runtime;
	int err;

	if (! (runtime = substream->runtime))
		return -ENOTTY;

	/* only fifo_size is different, so just copy all */
	data = memdup_user(data32, sizeof(*data32));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (refine)
		err = snd_pcm_hw_refine(substream, data);
	else
		err = snd_pcm_hw_params(substream, data);
	if (err < 0)
		goto error;
	if (copy_to_user(data32, data, sizeof(*data32)) ||
	    put_user(data->fifo_size, &data32->fifo_size)) {
		err = -EFAULT;
		goto error;
	}

	if (! refine) {
		unsigned int new_boundary = recalculate_boundary(runtime);
		if (new_boundary)
			runtime->boundary = new_boundary;
	}
 error:
	kfree(data);
	return err;
}
/*
*/
/* 32-bit interleaved-transfer request: buf is a 32-bit user pointer. */
struct snd_xferi32 {
	s32 result;
	u32 buf;
	u32 frames;
};
/*
 * Compat interleaved read/write: remap the 32-bit buffer pointer and
 * run the transfer directly; the frame result goes into data32->result.
 */
static int snd_pcm_ioctl_xferi_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xferi32 __user *data32)
{
	compat_caddr_t buf;
	u32 frames;
	int err;

	if (! substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	if (get_user(buf, &data32->buf) ||
	    get_user(frames, &data32->frames))
		return -EFAULT;

	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_lib_write(substream, compat_ptr(buf), frames);
	else
		err = snd_pcm_lib_read(substream, compat_ptr(buf), frames);
	if (err < 0)
		return err;
	/* copy the result */
	if (put_user(err, &data32->result))
		return -EFAULT;
	return 0;
}
/* snd_xfern needs remapping of bufs */
/* 32-bit non-interleaved transfer request: bufs is a 32-bit user pointer
 * to an array of 32-bit per-channel buffer pointers. */
struct snd_xfern32 {
	s32 result;
	u32 bufs;	/* this is void **; */
	u32 frames;
};
/*
 * The xfern ioctl needs to copy (up to) 128 pointers on stack.
 * Although we may pass the copied pointers through f_op->ioctl, the ioctl
 * handler there expands again the same 128 pointers on stack, so it is
 * better to handle the function (calling pcm_readv/writev) directly in
 * this handler.
 *
 * Improvement: the error path inside the remap loop duplicated the
 * kfree()/return pair; a single goto-based cleanup exit now frees the
 * pointer table on every path.
 */
static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xfern32 __user *data32)
{
	compat_caddr_t buf;
	compat_caddr_t __user *bufptr;
	u32 frames;
	void __user **bufs;
	int err, ch, i;

	if (! substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;

	/* the channel count bounds the size of the pointer table */
	if ((ch = substream->runtime->channels) > 128)
		return -EINVAL;
	if (get_user(buf, &data32->bufs) ||
	    get_user(frames, &data32->frames))
		return -EFAULT;
	bufptr = compat_ptr(buf);
	bufs = kmalloc(sizeof(void __user *) * ch, GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;

	/* remap each 32-bit user pointer to a native one */
	for (i = 0; i < ch; i++) {
		u32 ptr;
		if (get_user(ptr, bufptr)) {
			err = -EFAULT;
			goto out;
		}
		bufs[i] = compat_ptr(ptr);
		bufptr++;
	}

	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_lib_writev(substream, bufs, frames);
	else
		err = snd_pcm_lib_readv(substream, bufs, frames);
	if (err >= 0) {
		if (put_user(err, &data32->result))
			err = -EFAULT;
	}
 out:
	kfree(bufs);
	return err;
}
/* 32-bit mmap status record; packed to match the user-space layout. */
struct snd_pcm_mmap_status32 {
	s32 state;
	s32 pad1;
	u32 hw_ptr;
	struct compat_timespec tstamp;
	s32 suspended_state;
} __attribute__((packed));
/* 32-bit mmap control record (application pointer and wakeup minimum). */
struct snd_pcm_mmap_control32 {
	u32 appl_ptr;
	u32 avail_min;
};
/* 32-bit SYNC_PTR request: status/control unions padded to 64 bytes,
 * mirroring the native struct snd_pcm_sync_ptr. */
struct snd_pcm_sync_ptr32 {
	u32 flags;
	union {
		struct snd_pcm_mmap_status32 status;
		unsigned char reserved[64];
	} s;
	union {
		struct snd_pcm_mmap_control32 control;
		unsigned char reserved[64];
	} c;
} __attribute__((packed));
/*
 * Compat SNDRV_PCM_IOCTL_SYNC_PTR: since 32-bit user space cannot mmap
 * the 64-bit status/control records, exchange them through this ioctl.
 * Flags select the copy direction per field; pointers returned to user
 * space are folded into the 32-bit boundary.
 */
static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
					 struct snd_pcm_sync_ptr32 __user *src)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	u32 sflags;
	struct snd_pcm_mmap_control scontrol;
	struct snd_pcm_mmap_status sstatus;
	snd_pcm_uframes_t boundary;
	int err;

	if (snd_BUG_ON(!runtime))
		return -EINVAL;

	if (get_user(sflags, &src->flags) ||
	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    get_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;
	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	status = runtime->status;
	control = runtime->control;
	boundary = recalculate_boundary(runtime);
	if (! boundary)
		boundary = 0x7fffffff;
	/* snapshot the shared records under the stream lock */
	snd_pcm_stream_lock_irq(substream);
	/* FIXME: we should consider the boundary for the sync from app */
	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
		control->appl_ptr = scontrol.appl_ptr;
	else
		scontrol.appl_ptr = control->appl_ptr % boundary;
	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = scontrol.avail_min;
	else
		scontrol.avail_min = control->avail_min;
	sstatus.state = status->state;
	sstatus.hw_ptr = status->hw_ptr % boundary;
	sstatus.tstamp = status->tstamp;
	sstatus.suspended_state = status->suspended_state;
	snd_pcm_stream_unlock_irq(substream);
	if (put_user(sstatus.state, &src->s.status.state) ||
	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp.tv_sec) ||
	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp.tv_nsec) ||
	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    put_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;

	return 0;
}
/*
*/
/* Compat ioctl numbers: same command codes as the native ABI, but encoded
 * against the sizes of the 32-bit structures above. */
enum {
	SNDRV_PCM_IOCTL_HW_REFINE32 = _IOWR('A', 0x10, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_HW_PARAMS32 = _IOWR('A', 0x11, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_SW_PARAMS32 = _IOWR('A', 0x13, struct snd_pcm_sw_params32),
	SNDRV_PCM_IOCTL_STATUS32 = _IOR('A', 0x20, struct snd_pcm_status32),
	SNDRV_PCM_IOCTL_DELAY32 = _IOR('A', 0x21, s32),
	SNDRV_PCM_IOCTL_CHANNEL_INFO32 = _IOR('A', 0x32, struct snd_pcm_channel_info32),
	SNDRV_PCM_IOCTL_REWIND32 = _IOW('A', 0x46, u32),
	SNDRV_PCM_IOCTL_FORWARD32 = _IOW('A', 0x49, u32),
	SNDRV_PCM_IOCTL_WRITEI_FRAMES32 = _IOW('A', 0x50, struct snd_xferi32),
	SNDRV_PCM_IOCTL_READI_FRAMES32 = _IOR('A', 0x51, struct snd_xferi32),
	SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
	SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
	SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
};
/*
 * Compat ioctl entry point: commands whose layout is identical in the
 * 32-bit ABI are forwarded straight to the native handlers; all others
 * are dispatched to the conversion wrappers above.
 */
static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	void __user *argp = compat_ptr(arg);

	pcm_file = file->private_data;
	if (! pcm_file)
		return -ENOTTY;
	substream = pcm_file->substream;
	if (! substream)
		return -ENOTTY;

	/*
	 * When PCM is used on 32bit mode, we need to disable
	 * mmap of PCM status/control records because of the size
	 * incompatibility.
	 */
	pcm_file->no_compat_mmap = 1;

	switch (cmd) {
	/* these commands have an identical 32/64-bit layout */
	case SNDRV_PCM_IOCTL_PVERSION:
	case SNDRV_PCM_IOCTL_INFO:
	case SNDRV_PCM_IOCTL_TSTAMP:
	case SNDRV_PCM_IOCTL_TTSTAMP:
	case SNDRV_PCM_IOCTL_HWSYNC:
	case SNDRV_PCM_IOCTL_PREPARE:
	case SNDRV_PCM_IOCTL_RESET:
	case SNDRV_PCM_IOCTL_START:
	case SNDRV_PCM_IOCTL_DROP:
	case SNDRV_PCM_IOCTL_DRAIN:
	case SNDRV_PCM_IOCTL_PAUSE:
	case SNDRV_PCM_IOCTL_HW_FREE:
	case SNDRV_PCM_IOCTL_RESUME:
	case SNDRV_PCM_IOCTL_XRUN:
	case SNDRV_PCM_IOCTL_LINK:
	case SNDRV_PCM_IOCTL_UNLINK:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			return snd_pcm_playback_ioctl1(file, substream, cmd, argp);
		else
			return snd_pcm_capture_ioctl1(file, substream, cmd, argp);
	case SNDRV_PCM_IOCTL_HW_REFINE32:
		return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
	case SNDRV_PCM_IOCTL_HW_PARAMS32:
		return snd_pcm_ioctl_hw_params_compat(substream, 0, argp);
	case SNDRV_PCM_IOCTL_SW_PARAMS32:
		return snd_pcm_ioctl_sw_params_compat(substream, argp);
	case SNDRV_PCM_IOCTL_STATUS32:
		return snd_pcm_status_user_compat(substream, argp);
	case SNDRV_PCM_IOCTL_SYNC_PTR32:
		return snd_pcm_ioctl_sync_ptr_compat(substream, argp);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO32:
		return snd_pcm_ioctl_channel_info_compat(substream, argp);
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES32:
		return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
	case SNDRV_PCM_IOCTL_READI_FRAMES32:
		return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES32:
		return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
	case SNDRV_PCM_IOCTL_READN_FRAMES32:
		return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
	case SNDRV_PCM_IOCTL_DELAY32:
		return snd_pcm_ioctl_delay_compat(substream, argp);
	case SNDRV_PCM_IOCTL_REWIND32:
		return snd_pcm_ioctl_rewind_compat(substream, argp);
	case SNDRV_PCM_IOCTL_FORWARD32:
		return snd_pcm_ioctl_forward_compat(substream, argp);
	}

	return -ENOIOCTLCMD;
}
| gpl-2.0 |
litepro/DK_S2_ICS_KERNEL | sound/core/pcm_compat.c | 7259 | 15394 | /*
* 32bit -> 64bit ioctl wrapper for PCM API
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file included from pcm_native.c */
#include <linux/compat.h>
#include <linux/slab.h>
/*
 * Compat SNDRV_PCM_IOCTL_DELAY: the native handler fills a 64-bit frame
 * count which is stored back into the caller's 32-bit slot.
 */
static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
				      s32 __user *src)
{
	snd_pcm_sframes_t delay;
	mm_segment_t fs;
	int err;

	/* snd_pcm_delay() writes through what it treats as a user pointer;
	 * widen the address limit around the call so a kernel buffer works */
	fs = snd_enter_user();
	err = snd_pcm_delay(substream, &delay);
	snd_leave_user(fs);
	if (err < 0)
		return err;
	if (put_user(delay, src))
		return -EFAULT;
	return err;
}
/*
 * Compat SNDRV_PCM_IOCTL_REWIND: frame count is 32-bit in the compat ABI.
 * The result (frames actually rewound, or a negative error) is written
 * back in place.
 */
static int snd_pcm_ioctl_rewind_compat(struct snd_pcm_substream *substream,
				       u32 __user *src)
{
	snd_pcm_uframes_t frames;
	int err;

	if (get_user(frames, src))
		return -EFAULT;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_playback_rewind(substream, frames);
	else
		err = snd_pcm_capture_rewind(substream, frames);
	if (put_user(err, src))
		return -EFAULT;
	return err < 0 ? err : 0;
}
/*
 * Compat SNDRV_PCM_IOCTL_FORWARD: read a 32-bit frame count, advance the
 * stream, and write the result (frames forwarded or error) back in place.
 */
static int snd_pcm_ioctl_forward_compat(struct snd_pcm_substream *substream,
					u32 __user *src)
{
	snd_pcm_uframes_t count;
	int res;

	if (get_user(count, src))
		return -EFAULT;
	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		res = snd_pcm_capture_forward(substream, count);
	else
		res = snd_pcm_playback_forward(substream, count);
	if (put_user(res, src))
		return -EFAULT;
	return res < 0 ? res : 0;
}
/* 32-bit layout of struct snd_pcm_hw_params; identical to the native one
 * except for the u32 fifo_size, so the compat handler copies it wholesale
 * and fixes up fifo_size separately. */
struct snd_pcm_hw_params32 {
	u32 flags;
	struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; /* this must be identical */
	struct snd_mask mres[5];	/* reserved masks */
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1];
	struct snd_interval ires[9];	/* reserved intervals */
	u32 rmask;
	u32 cmask;
	u32 info;
	u32 msbits;
	u32 rate_num;
	u32 rate_den;
	u32 fifo_size;
	unsigned char reserved[64];
};
/* 32-bit layout of struct snd_pcm_sw_params: all frame counts shrink to
 * u32, so the boundary must be recalculated for 32-bit user space. */
struct snd_pcm_sw_params32 {
	s32 tstamp_mode;
	u32 period_step;
	u32 sleep_min;
	u32 avail_min;
	u32 xfer_align;
	u32 start_threshold;
	u32 stop_threshold;
	u32 silence_threshold;
	u32 silence_size;
	u32 boundary;
	unsigned char reserved[64];
};
/* Recalculate the boundary so it fits within 32 bits: the largest
 * power-of-two multiple of the buffer size not exceeding 0x7fffffff. */
static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t b;

	if (!runtime->buffer_size)
		return 0;
	for (b = runtime->buffer_size;
	     b * 2 <= 0x7fffffffUL - runtime->buffer_size;
	     b *= 2)
		;
	return b;
}
/*
 * Compat SNDRV_PCM_IOCTL_SW_PARAMS: convert the 32-bit request to the
 * native layout, apply it, and report the recalculated 32-bit boundary
 * back to user space.
 *
 * Fix: the text contained the mojibake "¶ms" (an HTML-entity-mangled
 * "&params"), which does not compile; the address-of expressions are
 * restored in memset() and the snd_pcm_sw_params() call.
 */
static int snd_pcm_ioctl_sw_params_compat(struct snd_pcm_substream *substream,
					  struct snd_pcm_sw_params32 __user *src)
{
	struct snd_pcm_sw_params params;
	snd_pcm_uframes_t boundary;
	int err;

	memset(&params, 0, sizeof(params));
	if (get_user(params.tstamp_mode, &src->tstamp_mode) ||
	    get_user(params.period_step, &src->period_step) ||
	    get_user(params.sleep_min, &src->sleep_min) ||
	    get_user(params.avail_min, &src->avail_min) ||
	    get_user(params.xfer_align, &src->xfer_align) ||
	    get_user(params.start_threshold, &src->start_threshold) ||
	    get_user(params.stop_threshold, &src->stop_threshold) ||
	    get_user(params.silence_threshold, &src->silence_threshold) ||
	    get_user(params.silence_size, &src->silence_size))
		return -EFAULT;
	/*
	 * Check silence_size parameter.  Since we have a 64-bit boundary,
	 * silence_size must be compared with the 32-bit boundary.
	 */
	boundary = recalculate_boundary(substream->runtime);
	if (boundary && params.silence_size >= boundary)
		params.silence_size = substream->runtime->boundary;
	err = snd_pcm_sw_params(substream, &params);
	if (err < 0)
		return err;
	/* tell user space the boundary that matches its 32-bit view */
	if (boundary && put_user(boundary, &src->boundary))
		return -EFAULT;
	return err;
}
/* 32-bit layout of struct snd_pcm_channel_info (offset shrinks to u32). */
struct snd_pcm_channel_info32 {
	u32 channel;
	u32 offset;
	u32 first;
	u32 step;
};
/*
 * Compat SNDRV_PCM_IOCTL_CHANNEL_INFO: copy the request in, run the
 * native handler, and copy the (narrowed) result back field by field.
 */
static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream,
					     struct snd_pcm_channel_info32 __user *src)
{
	struct snd_pcm_channel_info info;
	int err;

	if (get_user(info.channel, &src->channel) ||
	    get_user(info.offset, &src->offset) ||
	    get_user(info.first, &src->first) ||
	    get_user(info.step, &src->step))
		return -EFAULT;
	err = snd_pcm_channel_info(substream, &info);
	if (err < 0)
		return err;
	if (put_user(info.channel, &src->channel) ||
	    put_user(info.offset, &src->offset) ||
	    put_user(info.first, &src->first) ||
	    put_user(info.step, &src->step))
		return -EFAULT;
	return err;
}
/* 32-bit layout of struct snd_pcm_status: compat timespecs and u32 frame
 * counters; packed to match the 32-bit user-space layout exactly. */
struct snd_pcm_status32 {
	s32 state;
	struct compat_timespec trigger_tstamp;
	struct compat_timespec tstamp;
	u32 appl_ptr;
	u32 hw_ptr;
	s32 delay;
	u32 avail;
	u32 avail_max;
	u32 overrange;
	s32 suspended_state;
	unsigned char reserved[60];
} __attribute__((packed));
/*
 * Compat SNDRV_PCM_IOCTL_STATUS: fill a native status record and store
 * it field by field into the packed 32-bit user structure.
 */
static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
				      struct snd_pcm_status32 __user *src)
{
	struct snd_pcm_status status;
	int err;

	err = snd_pcm_status(substream, &status);
	if (err < 0)
		return err;

	if (put_user(status.state, &src->state) ||
	    put_user(status.trigger_tstamp.tv_sec, &src->trigger_tstamp.tv_sec) ||
	    put_user(status.trigger_tstamp.tv_nsec, &src->trigger_tstamp.tv_nsec) ||
	    put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
	    put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
	    put_user(status.appl_ptr, &src->appl_ptr) ||
	    put_user(status.hw_ptr, &src->hw_ptr) ||
	    put_user(status.delay, &src->delay) ||
	    put_user(status.avail, &src->avail) ||
	    put_user(status.avail_max, &src->avail_max) ||
	    put_user(status.overrange, &src->overrange) ||
	    put_user(status.suspended_state, &src->suspended_state))
		return -EFAULT;

	return err;
}
/* both for HW_PARAMS and HW_REFINE */
/*
 * Compat HW_PARAMS/HW_REFINE: the 32-bit and native structures differ
 * only in fifo_size, so the struct is copied wholesale and fifo_size is
 * stored back separately.  After a successful HW_PARAMS the runtime
 * boundary is recalculated for the 32-bit view.
 */
static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
					  int refine,
					  struct snd_pcm_hw_params32 __user *data32)
{
	struct snd_pcm_hw_params *data;
	struct snd_pcm_runtime *runtime;
	int err;

	if (! (runtime = substream->runtime))
		return -ENOTTY;

	/* only fifo_size is different, so just copy all */
	data = memdup_user(data32, sizeof(*data32));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (refine)
		err = snd_pcm_hw_refine(substream, data);
	else
		err = snd_pcm_hw_params(substream, data);
	if (err < 0)
		goto error;
	if (copy_to_user(data32, data, sizeof(*data32)) ||
	    put_user(data->fifo_size, &data32->fifo_size)) {
		err = -EFAULT;
		goto error;
	}

	if (! refine) {
		unsigned int new_boundary = recalculate_boundary(runtime);
		if (new_boundary)
			runtime->boundary = new_boundary;
	}
 error:
	kfree(data);
	return err;
}
/*
*/
/* 32-bit interleaved-transfer request: buf is a 32-bit user pointer. */
struct snd_xferi32 {
	s32 result;
	u32 buf;
	u32 frames;
};
/*
 * Compat interleaved read/write: remap the 32-bit buffer pointer and
 * run the transfer directly; the frame result goes into data32->result.
 */
static int snd_pcm_ioctl_xferi_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xferi32 __user *data32)
{
	compat_caddr_t buf;
	u32 frames;
	int err;

	if (! substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	if (get_user(buf, &data32->buf) ||
	    get_user(frames, &data32->frames))
		return -EFAULT;

	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_lib_write(substream, compat_ptr(buf), frames);
	else
		err = snd_pcm_lib_read(substream, compat_ptr(buf), frames);
	if (err < 0)
		return err;
	/* copy the result */
	if (put_user(err, &data32->result))
		return -EFAULT;
	return 0;
}
/* snd_xfern needs remapping of bufs */
/* 32-bit non-interleaved transfer request: bufs is a 32-bit user pointer
 * to an array of 32-bit per-channel buffer pointers. */
struct snd_xfern32 {
	s32 result;
	u32 bufs;	/* this is void **; */
	u32 frames;
};
/*
 * The xfern ioctl needs to copy (up to) 128 pointers on stack.
 * Although we may pass the copied pointers through f_op->ioctl, the ioctl
 * handler there expands again the same 128 pointers on stack, so it is
 * better to handle the function (calling pcm_readv/writev) directly in
 * this handler.
 *
 * Improvement: the error path inside the remap loop duplicated the
 * kfree()/return pair; a single goto-based cleanup exit now frees the
 * pointer table on every path.
 */
static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xfern32 __user *data32)
{
	compat_caddr_t buf;
	compat_caddr_t __user *bufptr;
	u32 frames;
	void __user **bufs;
	int err, ch, i;

	if (! substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;

	/* the channel count bounds the size of the pointer table */
	if ((ch = substream->runtime->channels) > 128)
		return -EINVAL;
	if (get_user(buf, &data32->bufs) ||
	    get_user(frames, &data32->frames))
		return -EFAULT;
	bufptr = compat_ptr(buf);
	bufs = kmalloc(sizeof(void __user *) * ch, GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;

	/* remap each 32-bit user pointer to a native one */
	for (i = 0; i < ch; i++) {
		u32 ptr;
		if (get_user(ptr, bufptr)) {
			err = -EFAULT;
			goto out;
		}
		bufs[i] = compat_ptr(ptr);
		bufptr++;
	}

	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_lib_writev(substream, bufs, frames);
	else
		err = snd_pcm_lib_readv(substream, bufs, frames);
	if (err >= 0) {
		if (put_user(err, &data32->result))
			err = -EFAULT;
	}
 out:
	kfree(bufs);
	return err;
}
/* 32-bit mmap status record; packed to match the user-space layout. */
struct snd_pcm_mmap_status32 {
	s32 state;
	s32 pad1;
	u32 hw_ptr;
	struct compat_timespec tstamp;
	s32 suspended_state;
} __attribute__((packed));
/* 32-bit mmap control record (application pointer and wakeup minimum). */
struct snd_pcm_mmap_control32 {
	u32 appl_ptr;
	u32 avail_min;
};
/* 32-bit SYNC_PTR request: status/control unions padded to 64 bytes,
 * mirroring the native struct snd_pcm_sync_ptr. */
struct snd_pcm_sync_ptr32 {
	u32 flags;
	union {
		struct snd_pcm_mmap_status32 status;
		unsigned char reserved[64];
	} s;
	union {
		struct snd_pcm_mmap_control32 control;
		unsigned char reserved[64];
	} c;
} __attribute__((packed));
/*
 * Compat SNDRV_PCM_IOCTL_SYNC_PTR: since 32-bit user space cannot mmap
 * the 64-bit status/control records, exchange them through this ioctl.
 * Flags select the copy direction per field; pointers returned to user
 * space are folded into the 32-bit boundary.
 */
static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
					 struct snd_pcm_sync_ptr32 __user *src)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	u32 sflags;
	struct snd_pcm_mmap_control scontrol;
	struct snd_pcm_mmap_status sstatus;
	snd_pcm_uframes_t boundary;
	int err;

	if (snd_BUG_ON(!runtime))
		return -EINVAL;

	if (get_user(sflags, &src->flags) ||
	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    get_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;
	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	status = runtime->status;
	control = runtime->control;
	boundary = recalculate_boundary(runtime);
	if (! boundary)
		boundary = 0x7fffffff;
	/* snapshot the shared records under the stream lock */
	snd_pcm_stream_lock_irq(substream);
	/* FIXME: we should consider the boundary for the sync from app */
	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
		control->appl_ptr = scontrol.appl_ptr;
	else
		scontrol.appl_ptr = control->appl_ptr % boundary;
	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = scontrol.avail_min;
	else
		scontrol.avail_min = control->avail_min;
	sstatus.state = status->state;
	sstatus.hw_ptr = status->hw_ptr % boundary;
	sstatus.tstamp = status->tstamp;
	sstatus.suspended_state = status->suspended_state;
	snd_pcm_stream_unlock_irq(substream);
	if (put_user(sstatus.state, &src->s.status.state) ||
	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp.tv_sec) ||
	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp.tv_nsec) ||
	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    put_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;

	return 0;
}
/*
 * 32-bit ioctl numbers.  These commands carry structs whose layout differs
 * between 32-bit and 64-bit ABIs, so each gets its own request code built
 * from the 32-bit struct size; commands with identical layouts are passed
 * through to the native handler instead.
 */
enum {
	SNDRV_PCM_IOCTL_HW_REFINE32 = _IOWR('A', 0x10, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_HW_PARAMS32 = _IOWR('A', 0x11, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_SW_PARAMS32 = _IOWR('A', 0x13, struct snd_pcm_sw_params32),
	SNDRV_PCM_IOCTL_STATUS32 = _IOR('A', 0x20, struct snd_pcm_status32),
	SNDRV_PCM_IOCTL_DELAY32 = _IOR('A', 0x21, s32),
	SNDRV_PCM_IOCTL_CHANNEL_INFO32 = _IOR('A', 0x32, struct snd_pcm_channel_info32),
	SNDRV_PCM_IOCTL_REWIND32 = _IOW('A', 0x46, u32),
	SNDRV_PCM_IOCTL_FORWARD32 = _IOW('A', 0x49, u32),
	SNDRV_PCM_IOCTL_WRITEI_FRAMES32 = _IOW('A', 0x50, struct snd_xferi32),
	SNDRV_PCM_IOCTL_READI_FRAMES32 = _IOR('A', 0x51, struct snd_xferi32),
	SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
	SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
	SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
};
/*
 * Top-level compat_ioctl entry for PCM devices.
 *
 * Layout-compatible commands are forwarded to the native playback/capture
 * handlers; commands with 32-bit-specific struct layouts are routed to
 * their dedicated *_compat translators.  Anything unrecognized returns
 * -ENOIOCTLCMD so the caller can fall back or fail.
 */
static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	void __user *argp = compat_ptr(arg);

	pcm_file = file->private_data;
	if (! pcm_file)
		return -ENOTTY;
	substream = pcm_file->substream;
	if (! substream)
		return -ENOTTY;
	/*
	 * When PCM is used on 32bit mode, we need to disable
	 * mmap of PCM status/control records because of the size
	 * incompatibility.
	 */
	pcm_file->no_compat_mmap = 1;
	switch (cmd) {
	/* these commands have identical 32/64-bit argument layouts, so
	 * delegate straight to the native ioctl implementation */
	case SNDRV_PCM_IOCTL_PVERSION:
	case SNDRV_PCM_IOCTL_INFO:
	case SNDRV_PCM_IOCTL_TSTAMP:
	case SNDRV_PCM_IOCTL_TTSTAMP:
	case SNDRV_PCM_IOCTL_HWSYNC:
	case SNDRV_PCM_IOCTL_PREPARE:
	case SNDRV_PCM_IOCTL_RESET:
	case SNDRV_PCM_IOCTL_START:
	case SNDRV_PCM_IOCTL_DROP:
	case SNDRV_PCM_IOCTL_DRAIN:
	case SNDRV_PCM_IOCTL_PAUSE:
	case SNDRV_PCM_IOCTL_HW_FREE:
	case SNDRV_PCM_IOCTL_RESUME:
	case SNDRV_PCM_IOCTL_XRUN:
	case SNDRV_PCM_IOCTL_LINK:
	case SNDRV_PCM_IOCTL_UNLINK:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			return snd_pcm_playback_ioctl1(file, substream, cmd, argp);
		else
			return snd_pcm_capture_ioctl1(file, substream, cmd, argp);
	/* layout-translating handlers for the 32-bit-specific structs */
	case SNDRV_PCM_IOCTL_HW_REFINE32:
		return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
	case SNDRV_PCM_IOCTL_HW_PARAMS32:
		return snd_pcm_ioctl_hw_params_compat(substream, 0, argp);
	case SNDRV_PCM_IOCTL_SW_PARAMS32:
		return snd_pcm_ioctl_sw_params_compat(substream, argp);
	case SNDRV_PCM_IOCTL_STATUS32:
		return snd_pcm_status_user_compat(substream, argp);
	case SNDRV_PCM_IOCTL_SYNC_PTR32:
		return snd_pcm_ioctl_sync_ptr_compat(substream, argp);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO32:
		return snd_pcm_ioctl_channel_info_compat(substream, argp);
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES32:
		return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
	case SNDRV_PCM_IOCTL_READI_FRAMES32:
		return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES32:
		return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
	case SNDRV_PCM_IOCTL_READN_FRAMES32:
		return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
	case SNDRV_PCM_IOCTL_DELAY32:
		return snd_pcm_ioctl_delay_compat(substream, argp);
	case SNDRV_PCM_IOCTL_REWIND32:
		return snd_pcm_ioctl_rewind_compat(substream, argp);
	case SNDRV_PCM_IOCTL_FORWARD32:
		return snd_pcm_ioctl_forward_compat(substream, argp);
	}
	return -ENOIOCTLCMD;
}
| gpl-2.0 |
wwbhl/android_kernel_samsung_piranha | drivers/net/arcnet/arcnet.c | 7771 | 30420 | /*
* Linux ARCnet driver - device-independent routines
*
* Written 1997 by David Woodhouse.
* Written 1994-1999 by Avery Pennarun.
* Written 1999-2000 by Martin Mares <mj@ucw.cz>.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* The change log is now in a file called ChangeLog in this directory.
*
* Sources:
* - Crynwr arcnet.com/arcether.com packet drivers.
* - arcnet.c v0.00 dated 1/1/94 and apparently by
* Donald Becker - it didn't work :)
* - skeleton.c v0.05 dated 11/16/93 by Donald Becker
* (from Linux Kernel 1.1.45)
* - RFC's 1201 and 1051 - re: TCP/IP over ARCnet
* - The official ARCnet COM9026 data sheets (!) thanks to
* Ken Cornetet <kcornete@nyx10.cs.du.edu>
* - The official ARCnet COM20020 data sheets.
* - Information on some more obscure ARCnet controller chips, thanks
* to the nice people at SMSC.
* - net/inet/eth.c (from kernel 1.1.50) for header-building info.
* - Alternate Linux ARCnet source by V.Shergin <vsher@sao.stavropol.su>
* - Textual information and more alternate source from Joachim Koenig
* <jojo@repas.de>
*/
#define VERSION "arcnet: v3.94 BETA 2007/02/08 - by Avery Pennarun et al.\n"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/init.h>
#include <linux/arcdevice.h>
#include <linux/jiffies.h>
/* "do nothing" functions for protocol drivers */
static void null_rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length);
static int null_build_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, uint8_t daddr);
static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
int length, int bufnum);
static void arcnet_rx(struct net_device *dev, int bufnum);
/*
* one ArcProto per possible proto ID. None of the elements of
* arc_proto_map are allowed to be NULL; they will get set to
* arc_proto_default instead. It also must not be NULL; if you would like
* to set it to NULL, set it to &arc_proto_null instead.
*/
/* per-protocol-ID dispatch table plus the default/broadcast/raw handlers;
 * all slots are initialized in arcnet_init() and never left NULL */
struct ArcProto *arc_proto_map[256], *arc_proto_default,
	*arc_bcast_proto, *arc_raw_proto;

/* placeholder protocol used for any ID with no real driver loaded;
 * its hooks (null_rx etc.) just warn and do nothing useful */
static struct ArcProto arc_proto_null =
{
	.suffix		= '?',
	.mtu		= XMTU,
	.is_ip          = 0,
	.rx		= null_rx,
	.build_header	= null_build_header,
	.prepare_tx	= null_prepare_tx,
	.continue_tx    = NULL,
	.ack_tx         = NULL
};
/* Exported function prototypes */
int arcnet_debug = ARCNET_DEBUG;
EXPORT_SYMBOL(arc_proto_map);
EXPORT_SYMBOL(arc_proto_default);
EXPORT_SYMBOL(arc_bcast_proto);
EXPORT_SYMBOL(arc_raw_proto);
EXPORT_SYMBOL(arcnet_unregister_proto);
EXPORT_SYMBOL(arcnet_debug);
EXPORT_SYMBOL(alloc_arcdev);
EXPORT_SYMBOL(arcnet_interrupt);
EXPORT_SYMBOL(arcnet_open);
EXPORT_SYMBOL(arcnet_close);
EXPORT_SYMBOL(arcnet_send_packet);
EXPORT_SYMBOL(arcnet_timeout);
/* Internal function prototypes */
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
static int arcnet_rebuild_header(struct sk_buff *skb);
static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
MODULE_LICENSE("GPL");
/*
 * Module init: point every entry of the protocol map (and the default,
 * broadcast and raw handlers) at the null protocol.  Real protocol modules
 * overwrite these slots when they register.  Always succeeds.
 */
static int __init arcnet_init(void)
{
	int count;

	arcnet_debug = debug;
	printk("arcnet loaded.\n");
#ifdef ALPHA_WARNING
	BUGLVL(D_EXTRA) {
		printk("arcnet: ***\n"
		       "arcnet: * Read arcnet.txt for important release notes!\n"
		       "arcnet: *\n"
		       "arcnet: * This is an ALPHA version! (Last stable release: v3.02) E-mail\n"
		       "arcnet: * me if you have any questions, comments, or bug reports.\n"
		       "arcnet: ***\n");
	}
#endif
	/* initialize the protocol map */
	arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null;
	for (count = 0; count < 256; count++)
		arc_proto_map[count] = arc_proto_default;
	/* debug aid: struct sizes matter for the on-wire packet layout */
	BUGLVL(D_DURING)
	    printk("arcnet: struct sizes: %Zd %Zd %Zd %Zd %Zd\n",
		   sizeof(struct arc_hardware), sizeof(struct arc_rfc1201),
		   sizeof(struct arc_rfc1051), sizeof(struct arc_eth_encap),
		   sizeof(struct archdr));
	return 0;
}
/*
 * Module exit: intentionally empty.  Protocol modules unregister
 * themselves via arcnet_unregister_proto(), and hardware drivers hold a
 * reference on this module while their devices are open.
 */
static void __exit arcnet_exit(void)
{
}
module_init(arcnet_init);
module_exit(arcnet_exit);
/*
 * Dump the contents of an sk_buff
 *
 * Debug helper (compiled in only when D_SKB logging is enabled at build
 * time): hex-dumps skb->data prefixed with the device name and @desc.
 */
#if ARCNET_DEBUG_MAX & D_SKB
void arcnet_dump_skb(struct net_device *dev,
		     struct sk_buff *skb, char *desc)
{
	char hdr[32];

	/* dump the packet */
	snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc);
	print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
		       16, 1, skb->data, skb->len, true);
}
EXPORT_SYMBOL(arcnet_dump_skb);
#endif
/*
 * Dump the contents of an ARCnet buffer
 *
 * Debug helper (compiled in only for D_RX/D_TX builds): copies the raw
 * card buffer @bufnum into a static scratch area and hex-dumps it.
 * @take_arcnet_lock must be 0 when the caller already holds lp->lock
 * (e.g. from the interrupt handler), nonzero otherwise.
 */
#if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
static void arcnet_dump_packet(struct net_device *dev, int bufnum,
			       char *desc, int take_arcnet_lock)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int i, length;
	unsigned long flags = 0;
	/* NOTE: static buffer -- serialized only by lp->lock when taken */
	static uint8_t buf[512];
	char hdr[32];

	/* hw.copy_from_card expects IRQ context so take the IRQ lock
	   to keep it single threaded */
	if(take_arcnet_lock)
		spin_lock_irqsave(&lp->lock, flags);
	lp->hw.copy_from_card(dev, bufnum, 0, buf, 512);
	if(take_arcnet_lock)
		spin_unlock_irqrestore(&lp->lock, flags);

	/* if the offset[0] byte is nonzero, this is a 256-byte packet */
	length = (buf[2] ? 256 : 512);

	/* dump the packet */
	snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc);
	print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
		       16, 1, buf, length, true);
}
#else
#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0)
#endif
/*
 * Remove @proto from every place the core can reference it: the default,
 * broadcast and raw handler slots, and each of the 256 protocol-map
 * entries.  Protocol drivers register themselves, but unregistration is
 * generic enough to live here.  Replacements fall back to the null
 * protocol (for the default) or to whatever the default then is.
 */
void arcnet_unregister_proto(struct ArcProto *proto)
{
	int slot;

	/* Reset the default first: the other fallbacks below must point
	 * at the *new* default, never back at the departing protocol. */
	if (arc_proto_default == proto)
		arc_proto_default = &arc_proto_null;

	if (arc_bcast_proto == proto)
		arc_bcast_proto = arc_proto_default;
	if (arc_raw_proto == proto)
		arc_raw_proto = arc_proto_default;

	for (slot = 0; slot < 256; slot++)
		if (arc_proto_map[slot] == proto)
			arc_proto_map[slot] = arc_proto_default;
}
/*
 * Add a buffer to the queue.  Only the interrupt handler is allowed to do
 * this, unless interrupts are disabled.
 *
 * Note: we don't check for a full queue, since there aren't enough buffers
 * to more than fill it.
 *
 * The free list is a small ring of 5 slots indexed by first_free_buf
 * (producer) and next_buf (consumer); see get_arcbuf() for the consumer.
 */
static void release_arcbuf(struct net_device *dev, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int i;

	lp->buf_queue[lp->first_free_buf++] = bufnum;
	lp->first_free_buf %= 5;

	BUGLVL(D_DURING) {
		BUGMSG(D_DURING, "release_arcbuf: freed #%d; buffer queue is now: ",
		       bufnum);
		for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
			BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
		BUGMSG2(D_DURING, "\n");
	}
}
/*
 * Get a buffer from the queue.  If this returns -1, there are no buffers
 * available.
 *
 * buf_lock is used as a poor-man's reentrancy detector: it normally sits
 * at 1, so a dec-and-test failing means another context is already inside
 * this function, in which case we bail out with -1 rather than corrupt
 * the ring indices.
 */
static int get_arcbuf(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int buf = -1, i;

	if (!atomic_dec_and_test(&lp->buf_lock)) {
		/* already in this function */
		BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n",
		       lp->buf_lock.counter);
	}
	else {			/* we can continue */
		if (lp->next_buf >= 5)
			lp->next_buf -= 5;

		/* consumer catching up to producer == ring is empty */
		if (lp->next_buf == lp->first_free_buf)
			BUGMSG(D_NORMAL, "get_arcbuf: BUG: no buffers are available??\n");
		else {
			buf = lp->buf_queue[lp->next_buf++];
			lp->next_buf %= 5;
		}
	}

	BUGLVL(D_DURING) {
		BUGMSG(D_DURING, "get_arcbuf: got #%d; buffer queue is now: ", buf);
		for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
			BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
		BUGMSG2(D_DURING, "\n");
	}

	atomic_inc(&lp->buf_lock);
	return buf;
}
static int choose_mtu(void)
{
int count, mtu = 65535;
/* choose the smallest MTU of all available encaps */
for (count = 0; count < 256; count++) {
if (arc_proto_map[count] != &arc_proto_null &&
arc_proto_map[count]->mtu < mtu) {
mtu = arc_proto_map[count]->mtu;
}
}
return mtu == 65535 ? XMTU : mtu;
}
/* link-layer header construction hooks shared by all ARCnet devices */
static const struct header_ops arcnet_header_ops = {
	.create = arcnet_header,
	.rebuild = arcnet_rebuild_header,
};

/* generic net_device operations; hardware drivers supply lp->hw hooks */
static const struct net_device_ops arcnet_netdev_ops = {
	.ndo_open	= arcnet_open,
	.ndo_stop	= arcnet_close,
	.ndo_start_xmit = arcnet_send_packet,
	.ndo_tx_timeout = arcnet_timeout,
};
/* Setup a struct device for ARCnet.
 *
 * Called by alloc_netdev() to fill in the ARCnet-generic defaults before
 * the hardware driver customizes the device.
 */
static void arcdev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_ARCNET;
	dev->netdev_ops = &arcnet_netdev_ops;
	dev->header_ops = &arcnet_header_ops;
	dev->hard_header_len = sizeof(struct archdr);
	/* start from the smallest protocol MTU; arcnet_open() may shrink
	 * it further once more protocols have loaded */
	dev->mtu = choose_mtu();

	dev->addr_len = ARCNET_ALEN;
	dev->tx_queue_len = 100;
	dev->broadcast[0] = 0x00;	/* for us, broadcasts are address 0 */
	dev->watchdog_timeo = TX_TIMEOUT;

	/* New-style flags. */
	dev->flags = IFF_BROADCAST;
}
/*
 * Allocate a net_device with arcnet_local private data and ARCnet
 * defaults applied.  @name may be NULL/empty, in which case the kernel
 * assigns "arc%d".  Returns NULL on allocation failure.
 */
struct net_device *alloc_arcdev(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct arcnet_local),
			   name && *name ? name : "arc%d", arcdev_setup);
	if(dev) {
		struct arcnet_local *lp = netdev_priv(dev);
		spin_lock_init(&lp->lock);
	}

	return dev;
}
/*
 * Open/initialize the board.  This is called sometime after booting when
 * the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even registers
 * that "should" only need to be set once at boot, so that there is
 * non-reboot way to recover if something goes wrong.
 *
 * Returns 0 on success or -ENODEV when the hardware module is gone or the
 * card fails to reset.
 */
int arcnet_open(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	int count, newmtu, error;

	BUGMSG(D_INIT,"opened.");

	/* pin the hardware driver module while the interface is up */
	if (!try_module_get(lp->hw.owner))
		return -ENODEV;

	BUGLVL(D_PROTO) {
		BUGMSG(D_PROTO, "protocol map (default is '%c'): ",
		       arc_proto_default->suffix);
		for (count = 0; count < 256; count++)
			BUGMSG2(D_PROTO, "%c", arc_proto_map[count]->suffix);
		BUGMSG2(D_PROTO, "\n");
	}

	BUGMSG(D_INIT, "arcnet_open: resetting card.\n");

	/* try to put the card in a defined state - if it fails the first
	 * time, actually reset it.
	 */
	error = -ENODEV;
	if (ARCRESET(0) && ARCRESET(1))
		goto out_module_put;

	newmtu = choose_mtu();
	if (newmtu < dev->mtu)
		dev->mtu = newmtu;

	BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu);

	/* autodetect the encapsulation for each host. */
	memset(lp->default_proto, 0, sizeof(lp->default_proto));

	/* the broadcast address is special - use the 'bcast' protocol */
	for (count = 0; count < 256; count++) {
		if (arc_proto_map[count] == arc_bcast_proto) {
			lp->default_proto[0] = count;
			break;
		}
	}

	/* initialize buffers: 4 card buffers, all initially free; the
	 * buf_lock "semaphore" starts at 1 (see get_arcbuf) */
	atomic_set(&lp->buf_lock, 1);
	lp->next_buf = lp->first_free_buf = 0;
	release_arcbuf(dev, 0);
	release_arcbuf(dev, 1);
	release_arcbuf(dev, 2);
	release_arcbuf(dev, 3);
	lp->cur_tx = lp->next_tx = -1;
	lp->cur_rx = -1;

	lp->rfc1201.sequence = 1;

	/* bring up the hardware driver */
	if (lp->hw.open)
		lp->hw.open(dev);

	if (dev->dev_addr[0] == 0)
		BUGMSG(D_NORMAL, "WARNING!  Station address 00 is reserved "
		       "for broadcasts!\n");
	else if (dev->dev_addr[0] == 255)
		BUGMSG(D_NORMAL, "WARNING!  Station address FF may confuse "
		       "DOS networking programs!\n");

	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
	if (ASTATUS() & RESETflag) {
		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
		ACOMMAND(CFLAGScmd | RESETclear);
	}

	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
	/* make sure we're ready to receive IRQ's. */
	AINTMASK(0);
	udelay(1);		/* give it time to set the mask before
				 * we reset it again. (may not even be
				 * necessary)
				 */
	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
	lp->intmask = NORXflag | RECONflag;
	AINTMASK(lp->intmask);
	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);

	netif_start_queue(dev);

	return 0;

 out_module_put:
	module_put(lp->hw.owner);
	return error;
}
/* The inverse routine to arcnet_open - shuts down the card.
 *
 * Masks interrupts, stops TX/RX on the chip, lets the hardware driver
 * clean up, and drops the module reference taken in arcnet_open().
 * Always returns 0.
 */
int arcnet_close(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* flush TX and disable RX */
	AINTMASK(0);
	ACOMMAND(NOTXcmd);	/* stop transmit */
	ACOMMAND(NORXcmd);	/* disable receive */
	mdelay(1);		/* let any in-flight command settle */

	/* shut down the card */
	lp->hw.close(dev);
	module_put(lp->hw.owner);
	return 0;
}
/*
 * Build the ARCnet link-layer header for an outgoing skb.
 *
 * Chooses an encapsulation protocol: raw for ETH_P_ARCNET, otherwise the
 * per-destination default learned by arcnet_rx().  When no destination is
 * known yet, stashes the ethertype in front of the payload and returns -2
 * so arcnet_rebuild_header() can finish the job after ARP resolution.
 * Otherwise returns whatever the protocol's build_header yields.
 */
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned len)
{
	const struct arcnet_local *lp = netdev_priv(dev);
	uint8_t _daddr, proto_num;
	struct ArcProto *proto;

	BUGMSG(D_DURING,
	       "create header from %d to %d; protocol %d (%Xh); size %u.\n",
	       saddr ? *(uint8_t *) saddr : -1,
	       daddr ? *(uint8_t *) daddr : -1,
	       type, type, len);

	if (skb->len!=0 && len != skb->len)
		BUGMSG(D_NORMAL, "arcnet_header: Yikes!  skb->len(%d) != len(%d)!\n",
		       skb->len, len);

	/* Type is host order - ? */
	if(type == ETH_P_ARCNET) {
		proto = arc_raw_proto;
		BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix);
		_daddr = daddr ? *(uint8_t *) daddr : 0;
	}
	else if (!daddr) {
		/*
		 * if the dest addr isn't provided, we can't choose an encapsulation!
		 * Store the packet type (eg. ETH_P_IP) for now, and we'll push on a
		 * real header when we do rebuild_header.
		 */
		*(uint16_t *) skb_push(skb, 2) = type;
		/*
		 * XXX: Why not use skb->mac_len?
		 */
		if (skb->network_header - skb->mac_header != 2)
			BUGMSG(D_NORMAL, "arcnet_header: Yikes!  diff (%d) is not 2!\n",
			       (int)(skb->network_header - skb->mac_header));
		return -2;	/* return error -- can't transmit yet! */
	}
	else {
		/* otherwise, we can just add the header as usual. */
		_daddr = *(uint8_t *) daddr;
		proto_num = lp->default_proto[_daddr];
		proto = arc_proto_map[proto_num];
		BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n",
		       proto_num, proto->suffix);
		if (proto == &arc_proto_null && arc_bcast_proto != proto) {
			/* nothing learned for this host yet: fall back to
			 * the broadcast encapsulation */
			BUGMSG(D_DURING, "actually, let's use '%c' instead.\n",
			       arc_bcast_proto->suffix);
			proto = arc_bcast_proto;
		}
	}
	return proto->build_header(skb, dev, type, _daddr);
}
/*
 * Rebuild the ARCnet hard header.  This is called after an ARP (or in the
 * future other address resolution) has completed on this sk_buff.  We now
 * let ARP fill in the destination field.
 *
 * Returns 1 when a real header was built, 0 on failure (unresolved
 * address or unsupported protocol).
 */
static int arcnet_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct arcnet_local *lp = netdev_priv(dev);
	int status = 0;		/* default is failure */
	unsigned short type;
	uint8_t daddr=0;
	struct ArcProto *proto;
	/*
	 * XXX: Why not use skb->mac_len?
	 */
	/* arcnet_header() stashed exactly 2 bytes (the ethertype) in front
	 * of the payload; anything else means we were called wrongly */
	if (skb->network_header - skb->mac_header != 2) {
		BUGMSG(D_NORMAL,
		       "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
		       (int)(skb->network_header - skb->mac_header));
		return 0;
	}
	type = *(uint16_t *) skb_pull(skb, 2);
	BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type);

	if (type == ETH_P_IP) {
#ifdef CONFIG_INET
		BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type);
		status = arp_find(&daddr, skb) ? 1 : 0;
		BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n",
		       daddr, type);
#endif
	} else {
		BUGMSG(D_NORMAL,
		       "I don't understand ethernet protocol %Xh addresses!\n", type);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}

	/* if we couldn't resolve the address... give up. */
	if (!status)
		return 0;

	/* add the _real_ header this time! */
	proto = arc_proto_map[lp->default_proto[daddr]];
	proto->build_header(skb, dev, type, daddr);

	return 1;		/* success */
}
/* Called by the kernel in order to transmit a packet.
 *
 * Loads the packet into a free card buffer via the chosen protocol's
 * prepare_tx hook.  Small packets may be handed off immediately; split
 * (multi-buffer) packets are parked in lp->outgoing for the interrupt
 * handler to continue.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY when no
 * TX slot is available yet.
 */
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct archdr *pkt;
	struct arc_rfc1201 *soft;
	struct ArcProto *proto;
	int txbuf;
	unsigned long flags;
	int freeskb, retval;

	BUGMSG(D_DURING,
	       "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
	       ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol);

	pkt = (struct archdr *) skb->data;
	soft = &pkt->soft.rfc1201;
	proto = arc_proto_map[soft->proto];

	BUGMSG(D_SKB_SIZE, "skb: transmitting %d bytes to %02X\n",
		skb->len, pkt->hard.dest);
	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "tx");

	/* fits in one packet? */
	if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
		BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n");
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* don't try again */
	}

	/* We're busy transmitting a packet... */
	netif_stop_queue(dev);

	/* IRQs are masked while we juggle buffers; re-enabled below */
	spin_lock_irqsave(&lp->lock, flags);
	AINTMASK(0);
	if(lp->next_tx == -1)
		txbuf = get_arcbuf(dev);
	else {
		/* a packet is already staged for the next TX slot */
		txbuf = -1;
	}
	if (txbuf != -1) {
		if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
		    !proto->ack_tx) {
			/* done right away and we don't want to acknowledge
			   the package later - forget about it now */
			dev->stats.tx_bytes += skb->len;
			freeskb = 1;
		} else {
			/* do it the 'split' way */
			lp->outgoing.proto = proto;
			lp->outgoing.skb = skb;
			lp->outgoing.pkt = pkt;

			freeskb = 0;

			if (proto->continue_tx &&
			    proto->continue_tx(dev, txbuf)) {
			  BUGMSG(D_NORMAL,
				 "bug! continue_tx finished the first time! "
				 "(proto='%c')\n", proto->suffix);
			}
		}
		retval = NETDEV_TX_OK;
		lp->next_tx = txbuf;
	} else {
		retval = NETDEV_TX_BUSY;
		freeskb = 0;
	}

	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
	/* make sure we didn't ignore a TX IRQ while we were in here */
	AINTMASK(0);

	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
	lp->intmask |= TXFREEflag|EXCNAKflag;
	AINTMASK(lp->intmask);
	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());

	spin_unlock_irqrestore(&lp->lock, flags);
	if (freeskb) {
		dev_kfree_skb(skb);
	}
	return retval;		/* no need to try again */
}
/*
 * Actually start transmitting a packet that was loaded into a buffer
 * by prepare_tx.  This should _only_ be called by the interrupt handler.
 *
 * Returns 1 when a transmit was kicked off, 0 when a TX is already in
 * flight or nothing is queued.
 */
static int go_tx(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
	       ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx);

	if (lp->cur_tx != -1 || lp->next_tx == -1)
		return 0;

	BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);

	lp->cur_tx = lp->next_tx;
	lp->next_tx = -1;

	/* start sending */
	ACOMMAND(TXcmd | (lp->cur_tx << 3));

	dev->stats.tx_packets++;
	lp->lasttrans_dest = lp->lastload_dest;
	lp->lastload_dest = 0;
	lp->excnak_pending = 0;
	/* wake us up when this transmit finishes (or excessively NAKs) */
	lp->intmask |= TXFREEflag|EXCNAKflag;

	return 1;
}
/* Called by the kernel when transmit times out
 *
 * If the TX actually finished we probably just missed the IRQ; otherwise
 * abort the stuck transmit and let the interrupt handler (via timed_out)
 * clean up.  The warning is rate-limited to once per 10 seconds.
 */
void arcnet_timeout(struct net_device *dev)
{
	unsigned long flags;
	struct arcnet_local *lp = netdev_priv(dev);
	int status = ASTATUS();
	char *msg;

	spin_lock_irqsave(&lp->lock, flags);
	if (status & TXFREEflag) {	/* transmit _DID_ finish */
		msg = " - missed IRQ?";
	} else {
		msg = "";
		dev->stats.tx_aborted_errors++;
		lp->timed_out = 1;
		ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
	}
	dev->stats.tx_errors++;

	/* make sure we didn't miss a TX or a EXC NAK IRQ */
	AINTMASK(0);
	lp->intmask |= TXFREEflag|EXCNAKflag;
	AINTMASK(lp->intmask);

	spin_unlock_irqrestore(&lp->lock, flags);

	if (time_after(jiffies, lp->last_timeout + 10*HZ)) {
		BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
		       msg, status, lp->intmask, lp->lasttrans_dest);
		lp->last_timeout = jiffies;
	}

	if (lp->cur_tx == -1)
		netif_wake_queue(dev);
}
/*
 * The typical workload of the driver: Handle the network interface
 * interrupts.  Establish which device needs attention, and call the correct
 * chipset interrupt handler.
 *
 * Processes, in order per iteration: spurious reset, receive-complete,
 * excessive-NAK, transmit-complete (including split-packet continuation),
 * deferred packet reception, and RECON (network reconfiguration)
 * accounting.  Loops up to 5 times while work remains.
 */
irqreturn_t arcnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct arcnet_local *lp;
	int recbuf, status, diagstatus, didsomething, boguscount;
	int retval = IRQ_NONE;

	BUGMSG(D_DURING, "\n");

	BUGMSG(D_DURING, "in arcnet_interrupt\n");

	lp = netdev_priv(dev);
	BUG_ON(!lp);

	spin_lock(&lp->lock);

	/*
	 * RESET flag was enabled - if device is not running, we must clear it right
	 * away (but nothing else).
	 */
	if (!netif_running(dev)) {
		if (ASTATUS() & RESETflag)
			ACOMMAND(CFLAGScmd | RESETclear);
		AINTMASK(0);
		spin_unlock(&lp->lock);
		return IRQ_HANDLED;
	}

	BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n",
	       ASTATUS(), lp->intmask);

	boguscount = 5;
	do {
		status = ASTATUS();
		diagstatus = (status >> 8) & 0xFF;

		BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
			__FILE__,__LINE__,__func__,status);
		didsomething = 0;

		/*
		 * RESET flag was enabled - card is resetting and if RX is
		 * disabled, it's NOT because we just got a packet.
		 *
		 * The card is in an undefined state.  Clear it out and start over.
		 */
		if (status & RESETflag) {
			BUGMSG(D_NORMAL, "spurious reset (status=%Xh)\n", status);
			arcnet_close(dev);
			arcnet_open(dev);

			/* get out of the interrupt handler! */
			break;
		}
		/*
		 * RX is inhibited - we must have received something.  Prepare to
		 * receive into the next buffer.
		 *
		 * We don't actually copy the received packet from the card until
		 * after the transmit handler runs (and possibly launches the next
		 * tx); this should improve latency slightly if we get both types
		 * of interrupts at once.
		 */
		recbuf = -1;
		if (status & lp->intmask & NORXflag) {
			recbuf = lp->cur_rx;
			BUGMSG(D_DURING, "Buffer #%d: receive irq (status=%Xh)\n",
			       recbuf, status);

			lp->cur_rx = get_arcbuf(dev);
			if (lp->cur_rx != -1) {
				BUGMSG(D_DURING, "enabling receive to buffer #%d\n",
				       lp->cur_rx);
				ACOMMAND(RXcmd | (lp->cur_rx << 3) | RXbcasts);
			}
			didsomething++;
		}

		if((diagstatus & EXCNAKflag)) {
			BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n",
			       diagstatus);

			ACOMMAND(NOTXcmd);      /* disable transmit */
			lp->excnak_pending = 1;

			ACOMMAND(EXCNAKclear);
			lp->intmask &= ~(EXCNAKflag);
			didsomething++;
		}

		/* a transmit finished, and we're interested in it. */
		if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
			lp->intmask &= ~(TXFREEflag|EXCNAKflag);

			BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status);

			if (lp->cur_tx != -1 && !lp->timed_out) {
				if(!(status & TXACKflag)) {
					if (lp->lasttrans_dest != 0) {
						BUGMSG(D_EXTRA,
						       "transmit was not acknowledged! "
						       "(status=%Xh, dest=%02Xh)\n",
						       status, lp->lasttrans_dest);
						dev->stats.tx_errors++;
						dev->stats.tx_carrier_errors++;
					} else {
						BUGMSG(D_DURING,
						       "broadcast was not acknowledged; that's normal "
						       "(status=%Xh, dest=%02Xh)\n",
						       status, lp->lasttrans_dest);
					}
				}

				if (lp->outgoing.proto &&
				    lp->outgoing.proto->ack_tx) {
					/* 2 = ACKed, 1 = excessive NAKs,
					 * 0 = silently lost */
					int ackstatus;
					if(status & TXACKflag)
                                        	ackstatus=2;
                                        else if(lp->excnak_pending)
                                                ackstatus=1;
                                        else
                                                ackstatus=0;

                                        lp->outgoing.proto
                                                ->ack_tx(dev, ackstatus);
				}
			}
			if (lp->cur_tx != -1)
				release_arcbuf(dev, lp->cur_tx);

			lp->cur_tx = -1;
			lp->timed_out = 0;
			didsomething++;

			/* send another packet if there is one */
			go_tx(dev);

			/* continue a split packet, if any */
			if (lp->outgoing.proto && lp->outgoing.proto->continue_tx) {
				int txbuf = get_arcbuf(dev);
				if (txbuf != -1) {
					if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
						/* that was the last segment */
						dev->stats.tx_bytes += lp->outgoing.skb->len;
						if(!lp->outgoing.proto->ack_tx)
						  {
						    dev_kfree_skb_irq(lp->outgoing.skb);
						    lp->outgoing.proto = NULL;
						  }
					}
					lp->next_tx = txbuf;
				}
			}

			/* inform upper layers of idleness, if necessary */
			if (lp->cur_tx == -1)
				netif_wake_queue(dev);
		}
		/* now process the received packet, if any */
		if (recbuf != -1) {
			BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0);

			arcnet_rx(dev, recbuf);
			release_arcbuf(dev, recbuf);

			didsomething++;
		}
		if (status & lp->intmask & RECONflag) {
			ACOMMAND(CFLAGScmd | CONFIGclear);
			dev->stats.tx_carrier_errors++;

			BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
			       status);
			/* MYRECON bit is at bit 7 of diagstatus */
			if(diagstatus & 0x80)
				BUGMSG(D_RECON,"Put out that recon myself\n");

			/* is the RECON info empty or old? */
			if (!lp->first_recon || !lp->last_recon ||
			    time_after(jiffies, lp->last_recon + HZ * 10)) {
				if (lp->network_down)
					BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
				lp->first_recon = lp->last_recon = jiffies;
				lp->num_recons = lp->network_down = 0;

				BUGMSG(D_DURING, "recon: clearing counters.\n");
			} else {	/* add to current RECON counter */
				lp->last_recon = jiffies;
				lp->num_recons++;

				BUGMSG(D_DURING, "recon: counter=%d, time=%lds, net=%d\n",
				       lp->num_recons,
				       (lp->last_recon - lp->first_recon) / HZ,
				       lp->network_down);

				/* if network is marked up;
				 * and first_recon and last_recon are 60+ apart;
				 * and the average no. of recons counted is
				 *    > RECON_THRESHOLD/min;
				 * then print a warning message.
				 */
				if (!lp->network_down &&
				    (lp->last_recon - lp->first_recon) <= HZ * 60 &&
				    lp->num_recons >= RECON_THRESHOLD) {
					lp->network_down = 1;
					BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n");
				} else if (!lp->network_down &&
					   lp->last_recon - lp->first_recon > HZ * 60) {
					/* reset counters if we've gone for over a minute. */
					lp->first_recon = lp->last_recon;
					lp->num_recons = 1;
				}
			}
		} else if (lp->network_down &&
				time_after(jiffies, lp->last_recon + HZ * 10)) {
			if (lp->network_down)
				BUGMSG(D_NORMAL, "cabling restored?\n");
			lp->first_recon = lp->last_recon = 0;
			lp->num_recons = lp->network_down = 0;

			BUGMSG(D_DURING, "not recon: clearing counters anyway.\n");
		}

		if(didsomething) {
			retval |= IRQ_HANDLED;
		}
	}
	while (--boguscount && didsomething);

	BUGMSG(D_DURING, "arcnet_interrupt complete (status=%Xh, count=%d)\n",
	       ASTATUS(), boguscount);
	BUGMSG(D_DURING, "\n");

	/* mask off, settle, then restore the interrupt mask */
	AINTMASK(0);
	udelay(1);
	AINTMASK(lp->intmask);

	spin_unlock(&lp->lock);
	return retval;
}
/*
 * This is a generic packet receiver that calls arcnet??_rx depending on the
 * protocol ID found.
 *
 * Reads the 4-byte hardware header to locate the payload (a nonzero
 * offset[0] marks a 256-byte packet, otherwise offset[1] marks a 512-byte
 * one), pulls in as much of the soft header as fits, updates RX stats,
 * learns the sender's encapsulation for future TX, and dispatches to the
 * protocol's rx hook.
 */
static void arcnet_rx(struct net_device *dev, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct archdr pkt;
	struct arc_rfc1201 *soft;
	int length, ofs;

	soft = &pkt.soft.rfc1201;

	/*
	 * BUGFIX: this used to pass sizeof(ARC_HDR_SIZE) -- i.e.
	 * sizeof(int) -- which only coincidentally equals the 4-byte
	 * hardware header size.  Ask for ARC_HDR_SIZE bytes explicitly.
	 */
	lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
	if (pkt.hard.offset[0]) {
		ofs = pkt.hard.offset[0];
		length = 256 - ofs;
	} else {
		ofs = pkt.hard.offset[1];
		length = 512 - ofs;
	}

	/* get the full header, if possible */
	if (sizeof(pkt.soft) <= length)
		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
	else {
		/* short packet: zero-fill so unread soft-header bytes are
		 * deterministic */
		memset(&pkt.soft, 0, sizeof(pkt.soft));
		lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
	}

	BUGMSG(D_DURING, "Buffer #%d: received packet from %02Xh to %02Xh "
	       "(%d+4 bytes)\n",
	       bufnum, pkt.hard.source, pkt.hard.dest, length);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length + ARC_HDR_SIZE;

	/* call the right receiver for the protocol */
	if (arc_proto_map[soft->proto]->is_ip) {
		BUGLVL(D_PROTO) {
			struct ArcProto
			*oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
			*newp = arc_proto_map[soft->proto];

			if (oldp != newp) {
				BUGMSG(D_PROTO,
				       "got protocol %02Xh; encap for host %02Xh is now '%c'"
				       " (was '%c')\n", soft->proto, pkt.hard.source,
				       newp->suffix, oldp->suffix);
			}
		}

		/* broadcasts will always be done with the last-used encap. */
		lp->default_proto[0] = soft->proto;

		/* in striking contrast, the following isn't a hack. */
		lp->default_proto[pkt.hard.source] = soft->proto;
	}

	/* call the protocol-specific receiver. */
	arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
}
/* "do nothing" rx hook for unregistered protocol IDs: just log and drop. */
static void null_rx(struct net_device *dev, int bufnum,
		    struct archdr *pkthdr, int length)
{
	BUGMSG(D_PROTO,
	"rx: don't know how to deal with proto %02Xh from host %02Xh.\n",
	       pkthdr->soft.rfc1201.proto, pkthdr->hard.source);
}
/* "do nothing" build_header hook: warn that no protocol driver handles
 * this host's encapsulation.  Returning 0 always signals failure to the
 * caller (arcnet_header). */
static int null_build_header(struct sk_buff *skb, struct net_device *dev,
			     unsigned short type, uint8_t daddr)
{
	struct arcnet_local *lp = netdev_priv(dev);

	BUGMSG(D_PROTO,
	       "tx: can't build header for encap %02Xh; load a protocol driver.\n",
	       lp->default_proto[daddr]);

	/* always fails */
	return 0;
}
/* the "do nothing" prepare_tx function warns that there's nothing to do.
 *
 * The hardware must still be handed *something* to transmit, so a dummy
 * 1-byte self-addressed packet is loaded into the buffer.  Returns 1
 * ("done") so no continuation is attempted.
 */
static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
			   int length, int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct arc_hardware newpkt;

	BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n");

	/* send a packet to myself -- will never get received, of course */
	newpkt.source = newpkt.dest = dev->dev_addr[0];

	/* only one byte of actual data (and it's random) */
	newpkt.offset[0] = 0xFF;

	lp->hw.copy_to_card(dev, bufnum, 0, &newpkt, ARC_HDR_SIZE);

	return 1;		/* done */
}
| gpl-2.0 |
droidsec-cn/android_kernel_oneplus_msm8994 | arch/frv/kernel/time.c | 10331 | 3146 | /* time.c: FRV arch-specific time handling
*
* Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from arch/m68k/kernel/time.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/timer-regs.h>
#include <asm/mb-regs.h>
#include <asm/mb86943a.h>
#include <linux/timex.h>
#define TICK_SIZE (tick_nsec / 1000)
unsigned long __nongprelbss __clkin_clock_speed_HZ;
unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
unsigned long __nongprelbss __res_bus_clock_speed_HZ;
unsigned long __nongprelbss __sdram_clock_speed_HZ;
unsigned long __nongprelbss __core_bus_clock_speed_HZ;
unsigned long __nongprelbss __core_clock_speed_HZ;
unsigned long __nongprelbss __dsu_clock_speed_HZ;
unsigned long __nongprelbss __serial_clock_speed_HZ;
unsigned long __delay_loops_MHz;
/* forward declaration: the handler is referenced by the irqaction below */
static irqreturn_t timer_interrupt(int irq, void *dummy);

/* scheduling-tick interrupt, registered on IRQ_CPU_TIMER0 by time_init() */
static struct irqaction timer_irq  = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "timer",
};
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
	profile_tick(CPU_PROFILING);

	/* advance wall-clock time by one tick */
	xtime_update(1);

#ifdef CONFIG_HEARTBEAT
	/* drive the board LEDs as a visible "timer is alive" indicator;
	 * 'n' is static so the pattern advances across interrupts */
	static unsigned short n;
	n++;
	__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */

	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}
/*
 * time_divisor_init - program hardware timer 0 to tick at HZ.
 *
 * Derives the 16-bit reload value from the resource-bus clock divided by
 * the prescaler (pre) and the 2^prediv clock-source divider, then writes
 * the control register and the reload value low byte first, high byte
 * second, as the RW_LH8 access mode requires.
 */
void time_divisor_init(void)
{
	unsigned short base, pre, prediv;

	/* set the scheduling timer going */
	pre = 1;
	prediv = 4;
	base = __res_bus_clock_speed_HZ / pre / HZ / (1 << prediv);

	__set_TPRV(pre);
	__set_TxCKSL_DATA(0, prediv);

	/* timer 0, low/high byte access, mode 2 */
	__set_TCTR(TCTR_SC_CTR0 | TCTR_RL_RW_LH8 | TCTR_MODE_2);
	__set_TCSR_DATA(0, base & 0xff);
	__set_TCSR_DATA(0, base >> 8);
}
/*
 * read_persistent_clock - read the boot wall-clock time from the board TOD.
 *
 * Queries arch_gettod() for the current date/time and converts it into a
 * timespec.  The pre-set defaults keep the result defined on platforms
 * whose arch_gettod() fills nothing in.
 */
void read_persistent_clock(struct timespec *ts)
{
	unsigned int year, mon, day, hour, min, sec;

	extern void arch_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);

	/* FIX by dqg : Set to zero for platforms that don't have tod */
	/* without this time is undefined and can overflow time_t, causing */
	/* very strange errors */
	year = 1980;
	mon = day = 1;
	hour = min = sec = 0;
	arch_gettod (&year, &mon, &day, &hour, &min, &sec);

	/* NOTE(review): presumably arch_gettod() reports the year relative
	 * to 1900, with values below 70 meaning 20xx; with the 1980 default
	 * above an absent TOD would yield year 3880 -- confirm against the
	 * board's arch_gettod() implementation. */
	if ((year += 1900) < 1970)
		year += 100;
	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
	ts->tv_nsec = 0;
}
/*
 * time_init - arch hook: start the scheduling tick.
 *
 * Registers timer_interrupt() on IRQ_CPU_TIMER0 and programs the timer
 * divisors so the tick runs at HZ.
 */
void time_init(void)
{
	/* install scheduling interrupt handler */
	setup_irq(IRQ_CPU_TIMER0, &timer_irq);

	time_divisor_init();
}
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Jiffies-based: resolution is one tick, so the value only advances when
 * the timer interrupt updates jiffies_64.
 */
unsigned long long sched_clock(void)
{
	const unsigned long long ns_per_tick = 1000000000 / HZ;

	return jiffies_64 * ns_per_tick;
}
| gpl-2.0 |
JetBrains/jdk8u_hotspot | src/share/vm/oops/typeArrayOop.cpp | 92 | 1198 | /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
// <<this page is intentionally left blank>>
| gpl-2.0 |
linyvxiang/linux-zswap | drivers/gpu/drm/drm_vm.c | 92 | 18299 | /**
* \file drm_vm.c
* Memory mapping for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
/*
 * drm_io_prot - page protection for an I/O (register/framebuffer) mapping,
 * with per-architecture caching rules applied.
 *
 * x86: registers are uncached unless write-combining was requested, the
 * rest is write-combined.  powerpc: always uncached, registers guarded.
 * ia64: write-combined only when EFI says the range supports it.
 * sparc/arm/mips: always uncached.
 */
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map->type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
/*
 * drm_dma_prot - page protection for DMA / scatter-gather mappings.
 *
 * Only powerpc with a non-coherent cache needs an adjustment (uncached);
 * everywhere else the default protection is used unchanged.  map_type is
 * currently unused.
 */
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}
/**
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Find the right map and if it's AGP memory find the real physical page to
* map, get the page, increment the use count and return it.
*/
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	/* fault-based mapping is only used when the CPU cannot address
	 * the AGP aperture directly */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		/* list exhausted with no match: baddr is not AGP-bound */
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else /* __OS_HAS_AGP */
/* Build without AGP support: any fault on an AGP map is an error. */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif /* __OS_HAS_AGP */
/**
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_local_map *map = vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = (unsigned long)vmf->virtual_address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
return VM_FAULT_SIGBUS;
get_page(page);
vmf->page = page;
DRM_DEBUG("shm_fault 0x%lx\n", offset);
return 0;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	/* drop this vma from the device's vma list while counting how many
	 * live vmas (including this one) still reference the same map */
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			/* tear down the backing resource by map type */
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				/* nothing to free here */
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
* \c fault method for DMA virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
if (!dma)
return VM_FAULT_SIGBUS; /* Error */
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
get_page(page);
vmf->page = page;
DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
return 0;
}
/**
* \c fault method for scatter-gather virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_local_map *map = vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
unsigned long page_offset;
struct page *page;
if (!entry)
return VM_FAULT_SIGBUS; /* Error */
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = (unsigned long)vmf->virtual_address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
get_page(page);
vmf->page = page;
return 0;
}
/* Thin adapters: the vm_operations_struct tables below want plain fault
 * callbacks, so each one simply forwards to the matching
 * drm_do_vm_*_fault() implementation. */
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations (uses the map-destroying close hook) */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
/**
* \c open method for shared virtual memory.
*
* \param vma virtual memory area.
*
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
void drm_vm_open_locked(struct drm_device *dev,
struct vm_area_struct *vma)
{
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->pid = current->pid;
list_add(&vma_entry->head, &dev->vmalist);
}
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);
/* Locking wrapper: runs drm_vm_open_locked() under dev->struct_mutex. */
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/*
 * drm_vm_close_locked - undo drm_vm_open_locked() for one vma.
 *
 * Caller must hold dev->struct_mutex.  Drops the vma count and removes
 * (and frees) the matching tracking entry from drm_device::vmalist.
 */
void drm_vm_close_locked(struct drm_device *dev,
			 struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.  Takes struct_mutex around drm_vm_close_locked().
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().  Caller must hold dev->struct_mutex.
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	/* read-only DMA buffers: strip write permission for non-admin users */
	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	/* pages are faulted in from dma->pagelist; the vma must neither
	 * grow nor appear in core dumps */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
/*
 * drm_core_get_reg_ofs - architecture offset added to register-map
 * physical addresses before remapping; only alpha needs a non-zero
 * (dense memory) base.
 */
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().  Caller must hold dev->struct_mutex.
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* vm_pgoff is the hash key selecting which map is being mmapped */
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* read-only maps: strip write permission for non-admin users */
	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		/* physical I/O range: remap directly, no fault handler
		 * needed for the pages themselves */
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
/*
 * drm_mmap - file_operations mmap entry point.
 *
 * Rejects unplugged devices, then runs drm_mmap_locked() under
 * dev->struct_mutex.
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
| gpl-2.0 |
BHSPitMonkey/linwizard-strtrk | arch/ppc/platforms/pq2ads.c | 92 | 1588 | /*
* PQ2ADS platform support
*
* Author: Kumar Gala <galak@kernel.crashing.org>
* Derived from: est8260_setup.c by Allen Curtis
*
* Copyright 2004 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <asm/io.h>
#include <asm/mpc8260.h>
#include <asm/cpm2.h>
#include <asm/immap_cpm2.h>
/*
 * m82xx_board_setup - PQ2ADS board-specific initialization.
 *
 * Maps the CPM register block and the BCSR1 board control register,
 * enables the second RS-232 port, and quiesces every SCC UART the kernel
 * is configured to use (mask tx/rx events, disable rx/tx enables) so the
 * CPM uart driver starts from a clean port.  Both mappings are released
 * before returning.
 */
void __init
m82xx_board_setup(void)
{
	cpm2_map_t* immap = ioremap(CPM_MAP_ADDR, sizeof(cpm2_map_t));
	u32 *bcsr = ioremap(BCSR_ADDR+4, sizeof(u32));	/* BCSR1 */

	/* Enable the 2nd UART port */
	clrbits32(bcsr, BCSR1_RS232_EN2);

#ifdef CONFIG_SERIAL_CPM_SCC1
	clrbits32((u32*)&immap->im_scc[0].scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32*)&immap->im_scc[0].scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC2
	clrbits32((u32*)&immap->im_scc[1].scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32*)&immap->im_scc[1].scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC3
	clrbits32((u32*)&immap->im_scc[2].scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32*)&immap->im_scc[2].scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC4
	clrbits32((u32*)&immap->im_scc[3].scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32*)&immap->im_scc[3].scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

	iounmap(bcsr);
	iounmap(immap);
}
| gpl-2.0 |
andyhui/iproute2 | tc/m_csum.c | 92 | 5350 | /*
* m_csum.c checksum updating action
*
* This program is free software; you can distribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Gregoire Baron <baronchon@n7mm.org>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/tc_act/tc_csum.h>
#include "utils.h"
#include "tc_util.h"
/* Print the csum action grammar to stderr. */
static void
explain(void)
{
	fputs("Usage: ... csum <UPDATE>\n"
	      "Where: UPDATE := <TARGET> [<UPDATE>]\n"
	      " TARGET := { ip4h | icmp | igmp | tcp | udp | udplite | <SWEETS> }\n"
	      " SWEETS := { and | or | '+' }\n",
	      stderr);
}
/* Print the grammar and abort the invocation; never returns. */
static void
usage(void)
{
	explain();
	exit(-1);
}
/*
 * parse_csum_args - consume <TARGET> keywords from the argument vector.
 *
 * Walks argv, OR-ing the matching TCA_CSUM_UPDATE_FLAG_* bit into
 * sel->update_flags for every recognized target; the connectives "and",
 * "or" and "+" are accepted and ignored.  Stops at the first unknown
 * word, leaving *argc_p/*argv_p pointing at it.  Returns -1 when called
 * with an empty argument list, 0 otherwise.
 */
static int
parse_csum_args(int *argc_p, char ***argv_p, struct tc_csum *sel)
{
	int argc = *argc_p;
	char **argv = *argv_p;

	if (argc <= 0)
		return -1;

	for (; argc > 0; argc--, argv++) {
		unsigned int flag;

		if (matches(*argv, "iph") == 0 ||
		    matches(*argv, "ip4h") == 0 ||
		    matches(*argv, "ipv4h") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
		else if (matches(*argv, "icmp") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_ICMP;
		else if (matches(*argv, "igmp") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_IGMP;
		else if (matches(*argv, "tcp") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_TCP;
		else if (matches(*argv, "udp") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_UDP;
		else if (matches(*argv, "udplite") == 0)
			flag = TCA_CSUM_UPDATE_FLAG_UDPLITE;
		else if (matches(*argv, "and") == 0 ||
			 matches(*argv, "or") == 0 ||
			 matches(*argv, "+") == 0)
			flag = 0;	/* connective sugar between targets */
		else
			break;

		sel->update_flags |= flag;
	}

	*argc_p = argc;
	*argv_p = argv;
	return 0;
}
/*
 * parse_csum - parse "csum <UPDATE> [CONTROL] [index N]" into a netlink
 * request.
 *
 * Fills a struct tc_csum from the command line and appends it as a
 * TCA_CSUM_PARMS attribute nested under @tca_id in @n.  Returns 0 on
 * success, -1 on malformed input (after printing a diagnostic).
 */
static int
parse_csum(struct action_util *a, int *argc_p,
	   char ***argv_p, int tca_id, struct nlmsghdr *n)
{
	struct tc_csum sel;

	int argc = *argc_p;
	char **argv = *argv_p;
	int ok = 0;
	struct rtattr *tail;

	memset(&sel, 0, sizeof(sel));

	while (argc > 0) {
		if (matches(*argv, "csum") == 0) {
			NEXT_ARG();
			if (parse_csum_args(&argc, &argv, &sel)) {
				fprintf(stderr, "Illegal csum construct (%s)\n",
					*argv);
				explain();
				return -1;
			}
			ok++;
			continue;
		} else if (matches(*argv, "help") == 0) {
			usage();
		}
		else {
			break;
		}
	}

	if (!ok) {
		explain();
		return -1;
	}

	/* at least one checksum target must have been named */
	if (sel.update_flags == 0) {
		fprintf(stderr, "Illegal csum construct, empty <UPDATE> list\n");
		return -1;
	}

	/* optional control verb: what happens after the action runs */
	if (argc) {
		if (matches(*argv, "reclassify") == 0) {
			sel.action = TC_ACT_RECLASSIFY;
			argc--;
			argv++;
		} else if (matches(*argv, "pipe") == 0) {
			sel.action = TC_ACT_PIPE;
			argc--;
			argv++;
		} else if (matches(*argv, "drop") == 0 ||
			   matches(*argv, "shot") == 0) {
			sel.action = TC_ACT_SHOT;
			argc--;
			argv++;
		} else if (matches(*argv, "continue") == 0) {
			sel.action = TC_ACT_UNSPEC;
			argc--;
			argv++;
		} else if (matches(*argv, "pass") == 0) {
			sel.action = TC_ACT_OK;
			argc--;
			argv++;
		}
	}

	/* optional explicit action index */
	if (argc) {
		if (matches(*argv, "index") == 0) {
			NEXT_ARG();
			if (get_u32(&sel.index, *argv, 10)) {
				fprintf(stderr, "Illegal \"index\" (%s) <csum>\n",
					*argv);
				return -1;
			}
			argc--;
			argv++;
		}
	}

	/* nest the parameters under tca_id, then fix up the nest length */
	tail = NLMSG_TAIL(n);
	addattr_l(n, MAX_MSG, tca_id, NULL, 0);
	addattr_l(n, MAX_MSG, TCA_CSUM_PARMS, &sel, sizeof(sel));
	tail->rta_len = (char *)NLMSG_TAIL(n) - (char *)tail;

	*argc_p = argc;
	*argv_p = argv;

	return 0;
}
/*
 * print_csum - render a kernel-reported csum action back into tc syntax.
 *
 * @au:  action descriptor (unused).
 * @f:   output stream.
 * @arg: nested TCA_CSUM_* attribute block from the kernel.
 *
 * Prints the enabled update targets, the control action, and the
 * index/ref/bind counters; with -s also the install/use timestamps.
 * Returns 0 on success, -1 if the parameters attribute is missing.
 */
static int
print_csum(struct action_util *au, FILE * f, struct rtattr *arg)
{
	struct tc_csum *sel;

	struct rtattr *tb[TCA_CSUM_MAX + 1];

	/* These only ever point at string literals, which must not be
	 * modified — keep them const (avoids -Wwrite-strings and any
	 * accidental UB write). */
	const char *uflag_1 = "";
	const char *uflag_2 = "";
	const char *uflag_3 = "";
	const char *uflag_4 = "";
	const char *uflag_5 = "";
	const char *uflag_6 = "";

	SPRINT_BUF(action_buf);

	int uflag_count = 0;

	if (arg == NULL)
		return -1;

	parse_rtattr_nested(tb, TCA_CSUM_MAX, arg);

	if (tb[TCA_CSUM_PARMS] == NULL) {
		fprintf(f, "[NULL csum parameters]");
		return -1;
	}
	sel = RTA_DATA(tb[TCA_CSUM_PARMS]);

	if (sel->update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		uflag_1 = "iph";
		uflag_count++;
	}
	/* Each subsequent flag gets a ", " separator iff it is not first. */
#define CSUM_UFLAG_BUFFER(flag_buffer, flag_value, flag_string) \
	do { \
		if (sel->update_flags & flag_value) { \
			flag_buffer = uflag_count > 0 ? \
				", " flag_string : flag_string; \
			uflag_count++; \
		} \
	} while (0)
	CSUM_UFLAG_BUFFER(uflag_2, TCA_CSUM_UPDATE_FLAG_ICMP, "icmp");
	CSUM_UFLAG_BUFFER(uflag_3, TCA_CSUM_UPDATE_FLAG_IGMP, "igmp");
	CSUM_UFLAG_BUFFER(uflag_4, TCA_CSUM_UPDATE_FLAG_TCP, "tcp");
	CSUM_UFLAG_BUFFER(uflag_5, TCA_CSUM_UPDATE_FLAG_UDP, "udp");
	CSUM_UFLAG_BUFFER(uflag_6, TCA_CSUM_UPDATE_FLAG_UDPLITE, "udplite");
#undef CSUM_UFLAG_BUFFER
	if (!uflag_count) {
		uflag_1 = "?empty";
	}

	fprintf(f, "csum (%s%s%s%s%s%s) action %s\n",
		uflag_1, uflag_2, uflag_3,
		uflag_4, uflag_5, uflag_6,
		action_n2a(sel->action, action_buf, sizeof(action_buf)));
	fprintf(f, "\tindex %d ref %d bind %d", sel->index, sel->refcnt, sel->bindcnt);

	if (show_stats) {
		if (tb[TCA_CSUM_TM]) {
			struct tcf_t *tm = RTA_DATA(tb[TCA_CSUM_TM]);
			print_tm(f, tm);
		}
	}
	fprintf(f, "\n");

	return 0;
}
/* Registration record binding the "csum" action keyword to its handlers. */
struct action_util csum_action_util = {
	.id = "csum",
	.parse_aopt = parse_csum,
	.print_aopt = print_csum,
};
| gpl-2.0 |
grgbr/linux-iio | drivers/infiniband/hw/qib/qib_iba7322.c | 92 | 271173 | /*
* Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file contains all of the code that is specific to the
* InfiniPath 7322 chip
*/
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"
#include "qib_mad.h"
#include "qib_verbs.h"
#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0
/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)
/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;
/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500
/*
* Number of VLs we are configured to use (to allow for more
* credits per vl, etc.)
*/
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");
static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
"attenuation cutoff (dB) for long copper cable setup");
static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
/*
* Receive header queue sizes
*/
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
.string = txselect_list,
.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
&kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
"Tx serdes indices (for no QSFP or invalid QSFP data)");
#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
BOARD_QME7342)
#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
#define MASK_ACROSS(lsb, msb) \
(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
#define SYM_RMASK(regname, fldname) ((u64) \
QIB_7322_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64) \
QIB_7322_##regname##_##fldname##_RMASK << \
QIB_7322_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
(((value) >> SYM_LSB(regname, fldname)) & \
SYM_RMASK(regname, fldname)))
/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
/*
* the size bits give us 2^N, in KB units. 0 marks as invalid,
* and 7 is reserved. We currently use only 2KB and 4KB
*/
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
#define SendIBSLIDAssignMask \
QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3
/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */
/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
/*
* per-port kernel registers. Access only with qib_read_kreg_port()
* or qib_write_kreg_port()
*/
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
/*
* Per-context kernel registers. Access only with qib_read_kreg_ctxt()
* or qib_write_kreg_ctxt()
*/
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
/*
* TID Flow table, per context. Reduces
* number of hdrq updates to one per flow (or on errors).
* context 0 and 1 share same memory, but have distinct
* addresses. Since for now, we never use expected sends
* on kernel contexts, we don't worry about that (we initialize
* those entries for ctxt 0/1 on driver load twice, for example).
*/
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS ( \
(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
/* Most (not all) Counters are per-IBport.
* Requires LBIntCnt is at offset 0 in the group
*/
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2
/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS
/*
* context 0 and 1 are special, and there is no chip register that
* defines this value, so we have to define it here.
* These are all allocated to either 0 or 1 for single port
* hardware configuration, otherwise each gets half
*/
#define KCTXT0_EGRCNT 2048
/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
/*
 * Map IB static-rate enum values to a delay factor: halves each time the
 * rate doubles, with 40 Gbps = 1 (presumably the inter-packet delay
 * multiplier -- confirm against callers). Unlisted rates stay 0.
 */
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED 0x00
#define IB_7322_LT_STATE_LINKUP 0x01
#define IB_7322_LT_STATE_POLLACTIVE 0x02
#define IB_7322_LT_STATE_POLLQUIET 0x03
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
#define IB_7322_LT_STATE_CFGIDLE 0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_7322_LT_STATE_TXREVLANES 0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
#define IB_7322_LT_STATE_CFGWAITENH 0x13
/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
#define IB_7322_L_STATE_INIT 0x1
#define IB_7322_L_STATE_ARM 0x2
#define IB_7322_L_STATE_ACTIVE 0x3
#define IB_7322_L_STATE_ACT_DEFER 0x4
/*
 * Translate the chip's link-training state (IB_7322_LT_STATE_*, used as
 * the array index) into the generic IB_PHYSPORTSTATE_* values reported
 * to the IB core.  Indices not listed here default to 0.
 */
static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] =
		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	/* unnamed LT states 0x14-0x17 also report as config/training */
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
#ifdef CONFIG_INFINIBAND_QIB_DCA
/* Context handed to an IRQ affinity-change notifier, for DCA updates. */
struct qib_irq_notify {
	int rcv;	/* presumably: nonzero for a receive-context vector -- confirm */
	void *arg;	/* opaque pointer passed back to the notifier callback */
	struct irq_affinity_notify notify;
};
#endif
/* Device-wide (not per-port) chip-specific state for the 7322. */
struct qib_chip_specific {
	u64 __iomem *cregbase;	/* counter register base (see read_7322_creg*) */
	u64 *cntrs;		/* device counter snapshot buffer -- TODO confirm */
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask; /* clear bits which have dedicated handlers */
	u64 int_enable_mask; /* for per port interrupts in single port mode */
	u64 errormask;	/* presumably shadow of kr_errmask -- confirm */
	u64 hwerrmask;	/* presumably shadow of kr_hwerrmask -- confirm */
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;		/* number of device counters */
	u32 nportcntrs;		/* number of per-port counters */
	u32 cntrnamelen;	/* size of device counter-name buffer */
	u32 portcntrnamelen;	/* size of port counter-name buffer */
	u32 numctxts;		/* receive contexts configured */
	u32 rcvegrcnt;		/* eager receive entries -- TODO confirm scope */
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;			/* nonzero for rev-1 silicon -- TODO confirm */
	int irq;		/* irq number for the non-MSI-X case */
	u32 num_msix_entries;
	u32 sdmabufcnt;		/* send buffers reserved for SDMA */
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];	/* per-rcvhdrq CPU for DCA */
	int sdma_cpu[2];	/* per-port SDMA CPU for DCA */
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;	/* bitmaps for SendCheck* registers */
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};
/* Table of entries in "human readable" form Tx Emphasis. */
/* One SerDes Tx equalization setting: amplitude plus emphasis taps. */
struct txdds_ent {
	u8 amp;		/* amplitude */
	u8 pre;		/* pre-cursor tap */
	u8 main;	/* main tap */
	u8 post;	/* post-cursor tap */
};
/*
 * Per-vendor Tx settings, keyed by QSFP vendor OUI and part number,
 * with a separate txdds_ent for each IB link speed.
 */
struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];	/* QSFP vendor OUI to match */
	u8 *partnum;		/* part-number string (NULL = any? -- verify) */
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
+ ((spd) * 2))
#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
/* Per-IB-port chip-specific state for the 7322. */
struct qib_chippport_specific {
	u64 __iomem *kpregbase;	/* per-port kernel regs (qib_*_kreg_port) */
	u64 __iomem *cpregbase;	/* per-port counter regs (*_creg_port) */
	u64 *portcntrs;		/* port counter snapshot buffer -- TODO confirm */
	struct qib_pportdata *ppd;	/* back-pointer to generic port struct */
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;	/* link state-chase timeout -- see qib_chase */
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors. They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;	/* presumably a jiffies deadline; see QDR_DFE_DISABLE_DELAY */
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * entry zero is unused, to simplify indexing
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
};
/*
 * Interrupt vector table: per-vector device-name suffix, handler, the
 * bit's lsb within IntStatus (-1 for the catch-all general interrupt),
 * the owning port, and a dca flag (presumably: vector participates in
 * DCA affinity tracking -- confirm against the DCA setup code).
 */
static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
};
#ifdef CONFIG_INFINIBAND_QIB_DCA
/*
 * Where each receive header queue's DCA "OPH" field lives: the
 * dca_rcvhdr_ctrl[] shadow slot (0..4 map to DCACtrl B..F), the field's
 * lsb, a mask that clears just that field (~SYM_MASK), and the kernel
 * register index of the containing DCACtrl register.
 */
static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
};
#endif
/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
#define BLOB_7322_IBCHG 0x101
static inline void qib_write_kreg(const struct qib_devdata *dd,
const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
struct qib_msix_entry *m);
#endif
/**
* qib_read_ureg32 - read 32-bit virtualized per-context register
* @dd: device
* @regno: register number
* @ctxt: context number
*
* Return the contents of a register that is virtualized to be per context.
* Returns -1 on errors (not distinguishable from valid contents at
* runtime; we may add a separate error variable at some point).
*/
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
enum qib_ureg regno, int ctxt)
{
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
return 0;
return readl(regno + (u64 __iomem *)(
(dd->ureg_align * ctxt) + (dd->userbase ?
(char __iomem *)dd->userbase :
(char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
* qib_read_ureg - read virtualized per-context register
* @dd: device
* @regno: register number
* @ctxt: context number
*
* Return the contents of a register that is virtualized to be per context.
* Returns -1 on errors (not distinguishable from valid contents at
* runtime; we may add a separate error variable at some point).
*/
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
enum qib_ureg regno, int ctxt)
{
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
return 0;
return readq(regno + (u64 __iomem *)(
(dd->ureg_align * ctxt) + (dd->userbase ?
(char __iomem *)dd->userbase :
(char __iomem *)dd->kregbase + dd->uregbase)));
}
/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 * Silently does nothing if the chip is absent or not mapped.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	char __iomem *base;

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return;
	/* user regs are either a separate mapping, or follow the kregs */
	base = dd->userbase ? (char __iomem *)dd->userbase :
			      (char __iomem *)dd->kregbase + dd->uregbase;
	writeq(value, (u64 __iomem *)(base + dd->ureg_align * ctxt) + regno);
}
/*
 * Read the low 32 bits of a device kernel register; all-ones ((u32)-1)
 * when the chip is absent or not mapped.
 */
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		return readl((u32 __iomem *)&dd->kregbase[regno]);
	return -1;
}
/*
 * Read a 64-bit device kernel register; all-ones ((u64)-1) when the
 * chip is absent or not mapped.
 */
static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		return readq(&dd->kregbase[regno]);
	return -1;
}
/* Write a 64-bit device kernel register; no-op if chip absent/unmapped. */
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return;
	writeq(value, &dd->kregbase[regno]);
}
/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */

/*
 * Read a per-port kernel register; returns 0 when the port's register
 * mapping or the chip itself is unavailable.
 *
 * Also guard against a NULL ppd->cpspec: the write side
 * (qib_write_kreg_port) and the *_creg_port accessors below already
 * check it, so check here too for consistency and to avoid a NULL
 * dereference if called before the port state is allocated.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->kpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}
/* Write a per-port kernel register; no-op if port/chip not ready. */
static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (!ppd->cpspec || !ppd->dd || !ppd->cpspec->kpregbase)
		return;
	if (!(ppd->dd->flags & QIB_PRESENT))
		return;
	writeq(value, &ppd->cpspec->kpregbase[regno]);
}
/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 *
 * Per-context registers are laid out consecutively, so the context
 * number is simply an index offset from the base register number.
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	const u32 idx = regno + ctxt;

	qib_write_kreg(dd, idx, value);
}
/* Read a 64-bit device counter register; 0 if unavailable. */
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
		return readq(&dd->cspec->cregbase[regno]);
	return 0;
}
/* Read the low 32 bits of a device counter register; 0 if unavailable. */
static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
		return readl(&dd->cspec->cregbase[regno]);
	return 0;
}
/* Write a per-port counter register; no-op if port/chip not ready. */
static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase)
		return;
	if (!(ppd->dd->flags & QIB_PRESENT))
		return;
	writeq(value, &ppd->cpspec->cpregbase[regno]);
}
/* Read a 64-bit per-port counter register; 0 if unavailable. */
static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		return readq(&ppd->cpspec->cpregbase[regno]);
	return 0;
}
/* Read the low 32 bits of a per-port counter register; 0 if unavailable. */
static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		return readl(&ppd->cpspec->cpregbase[regno]);
	return 0;
}
/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)
#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
INT_MASK_P(SDmaProgress, pidx) | \
INT_MASK_PM(SDmaCleanupDone, pidx))
/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
INT_MASK_P(SDmaProgress, pidx) | \
INT_MASK_PM(SDmaCleanupDone, pidx))
/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
QIB_I_SPIOSENT | \
QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
/*
* Error bits that are "per port".
*/
/*
 * NOTE(review): ERR_MASK_N() presumably selects the port-relative variant
 * of the named ErrStatus/ErrMask bit (macro defined earlier in this file)
 * — confirm against the register header.
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
/* Per-port SDMA engine error bits */
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
/*
* Per chip (rather than per-port) errors. Most either do
* nothing but trigger a print (because they self-recover, or
* always occur in tandem with other errors that handle the
* issue), or because they indicate errors with no recovery,
* but we want to know that they happened.
*/
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
/* SDMA chip errors (not per port)
* QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
* the SDMAHALT error immediately, so we just print the dup error via the
* E_AUTO mechanism. This is true of most of the per-port fatal errors
* as well, but since this is port-independent, by definition, it's
* handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
* packet send errors, and so are handled in the same manner as other
* per-packet errors.
*/
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
/*
* Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
* it is used to print "common" packet errors.
*/
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
QIB_E_P_REBP)
/* Error Bits that Packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
/*
* Error bits that are Send-related (per port)
* (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
* All of these potentially need to have a buffer disarmed
*/
#define QIB_E_P_SPKTERRS (\
QIB_E_P_SUNEXP_PKTNUM |\
QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
QIB_E_P_SMAXPKTLEN |\
QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
/*
 * Device-level send errors.
 * NOTE(review): this device-level mask mixes in the per-port macro
 * ERR_MASK_N(SendUnsupportedVLErr) — confirm that is intentional and not
 * meant to be a device-level ERR_MASK() bit.
 */
#define QIB_E_SPKTERRS ( \
QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
ERR_MASK_N(SendUnsupportedVLErr) | \
QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
/* All per-port SDMA engine errors */
#define QIB_E_P_SDMAERRS ( \
QIB_E_P_SDMAHALT | \
QIB_E_P_SDMADESCADDRMISALIGN | \
QIB_E_P_SDMAUNEXPDATA | \
QIB_E_P_SDMAMISSINGDW | \
QIB_E_P_SDMADWEN | \
QIB_E_P_SDMARPYTAG | \
QIB_E_P_SDMA1STDESC | \
QIB_E_P_SDMABASE | \
QIB_E_P_SDMATAILOUTOFBOUND | \
QIB_E_P_SDMAOUTOFBOUND | \
QIB_E_P_SDMAGENMISMATCH)
/*
* This sets some bits more than once, but makes it more obvious which
* bits are not handled under other categories, and the repeat definition
* is not a problem.
*/
#define QIB_E_P_BITSEXTANT ( \
QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
)
/*
* These are errors that can occur when the link
* changes state while a packet is being sent or received. This doesn't
* cover things like EBP or VCRC that can be the result of a sending
* having the link change state, so we receive a "known bad" packet.
* All of these are "per port", so renamed:
*/
#define QIB_E_P_LINK_PKTERRS (\
QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
QIB_E_P_RUNEXPCHAR)
/*
* This sets some bits more than once, but makes it more obvious which
* bits are not handled under other categories (such as QIB_E_SPKTERRS),
* and the repeat definition is not a problem.
*/
#define QIB_E_C_BITSEXTANT (\
QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0
#define QIB_EXTS_MEMBIST_DISABLED \
SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
SYM_MASK(EXTStatus, MemBISTEndTest)
#define QIB_E_SPIOARMLAUNCH \
ERR_MASK(SendArmLaunchErr)
#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
/*
* IBTA_1_2 is set when multiple speeds are enabled (normal),
* and also if forced QDR (only QDR enabled). It's enabled for the
* forced QDR case so that scrambling will be enabled by the TS3
* exchange, when supported by both sides of the link.
*/
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
#define IBA7322_REDIRECT_VEC_PER_REG 12
#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
/*
 * HWE_AUTO/HWE_AUTO_P build { mask, name, size } entries for the
 * hardware-error message tables; .sz includes the terminating NUL.
 */
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
.msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
/*
 * Hardware-error bits and their printable names.
 * Terminated by the zero-mask/zero-size sentinel entry.
 */
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
HWE_AUTO(PCIESerdesPClkNotDetect),
HWE_AUTO(PowerOnBISTFailed),
HWE_AUTO(TempsenseTholdReached),
HWE_AUTO(MemoryErr),
HWE_AUTO(PCIeBusParityErr),
HWE_AUTO(PcieCplTimeout),
HWE_AUTO(PciePoisonedTLP),
HWE_AUTO_P(SDmaMemReadErr, 1),
HWE_AUTO_P(SDmaMemReadErr, 0),
HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
HWE_AUTO_P(IBCBusToSPCParityErr, 1),
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
HWE_AUTO(statusValidNoEop),
HWE_AUTO(LATriggered),
{ .mask = 0, .sz = 0 }
};
/*
 * E_AUTO builds a device-level ErrMask entry; E_P_AUTO builds a per-port
 * (ErrMask_0 layout) entry.  .sz counts the terminating NUL.
 */
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
/* Device-wide (not per-port) error bits and names; sentinel-terminated */
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(RcvEgrFullErr),
E_AUTO(RcvHdrFullErr),
E_AUTO(ResetNegated),
E_AUTO(HardwareErr),
E_AUTO(InvalidAddrErr),
E_AUTO(SDmaVL15Err),
E_AUTO(SBufVL15MisUseErr),
E_AUTO(InvalidEEPCmd),
E_AUTO(RcvContextShareErr),
E_AUTO(SendVLMismatchErr),
E_AUTO(SendArmLaunchErr),
E_AUTO(SendSpecialTriggerErr),
E_AUTO(SDmaWrongPortErr),
E_AUTO(SDmaBufMaskDuplicateErr),
{ .mask = 0, .sz = 0 }
};
/* Per-port error bits and names; sentinel-terminated */
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
E_P_AUTO(IBStatusChanged),
E_P_AUTO(SHeadersErr),
E_P_AUTO(VL15BufMisuseErr),
/*
* SDmaHaltErr is not really an error, make it clearer;
* .sz = 11 == sizeof("SDmaHalted"), including the NUL.
*/
{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
.sz = 11},
E_P_AUTO(SDmaDescAddrMisalignErr),
E_P_AUTO(SDmaUnexpDataErr),
E_P_AUTO(SDmaMissingDwErr),
E_P_AUTO(SDmaDwEnErr),
E_P_AUTO(SDmaRpyTagErr),
E_P_AUTO(SDma1stDescErr),
E_P_AUTO(SDmaBaseErr),
E_P_AUTO(SDmaTailOutOfBoundErr),
E_P_AUTO(SDmaOutOfBoundErr),
E_P_AUTO(SDmaGenMismatchErr),
E_P_AUTO(SendBufMisuseErr),
E_P_AUTO(SendUnsupportedVLErr),
E_P_AUTO(SendUnexpectedPktNumErr),
E_P_AUTO(SendDroppedDataPktErr),
E_P_AUTO(SendDroppedSmpPktErr),
E_P_AUTO(SendPktLenErr),
E_P_AUTO(SendUnderRunErr),
E_P_AUTO(SendMaxPktLenErr),
E_P_AUTO(SendMinPktLenErr),
E_P_AUTO(RcvIBLostLinkErr),
E_P_AUTO(RcvHdrErr),
E_P_AUTO(RcvHdrLenErr),
E_P_AUTO(RcvBadTidErr),
E_P_AUTO(RcvBadVersionErr),
E_P_AUTO(RcvIBFlowErr),
E_P_AUTO(RcvEBPErr),
E_P_AUTO(RcvUnsupportedVLErr),
E_P_AUTO(RcvUnexpectedCharErr),
E_P_AUTO(RcvShortPktLenErr),
E_P_AUTO(RcvLongPktLenErr),
E_P_AUTO(RcvMaxPktLenErr),
E_P_AUTO(RcvMinPktLenErr),
E_P_AUTO(RcvICRCErr),
E_P_AUTO(RcvVCRCErr),
E_P_AUTO(RcvFormatErr),
{ .mask = 0, .sz = 0 }
};
/*
* Below generates "auto-message" for interrupts not specific to any port or
* context
*/
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_0), \
SYM_LSB(IntMask, fldname##Mask##_1)), \
.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_1), \
SYM_LSB(IntMask, fldname##Mask##_0)), \
.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
* Below generates "auto-message" for interrupts specific to a context,
* with ctxt-number appended
*/
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##0IntMask), \
SYM_LSB(IntMask, fldname##17IntMask)), \
.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
/* Entry for one bit of the send-header-error symptom register */
#define TXSYMPTOM_AUTO_P(fldname) \
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
.msg = #fldname, .sz = sizeof(#fldname) }
/* Send-header-check failure symptoms; sentinel-terminated */
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(NonKeyPacket),
TXSYMPTOM_AUTO_P(GRHFail),
TXSYMPTOM_AUTO_P(PkeyFail),
TXSYMPTOM_AUTO_P(QPFail),
TXSYMPTOM_AUTO_P(SLIDFail),
TXSYMPTOM_AUTO_P(RawIPV6),
TXSYMPTOM_AUTO_P(PacketTooSmall),
{ .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
 * because we don't need to force the update of pioavail.
 *
 * Reads (and write-1-clears) each SendBufErr register; if any bits were
 * set, hands the collected bitmap to qib_disarm_piobufs_set().
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 numbufs = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 nregs = (numbufs + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long errbits[4];
	int have_errs = 0;
	u32 regno;

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	for (regno = 0; regno < nregs; ++regno) {
		errbits[regno] =
			qib_read_kreg64(dd, kr_sendbuffererror + regno);
		if (!errbits[regno])
			continue;
		have_errs = 1;
		/* write-back clears the latched error bits */
		qib_write_kreg(dd, kr_sendbuffererror + regno,
			       errbits[regno]);
	}

	if (have_errs)
		qib_disarm_piobufs_set(dd, errbits, numbufs);
}
/* No txe_recover yet, if ever */
/* No decode__errors yet */
/*
 * err_decode - build a comma-separated list of names for the set bits
 * @msg: destination buffer
 * @len: space available in @msg
 * @errs: error bits to decode
 * @msp: table of { mask, name, size } entries, terminated by a zero mask
 *
 * For each table entry whose mask intersects @errs, the entry's name is
 * appended; for masks spanning more than one bit, "_<idx>" is appended
 * with the bit's index within the mask.  Any bits matched by no entry are
 * printed in hex after "MORE:".
 */
static void err_decode(char *msg, size_t len, u64 errs,
const struct qib_hwerror_msgs *msp)
{
u64 these, lmask;
int took, multi, n = 0;
while (errs && msp && msp->mask) {
/* nonzero iff this entry's mask covers more than one bit */
multi = (msp->mask & (msp->mask - 1));
while (errs & msp->mask) {
these = (errs & msp->mask);
/* isolate the lowest set bit of "these" */
lmask = (these & (these - 1)) ^ these;
if (len) {
if (n++) {
/* separate the strings */
*msg++ = ',';
len--;
}
BUG_ON(!msp->sz);
/* msp->sz counts the nul */
took = min_t(size_t, msp->sz - (size_t)1, len);
memcpy(msg, msp->msg, took);
len -= took;
msg += took;
if (len)
*msg = '\0';
}
/* this bit is handled; drop it from the working set */
errs &= ~lmask;
if (len && multi) {
/* More than one bit this mask */
int idx = -1;
/* count lmask's bit position relative to the mask's LSB */
while (lmask & msp->mask) {
++idx;
lmask >>= 1;
}
took = scnprintf(msg, len, "_%d", idx);
len -= took;
msg += took;
}
}
++msp;
}
/* If some bits are left, show in hex. */
if (len && errs)
snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
(unsigned long long) errs);
}
/* only called if r1 set */
/*
 * flush_fifo - flush the send launch FIFO on rev-1 chips
 * @ppd: port data
 *
 * Builds a dummy VL15 UD packet and writes it to a PIO buffer.  Because
 * the caller has TxeBypassIbc set (see qib_7322_sdma_sendctrl's DRAIN
 * path), the packet is never actually transmitted on the wire.
 */
static void flush_fifo(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
u32 __iomem *piobuf;
u32 bufn;
u32 *hdr;
u64 pbc;
const unsigned hdrwords = 7;
/* Minimal permissive-LID UD header; contents never leave the chip */
static struct qib_ib_header ibhdr = {
.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
.lrh[1] = IB_LID_PERMISSIVE,
.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
.lrh[3] = IB_LID_PERMISSIVE,
.u.oth.bth[0] = cpu_to_be32(
(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
.u.oth.bth[1] = cpu_to_be32(0),
.u.oth.bth[2] = cpu_to_be32(0),
.u.oth.u.ud.deth[0] = cpu_to_be32(0),
.u.oth.u.ud.deth[1] = cpu_to_be32(0),
};
/*
* Send a dummy VL15 packet to flush the launch FIFO.
* This will not actually be sent since the TxeBypassIbc bit is set.
*/
pbc = PBC_7322_VL15_SEND |
(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
(hdrwords + SIZE_OF_CRC);
piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
if (!piobuf)
return;
writeq(pbc, piobuf);
hdr = (u32 *) &ibhdr;
if (dd->flags & QIB_PIO_FLUSH_WC) {
/* write-combining: fence around the last header word */
qib_flush_wc();
qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
qib_flush_wc();
__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
qib_flush_wc();
} else
qib_pio_copy(piobuf + 2, hdr, hdrwords);
qib_sendbuf_done(dd, bufn);
}
/*
 * qib_7322_sdma_sendctrl - apply SDMA-related SendCtrl operations
 * @ppd: port data
 * @op: bitmask of QIB_SDMA_SENDCTRL_OP_* flags
 *
 * Translates @op into SendCtrl bits to set and clear (SDmaEnable,
 * SDmaIntEnable, SDmaHalt, and the Txe* drain bits), then applies them to
 * the p_sendctrl shadow under sendctrl_lock.  For DRAIN, normal sends are
 * blocked before and re-enabled after; each port-register write is
 * followed by a write to kr_scratch (presumably to flush the write —
 * consistent with usage elsewhere in this file).
 *
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
struct qib_devdata *dd = ppd->dd;
u64 set_sendctrl = 0;
u64 clr_sendctrl = 0;
if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
else
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
else
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
if (op & QIB_SDMA_SENDCTRL_OP_HALT)
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
else
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
SYM_MASK(SendCtrl_0, TxeAbortIbc) |
SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
else
clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
SYM_MASK(SendCtrl_0, TxeAbortIbc) |
SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
spin_lock(&dd->sendctrl_lock);
/* If we are draining everything, block sends first */
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
qib_write_kreg(dd, kr_scratch, 0);
}
ppd->p_sendctrl |= set_sendctrl;
ppd->p_sendctrl &= ~clr_sendctrl;
/* SDmaCleanup is a one-shot: written to hardware, not kept in shadow */
if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
qib_write_kreg_port(ppd, krp_sendctrl,
ppd->p_sendctrl |
SYM_MASK(SendCtrl_0, SDmaCleanup));
else
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
qib_write_kreg(dd, kr_scratch, 0);
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
qib_write_kreg(dd, kr_scratch, 0);
}
spin_unlock(&dd->sendctrl_lock);
if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
flush_fifo(ppd);
}
/* Report hardware SDMA cleanup completion to the SDMA state machine */
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}
/*
 * qib_sdma_7322_setlengen - program SendDmaLenGen for this port
 *
 * Written twice on purpose: first with just the descriptor-queue count
 * (generation MSB clear), then with the MSB set, which loads the internal
 * generation counter and enables generation checking.
 */
static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
/*
* Set SendDmaLenGen and clear and set
* the MSB of the generation count to enable generation checking
* and load the internal generation counter.
*/
qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
qib_write_kreg_port(ppd, krp_senddmalengen,
ppd->sdma_descq_cnt |
(1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}
/*
 * qib_sdma_update_7322_tail - publish a new SDMA descriptor tail
 * @ppd: port data
 * @tail: new tail index
 *
 * Must be called with sdma_lock held, or before init finished.
 * The wmb() orders descriptor-memory writes before the tail register
 * write that makes them visible to the DMA engine.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
/* Commit writes to memory and advance the tail on the chip */
wmb();
ppd->sdma_descq_tail = tail;
qib_write_kreg_port(ppd, krp_senddmatail, tail);
}
/*
 * qib_7322_sdma_hw_start_up - (re)initialize the SDMA engine hardware
 *
 * Flushes send buffers, reprograms length/generation, zeroes the tail and
 * the in-memory head shadow, then issues an SDMA cleanup via sendctrl.
 *
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
/*
* Drain all FIFOs.
* The hardware doesn't require this but we do it so that verbs
* and user applications don't wait for link active to send stale
* data.
*/
sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
qib_sdma_7322_setlengen(ppd);
qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
/* reset the DMA-updated head shadow as well */
ppd->sdma_head_dma[0] = 0;
qib_7322_sdma_sendctrl(ppd,
ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}
/*
 * Per-port SDMA error bits that disable the engine.
 * NOTE(review): identical to QIB_E_P_SDMAERRS except that it omits
 * QIB_E_P_SDMAUNEXPDATA — confirm whether that difference is intentional.
 */
#define DISABLES_SDMA ( \
QIB_E_P_SDMAHALT | \
QIB_E_P_SDMADESCADDRMISALIGN | \
QIB_E_P_SDMAMISSINGDW | \
QIB_E_P_SDMADWEN | \
QIB_E_P_SDMARPYTAG | \
QIB_E_P_SDMA1STDESC | \
QIB_E_P_SDMABASE | \
QIB_E_P_SDMATAILOUTOFBOUND | \
QIB_E_P_SDMAOUTOFBOUND | \
QIB_E_P_SDMAGENMISMATCH)
/*
 * sdma_7322_p_errors - handle per-port SDMA errors
 * @ppd: port data
 * @errs: error bits (masked here to QIB_E_P_SDMAERRS)
 *
 * Decodes the error bits into cpspec->sdmamsgbuf, logs anything beyond a
 * bare SDmaHalt, then feeds the SDMA state machine the event appropriate
 * to its current state (started / cleaned / halted).
 */
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
unsigned long flags;
struct qib_devdata *dd = ppd->dd;
errs &= QIB_E_P_SDMAERRS;
err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
errs, qib_7322p_error_msgs);
if (errs & QIB_E_P_SDMAUNEXPDATA)
qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
ppd->port);
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (errs != QIB_E_P_SDMAHALT) {
/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
qib_dev_porterr(dd, ppd->port,
"SDMA %s 0x%016llx %s\n",
qib_sdma_state_names[ppd->sdma_state.current_state],
errs, ppd->cpspec->sdmamsgbuf);
dump_sdma_7322_state(ppd);
}
/* Advance the SDMA state machine based on where it currently is */
switch (ppd->sdma_state.current_state) {
case qib_sdma_state_s00_hw_down:
break;
case qib_sdma_state_s10_hw_start_up_wait:
if (errs & QIB_E_P_SDMAHALT)
__qib_sdma_process_event(ppd,
qib_sdma_event_e20_hw_started);
break;
case qib_sdma_state_s20_idle:
break;
case qib_sdma_state_s30_sw_clean_up_wait:
break;
case qib_sdma_state_s40_hw_clean_up_wait:
if (errs & QIB_E_P_SDMAHALT)
__qib_sdma_process_event(ppd,
qib_sdma_event_e50_hw_cleaned);
break;
case qib_sdma_state_s50_hw_halt_wait:
if (errs & QIB_E_P_SDMAHALT)
__qib_sdma_process_event(ppd,
qib_sdma_event_e60_hw_halted);
break;
case qib_sdma_state_s99_running:
/* running: report the error, then the resulting halt */
__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
break;
}
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
* handle per-device errors (not per-port errors)
*/
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
char *msg;
u64 iserr = 0;
u64 errs;
u64 mask;
int log_idx;
qib_stats.sps_errints++;
errs = qib_read_kreg64(dd, kr_errstatus);
if (!errs) {
qib_devinfo(dd->pcidev,
"device error interrupt, but no error bits set!\n");
goto done;
}
/* don't report errors that are masked */
errs &= dd->cspec->errormask;
msg = dd->cspec->emsgbuf;
/* do these first, they are most important */
if (errs & QIB_E_HARDWARE) {
*msg = '\0';
qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
} else
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
if (errs & dd->eep_st_masks[log_idx].errs_to_log)
qib_inc_eeprom_err(dd, log_idx, 1);
/* send-side errors may require disarming PIO buffers */
if (errs & QIB_E_SPKTERRS) {
qib_disarm_7322_senderrbufs(dd->pport);
qib_stats.sps_txerrs++;
} else if (errs & QIB_E_INVALIDADDR)
qib_stats.sps_txerrs++;
else if (errs & QIB_E_ARMLAUNCH) {
qib_stats.sps_txerrs++;
qib_disarm_7322_senderrbufs(dd->pport);
}
qib_write_kreg(dd, kr_errclear, errs);
/*
* The ones we mask off are handled specially below
* or above. Also mask SDMADISABLED by default as it
* is too chatty.
* NOTE(review): only QIB_E_HARDWARE is actually masked here — confirm
* whether SDMADISABLED was meant to be included as the comment says.
*/
mask = QIB_E_HARDWARE;
*msg = '\0';
err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
qib_7322error_msgs);
/*
* Getting reset is a tragedy for all ports. Mark the device
* _and_ the ports as "offline" in way meaningful to each.
*/
if (errs & QIB_E_RESET) {
int pidx;
qib_dev_err(dd,
"Got reset, requires re-init (unload and reload driver)\n");
dd->flags &= ~QIB_INITTED; /* needs re-init */
/* mark as having had error */
*dd->devstatusp |= QIB_STATUS_HWERROR;
for (pidx = 0; pidx < dd->num_pports; ++pidx)
if (dd->pport[pidx].link_speed_supported)
*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
}
/*
* NOTE(review): iserr is initialized to 0 and never updated in this
* function, so this qib_dev_err() can never fire — confirm intent.
*/
if (*msg && iserr)
qib_dev_err(dd, "%s error\n", msg);
/*
* If there were hdrq or egrfull errors, wake up any processes
* waiting in poll. We used to try to check which contexts had
* the overflow, but given the cost of that and the chip reads
* to support it, it's better to just wake everybody up if we
* get an overflow; waiters can poll again if it's not them.
*/
if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
qib_handle_urcv(dd, ~0U);
if (errs & ERR_MASK(RcvEgrFullErr))
qib_stats.sps_buffull++;
else
qib_stats.sps_hdrfull++;
}
done:
return;
}
/*
 * Tasklet body: handle device errors, then restore the error interrupt
 * mask (it may have been cleared while the tasklet was pending).
 */
static void qib_error_tasklet(unsigned long data)
{
struct qib_devdata *dd = (struct qib_devdata *)data;
handle_7322_errors(dd);
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
 * Timer handler for cpspec->chase_timer (armed in disable_chase()):
 * re-enable the link by commanding DOWN/POLL after the disable period.
 */
static void reenable_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
/* expires == 0 marks the timer as no longer pending (see callers) */
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
/*
 * disable_chase - break out of a link-training "chase" by disabling the
 * link and arming chase_timer to re-enable it after QIB_CHASE_DIS_TIME.
 * @tnow and @ibclt are currently unused here.
 * If the qib_chase module knob is off, just clears chase_end and returns.
 */
static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
u8 ibclt)
{
ppd->cpspec->chase_end = 0;
if (!qib_chase)
return;
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
add_timer(&ppd->cpspec->chase_timer);
}
/*
 * handle_serdes_issues - per-IBC-status SerDes workarounds
 * @ppd: port data
 * @ibcst: current IBCStatusA value
 *
 * Handles the link-training "chase" hang, QDR H1 re-forcing, QMH/QME
 * transmit SerDes adjustment, and LOS/QDR-DFE re-enable on link down.
 */
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
/*
* Detect and handle the state chase issue, where we can
* get stuck if we are unlucky on timing on both sides of
* the link. If we are, we disable, set a timer, and
* then re-enable.
*/
switch (ibclt) {
case IB_7322_LT_STATE_CFGRCVFCFG:
case IB_7322_LT_STATE_CFGWAITRMT:
case IB_7322_LT_STATE_TXREVLANES:
case IB_7322_LT_STATE_CFGENH:
tnow = jiffies;
if (ppd->cpspec->chase_end &&
time_after(tnow, ppd->cpspec->chase_end))
disable_chase(ppd, tnow, ibclt);
else if (!ppd->cpspec->chase_end)
ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
break;
default:
ppd->cpspec->chase_end = 0;
break;
}
/* In config/linkup states at QDR: re-force H1 and disable LOS (non-r1) */
if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
ibclt == IB_7322_LT_STATE_LINKUP) &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
if (!ppd->dd->cspec->r1)
serdes_7322_los_enable(ppd, 0);
} else if (ppd->cpspec->qdr_reforce &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
(ibclt == IB_7322_LT_STATE_CFGENH ||
ibclt == IB_7322_LT_STATE_CFGIDLE ||
ibclt == IB_7322_LT_STATE_LINKUP))
force_h1(ppd);
/* QMH/QME boards need TX SerDes adjustment while negotiating QDR */
if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
ppd->link_speed_enabled == QIB_IB_QDR &&
(ibclt == IB_7322_LT_STATE_CFGTEST ||
ibclt == IB_7322_LT_STATE_CFGENH ||
(ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
adj_tx_serdes(ppd);
if (ibclt != IB_7322_LT_STATE_LINKUP) {
u8 ltstate = qib_7322_phys_portstate(ibcst);
u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
LinkTrainingState);
if (!ppd->dd->cspec->r1 &&
pibclt == IB_7322_LT_STATE_LINKUP &&
ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
/* If the link went down (but not into recovery),
* turn LOS back on */
serdes_7322_los_enable(ppd, 1);
if (!ppd->cpspec->qdr_dfe_on &&
ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
ppd->cpspec->qdr_dfe_on = 1;
ppd->cpspec->qdr_dfe_time = 0;
/* On link down, reenable QDR adaptation */
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
ppd->dd->cspec->r1 ?
QDR_STATIC_ADAPT_DOWN_R1 :
QDR_STATIC_ADAPT_DOWN);
pr_info(
"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
ppd->dd->unit, ppd->port, ibclt);
}
}
}
static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
/*
* This is per-pport error handling.
* will likely get its own MSIx interrupt (one for each port,
* although just a single handler).
*
* Reads and clears the port error status, decodes/logs the bits, disarms
* send buffers on send-packet errors, forwards SDMA errors to
* sdma_7322_p_errors(), and processes IB status changes (width/speed
* update, serdes workarounds, link-state handling).
*/
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
char *msg;
u64 ignore_this_time = 0, iserr = 0, errs, fmask;
struct qib_devdata *dd = ppd->dd;
/* do this as soon as possible */
fmask = qib_read_kreg64(dd, kr_act_fmask);
if (!fmask)
check_7322_rxe_status(ppd);
errs = qib_read_kreg_port(ppd, krp_errstatus);
if (!errs)
qib_devinfo(dd->pcidev,
"Port%d error interrupt, but no error bits set!\n",
ppd->port);
/* in freeze mode, IBStatusChanged is stale; ignore it */
if (!fmask)
errs &= ~QIB_E_P_IBSTATUSCHANGED;
if (!errs)
goto done;
msg = ppd->cpspec->epmsgbuf;
*msg = '\0';
/* log any bits outside the known per-port set */
if (errs & ~QIB_E_P_BITSEXTANT) {
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
if (!*msg)
snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
"no others");
qib_dev_porterr(dd, ppd->port,
"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
(errs & ~QIB_E_P_BITSEXTANT), msg);
*msg = '\0';
}
if (errs & QIB_E_P_SHDR) {
u64 symptom;
/* determine cause, then write to clear */
symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
hdrchk_msgs);
*msg = '\0';
/* senderrbuf cleared in SPKTERRS below */
}
if (errs & QIB_E_P_SPKTERRS) {
if ((errs & QIB_E_P_LINK_PKTERRS) &&
!(ppd->lflags & QIBL_LINKACTIVE)) {
/*
* This can happen when trying to bring the link
* up, but the IB link changes state at the "wrong"
* time. The IB logic then complains that the packet
* isn't valid. We don't want to confuse people, so
* we just don't print them, except at debug
*/
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
(errs & QIB_E_P_LINK_PKTERRS),
qib_7322p_error_msgs);
*msg = '\0';
ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
}
qib_disarm_7322_senderrbufs(ppd);
} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
!(ppd->lflags & QIBL_LINKACTIVE)) {
/*
* This can happen when SMA is trying to bring the link
* up, but the IB link changes state at the "wrong" time.
* The IB logic then complains that the packet isn't
* valid. We don't want to confuse people, so we just
* don't print them, except at debug
*/
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
qib_7322p_error_msgs);
ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
*msg = '\0';
}
qib_write_kreg_port(ppd, krp_errclear, errs);
errs &= ~ignore_this_time;
if (!errs)
goto done;
if (errs & QIB_E_P_RPKTERRS)
qib_stats.sps_rcverrs++;
if (errs & QIB_E_P_SPKTERRS)
qib_stats.sps_txerrs++;
/* only non-packet errors are worth an explicit error print below */
iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
if (errs & QIB_E_P_SDMAERRS)
sdma_7322_p_errors(ppd, errs);
if (errs & QIB_E_P_IBSTATUSCHANGED) {
u64 ibcs;
u8 ltstate;
ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
ltstate = qib_7322_phys_portstate(ibcs);
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
handle_serdes_issues(ppd, ibcs);
if (!(ppd->cpspec->ibcctrl_a &
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
/*
* We got our interrupt, so init code should be
* happy and not try alternatives. Now squelch
* other "chatter" from link-negotiation (pre Init)
*/
ppd->cpspec->ibcctrl_a |=
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
qib_write_kreg_port(ppd, krp_ibcctrl_a,
ppd->cpspec->ibcctrl_a);
}
/* Update our picture of width and speed from chip */
ppd->link_width_active =
(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
IB_WIDTH_4X : IB_WIDTH_1X;
ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
QIB_IB_DDR : QIB_IB_SDR;
if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
IB_PHYSPORTSTATE_DISABLED)
qib_set_ib_7322_lstate(ppd, 0,
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
else
/*
* Since going into a recovery state causes the link
* state to go down and since recovery is transitory,
* it is better if we "miss" ever seeing the link
* training state go into recovery (i.e., ignore this
* transition for link state special handling purposes)
* without updating lastibcstat.
*/
if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
qib_handle_e_ibstatuschanged(ppd, ibcs);
}
if (*msg && iserr)
qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
if (ppd->state_wanted & ppd->lflags)
wake_up_interruptible(&ppd->state_wait);
done:
return;
}
/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (!enable) {
		/* mask everything off */
		qib_write_kreg(dd, kr_intmask, 0ULL);
		return;
	}

	if (dd->flags & QIB_BADINTR)
		return;

	qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
	/* cause any pending enabled interrupts to be re-delivered */
	qib_write_kreg(dd, kr_intclear, 0ULL);
	if (dd->cspec->num_msix_entries) {
		/* and same for MSIx */
		u64 granted = qib_read_kreg64(dd, kr_intgranted);

		if (granted)
			qib_write_kreg(dd, kr_intgranted, granted);
	}
}
/*
* Try to cleanup as much as possible for anything that might have gone
* wrong while in freeze mode, such as pio buffers being written by user
* processes (causing armlaunch), send errors due to going into freeze mode,
* etc., and try to avoid causing extra interrupts while doing so.
* Forcibly update the in-memory pioavail register copies after cleanup
* because the chip won't do it while in freeze mode (the register values
* themselves are kept correct).
* Make sure that we don't lose any important interrupts by using the chip
* feature that says that writing 0 to a bit in *clear that is set in
* *status will cause an interrupt to be generated again (if allowed by
* the *mask value).
* This is in chip-specific code because of all of the register accesses,
* even though the details are similar on most chips.
*/
static void qib_7322_clear_freeze(struct qib_devdata *dd)
{
int pidx;
/* disable error interrupts, to avoid confusion */
qib_write_kreg(dd, kr_errmask, 0ULL);
for (pidx = 0; pidx < dd->num_pports; ++pidx)
if (dd->pport[pidx].link_speed_supported)
qib_write_kreg_port(dd->pport + pidx, krp_errmask,
0ULL);
/* also disable interrupts; errormask is sometimes overwritten */
qib_7322_set_intr_state(dd, 0);
/* clear the freeze, and be sure chip saw it */
qib_write_kreg(dd, kr_control, dd->control);
/* read from scratch to flush the control write */
qib_read_kreg32(dd, kr_scratch);
/*
* Force new interrupt if any hwerr, error or interrupt bits are
* still set, and clear "safe" send packet errors related to freeze
* and cancelling sends. Re-enable error interrupts before possible
* force of re-interrupt on pending interrupts.
*/
qib_write_kreg(dd, kr_hwerrclear, 0ULL);
qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
/* We need to purge per-port errs and reset mask, too */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
if (!dd->pport[pidx].link_speed_supported)
continue;
qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
}
qib_7322_set_intr_state(dd, 1);
}
/* no error handling to speak of */
/**
* qib_7322_handle_hwerrors - display hardware errors.
* @dd: the qlogic_ib device
* @msg: the output buffer
* @msgl: the size of the output buffer
*
* Use same msg buffer as regular errors to avoid excessive stack
* use. Most hardware errors are catastrophic, but for right now,
* we'll print them and continue. We reuse the same message buffer as
* qib_handle_errors() to avoid excessive stack usage.
*/
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		/* all-ones read: chip/bus not responding, nothing to do */
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	/* only act on the bits we are currently unmasking */
	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any set that we aren't ignoring only make the
			 * complaint once, in case it's stuck or recurring,
			 * and we get here multiple times
			 * Force link down, so switch knows, and
			 * LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			/* only ignorable bits set: thaw the chip */
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	/* decode the surviving bits into msg for the report below */
	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	/* dump SDMA state if a send-DMA memory read error hit either port */
	if (hwerrs &
	    (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
	     SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
		int pidx = 0;
		int err;
		unsigned long flags;
		struct qib_pportdata *ppd = dd->pport;

		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			err = 0;
			if (pidx == 0 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
				err++;
			if (pidx == 1 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
				err++;
			if (err) {
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				dump_sdma_7322_state(ppd);
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}
/**
* qib_7322_init_hwerrors - enable hardware errors
* @dd: the qlogic_ib device
*
* now that we have finished initializing everything that might reasonably
* cause a hardware error, and cleared those errors bits as they occur,
* we can enable hardware errors in the mask (potentially enabling
* freeze mode), and enable hardware errors as errors (along with
* everything else) in errormask
*/
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	/* MemBIST should either have been disabled or have run to the end */
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	/* shadow what the chip actually accepted */
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* and enable all per-port error interrupts on active ports */
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}
/*
* Disable and enable the armlaunch error. Used for PIO bandwidth testing
* on chips that are count-based, rather than trigger-based. There is no
* reference counting, but that's also fine, given the intended use.
* Only chip-specific because it's all register accesses
*/
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (!enable) {
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	} else {
		/* clear any stale armlaunch error before re-enabling it */
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	}
	/* push the updated shadow to the chip */
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
/*
* Formerly took parameter <which> in pre-shifted,
* pre-merged form with LinkCmd and LinkInitCmd
* together, and assuming the zero was NOP.
*/
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	/* merge both commands into the shifted word the chip expects */
	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}
/*
* The total RCV buffer memory is 64KB, used for both ports, and is
* in units of 64 bytes (same as IB flow control credit unit).
* The consumedVL unit in the same registers are in 32 byte units!
* So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
* and we can therefore allocate just 9 IB credits for 2 VL15 packets
* in krp_rxcreditvl15, rather than 10.
*/
/* receive buffer unit: 64 bytes, same as one IB flow-control credit */
#define RCV_BUF_UNITSZ 64
/* credit units available per port (64KB total, split across ports) */
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))

static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 17 credits, for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	/* remaining credits split evenly, remainder to VL0 */
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* pulse CREDIT_CHANGE: set above, now clear it again */
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	/* reads flush the credit writes; results are discarded */
	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}
/*
* The code that deals with actual SerDes is in serdes_7322_init().
* Compared to the code for iba7220, it is minimal.
*/
static int serdes_7322_init(struct qib_pportdata *ppd);
/**
* qib_7322_bringup_serdes - bring up the serdes
* @ppd: physical port on the qlogic_ib device
*/
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	/* ensure previous Tx parameters are not still forced */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	if (qib_compat_ddr_negotiate) {
		/* snapshot error counters so deltas can persist on unload */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
			crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
			crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3usec.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
			krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
			IBA7322_IBC_SPEED_DDR |
			IBA7322_IBC_SPEED_SDR |
			IBA7322_IBC_WIDTH_AUTONEG |
			SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			/* single speed: pick the matching fixed-speed bits */
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
			IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		/* derive port GUID from device base GUID when unset */
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	/* take the link down and mark it administratively DISABLED */
	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	/* stop autoneg (and, on rev1 chips, IPG) deferred work */
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pci_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				/* equivalent to val = ibsymsnap */
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				/* equivalent to val = iblnkerrsnap */
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}
/**
* qib_setup_7322_setextled - set the state of the two external LEDs
* @ppd: physical port on the qlogic_ib device
* @on: whether the link is up or not
*
* The exact combo of LEDs if on is true is determined by looking
* at the ibcstatus.
*
* These LEDs indicate the physical and logical state of IB link.
* For this chip (at least with recommended board pinouts), LED1
* is Yellow (logical state) and LED2 is Green (physical state),
*
* Note: We try to match the Mellanox HCA LED behavior as best
* we can. Green indicates physical link state is OK (something is
* plugged in, and we can train).
* Amber indicates the link is logically up (ACTIVE).
* Mellanox further blinks the amber LED to indicate data packet
* activity, but we have no hardware support for that, so it would
* require waking up every 10-20 msecs and checking the counters
* on the chip, and then turning the LED off if appropriate. That's
* visible overhead, so not something we will do.
*/
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	/* Allow override of LED display for, e.g. Locating system in rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		/* derive LED state from the live link status */
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	/* start from the shadow with this port's LED bits cleared */
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 1/16 sec (66.6ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}
#ifdef CONFIG_INFINIBAND_QIB_DCA
static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		/* register as a DCA requester, unless already enabled */
		if (!(dd->flags & QIB_DCA_ENABLED) &&
		    !dca_add_requester(&dd->pcidev->dev)) {
			qib_devinfo(dd->pcidev, "DCA enabled\n");
			dd->flags |= QIB_DCA_ENABLED;
			qib_setup_dca(dd);
		}
		break;
	case DCA_PROVIDER_REMOVE:
		if (!(dd->flags & QIB_DCA_ENABLED))
			break;
		/* drop the requester and quiesce the chip's DCA control */
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA),
			       dd->cspec->dca_ctrl);
		break;
	default:
		/* other provider events are of no interest here */
		break;
	}
	return 0;
}
static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	/* only touch the hardware when the target cpu actually changed */
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		const struct dca_reg_map *rmp;

		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		/* replace this context's tag field in the shadow register */
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}
static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	/* only touch the hardware when the target cpu actually changed */
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		/* SDMA tags for both ports live in shadow slot 4 (DCACtrlF) */
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
				(ppd->hw_pidx ?
					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
		qib_devinfo(dd->pcidev,
			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[4]);
		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}
static void qib_setup_dca(struct qib_devdata *dd)
{
	struct qib_chip_specific *cspec = dd->cspec;
	int i;

	/* -1 marks "no cpu assigned yet" for every context and port */
	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
		cspec->rhdr_cpu[i] = -1;
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		cspec->sdma_cpu[i] = -1;
	/*
	 * Initialize each DCA transfer-count field to 1; the 18 rcvhdr
	 * queue tags are spread across shadow slots 0-4 (DCACtrlB..F).
	 */
	cspec->dca_rcvhdr_ctrl[0] =
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[1] =
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[2] =
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[3] =
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[4] =
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
	/* push the shadow values to DCACtrlB..F */
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
			       cspec->dca_rcvhdr_ctrl[i]);
	/* hook affinity notifiers so tags follow irq affinity changes */
	for (i = 0; i < cspec->num_msix_entries; i++)
		setup_dca_notifier(dd, &cspec->msix_entries[i]);
}
static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct qib_irq_notify *n =
		container_of(notify, struct qib_irq_notify, notify);
	int cpu = cpumask_first(mask);

	/* re-target the DCA tag to follow the vector's new affinity */
	if (!n->rcv)
		qib_update_sdma_dca((struct qib_pportdata *)n->arg, cpu);
	else
		qib_update_rhdrq_dca((struct qib_ctxtdata *)n->arg, cpu);
}
static void qib_irq_notifier_release(struct kref *ref)
{
	struct qib_irq_notify *n =
		container_of(ref, struct qib_irq_notify, notify.kref);
	struct qib_devdata *dd;

	/* recover the device from whichever object this notifier wraps */
	dd = n->rcv ? ((struct qib_ctxtdata *)n->arg)->dd :
		      ((struct qib_pportdata *)n->arg)->dd;
	qib_devinfo(dd->pcidev,
		"release on HCA notify 0x%p n 0x%p\n", ref, n);
	kfree(n);
}
#endif
/*
* Disable MSIx interrupt if enabled, call generic MSIx code
* to cleanup, and clear pending MSIx interrupts.
* Used for fallback to INTx, after reset, and when MSIx setup fails.
*/
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	/* route everything through the main INTx handler again */
	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		/* zero the count first so nothing else uses the vectors */
		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
#endif
			irq_set_affinity_hint(
				dd->cspec->msix_entries[i].msix.vector, NULL);
			free_cpumask_var(dd->cspec->msix_entries[i].mask);
			free_irq(dd->cspec->msix_entries[i].msix.vector,
				 dd->cspec->msix_entries[i].arg);
		}
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}
static void qib_7322_free_irq(struct qib_devdata *dd)
{
	/* release the INTx line, if one was requested */
	if (dd->cspec->irq != 0) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	/* then tear down any MSIx vectors as well */
	qib_7322_nomsix(dd);
}
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

#ifdef CONFIG_INFINIBAND_QIB_DCA
	/* drop our DCA requester registration, if active */
	if (dd->flags & QIB_DCA_ENABLED) {
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
	}
#endif

	/* interrupts must go before the memory they reference is freed */
	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		/* QSFP module-present GPIO bits for both ports */
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			/* mask off QSFP presence interrupts before deinit */
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
	}
}
/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];

	/* forward the per-port SDMA status/idle/progress bits */
	if (istat & (INT_MASK_P(SDma, 0) | INT_MASK_P(SDmaIdle, 0) |
		     INT_MASK_P(SDmaProgress, 0)))
		qib_sdma_intr(ppd0);
	if (istat & (INT_MASK_P(SDma, 1) | INT_MASK_P(SDmaIdle, 1) |
		     INT_MASK_P(SDmaProgress, 1)))
		qib_sdma_intr(ppd1);

	/* cleanup-done means the engine hardware has (re)started */
	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}
/*
* Set or clear the Send buffer available interrupt enable bit.
*/
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	/* sendctrl shadow and register are updated together, under lock */
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!needint)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	/* scratch write flushes the sendctrl write to the chip */
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}
/*
* Somehow got an interrupt with reserved bits set in interrupt status.
* Print a message so we know it happened, then clear them.
* keep mainline interrupt handler cache-friendly
*/
/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then mask them off.
 * Kept out of line to keep the mainline handler cache-friendly.
 *
 * Bug fix: the original declared a char msg[128] buffer, never wrote
 * to it, and passed it to a "%s" format — printing uninitialized stack
 * memory (undefined behavior). There is no decode table for these
 * reserved bits, so just report the raw value.
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	/* the bits that are set but not architected for this chip */
	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
	/* mask the offending bits so they cannot re-fire endlessly */
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}
/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer.  To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best. Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		/* second port's presence bit is shifted up */
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			/* low pin means a module is now present */
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}
/*
* Handle errors and unusual events first, separate function
* to improve cache hits for fast path interrupt handling.
*/
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	/* reserved / unrecognized status bits */
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);

	/* GPIO lines (QSFP presence, etc.) */
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);

	/* chip-wide errors: mask further error irqs, defer to tasklet */
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}

	/* per-port errors, handled inline */
	if ((istat & INT_MASK_P(Err, 0)) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if ((istat & INT_MASK_P(Err, 1)) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}
/*
* Dynamically adjust the rcv int timeout for a context based on incoming
* packet rate.
*/
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Busy context: lengthen the idle timeout (capped); quiet
	 * context: shorten it (floored).  Return without touching the
	 * chip when the value is already at its limit.
	 */
	if (npkts >= rcv_int_count) {
		if (timeout >= rcv_int_timeout)
			return;
		timeout = min(timeout << 1, rcv_int_timeout);
	} else {
		if (timeout <= 2)
			return;
		timeout >>= 1;
	}

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}
/*
* This is the main interrupt handler.
* It will normally only be used for low frequency interrupts but may
* have to handle all interrupts if INTx is enabled or fewer than normal
* MSIx interrupts were allocated.
* This routine should ignore the interrupt bits for any of the
* dedicated MSIx handlers.
*/
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		/* all-ones read: chip is likely gone from the bus */
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	/* ignore bits owned by dedicated MSIx handlers */
	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	this_cpu_inc(*dd->int_counter);

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		/* kernel contexts: service their receive queues directly */
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		/* anything left belongs to user contexts: wake them */
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}
/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * Claim the interrupt even though we cannot service it:
		 * returning IRQ_NONE while the chip is resetting could
		 * make the interrupt core unregister our handler.
		 */
		return IRQ_HANDLED;
	}

	this_cpu_inc(*dd->int_counter);

	/* Ack the avail/urgent bits for this context before draining it. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}
/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * Claim the interrupt even though we cannot service it,
		 * so the interrupt core does not tear down our handler
		 * during a chip reset.
		 */
		return IRQ_HANDLED;
	}

	this_cpu_inc(*dd->int_counter);

	/* Ack the pio-buffer-available interrupt bit. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}
/*
* Dedicated Send DMA interrupt handler.
*/
static irqreturn_t sdma_intr(int irq, void *data)
{
struct qib_pportdata *ppd = data;
struct qib_devdata *dd = ppd->dd;
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
/*
* This return value is not great, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
*/
return IRQ_HANDLED;
this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
qib_sdma_intr(ppd);
return IRQ_HANDLED;
}
/*
* Dedicated Send DMA idle interrupt handler.
*/
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
struct qib_pportdata *ppd = data;
struct qib_devdata *dd = ppd->dd;
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
/*
* This return value is not great, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
*/
return IRQ_HANDLED;
this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
qib_sdma_intr(ppd);
return IRQ_HANDLED;
}
/*
* Dedicated Send DMA progress interrupt handler.
*/
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
struct qib_pportdata *ppd = data;
struct qib_devdata *dd = ppd->dd;
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
/*
* This return value is not great, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
*/
return IRQ_HANDLED;
this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
INT_MASK_P(SDmaProgress, 1) :
INT_MASK_P(SDmaProgress, 0));
qib_sdma_intr(ppd);
return IRQ_HANDLED;
}
/*
* Dedicated Send DMA cleanup interrupt handler.
*/
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
struct qib_pportdata *ppd = data;
struct qib_devdata *dd = ppd->dd;
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
/*
* This return value is not great, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
*/
return IRQ_HANDLED;
this_cpu_inc(*dd->int_counter);
/* Clear the interrupt bit we expect to be set. */
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
INT_MASK_PM(SDmaCleanupDone, 1) :
INT_MASK_PM(SDmaCleanupDone, 0));
qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
return IRQ_HANDLED;
}
#ifdef CONFIG_INFINIBAND_QIB_DCA
/*
 * Detach the IRQ-affinity notifier from a DCA-capable MSI-X vector.
 * No-op for vectors that are not marked for DCA.
 */
static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	if (!m->dca)
		return;
	qib_devinfo(dd->pcidev,
		"Disabling notifier on HCA %d irq %d\n",
		dd->unit,
		m->msix.vector);
	/* a NULL notifier unregisters whatever is installed on this vector */
	irq_set_affinity_notifier(
		m->msix.vector,
		NULL);
	m->notifier = NULL;
}
/*
 * Allocate and register an IRQ-affinity notifier for a DCA-capable
 * MSI-X vector.  Failure (allocation or registration) is tolerated:
 * the notifier is simply left unset and DCA retargeting is skipped.
 */
static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	struct qib_irq_notify *n;

	if (!m->dca)
		return;
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (n) {
		int ret;

		m->notifier = n;
		n->notify.irq = m->msix.vector;
		n->notify.notify = qib_irq_notifier_notify;
		n->notify.release = qib_irq_notifier_release;
		n->arg = m->arg;
		n->rcv = m->rcv;
		qib_devinfo(dd->pcidev,
			"set notifier irq %d rcv %d notify %p\n",
			n->notify.irq, n->rcv, &n->notify);
		ret = irq_set_affinity_notifier(
				n->notify.irq,
				&n->notify);
		if (ret) {
			/* registration failed: undo bookkeeping and free */
			m->notifier = NULL;
			kfree(n);
		}
	}
}
#endif
/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;
	const struct cpumask *local_mask;
	int firstcpu, secondcpu = 0, currrcvcpu = 0;

	if (!dd->num_pports)
		return;
	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);
		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);
		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);
		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}
	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd,
				"irq is 0, BIOS error? Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd,
				"Couldn't setup INTx interrupt (irq=%d): %d\n",
				dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		/* with INTx every interrupt source goes through one handler */
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}
	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof(redirect));
	mask = ~0ULL;
	msixnum = 0;
	/* prefer CPUs local to the device's PCI bus for IRQ affinity */
	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	firstcpu = cpumask_first(local_mask);
	if (firstcpu >= nr_cpu_ids ||
			cpumask_weight(local_mask) == num_online_cpus()) {
		/* no useful locality info; fall back to the first core */
		local_mask = topology_core_cpumask(0);
		firstcpu = cpumask_first(local_mask);
	}
	if (firstcpu < nr_cpu_ids) {
		secondcpu = cpumask_next(firstcpu, local_mask);
		if (secondcpu >= nr_cpu_ids)
			secondcpu = firstcpu;
		currrcvcpu = secondcpu;
	}
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		void *arg;
		u64 val;
		int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		int dca = 0;
#endif
		/* guarantee the vector name is always NUL-terminated */
		dd->cspec->msix_entries[msixnum].
			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
			= '\0';
		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = irq_table[i].dca;
#endif
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d%s", dd->unit,
				irq_table[i].name);
		} else {
			unsigned ctxt;
			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = 1;
#endif
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d (kctx)", dd->unit);
		}
		ret = request_irq(
			dd->cspec->msix_entries[msixnum].msix.vector,
			handler, 0, dd->cspec->msix_entries[msixnum].name,
			arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd,
				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
				msixnum,
				dd->cspec->msix_entries[msixnum].msix.vector,
				ret);
			/* give up on MSI-X entirely and retry with INTx */
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_entries[msixnum].arg = arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		dd->cspec->msix_entries[msixnum].dca = dca;
		dd->cspec->msix_entries[msixnum].rcv =
			handler == qib_7322pintr;
#endif
		if (lsb >= 0) {
			/* route this interrupt source to its MSI-X vector */
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		/*
		 * NOTE(review): the read result is unused — presumably a
		 * flush/ordering read of the MSI-X table entry; confirm
		 * before removing.
		 */
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (firstcpu < nr_cpu_ids &&
			zalloc_cpumask_var(
				&dd->cspec->msix_entries[msixnum].mask,
				GFP_KERNEL)) {
			if (handler == qib_7322pintr) {
				/* spread receive vectors over the local CPUs */
				cpumask_set_cpu(currrcvcpu,
					dd->cspec->msix_entries[msixnum].mask);
				currrcvcpu = cpumask_next(currrcvcpu,
					local_mask);
				if (currrcvcpu >= nr_cpu_ids)
					currrcvcpu = secondcpu;
			} else {
				/* all non-receive vectors share the first CPU */
				cpumask_set_cpu(firstcpu,
					dd->cspec->msix_entries[msixnum].mask);
			}
			irq_set_affinity_hint(
				dd->cspec->msix_entries[msixnum].msix.vector,
				dd->cspec->msix_entries[msixnum].mask);
		}
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		(unsigned long)dd);
bail:;
}
/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 *
 * Returns the feature bits for the board (DUAL_PORT_CAP by default);
 * also sets QIB_HAS_QSFP in dd->flags for boards with QSFP cages and
 * fills in dd->boardname and dd->boardversion.
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;
	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case BOARD_QMH7360:
		n = "Intel IB QDR 1P FLR-QSFP Adptr";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */
	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);
	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
	/* module parameter can force single-port operation on capable boards */
	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev,
			"IB%u: Forced to single port mode by module parameter\n",
			dd->unit);
		features &= PORT_SPD_CAP;
	}
	return features;
}
/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 *
 * Full chip reset: saves the MSI-X table (which the reset clobbers),
 * asserts the reset bit, waits for the chip to come back, restores
 * the MSI-X table, and re-runs register/interrupt initialization.
 * Returns 1 on success, 0 if the chip never came back.
 */
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;
	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
	/* save PCI command/INT-line/cacheline so they can be re-applied */
	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
	msix_entries = dd->cspec->num_msix_entries;
	/* no interrupts till re-initted */
	qib_7322_set_intr_state(dd, 0);
	if (msix_entries) {
		qib_7322_nomsix(dd);
		/* can be up to 512 bytes, too big for stack */
		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
			sizeof(u64), GFP_KERNEL);
		if (!msix_vecsave)
			qib_dev_err(dd, "No mem to save MSIx data\n");
	} else
		msix_vecsave = NULL;
	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save and restore
	 * it ourselves.   There is some risk something could change it,
	 * after we save it, but since we have disabled the MSIx, it
	 * shouldn't be touched...
	 */
	for (i = 0; i < msix_entries; i++) {
		u64 vecaddr, vecdata;
		vecaddr = qib_read_kreg64(dd, 2 * i +
				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (msix_vecsave) {
			msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
		}
	}
	/* reset the error-counter snapshot bookkeeping for port 0 */
	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	/* so we check interrupts work again */
	dd->z_int_counter = qib_int_counter(dd);
	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);
	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 3000);
		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
			break;
		if (i == 5) {
			/* chip never came back after all retries: give up */
			qib_dev_err(dd,
				"Failed to initialize after reset, unusable\n");
			ret = 0;
			goto  bail;
		}
	}
	dd->flags |= QIB_PRESENT; /* it's back */
	if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
		for (i = 0; i < msix_entries; i++) {
			dd->cspec->msix_entries[i].msix.entry = i;
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[2 * i]);
			qib_write_kreg(dd, 1 + 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[1 + 2 * i]);
		}
	}
	/* initialize the remaining registers.  */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);
	if (qib_pcie_params(dd, dd->lbus_width,
			    &dd->cspec->num_msix_entries,
			    dd->cspec->msix_entries))
		qib_dev_err(dd,
			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
	qib_setup_7322_interrupt(dd, 1);
	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];
		/* force IB notifications and clear autoneg failure state */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}
bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}
/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER for eager, otherwise expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 *
 * Validates alignment and range of @pa, folds in the size/template
 * bits, and writes the resulting TID entry to chip memory.
 */
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (!(dd->flags & QIB_PRESENT))
		return;
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
		/* paranoia checks */
		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
			qib_dev_err(dd,
				"Physical page address 0x%lx larger than supported\n",
				pa);
			return;
		}
		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7322_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	/* order the TID write before any later MMIO */
	mmiowb();
}
/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the context whose TID entries are cleared
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
static void qib_7322_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;
	ctxt = rcd->ctxt;
	tidinv = dd->tidinvalid;
	/* expected TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);
	/* eager TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));
	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}
/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * Precompute the per-device TID size template and the "invalid TID"
 * value so the hot-path TID writes don't recompute them.
 */
static void qib_7322_tidtemplate(struct qib_devdata *dd)
{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	switch (dd->rcvegrbufsize) {
	case 2048:
		dd->tidtemplate = IBA7322_TID_SZ_2K;
		break;
	case 4096:
		dd->tidtemplate = IBA7322_TID_SZ_4K;
		break;
	default:
		/* other sizes leave the template unchanged */
		break;
	}
	dd->tidinvalid = 0;
}
/**
* qib_init_7322_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
* @kbase: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
* HyperTransport can affect some user packet algorithims.
*/
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
struct qib_base_info *kinfo)
{
kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
if (rcd->dd->cspec->r1)
kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
return 0;
}
/*
 * Locate the message header for a receive entry: back up from the RHF
 * address to the start of the entry, then advance by the header offset
 * encoded in the RHF itself.
 */
static struct qib_message_header *
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	__le32 *entry = rhf_addr - dd->rhf_offset;

	return (struct qib_message_header *)
		(entry + qib_hdrget_offset(rhf_addr));
}
/*
 * Configure number of contexts.
 *
 * Decides how many receive contexts to enable (6, 10, or the chip
 * maximum), how they split between kernel and user, and sizes the
 * receive header queues accordingly.  Honors the qib_cfgctxts and
 * qib_n_krcv_queues module parameters.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		/* multiple kernel receive queues per port */
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}
	if (!qib_cfgctxts) {
		/* auto-size: enough for kernel ctxts plus one per CPU */
		int nctxts = dd->first_user_ctxt + num_online_cpus();
		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;
	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */
	/* The XRC opcode is 5. */
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	if (qib_rcvhdrcnt)
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
	else
		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
				dd->num_pports > 1 ? 1024U : 2048U);
}
/*
 * Read one IB configuration value for a port.  Cases that return a
 * cached or computed value jump straight to done; cases that set up
 * lsb/maskr fall through to extract a field from the ibcctrl_b shadow.
 * Returns the value, or -EINVAL for an unknown selector.
 */
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {

	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;

	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;

	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;

	case QIB_IB_CFG_SPD: /* Get current Link spd */
		ret = ppd->link_speed_active;
		goto done;

	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_LINKLATENCY:
		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
		goto done;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	/* common field extraction for the lsb/maskr cases above */
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}
/*
 * Below again cribbed liberally from older version. Do not lean
 * heavily on it.
 */
#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))

/*
 * Set one IB configuration value for a port.  Cases that complete
 * their own register writes jump to bail; cases that set up lsb/maskr
 * fall through to the common read-modify-write of the ibcctrl_b
 * shadow at the bottom.  Returns 0 or -EINVAL.
 */
static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC. Combined to avoid possible hazard
		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
		 */
		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
		maskr = IBA7322_IBC_DLIDLMC_MASK;
		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask. Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
		qib_write_kreg_port(ppd, krp_sendslid,
				    val & (val >> 16) & SendIBSLIDAssignMask);
		qib_write_kreg_port(ppd, krp_sendslidmask,
				    (val >> 16) & SendIBSLMCMask);
		break;

	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		/* convert IB value to chip register value */
		if (val == IB_WIDTH_1X)
			val = 0;
		else if (val == IB_WIDTH_4X)
			val = 1;
		else
			val = 3;
		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
		break;

	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change.  Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		val <<= IBA7322_IBC_SPEED_LSB;
		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
		if (val & (val - 1)) {
			/* Muliple speeds enabled */
			val |= IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else if (val & IBA7322_IBC_SPEED_QDR)
			val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
		break;

	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  OverrunThreshold);
		if (maskr != val) {
			/* only touch hardware when the value changes */
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				  PhyerrThreshold);
		if (maskr != val) {
			/* only touch hardware when the value changes */
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
			ppd->cpspec->ibcctrl_a |= (u64) val <<
				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
			qib_write_kreg(dd, kr_scratch, 0ULL);
		}
		goto bail;

	case QIB_IB_CFG_PKEYS: /* update pkeys */
		/* pack the four 16-bit pkeys into one 64-bit register */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
		goto bail;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl_a &=
				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
			SYM_LSB(IBCCtrlA_0, MaxPktLen);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_write_kreg(dd, kr_scratch, 0ULL);
		goto bail;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		/* high 16 bits of val carry the link command */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			/* snapshot error counters so deltas can be computed */
			ppd->cpspec->ibmalfusesnap = 1;
			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
				crp_errlink);
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7322_creg32_port(ppd,
							      crp_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7322_creg32_port(ppd,
						      crp_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			if (ppd->cpspec->ibmalfusesnap) {
				/* fold the down-time errors into the delta */
				ppd->cpspec->ibmalfusesnap = 0;
				ppd->cpspec->ibmalfdelta +=
					read_7322_creg32_port(ppd,
							      crp_errlink) -
					ppd->cpspec->ibmalfsnap;
			}
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		/* low 16 bits of val carry the link-init command */
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * stop state chase counter and timer, if running.
			 * wait forpending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			set_vls(ppd);
		}
		goto bail;

	case QIB_IB_CFG_VL_HIGH_LIMIT:
		qib_write_kreg_port(ppd, krp_highprio_limit, val);
		goto bail;

	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > 3) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PORT:
		/* val is the port number of the switch we are connected to. */
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		}
		goto bail;

	default:
		ret = -EINVAL;
		goto bail;
	}
	/* common read-modify-write of ibcctrl_b for the lsb/maskr cases */
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(dd, kr_scratch, 0);
bail:
	return ret;
}
/*
 * Enable or disable IBC loopback for a port.  @what is "ibc" to enable
 * or "off" to disable; anything else returns -EINVAL.  Heartbeat is
 * turned off while loopback is enabled (so the link can come up) and
 * restored when loopback is turned off.
 */
static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ctrlb;

	/* only IBC loopback, may add serdes and xgxs loopbacks later */
	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
						   Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			 ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
						    Loopback);
		/* enable heart beat again */
		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
		qib_devinfo(ppd->dd->pcidev,
			"Disabling IB%u:%u IBC loopback (normal)\n",
			ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		/* push updated loopback and heartbeat settings to the chip */
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
					     << IBA7322_IBC_HRTBT_LSB);
		ppd->cpspec->ibcctrl_b = ctrlb | val;
		qib_write_kreg_port(ppd, krp_ibcctrl_b,
				    ppd->cpspec->ibcctrl_b);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}
/*
 * Read the 16 VL-arbitration entries starting at chip register @regno
 * into the caller's table, unpacking the VL and weight fields from
 * each register.
 */
static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned idx;

	for (idx = 0; idx < 16; idx++, regno++, vl++) {
		u32 regval = qib_read_kreg_port(ppd, regno);

		vl->vl = (regval >> SYM_LSB(LowPriority0_0, VirtualLane)) &
			SYM_RMASK(LowPriority0_0, VirtualLane);
		vl->weight = (regval >> SYM_LSB(LowPriority0_0, Weight)) &
			SYM_RMASK(LowPriority0_0, Weight);
	}
}
/*
 * Write the 16 VL-arbitration entries from the caller's table into the
 * chip registers starting at @regno, then make sure the VL arbiter is
 * enabled (under the sendctrl lock) if it wasn't already.
 */
static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
			   struct ib_vl_weight_elem *vl)
{
	unsigned i;

	for (i = 0; i < 16; i++, regno++, vl++) {
		u64 val;

		/* pack the VL and weight fields into one register value */
		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
			SYM_LSB(LowPriority0_0, VirtualLane)) |
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
			SYM_LSB(LowPriority0_0, Weight));
		qib_write_kreg_port(ppd, regno, val);
	}
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		unsigned long flags;

		/* sendctrl shadow is shared; update it under the lock */
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
}
/*
 * qib_7322_get_ib_table - read a VL arbitration table
 * @ppd: port-specific data
 * @which: QIB_IB_TBL_VL_HIGH_ARB or QIB_IB_TBL_VL_LOW_ARB
 * @t: destination, an array of 16 struct ib_vl_weight_elem
 *
 * Return: 0 on success, -EINVAL for an unknown table selector.
 */
static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	if (which == QIB_IB_TBL_VL_HIGH_ARB)
		get_vl_weights(ppd, krp_highprio_0, t);
	else if (which == QIB_IB_TBL_VL_LOW_ARB)
		get_vl_weights(ppd, krp_lowprio_0, t);
	else
		return -EINVAL;
	return 0;
}
/*
 * qib_7322_set_ib_table - write a VL arbitration table
 * @ppd: port-specific data
 * @which: QIB_IB_TBL_VL_HIGH_ARB or QIB_IB_TBL_VL_LOW_ARB
 * @t: source, an array of 16 struct ib_vl_weight_elem
 *
 * Return: 0 on success, -EINVAL for an unknown table selector.
 */
static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
{
	if (which == QIB_IB_TBL_VL_HIGH_ARB)
		set_vl_weights(ppd, krp_highprio_0, t);
	else if (which == QIB_IB_TBL_VL_LOW_ARB)
		set_vl_weights(ppd, krp_lowprio_0, t);
	else
		return -EINVAL;
	return 0;
}
/*
 * qib_update_7322_usrhead - update receive header/eager queue head pointers
 * @rcd: context data
 * @hd: new rcvhdrhead value (may carry the packet-interrupt count field)
 * @updegr: nonzero to also update the eager-buffer index head
 * @egrhd: new eager index head value (used only if @updegr)
 * @npkts: packet count for the adaptive receive-timeout adjustment
 */
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to ensure
	 * that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	mmiowb();
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	/*
	 * NOTE(review): rcvhdrhead is written twice; this looks like a
	 * deliberate chip workaround rather than a copy/paste error —
	 * confirm against hardware errata before removing the duplicate.
	 */
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	mmiowb();
}
/*
 * qib_7322_hdrqempty - check whether a context's receive header queue is empty
 * @rcd: context data
 *
 * The queue is empty when head equals tail.  The tail comes from the
 * DMA'd kernel copy when one exists, otherwise from the chip register.
 */
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	u32 tail = rcd->rcvhdrtail_kvaddr ?
		qib_get_rcvhdrtail(rcd) :
		qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);

	return head == tail;
}
/* rcvctrl op bits that modify the device-common RcvCtrl register */
#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_TIDFLOW_ENB | \
	QIB_RCVCTRL_TIDFLOW_DIS | \
	QIB_RCVCTRL_TAILUPD_ENB | \
	QIB_RCVCTRL_TAILUPD_DIS | \
	QIB_RCVCTRL_INTRAVAIL_ENB | \
	QIB_RCVCTRL_INTRAVAIL_DIS | \
	QIB_RCVCTRL_BP_ENB | \
	QIB_RCVCTRL_BP_DIS)

/* rcvctrl op bits that modify the per-port RcvCtrl_0 register */
#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
	QIB_RCVCTRL_CTXT_DIS | \
	QIB_RCVCTRL_PKEY_DIS | \
	QIB_RCVCTRL_PKEY_ENB)
/*
 * Modify the RCVCTRL register in a chip-specific way. This
 * is a function because bit positions and (future) register
 * location are chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
/*
 * rcvctrl_7322_mod - apply a bit-mask of receive-control operations
 * @ppd: port-specific data
 * @op: OR of QIB_RCVCTRL_* operation bits; 0 means "flush" (rewrite both
 *      the common and per-port registers from their shadows)
 * @ctxt: context number, or negative to apply to all contexts
 *
 * Updates the shadow copies (dd->rcvctrl, ppd->p_rcvctrl) then writes
 * whichever chip registers the requested ops touch.  All shadow state is
 * protected by rcvmod_lock.
 */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	u64 mask, val;
	unsigned long flags;
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	/* device-common shadow bits */
	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
	/* note: PKEY bits are inverted (the chip bit is a "disable") */
	if (op & QIB_RCVCTRL_PKEY_ENB)
		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
	if (ctxt < 0) {
		/* all contexts */
		mask = (1ULL << dd->ctxtcnt) - 1;
		rcd = NULL;
	} else {
		mask = (1ULL << ctxt);
		rcd = dd->rcd[ctxt];
	}
	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
		ppd->p_rcvctrl |=
			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
		}
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
				    rcd->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
				    rcd->rcvhdrq_phys);
		rcd->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		ppd->p_rcvctrl &=
			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
	if (op & QIB_RCVCTRL_BP_ENB)
		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
	if (op & QIB_RCVCTRL_BP_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
	/*
	 * Decide which registers to write depending on the ops enabled.
	 * Special case is "flush" (no bits set at all)
	 * which needs to write both.
	 */
	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if (op == 0 || (op & RCVCTRL_PORT_MODS))
		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	/*
	 * NOTE(review): this indexes dd->rcd[ctxt] directly; callers
	 * presumably never pass CTXT_ENB with ctxt < 0 — verify, since a
	 * negative ctxt would index out of bounds here.
	 */
	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
		/* be sure enabling write seen; hd/tl should be 0 */
		(void) qib_read_kreg32(dd, kr_scratch);
		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		unsigned f;
		/* Now that the context is disabled, clear these registers. */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
				qib_write_ureg(dd, ur_rcvflowtable + f,
					       TIDFLOW_ERRBITS, ctxt);
		} else {
			unsigned i;
			/* negative ctxt: clear for every configured context */
			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
					qib_write_ureg(dd, ur_rcvflowtable + f,
						       TIDFLOW_ERRBITS, i);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}
/*
* Modify the SENDCTRL register in chip-specific way. This
* is a function where there are multiple such registers with
* slightly different layouts.
* The chip doesn't allow back-to-back sendctrl writes, so write
* the scratch register after writing sendctrl.
*
* Which register is written depends on the operation.
* Most operate on the common register, while
* SEND_ENB and SEND_DIS operate on the per-port ones.
* SEND_ENB is included in common because it can change SPCL_TRIG
*/
/* sendctrl op bits that modify the device-common SendCtrl register */
#define SENDCTRL_COMMON_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_AVAIL_DIS | \
	QIB_SENDCTRL_AVAIL_ENB | \
	QIB_SENDCTRL_AVAIL_BLIP | \
	QIB_SENDCTRL_DISARM | \
	QIB_SENDCTRL_DISARM_ALL | \
	QIB_SENDCTRL_SEND_ENB)

/* sendctrl op bits that modify the per-port SendCtrl_0 register */
#define SENDCTRL_PORT_MODS (\
	QIB_SENDCTRL_CLEAR | \
	QIB_SENDCTRL_SEND_ENB | \
	QIB_SENDCTRL_SEND_DIS | \
	QIB_SENDCTRL_FLUSH)
/*
 * sendctrl_7322_mod - apply a bit-mask of send-control operations
 * @ppd: port-specific data
 * @op: OR of QIB_SENDCTRL_* operation bits; 0 means "flush" (rewrite both
 *      the common and per-port registers from their shadows)
 *
 * Sticky state is kept in the shadows (dd->sendctrl, ppd->p_sendctrl)
 * under sendctrl_lock; one-shot operations (DISARM, BLIP, FLUSH) are
 * written to the chip without being recorded in the shadows.  The chip
 * doesn't allow back-to-back sendctrl writes, so each register write is
 * followed by a scratch write.
 */
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	/* First the dd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
		if (dd->flags & QIB_USE_SPCL_TRIG)
			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
	}
	/* Then the ppd ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_SEND_DIS)
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;
		tmp_dd_sendctrl = dd->sendctrl;
		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
		/*
		 * Disarm any buffers that are not yet launched,
		 * disabling updates until done.
		 */
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
		for (i = 0; i < last; i++) {
			/* buffer number is encoded in the low bits */
			qib_write_kreg(dd, kr_sendctrl,
				       tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}
	if (op & QIB_SENDCTRL_FLUSH) {
		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
		/*
		 * Now drain all the fifos. The Abort bit should never be
		 * needed, so for now, at least, we don't use it.
		 */
		tmp_ppd_sendctrl |=
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
			SYM_MASK(SendCtrl_0, TxeBypassIbc);
		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}
	tmp_dd_sendctrl = dd->sendctrl;
	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmSendBuf));
	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}
	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}
	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		/* restore the shadow value, re-enabling avail updates */
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;
		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip. Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}
/*
 * Counter-table entries carry a chip counter index in the low 15 bits,
 * optionally ORed with the flags below.
 */
#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
	struct qib_devdata *dd = ppd->dd;
	u64 ret = 0ULL;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u32 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = crp_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so still get accessed as
		 * though they were counters from this code.
		 */
		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
		[QIBPORTCNTR_PSSTART] = krp_psstart,
		[QIBPORTCNTR_PSSTAT] = krp_psstat,
		/* pseudo-counter, summed for all ports */
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};
	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			 "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	/* strip the flag bits to get the chip counter index */
	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
	/* handle non-counters and special cases first */
	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;
		/* sum over all kernel contexts (skip if mini_init) */
		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];
			/* only contexts belonging to this port */
			if (!rcd || rcd->ppd != ppd)
				continue;
			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
		}
		goto done;
	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
		/*
		 * Used as part of the synthesis of port_rcv_errors
		 * in the verbs code for IBTA counters. Not needed for 7322,
		 * because all the errors are already counted by other cntrs.
		 */
		goto done;
	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
		/* were counters in older chips, now per-port kernel regs */
		ret = qib_read_kreg_port(ppd, creg);
		goto done;
	}
	/*
	 * Only fast increment counters are 64 bits; use 32 bit reads to
	 * avoid two independent reads when on Opteron.
	 */
	if (xlator[reg] & _PORT_64BIT_FLAG)
		ret = read_7322_creg_port(ppd, creg);
	else
		ret = read_7322_creg32_port(ppd, creg);
	if (creg == crp_ibsymbolerr) {
		/* while a delta is in progress, report the snapshot value */
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= ppd->cpspec->ibsymdelta;
	} else if (creg == crp_iblinkerrrecov) {
		if (ppd->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= ppd->cpspec->iblnkerrdelta;
	} else if (creg == crp_errlink)
		ret -= ppd->cpspec->ibmalfdelta;
	else if (creg == crp_iblinkdown)
		ret += ppd->cpspec->iblnkdowndelta;
done:
	return ret;
}
/*
 * Device counter names (not port-specific), one line per stat,
 * single string. Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility. Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7322indices contains the corresponding register indices.
 */
/* Device counter name list; one entry per line, order matches
 * cntr7322indices below — keep the two in sync. */
static const char cntr7322names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"RxTIDFloDrop\n" /* 7322 only */
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n"
	"Ctx17EgrOvfl\n"
	;

/* Chip register index for each name above, in the same order. */
static const u32 cntr7322indices[] = {
	cr_lbint | _PORT_64BIT_FLAG,
	cr_lbstall | _PORT_64BIT_FLAG,
	cr_tidfull,
	cr_tidinvalid,
	cr_rxtidflowdrop,
	cr_base_egrovfl + 0,
	cr_base_egrovfl + 1,
	cr_base_egrovfl + 2,
	cr_base_egrovfl + 3,
	cr_base_egrovfl + 4,
	cr_base_egrovfl + 5,
	cr_base_egrovfl + 6,
	cr_base_egrovfl + 7,
	cr_base_egrovfl + 8,
	cr_base_egrovfl + 9,
	cr_base_egrovfl + 10,
	cr_base_egrovfl + 11,
	cr_base_egrovfl + 12,
	cr_base_egrovfl + 13,
	cr_base_egrovfl + 14,
	cr_base_egrovfl + 15,
	cr_base_egrovfl + 16,
	cr_base_egrovfl + 17,
};
/*
* same as cntr7322names and cntr7322indices, but for port-specific counters.
* portcntr7322indices is somewhat complicated by some registers needing
* adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
*/
/* Per-port counter name list; order matches portcntr7322indices below —
 * keep the two in sync. */
static const char portcntr7322names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
	"RxVL15Drop\n"
	"RxVlErr\n"
	"XcessBufOvfl\n"
	"RxQPBadCtxt\n" /* 7322-only from here down */
	"TXBadHeader\n"
	;

/*
 * Counter index for each name above: either a QIBPORTCNTR_* value ORed
 * with _PORT_VIRT_FLAG (resolved through qib_portcntr_7322), or a direct
 * crp_* chip register index.
 */
static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};
/* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
int i, j = 0;
char *s;
for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
i++) {
/* we always have at least one counter before the egrovfl */
if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
j = 1;
s = strchr(s + 1, '\n');
if (s && j)
j++;
}
dd->cspec->ncntrs = i;
if (!s)
/* full list; size is without terminating null */
dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
else
dd->cspec->cntrnamelen = 1 + s - cntr7322names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
if (!dd->cspec->cntrs)
qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr7322names; s; i++)
s = strchr(s + 1, '\n');
dd->cspec->nportcntrs = i - 1;
dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
for (i = 0; i < dd->num_pports; ++i) {
dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
if (!dd->pport[i].cpspec->portcntrs)
qib_dev_err(dd,
"Failed allocation for portcounters\n");
}
}
/*
 * qib_read_7322cntrs - return device counter names or values
 * @dd: device data
 * @pos: read offset; a @pos past the end signals "done" and returns 0
 * @namep: if non-NULL, receive the static name string (name pass)
 * @cntrp: if @namep is NULL, receive a freshly-sampled value array
 *
 * Return: number of bytes available, or 0 when everything has been
 * read (or the shadow array was never allocated).
 */
static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u64 *dest;
	u32 avail;
	int idx;

	if (namep) {
		avail = dd->cspec->cntrnamelen;
		if (pos >= avail)
			return 0; /* final read after getting everything */
		*namep = (char *)cntr7322names;
		return avail;
	}

	dest = dd->cspec->cntrs;
	avail = dd->cspec->ncntrs * sizeof(u64);
	if (!dest || pos >= avail)
		return 0; /* everything read, or couldn't get memory */

	*cntrp = dest;
	for (idx = 0; idx < dd->cspec->ncntrs; idx++) {
		if (cntr7322indices[idx] & _PORT_64BIT_FLAG)
			dest[idx] = read_7322_creg(dd,
						   cntr7322indices[idx] &
						   _PORT_CNTR_IDXMASK);
		else
			dest[idx] = read_7322_creg32(dd,
						     cntr7322indices[idx]);
	}
	return avail;
}
/*
 * qib_read_7322portcntrs - return per-port counter names or values
 * @dd: device data
 * @pos: read offset; a @pos past the end signals "done" and returns 0
 * @port: port index into dd->pport (values pass only)
 * @namep: if non-NULL, receive the static name string (name pass)
 * @cntrp: if @namep is NULL, receive a freshly-sampled value array
 *
 * Return: number of bytes available, or 0 when everything has been
 * read (or the shadow array was never allocated).
 */
static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;
	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr7322names;
	} else {
		struct qib_pportdata *ppd = &dd->pport[port];
		u64 *cntr = ppd->cpspec->portcntrs;
		int i;
		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			/* "virtual" counters go through the adjusting reader */
			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7322(ppd,
					portcntr7322indices[i] &
					_PORT_CNTR_IDXMASK);
			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
				*cntr++ = read_7322_creg_port(ppd,
					   portcntr7322indices[i] &
					   _PORT_CNTR_IDXMASK);
			else
				*cntr++ = read_7322_creg32_port(ppd,
					   portcntr7322indices[i]);
		}
	}
done:
	return ret;
}
/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		/*
		 * If the port isn't enabled or isn't operational, or
		 * diags are running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;
		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
		/* fold the delta since last sample into the running total */
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		/*
		 * If QDR DFE adaptation has been on long enough on an
		 * up link, turn it off and restore static adaptation.
		 */
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;
			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	/* re-arm ourselves; this timer is self-perpetuating */
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}
/*
* If we were using MSIx, try to fallback to INTx.
*/
/*
 * qib_7322_intr_fallback - fall back from MSIx to INTx interrupts
 * @dd: device data
 *
 * Return: 0 if already using INTx (nothing done), 1 after switching.
 */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->cspec->num_msix_entries)
		return 0; /* already using INTx */
	qib_devinfo(dd->pcidev,
		"MSIx interrupt not detected, trying INTx interrupts\n");
	/* tear down MSIx, enable legacy INTx, then re-register handlers */
	qib_7322_nomsix(dd);
	qib_enable_intx(dd->pcidev);
	qib_setup_7322_interrupt(dd, 0);
	return 1;
}
/*
 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check all callers.
 */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
		SYM_MASK(IBPCSConfig_0, xcv_treset) |
		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
	/* mask statusValidNoEop errors the reset would otherwise raise */
	qib_write_kreg(dd, kr_hwerrmask,
		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
	/* take the IB link down while pulsing the PCS reset bits */
	qib_write_kreg_port(ppd, krp_ibcctrl_a,
			    ppd->cpspec->ibcctrl_a &
			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
	/* restore the previous ibcctrl_a (which may still have link off) */
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear any error latched during the reset, then unmask again */
	qib_write_kreg(dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, statusValidNoEopClear));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}
/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware. It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
/*
 * autoneg_7322_sendpkt - send one autonegotiation MAD via PIO
 * @ppd: port-specific data
 * @hdr: 7-dword packet header
 * @dcnt: payload length in dwords
 * @data: payload dwords
 *
 * Gives up silently if no send buffer becomes available after ~16 tries.
 * Header checking is disabled around the send since this packet is
 * deliberately not a valid IB packet.
 */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;
	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	control = qib_7322_setpbc_control(ppd, len, 0, 15);
	pbc = ((u64) control << 32) | len;
	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
		if (i++ > 15)
			return;
		udelay(2);
	}
	/* disable header check on this packet, since it can't be valid */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
	writeq(pbc, piobuf);
	/* flush write-combining between PBC, header, and payload copies */
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		/* special trigger word launches the buffer on this chip */
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
	/* and re-enable hdr check */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
}
/*
* _start packet gets sent twice at start, _done gets sent twice at end
*/
/*
 * qib_autoneg_7322_send - send an autoneg "start" or "done" MAD, twice
 * @ppd: port-specific data
 * @which: 0 sends the _start payload, nonzero sends the _done payload
 *
 * The static header/payload tables are byteswapped to wire order once,
 * on first use (guarded by the static 'swapped' flag; callers are
 * serialized, so no lock is taken here).
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
		};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
		};
	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}
	data = which ? madpayload_done : madpayload_start;
	/* _start gets sent twice at start, _done twice at end */
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}
/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change. The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When link has gone down, and autoneg enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
/*
 * set_7322_ibspeed_fast - stage an IB speed change in ibcctrl_b
 * @ppd: port-specific data
 * @speed: QIB_IB_SDR/DDR/QDR, or an OR of several to enable them all
 *
 * Rebuilds the speed-related fields of the ibcctrl_b shadow and writes
 * it to the chip only if the value actually changed.  Does not itself
 * trigger link retraining; see the comment above.
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 want;

	/* start from the shadow with all speed-related fields cleared */
	want = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
					  IBA7322_IBC_IBTA_1_2_MASK |
					  IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) {
		/* several speed bits set: enable all, plus IBTA 1.2 mode */
		want |= (speed << IBA7322_IBC_SPEED_LSB) |
			IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
	} else if (speed == QIB_IB_QDR) {
		/* QDR also requires the IBTA 1.2 negotiation bit */
		want |= IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK;
	} else if (speed == QIB_IB_DDR) {
		want |= IBA7322_IBC_SPEED_DDR;
	} else {
		want |= IBA7322_IBC_SPEED_SDR;
	}

	if (want == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = want;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
/*
* This routine is only used when we are not talking to another
* IB 1.2-compliant device that we think can do DDR.
* (This includes all existing switch chips as of Oct 2007.)
* 1.2-compliant devices go directly to DDR prior to reaching INIT
*/
/*
 * try_7322_autoneg - kick off the heuristic SDR->DDR autonegotiation
 * @ppd: port-specific data
 *
 * Marks autoneg in progress, sends the "start" MAD, stages DDR speed,
 * resets the PCS, and schedules autoneg_7322_work to finish the dance.
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}
/*
* Handle the empirically determined mechanism for auto-negotiation
* of DDR speed with switches.
*/
/*
 * autoneg_7322_work - delayed-work body of the DDR autonegotiation dance
 * @work: embedded in qib_chippport_specific.autoneg_work
 *
 * Timings here are empirical (see the comment above try_7322_autoneg).
 * QIBL_IB_AUTONEG_INPROG is cleared elsewhere on success; each
 * wait_event_timeout below is therefore *expected* to time out, and an
 * early wakeup means autoneg finished (or was cancelled) and we bail.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;
	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;
	startms = jiffies_to_msecs(jiffies);
	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */
	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);
	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);
	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
		msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		/* still in progress: autoneg didn't complete; clean up */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			/* give up until speeds are next reconfigured */
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}
/*
* This routine is used to request IPG set in the QLogic switch.
* Only called if r1.
*/
/*
 * try_7322_ipg - send a vendor-specific MAD asking the switch to set IPG
 * @ppd: port-specific data
 *
 * Only called on r1 hardware.  On any failure to build or post the MAD,
 * falls through to the retry path.  The retry (ipg_7322_work) is
 * re-queued unconditionally — even after a successful post — with a
 * delay that doubles with each attempt (2 << ipg_tries msecs); the work
 * handler caps the attempts.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;
	agent = ibp->rvp.send_agent;
	if (!agent)
		goto retry;
	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		goto retry;
	/* create and cache the QP0 address handle on first use */
	if (!ibp->smi_ah) {
		struct ib_ah *ah;
		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
		else {
			send_buf->ah = ah;
			ibp->smi_ah = ibah_to_rvtah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}
	/* directed-route SMP carrying the vendor IPG attribute */
	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;
	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		/* post failed (or AH creation failed): release the buffer */
		ib_free_send_mad(send_buf);
retry:
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}
/*
* Timeout handler for setting IPG.
* Only called if r1.
*/
/*
 * ipg_7322_work - delayed-work retry handler for the IPG request MAD
 * @work: embedded in qib_chippport_specific.ipg_work
 *
 * Only called on r1 hardware.  Retries try_7322_ipg() while the link is
 * up (INIT/ARMED/ACTIVE), giving up after 10 attempts.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd = container_of(work,
			struct qib_chippport_specific, ipg_work.work)->ppd;

	if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			     QIBL_LINKACTIVE)))
		return;
	if (++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}
/*
 * Translate the chip's IBC LinkState field into the generic
 * IB_PORT_* logical port state.
 */
static u32 qib_7322_iblink_state(u64 ibcs)
{
	switch ((u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState)) {
	case IB_7322_L_STATE_INIT:
		return IB_PORT_INIT;
	case IB_7322_L_STATE_ARM:
		return IB_PORT_ARMED;
	case IB_7322_L_STATE_ACTIVE:
	case IB_7322_L_STATE_ACT_DEFER:
		/* ACT_DEFER reported the same as ACTIVE */
		return IB_PORT_ACTIVE;
	case IB_7322_L_STATE_DOWN:
	default:
		/* any unrecognized state is treated as DOWN */
		return IB_PORT_DOWN;
	}
}
/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	/* direct table lookup, indexed by the chip's training state */
	u8 trstate = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);

	return qib_7322_physportstate[trstate];
}
/*
 * Handle a link up/down transition for a port.
 * @ppd: port data
 * @ibup: nonzero if the link came up, zero if it went down
 * @ibcs: snapshot of the IBCStatusA register
 *
 * Updates active speed/width from the chip, runs the compatibility
 * DDR autonegotiation state machine, and maintains the symbol-error /
 * link-error-recovery counter deltas so autoneg transients aren't
 * reported to users.
 *
 * Returns 1 if no further generic IB status-change processing should
 * be done by the caller, 0 otherwise.
 */
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
	/* mult now encodes width*speed; map to an inter-packet delay */
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];

	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		/* W1C: clear heartbeat status bits if set */
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			struct qib_qsfp_data *qd =
				&ppd->cpspec->qsfp_data;

			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			/* schedule the qsfp refresh which should turn the link
			   off */
			if (ppd->dd->flags & QIB_HAS_QSFP) {
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		/* track repeated linkdowns with no counter movement */
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR)
		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				/* start accumulating error deltas so the
				 * autoneg transient isn't user-visible */
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			/* came back up still SDR mid-autoneg: nudge again */
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			/* autoneg succeeded: link is up at DDR */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies +
				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
			crp_errlink);
	}
	if (symadj) {
		/* close out a delta-accumulation window, if one was open */
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		/* snapshot counters at linkdown so a following autoneg
		 * cycle can subtract its own errors */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
			crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
			crp_iblinkerrrecov);
	}
	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}
/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		/* update the shadowed extctrl direction bits, then write */
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;
		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
/*
 * Enable or disable writes to the config EEPROM via the WEN GPIO pin.
 * Returns the previous logical write-enable state (0 or 1).
 */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	u32 wen_bit = 1 << QIB_EEPROM_WEN_NUM;
	int was_wen;

	/* pin is active-low: invert raw GPIO input to get logical state */
	was_wen = (~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM) & 1;
	/* drive the pin low to enable writes, high to disable */
	gpio_7322_mod(dd, wen ? 0 : wen_bit, wen_bit, wen_bit);

	return was_wen;
}
/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	/* low 32 bits = 2K buf count, high 32 bits = 4K buf count */
	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* likewise for the buffer sizes */
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocation
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	/* number of u64 pio-available registers needed to cover all bufs
	 * (each register holds 2 bits per buffer) */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	/* device-wide counter registers */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	/* per-port counter register bases, read from each port */
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

/* SendCtrl bits whose values are mirrored in ppd->p_sendctrl */
#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) |	\
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

/*
 * Diag observer hook for accesses to the per-port SendCtrl register.
 * Keeps the driver's software shadow (ppd->p_sendctrl) consistent with
 * diag reads/writes. Returns the access width in bytes (4 or 8).
 *
 * @offs: register offset being accessed
 * @data: in/out data for the access
 * @mask: bits being written (0 for a pure read)
 * @only_32: nonzero for a 32-bit access
 */
static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed. We need to hunt for the ppd that corresponds
	 * to the offset we got. And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		/* flush the write; scratch write forces ordering */
		qib_write_kreg(dd, kr_scratch, 0Ull);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}
/* diag observers registered for each port's SendCtrl register; the two
 * identical offsets mark a single-register observation range */
static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
	};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
	};

/* SDMA descriptor fetch priority written to each port at late init */
static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");

/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);
/*
 * Work handler for QSFP cable insertion/removal events.
 * On removal, disables the physical link; on insertion, waits out the
 * module power-up time, refreshes the cached cable info, programs the
 * LE2 serdes setting and TxDDS values, and re-enables the link if the
 * cable was physically re-inserted.
 */
static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	unsigned long pwrup;
	unsigned long flags;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	/* earliest time the module is allowed to be touched after insert */
	pwrup = qd->t_insert +
		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

	/* Delay for 20 msecs to allow ModPrs resistor to setup */
	mdelay(QSFP_MODPRS_LAG_MSEC);

	if (!qib_qsfp_mod_present(ppd)) {
		ppd->cpspec->qsfp_data.modpresent = 0;
		/* Set the physical link to disabled */
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else {
		/*
		 * Some QSFP's not only do not respond until the full power-up
		 * time, but may behave badly if we try. So hold off responding
		 * to insertion.
		 */
		while (1) {
			if (time_is_before_jiffies(pwrup))
				break;
			msleep(20);
		}

		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

		/*
		 * Need to change LE2 back to defaults if we couldn't
		 * read the cable type (to handle cable swaps), so do this
		 * even on failure to read cable information. We don't
		 * get here for QME, so IS_QME check not needed here.
		 */
		if (!ret && !ppd->dd->cspec->r1) {
			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
				le2 = LE2_QME;
			else if (qd->cache.atten[1] >= qib_long_atten &&
				 QSFP_IS_CU(qd->cache.tech))
				le2 = LE2_5m;
			else
				le2 = LE2_DEFAULT;
		} else
			le2 = LE2_DEFAULT;
		/* program LE2 into bits 9:7 of serdes register 13 */
		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
		/*
		 * We always change parameters, since we can choose
		 * values for cables without eeproms, and the cable may have
		 * changed from a cable with full or partial eeprom content
		 * to one with partial or no content.
		 */
		init_txdds_table(ppd, 0);
		/* The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when the cable has been
		 * physically pulled. */
		if (!ppd->cpspec->qsfp_data.modpresent &&
		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
			ppd->cpspec->qsfp_data.modpresent = 1;
			qib_set_ib_7322_lstate(ppd, 0,
					       QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKV;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		}
	}
}
/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 *
 * Sets up the QSFP event work for this port and enables the
 * module-present GPIO interrupt (inverted, so it fires on change).
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	/* port 2's ModPrs pin is shifted up in the GPIO register */
	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
/*
 * called at device initialization time, and also if the txselect
 * module parameter is changed. This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character. A newline terminates. The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	/* valid index range depends on board type (QME/QMH add entries) */
	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	while (*nxt && nxt[1]) {
		/* parse "unit,port=val[,h1]"; on any malformed field,
		 * skip ahead to the next space-separated tuple */
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitted */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		/* apply the tuple only if it names this device's unit */
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}
/* handle the txselect parameter changing
 *
 * Validates the leading default index and total length, stores the
 * string, and re-applies the settings on every 7322 device present.
 * Returns 0 on success, -ENOSPC/-EINVAL on bad input.
 */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	char *n;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		pr_info("txselect_values string too long\n");
		return -ENOSPC;
	}
	val = simple_strtoul(str, &n, 0);
	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		pr_info("txselect_values must start with a number < %d\n",
			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return -EINVAL;
	}
	/* length was bounds-checked above, so plain strcpy is safe here */
	strcpy(txselect_list, str);

	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}
/*
 * Write the final few registers that depend on some of the
 * init setup. Done late in init, just before bringing up
 * the serdes.
 *
 * Returns 0, or -EINVAL if the SendPIOAvailAddr readback fails.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	/* read back to verify the DMA address took; mismatch is fatal */
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	/* fetch-priority enable must be off while the thresholds change */
	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}
/* per IB port errors.  */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
		       MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))

/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
			    SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits.  If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	/* r1 silicon needs this workaround bit in the sendctrl shadow */
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		/* QP mapping only matters with multiple kernel contexts */
		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		/* n = number of kernel contexts available per port */
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		/* pack 32 map entries, 5 bits each, 6 entries per register */
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		/* write the partial final register (entries 30-31) */
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Setup up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize  as (disabled) rcvflow tables.  Application code
	 * will setup each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * dual cards init to dual port recovery, single port cards to
	 * the one port.  Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected
	 * */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
/*
 * One-time chip-specific initialization: lays out the pport/cspec
 * structures in the memory allocated after devdata, reads the revision
 * and chip parameters, allocates the send-check bitmaps, configures
 * per-port defaults (VLs, speeds, autoneg/IPG work, timers), maps the
 * VL15 buffers, and computes PIO buffer partitioning and the
 * send-buffer-available update threshold.
 *
 * Returns 0 on success, or a negative errno on failure; on any exit,
 * ctxtcnt is forced to at least 1 so later init code doesn't divide
 * by zero.
 */
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;
	resource_size_t vl15off;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	/* cspec follows the two pports; cpspecs follow cspec */
	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	/* features holds per-port speed capability bits, shifted below */
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
	dd->cspec->sendibchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
		!dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values.  These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/* link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		/* consume this port's speed-capability bits from features */
		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>=  PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				/* NOTE(review): copies pport 1's struct over
				 * pport 0 -- presumably so the active single
				 * port lives at index 0; confirm vs. upstream */
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
		if (ret) {
			dd->num_pports--;
			goto bail;
		}

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			/* 8 VLs only fit in chip buffering for MTU <= 2K */
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here.  Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev,
					"IB%u:%u: Unknown mezzanine card type\n",
					dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		init_timer(&cp->chase_timer);
		cp->chase_timer.function = reenable_chase;
		cp->chase_timer.data = (unsigned long)ppd;

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7322_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	/*
	 * We do not set WC on the VL15 buffers to avoid
	 * a rare problem with unaligned writes from
	 * interrupt-flushed store buffers, so we need
	 * to map those separately here.  We can't solve
	 * this for the rarely used mtrr case.
	 */
	ret = init_chip_wc_pat(dd, 0);
	if (ret)
		goto bail;

	/* vl15 buffers start just after the 4k buffers */
	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
		  dd->piobcnt4k * dd->align4k;
	dd->piovl15base	= ioremap_nocache(vl15off,
					  NUM_VL15_BUFS * dd->align4k);
	if (!dd->piovl15base) {
		ret = -ENOMEM;
		goto bail;
	}

	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
/*
 * Pick the PIO buffer search range for a packet described by @pbc and
 * hand off to qib_getsendbuf_range() to do the actual allocation.
 */
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	struct qib_devdata *dd = ppd->dd;
	u32 dwlen = pbc & QIB_PBC_LENGTH_MASK;
	u32 lo, hi;

	if (pbc & PBC_7322_VL15_SEND) {
		/* dedicated per-port VL15 buffer, just past the normal pool */
		lo = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
		hi = lo;
	} else {
		/* packets too big for a 2k buffer must start in the 4k pool */
		lo = (dwlen + 1 > dd->piosize2kmax_dwords) ?
			dd->piobcnt2k : 0;
		/* hi is same for 2k and 4k, because we use 4k if all 2k busy */
		hi = dd->cspec->lastbuf_for_pio;
	}
	return qib_getsendbuf_range(dd, pbufnum, lo, hi);
}
/*
 * Program the per-port PortSamples counter machinery:
 * @intv is the sampling interval, @start the sample start point.
 */
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}
/*
 * Set the SDMA descriptor-count (interrupt coalescing) register.
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}
/*
 * Dump all SDMA-related chip registers for this port, for debugging.
 * Also clears (write-back) the senddmabuf_use bits and re-reads them
 * so the "cleared" values can be compared against the "current" ones.
 * sdma_lock should be acquired before calling this routine.
 */
static void dump_sdma_7322_state(struct qib_pportdata *ppd)
{
	u64 reg, reg1, reg2;

	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmastatus: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_sendctrl);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sendctrl: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmabase);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabase: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);
	/* get bufuse bits, clear them, and print them again if non-zero */
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	/*
	 * Fix: write each use register's bits back to the SAME register
	 * to clear them.  The previous code wrote all three read values
	 * back into use0, leaving use1/use2 uncleared.
	 */
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	/* 0 and 1 should always be zero, so print as short form */
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
		reg, reg1, reg2);
	reg = qib_read_kreg_port(ppd, krp_senddmatail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmatail: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmahead);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmahead: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmalengen: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmadesccnt: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
	dump_sdma_state(ppd);
}
/*
 * Hardware control actions for each SDMA state, indexed by the generic
 * qib_sdma_state values; installed via qib_7322_sdma_init_early() and
 * consumed by the common SDMA state machine when it changes state.
 */
static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};
/* Hook the 7322 per-state action table into the generic SDMA machinery. */
static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}
/*
 * Program this port's SDMA chip registers from the software state:
 * descriptor queue base/length, tail, head address, reload count,
 * and the 3-word bitmask of PIO buffers this port may use for SDMA.
 * Always returns 0.
 */
static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned lastbuf, erstbuf;
	u64 senddmabufmask[3] = { 0 };
	int n, ret = 0;

	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

	/* split the SDMA buffer pool evenly across the enabled ports */
	if (dd->num_pports)
		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
	else
		n = dd->cspec->sdmabufcnt; /* failsafe for init */
	/*
	 * Single port (or port 2) takes the last n buffers of the pool;
	 * port 1 of a dual-port chip takes the n buffers before those.
	 */
	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
		((dd->num_pports == 1 || ppd->port == 2) ? n :
			dd->cspec->sdmabufcnt);
	lastbuf = erstbuf + n;

	ppd->sdma_state.first_sendbuf = erstbuf;
	ppd->sdma_state.last_sendbuf = lastbuf;
	/* build the 3 x 64-bit buffer-ownership mask for this port */
	for (; erstbuf < lastbuf; ++erstbuf) {
		unsigned word = erstbuf / BITS_PER_LONG;
		unsigned bit = erstbuf & (BITS_PER_LONG - 1);

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
	return ret;
}
/*
 * Return the current SDMA hardware head index for this port.
 * Prefers the DMA'ed shadow copy when the engine is running and the
 * chip supports SDMA timeouts; falls back to the chip register (and
 * retries once from the register if the shadow value looks insane).
 * sdma_lock must be held.
 */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
		(u16) qib_read_kreg_port(ppd, krp_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	/* sanity-check hwhead against the software ring pointers */
	if (swhead < swtail)
		/* not wrapped; use logical &&, consistent with the
		 * wrapped case below (was a bitwise '&') */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* proceed as if no progress */
		hwhead = swhead;
	}

	return hwhead;
}
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
(hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
!(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
!(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}
/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.  Also folds the VL and the
 * hardware port selector into the returned PBC control word.
 */
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 port_mult = ppd->delay_mult;
	u8 rate_mult = ib_rate_to_delay[srate];
	u32 ctrl = 0;

	/* delay is only needed when the QP rate is below the port rate */
	if (rate_mult > port_mult)
		ctrl = ((plen + 1) >> 1) * port_mult;

	/* Indicate VL15, else set the VL in the control word */
	if (vl == 15)
		ctrl |= PBC_7322_VL15_SEND_CTRL;
	else
		ctrl |= (u32)vl << PBC_VL_NUM_LSB;

	ctrl |= (u32)ppd->hw_pidx << PBC_PORT_SEL_LSB;
	return ctrl;
}
/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that is done before the shadow
 * is set up, and this has to be done after the shadow is
 * set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
	unsigned vl15bufs;

	/* VL15 buffers start immediately after the 2k and 4k pools */
	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
			       TXCHK_CHG_TYPE_KERN, NULL);
}
/*
 * Set up eager-buffer count and TID base for a receive context.
 * Kernel contexts (ctxt < NUM_IB_PORTS) share KCTXT0_EGRCNT; user
 * contexts get the per-context count and slot in after the kernel ones.
 */
static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (rcd->ctxt >= NUM_IB_PORTS) {
		/* user context: fixed count, placed after kernel contexts */
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
		return;
	}
	if (rcd->dd->num_pports > 1) {
		/* dual port: split the kernel eager buffers in half */
		rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
		rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
	} else {
		rcd->rcvegrcnt = KCTXT0_EGRCNT;
		rcd->rcvegr_tid_base = 0;
	}
}
#define QTXSLEEPS 5000
/*
 * Change the checking type (IB-packet vs GRH) and/or check-enable
 * state for a range of PIO send buffers, then push the updated masks
 * to the chip.
 * @start: first buffer number in the range
 * @len:   number of buffers in the range
 * @which: one of the TXCHK_CHG_TYPE_* codes
 * @rcd:   if non-NULL, wait (bounded by QTXSLEEPS) for the buffers to
 *         be released by the driver before flipping to user checking
 */
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	const int last = start + len - 1;
	const int lastr = last / BITS_PER_LONG;
	u32 sleeps = 0;
	int wait = rcd != NULL;
	unsigned long flags;

	while (wait) {
		unsigned long shadow;
		int cstart, previ = -1;

		/*
		 * when flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver. It's OK the other direction, because it's
		 * from close, and we have just disarm'ed all the
		 * buffers. All the kernel to kernel changes are also
		 * OK.
		 */
		for (cstart = start; cstart <= last; cstart++) {
			/* word of the DMA'ed avail shadow holding this bit */
			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
			    / BITS_PER_LONG;
			if (i != previ) {
				shadow = (unsigned long)
					le64_to_cpu(dd->pioavailregs_dma[i]);
				previ = i;
			}
			if (test_bit(((2 * cstart) +
				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
				     % BITS_PER_LONG, &shadow))
				break;
		}

		/* whole range free, or we gave up waiting */
		if (cstart > last)
			break;

		if (sleeps == QTXSLEEPS)
			break;
		/* make sure we see an updated copy next time around */
		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		sleeps++;
		msleep(20);
	}

	switch (which) {
	case TXCHK_CHG_TYPE_DIS1:
		/*
		 * disable checking on a range; used by diags; just
		 * one buffer, but still written generically
		 */
		for (i = start; i <= last; i++)
			clear_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_ENAB1:
		/*
		 * (re)enable checking on a range; used by diags; just
		 * one buffer, but still written generically; read
		 * scratch to be sure buffer actually triggered, not
		 * just flushed from processor.
		 */
		qib_read_kreg32(dd, kr_scratch);
		for (i = start; i <= last; i++)
			set_bit(i, dd->cspec->sendchkenable);
		break;

	case TXCHK_CHG_TYPE_KERN:
		/* usable by kernel */
		for (i = start; i <= last; i++) {
			set_bit(i, dd->cspec->sendibchk);
			clear_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		/* see if we need to raise avail update threshold */
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			   < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		/* no context needs a lowered threshold: restore default */
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					 SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;

	case TXCHK_CHG_TYPE_USER:
		/* for user process */
		for (i = start; i <= last; i++) {
			clear_bit(i, dd->cspec->sendibchk);
			set_bit(i, dd->cspec->sendgrhchk);
		}
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		/* lower the threshold if this context has few buffers */
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;

	default:
		break;
	}

	/* push the updated enable mask for DIS1/ENAB1 (which >= 2) ... */
	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
		qib_write_kreg(dd, kr_sendcheckmask + i,
			       dd->cspec->sendchkenable[i]);

	/* ... or both check-type masks for KERN/USER (which < 2) */
	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
			       dd->cspec->sendgrhchk[i]);
		qib_write_kreg(dd, kr_sendibpktmask + i,
			       dd->cspec->sendibchk[i]);
	}

	/*
	 * Be sure whatever we did was seen by the chip and acted upon,
	 * before we return.  Mostly important for which >= 2.
	 */
	qib_read_kreg32(dd, kr_scratch);
}
/*
 * Write a value to the chip scratch register.
 * Useful for trigger analyzers, etc.
 */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}
/*
 * Dummy for now, use chip regs soon.
 * Always returns -ENXIO until a real temperature-sensor read is
 * implemented for this chip.
 */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}
/**
* qib_init_iba7322_funcs - set up the chip-specific function pointers
* @dev: the pci_dev for qlogic_ib device
* @ent: pci_device_id struct for this dev
*
* Also allocates, inits, and returns the devdata struct for this
* device instance
*
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct qib_devdata *dd;
int ret, i;
u32 tabsize, actual_cnt = 0;
dd = qib_alloc_devdata(pdev,
NUM_IB_PORTS * sizeof(struct qib_pportdata) +
sizeof(struct qib_chip_specific) +
NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
if (IS_ERR(dd))
goto bail;
dd->f_bringup_serdes = qib_7322_bringup_serdes;
dd->f_cleanup = qib_setup_7322_cleanup;
dd->f_clear_tids = qib_7322_clear_tids;
dd->f_free_irq = qib_7322_free_irq;
dd->f_get_base_info = qib_7322_get_base_info;
dd->f_get_msgheader = qib_7322_get_msgheader;
dd->f_getsendbuf = qib_7322_getsendbuf;
dd->f_gpio_mod = gpio_7322_mod;
dd->f_eeprom_wen = qib_7322_eeprom_wen;
dd->f_hdrqempty = qib_7322_hdrqempty;
dd->f_ib_updown = qib_7322_ib_updown;
dd->f_init_ctxt = qib_7322_init_ctxt;
dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
dd->f_intr_fallback = qib_7322_intr_fallback;
dd->f_late_initreg = qib_late_7322_initreg;
dd->f_setpbc_control = qib_7322_setpbc_control;
dd->f_portcntr = qib_portcntr_7322;
dd->f_put_tid = qib_7322_put_tid;
dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
dd->f_rcvctrl = rcvctrl_7322_mod;
dd->f_read_cntrs = qib_read_7322cntrs;
dd->f_read_portcntrs = qib_read_7322portcntrs;
dd->f_reset = qib_do_7322_reset;
dd->f_init_sdma_regs = init_sdma_7322_regs;
dd->f_sdma_busy = qib_sdma_7322_busy;
dd->f_sdma_gethead = qib_sdma_7322_gethead;
dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
dd->f_sendctrl = sendctrl_7322_mod;
dd->f_set_armlaunch = qib_set_7322_armlaunch;
dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
dd->f_iblink_state = qib_7322_iblink_state;
dd->f_ibphys_portstate = qib_7322_phys_portstate;
dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
dd->f_set_ib_loopback = qib_7322_set_loopback;
dd->f_get_ib_table = qib_7322_get_ib_table;
dd->f_set_ib_table = qib_7322_set_ib_table;
dd->f_set_intr_state = qib_7322_set_intr_state;
dd->f_setextled = qib_setup_7322_setextled;
dd->f_txchk_change = qib_7322_txchk_change;
dd->f_update_usrhead = qib_update_7322_usrhead;
dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
dd->f_sdma_init_early = qib_7322_sdma_init_early;
dd->f_writescratch = writescratch;
dd->f_tempsense_rd = qib_7322_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
dd->f_notify_dca = qib_7322_notify_dca;
#endif
/*
* Do remaining PCIe setup and save PCIe values in dd.
* Any error printing is already done by the init code.
* On return, we have the chip mapped, but chip registers
* are not set up until start of qib_init_7322_variables.
*/
ret = qib_pcie_ddinit(dd, pdev, ent);
if (ret < 0)
goto bail_free;
/* initialize chip-specific variables */
ret = qib_init_7322_variables(dd);
if (ret)
goto bail_cleanup;
if (qib_mini_init || !dd->num_pports)
goto bail;
/*
* Determine number of vectors we want; depends on port count
* and number of configured kernel receive queues actually used.
* Should also depend on whether sdma is enabled or not, but
* that's such a rare testing case it's not worth worrying about.
*/
tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
for (i = 0; i < tabsize; i++)
if ((i < ARRAY_SIZE(irq_table) &&
irq_table[i].port <= dd->num_pports) ||
(i >= ARRAY_SIZE(irq_table) &&
dd->rcd[i - ARRAY_SIZE(irq_table)]))
actual_cnt++;
/* reduce by ctxt's < 2 */
if (qib_krcvq01_no_msi)
actual_cnt -= dd->num_pports;
tabsize = actual_cnt;
dd->cspec->msix_entries = kzalloc(tabsize *
sizeof(struct qib_msix_entry), GFP_KERNEL);
if (!dd->cspec->msix_entries) {
qib_dev_err(dd, "No memory for MSIx table\n");
tabsize = 0;
}
for (i = 0; i < tabsize; i++)
dd->cspec->msix_entries[i].msix.entry = i;
if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
/* may be less than we wanted, if not enough available */
dd->cspec->num_msix_entries = tabsize;
/* setup interrupt handler */
qib_setup_7322_interrupt(dd, 1);
/* clear diagctrl register, in case diags were running and crashed */
qib_write_kreg(dd, kr_hwdiagctrl, 0);
#ifdef CONFIG_INFINIBAND_QIB_DCA
if (!dca_add_requester(&pdev->dev)) {
qib_devinfo(dd->pcidev, "DCA enabled\n");
dd->flags |= QIB_DCA_ENABLED;
qib_setup_dca(dd);
}
#endif
goto bail;
bail_cleanup:
qib_pcie_ddcleanup(dd);
bail_free:
qib_free_devdata(dd);
dd = ERR_PTR(ret);
bail:
return dd;
}
/*
 * Set the table entry at the specified index from the table specifed.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'idx' below addresses the correct entry, while its 4 LSBs select the
 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
 */
/* Bit positions of the txdds_ent fields within a packed DDS register */
#define DDS_ENT_AMP_LSB 14		/* amplitude */
#define DDS_ENT_MAIN_LSB 9		/* main tap */
#define DDS_ENT_POST_LSB 5		/* post-cursor */
#define DDS_ENT_PRE_XTRA_LSB 3		/* extra pre-cursor */
#define DDS_ENT_PRE_LSB 0		/* pre-cursor */
/*
* Set one entry in the TxDDS table for spec'd port
* ridx picks one of the entries, while tp points
* to the appropriate table entry.
*/
static void set_txdds(struct qib_pportdata *ppd, int ridx,
const struct txdds_ent *tp)
{
struct qib_devdata *dd = ppd->dd;
u32 pack_ent;
int regidx;
/* Get correct offset in chip-space, and in source table */
regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
/*
* We do not use qib_write_kreg_port() because it was intended
* only for registers in the lower "port specific" pages.
* So do index calculation by hand.
*/
if (ppd->hw_pidx)
regidx += (dd->palign / sizeof(u64));
pack_ent = tp->amp << DDS_ENT_AMP_LSB;
pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
pack_ent |= tp->post << DDS_ENT_POST_LSB;
qib_write_kreg(dd, regidx, pack_ent);
/* Prevent back-to-back writes by hitting scratch */
qib_write_kreg(ppd->dd, kr_scratch, 0);
}
/*
 * Per-cable SerDes TX settings, matched against the QSFP EEPROM's
 * vendor OUI and (when non-NULL) part number.  Each entry supplies
 * the SDR, DDR and QDR txdds_ent values, in that order.  A NULL
 * partnum acts as a vendor-default wildcard, so wildcard entries
 * must follow the specific ones for that vendor.
 */
static const struct vendor_txdds_ent vendor_txdds[] = {
	{ /* Amphenol 1m 30awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470002       ",
		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
	},
	{ /* Amphenol 3m 28awg NoEq */
		{ 0x41, 0x50, 0x48 }, "584470004       ",
		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
	},
	{ /* Finisar 3m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
	},
	{ /* Finisar 30m OM2 Optical */
		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
	},
	{ /* Finisar Default OM2 Optical */
		{ 0x00, 0x90, 0x65 }, NULL,
		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
	},
	{ /* Gore 1m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
	},
	{ /* Gore 2m 30awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
	},
	{ /* Gore 1m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
	},
	{ /* Gore 3m 28awg NoEq */
		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
	},
	{ /* Gore 5m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
	},
	{ /* Gore 7m 24awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
	},
	{ /* Gore 5m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
	},
	{ /* Gore 7m 26awg Eq */
		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, { 10,  1,  8, 15 },
	},
	{ /* Intersil 12m 24awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
	},
	{ /* Intersil 10m 28awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
	},
	{ /* Intersil 7m 30awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
	},
	{ /* Intersil 5m 32awg Active */
		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
	},
	{ /* Intersil Default Active */
		{ 0x00, 0x30, 0xB4 }, NULL,
		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
	},
	{ /* Luxtera 20m Active Optical */
		{ 0x00, 0x25, 0x63 }, NULL,
		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
	},
	{ /* Molex 1M Cu loopback */
		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
	},
	{ /* Molex 2m 28awg NoEq */
		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
	},
};
/* Default SDR TX settings, indexed by attenuation in dB (entry 0 = loopback) */
static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{  2, 2, 15,  6 },	/* Loopback */
	{  0, 0,  0,  1 },	/*  2 dB */
	{  0, 0,  0,  2 },	/*  3 dB */
	{  0, 0,  0,  3 },	/*  4 dB */
	{  0, 0,  0,  4 },	/*  5 dB */
	{  0, 0,  0,  5 },	/*  6 dB */
	{  0, 0,  0,  6 },	/*  7 dB */
	{  0, 0,  0,  7 },	/*  8 dB */
	{  0, 0,  0,  8 },	/*  9 dB */
	{  0, 0,  0,  9 },	/* 10 dB */
	{  0, 0,  0, 10 },	/* 11 dB */
	{  0, 0,  0, 11 },	/* 12 dB */
	{  0, 0,  0, 12 },	/* 13 dB */
	{  0, 0,  0, 13 },	/* 14 dB */
	{  0, 0,  0, 14 },	/* 15 dB */
	{  0, 0,  0, 15 },	/* 16 dB */
};
/* Default DDR TX settings, indexed by attenuation in dB (entry 0 = loopback) */
static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{  2, 2, 15,  6 },	/* Loopback */
	{  0, 0,  0,  8 },	/*  2 dB */
	{  0, 0,  0,  8 },	/*  3 dB */
	{  0, 0,  0,  9 },	/*  4 dB */
	{  0, 0,  0,  9 },	/*  5 dB */
	{  0, 0,  0, 10 },	/*  6 dB */
	{  0, 0,  0, 10 },	/*  7 dB */
	{  0, 0,  0, 11 },	/*  8 dB */
	{  0, 0,  0, 11 },	/*  9 dB */
	{  0, 0,  0, 12 },	/* 10 dB */
	{  0, 0,  0, 12 },	/* 11 dB */
	{  0, 0,  0, 13 },	/* 12 dB */
	{  0, 0,  0, 13 },	/* 13 dB */
	{  0, 0,  0, 14 },	/* 14 dB */
	{  0, 0,  0, 14 },	/* 15 dB */
	{  0, 0,  0, 15 },	/* 16 dB */
};
/* Default QDR TX settings, indexed by attenuation in dB (entry 0 = loopback) */
static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
	/* amp, pre, main, post */
	{  2, 2, 15,  6 },	/* Loopback */
	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
	{  0, 1,  0, 11 },	/*  4 dB */
	{  0, 1,  0, 13 },	/*  5 dB */
	{  0, 1,  0, 15 },	/*  6 dB */
	{  0, 1,  3, 15 },	/*  7 dB */
	{  0, 1,  7, 15 },	/*  8 dB */
	{  0, 1,  7, 15 },	/*  9 dB */
	{  0, 1,  8, 15 },	/* 10 dB */
	{  0, 1,  9, 15 },	/* 11 dB */
	{  0, 1, 10, 15 },	/* 12 dB */
	{  0, 2,  6, 15 },	/* 13 dB */
	{  0, 2,  7, 15 },	/* 14 dB */
	{  0, 2,  8, 15 },	/* 15 dB */
	{  0, 2,  9, 15 },	/* 16 dB */
};
/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
};
/* DDR companion to txdds_extra_sdr; same txselect index layout */
static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
};
/* QDR companion to txdds_extra_sdr; same txselect index layout */
static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
	/* amp, pre, main, post */
	{  0, 1,  0, 4 },	/* QMH7342 backplane settings */
	{  0, 1,  0, 5 },	/* QMH7342 backplane settings */
	{  0, 1,  0, 6 },	/* QMH7342 backplane settings */
	{  0, 1,  0, 8 },	/* QMH7342 backplane settings */
	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
};
/* Manufacturing-test settings; only selectable on QME/QMH boards */
static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
	/* amp, pre, main, post */
	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
};
/*
 * Map an attenuation value in dB to its entry in a txdds table.
 * The attenuation table starts at 2dB for entry 1, with entry 0
 * being the loopback entry; out-of-range values are clamped.
 */
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
					       unsigned atten)
{
	unsigned idx;

	if (atten <= 2)
		idx = 1;			/* lowest real entry */
	else if (atten > TXDDS_TABLE_SZ)
		idx = TXDDS_TABLE_SZ - 1;	/* clamp to table end */
	else
		idx = atten - 1;		/* 2dB lives at index 1 */
	return &txdds[idx];
}
/*
 * Choose the best SDR/DDR/QDR TX serdes settings for this port, in
 * priority order: known-cable table match, active-cable board
 * attenuation, copper-cable EEPROM attenuation, then the txselect
 * module parameter (normal, "extra", or mfg table ranges).
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
			  const struct txdds_ent **sdr_dds,
			  const struct txdds_ent **ddr_dds,
			  const struct txdds_ent **qdr_dds, int override)
{
	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
	int idx;

	/* Search table of known cables */
	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
		const struct vendor_txdds_ent *v = vendor_txdds + idx;

		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
		    (!v->partnum ||
		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
			*sdr_dds = &v->sdr;
			*ddr_dds = &v->ddr;
			*qdr_dds = &v->qdr;
			return;
		}
	}

	/* Active cables don't have attenuation so we only set SERDES
	 * settings to account for the attenuation of the board traces. */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
		return;
	}

	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
						      qd->atten[1])) {
		/* copper cable with known attenuation: index by dB value */
		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
		return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attentuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
		*sdr_dds = &txdds_sdr[idx];
		*ddr_dds = &txdds_ddr[idx];
		*qdr_dds = &txdds_qdr[idx];
	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
		/* similar to above, but index into the "extra" table. */
		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
		*sdr_dds = &txdds_extra_sdr[idx];
		*ddr_dds = &txdds_extra_ddr[idx];
		*qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
					  TXDDS_MFG_SZ)) {
		/* mfg-test range: same entry used for all three speeds */
		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
			ppd->dd->unit, ppd->port, idx);
		*sdr_dds = &txdds_extra_mfg[idx];
		*ddr_dds = &txdds_extra_mfg[idx];
		*qdr_dds = &txdds_extra_mfg[idx];
	} else {
		/* this shouldn't happen, it's range checked */
		*sdr_dds = txdds_sdr + qib_long_atten;
		*ddr_dds = txdds_ddr + qib_long_atten;
		*qdr_dds = txdds_qdr + qib_long_atten;
	}
}
/*
 * Program the whole TxDDS table (SDR, DDR and QDR sections) for a
 * port.  Entry 0 of each section gets the best match for the current
 * cable; remaining entries get the defaults, or the same best value
 * everywhere for mez cards / txselect override (single_ent).
 */
static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;
	int idx;
	int single_ent = 0;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

	/* for mez cards or override, use the selected value for all entries */
	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

	/* Fill in the first entry with the best entry found. */
	set_txdds(ppd, 0, sdr_dds);
	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
			   QIBL_LINKACTIVE)) {
		/* link already up: also push the active-speed entry to
		 * the serdes directly */
		dds = (struct txdds_ent *)(ppd->link_speed_active ==
					   QIB_IB_QDR ?  qdr_dds :
					   (ppd->link_speed_active ==
					    QIB_IB_DDR ? ddr_dds : sdr_dds));
		write_tx_serdes_param(ppd, dds);
	}

	/* Fill in the remaining entries with the default table values. */
	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
			  single_ent ? ddr_dds : txdds_ddr + idx);
		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
			  single_ent ? qdr_dds : txdds_qdr + idx);
	}
}
/* AHB (SerDes management bus) access/transaction register helpers */
#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10
/*
 * Read-modify-write a SerDes management register over the AHB bus:
 * bits set in @mask are replaced with the corresponding bits of @data.
 * Returns the value written, or 0xBAD0BAD if the bus never became ready.
 *
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
 * 5=subsystem which is why most calls have "chan + chan >> 1"
 * for the channel argument.
 */
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
		   u32 data, u32 mask)
{
	u32 rd_data, wr_data, sz_mask;
	u64 trans, acc, prev_acc;
	u32 ret = 0xBAD0BAD;
	int tries;

	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
	/* From this point on, make sure we return access */
	acc = (quad << 1) | 1;
	qib_write_kreg(dd, KR_AHB_ACC, acc);

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		if (trans & AHB_TRANS_RDY)
			break;
	}
	if (tries >= AHB_TRANS_TRIES) {
		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
		goto bail;
	}

	/*
	 * If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes: quad 1 registers are 32 bits
	 * wide, the rest 16.  Use explicit masks rather than
	 * (1UL << 32) - 1, which is undefined when long is 32 bits.
	 */
	sz_mask = (quad == 1) ? 0xffffffffU : 0xffffU;
	wr_data = data & mask & sz_mask;
	if ((~mask & sz_mask) != 0) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
		/* Re-read in case host split reads and read data first */
		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
		wr_data |= (rd_data & ~mask & sz_mask);
	}

	/* If mask is not zero, we need to write. */
	if (mask & sz_mask) {
		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
		trans |= AHB_WR;
		qib_write_kreg(dd, KR_AHB_TRANS, trans);

		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
			if (trans & AHB_TRANS_RDY)
				break;
		}
		if (tries >= AHB_TRANS_TRIES) {
			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
				    AHB_TRANS_TRIES);
			goto bail;
		}
	}
	ret = wr_data;
bail:
	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
	return ret;
}
/*
 * Apply a read-modify-write of the same SerDes register to every
 * channel of this port's IB SerDes, following each write with a
 * read-back access.  The read-back value is not needed (mask 0 makes
 * ahb_mod() a pure read); the original code stored it in an unused
 * local, which is removed here.
 */
static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
                             unsigned mask)
{
        struct qib_devdata *dd = ppd->dd;
        int chan;

        for (chan = 0; chan < SERDES_CHANS; ++chan) {
                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
                        data, mask);
                /* read back; result intentionally discarded */
                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                        addr, 0, 0);
        }
}
/*
 * Turn the receive loss-of-signal (LOS) detector on or off for this
 * port, logging only when the setting actually changes; the register
 * is written back unconditionally.
 */
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
        u64 ctrl = qib_read_kreg_port(ppd, krp_serdesctrl);
        u8 cur = SYM_FIELD(ctrl, IBSerdesCtrl_0, RXLOSEN);

        if (enable) {
                if (!cur) {
                        pr_info("IB%u:%u Turning LOS on\n",
                                ppd->dd->unit, ppd->port);
                        ctrl |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
                }
        } else {
                if (cur) {
                        pr_info("IB%u:%u Turning LOS off\n",
                                ppd->dd->unit, ppd->port);
                        ctrl &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
                }
        }
        qib_write_kreg_port(ppd, krp_serdesctrl, ctrl);
}
/*
 * Dispatch SerDes initialization: rev-1 (r1) silicon uses the old
 * bring-up sequence, everything else the new one.
 */
static int serdes_7322_init(struct qib_pportdata *ppd)
{
        return ppd->dd->cspec->r1 ? serdes_7322_init_old(ppd) :
                                    serdes_7322_init_new(ppd);
}
/*
 * SerDes bring-up sequence used on rev-1 (r1) chips: program the Tx
 * DDS tables, clear stale Tx overrides from earlier driver loads,
 * then apply a fixed list of SerDes register patches (timing,
 * termination, LE1/LE2, LoS parameters) and enable receive
 * adaptation.  The statement order is a hardware sequence and must
 * not be rearranged.  Always returns 0.
 */
static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
        u32 le_val;

        /*
         * Initialize the Tx DDS tables.  Also done every QSFP event,
         * for adapters with QSFP
         */
        init_txdds_table(ppd, 0);

        /* ensure no tx overrides from earlier driver loads */
        qib_write_kreg_port(ppd, krp_tx_deemph_override,
                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                reset_tx_deemphasis_override));

        /* Patch some SerDes defaults to "Better for IB" */
        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));

        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

        /* May be overridden in qsfp_7322_event */
        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

        /* enable LE1 adaptation for all but QME, which is disabled */
        le_val = IS_QME(ppd->dd) ? 0 : 1;
        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

        /* Clear cmode-override, may be set from older driver */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

        /* setup LoS params; these are subsystem, so chan == 5 */
        /* LoS filter threshold_count on, ch 0-3, set to 8 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

        /* LoS filter threshold_count off, ch 0-3, set to 4 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

        /* LoS filter select enabled */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

        /* LoS target data: SDR=4, DDR=2, QDR=1 */
        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

        serdes_7322_los_enable(ppd, 1);

        /* rxbistena; set 0 to avoid effects of it switch later */
        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

        /* Configure 4 DFE taps, and only they adapt */
        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

        /*
         * Set receive adaptation mode.  SDR and DDR adaptation are
         * always on, and QDR is initially enabled; later disabled.
         */
        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                            ppd->dd->cspec->r1 ?
                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
        ppd->cpspec->qdr_dfe_on = 1;

        /* FLoop LOS gate: PPM filter enabled */
        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
        /* rx offset center enabled */
        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

        if (!ppd->dd->cspec->r1) {
                /* extra gain-loop setup on non-r1 silicon */
                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
        }

        /* Set the frequency loop bandwidth to 15 */
        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

        return 0;
}
/*
 * SerDes bring-up for non-r1 chips, following the LSI-suggested
 * sequence: quiesce all adaptation loops, reset the RX path, run RX
 * latch/offset calibration with a bounded 0.5 s poll for completion,
 * then re-enable the loops and program the Tx DDS tables.  The
 * statement order is a hardware sequence and must not be rearranged.
 * Always returns 0, even if calibration did not complete (only logged).
 */
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
        unsigned long tend;
        u32 le_val, rxcaldone;
        /* one bit per channel; cleared as each channel calibrates */
        int chan, chan_done = (1 << SERDES_CHANS) - 1;

        /* Clear cmode-override, may be set from older driver */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

        /* ensure no tx overrides from earlier driver loads */
        qib_write_kreg_port(ppd, krp_tx_deemph_override,
                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                reset_tx_deemphasis_override));

        /* START OF LSI SUGGESTED SERDES BRINGUP */
        /* Reset - Calibration Setup */
        /* Stop DFE adaptation */
        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
        /* Disable LE1 */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
        /* Disable autoadapt for LE1 */
        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
        /* Disable LE2 */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
        /* Disable VGA */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
        /* Disable AFE Offset Cancel */
        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
        /* Disable Timing Loop */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
        /* Disable Frequency Loop */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
        /* Disable Baseline Wander Correction */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
        /* Disable RX Calibration */
        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
        /* Disable RX Offset Calibration */
        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
        /* Select BB CDR */
        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
        /* CDR Step Size */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
        /* Enable phase Calibration */
        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
        /* DFE Bandwidth [2:14-12] */
        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
        /* DFE Config (4 taps only) */
        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
        /* Gain Loop Bandwidth */
        if (!ppd->dd->cspec->r1) {
                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
        } else {
                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
        }
        /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
        /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
        /* Data Rate Select [5:7-6] (leave as default) */
        /* RX Parallel Word Width [3:10-8] (leave as default) */

        /* RX REST */
        /* Single- or Multi-channel reset */
        /* RX Analog reset */
        /* RX Digital reset */
        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
        msleep(20);
        /* RX Analog reset */
        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
        msleep(20);
        /* RX Digital reset */
        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
        msleep(20);

        /* setup LoS params; these are subsystem, so chan == 5 */
        /* LoS filter threshold_count on, ch 0-3, set to 8 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

        /* LoS filter threshold_count off, ch 0-3, set to 4 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

        /* LoS filter select enabled */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

        /* LoS target data: SDR=4, DDR=2, QDR=1 */
        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

        /* Turn on LOS on initial SERDES init */
        serdes_7322_los_enable(ppd, 1);
        /* FLoop LOS gate: PPM filter enabled */
        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

        /* RX LATCH CALIBRATION */
        /* Enable Eyefinder Phase Calibration latch */
        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
        /* Enable RX Offset Calibration latch */
        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
        msleep(20);
        /* Start Calibration */
        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
        tend = jiffies + msecs_to_jiffies(500);
        while (chan_done && !time_is_before_jiffies(tend)) {
                msleep(20);
                for (chan = 0; chan < SERDES_CHANS; ++chan) {
                        /* mask 0: pure read of calibration status reg 25 */
                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
                                            (chan + (chan >> 1)),
                                            25, 0, 0);
                        /* bit 9 set == done; clear this chan's pending bit */
                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
                            (~chan_done & (1 << chan)) == 0)
                                chan_done &= ~(1 << chan);
                }
        }
        if (chan_done) {
                pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
                        IBSD(ppd->hw_pidx), chan_done);
        } else {
                /* all done; re-read each chan to report failures (bit 10) */
                for (chan = 0; chan < SERDES_CHANS; ++chan) {
                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
                                            (chan + (chan >> 1)),
                                            25, 0, 0);
                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
                                pr_info("Serdes %d chan %d calibration failed\n",
                                        IBSD(ppd->hw_pidx), chan);
                }
        }

        /* Turn off Calibration */
        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
        msleep(20);

        /* BRING RX UP */
        /* Set LE2 value (May be overridden in qsfp_7322_event) */
        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
        /* Set LE2 Loop bandwidth */
        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
        /* Enable LE2 */
        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
        msleep(20);
        /* Enable H0 only */
        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
        /* Enable VGA */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
        msleep(20);
        /* Set Frequency Loop Bandwidth */
        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
        /* Enable Frequency Loop */
        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
        /* Set Timing Loop Bandwidth */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
        /* Enable Timing Loop */
        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
        msleep(50);
        /* Enable DFE
         * Set receive adaptation mode.  SDR and DDR adaptation are
         * always on, and QDR is initially enabled; later disabled.
         */
        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                            ppd->dd->cspec->r1 ?
                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
        ppd->cpspec->qdr_dfe_on = 1;
        /* Disable LE1 */
        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
        /* Disable auto adapt for LE1 */
        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
        msleep(20);
        /* Enable AFE Offset Cancel */
        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
        /* Enable Baseline Wander Correction */
        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
        /* VGA output common mode */
        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

        /*
         * Initialize the Tx DDS tables.  Also done every QSFP event,
         * for adapters with QSFP
         */
        init_txdds_table(ppd, 0);

        return 0;
}
/* start adjust QMH serdes parameters */
/* Write a 6-bit manual code into bits [14:9] of SerDes register 9. */
static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
        const int ahb_chan = chan + (chan >> 1);

        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), ahb_chan,
                9, code << 9, 0x3f << 9);
}
/*
 * Select manual (enable != 0) or automatic H1 mode for one channel by
 * writing bits [14:10] of SerDes register 1.  @tapenable is part of
 * the interface but currently unused.
 */
static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
                            int enable, u32 tapenable)
{
        u32 field = enable ? (3 << 10) : 0;

        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                1, field, 0x1f << 10);
}
/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
4, 0x4000, 0x4000);
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
4, 0, 0x4000);
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
4, 0x4000, 0x4000);
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
4, 0, 0x4000);
}
/*
* write the current Tx serdes pre,post,main,amp settings into the serdes.
* The caller must pass the settings appropriate for the current speed,
* or not care if they are correct for the current speed.
*/
/*
 * Merge the amp/main/post/pre values from @txdds into the Tx
 * deemphasis override register: clear the four fields, set the
 * override-select bit, then OR in each masked, shifted field.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
                                  struct txdds_ent *txdds)
{
        u64 deemph;

        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
        /* field names for amp, main, post, pre, respectively */
        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

        /* route the SerDes Tx controls from this override register */
        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                           tx_override_deemphasis_select);
        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                   txampcntl_d2a);
        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                   txc0_ena);
        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                   txcp1_ena);
        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                   txcn1_ena);
        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}
/*
* Set the parameters for mez cards on link bounce, so they are
* always exactly what was requested. Similar logic to init_txdds
* but does just the serdes.
*/
/*
 * Re-push the Tx serdes settings for the currently active link speed
 * (override=1 so find_best_ent() returns exactly the requested
 * entries).
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
        const struct txdds_ent *pick;

        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);

        if (ppd->link_speed_active == QIB_IB_QDR)
                pick = qdr_dds;
        else if (ppd->link_speed_active == QIB_IB_DDR)
                pick = ddr_dds;
        else
                pick = sdr_dds;

        write_tx_serdes_param(ppd, (struct txdds_ent *)pick);
}
/*
 * Force the QDR H1 value into each SerDes channel on r1 silicon.
 * On other revisions only the reforce flag is cleared.
 */
static void force_h1(struct qib_pportdata *ppd)
{
        int lane;

        ppd->cpspec->qdr_reforce = 0;
        if (!ppd->dd->cspec->r1)
                return;

        for (lane = 0; lane < SERDES_CHANS; lane++) {
                /* enter manual mode, write the code, clock it in, leave */
                set_man_mode_h1(ppd, lane, 1, 0);
                set_man_code(ppd, lane, ppd->cpspec->h1_val);
                clock_man(ppd, lane);
                set_man_mode_h1(ppd, lane, 0, 0);
        }
}
#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1
/* Enable scan-chain (JTAG-style) access; always returns 0. */
static int qib_r_grab(struct qib_devdata *dd)
{
        qib_write_kreg(dd, kr_r_access, SJA_EN);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
}
/*
 * Poll (bounded, up to 100 reads) for the R_RDY bit.  On success the
 * return value is the current state of R_TDO (0 or 1); on timeout -1.
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
        int poll;

        for (poll = 0; poll < 100; ++poll) {
                u64 reg = qib_read_kreg32(dd, kr_r_access);

                if (reg & R_RDY)
                        return (reg >> R_TDO_LSB) & 1;
        }
        return -1;
}
/*
 * Shift @len bits through the scan chain selected by @bisten.  Bits
 * from @inp (if non-NULL, LSB-first within each byte) are driven on
 * TDI; bits captured on TDO are stored into @outp (if non-NULL, same
 * layout).  Returns the number of bits shifted, or a negative value
 * if the interface never became ready.
 */
static int qib_r_shift(struct qib_devdata *dd, int bisten,
                       int len, u8 *inp, u8 *outp)
{
        u64 valbase, val;
        int ret, pos;

        /* base command: access enabled, chain selected, SHIFT opcode */
        valbase = SJA_EN | (bisten << BISTEN_LSB) |
                (R_OP_SHIFT << R_OPCODE_LSB);
        ret = qib_r_wait_for_rdy(dd);
        if (ret < 0)
                goto bail;
        for (pos = 0; pos < len; ++pos) {
                val = valbase;
                if (outp) {
                        /*
                         * ret holds the TDO level returned by the most
                         * recent qib_r_wait_for_rdy(), i.e. the captured
                         * bit for this position — store it before
                         * shifting the next bit in.
                         */
                        outp[pos >> 3] &= ~(1 << (pos & 7));
                        outp[pos >> 3] |= (ret << (pos & 7));
                }
                if (inp) {
                        /* next TDI bit, LSB-first within each input byte */
                        int tdi = inp[pos >> 3] >> (pos & 7);
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
                qib_read_kreg32(dd, kr_scratch); /* read-back after write */
                ret = qib_r_wait_for_rdy(dd);
                if (ret < 0)
                        break;
        }
        /* Restore to NOP between operations. */
        val = SJA_EN | (bisten << BISTEN_LSB);
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        ret = qib_r_wait_for_rdy(dd);
        if (ret >= 0)
                ret = pos;
bail:
        return ret;
}
/*
 * Issue an UPDATE opcode on the scan chain selected by @bisten once
 * the interface is ready.  Returns the wait status (negative on
 * timeout, in which case nothing is written).
 */
static int qib_r_update(struct qib_devdata *dd, int bisten)
{
        int ret = qib_r_wait_for_rdy(dd);

        if (ret >= 0) {
                qib_write_kreg(dd, kr_r_access,
                               SJA_EN | (bisten << BISTEN_LSB) |
                               (R_OP_UPDATE << R_OPCODE_LSB));
                qib_read_kreg32(dd, kr_scratch);
        }
        return ret;
}
#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632
#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
/* these are common for all IB port use cases. */
/*
 * Scan-chain images shifted in by setup_7322_link_recovery() via
 * qib_r_shift() (bits consumed LSB-first within each byte).  The
 * reset_* images are shifted first to clear the chains, then the
 * active image for the configuration in use.
 */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

/* active AT image, shifted in after reset_at */
static u8 at[BIT2BYTE(LEN_AT)] = {
        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
* Do setup to properly handle IB link recovery; if port is zero, we
* are initializing to cover both ports; otherwise we are initializing
* to cover a single port card, or the port has reached INIT and we may
* need to switch coverage types.
*/
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
        u8 *portsel, *etm;
        struct qib_devdata *dd = ppd->dd;

        /* only rev-1 silicon uses this scan-chain based setup */
        if (!ppd->dd->cspec->r1)
                return;
        if (!both) {
                /* single-port init: count this port as covered */
                dd->cspec->recovery_ports_initted++;
                ppd->cpspec->recovery_init = 1;
        }
        /* pick single-port or dual-port chain images */
        if (!both && dd->cspec->recovery_ports_initted == 1) {
                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
                etm = atetm_1port;
        } else {
                portsel = portsel_2port;
                etm = atetm_2port;
        }

        /*
         * Shift+update the reset images first (ETM then AT), then the
         * port-select, AT and ETM images; any failure short-circuits
         * the chain and is reported once.
         */
        if (qib_r_grab(dd) < 0 ||
            qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
            qib_r_update(dd, BISTEN_ETM) < 0 ||
            qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
            qib_r_update(dd, BISTEN_AT) < 0 ||
            qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
                        portsel, NULL) < 0 ||
            qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
            qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
            qib_r_update(dd, BISTEN_AT) < 0 ||
            qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
            qib_r_update(dd, BISTEN_ETM) < 0)
                qib_dev_err(dd, "Failed IB link recovery setup\n");
}
/*
 * Check whether the RXE hardware is still usable after link recovery
 * on a single-port-initted card: briefly freeze the chip and sample
 * kr_act_fmask.  A zero read means the HCA is unusable — stay frozen,
 * disable interrupts, and require a powercycle.  Otherwise clear the
 * serdes-pclk error, unfreeze, and take the IBC back out of reset.
 */
static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        u64 fmask;

        if (dd->cspec->recovery_ports_initted != 1)
                return; /* rest doesn't apply to dualport */
        qib_write_kreg(dd, kr_control, dd->control |
                       SYM_MASK(Control, FreezeMode));
        (void)qib_read_kreg64(dd, kr_scratch);
        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
        fmask = qib_read_kreg64(dd, kr_act_fmask);
        if (!fmask) {
                /*
                 * require a powercycle before we'll work again, and make
                 * sure we get no more interrupts, and don't turn off
                 * freeze.
                 */
                ppd->dd->cspec->stay_in_freeze = 1;
                qib_7322_set_intr_state(ppd->dd, 0);
                qib_write_kreg(dd, kr_fmask, 0ULL);
                qib_dev_err(dd, "HCA unusable until powercycled\n");
                return; /* eventually reset */
        }

        qib_write_kreg(ppd->dd, kr_hwerrclear,
                       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

        /* don't do the full clear_freeze(), not needed for this */
        qib_write_kreg(dd, kr_control, dd->control);
        qib_read_kreg32(dd, kr_scratch);
        /* take IBC out of reset */
        if (ppd->link_speed_supported) {
                ppd->cpspec->ibcctrl_a &=
                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
                qib_write_kreg_port(ppd, krp_ibcctrl_a,
                                    ppd->cpspec->ibcctrl_a);
                qib_read_kreg32(dd, kr_scratch);
                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
                        qib_set_ib_7322_lstate(ppd, 0,
                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
        }
}
| gpl-2.0 |
kula85/perf-sqlite3 | drivers/gpu/drm/drm_cache.c | 604 | 4025 | /**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/export.h>
#include <drm/drmP.h>
#if defined(CONFIG_X86)
#include <asm/smp.h>
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
* sfence to avoid ordering issues. For drm_clflush_page this fencing happens
* in the caller.
*/
/*
 * Flush every cacheline of one page with clflushopt.  clflushopt is
 * unordered, so the bracketing mfence is the caller's responsibility
 * (see the comment above).
 */
static void
drm_clflush_page(struct page *page)
{
        const int stride = boot_cpu_data.x86_clflush_size;
        unsigned int offset;
        uint8_t *vaddr;

        if (unlikely(page == NULL))
                return;

        vaddr = kmap_atomic(page);
        for (offset = 0; offset < PAGE_SIZE; offset += stride)
                clflushopt(vaddr + offset);
        kunmap_atomic(vaddr);
}
/* Flush an array of pages, fencing the whole batch with mb(). */
static void drm_cache_flush_clflush(struct page *pages[],
                                    unsigned long num_pages)
{
        unsigned long idx;

        mb();
        for (idx = 0; idx < num_pages; idx++)
                drm_clflush_page(pages[idx]);
        mb();
}
#endif
/**
 * drm_clflush_pages - flush CPU caches for an array of pages
 * @pages: array of page pointers (PowerPC path skips NULL entries)
 * @num_pages: number of entries in @pages
 *
 * On x86 with clflush support the pages are flushed line-by-line; as
 * a fallback the entire cache is written back and invalidated on all
 * CPUs (wbinvd).  On PowerPC the dcache range of each page is
 * flushed.  Other architectures only log an error and warn once.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                drm_cache_flush_clflush(pages, num_pages);
                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
        unsigned long i;

        for (i = 0; i < num_pages; i++) {
                struct page *page = pages[i];
                void *page_virtual;

                if (unlikely(page == NULL))
                        continue;

                page_virtual = kmap_atomic(page);
                flush_dcache_range((unsigned long)page_virtual,
                                   (unsigned long)page_virtual + PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
/**
 * drm_clflush_sg - flush CPU caches for all pages of a scatterlist
 * @st: scatter/gather table whose pages are flushed
 *
 * On x86 with clflush support each page is flushed line-by-line,
 * bracketed by memory barriers; otherwise the whole cache is written
 * back and invalidated on every CPU (wbinvd).  Non-x86 architectures
 * are not supported here and only log an error.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                struct sg_page_iter sg_iter;

                mb();
                for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
                        drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb();

                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
/**
 * drm_clflush_virt_range - flush CPU caches for a virtual address range
 * @addr: start of the range (need not be cacheline aligned)
 * @length: length of the range in bytes
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                const int size = boot_cpu_data.x86_clflush_size;
                void *end = addr + length;

                /*
                 * Round the start down to a cacheline boundary; since
                 * the loop runs while addr < end (end computed from
                 * the unaligned start), the line containing the final
                 * byte is still flushed.
                 */
                addr = (void *)(((unsigned long)addr) & -size);
                mb();
                for (; addr < end; addr += size)
                        clflushopt(addr);
                mb();

                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
| gpl-2.0 |
jakieu/linux-2.6-imx | drivers/video/fbdev/omap2/dss/hdmi_phy.c | 1116 | 5016 | /*
* HDMI PHY
*
* Copyright (C) 2013 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <video/omapdss.h>
#include "dss.h"
#include "hdmi.h"
/* Per-SoC HDMI TX PHY capabilities. */
struct hdmi_phy_features {
        bool bist_ctrl;        /* gates accesses to HDMI_TXPHY_BIST_CONTROL */
        bool ldo_voltage;      /* gates max-LDO-voltage setup in hdmi_phy_configure() */
        unsigned long max_phy; /* freqout threshold used in hdmi_phy_configure() */
};

/* selected once by hdmi_phy_init_features() */
static const struct hdmi_phy_features *phy_feat;
/* Dump the TX PHY registers into @s for debugfs-style inspection. */
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
{
#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
                hdmi_read_reg(phy->base, r))

        DUMPPHY(HDMI_TXPHY_TX_CTRL);
        DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
        DUMPPHY(HDMI_TXPHY_POWER_CTRL);
        DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
        /* BIST control exists only on SoCs whose features say so */
        if (phy_feat->bist_ctrl)
                DUMPPHY(HDMI_TXPHY_BIST_CONTROL);
}
/*
 * Parse four (dx, dy) lane pairs from @lanes into the PHY's
 * lane_function / lane_polarity arrays.  Each dx/dy must be in 0..7
 * and form an adjacent pair; an odd dx with dy == dx - 1 selects
 * inverted polarity, an even dx with dy == dx + 1 normal polarity.
 * Returns 0 on success or -EINVAL for any malformed pair.
 */
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes)
{
        int pair;

        for (pair = 0; pair < 8; pair += 2) {
                int dx = lanes[pair];
                int dy = lanes[pair + 1];
                u8 pol;

                if (dx < 0 || dx >= 8 || dy < 0 || dy >= 8)
                        return -EINVAL;

                if (dx & 1) {
                        if (dy != dx - 1)
                                return -EINVAL;
                        pol = 1;
                } else {
                        if (dy != dx + 1)
                                return -EINVAL;
                        pol = 0;
                }

                phy->lane_function[dx / 2] = pair / 2;
                phy->lane_polarity[dx / 2] = pol;
        }

        return 0;
}
/*
 * Program PAD_CFG_CTRL from the parsed lane_function/lane_polarity
 * arrays (see hdmi_phy_parse_lanes()).
 */
static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
{
        /*
         * Lane orderings accepted by the register, one hex nibble per
         * lane; the index of the matching entry is what gets written.
         */
        static const u16 pad_cfg_list[] = {
                0x0123,
                0x0132,
                0x0312,
                0x0321,
                0x0231,
                0x0213,
                0x1023,
                0x1032,
                0x3012,
                0x3021,
                0x2031,
                0x2013,
                0x1203,
                0x1302,
                0x3102,
                0x3201,
                0x2301,
                0x2103,
                0x1230,
                0x1320,
                0x3120,
                0x3210,
                0x2310,
                0x2130,
        };

        u16 lane_cfg = 0;
        int i;
        unsigned lane_cfg_val;
        u16 pol_val = 0;

        /* pack lane_function[0..3], one nibble each, lane 0 highest */
        for (i = 0; i < 4; ++i)
                lane_cfg |= phy->lane_function[i] << ((3 - i) * 4);

        /* note the non-sequential polarity bit positions */
        pol_val |= phy->lane_polarity[0] << 0;
        pol_val |= phy->lane_polarity[1] << 3;
        pol_val |= phy->lane_polarity[2] << 2;
        pol_val |= phy->lane_polarity[3] << 1;

        /* look up the packed config; fall back to entry 0 if unknown */
        for (i = 0; i < ARRAY_SIZE(pad_cfg_list); ++i)
                if (pad_cfg_list[i] == lane_cfg)
                        break;

        if (WARN_ON(i == ARRAY_SIZE(pad_cfg_list)))
                i = 0;

        lane_cfg_val = i;

        REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, lane_cfg_val, 26, 22);
        REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27);
}
/*
 * Configure the TX PHY for the given bit clocks.
 *
 * @hfbitclk: high-frequency bit clock in Hz
 * @lfbitclk: low-frequency bit clock; differing from @hfbitclk means
 *	the LF clock was configured for TMDS (freqout = 0)
 *
 * Always returns 0.
 */
int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
        unsigned long lfbitclk)
{
        u8 freqout;

        /*
         * Read address 0 in order to get the SCP reset done completed
         * Dummy access performed to make sure reset is done
         */
        hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL);

        /*
         * In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
         * HDMI_PHYPWRCMD_LDOON command.
         */
        if (phy_feat->bist_ctrl)
                REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);

        /*
         * If the hfbitclk != lfbitclk, it means the lfbitclk was configured
         * to be used for TMDS.
         */
        if (hfbitclk != lfbitclk)
                freqout = 0;
        else if (hfbitclk / 10 < phy_feat->max_phy)
                freqout = 1;
        else
                freqout = 2;

        /*
         * Write to phy address 0 to configure the clock
         * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
         */
        REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, freqout, 31, 30);

        /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
        hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);

        /* Setup max LDO voltage */
        if (phy_feat->ldo_voltage)
                REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);

        hdmi_phy_configure_lanes(phy);

        return 0;
}
/* OMAP4: no BIST control register, LDO voltage is programmed. */
static const struct hdmi_phy_features omap44xx_phy_feats = {
        .bist_ctrl      =       false,
        .ldo_voltage    =       true,
        .max_phy        =       185675000,
};

/* OMAP5/DRA7: BIST control present, LDO voltage left alone. */
static const struct hdmi_phy_features omap54xx_phy_feats = {
        .bist_ctrl      =       true,
        .ldo_voltage    =       false,
        .max_phy        =       186000000,
};
static int hdmi_phy_init_features(struct platform_device *pdev)
{
struct hdmi_phy_features *dst;
const struct hdmi_phy_features *src;
dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
if (!dst) {
dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n");
return -ENOMEM;
}
switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
src = &omap44xx_phy_feats;
break;
case OMAPDSS_VER_OMAP5:
case OMAPDSS_VER_DRA7xx:
src = &omap54xx_phy_feats;
break;
default:
return -ENODEV;
}
memcpy(dst, src, sizeof(*dst));
phy_feat = dst;
return 0;
}
/*
 * hdmi_phy_init - resolve SoC features and map the "phy" MMIO region.
 * @pdev: owning platform device
 * @phy: PHY state to initialise (phy->base is filled in)
 *
 * Returns 0 on success or a negative errno.
 */
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
{
	struct resource *res;
	int r;

	r = hdmi_phy_init_features(pdev);
	if (r)
		return r;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
	if (!res) {
		DSSERR("can't get PHY mem resource\n");
		return -EINVAL;
	}

	phy->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(phy->base)) {
		DSSERR("can't ioremap TX PHY\n");
		return PTR_ERR(phy->base);
	}

	return 0;
}
| gpl-2.0 |
Rashed97/caf-kernel-msm-3.10 | drivers/md/dm-mpath.c | 1372 | 42458 | /*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/device-mapper.h>
#include "dm-path-selector.h"
#include "dm-uevent.h"
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
/* Path properties */
struct pgpath {
	struct list_head list;		/* Entry on pg->pgpaths */

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;		/* Underlying dm device */
	struct delayed_work activate_path; /* Deferred scsi_dh activation */
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;		/* Entry on m->priority_groups */

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;	/* Selector choosing among pgpaths */
	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;	/* List of struct pgpath */
};
/* Multipath context */
struct multipath {
	struct list_head list;		/* Entry on a global list (if any) */
	struct dm_target *ti;		/* Owning dm target */

	const char *hw_handler_name;	/* scsi_dh handler name, or NULL */
	char *hw_handler_params;	/* Optional handler parameter string */

	spinlock_t lock;		/* Protects the mutable state below */

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	unsigned queue_size;		/* Length of queued_ios */
	struct work_struct process_queued_ios;
	struct list_head queued_ios;	/* Requests waiting for a path */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;	/* Held around flush in postsuspend */
};
/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;		/* Path the clone was mapped to, or NULL */
	size_t nr_bytes;		/* Size of the I/O, for the selector */
};
typedef int (*action_fn) (struct pgpath *pgpath);
#define MIN_IOS 256 /* Mempool size */
static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
/*-----------------------------------------------
* Allocation routines
*-----------------------------------------------*/
static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
if (pgpath) {
pgpath->is_active = 1;
INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
}
/* Release a pgpath allocated by alloc_pgpath(). */
static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (pg)
INIT_LIST_HEAD(&pg->pgpaths);
return pg;
}
/*
 * free_pgpaths - release every path on @pgpaths: detach the scsi_dh
 * handler (when one was configured), drop the dm device reference and
 * free the pgpath itself.
 */
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		/* A handler is only attached when hw_handler_name is set. */
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}
/*
 * Tear down one priority group: destroy its selector (if one was ever
 * attached), release all its paths, then free the group itself.
 */
static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *sel = &pg->ps;

	/* ps.type is only set once parse_path_selector() succeeded. */
	if (sel->type) {
		sel->type->destroy(sel);
		dm_put_path_selector(sel->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}
/*
 * alloc_multipath - allocate and initialise a multipath context and
 * link it to @ti (ti->private).  Returns NULL on allocation failure.
 * queue_io starts at 1 so I/O queues until the first path selection.
 */
static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	INIT_LIST_HEAD(&m->priority_groups);
	INIT_LIST_HEAD(&m->queued_ios);
	spin_lock_init(&m->lock);
	m->queue_io = 1;
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	INIT_WORK(&m->process_queued_ios, process_queued_ios);
	INIT_WORK(&m->trigger_event, trigger_event);
	init_waitqueue_head(&m->pg_init_wait);
	mutex_init(&m->work_mutex);

	m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
	if (!m->mpio_pool) {
		kfree(m);
		return NULL;
	}

	m->ti = ti;
	ti->private = m;

	return m;
}
/*
 * free_multipath - release a multipath context: every priority group
 * (and its paths) first, then the handler strings, mempool and the
 * context itself.
 */
static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *next;

	list_for_each_entry_safe(pg, next, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}
/*
 * set_mapinfo - allocate a zeroed per-request dm_mpath_io from the
 * mempool and attach it to @info.  GFP_ATOMIC because this runs in the
 * request mapping path.  Returns 0 or -ENOMEM.
 */
static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}
/* Detach and return the per-request dm_mpath_io to the mempool. */
static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}
/*-----------------------------------------------
* Path selection
*-----------------------------------------------*/
/*
 * __pg_init_all_paths - queue hardware-handler activation for every
 * active path in the current PG.  Caller must hold m->lock.
 */
static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	/* Back off before retrying if the previous attempt asked for it. */
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		/* Only count work that was actually queued (not pending). */
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
m->current_pg = pgpath->pg;
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
m->pg_init_required = 1;
m->queue_io = 1;
} else {
m->pg_init_required = 0;
m->queue_io = 0;
}
m->pg_init_count = 0;
}
/*
 * __choose_path_in_pg - ask @pg's selector for a path and make it the
 * current pgpath.  Returns 0 on success, -ENXIO when the selector has
 * nothing usable.  Caller must hold m->lock.
 */
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *selected;

	selected = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!selected)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(selected);

	/* Entering a different PG resets its init/queueing state. */
	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}
/*
 * __choose_pgpath - (re)select m->current_pgpath for the next I/O.
 * Caller must hold m->lock.  When every group fails, current_pgpath
 * and current_pg are both cleared.
 */
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
*
* m->lock must be held on entry.
*
* If m->queue_if_no_path and m->saved_queue_if_no_path hold the
* same value then we are not between multipath_presuspend()
* and multipath_resume() calls and we have no need to check
* for the DMF_NOFLUSH_SUSPENDING flag.
*/
/*
 * Push back to dm core only when queue_if_no_path was just changed by
 * multipath_presuspend() (saved != current) and a noflush suspend is
 * in progress.  Caller must hold m->lock.
 */
static int __must_push_back(struct multipath *m)
{
	if (m->queue_if_no_path == m->saved_queue_if_no_path)
		return 0;

	return dm_noflush_suspending(m->ti) != 0;
}
/*
 * map_io - map one cloned request onto a concrete path
 * @m: multipath context
 * @clone: cloned request to dispatch
 * @map_context: per-request info carrying the dm_mpath_io
 * @was_queued: non-zero when resubmitted from the internal queue
 *
 * Returns DM_MAPIO_REMAPPED (send to the chosen path), DM_MAPIO_SUBMITTED
 * (queued internally), DM_MAPIO_REQUEUE (push back to dm core) or -EIO.
 */
static int map_io(struct multipath *m, struct request *clone,
		  union map_info *map_context, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = map_context->ptr;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		/* Steer the clone at the chosen path's queue and disk. */
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	/* REMAPPED implies pgpath != NULL, so this dereference is safe. */
	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
/*
 * queue_if_no_path - set whether I/O is queued when no path is usable.
 * @save_old_value: preserve the previous setting so resume can restore it.
 * Kicks the queued-ios worker when queueing was just turned off and
 * requests are waiting.  Always returns 0.
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	m->saved_queue_if_no_path = save_old_value ? m->queue_if_no_path
						   : queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;

	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*-----------------------------------------------------------------
* The multipath daemon is responsible for resubmitting queued ios.
*---------------------------------------------------------------*/
/*
 * dispatch_queued_ios - drain the internal queue, remapping each clone.
 * The queue is spliced out under m->lock, then processed lock-free.
 */
static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);

		r = map_io(m, clone, info, 1);
		if (r < 0) {
			/* Hard failure: complete the request with the error. */
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			/* Hand back to dm core for later resubmission. */
			clear_mapinfo(m, info);
			dm_requeue_unmapped_request(clone);
		}
		/* DM_MAPIO_SUBMITTED: map_io put it back on our queue. */
	}
}
/*
 * process_queued_ios - kmultipathd work item: start pg_init when
 * required, then dispatch queued requests once queueing is no longer
 * necessary (a ready path exists, or I/O should be failed/pushed back).
 */
static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	/* pg_init may be suppressed while flush_multipath_work() drains. */
	if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
	    !m->pg_init_disabled)
		__pg_init_all_paths(m);

	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}
/*
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
/*
 * Work item: report a table event to dm core.  Scheduled whenever a
 * path is taken out of use (path failure, PG bypass, reinstatement).
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m;

	m = container_of(work, struct multipath, trigger_event);
	dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
* Constructor/argument parsing:
* <#multipath feature args> [<arg>]*
* <#hw_handler args> [hw_handler [<arg>]*]
* <#priority groups>
* <initial priority group>
* [<selector> <#selector args> [<arg>]*
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
/*
 * parse_path_selector - parse "<selector> <#args> [<arg>]*" and attach
 * the selector to @pg.  On success pg->ps.type holds a reference to the
 * selector module; on failure the reference is dropped and ti->error set.
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		/* Constructor failed: drop the reference taken above. */
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}
/*
 * parse_path - parse one "<path> [<arg>]*" segment: take a reference on
 * the underlying device, attach/retain the scsi_dh handler as configured,
 * and register the path with the selector.  Returns the new pgpath or an
 * ERR_PTR; on error all references taken here are released.
 */
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	/* q is only needed when a handler is (or may be) involved. */
	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			/* Ownership of the allocated name moves to m. */
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		/*
		 * Increments scsi_dh reference, even when using an
		 * already-attached handler.
		 */
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler:
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}
/*
 * parse_priority_group - parse one PG: its selector, path count,
 * per-path selector arg count, and each path.  Returns the new group or
 * an ERR_PTR; partially-built groups are freed on error.
 */
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	/* Each path consumes its device arg plus the selector args. */
	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		/* Give parse_path a bounded view of this path's args. */
		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
/*
 * parse_hw_handler - parse "<#hw args> [hw_handler [<arg>]*]" from the
 * table line and record the scsi_dh handler name and parameter string
 * in @m.  Returns 0 (also when no handler was requested) or a negative
 * errno with ti->error set.
 *
 * Fix: the kstrdup() result was previously used unchecked; on
 * allocation failure it was dereferenced by scsi_dh_handler_exist().
 */
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name) {
		ti->error = "memory allocation failed";
		return -ENOMEM;
	}

	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
				     "scsi_dh_%s", m->hw_handler_name)) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		/* Room for the "%u" arg count plus each arg and a NUL. */
		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		/* Pack as NUL-separated strings: "<count>\0arg\0arg\0..." */
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
/*
 * parse_features - parse the "<#feature args> [<arg>]*" table segment.
 * Recognised features: queue_if_no_path, retain_attached_hw_handler,
 * pg_init_retries <n>, pg_init_delay_msecs <n>.  Returns 0 or a
 * negative errno with ti->error set.
 */
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		/* Two-token features require a following value argument. */
		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
/*
 * multipath_ctr - target constructor: parse features, hardware handler,
 * then each priority group from the table line (format documented in
 * the block comment above parse_path_selector()).  On any failure the
 * partially-built multipath is freed and a negative errno returned.
 */
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	/* Zero groups and zero initial group must agree. */
	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		/* Decrement until the requested initial group is reached. */
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;

	return 0;

bad:
	free_multipath(m);
	return r;
}
/*
 * Sleep until all in-flight pg_init activations have completed.
 * Open-coded wait_event(): the task state must be set before sampling
 * pg_init_in_progress under m->lock, so a wake between the check and
 * io_schedule() is not lost.
 */
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}
/*
 * flush_multipath_work - quiesce all asynchronous work: temporarily
 * forbid new pg_init submissions, drain the handler and multipath
 * workqueues plus the trigger_event work, then allow pg_init again.
 */
static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}
/* Target destructor: quiesce all outstanding work, then free everything. */
static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}
/*
* Map cloned requests
*/
/*
 * multipath_map - dm entry point for mapping a cloned request.
 * Allocates the per-request mpio, marks the clone fail-fast on
 * transport errors, and delegates to map_io().  The mpio is released
 * again when the request will not reach multipath_end_io().
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	struct multipath *m = ti->private;
	int r;

	if (set_mapinfo(m, map_context) < 0)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;

	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;

	r = map_io(m, clone, map_context, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		clear_mapinfo(m, map_context);

	return r;
}
/*
* Take a path out of use.
*/
/*
 * Take a path out of use.  Idempotent: an already-failed path is left
 * alone.  Notifies the selector, emits a uevent and schedules a table
 * event.  Always returns 0.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	/* Force re-selection on the next mapped I/O. */
	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*
* Reinstate a previously-failed path
*/
/*
 * Reinstate a previously-failed path.  Idempotent for already-active
 * paths.  If this is the first valid path and I/O is queued, the queue
 * is kicked; if the path's PG is current and a hardware handler is
 * configured, its activation is queued.  Returns 0 or a negative errno.
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		/* First usable path again: re-select and drain the queue. */
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}
/*
* Fail or reinstate all paths that match the provided struct dm_dev.
*/
/*
 * action_dev - apply @action (fail_path/reinstate_path) to every path
 * backed by @dev.  Returns the last action's result, or -EINVAL when no
 * path matched.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	struct priority_group *pg;
	struct pgpath *pgpath;
	int r = -EINVAL;

	list_for_each_entry(pg, &m->priority_groups, list)
		list_for_each_entry(pgpath, &pg->pgpaths, list)
			if (pgpath->path.dev == dev)
				r = action(pgpath);

	return r;
}
/*
* Temporarily try to avoid having to use the specified PG
*/
/*
 * Temporarily try to avoid having to use the specified PG.
 * Clearing current_pgpath/current_pg forces re-selection on the next
 * mapped I/O; a table event is scheduled to notify userspace.
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}
/*
* Switch to using the specified PG from the next I/O that gets mapped
*/
/*
 * Switch to using the specified PG from the next I/O that gets mapped.
 * @pgstr must be a bare 1-based group number ("%u%c" matching exactly
 * one item rejects trailing junk).  Also clears bypassed on every PG.
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		/* Found the requested group: force re-selection to it. */
		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}
/*
* Set/clear bypassed status of a PG.
* PGs are numbered upwards from 1 in the order they were declared.
*/
/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	/* Walk to the pgnum'th group; range was validated above. */
	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
/*
* Should we retry pg_init immediately?
*/
/*
 * Should we retry pg_init immediately?  Returns non-zero when the retry
 * budget is exhausted or pg_init is disabled; otherwise re-arms
 * pg_init_required and returns 0.
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int reached;

	spin_lock_irqsave(&m->lock, flags);

	reached = (m->pg_init_count > m->pg_init_retries) ||
		  m->pg_init_disabled;
	if (!reached)
		m->pg_init_required = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return reached;
}
/*
 * pg_init_done - scsi_dh activation completion callback for one path.
 * Translates the SCSI_DH_* status into path/PG actions, and when the
 * last outstanding activation finishes either re-queues the worker or
 * wakes anyone waiting in multipath_wait_for_pg_init_completion().
 */
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			/* No handler configured: nothing to fail over. */
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
		/* fallthrough */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Work item: kick scsi_dh activation for one path; pg_init_done() runs
 * asynchronously on completion.
 */
static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath;

	pgpath = container_of(work, struct pgpath, activate_path.work);
	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
			 pg_init_done, pgpath);
}
/*
* end_io handling
*/
/*
 * do_end_io - decide the fate of a completed clone: 0 (done), the
 * original error (target-level failure), -EIO, the error itself for
 * -EBADE, or DM_ENDIO_REQUEUE to let dm core resubmit it.
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	/* These errors indicate a target problem, not a path failure. */
	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ) {
		if ((clone->cmd_flags & REQ_WRITE_SAME) &&
		    !clone->q->limits.max_write_same_sectors) {
			struct queue_limits *limits;

			/* device doesn't really support WRITE SAME, disable it */
			limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
			limits->max_write_same_sectors = 0;
		}
		return error;
	}

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			/* -EBADE is passed through untranslated. */
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}
/*
 * multipath_end_io - per-clone completion hook: run do_end_io(), let
 * the path selector account the finished bytes, then release the mpio.
 */
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	/* multipath_map() always attaches an mpio before dispatch. */
	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_mapinfo(m, map_context);

	return r;
}
/*
* Suspend can't complete until all the I/O is processed so if
* the last path fails we must error any remaining I/O.
* Note that if the freeze_bdev fails while suspending, the
* queue_if_no_path state is lost - userspace should reset it.
*/
/*
 * Before suspend: stop queueing on path loss so outstanding I/O can
 * drain, saving the previous setting for multipath_resume().
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, 0, 1);
}
/*
 * After suspend: flush all internally queued work; work_mutex is held
 * across the flush.
 */
static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}
/*
* Restore the queue_if_no_path setting.
*/
/*
 * Restore the queue_if_no_path setting saved by multipath_presuspend().
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}
/*
* Info output has the following format:
* num_multipath_feature_args [multipath_feature_args]*
* num_handler_status_args [handler_status_args]*
* num_groups init_group_number
* [A|D|E num_ps_status_args [ps_status_args]*
* num_paths num_selector_args
* [path_dev A|F fail_count [selector_args]* ]+ ]+
*
* Table output has the following format (identical to the constructor string):
* num_feature_args [features_args]*
* num_handler_args hw_handler [hw_handler_args]*
* num_groups init_group_number
* [priority selector-name num_ps_args [ps_args]*
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
/*
 * Emit the INFO or TABLE status line (format documented above) into
 * @result, at most @maxlen bytes.  All state is sampled under m->lock
 * so the snapshot is internally consistent.
 */
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;	/* bytes emitted so far; advanced implicitly by DMEMIT */
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		/* argument count first, then each active feature keyword */
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      m->retain_attached_hw_handler);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->retain_attached_hw_handler)
			DMEMIT("retain_attached_hw_handler ");
	}

	/* Hardware handler: "0" when absent or for INFO output */
	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	/* Next/current priority group number (1-based; 0 when none exist) */
	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Handle "dmsetup message" commands:
 *   queue_if_no_path / fail_if_no_path            (no argument)
 *   disable_group / enable_group / switch_group   <group#>
 *   reinstate_path / fail_path                    <device>
 * Returns 0 on success, -EBUSY while suspended, -EINVAL otherwise.
 */
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	/* No state changes are allowed while the device is suspended. */
	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	/* All remaining commands take exactly one argument. */
	if (argc != 2) {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	/* Path-level actions: resolve the named device, act, release it. */
	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
/*
 * Forward an ioctl to the block device of the currently-selected path.
 * Returns -ENOTCONN (and kicks the queued-I/O worker so a retry can
 * succeed) while I/O is being queued, -EIO when no path is usable.
 */
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	struct block_device *bdev;
	fmode_t mode;
	unsigned long flags;
	int r;

	bdev = NULL;
	mode = 0;
	r = 0;

	spin_lock_irqsave(&m->lock, flags);

	/* Select a path now if none is current. */
	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		mode = pgpath->path.dev->mode;
	}

	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
		r = -ENOTCONN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
		int err = scsi_verify_blk_ioctl(NULL, cmd);
		if (err)
			r = err;
	}

	/* Let the worker process queued I/O so a blocked caller can retry. */
	if (r == -ENOTCONN && !fatal_signal_pending(current))
		queue_work(kmultipathd, &m->process_queued_ios);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
/*
 * Invoke @fn on every underlying path device; stop and propagate the
 * first nonzero return value, otherwise return 0.
 */
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *grp;
	struct pgpath *path;

	list_for_each_entry(grp, &m->priority_groups, list) {
		list_for_each_entry(path, &grp->pgpaths, list) {
			int rc = fn(ti, path->path.dev, ti->begin, ti->len, data);

			if (rc)
				return rc;
		}
	}

	return 0;
}
/* Whether the request queue backing this path is currently busy. */
static int __pgpath_busy(struct pgpath *pgpath)
{
	return dm_underlying_device_busy(bdev_get_queue(pgpath->path.dev->bdev));
}
/*
* We return "busy", only when we can map I/Os but underlying devices
* are busy (so even if we map I/Os now, the I/Os will wait on
* the underlying queue).
* In other words, if we want to kill I/Os or queue them inside us
* due to map unavailability, we don't return "busy". Otherwise,
* dm core won't give us the I/Os and we can't do what we want.
*/
/*
 * dm "busy" hook: returns nonzero only when mapping is possible but
 * every active path in the predicted priority group is busy (see the
 * comment above for the rationale).
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}
/*-----------------------------------------------------------------
* Module setup
*---------------------------------------------------------------*/
/* Target registration table: hooks wired into the device-mapper core. */
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
/*
 * Module init: create the mpio slab, register the target, and bring up
 * the two workqueues.  Error unwinding uses a single goto ladder so
 * each cleanup step is written exactly once (the original duplicated
 * the teardown calls on every failure path).
 * Returns 0 on success or a negative errno (values unchanged from the
 * original: -ENOMEM for allocation failures, -EINVAL when target
 * registration fails).
 */
static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}
/* Module unload: tear everything down in reverse order of init. */
static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Mysteryagr/Mystery-Kernel-3.18 | drivers/clk/mvebu/orion.c | 1628 | 4821 | /*
* Marvell Orion SoC clocks
*
* Copyright (C) 2014 Thomas Petazzoni
*
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include "common.h"
/* Single derived core clock exposed on Orion SoCs: the DDR clock. */
static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = {
	{ .id = 0, .name = "ddrclk", }
};
/*
* Orion 5182
*/
#define SAR_MV88F5182_TCLK_FREQ 8
#define SAR_MV88F5182_TCLK_FREQ_MASK 0x3
/* Decode the 5182 TCLK frequency from the sample-at-reset register. */
static u32 __init mv88f5182_get_tclk_freq(void __iomem *sar)
{
	u32 sel = (readl(sar) >> SAR_MV88F5182_TCLK_FREQ) &
		  SAR_MV88F5182_TCLK_FREQ_MASK;

	switch (sel) {
	case 1:
		return 150000000;
	case 2:
		return 166666667;
	default:
		return 0;	/* unknown strapping */
	}
}
#define SAR_MV88F5182_CPU_FREQ 4
#define SAR_MV88F5182_CPU_FREQ_MASK 0xf
/* Decode the 5182 CPU frequency from the sample-at-reset register. */
static u32 __init mv88f5182_get_cpu_freq(void __iomem *sar)
{
	u32 sel = (readl(sar) >> SAR_MV88F5182_CPU_FREQ) &
		  SAR_MV88F5182_CPU_FREQ_MASK;

	switch (sel) {
	case 0:
		return 333333333;
	case 1:
	case 2:
		return 400000000;
	case 3:
		return 500000000;
	default:
		return 0;	/* unknown strapping */
	}
}
/* 5182 DDR-to-CPU clock ratio, derived from the CPU frequency field. */
static void __init mv88f5182_get_clk_ratio(void __iomem *sar, int id,
					   int *mult, int *div)
{
	u32 sel = (readl(sar) >> SAR_MV88F5182_CPU_FREQ) &
		  SAR_MV88F5182_CPU_FREQ_MASK;

	switch (sel) {
	case 0:
	case 1:
		*mult = 1;
		*div = 2;
		break;
	case 2:
	case 3:
		*mult = 1;
		*div = 3;
		break;
	default:
		/* unknown strapping: flag an invalid 0/1 ratio */
		*mult = 0;
		*div = 1;
		break;
	}
}
/* SoC descriptor wiring the 5182 decode callbacks into the common code. */
static const struct coreclk_soc_desc mv88f5182_coreclks = {
	.get_tclk_freq = mv88f5182_get_tclk_freq,
	.get_cpu_freq = mv88f5182_get_cpu_freq,
	.get_clk_ratio = mv88f5182_get_clk_ratio,
	.ratios = orion_coreclk_ratios,
	.num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
};

/* DT init hook registered via CLK_OF_DECLARE below. */
static void __init mv88f5182_clk_init(struct device_node *np)
{
	return mvebu_coreclk_setup(np, &mv88f5182_coreclks);
}
CLK_OF_DECLARE(mv88f5182_clk, "marvell,mv88f5182-core-clock", mv88f5182_clk_init);
/*
* Orion 5281
*/
static u32 __init mv88f5281_get_tclk_freq(void __iomem *sar)
{
	/* On 5281, tclk is always 166 Mhz */
	return 166666667;
}
#define SAR_MV88F5281_CPU_FREQ 4
#define SAR_MV88F5281_CPU_FREQ_MASK 0xf
/* Decode the 5281 CPU frequency from the sample-at-reset register. */
static u32 __init mv88f5281_get_cpu_freq(void __iomem *sar)
{
	u32 sel = (readl(sar) >> SAR_MV88F5281_CPU_FREQ) &
		  SAR_MV88F5281_CPU_FREQ_MASK;

	switch (sel) {
	case 1:
	case 2:
		return 400000000;
	case 3:
		return 500000000;
	default:
		return 0;	/* unknown strapping */
	}
}
/* 5281 DDR-to-CPU clock ratio, derived from the CPU frequency field. */
static void __init mv88f5281_get_clk_ratio(void __iomem *sar, int id,
					   int *mult, int *div)
{
	u32 sel = (readl(sar) >> SAR_MV88F5281_CPU_FREQ) &
		  SAR_MV88F5281_CPU_FREQ_MASK;

	switch (sel) {
	case 1:
		*mult = 1;
		*div = 2;
		break;
	case 2:
	case 3:
		*mult = 1;
		*div = 3;
		break;
	default:
		/* unknown strapping: flag an invalid 0/1 ratio */
		*mult = 0;
		*div = 1;
		break;
	}
}
/* SoC descriptor wiring the 5281 decode callbacks into the common code. */
static const struct coreclk_soc_desc mv88f5281_coreclks = {
	.get_tclk_freq = mv88f5281_get_tclk_freq,
	.get_cpu_freq = mv88f5281_get_cpu_freq,
	.get_clk_ratio = mv88f5281_get_clk_ratio,
	.ratios = orion_coreclk_ratios,
	.num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
};

/* DT init hook registered via CLK_OF_DECLARE below. */
static void __init mv88f5281_clk_init(struct device_node *np)
{
	return mvebu_coreclk_setup(np, &mv88f5281_coreclks);
}
CLK_OF_DECLARE(mv88f5281_clk, "marvell,mv88f5281-core-clock", mv88f5281_clk_init);
/*
* Orion 6183
*/
#define SAR_MV88F6183_TCLK_FREQ 9
#define SAR_MV88F6183_TCLK_FREQ_MASK 0x1
/* Decode the 6183 TCLK frequency from the sample-at-reset register. */
static u32 __init mv88f6183_get_tclk_freq(void __iomem *sar)
{
	u32 sel = (readl(sar) >> SAR_MV88F6183_TCLK_FREQ) &
		  SAR_MV88F6183_TCLK_FREQ_MASK;

	switch (sel) {
	case 0:
		return 133333333;
	case 1:
		return 166666667;
	default:
		return 0;	/* unknown strapping */
	}
}
#define SAR_MV88F6183_CPU_FREQ 1
#define SAR_MV88F6183_CPU_FREQ_MASK 0x3f
/* Decode the 6183 CPU frequency from the sample-at-reset register. */
static u32 __init mv88f6183_get_cpu_freq(void __iomem *sar)
{
	u32 sel = (readl(sar) >> SAR_MV88F6183_CPU_FREQ) &
		  SAR_MV88F6183_CPU_FREQ_MASK;

	switch (sel) {
	case 9:
		return 333333333;
	case 17:
		return 400000000;
	default:
		return 0;	/* unknown strapping */
	}
}
/* 6183 DDR-to-CPU clock ratio, derived from the CPU frequency field. */
static void __init mv88f6183_get_clk_ratio(void __iomem *sar, int id,
					   int *mult, int *div)
{
	u32 sel = (readl(sar) >> SAR_MV88F6183_CPU_FREQ) &
		  SAR_MV88F6183_CPU_FREQ_MASK;

	switch (sel) {
	case 9:
	case 17:
		*mult = 1;
		*div = 2;
		break;
	default:
		/* unknown strapping: flag an invalid 0/1 ratio */
		*mult = 0;
		*div = 1;
		break;
	}
}
/* SoC descriptor wiring the 6183 decode callbacks into the common code. */
static const struct coreclk_soc_desc mv88f6183_coreclks = {
	.get_tclk_freq = mv88f6183_get_tclk_freq,
	.get_cpu_freq = mv88f6183_get_cpu_freq,
	.get_clk_ratio = mv88f6183_get_clk_ratio,
	.ratios = orion_coreclk_ratios,
	.num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
};

/* DT init hook registered via CLK_OF_DECLARE below. */
static void __init mv88f6183_clk_init(struct device_node *np)
{
	return mvebu_coreclk_setup(np, &mv88f6183_coreclks);
}
CLK_OF_DECLARE(mv88f6183_clk, "marvell,mv88f6183-core-clock", mv88f6183_clk_init);
| gpl-2.0 |
wujichang/linux | net/netfilter/ipvs/ip_vs_nq.c | 1628 | 3622 | /*
* IPVS: Never Queue scheduling module
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
/*
* The NQ algorithm adopts a two-speed model. When there is an idle server
* available, the job will be sent to the idle server, instead of waiting
* for a fast one. When there is no idle server available, the job will be
* sent to the server that minimize its expected delay (The Shortest
* Expected Delay scheduling algorithm).
*
* See the following paper for more information:
* A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
* in large heterogeneous systems. In Proceedings IEEE INFOCOM'88,
* pages 986-994, 1988.
*
* Thanks must go to Marko Buuri <marko@buuri.name> for talking NQ to me.
*
* The difference between NQ and SED is that NQ can improve overall
* system utilization.
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>
/* Per-destination cost estimate for NQ: active connections plus one. */
static inline int
ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
{
	/*
	 * We only use the active connection number in the cost
	 * calculation here.
	 */
	return atomic_read(&dest->activeconns) + 1;
}
/*
* Weighted Least Connection scheduling
*/
/*
 * Never Queue scheduling: dispatch to an idle server immediately if one
 * exists, otherwise pick the destination with the smallest
 * overhead/weight ratio (Shortest Expected Delay).  The destination
 * list is walked under RCU.  Returns NULL when no destination is usable.
 */
static struct ip_vs_dest *
ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		  struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest, *least = NULL;
	int loh = 0, doh;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/*
	 * We calculate the load of each dest server as follows:
	 *	(server expected overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *		  h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connections.
	 */
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		/* skip overloaded or quiesced (weight 0) destinations */
		if (dest->flags & IP_VS_DEST_F_OVERLOAD ||
		    !atomic_read(&dest->weight))
			continue;

		doh = ip_vs_nq_dest_overhead(dest);

		/* return the server directly if it is idle */
		if (atomic_read(&dest->activeconns) == 0) {
			least = dest;
			loh = doh;
			goto out;
		}

		/* 64-bit products avoid overflow in the cross comparison */
		if (!least ||
		    ((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))) {
			least = dest;
			loh = doh;
		}
	}

	if (!least) {
		ip_vs_scheduler_err(svc, "no destination available");
		return NULL;
	}

  out:
	IP_VS_DBG_BUF(6, "NQ: server %s:%u "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}
/* Scheduler registration record for the "nq" algorithm. */
static struct ip_vs_scheduler ip_vs_nq_scheduler =
{
	.name =			"nq",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
	.schedule =		ip_vs_nq_schedule,
};
/* Register the NQ scheduler with the IPVS core on module load. */
static int __init ip_vs_nq_init(void)
{
	return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
}

/* Unregister and wait for in-flight RCU readers before unload. */
static void __exit ip_vs_nq_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_nq_scheduler);
	synchronize_rcu();
}
module_init(ip_vs_nq_init);
module_exit(ip_vs_nq_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
TheTypoMaster/android_kernel_samsung_exynos5433 | drivers/platform/x86/asus-wmi.c | 2140 | 50165 | /*
* Asus PC WMI hotkey driver
*
* Copyright(C) 2010 Intel Corporation.
* Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
* Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/video.h>
#include "asus-wmi.h"
MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
"Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
#define to_platform_driver(drv) \
(container_of((drv), struct platform_driver, driver))
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */
#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/
#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
/* Wireless */
#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
#define ASUS_WMI_DEVID_CWAP 0x00010003
#define ASUS_WMI_DEVID_WLAN 0x00010011
#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
#define ASUS_WMI_DEVID_GPS 0x00010015
#define ASUS_WMI_DEVID_WIMAX 0x00010017
#define ASUS_WMI_DEVID_WWAN3G 0x00010019
#define ASUS_WMI_DEVID_UWB 0x00010021
/* Leds */
/* 0x000200XX and 0x000400XX */
#define ASUS_WMI_DEVID_LED1 0x00020011
#define ASUS_WMI_DEVID_LED2 0x00020012
#define ASUS_WMI_DEVID_LED3 0x00020013
#define ASUS_WMI_DEVID_LED4 0x00020014
#define ASUS_WMI_DEVID_LED5 0x00020015
#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
/* Input */
#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
/* Fan, Thermal */
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
/* Deep S3 / Resume on LID open */
#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
#define ASUS_WMI_DSTS_USER_BIT 0x00020000
#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
/* Two-u32 input buffer passed to the ASUS WMI evaluation method. */
struct bios_args {
	u32 arg0;
	u32 arg1;
} __packed;
/*
* <platform>/ - debugfs root directory
* dev_id - current dev_id
* ctrl_param - current ctrl_param
* method_id - current method_id
* devs - call DEVS(dev_id, ctrl_param) and print result
* dsts - call DSTS(dev_id) and print result
* call - call method_id(dev_id, ctrl_param) and print result
*/
/* State backing the debugfs interface described above. */
struct asus_wmi_debug {
	struct dentry *root;	/* debugfs root directory */
	u32 method_id;
	u32 dev_id;
	u32 ctrl_param;
};

/* One rfkill switch plus the device id it controls. */
struct asus_rfkill {
	struct asus_wmi *asus;	/* back-pointer to the owning device */
	struct rfkill *rfkill;
	u32 dev_id;		/* ASUS_WMI_DEVID_* this switch drives */
};
/* Per-device driver state, allocated once per platform device. */
struct asus_wmi {
	int dsts_id;		/* which DSTS method id this firmware uses */
	int spec;
	int sfun;

	struct input_dev *inputdev;
	struct backlight_device *backlight_device;
	struct device *hwmon_device;
	struct platform_device *platform_device;

	/* LEDs: actual WMI writes are deferred to led_workqueue */
	struct led_classdev wlan_led;
	int wlan_led_wk;	/* pending brightness for the worker */
	struct led_classdev tpd_led;
	int tpd_led_wk;		/* pending brightness for the worker */
	struct led_classdev kbd_led;
	int kbd_led_wk;		/* pending brightness for the worker */
	struct workqueue_struct *led_workqueue;
	struct work_struct tpd_led_work;
	struct work_struct kbd_led_work;
	struct work_struct wlan_led_work;

	/* rfkill switches for each radio type */
	struct asus_rfkill wlan;
	struct asus_rfkill bluetooth;
	struct asus_rfkill wimax;
	struct asus_rfkill wwan3g;
	struct asus_rfkill gps;
	struct asus_rfkill uwb;

	/* wlan PCI hotplug support (see asus_rfkill_hotplug) */
	struct hotplug_slot *hotplug_slot;
	struct mutex hotplug_lock;
	struct mutex wmi_lock;
	struct workqueue_struct *hotplug_workqueue;
	struct work_struct hotplug_work;

	struct asus_wmi_debug debug;

	struct asus_wmi_driver *driver;
};
/*
 * Allocate and register the hotkey input device using the driver's
 * sparse keymap.  Returns 0 or a negative errno; on failure all
 * partially-acquired resources are released.
 */
static int asus_wmi_input_init(struct asus_wmi *asus)
{
	int err;

	asus->inputdev = input_allocate_device();
	if (!asus->inputdev)
		return -ENOMEM;

	asus->inputdev->name = asus->driver->input_name;
	asus->inputdev->phys = asus->driver->input_phys;
	asus->inputdev->id.bustype = BUS_HOST;
	asus->inputdev->dev.parent = &asus->platform_device->dev;
	set_bit(EV_REP, asus->inputdev->evbit);

	err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
	if (err)
		goto err_free_dev;

	err = input_register_device(asus->inputdev);
	if (err)
		goto err_free_keymap;

	return 0;

err_free_keymap:
	sparse_keymap_free(asus->inputdev);
err_free_dev:
	input_free_device(asus->inputdev);
	return err;
}
/* Undo asus_wmi_input_init(): free the keymap, unregister the device. */
static void asus_wmi_input_exit(struct asus_wmi *asus)
{
	if (asus->inputdev) {
		sparse_keymap_free(asus->inputdev);
		input_unregister_device(asus->inputdev);
	}

	asus->inputdev = NULL;
}
/*
 * Call one method of the ASUS management WMI interface with two u32
 * arguments.  On success the method's integer result is written to
 * *retval (0 when the method returned a non-integer object).
 * Returns 0 on success, -EIO on ACPI failure (in which case *retval is
 * left untouched), or -ENODEV when the firmware reports the method as
 * unsupported.
 */
static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
				    u32 *retval)
{
	struct bios_args args = {
		.arg0 = arg0,
		.arg1 = arg1,
	};
	struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;
	union acpi_object *obj;
	u32 tmp;

	status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id,
				     &input, &output);

	if (ACPI_FAILURE(status))
		goto exit;

	obj = (union acpi_object *)output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		tmp = (u32) obj->integer.value;
	else
		tmp = 0;	/* non-integer result: report 0 */

	if (retval)
		*retval = tmp;

	kfree(obj);

exit:
	if (ACPI_FAILURE(status))
		return -EIO;

	/* firmware's sentinel for "this method is not implemented" */
	if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
		return -ENODEV;

	return 0;
}
/* Read a device's raw DSTS word into *retval (see evaluate_method). */
static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
{
	return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
}

/* Write @ctrl_param to a device via DEVS; raw result in *retval. */
static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
				 u32 *retval)
{
	return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
					ctrl_param, retval);
}
/* Helper for special devices with magic return codes */
/*
 * Read a device's DSTS word and apply @mask.  Returns the masked bits,
 * or -ENODEV when the device is absent (no presence bit) or — for plain
 * status queries — when the status is flagged unknown.
 */
static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
				      u32 dev_id, u32 mask)
{
	u32 retval = 0;
	int err;

	err = asus_wmi_get_devstate(asus, dev_id, &retval);

	if (err < 0)
		return err;

	if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT))
		return -ENODEV;

	if (mask == ASUS_WMI_DSTS_STATUS_BIT) {
		if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT)
			return -ENODEV;
	}

	return retval & mask;
}

/* Convenience wrapper: boolean on/off status of a device (0/1/-errno). */
static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id)
{
	return asus_wmi_get_devstate_bits(asus, dev_id,
					  ASUS_WMI_DSTS_STATUS_BIT);
}
/*
* LEDs
*/
/*
* These functions actually update the LED's, and are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
* potentially bad time, such as a timer interrupt.
*/
/* Worker: push the cached touchpad-LED state to firmware. */
static void tpd_led_update(struct work_struct *work)
{
	int ctrl_param;
	struct asus_wmi *asus;

	asus = container_of(work, struct asus_wmi, tpd_led_work);

	ctrl_param = asus->tpd_led_wk;
	asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL);
}

/* LED-core setter: cache the value and defer the WMI call to the worker. */
static void tpd_led_set(struct led_classdev *led_cdev,
			enum led_brightness value)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, tpd_led);

	asus->tpd_led_wk = !!value;
	queue_work(asus->led_workqueue, &asus->tpd_led_work);
}

/* Read the touchpad-LED on/off state from firmware. */
static int read_tpd_led_state(struct asus_wmi *asus)
{
	return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED);
}

/* LED-core getter: current firmware state (may be a negative errno). */
static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, tpd_led);

	return read_tpd_led_state(asus);
}
/* Worker: push the cached keyboard-backlight level to firmware. */
static void kbd_led_update(struct work_struct *work)
{
	int ctrl_param = 0;
	struct asus_wmi *asus;

	asus = container_of(work, struct asus_wmi, kbd_led_work);

	/*
	 * bits 0-2: level
	 * bit 7: light on/off
	 */
	if (asus->kbd_led_wk > 0)
		ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);

	asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}

/*
 * Read the keyboard-backlight level and ambient-light environment.
 * Either out-pointer may be NULL.  Returns 0 on success or a negative
 * errno from the DSTS query.
 */
static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
{
	int retval;

	/*
	 * bits 0-2: level
	 * bit 7: light on/off
	 * bit 8-10: environment (0: dark, 1: normal, 2: light)
	 * bit 17: status unknown
	 */
	retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
					    0xFFFF);

	/* Unknown status is considered as off */
	if (retval == 0x8000)
		retval = 0;

	if (retval >= 0) {
		if (level)
			*level = retval & 0x7F;
		if (env)
			*env = (retval >> 8) & 0x7F;
		retval = 0;
	}

	return retval;
}
/* LED-core setter: clamp, cache, and defer the WMI write to the worker. */
static void kbd_led_set(struct led_classdev *led_cdev,
			enum led_brightness value)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, kbd_led);

	if (value > asus->kbd_led.max_brightness)
		value = asus->kbd_led.max_brightness;
	else if (value < 0)
		/* NOTE(review): only reachable if enum led_brightness is
		 * signed on this build — harmless either way */
		value = 0;

	asus->kbd_led_wk = value;
	queue_work(asus->led_workqueue, &asus->kbd_led_work);
}

/* LED-core getter: current backlight level (or a negative errno). */
static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
{
	struct asus_wmi *asus;
	int retval, value;

	asus = container_of(led_cdev, struct asus_wmi, kbd_led);

	retval = kbd_led_read(asus, &value, NULL);

	if (retval < 0)
		return retval;

	return value;
}
/*
 * Whether the wireless LED's state is flagged "unknown" by firmware.
 *
 * @result is initialized because asus_wmi_get_devstate() leaves *retval
 * untouched when the underlying ACPI call fails, which would otherwise
 * read an uninitialized stack value here (the return value is ignored).
 */
static int wlan_led_unknown_state(struct asus_wmi *asus)
{
	u32 result = 0;

	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);

	return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
}
/*
 * Whether firmware reports a wireless LED as present.
 *
 * @result is initialized because asus_wmi_get_devstate() leaves *retval
 * untouched when the underlying ACPI call fails; a failed query now
 * reads as "absent" instead of an uninitialized stack value.
 */
static int wlan_led_presence(struct asus_wmi *asus)
{
	u32 result = 0;

	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);

	return result & ASUS_WMI_DSTS_PRESENCE_BIT;
}
/* Worker: push the cached wireless-LED state to firmware. */
static void wlan_led_update(struct work_struct *work)
{
	int ctrl_param;
	struct asus_wmi *asus;

	asus = container_of(work, struct asus_wmi, wlan_led_work);

	ctrl_param = asus->wlan_led_wk;
	asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
}

/* LED-core setter: cache the value and defer the WMI call to the worker. */
static void wlan_led_set(struct led_classdev *led_cdev,
			 enum led_brightness value)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, wlan_led);

	asus->wlan_led_wk = !!value;
	queue_work(asus->led_workqueue, &asus->wlan_led_work);
}
/*
 * LED-core brightness getter for the wireless LED.
 *
 * @result is initialized because asus_wmi_get_devstate() leaves *retval
 * untouched when the underlying ACPI call fails, which would otherwise
 * make this return an uninitialized stack value.
 */
static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
{
	struct asus_wmi *asus;
	u32 result = 0;

	asus = container_of(led_cdev, struct asus_wmi, wlan_led);
	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);

	return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
}
/* Unregister every LED that was successfully registered, then the wq. */
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
	if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
		led_classdev_unregister(&asus->kbd_led);
	if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
		led_classdev_unregister(&asus->tpd_led);
	if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
		led_classdev_unregister(&asus->wlan_led);
	if (asus->led_workqueue)
		destroy_workqueue(asus->led_workqueue);
}
/*
 * Probe for the touchpad, keyboard-backlight and wireless LEDs and
 * register a classdev for each one that firmware reports.  On any
 * registration failure everything already set up is torn down via
 * asus_wmi_led_exit().
 */
static int asus_wmi_led_init(struct asus_wmi *asus)
{
	int rv = 0;

	asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
	if (!asus->led_workqueue)
		return -ENOMEM;

	/* touchpad LED: present when its state can be read */
	if (read_tpd_led_state(asus) >= 0) {
		INIT_WORK(&asus->tpd_led_work, tpd_led_update);

		asus->tpd_led.name = "asus::touchpad";
		asus->tpd_led.brightness_set = tpd_led_set;
		asus->tpd_led.brightness_get = tpd_led_get;
		asus->tpd_led.max_brightness = 1;

		rv = led_classdev_register(&asus->platform_device->dev,
					   &asus->tpd_led);
		if (rv)
			goto error;
	}

	/* keyboard backlight: present when its level can be read */
	if (kbd_led_read(asus, NULL, NULL) >= 0) {
		INIT_WORK(&asus->kbd_led_work, kbd_led_update);

		asus->kbd_led.name = "asus::kbd_backlight";
		asus->kbd_led.brightness_set = kbd_led_set;
		asus->kbd_led.brightness_get = kbd_led_get;
		asus->kbd_led.max_brightness = 3;

		rv = led_classdev_register(&asus->platform_device->dev,
					   &asus->kbd_led);
		if (rv)
			goto error;
	}

	/* wireless LED: getter is only usable when state isn't "unknown" */
	if (wlan_led_presence(asus)) {
		INIT_WORK(&asus->wlan_led_work, wlan_led_update);

		asus->wlan_led.name = "asus::wlan";
		asus->wlan_led.brightness_set = wlan_led_set;
		if (!wlan_led_unknown_state(asus))
			asus->wlan_led.brightness_get = wlan_led_get;
		asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
		asus->wlan_led.max_brightness = 1;
		asus->wlan_led.default_trigger = "asus-wlan";

		rv = led_classdev_register(&asus->platform_device->dev,
					   &asus->wlan_led);
	}

error:
	if (rv)
		asus_wmi_led_exit(asus);

	return rv;
}
/*
* PCI hotplug (for wlan rfkill)
*/
/* True when the wlan device reads as off; query errors count as "on". */
static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus)
{
	int status = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);

	return status == 0;
}
/*
 * Synchronize the wlan rfkill soft state with firmware and, when a
 * hotplug slot is registered, add/remove the wifi PCI device on bus 1
 * slot 0 to match.  wmi_lock guards the WMI query; hotplug_lock guards
 * the PCI manipulation.
 */
static void asus_rfkill_hotplug(struct asus_wmi *asus)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	bool blocked;
	bool absent;
	u32 l;

	mutex_lock(&asus->wmi_lock);
	blocked = asus_wlan_rfkill_blocked(asus);
	mutex_unlock(&asus->wmi_lock);

	mutex_lock(&asus->hotplug_lock);

	if (asus->wlan.rfkill)
		rfkill_set_sw_state(asus->wlan.rfkill, blocked);

	if (asus->hotplug_slot) {
		bus = pci_find_bus(0, 1);
		if (!bus) {
			pr_warn("Unable to find PCI bus 1?\n");
			goto out_unlock;
		}

		if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
			pr_err("Unable to read PCI config space?\n");
			goto out_unlock;
		}
		/* all-ones vendor/device read means nothing is in the slot */
		absent = (l == 0xffffffff);

		/* firmware and PCI disagree: don't touch the bus */
		if (blocked != absent) {
			pr_warn("BIOS says wireless lan is %s, "
				"but the pci device is %s\n",
				blocked ? "blocked" : "unblocked",
				absent ? "absent" : "present");
			pr_warn("skipped wireless hotplug as probably "
				"inappropriate for this model\n");
			goto out_unlock;
		}

		if (!blocked) {
			dev = pci_get_slot(bus, 0);
			if (dev) {
				/* Device already present */
				pci_dev_put(dev);
				goto out_unlock;
			}
			dev = pci_scan_single_device(bus, 0);
			if (dev) {
				pci_bus_assign_resources(bus);
				if (pci_bus_add_device(dev))
					pr_err("Unable to hotplug wifi\n");
			}
		} else {
			dev = pci_get_slot(bus, 0);
			if (dev) {
				pci_stop_and_remove_bus_device(dev);
				pci_dev_put(dev);
			}
		}
	}

out_unlock:
	mutex_unlock(&asus->hotplug_lock);
}
/* ACPI notify handler for the wifi bridge devices. */
static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
	struct asus_wmi *asus = data;

	/*
	 * asus_rfkill_hotplug() cannot be called directly here: most of
	 * the time WMBC is still executing and it is not reentrant.
	 * There is currently no way to tell ACPICA that we want this
	 * method serialized, so defer the hotplug handling to a
	 * workqueue and run it later, in a safer context.
	 */
	if (event == ACPI_NOTIFY_BUS_CHECK)
		queue_work(asus->hotplug_workqueue, &asus->hotplug_work);
}
/*
 * Install asus_rfkill_notify() on the ACPI node @node.  Returns -ENODEV
 * when the node does not exist; an install failure is only warned about.
 */
static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
{
	acpi_handle handle;
	acpi_status status = acpi_get_handle(NULL, node, &handle);

	if (ACPI_FAILURE(status))
		return -ENODEV;

	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     asus_rfkill_notify, asus);
	if (ACPI_FAILURE(status))
		pr_warn("Failed to register notify on %s\n", node);

	return 0;
}
/* Remove the notify handler installed on @node, if the node exists. */
static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
{
	acpi_handle handle;
	acpi_status status = acpi_get_handle(NULL, node, &handle);

	if (ACPI_FAILURE(status))
		return;

	status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					    asus_rfkill_notify);
	if (ACPI_FAILURE(status))
		pr_err("Error removing rfkill notify handler %s\n",
		       node);
}
/* Hotplug-slot callback: report the wlan adapter state as 0/1. */
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
				   u8 *value)
{
	struct asus_wmi *asus = hotplug_slot->private;
	int state = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);

	if (state < 0)
		return state;

	*value = state ? 1 : 0;
	return 0;
}
/* Release callback for the hotplug slot: free the info, then the slot. */
static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
{
	struct hotplug_slot_info *info = hotplug_slot->info;

	kfree(info);
	kfree(hotplug_slot);
}
/* Hotplug-slot callbacks: power status mirrors adapter presence. */
static struct hotplug_slot_ops asus_hotplug_slot_ops = {
	.owner = THIS_MODULE,
	.get_adapter_status = asus_get_adapter_status,
	.get_power_status = asus_get_adapter_status,
};
/* Deferred context for asus_rfkill_hotplug(), see asus_rfkill_notify(). */
static void asus_hotplug_work(struct work_struct *work)
{
	asus_rfkill_hotplug(container_of(work, struct asus_wmi,
					 hotplug_work));
}
/*
 * Set up a fake PCI hotplug slot for the wifi adapter on bus 0:1 so
 * that rfkill state changes can hot-add/remove the wireless PCI device.
 *
 * Returns 0 on success or a negative errno.  On failure every partially
 * acquired resource is released and the corresponding asus_wmi pointer
 * is cleared, so a later asus_wmi_rfkill_exit() cannot free it again.
 */
static int asus_setup_pci_hotplug(struct asus_wmi *asus)
{
	int ret = -ENOMEM;
	struct pci_bus *bus = pci_find_bus(0, 1);

	if (!bus) {
		pr_err("Unable to find wifi PCI bus\n");
		return -ENODEV;
	}

	asus->hotplug_workqueue =
	    create_singlethread_workqueue("hotplug_workqueue");
	if (!asus->hotplug_workqueue)
		goto error_workqueue;

	INIT_WORK(&asus->hotplug_work, asus_hotplug_work);

	asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
	if (!asus->hotplug_slot)
		goto error_slot;

	asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
					   GFP_KERNEL);
	if (!asus->hotplug_slot->info)
		goto error_info;

	asus->hotplug_slot->private = asus;
	asus->hotplug_slot->release = &asus_cleanup_pci_hotplug;
	asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
	asus_get_adapter_status(asus->hotplug_slot,
				&asus->hotplug_slot->info->adapter_status);

	ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
	if (ret) {
		pr_err("Unable to register hotplug slot - %d\n", ret);
		goto error_register;
	}

	return 0;

error_register:
	kfree(asus->hotplug_slot->info);
error_info:
	kfree(asus->hotplug_slot);
	asus->hotplug_slot = NULL;
error_slot:
	/*
	 * Clear the pointer after destroying the workqueue; otherwise
	 * asus_wmi_rfkill_exit() would call destroy_workqueue() on it a
	 * second time when the caller unwinds.
	 */
	destroy_workqueue(asus->hotplug_workqueue);
	asus->hotplug_workqueue = NULL;
error_workqueue:
	return ret;
}
/*
* Rfkill devices
*/
/* rfkill set_block callback: translate blocked -> WMI control value. */
static int asus_rfkill_set(void *data, bool blocked)
{
	struct asus_rfkill *priv = data;
	u32 dev_id = priv->dev_id;
	u32 ctrl_param = blocked ? 0 : 1;

	/*
	 * If the user bit is set, the BIOS can't set and record the wlan
	 * status: it reports the value read from id ASUS_WMI_DEVID_WLAN_LED
	 * while we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
	 * So record the wlan status in id ASUS_WMI_DEVID_WLAN_LED when
	 * setting it through WMI; this mirrors what the Windows app does.
	 */
	if (dev_id == ASUS_WMI_DEVID_WLAN &&
	    priv->asus->driver->wlan_ctrl_by_user)
		dev_id = ASUS_WMI_DEVID_WLAN_LED;

	return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
}
/* rfkill query callback: refresh the soft-block state from WMI. */
static void asus_rfkill_query(struct rfkill *rfkill, void *data)
{
	struct asus_rfkill *priv = data;
	int state = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id);

	/* Keep the previous state when the query fails. */
	if (state >= 0)
		rfkill_set_sw_state(priv->rfkill, !state);
}
/* Serialized variant of asus_rfkill_set() for the wlan device. */
static int asus_rfkill_wlan_set(void *data, bool blocked)
{
	struct asus_rfkill *priv = data;
	struct asus_wmi *asus = priv->asus;
	int err;

	/*
	 * This handler is installed only when hotplug handling is
	 * enabled.  In that case asus_wmi_set_devstate() triggers a WMI
	 * notification, and we must wait for that call to finish before
	 * any other WMI method may run -- hence the lock.
	 */
	mutex_lock(&asus->wmi_lock);
	err = asus_rfkill_set(data, blocked);
	mutex_unlock(&asus->wmi_lock);

	return err;
}
/* rfkill ops for wlan when PCI hotplug handling is enabled: set_block
 * must serialize against the WMI notification it triggers. */
static const struct rfkill_ops asus_rfkill_wlan_ops = {
	.set_block = asus_rfkill_wlan_set,
	.query = asus_rfkill_query,
};

/* Default rfkill ops for all other radio devices. */
static const struct rfkill_ops asus_rfkill_ops = {
	.set_block = asus_rfkill_set,
	.query = asus_rfkill_query,
};
/*
 * Allocate and register one rfkill device backed by WMI device @dev_id.
 * A negative devstate query result (device absent) is propagated so the
 * caller can skip the device.  The wlan device gets the serialized ops
 * when PCI hotplug handling is enabled, plus the "asus-wlan" LED
 * trigger.  Returns 0 on success or a negative errno.
 */
static int asus_new_rfkill(struct asus_wmi *asus,
			   struct asus_rfkill *arfkill,
			   const char *name, enum rfkill_type type, int dev_id)
{
	int result = asus_wmi_get_devstate_simple(asus, dev_id);
	struct rfkill **rfkill = &arfkill->rfkill;

	if (result < 0)
		return result;

	arfkill->dev_id = dev_id;
	arfkill->asus = asus;

	if (dev_id == ASUS_WMI_DEVID_WLAN &&
	    asus->driver->quirks->hotplug_wireless)
		*rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
				       &asus_rfkill_wlan_ops, arfkill);
	else
		*rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
				       &asus_rfkill_ops, arfkill);

	if (!*rfkill)
		return -EINVAL;

	if (dev_id == ASUS_WMI_DEVID_WLAN)
		rfkill_set_led_trigger_name(*rfkill, "asus-wlan");

	/* Seed the software state from the current BIOS state. */
	rfkill_init_sw_state(*rfkill, !result);
	result = rfkill_register(*rfkill);
	if (result) {
		rfkill_destroy(*rfkill);
		*rfkill = NULL;
		return result;
	}
	return 0;
}
/*
 * Unregister every rfkill device and tear down the PCI hotplug
 * machinery.  The ACPI notifiers go first so no new hotplug work can be
 * queued while we dismantle; a final asus_rfkill_hotplug() then flushes
 * any state change that raced with the unregistration.
 */
static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
{
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
	if (asus->wlan.rfkill) {
		rfkill_unregister(asus->wlan.rfkill);
		rfkill_destroy(asus->wlan.rfkill);
		asus->wlan.rfkill = NULL;
	}
	/*
	 * Refresh pci hotplug in case the rfkill state was changed after
	 * asus_unregister_rfkill_notifier()
	 */
	asus_rfkill_hotplug(asus);
	if (asus->hotplug_slot)
		pci_hp_deregister(asus->hotplug_slot);
	if (asus->hotplug_workqueue)
		destroy_workqueue(asus->hotplug_workqueue);

	/* Remaining radios: unregister, destroy and clear each pointer. */
	if (asus->bluetooth.rfkill) {
		rfkill_unregister(asus->bluetooth.rfkill);
		rfkill_destroy(asus->bluetooth.rfkill);
		asus->bluetooth.rfkill = NULL;
	}
	if (asus->wimax.rfkill) {
		rfkill_unregister(asus->wimax.rfkill);
		rfkill_destroy(asus->wimax.rfkill);
		asus->wimax.rfkill = NULL;
	}
	if (asus->wwan3g.rfkill) {
		rfkill_unregister(asus->wwan3g.rfkill);
		rfkill_destroy(asus->wwan3g.rfkill);
		asus->wwan3g.rfkill = NULL;
	}
	if (asus->gps.rfkill) {
		rfkill_unregister(asus->gps.rfkill);
		rfkill_destroy(asus->gps.rfkill);
		asus->gps.rfkill = NULL;
	}
	if (asus->uwb.rfkill) {
		rfkill_unregister(asus->uwb.rfkill);
		rfkill_destroy(asus->uwb.rfkill);
		asus->uwb.rfkill = NULL;
	}
}
/*
 * Register all rfkill devices the firmware exposes (-ENODEV from a
 * query simply means "not fitted" and is not treated as an error), then
 * optionally set up PCI hotplug plus ACPI notifiers for the wifi slot.
 */
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
{
	int result = 0;

	mutex_init(&asus->hotplug_lock);
	mutex_init(&asus->wmi_lock);

	result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan",
				 RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN);
	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->bluetooth,
				 "asus-bluetooth", RFKILL_TYPE_BLUETOOTH,
				 ASUS_WMI_DEVID_BLUETOOTH);
	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax",
				 RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX);
	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g",
				 RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G);
	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
				 RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
				 RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
	if (result && result != -ENODEV)
		goto exit;

	/* Hotplug machinery only on models that opted in via quirks. */
	if (!asus->driver->quirks->hotplug_wireless)
		goto exit;

	result = asus_setup_pci_hotplug(asus);
	/*
	 * If we get -EBUSY then something else is handling the PCI hotplug -
	 * don't fail in this case
	 */
	if (result == -EBUSY)
		result = 0;

	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
	/*
	 * Refresh pci hotplug in case the rfkill state was changed during
	 * setup.
	 */
	asus_rfkill_hotplug(asus);

exit:
	if (result && result != -ENODEV)
		asus_wmi_rfkill_exit(asus);

	/* "Device not fitted" is success from the caller's viewpoint. */
	if (result == -ENODEV)
		result = 0;

	return result;
}
/*
* Hwmon device
*/
/*
 * hwmon pwm1: map the firmware fan-speed setting (low byte of the
 * FAN_CTRL devstate, values 0-3) onto the 0-255 pwm scale.
 *
 * Fix vs. the previous version: the computed value is kept in a signed
 * int, so printing -1 (unknown speed) with "%d" no longer routes a
 * negative sentinel through a u32 (a format/type mismatch).
 */
static ssize_t asus_hwmon_pwm1(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct asus_wmi *asus = dev_get_drvdata(dev);
	u32 value;
	int pwm;
	int err;

	err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
	if (err < 0)
		return err;

	value &= 0xFF;		/* fan setting lives in the low byte */

	switch (value) {
	case 0:			/* fan off */
		pwm = 0;
		break;
	case 1:			/* low speed */
		pwm = 85;
		break;
	case 2:			/* medium speed */
		pwm = 170;
		break;
	case 3:			/* full speed */
		pwm = 255;
		break;
	default:
		pr_err("Unknown fan speed %#x", value);
		pwm = -1;	/* out-of-range marker, as before */
		break;
	}

	return sprintf(buf, "%d\n", pwm);
}
/*
 * hwmon temp1_input: the low 16 bits of the THERMAL_CTRL devstate are a
 * temperature in Kelvin; convert to millidegrees Celsius for hwmon.
 */
static ssize_t asus_hwmon_temp1(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct asus_wmi *asus = dev_get_drvdata(dev);
	u32 value;
	int err;

	err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);

	if (err < 0)
		return err;

	/* NOTE(review): value is u32 but printed with %d -- presumably the
	 * converted temperature always fits in int; confirm. */
	value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;

	return sprintf(buf, "%d\n", value);
}
/* Read-only hwmon attributes backed by the WMI fan and thermal devices. */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
/* hwmon "name" attribute: the chip is always reported as "asus". */
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "asus\n");
}
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);

/* Attribute set for the hwmon device; runtime visibility is decided by
 * asus_hwmon_sysfs_is_visible(). */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	NULL
};
/*
 * Decide at runtime which hwmon attributes are exposed.  pwm1 and
 * temp1_input are shown only when the corresponding WMI device reports
 * plausible data; the name attribute is always visible.
 */
static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
					   struct attribute *attr, int idx)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct platform_device *pdev = to_platform_device(dev->parent);
	struct asus_wmi *asus = platform_get_drvdata(pdev);
	bool ok = true;
	int dev_id = -1;
	u32 value = ASUS_WMI_UNSUPPORTED_METHOD;

	if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
		dev_id = ASUS_WMI_DEVID_FAN_CTRL;
	else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
		dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;

	if (dev_id != -1) {
		int err = asus_wmi_get_devstate(asus, dev_id, &value);

		if (err < 0)
			return 0; /* can't return negative here */
	}

	if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) {
		/*
		 * We need to find a better way, probably using sfun,
		 * bits or spec ...
		 * Currently we disable it if:
		 * - ASUS_WMI_UNSUPPORTED_METHOD is returned
		 * - reserved bits are non-zero
		 * - sfun and presence bit are not set
		 */
		if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
		    || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
			ok = false;
	} else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
		/* If value is zero, something is clearly wrong */
		if (value == 0)
			ok = false;
	}

	return ok ? attr->mode : 0;
}
/* hwmon attribute group with runtime visibility filtering. */
static struct attribute_group hwmon_attribute_group = {
	.is_visible = asus_hwmon_sysfs_is_visible,
	.attrs = hwmon_attributes
};
/* Remove the hwmon sysfs group and unregister the hwmon device. */
static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
{
	struct device *dev = asus->hwmon_device;

	if (dev) {
		sysfs_remove_group(&dev->kobj, &hwmon_attribute_group);
		hwmon_device_unregister(dev);
		asus->hwmon_device = NULL;
	}
}
/* Register the hwmon device and attach its attribute group. */
static int asus_wmi_hwmon_init(struct asus_wmi *asus)
{
	struct device *dev;
	int err;

	dev = hwmon_device_register(&asus->platform_device->dev);
	if (IS_ERR(dev)) {
		pr_err("Could not register asus hwmon device\n");
		return PTR_ERR(dev);
	}

	dev_set_drvdata(dev, asus);
	asus->hwmon_device = dev;

	err = sysfs_create_group(&dev->kobj, &hwmon_attribute_group);
	if (err)
		asus_wmi_hwmon_exit(asus);

	return err;
}
/*
* Backlight
*/
/* Return the panel power as an FB_BLANK_* constant, or a negative errno. */
static int read_backlight_power(struct asus_wmi *asus)
{
	int on;

	if (asus->driver->quirks->store_backlight_power) {
		/* State is cached by the driver rather than queried. */
		on = !asus->driver->panel_power;
	} else {
		on = asus_wmi_get_devstate_simple(asus,
						  ASUS_WMI_DEVID_BACKLIGHT);
		if (on < 0)
			return on;
	}

	return on ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
/* Read the maximum brightness; -ENODEV when the firmware reports none. */
static int read_brightness_max(struct asus_wmi *asus)
{
	u32 raw;
	int err;

	err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &raw);
	if (err < 0)
		return err;

	/* The maximum lives in bits 15:8 of the devstate value. */
	raw = (raw & ASUS_WMI_DSTS_MAX_BRIGTH_MASK) >> 8;

	return raw ? (int)raw : -ENODEV;
}
/* backlight get_brightness callback: current level from the firmware. */
static int read_brightness(struct backlight_device *bd)
{
	struct asus_wmi *asus = bl_get_data(bd);
	u32 raw;
	int err;

	err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &raw);

	return err < 0 ? err : (int)(raw & ASUS_WMI_DSTS_BRIGHTNESS_MASK);
}
/*
 * For "scalar" panels the firmware takes step commands instead of an
 * absolute level: 0x8001 steps up, 0x8000 steps down, 0 is a no-op.
 * The last level pushed to the firmware is remembered in the driver.
 */
static u32 get_scalar_command(struct backlight_device *bd)
{
	struct asus_wmi *asus = bl_get_data(bd);
	int old = asus->driver->brightness;
	int new = bd->props.brightness;
	u32 cmd = 0;

	if (old < new || new == bd->props.max_brightness)
		cmd = 0x00008001;	/* step brightness up */
	else if (old > new || new == 0)
		cmd = 0x00008000;	/* step brightness down */

	asus->driver->brightness = new;

	return cmd;
}
/*
 * Push the power and brightness values requested through the backlight
 * class device down to the firmware.
 */
static int update_bl_status(struct backlight_device *bd)
{
	struct asus_wmi *asus = bl_get_data(bd);
	u32 ctrl_param;
	int power, err = 0;

	power = read_backlight_power(asus);
	if (power != -ENODEV && bd->props.power != power) {
		ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK);
		err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT,
					    ctrl_param, NULL);
		if (asus->driver->quirks->store_backlight_power)
			asus->driver->panel_power = bd->props.power;

		/* When using scalar brightness, updating the brightness
		 * will mess with the backlight power */
		if (asus->driver->quirks->scalar_panel_brightness)
			return err;
	}

	/* Scalar models take an up/down command, others the raw level. */
	if (asus->driver->quirks->scalar_panel_brightness)
		ctrl_param = get_scalar_command(bd);
	else
		ctrl_param = bd->props.brightness;

	err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS,
				    ctrl_param, NULL);
	return err;
}
/* Backlight callbacks: reads and updates both go through WMI. */
static const struct backlight_ops asus_wmi_bl_ops = {
	.get_brightness = read_brightness,
	.update_status = update_bl_status,
};
static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code)
{
struct backlight_device *bd = asus->backlight_device;
int old = bd->props.brightness;
int new = old;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
new = code - NOTIFY_BRNUP_MIN + 1;
else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
new = code - NOTIFY_BRNDOWN_MIN;
bd->props.brightness = new;
backlight_update_status(bd);
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
/*
 * Register the platform backlight device.  A firmware that reports no
 * maximum brightness (-ENODEV) gets max 0, a missing power state is
 * treated as "unblanked"; any other query error aborts registration.
 */
static int asus_wmi_backlight_init(struct asus_wmi *asus)
{
	struct backlight_device *bd;
	struct backlight_properties props;
	int max;
	int power;

	max = read_brightness_max(asus);

	if (max == -ENODEV)
		max = 0;
	else if (max < 0)
		return max;

	power = read_backlight_power(asus);

	if (power == -ENODEV)
		power = FB_BLANK_UNBLANK;
	else if (power < 0)
		return power;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_PLATFORM;
	props.max_brightness = max;
	bd = backlight_device_register(asus->driver->name,
				       &asus->platform_device->dev, asus,
				       &asus_wmi_bl_ops, &props);
	if (IS_ERR(bd)) {
		pr_err("Could not register backlight device\n");
		return PTR_ERR(bd);
	}

	asus->backlight_device = bd;

	if (asus->driver->quirks->store_backlight_power)
		asus->driver->panel_power = power;

	/* Seed the class device with the current firmware state. */
	bd->props.brightness = read_brightness(bd);
	bd->props.power = power;
	backlight_update_status(bd);

	asus->driver->brightness = bd->props.brightness;

	return 0;
}
/* Unregister the backlight device, if one was created. */
static void asus_wmi_backlight_exit(struct asus_wmi *asus)
{
	struct backlight_device *bd = asus->backlight_device;

	if (bd)
		backlight_device_unregister(bd);

	asus->backlight_device = NULL;
}
/* Return 1 when @code falls into one of the display-toggle hotkey
 * scancode ranges, 0 otherwise. */
static int is_display_toggle(int code)
{
	static const struct {
		int lo, hi;
	} ranges[] = {
		{ 0x61, 0x67 },
		{ 0x8c, 0x93 },
		{ 0xa0, 0xa7 },
		{ 0xd0, 0xd5 },
	};
	size_t i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		if (code >= ranges[i].lo && code <= ranges[i].hi)
			return 1;
	}

	return 0;
}
/*
 * WMI event callback: decode the event into a scan code, give the
 * model-specific driver a chance to filter/remap it, handle brightness
 * hotkeys in-kernel when ACPI video doesn't, and report everything else
 * through the sparse-keymap input device.
 */
static void asus_wmi_notify(u32 value, void *context)
{
	struct asus_wmi *asus = context;
	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int code;
	int orig_code;
	unsigned int key_value = 1;
	bool autorelease = 1;

	status = wmi_get_event_data(value, &response);
	if (status != AE_OK) {
		pr_err("bad event status 0x%x\n", status);
		return;
	}

	obj = (union acpi_object *)response.pointer;

	if (!obj || obj->type != ACPI_TYPE_INTEGER)
		goto exit;

	code = obj->integer.value;
	orig_code = code;

	/* Model-specific remapping; may also suppress the event. */
	if (asus->driver->key_filter) {
		asus->driver->key_filter(asus->driver, &code, &key_value,
					 &autorelease);
		if (code == ASUS_WMI_KEY_IGNORE)
			goto exit;
	}

	/* Collapse the brightness code ranges onto two keymap entries. */
	if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
		code = ASUS_WMI_BRN_UP;
	else if (code >= NOTIFY_BRNDOWN_MIN &&
		 code <= NOTIFY_BRNDOWN_MAX)
		code = ASUS_WMI_BRN_DOWN;

	if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
		if (!acpi_video_backlight_support()) {
			/* No ACPI video: adjust the backlight ourselves. */
			asus_wmi_backlight_notify(asus, orig_code);
			goto exit;
		}
	}

	if (is_display_toggle(code) &&
	    asus->driver->quirks->no_display_toggle)
		goto exit;

	if (!sparse_keymap_report_event(asus->inputdev, code,
					key_value, autorelease))
		pr_info("Unknown key %x pressed\n", code);

exit:
	kfree(obj);
}
/*
* Sys helpers
*/
/*
 * Parse an integer (decimal, octal or hex via "%i") from a sysfs buffer.
 * Returns @count on success, 0 when the buffer is empty, -EINVAL when
 * no integer could be parsed.
 */
static int parse_arg(const char *buf, unsigned long count, int *val)
{
	if (count == 0)
		return 0;

	return (sscanf(buf, "%i", val) == 1) ? (int)count : -EINVAL;
}
static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid,
const char *buf, size_t count)
{
u32 retval;
int rv, err, value;
value = asus_wmi_get_devstate_simple(asus, devid);
if (value == -ENODEV) /* Check device presence */
return value;
rv = parse_arg(buf, count, &value);
err = asus_wmi_set_devstate(devid, value, &retval);
if (err < 0)
return err;
return rv;
}
static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
{
int value = asus_wmi_get_devstate_simple(asus, devid);
if (value < 0)
return value;
return sprintf(buf, "%d\n", value);
}
/*
 * Generate the show/store sysfs handlers plus the device_attribute for
 * a simple on/off WMI device @_cm, exposed as attribute @_name with
 * file mode @_mode.
 */
#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm)			\
	static ssize_t show_##_name(struct device *dev,			\
				    struct device_attribute *attr,	\
				    char *buf)				\
	{								\
		struct asus_wmi *asus = dev_get_drvdata(dev);		\
									\
		return show_sys_wmi(asus, _cm, buf);			\
	}								\
	static ssize_t store_##_name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     const char *buf, size_t count)	\
	{								\
		struct asus_wmi *asus = dev_get_drvdata(dev);		\
									\
		return store_sys_wmi(asus, _cm, buf, count);		\
	}								\
	static struct device_attribute dev_attr_##_name = {		\
		.attr = {						\
			.name = __stringify(_name),			\
			.mode = _mode },				\
		.show = show_##_name,					\
		.store = store_##_name,					\
	}

/* User-writable toggles for the devices below. */
ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
/* sysfs "cpufv" store: select one of the three CPU Fv modes (0-2). */
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int value, err;

	if (!count || sscanf(buf, "%i", &value) != 1)
		return -EINVAL;
	if (value < 0 || value > 2)
		return -EINVAL;

	err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
	if (err < 0)
		return err;

	return count;
}
/* cpufv is write-only (no show handler). */
static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);

/* Platform sysfs attributes; visibility is decided at runtime by
 * asus_sysfs_is_visible(). */
static struct attribute *platform_attributes[] = {
	&dev_attr_cpufv.attr,
	&dev_attr_camera.attr,
	&dev_attr_cardr.attr,
	&dev_attr_touchpad.attr,
	&dev_attr_lid_resume.attr,
	NULL
};
/*
 * Hide a platform attribute when the BIOS reports its backing WMI
 * device as absent; cpufv (devid stays -1) is always shown.
 */
static umode_t asus_sysfs_is_visible(struct kobject *kobj,
				     struct attribute *attr, int idx)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct asus_wmi *asus = platform_get_drvdata(to_platform_device(dev));
	int devid = -1;

	if (attr == &dev_attr_camera.attr)
		devid = ASUS_WMI_DEVID_CAMERA;
	else if (attr == &dev_attr_cardr.attr)
		devid = ASUS_WMI_DEVID_CARDREADER;
	else if (attr == &dev_attr_touchpad.attr)
		devid = ASUS_WMI_DEVID_TOUCHPAD;
	else if (attr == &dev_attr_lid_resume.attr)
		devid = ASUS_WMI_DEVID_LID_RESUME;

	if (devid != -1 && asus_wmi_get_devstate_simple(asus, devid) < 0)
		return 0;

	return attr->mode;
}
/* Platform attribute group with runtime visibility filtering. */
static struct attribute_group platform_attribute_group = {
	.is_visible = asus_sysfs_is_visible,
	.attrs = platform_attributes
};
/* Remove the platform sysfs attribute group. */
static void asus_wmi_sysfs_exit(struct platform_device *device)
{
	sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
}
/* Create the platform sysfs attribute group; returns 0 or -errno. */
static int asus_wmi_sysfs_init(struct platform_device *device)
{
	return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
}
/*
* Platform device
*/
/*
 * Probe the WMI interface (INIT/SPEC/SFUN methods), pick the DSTS
 * method id, apply the wireless-key (CWAP) quirk and create the
 * platform sysfs attributes.  Returns 0 or a negative errno.
 */
static int asus_wmi_platform_init(struct asus_wmi *asus)
{
	int rv;

	/* INIT enable hotkeys on some models */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv))
		pr_info("Initialization: %#x", rv);

	/* We don't know yet what to do with this version... */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
		pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF);
		asus->spec = rv;
	}

	/*
	 * The SFUN method probably allows the original driver to get the list
	 * of features supported by a given model. For now, 0x0100 or 0x0800
	 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
	 * The significance of others is yet to be found.
	 */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) {
		pr_info("SFUN value: %#x", rv);
		asus->sfun = rv;
	}

	/*
	 * Eee PCs and Notebooks seem to use different method ids for DSTS,
	 * which may also be related to the BIOS's SPEC.
	 * Note: on most Eee PCs there is no way to check whether a method
	 * exists or not, while notebooks return 0xFFFFFFFE on failure --
	 * but once again, SPEC may probably be used for that kind of thing.
	 */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
		asus->dsts_id = ASUS_WMI_METHODID_DSTS;
	else
		asus->dsts_id = ASUS_WMI_METHODID_DSTS2;

	/*
	 * CWAP allows defining the behavior of the Fn+F2 key; this method
	 * doesn't seem to be present on Eee PCs.
	 */
	if (asus->driver->quirks->wapf >= 0)
		asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
				      asus->driver->quirks->wapf, NULL);

	return asus_wmi_sysfs_init(asus->platform_device);
}
/* Counterpart of asus_wmi_platform_init(): drop the sysfs attributes. */
static void asus_wmi_platform_exit(struct asus_wmi *asus)
{
	asus_wmi_sysfs_exit(asus->platform_device);
}
/*
* debugfs
*/
/* One debugfs trigger file: its name, the owning asus_wmi instance
 * (filled in at init time) and its seq_file show() callback. */
struct asus_wmi_debugfs_node {
	struct asus_wmi *asus;
	char *name;
	int (*show) (struct seq_file *m, void *data);
};
/* debugfs "dsts": query the devstate of the selected dev_id. */
static int show_dsts(struct seq_file *m, void *data)
{
	struct asus_wmi *asus = m->private;
	u32 retval = -1;
	int err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);

	if (err < 0)
		return err;

	seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval);

	return 0;
}
/* debugfs "devs": write ctrl_param to the selected dev_id. */
static int show_devs(struct seq_file *m, void *data)
{
	struct asus_wmi *asus = m->private;
	u32 retval = -1;
	int err = asus_wmi_set_devstate(asus->debug.dev_id,
					asus->debug.ctrl_param, &retval);

	if (err < 0)
		return err;

	seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id,
		   asus->debug.ctrl_param, retval);

	return 0;
}
/*
 * debugfs "call": invoke the raw WMI method selected through the
 * method_id/dev_id/ctrl_param debugfs files and print the result.
 */
static int show_call(struct seq_file *m, void *data)
{
	struct asus_wmi *asus = m->private;
	struct bios_args args = {
		.arg0 = asus->debug.dev_id,
		.arg1 = asus->debug.ctrl_param,
	};
	struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
				     1, asus->debug.method_id,
				     &input, &output);

	if (ACPI_FAILURE(status))
		return -EIO;

	obj = (union acpi_object *)output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id,
			   asus->debug.dev_id, asus->debug.ctrl_param,
			   (u32) obj->integer.value);
	else
		/* Non-integer replies are reported by ACPI object type. */
		seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id,
			   asus->debug.dev_id, asus->debug.ctrl_param,
			   obj ? obj->type : -1);

	kfree(obj);

	return 0;
}
/* The debugfs trigger files; .asus is filled in by debugfs_init(). */
static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = {
	{NULL, "devs", show_devs},
	{NULL, "dsts", show_dsts},
	{NULL, "call", show_call},
};
/* Bind the per-file show() callback to the seq_file machinery. */
static int asus_wmi_debugfs_open(struct inode *inode, struct file *file)
{
	struct asus_wmi_debugfs_node *node = inode->i_private;

	return single_open(file, node->show, node->asus);
}
/* Common file operations for the devs/dsts/call debugfs files. */
static const struct file_operations asus_wmi_debugfs_io_ops = {
	.owner = THIS_MODULE,
	.open = asus_wmi_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Remove the whole debugfs tree (safe on a NULL or partial tree). */
static void asus_wmi_debugfs_exit(struct asus_wmi *asus)
{
	debugfs_remove_recursive(asus->debug.root);
}
/*
 * Create the driver's debugfs directory with the method_id/dev_id/
 * ctrl_param knobs plus the devs/dsts/call trigger files.  Any failure
 * removes whatever was created so far and returns -ENOMEM.
 */
static int asus_wmi_debugfs_init(struct asus_wmi *asus)
{
	struct dentry *dent;
	int i;

	asus->debug.root = debugfs_create_dir(asus->driver->name, NULL);
	if (!asus->debug.root) {
		pr_err("failed to create debugfs directory");
		goto error_debugfs;
	}

	dent = debugfs_create_x32("method_id", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.method_id);
	if (!dent)
		goto error_debugfs;

	dent = debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.dev_id);
	if (!dent)
		goto error_debugfs;

	dent = debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.ctrl_param);
	if (!dent)
		goto error_debugfs;

	for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) {
		struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];

		node->asus = asus;
		dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
					   asus->debug.root, node,
					   &asus_wmi_debugfs_io_ops);
		if (!dent) {
			pr_err("failed to create debug file: %s\n", node->name);
			goto error_debugfs;
		}
	}

	return 0;

error_debugfs:
	/* debugfs_remove_recursive() copes with a NULL/partial tree. */
	asus_wmi_debugfs_exit(asus);
	return -ENOMEM;
}
/*
* WMI Driver
*/
/*
 * Instantiate one asus_wmi device: platform bits, input, hwmon, LEDs,
 * rfkill, (optionally) backlight, the WMI event handler and debugfs,
 * unwinding in reverse order on any failure.
 *
 * Fix vs. the previous version: the return value of the final
 * asus_wmi_get_devstate() call was ignored and "result" was read even
 * when the query failed, i.e. while still uninitialized (undefined
 * behavior).  It is now initialized and only used on success.
 */
static int asus_wmi_add(struct platform_device *pdev)
{
	struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
	struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
	struct asus_wmi *asus;
	acpi_status status;
	int err;
	u32 result = 0;

	asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
	if (!asus)
		return -ENOMEM;

	asus->driver = wdrv;
	asus->platform_device = pdev;
	wdrv->platform_device = pdev;
	platform_set_drvdata(asus->platform_device, asus);

	if (wdrv->detect_quirks)
		wdrv->detect_quirks(asus->driver);

	err = asus_wmi_platform_init(asus);
	if (err)
		goto fail_platform;

	err = asus_wmi_input_init(asus);
	if (err)
		goto fail_input;

	err = asus_wmi_hwmon_init(asus);
	if (err)
		goto fail_hwmon;

	err = asus_wmi_led_init(asus);
	if (err)
		goto fail_leds;

	err = asus_wmi_rfkill_init(asus);
	if (err)
		goto fail_rfkill;

	if (asus->driver->quirks->wmi_backlight_power)
		acpi_video_dmi_promote_vendor();
	if (!acpi_video_backlight_support()) {
		pr_info("Disabling ACPI video driver\n");
		acpi_video_unregister();
		err = asus_wmi_backlight_init(asus);
		/* -ENODEV means "no backlight", which is not fatal. */
		if (err && err != -ENODEV)
			goto fail_backlight;
	} else
		pr_info("Backlight controlled by ACPI video driver\n");

	status = wmi_install_notify_handler(asus->driver->event_guid,
					    asus_wmi_notify, asus);
	if (ACPI_FAILURE(status)) {
		pr_err("Unable to register notify handler - %d\n", status);
		err = -ENODEV;
		goto fail_wmi_handler;
	}

	err = asus_wmi_debugfs_init(asus);
	if (err)
		goto fail_debugfs;

	/* Detect whether the BIOS leaves wlan control to the user; only
	 * trust "result" when the WMI query actually succeeded. */
	err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
	if (!err &&
	    (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)))
		asus->driver->wlan_ctrl_by_user = 1;

	return 0;

fail_debugfs:
	wmi_remove_notify_handler(asus->driver->event_guid);
fail_wmi_handler:
	asus_wmi_backlight_exit(asus);
fail_backlight:
	asus_wmi_rfkill_exit(asus);
fail_rfkill:
	asus_wmi_led_exit(asus);
fail_leds:
	asus_wmi_hwmon_exit(asus);
fail_hwmon:
	asus_wmi_input_exit(asus);
fail_input:
	asus_wmi_platform_exit(asus);
fail_platform:
	kfree(asus);
	return err;
}
/* Tear everything down, roughly in the reverse order of asus_wmi_add(). */
static int asus_wmi_remove(struct platform_device *device)
{
	struct asus_wmi *asus = platform_get_drvdata(device);

	wmi_remove_notify_handler(asus->driver->event_guid);
	asus_wmi_backlight_exit(asus);
	asus_wmi_input_exit(asus);
	asus_wmi_hwmon_exit(asus);
	asus_wmi_led_exit(asus);
	asus_wmi_rfkill_exit(asus);
	asus_wmi_debugfs_exit(asus);
	asus_wmi_platform_exit(asus);

	kfree(asus);

	return 0;
}
/*
* Platform driver - hibernate/resume callbacks
*/
/* Hibernation "thaw" callback. */
static int asus_hotk_thaw(struct device *device)
{
	struct asus_wmi *asus = dev_get_drvdata(device);

	if (asus->wlan.rfkill) {
		bool wlan;

		/*
		 * Work around bios bug - acpi _PTS turns off the wireless led
		 * during suspend.  Normally it restores it on resume, but
		 * we should kick it ourselves in case hibernation is aborted.
		 */
		wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
		asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL);
	}

	return 0;
}
/*
 * Hibernation "restore" callback: resync every registered rfkill
 * device (and the wifi PCI hotplug state) with the BIOS.
 */
static int asus_hotk_restore(struct device *device)
{
	struct asus_wmi *asus = dev_get_drvdata(device);
	int bl;

	/* Refresh both wlan rfkill state and pci hotplug */
	if (asus->wlan.rfkill)
		asus_rfkill_hotplug(asus);

	if (asus->bluetooth.rfkill) {
		bl = !asus_wmi_get_devstate_simple(asus,
						   ASUS_WMI_DEVID_BLUETOOTH);
		rfkill_set_sw_state(asus->bluetooth.rfkill, bl);
	}
	if (asus->wimax.rfkill) {
		bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX);
		rfkill_set_sw_state(asus->wimax.rfkill, bl);
	}
	if (asus->wwan3g.rfkill) {
		bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
		rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
	}
	if (asus->gps.rfkill) {
		bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
		rfkill_set_sw_state(asus->gps.rfkill, bl);
	}
	if (asus->uwb.rfkill) {
		bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
		rfkill_set_sw_state(asus->uwb.rfkill, bl);
	}

	return 0;
}
/* Only the hibernation callbacks are wired up. */
static const struct dev_pm_ops asus_pm_ops = {
	.thaw = asus_hotk_thaw,
	.restore = asus_hotk_restore,
};
/*
 * Platform probe: verify the required WMI GUIDs exist, run the
 * model-specific probe hook, then build the asus_wmi instance.
 */
static int asus_wmi_probe(struct platform_device *pdev)
{
	struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
	struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);

	if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
		pr_warn("Management GUID not found\n");
		return -ENODEV;
	}

	if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
		pr_warn("Event GUID not found\n");
		return -ENODEV;
	}

	if (wdrv->probe) {
		int ret = wdrv->probe(pdev);

		if (ret)
			return ret;
	}

	return asus_wmi_add(pdev);
}
/* Guards against registering more than one asus-wmi based driver. */
static bool used;

/*
 * Register a model-specific asus-wmi driver: fill in the platform
 * driver callbacks and create the platform device/driver bundle.
 * Only one such driver may be registered at a time (-EBUSY otherwise).
 */
int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
	struct platform_driver *platform_driver;
	struct platform_device *platform_device;

	if (used)
		return -EBUSY;

	platform_driver = &driver->platform_driver;
	platform_driver->remove = asus_wmi_remove;
	platform_driver->driver.owner = driver->owner;
	platform_driver->driver.name = driver->name;
	platform_driver->driver.pm = &asus_pm_ops;

	platform_device = platform_create_bundle(platform_driver,
						 asus_wmi_probe,
						 NULL, 0, NULL, 0);
	if (IS_ERR(platform_device))
		return PTR_ERR(platform_device);

	used = true;
	return 0;
}
EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
/* Tear down what asus_wmi_register_driver() set up and allow a new
 * sub-driver to register. */
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
	/* NOTE(review): driver->platform_device is presumably assigned during
	 * probe/add (not visible in this chunk) — verify before relying on it. */
	platform_device_unregister(driver->platform_device);
	platform_driver_unregister(&driver->platform_driver);
	used = false;
}
EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
/*
 * Module init: just check that an ASUS WMI interface exists.  Actual
 * device setup happens when a sub-driver registers.
 */
static int __init asus_wmi_init(void)
{
	if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
		/* printk lines need a trailing newline or they get deferred */
		pr_info("Asus Management GUID not found\n");
		return -ENODEV;
	}

	pr_info("ASUS WMI generic driver loaded\n");
	return 0;
}
/* Module exit: nothing to tear down — sub-drivers unregister themselves. */
static void __exit asus_wmi_exit(void)
{
	/* add the missing trailing newline required by printk convention */
	pr_info("ASUS WMI generic driver unloaded\n");
}
module_init(asus_wmi_init);
module_exit(asus_wmi_exit);
| gpl-2.0 |
mdeejay/primou-ics | drivers/input/touchscreen/wacom_w8001.c | 2652 | 14363 | /*
* Wacom W8001 penabled serial touchscreen driver
*
* Copyright (c) 2008 Jaya Kumar
* Copyright (c) 2010 Red Hat, Inc.
* Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <pingc@wacom.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout based on Elo serial touchscreen driver by Vojtech Pavlik
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#define DRIVER_DESC "Wacom W8001 serial touchscreen driver"
MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define W8001_MAX_LENGTH 11
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
#define W8001_TAB_BYTE 0x40
/* set in first byte of touch data packets */
#define W8001_TOUCH_MASK (0x10 | W8001_LEAD_MASK)
#define W8001_TOUCH_BYTE (0x10 | W8001_LEAD_BYTE)
#define W8001_QUERY_PACKET 0x20
#define W8001_CMD_STOP '0'
#define W8001_CMD_START '1'
#define W8001_CMD_QUERY '*'
#define W8001_CMD_TOUCHQUERY '%'
/* length of data packets in bytes, depends on device. */
#define W8001_PKTLEN_TOUCH93 5
#define W8001_PKTLEN_TOUCH9A 7
#define W8001_PKTLEN_TPCPEN 9
#define W8001_PKTLEN_TPCCTL 11 /* control packet */
#define W8001_PKTLEN_TOUCH2FG 13
/* resolution in points/mm */
#define W8001_PEN_RESOLUTION 100
#define W8001_TOUCH_RESOLUTION 10
/* Decoded pen/touch coordinate packet (see parse_pen_data()). */
struct w8001_coord {
	u8 rdy;			/* pen in proximity */
	u8 tsw;			/* tip switch (touching) */
	u8 f1;			/* side switch */
	u8 f2;			/* second side switch / eraser */
	u16 x;
	u16 y;
	u16 pen_pressure;	/* 10-bit pressure value */
	u8 tilt_x;
	u8 tilt_y;
};
/* touch query reply packet */
struct w8001_touch_query {
u16 x;
u16 y;
u8 panel_res;
u8 capacity_res;
u8 sensor_id;
};
/*
 * Per-touchscreen data.
 */
struct w8001 {
	struct input_dev *dev;
	struct serio *serio;
	struct completion cmd_done;	/* signalled when a query reply arrives */
	int id;				/* device id derived from query replies */
	int idx;			/* current write index into data[] */
	unsigned char response_type;	/* W8001_QUERY_PACKET when response[] valid */
	unsigned char response[W8001_MAX_LENGTH];
	unsigned char data[W8001_MAX_LENGTH];	/* partial packet being received */
	char phys[32];
	int type;			/* current tool: BTN_TOOL_* or KEY_RESERVED */
	unsigned int pktlen;		/* touch packet length for this sensor */
	u16 max_touch_x;
	u16 max_touch_y;
	u16 max_pen_x;
	u16 max_pen_y;
	char name[64];
};
/*
 * Decode a 9-byte pen packet.  Flags live in byte 0; X and Y are each
 * split across two 7-bit bytes plus 2 low bits packed into byte 6;
 * pressure is 10 bits (byte 5 low, byte 6 high); tilt is 7 bits each.
 */
static void parse_pen_data(u8 *data, struct w8001_coord *coord)
{
	memset(coord, 0, sizeof(*coord));

	coord->rdy = data[0] & 0x20;
	coord->tsw = data[0] & 0x01;
	coord->f1 = data[0] & 0x02;
	coord->f2 = data[0] & 0x04;

	coord->x = (data[1] & 0x7F) << 9;
	coord->x |= (data[2] & 0x7F) << 2;
	coord->x |= (data[6] & 0x60) >> 5;

	coord->y = (data[3] & 0x7F) << 9;
	coord->y |= (data[4] & 0x7F) << 2;
	coord->y |= (data[6] & 0x18) >> 3;

	coord->pen_pressure = data[5] & 0x7F;
	coord->pen_pressure |= (data[6] & 0x07) << 7 ;

	coord->tilt_x = data[7] & 0x7F;
	coord->tilt_y = data[8] & 0x7F;
}
/* Decode a single-finger touch packet: 14-bit X/Y, each split over two
 * 7-bit bytes; the tip switch lives in bit 0 of the lead byte. */
static void parse_single_touch(u8 *data, struct w8001_coord *coord)
{
	u16 abs_x = ((u16)data[1] << 7) | data[2];
	u16 abs_y = ((u16)data[3] << 7) | data[4];

	coord->x = abs_x;
	coord->y = abs_y;
	coord->tsw = data[0] & 0x01;
}
/* Map raw touch coordinates into the pen coordinate space, when both
 * maxima are known; otherwise leave the values untouched. */
static void scale_touch_coordinates(struct w8001 *w8001,
				    unsigned int *x, unsigned int *y)
{
	const unsigned int pen_x = w8001->max_pen_x;
	const unsigned int pen_y = w8001->max_pen_y;
	const unsigned int touch_x = w8001->max_touch_x;
	const unsigned int touch_y = w8001->max_touch_y;

	if (pen_x && touch_x)
		*x = *x * pen_x / touch_x;

	if (pen_y && touch_y)
		*y = *y * pen_y / touch_y;
}
/*
 * Decode and report a 13-byte two-finger packet.  Byte 0 carries one
 * presence bit per finger; each finger's X/Y follows in 7-bit pairs.
 */
static void parse_multi_touch(struct w8001 *w8001)
{
	struct input_dev *dev = w8001->dev;
	unsigned char *data = w8001->data;
	unsigned int x, y;
	int i;
	int count = 0;

	for (i = 0; i < 2; i++) {
		bool touch = data[0] & (1 << i);

		input_mt_slot(dev, i);
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
		if (touch) {
			x = (data[6 * i + 1] << 7) | data[6 * i + 2];
			y = (data[6 * i + 3] << 7) | data[6 * i + 4];
			/* data[5,6] and [11,12] is finger capacity */

			/* scale to pen maximum */
			scale_touch_coordinates(w8001, &x, &y);

			input_report_abs(dev, ABS_MT_POSITION_X, x);
			input_report_abs(dev, ABS_MT_POSITION_Y, y);
			count++;
		}
	}

	/* emulate single touch events when stylus is out of proximity.
	 * This is to make single touch backward support consistent
	 * across all Wacom single touch devices.
	 */
	if (w8001->type != BTN_TOOL_PEN &&
	    w8001->type != BTN_TOOL_RUBBER) {
		w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED;
		input_mt_report_pointer_emulation(dev, true);
	}

	input_sync(dev);
}
/*
 * Decode a touch query reply.  X/Y maxima are split across two bytes
 * plus 2 low bits packed into byte 2; byte 2 also carries the sensor id.
 */
static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
{
	memset(query, 0, sizeof(*query));

	query->panel_res = data[1];
	query->sensor_id = data[2] & 0x7;
	query->capacity_res = data[7];

	query->x = data[3] << 9;
	query->x |= data[4] << 2;
	query->x |= (data[2] >> 5) & 0x3;

	query->y = data[5] << 9;
	query->y |= data[6] << 2;
	query->y |= (data[2] >> 3) & 0x3;

	/* Early days' single-finger touch models need the following defaults */
	if (!query->x && !query->y) {
		query->x = 1024;
		query->y = 1024;
		if (query->panel_res)
			query->x = query->y = (1 << query->panel_res);
		query->panel_res = W8001_TOUCH_RESOLUTION;
	}
}
/*
 * Report a decoded pen packet, tracking which tool (pen or eraser) is
 * in proximity across packets via w8001->type.
 */
static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
{
	struct input_dev *dev = w8001->dev;

	/*
	 * We have 1 bit for proximity (rdy) and 3 bits for tip, side,
	 * side2/eraser. If rdy && f2 are set, this can be either pen + side2,
	 * or eraser. Assume:
	 * - if dev is already in proximity and f2 is toggled -> pen + side2
	 * - if dev comes into proximity with f2 set -> eraser
	 * If f2 disappears after assuming eraser, fake proximity out for
	 * eraser and in for pen.
	 */

	switch (w8001->type) {
	case BTN_TOOL_RUBBER:
		if (!coord->f2) {
			/* eraser bit dropped: fake the eraser leaving... */
			input_report_abs(dev, ABS_PRESSURE, 0);
			input_report_key(dev, BTN_TOUCH, 0);
			input_report_key(dev, BTN_STYLUS, 0);
			input_report_key(dev, BTN_STYLUS2, 0);
			input_report_key(dev, BTN_TOOL_RUBBER, 0);
			input_sync(dev);
			/* ...and continue below as a pen */
			w8001->type = BTN_TOOL_PEN;
		}
		break;

	case BTN_TOOL_FINGER:
		/* pen takes precedence: close out the finger first */
		input_report_key(dev, BTN_TOUCH, 0);
		input_report_key(dev, BTN_TOOL_FINGER, 0);
		input_sync(dev);
		/* fall through */

	case KEY_RESERVED:
		/* entering proximity: f2 set at entry means eraser */
		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
		break;

	default:
		/* already a pen in proximity: f2 is just the second switch */
		input_report_key(dev, BTN_STYLUS2, coord->f2);
		break;
	}

	input_report_abs(dev, ABS_X, coord->x);
	input_report_abs(dev, ABS_Y, coord->y);
	input_report_abs(dev, ABS_PRESSURE, coord->pen_pressure);
	input_report_key(dev, BTN_TOUCH, coord->tsw);
	input_report_key(dev, BTN_STYLUS, coord->f1);
	input_report_key(dev, w8001->type, coord->rdy);
	input_sync(dev);

	/* left proximity: no tool is active anymore */
	if (!coord->rdy)
		w8001->type = KEY_RESERVED;
}
/* Report a decoded single-finger packet, scaled into the pen coordinate
 * space, and record whether a finger tool is now active. */
static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord)
{
	struct input_dev *input = w8001->dev;
	unsigned int sx = coord->x;
	unsigned int sy = coord->y;

	/* scale to pen maximum */
	scale_touch_coordinates(w8001, &sx, &sy);

	input_report_abs(input, ABS_X, sx);
	input_report_abs(input, ABS_Y, sy);
	input_report_key(input, BTN_TOUCH, coord->tsw);
	input_report_key(input, BTN_TOOL_FINGER, coord->tsw);

	input_sync(input);

	w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED;
}
/*
 * Serio byte handler: accumulate bytes into w8001->data and dispatch a
 * complete packet based on its length and the flag bits in the lead byte.
 * Note the cases below key on the index of the byte just stored.
 */
static irqreturn_t w8001_interrupt(struct serio *serio,
				   unsigned char data, unsigned int flags)
{
	struct w8001 *w8001 = serio_get_drvdata(serio);
	struct w8001_coord coord;
	unsigned char tmp;

	w8001->data[w8001->idx] = data;
	switch (w8001->idx++) {
	case 0:
		/* a packet must start with a lead byte; resync otherwise */
		if ((data & W8001_LEAD_MASK) != W8001_LEAD_BYTE) {
			pr_debug("w8001: unsynchronized data: 0x%02x\n", data);
			w8001->idx = 0;
		}
		break;

	case W8001_PKTLEN_TOUCH93 - 1:
	case W8001_PKTLEN_TOUCH9A - 1:
		/* single-finger touch packet, length depends on sensor id */
		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
		if (tmp != W8001_TOUCH_BYTE)
			break;

		if (w8001->pktlen == w8001->idx) {
			w8001->idx = 0;
			/* pen in proximity wins over touch */
			if (w8001->type != BTN_TOOL_PEN &&
			    w8001->type != BTN_TOOL_RUBBER) {
				parse_single_touch(w8001->data, &coord);
				report_single_touch(w8001, &coord);
			}
		}
		break;

	/* Pen coordinates packet */
	case W8001_PKTLEN_TPCPEN - 1:
		tmp = w8001->data[0] & W8001_TAB_MASK;
		if (unlikely(tmp == W8001_TAB_BYTE))
			break;

		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
		if (tmp == W8001_TOUCH_BYTE)
			break;

		w8001->idx = 0;
		parse_pen_data(w8001->data, &coord);
		report_pen_events(w8001, &coord);
		break;

	/* control packet */
	case W8001_PKTLEN_TPCCTL - 1:
		tmp = w8001->data[0] & W8001_TOUCH_MASK;
		if (tmp == W8001_TOUCH_BYTE)
			break;

		/* query reply: stash it and wake w8001_command() */
		w8001->idx = 0;
		memcpy(w8001->response, w8001->data, W8001_MAX_LENGTH);
		w8001->response_type = W8001_QUERY_PACKET;
		complete(&w8001->cmd_done);
		break;

	/* 2 finger touch packet */
	case W8001_PKTLEN_TOUCH2FG - 1:
		w8001->idx = 0;
		parse_multi_touch(w8001);
		break;
	}

	return IRQ_HANDLED;
}
/*
 * Send a one-byte command; optionally wait (up to 1s) for the query
 * reply that w8001_interrupt() delivers via cmd_done.  Returns 0 on
 * success, -EIO when no reply arrived in time.
 */
static int w8001_command(struct w8001 *w8001, unsigned char command,
			 bool wait_response)
{
	int rc;

	w8001->response_type = 0;
	init_completion(&w8001->cmd_done);

	rc = serio_write(w8001->serio, command);
	if (rc == 0 && wait_response) {
		/* response_type stays 0 on timeout, so -EIO covers it */
		wait_for_completion_timeout(&w8001->cmd_done, HZ);
		if (w8001->response_type != W8001_QUERY_PACKET)
			rc = -EIO;
	}

	return rc;
}
/*
 * Probe the device's capabilities (pen and/or touch), configure the
 * input device accordingly, and start reporting.  Returns 0 on success
 * or a negative errno.
 */
static int w8001_setup(struct w8001 *w8001)
{
	struct input_dev *dev = w8001->dev;
	struct w8001_coord coord;
	struct w8001_touch_query touch;
	int error;

	error = w8001_command(w8001, W8001_CMD_STOP, false);
	if (error)
		return error;

	msleep(250);	/* wait 250ms before querying the device */

	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));

	/* penabled? */
	error = w8001_command(w8001, W8001_CMD_QUERY, true);
	if (!error) {
		__set_bit(BTN_TOUCH, dev->keybit);
		__set_bit(BTN_TOOL_PEN, dev->keybit);
		__set_bit(BTN_TOOL_RUBBER, dev->keybit);
		__set_bit(BTN_STYLUS, dev->keybit);
		__set_bit(BTN_STYLUS2, dev->keybit);

		/* the pen query reply has pen-packet layout: the coordinate
		 * fields carry the maxima */
		parse_pen_data(w8001->response, &coord);
		w8001->max_pen_x = coord.x;
		w8001->max_pen_y = coord.y;

		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
		input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
		input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
		if (coord.tilt_x && coord.tilt_y) {
			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
			input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
		}
		w8001->id = 0x90;
		strlcat(w8001->name, " Penabled", sizeof(w8001->name));
	}

	/* Touch enabled? */
	error = w8001_command(w8001, W8001_CMD_TOUCHQUERY, true);

	/*
	 * Some non-touch devices may reply to the touch query. But their
	 * second byte is empty, which indicates touch is not supported.
	 */
	if (!error && w8001->response[1]) {
		__set_bit(BTN_TOUCH, dev->keybit);
		__set_bit(BTN_TOOL_FINGER, dev->keybit);

		parse_touchquery(w8001->response, &touch);
		w8001->max_touch_x = touch.x;
		w8001->max_touch_y = touch.y;

		if (w8001->max_pen_x && w8001->max_pen_y) {
			/* if pen is supported scale to pen maximum */
			touch.x = w8001->max_pen_x;
			touch.y = w8001->max_pen_y;
			touch.panel_res = W8001_PEN_RESOLUTION;
		}

		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
		input_abs_set_res(dev, ABS_X, touch.panel_res);
		input_abs_set_res(dev, ABS_Y, touch.panel_res);

		/* the sensor id selects packet format and device identity */
		switch (touch.sensor_id) {
		case 0:
		case 2:
			w8001->pktlen = W8001_PKTLEN_TOUCH93;
			w8001->id = 0x93;
			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
			break;

		case 1:
		case 3:
		case 4:
			w8001->pktlen = W8001_PKTLEN_TOUCH9A;
			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
			w8001->id = 0x9a;
			break;

		case 5:
			w8001->pktlen = W8001_PKTLEN_TOUCH2FG;

			input_mt_init_slots(dev, 2);
			input_set_abs_params(dev, ABS_MT_POSITION_X,
						0, touch.x, 0, 0);
			input_set_abs_params(dev, ABS_MT_POSITION_Y,
						0, touch.y, 0, 0);
			input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
						0, MT_TOOL_MAX, 0, 0);

			strlcat(w8001->name, " 2FG", sizeof(w8001->name));
			if (w8001->max_pen_x && w8001->max_pen_y)
				w8001->id = 0xE3;
			else
				w8001->id = 0xE2;
			break;
		}
	}

	strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));

	return w8001_command(w8001, W8001_CMD_START, false);
}
/*
 * w8001_disconnect() is the opposite of w8001_connect()
 */

static void w8001_disconnect(struct serio *serio)
{
	struct w8001 *w8001 = serio_get_drvdata(serio);

	/* hold a reference so the input device outlives unregistration */
	input_get_device(w8001->dev);
	input_unregister_device(w8001->dev);
	serio_close(serio);
	serio_set_drvdata(serio, NULL);
	input_put_device(w8001->dev);
	kfree(w8001);
}
/*
* w8001_connect() is the routine that is called when someone adds a
* new serio device that supports the w8001 protocol and registers it as
* an input device.
*/
/* Allocate driver state and input device, open the serio port, probe the
 * hardware via w8001_setup(), then register the input device. */
static int w8001_connect(struct serio *serio, struct serio_driver *drv)
{
	struct w8001 *w8001;
	struct input_dev *input_dev;
	int err;

	w8001 = kzalloc(sizeof(struct w8001), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!w8001 || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	w8001->serio = serio;
	w8001->dev = input_dev;
	init_completion(&w8001->cmd_done);
	snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);

	/* drvdata must be set before serio_open(): the interrupt handler
	 * dereferences it as soon as bytes arrive */
	serio_set_drvdata(serio, w8001);
	err = serio_open(serio, drv);
	if (err)
		goto fail2;

	err = w8001_setup(w8001);
	if (err)
		goto fail3;

	input_dev->name = w8001->name;
	input_dev->phys = w8001->phys;
	input_dev->id.product = w8001->id;
	input_dev->id.bustype = BUS_RS232;
	input_dev->id.vendor = 0x056a;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &serio->dev;

	err = input_register_device(w8001->dev);
	if (err)
		goto fail3;

	return 0;

fail3:
	serio_close(serio);
fail2:
	serio_set_drvdata(serio, NULL);
fail1:
	input_free_device(input_dev);
	kfree(w8001);
	return err;
}
static struct serio_device_id w8001_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_W8001,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, w8001_serio_ids);
/* Serio driver glue: per-byte interrupt handler plus connect/disconnect. */
static struct serio_driver w8001_drv = {
	.driver		= {
		.name	= "w8001",
	},
	.description	= DRIVER_DESC,
	.id_table	= w8001_serio_ids,
	.interrupt	= w8001_interrupt,
	.connect	= w8001_connect,
	.disconnect	= w8001_disconnect,
};
/* Module init/exit: just register/unregister the serio driver. */
static int __init w8001_init(void)
{
	return serio_register_driver(&w8001_drv);
}

static void __exit w8001_exit(void)
{
	serio_unregister_driver(&w8001_drv);
}
module_init(w8001_init);
module_exit(w8001_exit);
| gpl-2.0 |
TeamJB/kernel_samsung_smdk4412 | drivers/media/radio/radio-aimslab.c | 2652 | 10995 | /* radiotrack (radioreveal) driver for Linux radio support
* (c) 1997 M. Kirkwood
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
* History:
* 1999-02-24 Russell Kroll <rkroll@exploits.org>
* Fine tuning/VIDEO_TUNER_LOW
* Frequency range expanded to start at 87 MHz
*
* TODO: Allow for more than one of these foolish entities :-)
*
* Notes on the hardware (reverse engineered from other peoples'
* reverse engineering of AIMS' code :-)
*
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
*
* The signal strength query is unsurprisingly inaccurate. And it seems
* to indicate that (on my card, at least) the frequency setting isn't
* too great. (I have to tune up .025MHz from what the freq should be
* to get a report that the thing is tuned.)
*
* Volume control is (ugh) analogue:
* out(port, start_increasing_volume);
* wait(a_wee_while);
* out(port, stop_changing_the_volume);
*
*/
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
MODULE_AUTHOR("M.Kirkwood");
MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card.");
MODULE_LICENSE("GPL");
#ifndef CONFIG_RADIO_RTRACK_PORT
#define CONFIG_RADIO_RTRACK_PORT -1
#endif
static int io = CONFIG_RADIO_RTRACK_PORT;
static int radio_nr = -1;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)");
module_param(radio_nr, int, 0);
#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
/* Per-card state for the single supported RadioTrack card. */
struct rtrack
{
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	int port;		/* NOTE(review): appears unused here; io is used instead */
	int curvol;		/* last volume step set via rt_setvol() */
	unsigned long curfreq;	/* last frequency set via rt_setfreq() */
	int muted;		/* non-zero while the card output is off */
	int io;			/* base I/O port of the card */
	struct mutex lock;	/* serializes hardware I/O and state updates */
};
static struct rtrack rtrack_card;
/* local things */

/* Step the analogue volume down once: pulse "volume down" for 100ms. */
static void rt_decvol(struct rtrack *rt)
{
	outb(0x58, rt->io);		/* volume down + sigstr + on */
	msleep(100);
	outb(0xd8, rt->io);		/* volume steady + sigstr + on */
}
/* Step the analogue volume up once: pulse "volume up" for 100ms. */
static void rt_incvol(struct rtrack *rt)
{
	outb(0x98, rt->io);		/* volume up + sigstr + on */
	msleep(100);
	outb(0xd8, rt->io);		/* volume steady + sigstr + on */
}
/* Turn the card output off and remember the muted state. */
static void rt_mute(struct rtrack *rt)
{
	mutex_lock(&rt->lock);
	/* set the flag under the lock: rt_setvol()/vidioc_g_ctrl() read
	 * rt->muted while holding rt->lock, so writing it outside the
	 * critical section raced with them */
	rt->muted = 1;
	outb(0xd0, rt->io);		/* volume steady, off */
	mutex_unlock(&rt->lock);
}
/*
 * Set the analogue volume by stepping up or down from the cached level.
 * vol == curvol doubles as "unmute"; vol == 0 mutes the card.
 */
static int rt_setvol(struct rtrack *rt, int vol)
{
	int i;

	mutex_lock(&rt->lock);

	if (vol == rt->curvol) {	/* requested volume = current */
		if (rt->muted) {	/* user is unmuting the card */
			rt->muted = 0;
			outb(0xd8, rt->io);	/* enable card */
		}
		mutex_unlock(&rt->lock);
		return 0;
	}

	if (vol == 0) {			/* volume = 0 means mute the card */
		outb(0x48, rt->io);	/* volume down but still "on" */
		msleep(2000);	/* make sure it's totally down */
		outb(0xd0, rt->io);	/* volume steady, off */
		rt->curvol = 0;		/* track the volume state! */
		mutex_unlock(&rt->lock);
		return 0;
	}

	rt->muted = 0;
	/* step the hardware one 100ms pulse per unit of difference */
	if (vol > rt->curvol)
		for (i = rt->curvol; i < vol; i++)
			rt_incvol(rt);
	else
		for (i = rt->curvol; i > vol; i--)
			rt_decvol(rt);

	rt->curvol = vol;
	mutex_unlock(&rt->lock);
	return 0;
}
/* the 128+64 on these outb's is to keep the volume stable while tuning
 * without them, the volume _will_ creep up with each frequency change
 * and bit 4 (+16) is to keep the signal strength meter enabled
 */

/* Clock a 0 bit into the tuner's serial frequency register. */
static void send_0_byte(struct rtrack *rt)
{
	if (rt->curvol == 0 || rt->muted) {
		outb_p(128+64+16+  1, rt->io);   /* wr-enable + data low */
		outb_p(128+64+16+2+1, rt->io);   /* clock */
	}
	else {
		outb_p(128+64+16+8+  1, rt->io);  /* on + wr-enable + data low */
		outb_p(128+64+16+8+2+1, rt->io);  /* clock */
	}
	msleep(1);
}
/* Clock a 1 bit into the tuner's serial frequency register. */
static void send_1_byte(struct rtrack *rt)
{
	if (rt->curvol == 0 || rt->muted) {
		outb_p(128+64+16+4  +1, rt->io);  /* wr-enable+data high */
		outb_p(128+64+16+4+2+1, rt->io);  /* clock */
	}
	else {
		outb_p(128+64+16+8+4  +1, rt->io); /* on+wr-enable+data high */
		outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
	}

	msleep(1);
}
/*
 * Program the tuner.  freq is in V4L2 "low" units (1/16000 MHz = 62.5 Hz);
 * the value is converted to 50 kHz steps and shifted out bit-serially
 * along with the fixed band/spacing configuration bits.
 */
static int rt_setfreq(struct rtrack *rt, unsigned long freq)
{
	int i;

	mutex_lock(&rt->lock);			/* Stop other ops interfering */

	rt->curfreq = freq;

	/* now uses VIDEO_TUNER_LOW for fine tuning */

	freq += 171200;			/* Add 10.7 MHz IF 		*/
	freq /= 800;			/* Convert to 50 kHz units	*/

	send_0_byte(rt);		/*  0: LSB of frequency		*/

	for (i = 0; i < 13; i++)	/*   : frequency bits (1-13)	*/
		if (freq & (1 << i))
			send_1_byte(rt);
		else
			send_0_byte(rt);

	send_0_byte(rt);		/* 14: test bit - always 0    */
	send_0_byte(rt);		/* 15: test bit - always 0    */

	send_0_byte(rt);		/* 16: band data 0 - always 0 */
	send_0_byte(rt);		/* 17: band data 1 - always 0 */
	send_0_byte(rt);		/* 18: band data 2 - always 0 */
	send_0_byte(rt);		/* 19: time base - always 0   */

	send_0_byte(rt);		/* 20: spacing (0 = 25 kHz)   */
	send_1_byte(rt);		/* 21: spacing (1 = 25 kHz)   */
	send_0_byte(rt);		/* 22: spacing (0 = 25 kHz)   */
	send_1_byte(rt);		/* 23: AM/FM (FM = 1, always) */

	/* restore the output state that the bit-banging disturbed */
	if (rt->curvol == 0 || rt->muted)
		outb(0xd0, rt->io);	/* volume steady + sigstr */
	else
		outb(0xd8, rt->io);	/* volume steady + sigstr + on */

	mutex_unlock(&rt->lock);

	return 0;
}
/* Read the tuned-signal indicator: returns 1 when a station is present,
 * 0 otherwise. */
static int rt_getsigstr(struct rtrack *rt)
{
	int sig;

	mutex_lock(&rt->lock);
	/* bit 1 of the status port is set when no signal is present */
	sig = (inb(rt->io) & 2) ? 0 : 1;
	mutex_unlock(&rt->lock);

	return sig;
}
/* VIDIOC_QUERYCAP: static identification data, nothing read from hardware. */
static int vidioc_querycap(struct file *file, void *priv,
					struct v4l2_capability *v)
{
	strlcpy(v->driver, "radio-aimslab", sizeof(v->driver));
	strlcpy(v->card, "RadioTrack", sizeof(v->card));
	strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
	v->version = RADIO_VERSION;
	v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
	return 0;
}
/* VIDIOC_G_TUNER: single mono FM tuner, 87-108 MHz in 62.5 Hz units. */
static int vidioc_g_tuner(struct file *file, void *priv,
					struct v4l2_tuner *v)
{
	struct rtrack *rt = video_drvdata(file);

	if (v->index > 0)
		return -EINVAL;

	strlcpy(v->name, "FM", sizeof(v->name));
	v->type = V4L2_TUNER_RADIO;
	v->rangelow = 87 * 16000;
	v->rangehigh = 108 * 16000;
	v->rxsubchans = V4L2_TUNER_SUB_MONO;
	v->capability = V4L2_TUNER_CAP_LOW;
	v->audmode = V4L2_TUNER_MODE_MONO;
	/* hardware only reports signal present/absent -> 0 or full scale */
	v->signal = 0xffff * rt_getsigstr(rt);
	return 0;
}
/* VIDIOC_S_TUNER: only tuner 0 exists and nothing is configurable. */
static int vidioc_s_tuner(struct file *file, void *priv,
					struct v4l2_tuner *v)
{
	if (v->index != 0)
		return -EINVAL;

	return 0;
}
/* VIDIOC_S_FREQUENCY: retune the single radio tuner. */
static int vidioc_s_frequency(struct file *file, void *priv,
					struct v4l2_frequency *f)
{
	struct rtrack *rt = video_drvdata(file);

	if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
		return -EINVAL;

	rt_setfreq(rt, f->frequency);
	return 0;
}
/* VIDIOC_G_FREQUENCY: report the cached frequency (not read back from hw). */
static int vidioc_g_frequency(struct file *file, void *priv,
					struct v4l2_frequency *f)
{
	struct rtrack *rt = video_drvdata(file);

	if (f->tuner != 0)
		return -EINVAL;

	f->type = V4L2_TUNER_RADIO;
	f->frequency = rt->curfreq;
	return 0;
}
/* VIDIOC_QUERYCTRL: expose mute (bool) and volume (0-255) controls. */
static int vidioc_queryctrl(struct file *file, void *priv,
					struct v4l2_queryctrl *qc)
{
	switch (qc->id) {
	case V4L2_CID_AUDIO_MUTE:
		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
	case V4L2_CID_AUDIO_VOLUME:
		return v4l2_ctrl_query_fill(qc, 0, 0xff, 1, 0xff);
	}
	return -EINVAL;
}
/* VIDIOC_G_CTRL: report the cached mute / volume state. */
static int vidioc_g_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
{
	struct rtrack *rt = video_drvdata(file);
	int ret = -EINVAL;

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		ctrl->value = rt->muted;
		ret = 0;
		break;
	case V4L2_CID_AUDIO_VOLUME:
		ctrl->value = rt->curvol;
		ret = 0;
		break;
	}

	return ret;
}
/* VIDIOC_S_CTRL: apply mute / volume changes to the hardware. */
static int vidioc_s_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
{
	struct rtrack *rt = video_drvdata(file);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		if (ctrl->value)
			rt_mute(rt);
		else
			/* unmute by re-applying the cached volume */
			rt_setvol(rt, rt->curvol);
		return 0;
	case V4L2_CID_AUDIO_VOLUME:
		rt_setvol(rt, ctrl->value);
		return 0;
	}
	return -EINVAL;
}
/* The following four handlers are boilerplate: the card has exactly one
 * input and one audio channel, so only index 0 is valid. */
static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
	return i ? -EINVAL : 0;
}

static int vidioc_g_audio(struct file *file, void *priv,
					struct v4l2_audio *a)
{
	a->index = 0;
	strlcpy(a->name, "Radio", sizeof(a->name));
	a->capability = V4L2_AUDCAP_STEREO;
	return 0;
}

static int vidioc_s_audio(struct file *file, void *priv,
					struct v4l2_audio *a)
{
	return a->index ? -EINVAL : 0;
}
/* File ops: all access goes through the standard V4L2 ioctl dispatcher. */
static const struct v4l2_file_operations rtrack_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

/* ioctl dispatch table wiring the vidioc_* handlers above. */
static const struct v4l2_ioctl_ops rtrack_ioctl_ops = {
	.vidioc_querycap    = vidioc_querycap,
	.vidioc_g_tuner     = vidioc_g_tuner,
	.vidioc_s_tuner     = vidioc_s_tuner,
	.vidioc_g_audio     = vidioc_g_audio,
	.vidioc_s_audio     = vidioc_s_audio,
	.vidioc_g_input     = vidioc_g_input,
	.vidioc_s_input     = vidioc_s_input,
	.vidioc_g_frequency = vidioc_g_frequency,
	.vidioc_s_frequency = vidioc_s_frequency,
	.vidioc_queryctrl   = vidioc_queryctrl,
	.vidioc_g_ctrl      = vidioc_g_ctrl,
	.vidioc_s_ctrl      = vidioc_s_ctrl,
};
/*
 * Module init: claim the I/O region, register the v4l2 device, mute the
 * card, and register the video device node.
 */
static int __init rtrack_init(void)
{
	struct rtrack *rt = &rtrack_card;
	struct v4l2_device *v4l2_dev = &rt->v4l2_dev;
	int res;

	strlcpy(v4l2_dev->name, "rtrack", sizeof(v4l2_dev->name));
	rt->io = io;

	/* the ISA port cannot be probed; the user must supply it */
	if (rt->io == -1) {
		v4l2_err(v4l2_dev, "you must set an I/O address with io=0x20f or 0x30f\n");
		return -EINVAL;
	}

	if (!request_region(rt->io, 2, "rtrack")) {
		v4l2_err(v4l2_dev, "port 0x%x already in use\n", rt->io);
		return -EBUSY;
	}

	res = v4l2_device_register(NULL, v4l2_dev);
	if (res < 0) {
		release_region(rt->io, 2);
		v4l2_err(v4l2_dev, "could not register v4l2_device\n");
		return res;
	}

	strlcpy(rt->vdev.name, v4l2_dev->name, sizeof(rt->vdev.name));
	rt->vdev.v4l2_dev = v4l2_dev;
	rt->vdev.fops = &rtrack_fops;
	rt->vdev.ioctl_ops = &rtrack_ioctl_ops;
	rt->vdev.release = video_device_release_empty;
	video_set_drvdata(&rt->vdev, rt);

	/* Set up the I/O locking */
	mutex_init(&rt->lock);

	/* mute card - prevents noisy bootups */

	/* this ensures that the volume is all the way down */
	outb(0x48, rt->io);		/* volume down but still "on"	*/
	msleep(2000);	/* make sure it's totally down	*/
	outb(0xc0, rt->io);		/* steady volume, mute card	*/

	if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
		v4l2_device_unregister(&rt->v4l2_dev);
		release_region(rt->io, 2);
		return -EINVAL;
	}
	v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n");

	return 0;
}
/* Module exit: undo rtrack_init() in reverse order. */
static void __exit rtrack_exit(void)
{
	struct rtrack *rt = &rtrack_card;

	video_unregister_device(&rt->vdev);
	v4l2_device_unregister(&rt->v4l2_dev);
	release_region(rt->io, 2);
}
module_init(rtrack_init);
module_exit(rtrack_exit);
| gpl-2.0 |
JoinTheRealms/TF700-dualboot-hunds | drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2652 | 3116 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
/* DRM_VMW_GET_PARAM ioctl: return one device property selected by
 * param->param in param->value. */
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_FIFO_OFFSET:
		param->value = dev_priv->mmio_start;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->vram_size;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}
int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct drm_vmw_fifo_debug_arg *arg =
(struct drm_vmw_fifo_debug_arg *)data;
__le32 __user *buffer = (__le32 __user *)
(unsigned long)arg->debug_buffer;
if (unlikely(fifo_state->last_buffer == NULL))
return -EINVAL;
if (arg->debug_buffer_size < fifo_state->last_data_size) {
arg->used_size = arg->debug_buffer_size;
arg->did_not_fit = 1;
} else {
arg->used_size = fifo_state->last_data_size;
arg->did_not_fit = 0;
}
return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.