repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
TeamWin/android_kernel_samsung_zerofltespr | drivers/gpu/ion/tegra/tegra_ion.c | 6832 | 2174 | /*
* drivers/gpu/tegra/tegra_ion.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "../ion_priv.h"
struct ion_device *idev;
struct ion_mapper *tegra_user_mapper;
int num_heaps;
struct ion_heap **heaps;
int tegra_ion_probe(struct platform_device *pdev)
{
struct ion_platform_data *pdata = pdev->dev.platform_data;
int err;
int i;
num_heaps = pdata->nr;
heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
idev = ion_device_create(NULL);
if (IS_ERR_OR_NULL(idev)) {
kfree(heaps);
return PTR_ERR(idev);
}
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {
struct ion_platform_heap *heap_data = &pdata->heaps[i];
heaps[i] = ion_heap_create(heap_data);
if (IS_ERR_OR_NULL(heaps[i])) {
err = PTR_ERR(heaps[i]);
goto err;
}
ion_device_add_heap(idev, heaps[i]);
}
platform_set_drvdata(pdev, idev);
return 0;
err:
for (i = 0; i < num_heaps; i++) {
if (heaps[i])
ion_heap_destroy(heaps[i]);
}
kfree(heaps);
return err;
}
/*
 * Undo tegra_ion_probe(): destroy the ion device, every heap created at
 * probe time, and release the heap pointer array.
 */
int tegra_ion_remove(struct platform_device *pdev)
{
	struct ion_device *idev = platform_get_drvdata(pdev);
	int i;

	ion_device_destroy(idev);
	/* num_heaps/heaps are the file-scope values set up in probe */
	for (i = 0; i < num_heaps; i++)
		ion_heap_destroy(heaps[i]);
	kfree(heaps);
	return 0;
}
/* Platform-driver glue: binds to the "ion-tegra" platform device. */
static struct platform_driver ion_driver = {
	.probe = tegra_ion_probe,
	.remove = tegra_ion_remove,
	.driver = { .name = "ion-tegra" }
};

/* Module entry point: register the platform driver. */
static int __init ion_init(void)
{
	return platform_driver_register(&ion_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit ion_exit(void)
{
	platform_driver_unregister(&ion_driver);
}

module_init(ion_init);
module_exit(ion_exit);
| gpl-2.0 |
zakee94/stellar_msm8226 | arch/parisc/lib/iomap.c | 8880 | 10309 | /*
* iomap.c - Implement iomap interface for PA-RISC
* Copyright (c) 2004 Matthew Wilcox
*/
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/io.h>
/*
* The iomap space on 32-bit PA-RISC is intended to look like this:
* 00000000-7fffffff virtual mapped IO
* 80000000-8fffffff ISA/EISA port space that can't be virtually mapped
* 90000000-9fffffff Dino port space
* a0000000-afffffff Astro port space
* b0000000-bfffffff PAT port space
* c0000000-cfffffff non-swapped memory IO
* f0000000-ffffffff legacy IO memory pointers
*
* For the moment, here's what it looks like:
* 80000000-8fffffff All ISA/EISA port space
* f0000000-ffffffff legacy IO memory pointers
*
* On 64-bit, everything is extended, so:
* 8000000000000000-8fffffffffffffff All ISA/EISA port space
* f000000000000000-ffffffffffffffff legacy IO memory pointers
*/
/*
* Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END),
* but that's slow and we know it'll be within the first 2GB.
*/
#ifdef CONFIG_64BIT
#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<63) != 0)
#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 60) & 7)
#define IOPORT_MAP_BASE (8UL << 60)
#else
#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<31) != 0)
#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 28) & 7)
#define IOPORT_MAP_BASE (8UL << 28)
#endif
/*
 * Per-region accessor method table: one instance exists per kind of
 * address region (I/O port space vs. legacy I/O memory).  The generic
 * ioread/iowrite entry points below dispatch through this table for
 * indirect cookies.
 */
struct iomap_ops {
	/* single-value reads; 16/32-bit come in LE and big-endian flavours */
	unsigned int (*read8)(void __iomem *);
	unsigned int (*read16)(void __iomem *);
	unsigned int (*read16be)(void __iomem *);
	unsigned int (*read32)(void __iomem *);
	unsigned int (*read32be)(void __iomem *);
	/* single-value writes */
	void (*write8)(u8, void __iomem *);
	void (*write16)(u16, void __iomem *);
	void (*write16be)(u16, void __iomem *);
	void (*write32)(u32, void __iomem *);
	void (*write32be)(u32, void __iomem *);
	/* repeated transfers: (io cookie, memory buffer, count) */
	void (*read8r)(void __iomem *, void *, unsigned long);
	void (*read16r)(void __iomem *, void *, unsigned long);
	void (*read32r)(void __iomem *, void *, unsigned long);
	void (*write8r)(void __iomem *, const void *, unsigned long);
	void (*write16r)(void __iomem *, const void *, unsigned long);
	void (*write32r)(void __iomem *, const void *, unsigned long);
};
/* Generic ioport ops. To be replaced later by specific dino/elroy/wax code */

/* Strip the IOPORT_MAP_BASE tag off a cookie, leaving the port number. */
#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)

/* Thin wrappers mapping each iomap_ops slot onto the inX/outX port ops. */
static unsigned int ioport_read8(void __iomem *addr)
{
	return inb(ADDR2PORT(addr));
}

static unsigned int ioport_read16(void __iomem *addr)
{
	return inw(ADDR2PORT(addr));
}

static unsigned int ioport_read32(void __iomem *addr)
{
	return inl(ADDR2PORT(addr));
}

static void ioport_write8(u8 datum, void __iomem *addr)
{
	outb(datum, ADDR2PORT(addr));
}

static void ioport_write16(u16 datum, void __iomem *addr)
{
	outw(datum, ADDR2PORT(addr));
}

static void ioport_write32(u32 datum, void __iomem *addr)
{
	outl(datum, ADDR2PORT(addr));
}

/* Repeated port transfers delegate to the string I/O instructions. */
static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
{
	insb(ADDR2PORT(addr), dst, count);
}

static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
{
	insw(ADDR2PORT(addr), dst, count);
}

static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
{
	insl(ADDR2PORT(addr), dst, count);
}

static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
{
	outsb(ADDR2PORT(addr), s, n);
}

static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
{
	outsw(ADDR2PORT(addr), s, n);
}

static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
{
	outsl(ADDR2PORT(addr), s, n);
}
static const struct iomap_ops ioport_ops = {
ioport_read8,
ioport_read16,
ioport_read16,
ioport_read32,
ioport_read32,
ioport_write8,
ioport_write16,
ioport_write16,
ioport_write32,
ioport_write32,
ioport_read8r,
ioport_read16r,
ioport_read32r,
ioport_write8r,
ioport_write16r,
ioport_write32r,
};
/* Legacy I/O memory ops */

/*
 * Thin wrappers over the MMIO accessors: readX/writeX perform the
 * little-endian conversion, __raw_* preserve bus byte order (used for
 * the big-endian slots).
 */
static unsigned int iomem_read8(void __iomem *addr)
{
	return readb(addr);
}

static unsigned int iomem_read16(void __iomem *addr)
{
	return readw(addr);
}

static unsigned int iomem_read16be(void __iomem *addr)
{
	return __raw_readw(addr);
}

static unsigned int iomem_read32(void __iomem *addr)
{
	return readl(addr);
}

static unsigned int iomem_read32be(void __iomem *addr)
{
	return __raw_readl(addr);
}

static void iomem_write8(u8 datum, void __iomem *addr)
{
	writeb(datum, addr);
}

static void iomem_write16(u16 datum, void __iomem *addr)
{
	writew(datum, addr);
}

static void iomem_write16be(u16 datum, void __iomem *addr)
{
	__raw_writew(datum, addr);
}

static void iomem_write32(u32 datum, void __iomem *addr)
{
	writel(datum, addr);
}

static void iomem_write32be(u32 datum, void __iomem *addr)
{
	__raw_writel(datum, addr);
}
/*
 * Repeated MMIO transfers: the device address stays fixed (FIFO-style
 * register) while the memory pointer advances by the element size.
 * Pointer arithmetic on void * is a GCC extension (advances by 1 byte),
 * which the kernel relies on throughout.
 */
static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u8 *)dst = __raw_readb(addr);
		dst++;
	}
}

static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u16 *)dst = __raw_readw(addr);
		dst += 2;
	}
}

static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u32 *)dst = __raw_readl(addr);
		dst += 4;
	}
}

static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writeb(*(u8 *)s, addr);
		s++;
	}
}

static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writew(*(u16 *)s, addr);
		s += 2;
	}
}

static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writel(*(u32 *)s, addr);
		s += 4;
	}
}
static const struct iomap_ops iomem_ops = {
iomem_read8,
iomem_read16,
iomem_read16be,
iomem_read32,
iomem_read32be,
iomem_write8,
iomem_write16,
iomem_write16be,
iomem_write32,
iomem_write32be,
iomem_read8r,
iomem_read16r,
iomem_read32r,
iomem_write8r,
iomem_write16r,
iomem_write32r,
};
/*
 * Dispatch table indexed by ADDR_TO_REGION(): region 0 (0x8...) is
 * ISA/EISA port space, region 7 (0xf...) is legacy I/O memory.  The
 * remaining slots are NULL until bus-specific code claims them.
 */
static const struct iomap_ops *iomap_ops[8] = {
	[0] = &ioport_ops,
	[7] = &iomem_ops
};
/*
 * Generic ioread/iowrite entry points.  A cookie with the top bit clear
 * is a directly-dereferenceable MMIO pointer; an "indirect" cookie (top
 * bit set, see INDIRECT_ADDR) is dispatched through iomap_ops by region.
 * Direct 16/32-bit accesses are little-endian per the PCI convention.
 */
unsigned int ioread8(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
	return *((u8 *)addr);
}

unsigned int ioread16(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
	return le16_to_cpup((u16 *)addr);
}

unsigned int ioread16be(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
	return *((u16 *)addr);
}

unsigned int ioread32(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
	return le32_to_cpup((u32 *)addr);
}

unsigned int ioread32be(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
	return *((u32 *)addr);
}

void iowrite8(u8 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
	} else {
		*((u8 *)addr) = datum;
	}
}

void iowrite16(u16 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
	} else {
		*((u16 *)addr) = cpu_to_le16(datum);
	}
}

void iowrite16be(u16 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
	} else {
		*((u16 *)addr) = datum;
	}
}

void iowrite32(u32 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
	} else {
		*((u32 *)addr) = cpu_to_le32(datum);
	}
}

void iowrite32be(u32 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
	} else {
		*((u32 *)addr) = datum;
	}
}
/* Repeating interfaces */

/*
 * Repeated transfers read/write the SAME device location `count` times
 * (FIFO semantics) while the memory-side pointer advances by the
 * element size.  Indirect cookies dispatch through iomap_ops; direct
 * ones use raw in-order accesses with no byteswapping.
 */
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
	} else {
		while (count--) {
			*(u8 *)dst = *(u8 *)addr;
			dst++;
		}
	}
}

void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
	} else {
		while (count--) {
			*(u16 *)dst = *(u16 *)addr;
			dst += 2;
		}
	}
}

void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
	} else {
		while (count--) {
			*(u32 *)dst = *(u32 *)addr;
			dst += 4;
		}
	}
}

void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
	} else {
		while (count--) {
			*(u8 *)addr = *(u8 *)src;
			src++;
		}
	}
}

void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
	} else {
		while (count--) {
			*(u16 *)addr = *(u16 *)src;
			src += 2;
		}
	}
}

void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
	} else {
		while (count--) {
			*(u32 *)addr = *(u32 *)src;
			src += 4;
		}
	}
}
/* Mapping interfaces */

/*
 * Port cookies are synthesized rather than mapped: tag the port number
 * with IOPORT_MAP_BASE so INDIRECT_ADDR() recognises the cookie later.
 * `nr` (the length of the region) is unused here.
 */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)(IOPORT_MAP_BASE | port);
}

/* Only real (direct) MMIO mappings need an iounmap; port cookies don't. */
void ioport_unmap(void __iomem *addr)
{
	if (!INDIRECT_ADDR(addr)) {
		iounmap(addr);
	}
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	if (!INDIRECT_ADDR(addr)) {
		iounmap(addr);
	}
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
EXPORT_SYMBOL(pci_iounmap);
| gpl-2.0 |
atila1974/AK-OnePlusOne-CAF | fs/lockd/xdr.c | 10928 | 7362 | /*
* linux/fs/lockd/xdr.c
*
* XDR support for lockd and the lock client.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>
#define NLMDBG_FACILITY NLMDBG_XDR
/* Widen an on-the-wire signed 32-bit offset to loff_t (sign-extending). */
static inline loff_t
s32_to_loff_t(__s32 offset)
{
	return (loff_t)offset;
}

/*
 * Narrow a loff_t to the signed 32-bit range the NLM v1/v3 protocol can
 * carry, saturating at +/-NLM_OFFSET_MAX rather than truncating.
 */
static inline __s32
loff_t_to_s32(loff_t offset)
{
	__s32 res;

	if (offset >= NLM_OFFSET_MAX)
		res = NLM_OFFSET_MAX;
	else if (offset <= -NLM_OFFSET_MAX)
		res = -NLM_OFFSET_MAX;
	else
		res = offset;
	return res;
}
/*
* XDR functions for basic NLM types
*/
/*
 * Decode an NLM cookie from the XDR stream.  A zero-length cookie on
 * the wire is normalised to four zero bytes; anything longer than
 * NLM_MAXCOOKIELEN is rejected.  Returns the advanced buffer pointer,
 * or NULL on a bad cookie.
 */
static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c)
{
	unsigned int len = ntohl(*p++);

	if (len == 0) {
		/* hockeypux brain damage */
		c->len = 4;
		memset(c->data, 0, 4);
	} else if (len <= NLM_MAXCOOKIELEN) {
		c->len = len;
		memcpy(c->data, p, len);
		p += XDR_QUADLEN(len);
	} else {
		dprintk("lockd: bad cookie size %d (only cookies under "
			"%d bytes are supported.)\n",
			len, NLM_MAXCOOKIELEN);
		return NULL;
	}
	return p;
}
/* Encode a cookie: length word followed by the quad-padded opaque bytes. */
static inline __be32 *
nlm_encode_cookie(__be32 *p, struct nlm_cookie *c)
{
	*p++ = htonl(c->len);
	memcpy(p, c->data, c->len);
	p += XDR_QUADLEN(c->len);
	return p;
}
/*
 * Decode an NFSv2 file handle.  NLM v1/v3 carries fixed-size handles
 * only, so any length other than NFS2_FHSIZE is rejected (returns NULL).
 */
static __be32 *
nlm_decode_fh(__be32 *p, struct nfs_fh *f)
{
	unsigned int len;

	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
		dprintk("lockd: bad fhandle size %d (should be %d)\n",
			len, NFS2_FHSIZE);
		return NULL;
	}
	f->size = NFS2_FHSIZE;
	/* zero the tail so handle comparisons see a canonical value */
	memset(f->data, 0, sizeof(f->data));
	memcpy(f->data, p, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}

/* Encode a fixed-size NFSv2 file handle. */
static inline __be32 *
nlm_encode_fh(__be32 *p, struct nfs_fh *f)
{
	*p++ = htonl(NFS2_FHSIZE);
	memcpy(p, f->data, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}
/*
 * Encode and decode owner handle
 */
/* Owner handles are plain XDR netobjs; delegate to the sunrpc helpers. */
static inline __be32 *
nlm_decode_oh(__be32 *p, struct xdr_netobj *oh)
{
	return xdr_decode_netobj(p, oh);
}

static inline __be32 *
nlm_encode_oh(__be32 *p, struct xdr_netobj *oh)
{
	return xdr_encode_netobj(p, oh);
}
/*
 * Decode an NLM lock description (caller name, fh, owner, svid, range)
 * into lock->fl.  Returns the advanced buffer pointer, or NULL if any
 * component fails to decode.
 */
static __be32 *
nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
{
	struct file_lock *fl = &lock->fl;
	s32 start, len, end;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len,
					    NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return NULL;
	lock->svid = ntohl(*p++);

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid = (pid_t)lock->svid;
	fl->fl_flags = FL_POSIX;
	fl->fl_type = F_RDLCK; /* as good as anything else */
	start = ntohl(*p++);
	len = ntohl(*p++);
	end = start + len - 1;

	fl->fl_start = s32_to_loff_t(start);

	/* len == 0 means "to end of file"; end < 0 catches s32 overflow */
	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s32_to_loff_t(end);
	return p;
}
/*
 * Encode result of a TEST/TEST_MSG call
 */
static __be32 *
nlm_encode_testres(__be32 *p, struct nlm_res *resp)
{
	s32 start, len;

	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return NULL;
	*p++ = resp->status;

	/* a denied TEST carries a description of the conflicting lock */
	if (resp->status == nlm_lck_denied) {
		struct file_lock *fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
		*p++ = htonl(resp->lock.svid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return NULL;

		start = loff_t_to_s32(fl->fl_start);
		/* fl_end == OFFSET_MAX is encoded as len 0 (to end of file) */
		if (fl->fl_end == OFFSET_MAX)
			len = 0;
		else
			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

		*p++ = htonl(start);
		*p++ = htonl(len);
	}
	return p;
}
/*
 * First, the server side XDR functions
 */

/* Decode TEST/TEST_MSG arguments: cookie, exclusive flag, lock. */
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32 exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	return xdr_argsize_check(rqstp, p);
}

/* Encode a TEST reply. */
int
nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_testres(p, resp)))
		return 0;
	return xdr_ressize_check(rqstp, p);
}

/* Decode LOCK arguments: cookie, block, exclusive, lock, reclaim, state. */
int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32 exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	argp->reclaim = ntohl(*p++);
	argp->state = ntohl(*p++);
	argp->monitor = 1; /* monitor client by default */
	return xdr_argsize_check(rqstp, p);
}

/* Decode CANCEL arguments (LOCK layout without reclaim/state). */
int
nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32 exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	return xdr_argsize_check(rqstp, p);
}

/* Decode UNLOCK arguments; the lock type is forced to F_UNLCK. */
int
nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	if (!(p = nlm_decode_cookie(p, &argp->cookie))
	 || !(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	argp->lock.fl.fl_type = F_UNLCK;
	return xdr_argsize_check(rqstp, p);
}
/* Decode SHARE arguments (DOS-style share reservations). */
int
nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	struct nlm_lock *lock = &argp->lock;

	memset(lock, 0, sizeof(*lock));
	locks_init_lock(&lock->fl);
	/* SHARE carries no svid on the wire; use a sentinel value */
	lock->svid = ~(u32) 0;
	lock->fl.fl_pid = (pid_t)lock->svid;

	if (!(p = nlm_decode_cookie(p, &argp->cookie))
	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return 0;
	argp->fsm_mode = ntohl(*p++);
	argp->fsm_access = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

/* Encode a SHARE reply: cookie, status, dummy sequence number. */
int
nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	*p++ = xdr_zero; /* sequence argument */
	return xdr_ressize_check(rqstp, p);
}

/* Encode a generic reply carrying only cookie and status. */
int
nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	return xdr_ressize_check(rqstp, p);
}

/* Decode a FREE_ALL/notify request: caller name plus state counter. */
int
nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
{
	struct nlm_lock *lock = &argp->lock;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

/* Decode a statd reboot notification: monitored host, state, private data. */
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
{
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
	p += XDR_QUADLEN(SM_PRIV_SIZE);
	return xdr_argsize_check(rqstp, p);
}

/* Decode an async (MSG/RES) result: cookie plus raw status word. */
int
nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
		return 0;
	resp->status = *p++;
	return xdr_argsize_check(rqstp, p);
}

/* NULL procedure: no arguments to decode. */
int
nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}

/* NULL procedure: no results to encode. */
int
nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}
| gpl-2.0 |
Maxr1998/hellsCore-mako | fs/lockd/xdr.c | 10928 | 7362 | /*
* linux/fs/lockd/xdr.c
*
* XDR support for lockd and the lock client.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>
#define NLMDBG_FACILITY NLMDBG_XDR
/* Widen an on-the-wire signed 32-bit offset to loff_t (sign-extending). */
static inline loff_t
s32_to_loff_t(__s32 offset)
{
	return (loff_t)offset;
}

/*
 * Narrow a loff_t to the signed 32-bit range the NLM v1/v3 protocol can
 * carry, saturating at +/-NLM_OFFSET_MAX rather than truncating.
 */
static inline __s32
loff_t_to_s32(loff_t offset)
{
	__s32 res;

	if (offset >= NLM_OFFSET_MAX)
		res = NLM_OFFSET_MAX;
	else if (offset <= -NLM_OFFSET_MAX)
		res = -NLM_OFFSET_MAX;
	else
		res = offset;
	return res;
}
/*
* XDR functions for basic NLM types
*/
/*
 * Decode an NLM cookie from the XDR stream.  A zero-length cookie on
 * the wire is normalised to four zero bytes; anything longer than
 * NLM_MAXCOOKIELEN is rejected.  Returns the advanced buffer pointer,
 * or NULL on a bad cookie.
 */
static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c)
{
	unsigned int len = ntohl(*p++);

	if (len == 0) {
		/* hockeypux brain damage */
		c->len = 4;
		memset(c->data, 0, 4);
	} else if (len <= NLM_MAXCOOKIELEN) {
		c->len = len;
		memcpy(c->data, p, len);
		p += XDR_QUADLEN(len);
	} else {
		dprintk("lockd: bad cookie size %d (only cookies under "
			"%d bytes are supported.)\n",
			len, NLM_MAXCOOKIELEN);
		return NULL;
	}
	return p;
}
static inline __be32 *
nlm_encode_cookie(__be32 *p, struct nlm_cookie *c)
{
*p++ = htonl(c->len);
memcpy(p, c->data, c->len);
p+=XDR_QUADLEN(c->len);
return p;
}
static __be32 *
nlm_decode_fh(__be32 *p, struct nfs_fh *f)
{
unsigned int len;
if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
dprintk("lockd: bad fhandle size %d (should be %d)\n",
len, NFS2_FHSIZE);
return NULL;
}
f->size = NFS2_FHSIZE;
memset(f->data, 0, sizeof(f->data));
memcpy(f->data, p, NFS2_FHSIZE);
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
static inline __be32 *
nlm_encode_fh(__be32 *p, struct nfs_fh *f)
{
*p++ = htonl(NFS2_FHSIZE);
memcpy(p, f->data, NFS2_FHSIZE);
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
/*
* Encode and decode owner handle
*/
static inline __be32 *
nlm_decode_oh(__be32 *p, struct xdr_netobj *oh)
{
return xdr_decode_netobj(p, oh);
}
static inline __be32 *
nlm_encode_oh(__be32 *p, struct xdr_netobj *oh)
{
return xdr_encode_netobj(p, oh);
}
static __be32 *
nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
s32 start, len, end;
if (!(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len,
NLM_MAXSTRLEN))
|| !(p = nlm_decode_fh(p, &lock->fh))
|| !(p = nlm_decode_oh(p, &lock->oh)))
return NULL;
lock->svid = ntohl(*p++);
locks_init_lock(fl);
fl->fl_owner = current->files;
fl->fl_pid = (pid_t)lock->svid;
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
start = ntohl(*p++);
len = ntohl(*p++);
end = start + len - 1;
fl->fl_start = s32_to_loff_t(start);
if (len == 0 || end < 0)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = s32_to_loff_t(end);
return p;
}
/*
* Encode result of a TEST/TEST_MSG call
*/
static __be32 *
nlm_encode_testres(__be32 *p, struct nlm_res *resp)
{
s32 start, len;
if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return NULL;
*p++ = resp->status;
if (resp->status == nlm_lck_denied) {
struct file_lock *fl = &resp->lock.fl;
*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
*p++ = htonl(resp->lock.svid);
/* Encode owner handle. */
if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
return NULL;
start = loff_t_to_s32(fl->fl_start);
if (fl->fl_end == OFFSET_MAX)
len = 0;
else
len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
*p++ = htonl(start);
*p++ = htonl(len);
}
return p;
}
/*
* First, the server side XDR functions
*/
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
return 0;
exclusive = ntohl(*p++);
if (!(p = nlm_decode_lock(p, &argp->lock)))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
if (!(p = nlm_encode_testres(p, resp)))
return 0;
return xdr_ressize_check(rqstp, p);
}
int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
return 0;
argp->block = ntohl(*p++);
exclusive = ntohl(*p++);
if (!(p = nlm_decode_lock(p, &argp->lock)))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
argp->reclaim = ntohl(*p++);
argp->state = ntohl(*p++);
argp->monitor = 1; /* monitor client by default */
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
u32 exclusive;
if (!(p = nlm_decode_cookie(p, &argp->cookie)))
return 0;
argp->block = ntohl(*p++);
exclusive = ntohl(*p++);
if (!(p = nlm_decode_lock(p, &argp->lock)))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|| !(p = nlm_decode_lock(p, &argp->lock)))
return 0;
argp->lock.fl.fl_type = F_UNLCK;
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->svid = ~(u32) 0;
lock->fl.fl_pid = (pid_t)lock->svid;
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len, NLM_MAXSTRLEN))
|| !(p = nlm_decode_fh(p, &lock->fh))
|| !(p = nlm_decode_oh(p, &lock->oh)))
return 0;
argp->fsm_mode = ntohl(*p++);
argp->fsm_access = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
*p++ = xdr_zero; /* sequence argument */
return xdr_ressize_check(rqstp, p);
}
int
nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
return xdr_ressize_check(rqstp, p);
}
int
nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
if (!(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len, NLM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
{
if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
p += XDR_QUADLEN(SM_PRIV_SIZE);
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
if (!(p = nlm_decode_cookie(p, &resp->cookie)))
return 0;
resp->status = *p++;
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_argsize_check(rqstp, p);
}
int
nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
| gpl-2.0 |
karandeepdps/ics_p690_kernel_2.35 | fs/afs/vnode.c | 13744 | 24687 | /* AFS vnode management
*
* Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include "internal.h"
#if 0
static noinline bool dump_tree_aux(struct rb_node *node, struct rb_node *parent,
int depth, char lr)
{
struct afs_vnode *vnode;
bool bad = false;
if (!node)
return false;
if (node->rb_left)
bad = dump_tree_aux(node->rb_left, node, depth + 2, '/');
vnode = rb_entry(node, struct afs_vnode, cb_promise);
_debug("%c %*.*s%c%p {%d}",
rb_is_red(node) ? 'R' : 'B',
depth, depth, "", lr,
vnode, vnode->cb_expires_at);
if (rb_parent(node) != parent) {
printk("BAD: %p != %p\n", rb_parent(node), parent);
bad = true;
}
if (node->rb_right)
bad |= dump_tree_aux(node->rb_right, node, depth + 2, '\\');
return bad;
}
static noinline void dump_tree(const char *name, struct afs_server *server)
{
_enter("%s", name);
if (dump_tree_aux(server->cb_promises.rb_node, NULL, 0, '-'))
BUG();
}
#endif
/*
 * insert a vnode into the backing server's vnode tree
 */
static void afs_install_vnode(struct afs_vnode *vnode,
			      struct afs_server *server)
{
	struct afs_server *old_server = vnode->server;
	struct afs_vnode *xvnode;
	struct rb_node *parent, **p;

	_enter("%p,%p", vnode, server);

	/* unhook from the previous server's tree first, if any */
	if (old_server) {
		spin_lock(&old_server->fs_lock);
		rb_erase(&vnode->server_rb, &old_server->fs_vnodes);
		spin_unlock(&old_server->fs_lock);
	}

	/* take a ref on the new server before dropping the old one */
	afs_get_server(server);
	vnode->server = server;
	afs_put_server(old_server);

	/* insert into the server's vnode tree in FID order */
	spin_lock(&server->fs_lock);

	parent = NULL;
	p = &server->fs_vnodes.rb_node;
	while (*p) {
		parent = *p;
		xvnode = rb_entry(parent, struct afs_vnode, server_rb);
		/* order by (vid, vnode, unique) lexicographically */
		if (vnode->fid.vid < xvnode->fid.vid)
			p = &(*p)->rb_left;
		else if (vnode->fid.vid > xvnode->fid.vid)
			p = &(*p)->rb_right;
		else if (vnode->fid.vnode < xvnode->fid.vnode)
			p = &(*p)->rb_left;
		else if (vnode->fid.vnode > xvnode->fid.vnode)
			p = &(*p)->rb_right;
		else if (vnode->fid.unique < xvnode->fid.unique)
			p = &(*p)->rb_left;
		else if (vnode->fid.unique > xvnode->fid.unique)
			p = &(*p)->rb_right;
		else
			BUG(); /* can't happen unless afs_iget() malfunctions */
	}

	rb_link_node(&vnode->server_rb, parent, p);
	rb_insert_color(&vnode->server_rb, &server->fs_vnodes);

	spin_unlock(&server->fs_lock);
	_leave("");
}
/*
 * insert a vnode into the promising server's update/expiration tree
 * - caller must hold vnode->lock
 */
static void afs_vnode_note_promise(struct afs_vnode *vnode,
				   struct afs_server *server)
{
	struct afs_server *old_server;
	struct afs_vnode *xvnode;
	struct rb_node *parent, **p;

	_enter("%p,%p", vnode, server);

	ASSERT(server != NULL);

	old_server = vnode->server;
	if (vnode->cb_promised) {
		/* same server, same expiry: nothing to re-record */
		if (server == old_server &&
		    vnode->cb_expires == vnode->cb_expires_at) {
			_leave(" [no change]");
			return;
		}

		/* withdraw the stale promise from the old server's tree;
		 * cb_promised is re-checked under cb_lock as it may have
		 * been cleared concurrently */
		spin_lock(&old_server->cb_lock);
		if (vnode->cb_promised) {
			_debug("delete");
			rb_erase(&vnode->cb_promise, &old_server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&old_server->cb_lock);
	}

	if (vnode->server != server)
		afs_install_vnode(vnode, server);

	vnode->cb_expires_at = vnode->cb_expires;
	_debug("PROMISE on %p {%lu}",
	       vnode, (unsigned long) vnode->cb_expires_at);

	/* abuse an RB-tree to hold the expiration order (we may have multiple
	 * items with the same expiration time) */
	spin_lock(&server->cb_lock);

	parent = NULL;
	p = &server->cb_promises.rb_node;
	while (*p) {
		parent = *p;
		xvnode = rb_entry(parent, struct afs_vnode, cb_promise);
		if (vnode->cb_expires_at < xvnode->cb_expires_at)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&vnode->cb_promise, parent, p);
	rb_insert_color(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = true;

	spin_unlock(&server->cb_lock);
	_leave("");
}
/*
 * handle remote file deletion by discarding the callback promise
 */
static void afs_vnode_deleted_remotely(struct afs_vnode *vnode)
{
	struct afs_server *server;

	_enter("{%p}", vnode->server);

	set_bit(AFS_VNODE_DELETED, &vnode->flags);

	server = vnode->server;
	if (server) {
		/* cb_promised is re-checked under cb_lock as it may have
		 * been cleared concurrently */
		if (vnode->cb_promised) {
			spin_lock(&server->cb_lock);
			if (vnode->cb_promised) {
				rb_erase(&vnode->cb_promise,
					 &server->cb_promises);
				vnode->cb_promised = false;
			}
			spin_unlock(&server->cb_lock);
		}

		/* detach from the server's vnode tree and drop our ref */
		spin_lock(&server->fs_lock);
		rb_erase(&vnode->server_rb, &server->fs_vnodes);
		spin_unlock(&server->fs_lock);

		vnode->server = NULL;
		afs_put_server(server);
	} else {
		/* no server means there can't be an outstanding promise */
		ASSERT(!vnode->cb_promised);
	}

	_leave("");
}
/*
 * finish off updating the recorded status of a file after a successful
 * operation completion
 * - starts callback expiry timer
 * - adds to server's callback list
 */
void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
				      struct afs_server *server)
{
	_enter("%p,%p", vnode, server);

	spin_lock(&vnode->lock);
	clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
	afs_vnode_note_promise(vnode, server);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);

	/* let anyone waiting on this update proceed */
	wake_up_all(&vnode->update_waitq);
	_leave("");
}
/*
* finish off updating the recorded status of a file after an operation failed
*/
static void afs_vnode_status_update_failed(struct afs_vnode *vnode, int ret)
{
	_enter("{%x:%u},%d", vnode->fid.vid, vnode->fid.vnode, ret);

	spin_lock(&vnode->lock);

	clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	if (ret == -ENOENT) {
		/* the file was deleted on the server */
		_debug("got NOENT from server - marking file deleted");
		afs_vnode_deleted_remotely(vnode);
	}

	/* the update attempt is over, even though it failed */
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);

	/* wake any waiters so they can re-examine the vnode's new state */
	wake_up_all(&vnode->update_waitq);
	_leave("");
}
/*
* fetch file status from the volume
* - don't issue a fetch if:
* - the changed bit is not set and there's a valid callback
* - there are any outstanding ops that will fetch the status
* - TODO implement local caching
*/
int afs_vnode_fetch_status(struct afs_vnode *vnode,
			   struct afs_vnode *auth_vnode, struct key *key)
{
	struct afs_server *server;
	unsigned long acl_order;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%s,{%x:%u.%u}",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

	/* quick unlocked check: nothing to do if the callback promise is
	 * intact and the vnode hasn't been flagged as changed */
	if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
	    vnode->cb_promised) {
		_leave(" [unchanged]");
		return 0;
	}

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" [deleted]");
		return -ENOENT;
	}

	/* snapshot the authenticating vnode's ACL version now so that a
	 * permit can be cached against it after a successful fetch */
	acl_order = 0;
	if (auth_vnode)
		acl_order = auth_vnode->acl_order;

	spin_lock(&vnode->lock);

	/* recheck under the lock in case another thread completed an update
	 * between the unlocked test above and here */
	if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
	    vnode->cb_promised) {
		spin_unlock(&vnode->lock);
		_leave(" [unchanged]");
		return 0;
	}

	ASSERTCMP(vnode->update_cnt, >=, 0);

	if (vnode->update_cnt > 0) {
		/* someone else started a fetch */
		_debug("wait on fetch %d", vnode->update_cnt);

		set_current_state(TASK_UNINTERRUPTIBLE);
		ASSERT(myself.func != NULL);
		add_wait_queue(&vnode->update_waitq, &myself);

		/* wait for the status to be updated */
		for (;;) {
			/* fetch finished: promise restored or file gone */
			if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
				break;
			if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
				break;

			/* check to see if it got updated and invalidated all
			 * before we saw it */
			if (vnode->update_cnt == 0) {
				remove_wait_queue(&vnode->update_waitq,
						  &myself);
				set_current_state(TASK_RUNNING);
				goto get_anyway;
			}

			/* drop the lock whilst sleeping; the waker holds it
			 * only briefly */
			spin_unlock(&vnode->lock);

			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);

			spin_lock(&vnode->lock);
		}

		remove_wait_queue(&vnode->update_waitq, &myself);
		spin_unlock(&vnode->lock);
		set_current_state(TASK_RUNNING);

		return test_bit(AFS_VNODE_DELETED, &vnode->flags) ?
			-ENOENT : 0;
	}

get_anyway:
	/* okay... we're going to have to initiate the op */
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* merge AFS status fetches and clear outstanding callback on this
	 * vnode */
	do {
		/* pick a server to query */
		server = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(server))
			goto no_server;

		_debug("USING SERVER: %p{%08x}",
		       server, ntohl(server->addr.s_addr));

		ret = afs_fs_fetch_file_status(server, key, vnode, NULL,
					       &afs_sync_call);

	} while (!afs_volume_release_fileserver(vnode, server, ret));

	/* adjust the flags */
	if (ret == 0) {
		_debug("adjust");
		if (auth_vnode)
			afs_cache_permit(vnode, key, acl_order);
		afs_vnode_finalise_status_update(vnode, server);
		afs_put_server(server);
	} else {
		_debug("failed [%d]", ret);
		afs_vnode_status_update_failed(vnode, ret);
	}

	ASSERTCMP(vnode->update_cnt, >=, 0);

	_leave(" = %d [cnt %d]", ret, vnode->update_cnt);
	return ret;

no_server:
	/* no usable fileserver: back out the in-flight marker */
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
	return PTR_ERR(server);
}
/*
* fetch file data from the volume
* - TODO implement caching
*/
int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
			 off_t offset, size_t length, struct page *page)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,,,",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	/* this op also returns the file status, so record an update as being
	 * in flight on this vnode */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_fetch_data(srv, key, vnode, offset, length,
					   page, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* fold the outcome back into the vnode's recorded state */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		afs_put_server(srv);
	} else {
		afs_vnode_status_update_failed(vnode, result);
	}

	_leave(" = %d", result);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	return PTR_ERR(srv);
}
/*
* make a file or a directory
*/
int afs_vnode_create(struct afs_vnode *vnode, struct key *key,
		     const char *name, umode_t mode, struct afs_fid *newfid,
		     struct afs_file_status *newstatus,
		     struct afs_callback *newcb, struct afs_server **_server)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,%s,,",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key),
	       name);

	/* this op also returns the status of the parent directory, so record
	 * an update as being in flight on it */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_create(srv, key, vnode, name, mode, newfid,
				       newstatus, newcb, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success the caller inherits our reference on the server via
	 * *_server */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		*_server = srv;
	} else {
		afs_vnode_status_update_failed(vnode, result);
		*_server = NULL;
	}

	_leave(" = %d [cnt %d]", result, vnode->update_cnt);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(srv), vnode->update_cnt);
	return PTR_ERR(srv);
}
/*
* remove a file or directory
*/
int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name,
		     bool isdir)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,%s",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key),
	       name);

	/* this op also returns the status of the parent directory, so record
	 * an update as being in flight on it */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_remove(srv, key, vnode, name, isdir,
				       &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* fold the outcome back into the vnode's recorded state */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		afs_put_server(srv);
	} else {
		afs_vnode_status_update_failed(vnode, result);
	}

	_leave(" = %d [cnt %d]", result, vnode->update_cnt);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(srv), vnode->update_cnt);
	return PTR_ERR(srv);
}
/*
* create a hard link
*/
int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode,
		   struct key *key, const char *name)
{
	struct afs_server *server;
	int ret;

	_enter("%s{%x:%u.%u},%s{%x:%u.%u},%x,%s",
	       dvnode->volume->vlocation->vldb.name,
	       dvnode->fid.vid,
	       dvnode->fid.vnode,
	       dvnode->fid.unique,
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key),
	       name);

	/* this op will fetch the status of both the file being linked to and
	 * the directory being linked into, so mark an update as being in
	 * flight on each */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);
	spin_lock(&dvnode->lock);
	dvnode->update_cnt++;
	spin_unlock(&dvnode->lock);

	do {
		/* pick a server to query (the server selection is driven by
		 * the directory's volume) */
		server = afs_volume_pick_fileserver(dvnode);
		if (IS_ERR(server))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));

		ret = afs_fs_link(server, key, dvnode, vnode, name,
				  &afs_sync_call);

	} while (!afs_volume_release_fileserver(dvnode, server, ret));

	/* adjust the flags on both vnodes */
	if (ret == 0) {
		afs_vnode_finalise_status_update(vnode, server);
		afs_vnode_finalise_status_update(dvnode, server);
		afs_put_server(server);
	} else {
		afs_vnode_status_update_failed(vnode, ret);
		afs_vnode_status_update_failed(dvnode, ret);
	}

	_leave(" = %d [cnt %d]", ret, vnode->update_cnt);
	return ret;

no_server:
	/* no usable fileserver: back out both in-flight markers */
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	spin_lock(&dvnode->lock);
	dvnode->update_cnt--;
	ASSERTCMP(dvnode->update_cnt, >=, 0);
	spin_unlock(&dvnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
	return PTR_ERR(server);
}
/*
* create a symbolic link
*/
int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key,
		      const char *name, const char *content,
		      struct afs_fid *newfid,
		      struct afs_file_status *newstatus,
		      struct afs_server **_server)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,%s,%s,,,",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key),
	       name, content);

	/* this op also returns the status of the parent directory, so record
	 * an update as being in flight on it */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_symlink(srv, key, vnode, name, content,
					newfid, newstatus, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success the caller inherits our reference on the server via
	 * *_server */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		*_server = srv;
	} else {
		afs_vnode_status_update_failed(vnode, result);
		*_server = NULL;
	}

	_leave(" = %d [cnt %d]", result, vnode->update_cnt);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(srv), vnode->update_cnt);
	return PTR_ERR(srv);
}
/*
* rename a file
*/
int afs_vnode_rename(struct afs_vnode *orig_dvnode,
		     struct afs_vnode *new_dvnode,
		     struct key *key,
		     const char *orig_name,
		     const char *new_name)
{
	struct afs_server *server;
	int ret;

	_enter("%s{%x:%u.%u},%s{%u,%u,%u},%x,%s,%s",
	       orig_dvnode->volume->vlocation->vldb.name,
	       orig_dvnode->fid.vid,
	       orig_dvnode->fid.vnode,
	       orig_dvnode->fid.unique,
	       new_dvnode->volume->vlocation->vldb.name,
	       new_dvnode->fid.vid,
	       new_dvnode->fid.vnode,
	       new_dvnode->fid.unique,
	       key_serial(key),
	       orig_name,
	       new_name);

	/* this op will fetch the status on both the directories we're dealing
	 * with; only count once when the rename stays within one directory */
	spin_lock(&orig_dvnode->lock);
	orig_dvnode->update_cnt++;
	spin_unlock(&orig_dvnode->lock);
	if (new_dvnode != orig_dvnode) {
		spin_lock(&new_dvnode->lock);
		new_dvnode->update_cnt++;
		spin_unlock(&new_dvnode->lock);
	}

	do {
		/* pick a server to query (driven by the source directory's
		 * volume) */
		server = afs_volume_pick_fileserver(orig_dvnode);
		if (IS_ERR(server))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));

		ret = afs_fs_rename(server, key, orig_dvnode, orig_name,
				    new_dvnode, new_name, &afs_sync_call);

	} while (!afs_volume_release_fileserver(orig_dvnode, server, ret));

	/* adjust the flags on whichever directories took part */
	if (ret == 0) {
		afs_vnode_finalise_status_update(orig_dvnode, server);
		if (new_dvnode != orig_dvnode)
			afs_vnode_finalise_status_update(new_dvnode, server);
		afs_put_server(server);
	} else {
		afs_vnode_status_update_failed(orig_dvnode, ret);
		if (new_dvnode != orig_dvnode)
			afs_vnode_status_update_failed(new_dvnode, ret);
	}

	_leave(" = %d [cnt %d]", ret, orig_dvnode->update_cnt);
	return ret;

no_server:
	/* no usable fileserver: back out the in-flight markers */
	spin_lock(&orig_dvnode->lock);
	orig_dvnode->update_cnt--;
	ASSERTCMP(orig_dvnode->update_cnt, >=, 0);
	spin_unlock(&orig_dvnode->lock);
	if (new_dvnode != orig_dvnode) {
		spin_lock(&new_dvnode->lock);
		new_dvnode->update_cnt--;
		ASSERTCMP(new_dvnode->update_cnt, >=, 0);
		spin_unlock(&new_dvnode->lock);
	}
	_leave(" = %ld [cnt %d]", PTR_ERR(server), orig_dvnode->update_cnt);
	return PTR_ERR(server);
}
/*
* write to a file
*/
int afs_vnode_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last,
			 unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = wb->vnode;
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,%lx,%lx,%x,%x",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(wb->key),
	       first, last, offset, to);

	/* this op also returns the file status, so record an update as being
	 * in flight on the vnode being written to */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_store_data(srv, wb, first, last, offset, to,
					   &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* fold the outcome back into the vnode's recorded state */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		afs_put_server(srv);
	} else {
		afs_vnode_status_update_failed(vnode, result);
	}

	_leave(" = %d", result);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	return PTR_ERR(srv);
}
/*
* set the attributes on a file
*/
int afs_vnode_setattr(struct afs_vnode *vnode, struct key *key,
		      struct iattr *attr)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	/* this op also returns the file status, so record an update as being
	 * in flight on this vnode */
	spin_lock(&vnode->lock);
	vnode->update_cnt++;
	spin_unlock(&vnode->lock);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_setattr(srv, key, vnode, attr, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* fold the outcome back into the vnode's recorded state */
	if (result == 0) {
		afs_vnode_finalise_status_update(vnode, srv);
		afs_put_server(srv);
	} else {
		afs_vnode_status_update_failed(vnode, result);
	}

	_leave(" = %d", result);
	return result;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	return PTR_ERR(srv);
}
/*
* get the status of a volume
*/
int afs_vnode_get_volume_status(struct afs_vnode *vnode, struct key *key,
				struct afs_volume_status *vs)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_get_volume_status(srv, key, vnode, vs, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success, drop the reference taken on the server we used */
	if (result == 0)
		afs_put_server(srv);

	_leave(" = %d", result);
	return result;

no_server:
	return PTR_ERR(srv);
}
/*
* get a lock on a file
*/
int afs_vnode_set_lock(struct afs_vnode *vnode, struct key *key,
		       afs_lock_type_t type)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x,%u",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_set_lock(srv, key, vnode, type, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success, drop the reference taken on the server we used */
	if (result == 0)
		afs_put_server(srv);

	_leave(" = %d", result);
	return result;

no_server:
	return PTR_ERR(srv);
}
/*
* extend a lock on a file
*/
int afs_vnode_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_extend_lock(srv, key, vnode, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success, drop the reference taken on the server we used */
	if (result == 0)
		afs_put_server(srv);

	_leave(" = %d", result);
	return result;

no_server:
	return PTR_ERR(srv);
}
/*
* release a lock on a file
*/
int afs_vnode_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_server *srv;
	int result;

	_enter("%s{%x:%u.%u},%x",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	/* try each of the volume's fileservers in turn until one gives a
	 * definitive answer */
	do {
		srv = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(srv))
			goto no_server;

		_debug("USING SERVER: %08x\n", ntohl(srv->addr.s_addr));

		result = afs_fs_release_lock(srv, key, vnode, &afs_sync_call);
	} while (!afs_volume_release_fileserver(vnode, srv, result));

	/* on success, drop the reference taken on the server we used */
	if (result == 0)
		afs_put_server(srv);

	_leave(" = %d", result);
	return result;

no_server:
	return PTR_ERR(srv);
}
| gpl-2.0 |
kvinodhbabu/linux | drivers/scsi/mpt2sas/mpt2sas_transport.c | 177 | 66103 | /*
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
* Copyright (C) 2007-2014 LSI Corporation
* Copyright (C) 2013-2014 Avago Technologies
* (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include "mpt2sas_base.h"
/**
* _transport_sas_node_find_by_sas_address - sas node search
* @ioc: per adapter object
* @sas_address: sas address of expander or sas host
* Context: Calling function should acquire ioc->sas_node_lock.
*
* Search for either hba phys or expander device based on handle, then returns
* the sas_node object.
*/
static struct _sas_node *
_transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
    u64 sas_address)
{
	/* the HBA itself is not held in the expander list, so check its
	 * address first */
	if (ioc->sas_hba.sas_address == sas_address)
		return &ioc->sas_hba;

	/* otherwise look the address up amongst the known expanders */
	return mpt2sas_scsih_expander_find_by_sas_address(ioc, sas_address);
}
/**
* _transport_convert_phy_link_rate -
* @link_rate: link rate returned from mpt firmware
*
* Convert link_rate from mpi fusion into sas_transport form.
*/
static enum sas_linkrate
_transport_convert_phy_link_rate(u8 link_rate)
{
	/* translate each MPI negotiated-rate code directly to its SAS
	 * transport-class equivalent; anything unrecognised is reported as
	 * an unknown link rate */
	switch (link_rate) {
	case MPI2_SAS_NEG_LINK_RATE_1_5:
		return SAS_LINK_RATE_1_5_GBPS;
	case MPI2_SAS_NEG_LINK_RATE_3_0:
		return SAS_LINK_RATE_3_0_GBPS;
	case MPI2_SAS_NEG_LINK_RATE_6_0:
		return SAS_LINK_RATE_6_0_GBPS;
	case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
		return SAS_PHY_DISABLED;
	case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
		return SAS_LINK_RATE_FAILED;
	case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
		return SAS_SATA_PORT_SELECTOR;
	case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
		return SAS_PHY_RESET_IN_PROGRESS;
	case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
	case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
	default:
		return SAS_LINK_RATE_UNKNOWN;
	}
}
/**
* _transport_set_identify - set identify for phys and end devices
* @ioc: per adapter object
* @handle: device handle
* @identify: sas identify info
*
* Populates sas identify info.
*
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    struct sas_identify *identify)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 device_info;
	u32 ioc_status;

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		/* bugfix: the arguments were previously (__func__, ioc->name),
		 * swapped relative to the format string - every other printk
		 * in this file passes the adapter name first */
		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	/* read SAS device page 0 for this handle from the firmware */
	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
		    "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
		     __FILE__, __LINE__, __func__);
		return -EIO;
	}

	memset(identify, 0, sizeof(struct sas_identify));
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);

	/* sas_address */
	identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);

	/* phy number of the parent device this device is linked to */
	identify->phy_identifier = sas_device_pg0.PhyNum;

	/* device_type */
	switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
	case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
		identify->device_type = SAS_PHY_UNUSED;
		break;
	case MPI2_SAS_DEVICE_INFO_END_DEVICE:
		identify->device_type = SAS_END_DEVICE;
		break;
	case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
		identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
		break;
	case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
		identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
		break;
	}

	/* initiator_port_protocols */
	if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
		identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
	if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
		identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
	if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
		identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
	if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
		identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;

	/* target_port_protocols */
	if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
		identify->target_port_protocols |= SAS_PROTOCOL_SSP;
	if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
		identify->target_port_protocols |= SAS_PROTOCOL_STP;
	if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
		identify->target_port_protocols |= SAS_PROTOCOL_SMP;
	if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
		identify->target_port_protocols |= SAS_PROTOCOL_SATA;

	return 0;
}
/**
* mpt2sas_transport_done - internal transport layer callback handler.
* @ioc: per adapter object
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
* Callback handler when sending internal generated transport cmds.
* The callback index passed is `ioc->transport_cb_idx`
*
* Return 1 meaning mf should be freed from _base_interrupt
* 0 means the mf is freed from this function.
*/
u8
mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
	MPI2DefaultReply_t *reply_frame;

	reply_frame = mpt2sas_base_get_reply_virt_addr(ioc, reply);

	/* ignore replies that don't belong to an outstanding internal
	 * transport command */
	if (ioc->transport_cmds.status == MPT2_CMD_NOT_USED)
		return 1;
	if (ioc->transport_cmds.smid != smid)
		return 1;

	/* stash the reply frame (if any) for the waiter, then mark the
	 * command complete and wake it */
	ioc->transport_cmds.status |= MPT2_CMD_COMPLETE;
	if (reply_frame) {
		memcpy(ioc->transport_cmds.reply, reply_frame,
		    reply_frame->MsgLength*4);
		ioc->transport_cmds.status |= MPT2_CMD_REPLY_VALID;
	}
	ioc->transport_cmds.status &= ~MPT2_CMD_PENDING;
	complete(&ioc->transport_cmds.done);
	return 1;
}
/* report manufacture request structure */
struct rep_manu_request{
	u8 smp_frame_type;	/* set to 0x40 (SMP request) by the sender */
	u8 function;		/* set to 1 (REPORT MANUFACTURER INFO) */
	u8 reserved;
	u8 request_length;	/* set to 0 */
};
/* report manufacture reply structure */
struct rep_manu_reply{
	u8 smp_frame_type; /* 0x41 */
	u8 function; /* 0x01 */
	u8 function_result;
	u8 response_length;
	u16 expander_change_count;
	u8 reserved0[2];
	u8 sas_format;		/* bit 0 indicates extended component info */
	u8 reserved2[3];
	u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
	u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
	u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
	u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
	/* NOTE(review): stored big-endian on the wire; see the byte swizzle
	 * in _transport_expander_report_manufacture() */
	u16 component_id;
	u8 component_revision_id;
	u8 reserved3;
	u8 vendor_specific[8];
};
/**
* _transport_expander_report_manufacture - obtain SMP report_manufacture
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
* Fills in the sas_expander_device object when SMP port is created.
*
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
    u64 sas_address, struct sas_expander_device *edev)
{
	Mpi2SmpPassthroughRequest_t *mpi_request;
	Mpi2SmpPassthroughReply_t *mpi_reply;
	struct rep_manu_reply *manufacture_reply;
	struct rep_manu_request *manufacture_request;
	int rc;
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	void *psge;
	u32 sgl_flags;
	u8 issue_reset = 0;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	u32 sz;
	u16 wait_state_count;

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		/* bugfix: the arguments were previously (__func__, ioc->name),
		 * swapped relative to the format string - every other printk
		 * in this file passes the adapter name first */
		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	mutex_lock(&ioc->transport_cmds.mutex);

	if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->transport_cmds.status = MPT2_CMD_PENDING;

	/* wait (up to ~10 seconds) for the IOC to reach OPERATIONAL state */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->transport_cmds.smid = smid;

	/* one DMA-coherent buffer holds the SMP request immediately followed
	 * by the space for the reply */
	sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);

	if (!data_out) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
		    __LINE__, __func__);
		rc = -ENOMEM;
		mpt2sas_base_free_smid(ioc, smid);
		goto out;
	}

	manufacture_request = data_out;
	manufacture_request->smp_frame_type = 0x40;
	manufacture_request->function = 1;
	manufacture_request->reserved = 0;
	manufacture_request->request_length = 0;

	memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
	mpi_request->PhysicalPort = 0xFF;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	mpi_request->SASAddress = cpu_to_le64(sas_address);
	mpi_request->RequestDataLength =
	    cpu_to_le16(sizeof(struct rep_manu_request));
	psge = &mpi_request->SGL;

	/* WRITE sgel first */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct rep_manu_request), data_out_dma);

	/* incr sgel */
	psge += ioc->sge_size;

	/* READ sgel last */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct rep_manu_reply), data_out_dma +
	    sizeof(struct rep_manu_request));

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
	    "send to sas_addr(0x%016llx)\n", ioc->name,
	    (unsigned long long)sas_address));
	init_completion(&ioc->transport_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
	    10*HZ);

	if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
		/* only escalate to a hard reset if the command wasn't already
		 * terminated by a reset */
		if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
	    "complete\n", ioc->name));

	if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
		u8 *tmp;

		mpi_reply = ioc->transport_cmds.reply;

		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "report_manufacture - reply data transfer size(%d)\n",
		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));

		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
		    sizeof(struct rep_manu_reply))
			goto out;

		/* copy the manufacturer info into the transport class's
		 * expander device object */
		manufacture_reply = data_out + sizeof(struct rep_manu_request);
		strncpy(edev->vendor_id, manufacture_reply->vendor_id,
		    SAS_EXPANDER_VENDOR_ID_LEN);
		strncpy(edev->product_id, manufacture_reply->product_id,
		    SAS_EXPANDER_PRODUCT_ID_LEN);
		strncpy(edev->product_rev, manufacture_reply->product_rev,
		    SAS_EXPANDER_PRODUCT_REV_LEN);
		edev->level = manufacture_reply->sas_format & 1;
		if (edev->level) {
			strncpy(edev->component_vendor_id,
			    manufacture_reply->component_vendor_id,
			    SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
			/* component_id is big-endian in the SMP reply */
			tmp = (u8 *)&manufacture_reply->component_id;
			edev->component_id = tmp[0] << 8 | tmp[1];
			edev->component_revision_id =
			    manufacture_reply->component_revision_id;
		}
	} else
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "report_manufacture - no reply\n", ioc->name));

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
 out:
	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
	if (data_out)
		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);

	mutex_unlock(&ioc->transport_cmds.mutex);
	return rc;
}
/**
 * _transport_delete_port - helper function to removing a port
 * @ioc: per adapter object
 * @mpt2sas_port: mpt2sas per port object
 *
 * Tears down the remote device (or expander) attached to the port,
 * with transport debugging temporarily enabled around the removal.
 *
 * Returns nothing.
 */
static void
_transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
	struct _sas_port *mpt2sas_port)
{
	u64 sas_address = mpt2sas_port->remote_identify.sas_address;

	dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
	    "remove: sas_addr(0x%016llx)\n",
	    (unsigned long long) sas_address);

	ioc->logging_level |= MPT_DEBUG_TRANSPORT;
	switch (mpt2sas_port->remote_identify.device_type) {
	case SAS_END_DEVICE:
		mpt2sas_device_remove_by_sas_address(ioc, sas_address);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		mpt2sas_expander_remove(ioc, sas_address);
		break;
	default:
		break;
	}
	ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
}
/**
 * _transport_delete_phy - helper function to removing single phy from port
 * @ioc: per adapter object
 * @mpt2sas_port: mpt2sas per port object
 * @mpt2sas_phy: mpt2sas per phy object
 *
 * Unthreads the phy from the port's phy list, detaches it from the
 * transport-class port, and clears its membership flag.
 *
 * Returns nothing.
 */
static void
_transport_delete_phy(struct MPT2SAS_ADAPTER *ioc,
	struct _sas_port *mpt2sas_port, struct _sas_phy *mpt2sas_phy)
{
	u64 sas_address = mpt2sas_port->remote_identify.sas_address;

	dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
	    "remove: sas_addr(0x%016llx), phy(%d)\n",
	    (unsigned long long) sas_address, mpt2sas_phy->phy_id);

	mpt2sas_port->num_phys--;
	list_del(&mpt2sas_phy->port_siblings);
	mpt2sas_phy->phy_belongs_to_port = 0;
	sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
}
/**
 * _transport_add_phy - helper function to adding single phy to port
 * @ioc: per adapter object
 * @mpt2sas_port: mpt2sas per port object
 * @mpt2sas_phy: mpt2sas per phy object
 *
 * Threads the phy onto the port's phy list, registers it with the
 * transport-class port, and marks it as a port member.
 *
 * Returns nothing.
 */
static void
_transport_add_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_port *mpt2sas_port,
	struct _sas_phy *mpt2sas_phy)
{
	u64 sas_address = mpt2sas_port->remote_identify.sas_address;

	dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
	    "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
	    sas_address, mpt2sas_phy->phy_id);

	mpt2sas_port->num_phys++;
	list_add_tail(&mpt2sas_phy->port_siblings, &mpt2sas_port->phy_list);
	mpt2sas_phy->phy_belongs_to_port = 1;
	sas_port_add_phy(mpt2sas_port->port, mpt2sas_phy->phy);
}
/**
 * _transport_add_phy_to_an_existing_port - adding new phy to existing port
 * @ioc: per adapter object
 * @sas_node: sas node object (either expander or sas host)
 * @mpt2sas_phy: mpt2sas per phy object
 * @sas_address: sas address of device/expander were phy needs to be added to
 *
 * Finds the port on @sas_node attached to @sas_address and adds
 * @mpt2sas_phy to it, unless the phy is already a member of some port.
 *
 * Returns nothing.
 */
static void
_transport_add_phy_to_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
	struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy, u64 sas_address)
{
	struct _sas_port *port;
	struct _sas_phy *srch;

	/* nothing to do if the phy already belongs to a port */
	if (mpt2sas_phy->phy_belongs_to_port == 1)
		return;

	list_for_each_entry(port, &sas_node->sas_port_list, port_list) {
		if (port->remote_identify.sas_address != sas_address)
			continue;
		/* bail out if the phy is already threaded on this port */
		list_for_each_entry(srch, &port->phy_list, port_siblings)
			if (srch == mpt2sas_phy)
				return;
		_transport_add_phy(ioc, port, mpt2sas_phy);
		return;
	}
}
/**
 * _transport_del_phy_from_an_existing_port - delete phy from existing port
 * @ioc: per adapter object
 * @sas_node: sas node object (either expander or sas host)
 * @mpt2sas_phy: mpt2sas per phy object
 *
 * Locates the port @mpt2sas_phy currently belongs to and removes the
 * phy from it; if it is the port's last phy the whole port is deleted.
 *
 * Returns nothing.
 */
static void
_transport_del_phy_from_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
	struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy)
{
	struct _sas_port *port, *next;
	struct _sas_phy *srch;

	/* nothing to do if the phy is not a member of any port */
	if (mpt2sas_phy->phy_belongs_to_port == 0)
		return;

	list_for_each_entry_safe(port, next, &sas_node->sas_port_list,
	    port_list) {
		list_for_each_entry(srch, &port->phy_list, port_siblings) {
			if (srch != mpt2sas_phy)
				continue;
			/* last phy: drop the port, else just this phy */
			if (port->num_phys == 1)
				_transport_delete_port(ioc, port);
			else
				_transport_delete_phy(ioc, port, mpt2sas_phy);
			return;
		}
	}
}
/**
 * _transport_sanity_check - sanity check when adding a new port
 * @ioc: per adapter object
 * @sas_node: sas node object (either expander or sas host)
 * @sas_address: sas address of device being added
 *
 * Walks the node's phys and detaches any phy still bound to a port for
 * @sas_address, so the caller can rebuild the port association cleanly.
 */
static void
_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
	u64 sas_address)
{
	int i;

	for (i = 0; i < sas_node->num_phys; i++) {
		struct _sas_phy *p = &sas_node->phy[i];

		if (p->remote_identify.sas_address != sas_address ||
		    p->phy_belongs_to_port != 1)
			continue;
		_transport_del_phy_from_an_existing_port(ioc, sas_node, p);
	}
}
/**
 * mpt2sas_transport_port_add - insert port to the list
 * @ioc: per adapter object
 * @handle: handle of attached device
 * @sas_address: sas address of parent expander or sas host
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adding new port object to the sas_node->sas_port_list.
 *
 * Returns mpt2sas_port, or NULL on any failure (the partially built
 * port is unthreaded and freed on the out_fail path).
 */
struct _sas_port *
mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
	u64 sas_address)
{
	struct _sas_phy *mpt2sas_phy, *next;
	struct _sas_port *mpt2sas_port;
	unsigned long flags;
	struct _sas_node *sas_node;
	struct sas_rphy *rphy;
	int i;
	struct sas_port *port;

	mpt2sas_port = kzalloc(sizeof(struct _sas_port),
	    GFP_KERNEL);
	if (!mpt2sas_port) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return NULL;
	}
	INIT_LIST_HEAD(&mpt2sas_port->port_list);
	INIT_LIST_HEAD(&mpt2sas_port->phy_list);

	/* locate the parent node (hba or expander) under the node lock */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	if (!sas_node) {
		printk(MPT2SAS_ERR_FMT "%s: Could not find "
		    "parent sas_address(0x%016llx)!\n", ioc->name,
		    __func__, (unsigned long long)sas_address);
		goto out_fail;
	}

	/* fill in remote_identify from the firmware's view of @handle */
	if ((_transport_set_identify(ioc, handle,
	    &mpt2sas_port->remote_identify))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out_fail;
	}
	if (mpt2sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out_fail;
	}

	/* drop any stale phy->port associations left for this address */
	_transport_sanity_check(ioc, sas_node,
	    mpt2sas_port->remote_identify.sas_address);

	/* gather every parent phy that is routed to the new device */
	for (i = 0; i < sas_node->num_phys; i++) {
		if (sas_node->phy[i].remote_identify.sas_address !=
		    mpt2sas_port->remote_identify.sas_address)
			continue;
		list_add_tail(&sas_node->phy[i].port_siblings,
		    &mpt2sas_port->phy_list);
		mpt2sas_port->num_phys++;
	}
	if (!mpt2sas_port->num_phys) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out_fail;
	}

	/*
	 * NOTE(review): sas_port_alloc_num() can return NULL and that
	 * return is passed unchecked into sas_port_add() - confirm and
	 * consider adding a NULL check here.
	 */
	port = sas_port_alloc_num(sas_node->parent_dev);
	if ((sas_port_add(port))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out_fail;
	}
	/* attach every collected phy to the transport-class port */
	list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
	    port_siblings) {
		if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
			dev_printk(KERN_INFO, &port->dev, "add: handle(0x%04x)"
			    ", sas_addr(0x%016llx), phy(%d)\n", handle,
			    (unsigned long long)
			    mpt2sas_port->remote_identify.sas_address,
			    mpt2sas_phy->phy_id);
		sas_port_add_phy(port, mpt2sas_phy->phy);
		mpt2sas_phy->phy_belongs_to_port = 1;
	}
	mpt2sas_port->port = port;

	/*
	 * NOTE(review): sas_end_device_alloc()/sas_expander_alloc() may
	 * return NULL; rphy is dereferenced below without a check.
	 */
	if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE)
		rphy = sas_end_device_alloc(port);
	else
		rphy = sas_expander_alloc(port,
		    mpt2sas_port->remote_identify.device_type);
	rphy->identify = mpt2sas_port->remote_identify;
	/* sas_rphy_add failure is logged but deliberately not fatal here */
	if ((sas_rphy_add(rphy))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
	}
	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
		dev_printk(KERN_INFO, &rphy->dev, "add: handle(0x%04x), "
		    "sas_addr(0x%016llx)\n", handle,
		    (unsigned long long)
		    mpt2sas_port->remote_identify.sas_address);
	mpt2sas_port->rphy = rphy;

	/* publish the completed port on the parent's port list */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&mpt2sas_port->port_list, &sas_node->sas_port_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* fill in report manufacture */
	if (mpt2sas_port->remote_identify.device_type ==
	    MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
	    mpt2sas_port->remote_identify.device_type ==
	    MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
		_transport_expander_report_manufacture(ioc,
		    mpt2sas_port->remote_identify.sas_address,
		    rphy_to_expander_device(rphy));

	return mpt2sas_port;

 out_fail:
	/* unthread any phys queued on the unpublished port, then free it */
	list_for_each_entry_safe(mpt2sas_phy, next, &mpt2sas_port->phy_list,
	    port_siblings)
		list_del(&mpt2sas_phy->port_siblings);
	kfree(mpt2sas_port);
	return NULL;
}
/**
 * mpt2sas_transport_port_remove - remove port from the list
 * @ioc: per adapter object
 * @sas_address: sas address of attached device
 * @sas_address_parent: sas address of parent expander or sas host
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_port_list.
 *
 * Return nothing.
 */
void
mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
	u64 sas_address_parent)
{
	int i;
	unsigned long flags;
	struct _sas_port *mpt2sas_port, *next;
	struct _sas_node *sas_node;
	u8 found = 0;
	struct _sas_phy *mpt2sas_phy, *next_phy;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_node = _transport_sas_node_find_by_sas_address(ioc,
	    sas_address_parent);
	if (!sas_node) {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return;
	}
	/* unlink the matching port from the parent while holding the lock */
	list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
	    port_list) {
		if (mpt2sas_port->remote_identify.sas_address != sas_address)
			continue;
		found = 1;
		list_del(&mpt2sas_port->port_list);
		goto out;	/* label directly follows: acts as a break */
	}
 out:
	if (!found) {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return;
	}
	/* forget the remote identity on every parent phy routed here */
	for (i = 0; i < sas_node->num_phys; i++) {
		if (sas_node->phy[i].remote_identify.sas_address == sas_address)
			memset(&sas_node->phy[i].remote_identify, 0 ,
			    sizeof(struct sas_identify));
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/*
	 * The transport-class teardown below may sleep, so it runs after
	 * the spin lock has been dropped; the port was already unlinked
	 * above, so nothing else can find it.
	 */
	list_for_each_entry_safe(mpt2sas_phy, next_phy,
	    &mpt2sas_port->phy_list, port_siblings) {
		if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
			dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
			    "remove: sas_addr(0x%016llx), phy(%d)\n",
			    (unsigned long long)
			    mpt2sas_port->remote_identify.sas_address,
			    mpt2sas_phy->phy_id);
		mpt2sas_phy->phy_belongs_to_port = 0;
		sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
		list_del(&mpt2sas_phy->port_siblings);
	}
	sas_port_delete(mpt2sas_port->port);
	kfree(mpt2sas_port);
}
/**
 * mpt2sas_transport_add_host_phy - report sas_host phy to transport
 * @ioc: per adapter object
 * @mpt2sas_phy: mpt2sas per phy object
 * @phy_pg0: sas phy page 0
 * @parent_dev: parent device class object
 *
 * Allocates a transport-class sas_phy, fills it from SAS PHY page 0,
 * registers it, and links it back into @mpt2sas_phy.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
	*mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
{
	struct sas_phy *phy;
	int phy_index = mpt2sas_phy->phy_id;

	INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
	phy = sas_phy_alloc(parent_dev, phy_index);
	if (!phy) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -1;
	}
	if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
	    &mpt2sas_phy->identify))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		/* fix: the allocated phy was leaked on this error path */
		sas_phy_free(phy);
		return -1;
	}
	phy->identify = mpt2sas_phy->identify;
	mpt2sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
	if (mpt2sas_phy->attached_handle)
		_transport_set_identify(ioc, mpt2sas_phy->attached_handle,
		    &mpt2sas_phy->remote_identify);
	phy->identify.phy_identifier = mpt2sas_phy->phy_id;
	/* translate firmware link-rate encodings for the transport class */
	phy->negotiated_linkrate = _transport_convert_phy_link_rate(
	    phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
	phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
	    phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
	phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
	    phy_pg0.HwLinkRate >> 4);
	phy->minimum_linkrate = _transport_convert_phy_link_rate(
	    phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
	phy->maximum_linkrate = _transport_convert_phy_link_rate(
	    phy_pg0.ProgrammedLinkRate >> 4);
	if ((sas_phy_add(phy))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		sas_phy_free(phy);
		return -1;
	}
	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
		dev_printk(KERN_INFO, &phy->dev,
		    "add: handle(0x%04x), sas_addr(0x%016llx)\n"
		    "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
		    mpt2sas_phy->handle, (unsigned long long)
		    mpt2sas_phy->identify.sas_address,
		    mpt2sas_phy->attached_handle,
		    (unsigned long long)
		    mpt2sas_phy->remote_identify.sas_address);
	mpt2sas_phy->phy = phy;
	return 0;
}
/**
 * mpt2sas_transport_add_expander_phy - report expander phy to transport
 * @ioc: per adapter object
 * @mpt2sas_phy: mpt2sas per phy object
 * @expander_pg1: expander page 1
 * @parent_dev: parent device class object
 *
 * Allocates a transport-class sas_phy, fills it from expander page 1,
 * registers it, and links it back into @mpt2sas_phy.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
	*mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev)
{
	struct sas_phy *phy;
	int phy_index = mpt2sas_phy->phy_id;

	INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
	phy = sas_phy_alloc(parent_dev, phy_index);
	if (!phy) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -1;
	}
	if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
	    &mpt2sas_phy->identify))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		/* fix: the allocated phy was leaked on this error path */
		sas_phy_free(phy);
		return -1;
	}
	phy->identify = mpt2sas_phy->identify;
	mpt2sas_phy->attached_handle =
	    le16_to_cpu(expander_pg1.AttachedDevHandle);
	if (mpt2sas_phy->attached_handle)
		_transport_set_identify(ioc, mpt2sas_phy->attached_handle,
		    &mpt2sas_phy->remote_identify);
	phy->identify.phy_identifier = mpt2sas_phy->phy_id;
	/* translate firmware link-rate encodings for the transport class */
	phy->negotiated_linkrate = _transport_convert_phy_link_rate(
	    expander_pg1.NegotiatedLinkRate &
	    MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
	phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
	    expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
	phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
	    expander_pg1.HwLinkRate >> 4);
	phy->minimum_linkrate = _transport_convert_phy_link_rate(
	    expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
	phy->maximum_linkrate = _transport_convert_phy_link_rate(
	    expander_pg1.ProgrammedLinkRate >> 4);
	if ((sas_phy_add(phy))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		sas_phy_free(phy);
		return -1;
	}
	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
		dev_printk(KERN_INFO, &phy->dev,
		    "add: handle(0x%04x), sas_addr(0x%016llx)\n"
		    "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
		    mpt2sas_phy->handle, (unsigned long long)
		    mpt2sas_phy->identify.sas_address,
		    mpt2sas_phy->attached_handle,
		    (unsigned long long)
		    mpt2sas_phy->remote_identify.sas_address);
	mpt2sas_phy->phy = phy;
	return 0;
}
/**
 * mpt2sas_transport_update_links - refreshing phy link changes
 * @ioc: per adapter object
 * @sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Returns nothing.
 */
void
mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
	u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	unsigned long flags;
	struct _sas_node *sas_node;
	struct _sas_phy *mpt2sas_phy;

	/* no link refreshes while the controller is being reset */
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
	if (!sas_node) {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return;
	}
	mpt2sas_phy = &sas_node->phy[phy_number];
	mpt2sas_phy->attached_handle = handle;
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
		/* link is up: refresh identity and (re)join the port */
		_transport_set_identify(ioc, handle,
		    &mpt2sas_phy->remote_identify);
		_transport_add_phy_to_an_existing_port(ioc, sas_node,
		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
	} else
		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
		    sas_identify));

	if (mpt2sas_phy->phy)
		mpt2sas_phy->phy->negotiated_linkrate =
		    _transport_convert_phy_link_rate(link_rate);

	/*
	 * Fix: also guard the debug print on mpt2sas_phy->phy.  The
	 * assignment just above shows phy may legitimately be NULL, yet
	 * the original dereferenced &mpt2sas_phy->phy->dev here
	 * unconditionally, oopsing when MPT_DEBUG_TRANSPORT is enabled.
	 */
	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT) && mpt2sas_phy->phy)
		dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
		    "refresh: parent sas_addr(0x%016llx),\n"
		    "\tlink_rate(0x%02x), phy(%d)\n"
		    "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
		    (unsigned long long)sas_address,
		    link_rate, phy_number, handle, (unsigned long long)
		    mpt2sas_phy->remote_identify.sas_address);
}
/* Return the adapter private data owning a transport-class phy. */
static inline void *
phy_to_ioc(struct sas_phy *phy)
{
	return shost_priv(dev_to_shost(phy->dev.parent));
}
/* Return the adapter private data owning a transport-class rphy. */
static inline void *
rphy_to_ioc(struct sas_rphy *rphy)
{
	return shost_priv(dev_to_shost(rphy->dev.parent->parent));
}
/*
 * report phy error log structure
 *
 * SMP REPORT PHY ERROR LOG request frame, built in a host DMA buffer
 * and sent to an expander via MPI2 SMP passthrough.
 */
struct phy_error_log_request{
	u8 smp_frame_type; /* 0x40 */
	u8 function; /* 0x11 */
	u8 allocated_response_length;
	u8 request_length; /* 02 */
	u8 reserved_1[5];
	u8 phy_identifier; /* expander phy whose counters are requested */
	u8 reserved_2[2];
};
/*
 * report phy error log reply structure
 *
 * SMP REPORT PHY ERROR LOG response frame; the trailing counters are
 * big-endian on the wire (__be32) and are converted with be32_to_cpu()
 * by the caller before being stored in the sas_phy.
 */
struct phy_error_log_reply{
	u8 smp_frame_type; /* 0x41 */
	u8 function; /* 0x11 */
	u8 function_result;
	u8 response_length;
	__be16 expander_change_count;
	u8 reserved_1[3];
	u8 phy_identifier;
	u8 reserved_2[2];
	__be32 invalid_dword;
	__be32 running_disparity_error;
	__be32 loss_of_dword_sync;
	__be32 phy_reset_problem;
};
/**
 * _transport_get_expander_phy_error_log - return expander counters
 * @ioc: per adapter object
 * @phy: The sas phy object
 *
 * Sends an SMP REPORT PHY ERROR LOG request to the expander owning
 * @phy and copies the returned counters into the sas_phy.  Serialized
 * on ioc->transport_cmds.mutex; may sleep.
 *
 * Returns 0 for success, non-zero for failure.
 *
 */
static int
_transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
	struct sas_phy *phy)
{
	Mpi2SmpPassthroughRequest_t *mpi_request;
	Mpi2SmpPassthroughReply_t *mpi_reply;
	struct phy_error_log_request *phy_error_log_request;
	struct phy_error_log_reply *phy_error_log_reply;
	int rc;
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	void *psge;
	u32 sgl_flags;
	u8 issue_reset = 0;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	u32 sz;
	u16 wait_state_count;

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		/*
		 * Fix: the arguments were swapped (__func__, ioc->name);
		 * MPT2SAS_INFO_FMT consumes ioc->name first, as every
		 * other printk in this file does.
		 */
		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	/* only one transport command may be outstanding at a time */
	mutex_lock(&ioc->transport_cmds.mutex);
	if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->transport_cmds.status = MPT2_CMD_PENDING;

	/* wait up to ~10 seconds for the ioc to become operational */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->transport_cmds.smid = smid;

	/* single DMA buffer holds the request followed by the reply */
	sz = sizeof(struct phy_error_log_request) +
	    sizeof(struct phy_error_log_reply);
	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
	if (!data_out) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
		    __LINE__, __func__);
		rc = -ENOMEM;
		mpt2sas_base_free_smid(ioc, smid);
		goto out;
	}

	rc = -EINVAL;
	memset(data_out, 0, sz);
	phy_error_log_request = data_out;
	phy_error_log_request->smp_frame_type = 0x40;
	phy_error_log_request->function = 0x11;
	phy_error_log_request->request_length = 2;
	phy_error_log_request->allocated_response_length = 0;
	phy_error_log_request->phy_identifier = phy->number;

	memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
	mpi_request->PhysicalPort = 0xFF;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
	mpi_request->RequestDataLength =
	    cpu_to_le16(sizeof(struct phy_error_log_request));
	psge = &mpi_request->SGL;

	/* WRITE sgel first */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct phy_error_log_request), data_out_dma);

	/* incr sgel */
	psge += ioc->sge_size;

	/* READ sgel last */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct phy_error_log_reply), data_out_dma +
	    sizeof(struct phy_error_log_request));

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
	    "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
	    (unsigned long long)phy->identify.sas_address, phy->number));
	init_completion(&ioc->transport_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
	    10*HZ);

	if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
		if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
	    "complete\n", ioc->name));

	if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
		mpi_reply = ioc->transport_cmds.reply;
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_error_log - reply data transfer size(%d)\n",
		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
		    sizeof(struct phy_error_log_reply))
			goto out;	/* rc stays -EINVAL */
		phy_error_log_reply = data_out +
		    sizeof(struct phy_error_log_request);
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_error_log - function_result(%d)\n",
		    ioc->name, phy_error_log_reply->function_result));
		/* publish wire counters (big endian) into the sas_phy */
		phy->invalid_dword_count =
		    be32_to_cpu(phy_error_log_reply->invalid_dword);
		phy->running_disparity_error_count =
		    be32_to_cpu(phy_error_log_reply->running_disparity_error);
		phy->loss_of_dword_sync_count =
		    be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
		phy->phy_reset_problem_count =
		    be32_to_cpu(phy_error_log_reply->phy_reset_problem);
		rc = 0;
	} else
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_error_log - no reply\n", ioc->name));

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
 out:
	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
	if (data_out)
		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);

	mutex_unlock(&ioc->transport_cmds.mutex);
	return rc;
}
/**
 * _transport_get_linkerrors - return phy counters for both hba and expanders
 * @phy: The sas phy object
 *
 * Returns 0 for success, non-zero for failure.
 *
 */
static int
_transport_get_linkerrors(struct sas_phy *phy)
{
	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasPhyPage1_t phy_pg1;
	struct _sas_node *node;
	unsigned long flags;

	/* the phy's parent node must still be known to the driver */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	node = _transport_sas_node_find_by_sas_address(ioc,
	    phy->identify.sas_address);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	if (node == NULL)
		return -EINVAL;

	/* expander phys are serviced via an SMP passthrough request */
	if (phy->identify.sas_address != ioc->sas_hba.sas_address)
		return _transport_get_expander_phy_error_log(ioc, phy);

	/* get hba phy error logs */
	if ((mpt2sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
	    phy->number))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -ENXIO;
	}
	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
		printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
		    "(0x%04x), loginfo(0x%08x)\n", ioc->name,
		    phy->number, le16_to_cpu(mpi_reply.IOCStatus),
		    le32_to_cpu(mpi_reply.IOCLogInfo));

	phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
	phy->running_disparity_error_count =
	    le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
	phy->loss_of_dword_sync_count =
	    le32_to_cpu(phy_pg1.LossDwordSynchCount);
	phy->phy_reset_problem_count =
	    le32_to_cpu(phy_pg1.PhyResetProblemCount);
	return 0;
}
/**
 * _transport_get_enclosure_identifier -
 * @rphy: The sas remote phy object
 * @identifier: filled in with the enclosure logical id (0 on failure)
 *
 * Obtain the enclosure logical id for an expander.
 * Returns 0 for success, non-zero for failure.
 */
static int
_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
	struct _sas_device *sas_device;
	unsigned long flags;
	int rc = -ENXIO;

	*identifier = 0;
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
	    rphy->identify.sas_address);
	if (sas_device) {
		*identifier = sas_device->enclosure_logical_id;
		rc = 0;
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;
}
/**
 * _transport_get_bay_identifier -
 * @rphy: The sas remote phy object
 *
 * Returns the slot id for a device that resides inside an enclosure,
 * or -ENXIO when the device is unknown.
 */
static int
_transport_get_bay_identifier(struct sas_rphy *rphy)
{
	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
	struct _sas_device *sas_device;
	unsigned long flags;
	int rc = -ENXIO;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
	    rphy->identify.sas_address);
	if (sas_device) {
		rc = sas_device->slot;
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;
}
/*
 * phy control request structure
 *
 * SMP PHY CONTROL request frame, built in a host DMA buffer and sent
 * to an expander via MPI2 SMP passthrough.
 *
 * NOTE(review): expander_change_count and attached_device_name are
 * declared u16/u64 rather than __be16/__be64; in this file they are
 * only ever left zero by the caller's memset, so no byte-order bug is
 * visible here - confirm before ever populating them.
 */
struct phy_control_request{
	u8 smp_frame_type; /* 0x40 */
	u8 function; /* 0x91 */
	u8 allocated_response_length;
	u8 request_length; /* 0x09 */
	u16 expander_change_count;
	u8 reserved_1[3];
	u8 phy_identifier; /* expander phy to operate on */
	u8 phy_operation; /* link reset / hard reset / disable */
	u8 reserved_2[13];
	u64 attached_device_name;
	u8 programmed_min_physical_link_rate;
	u8 programmed_max_physical_link_rate;
	u8 reserved_3[6];
};
/* phy control reply structure */
struct phy_control_reply{
	u8 smp_frame_type; /* 0x41 */
	/*
	 * NOTE(review): annotated 0x11 in the original, but the request
	 * builder uses function 0x91 (PHY CONTROL) - the reply echoes
	 * the request function, so 0x11 looks like a copy-paste from
	 * phy_error_log_reply; confirm.
	 */
	u8 function; /* 0x11 */
	u8 function_result;
	u8 response_length;
};
/* phy_operation values for the SMP PHY CONTROL request */
#define SMP_PHY_CONTROL_LINK_RESET (0x01)
#define SMP_PHY_CONTROL_HARD_RESET (0x02)
#define SMP_PHY_CONTROL_DISABLE (0x03)
/**
 * _transport_expander_phy_control - expander phy control
 * @ioc: per adapter object
 * @phy: The sas phy object
 * @phy_operation: SMP_PHY_CONTROL_* operation to perform
 *
 * Sends an SMP PHY CONTROL request to the expander owning @phy.
 * Serialized on ioc->transport_cmds.mutex; may sleep.
 *
 * Returns 0 for success, non-zero for failure.
 *
 */
static int
_transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
	struct sas_phy *phy, u8 phy_operation)
{
	Mpi2SmpPassthroughRequest_t *mpi_request;
	Mpi2SmpPassthroughReply_t *mpi_reply;
	struct phy_control_request *phy_control_request;
	struct phy_control_reply *phy_control_reply;
	int rc;
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	void *psge;
	u32 sgl_flags;
	u8 issue_reset = 0;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	u32 sz;
	u16 wait_state_count;

	/*
	 * Fix: also bail out during pci error recovery, matching
	 * _transport_get_expander_phy_error_log(), and pass ioc->name
	 * before __func__ (the original had the printk args swapped).
	 */
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	/* only one transport command may be outstanding at a time */
	mutex_lock(&ioc->transport_cmds.mutex);
	if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->transport_cmds.status = MPT2_CMD_PENDING;

	/* wait up to ~10 seconds for the ioc to become operational */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->transport_cmds.smid = smid;

	/* single DMA buffer holds the request followed by the reply */
	sz = sizeof(struct phy_control_request) +
	    sizeof(struct phy_control_reply);
	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
	if (!data_out) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
		    __LINE__, __func__);
		rc = -ENOMEM;
		mpt2sas_base_free_smid(ioc, smid);
		goto out;
	}

	rc = -EINVAL;
	memset(data_out, 0, sz);
	phy_control_request = data_out;
	phy_control_request->smp_frame_type = 0x40;
	phy_control_request->function = 0x91;
	phy_control_request->request_length = 9;
	phy_control_request->allocated_response_length = 0;
	phy_control_request->phy_identifier = phy->number;
	phy_control_request->phy_operation = phy_operation;
	phy_control_request->programmed_min_physical_link_rate =
	    phy->minimum_linkrate << 4;
	phy_control_request->programmed_max_physical_link_rate =
	    phy->maximum_linkrate << 4;

	memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
	mpi_request->PhysicalPort = 0xFF;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
	/*
	 * Fix: the original advertised sizeof(struct
	 * phy_error_log_request) here - a copy-paste from the error-log
	 * path; this request is a phy_control_request.
	 */
	mpi_request->RequestDataLength =
	    cpu_to_le16(sizeof(struct phy_control_request));
	psge = &mpi_request->SGL;

	/* WRITE sgel first */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct phy_control_request), data_out_dma);

	/* incr sgel */
	psge += ioc->sge_size;

	/* READ sgel last */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	ioc->base_add_sg_single(psge, sgl_flags |
	    sizeof(struct phy_control_reply), data_out_dma +
	    sizeof(struct phy_control_request));

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
	    "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
	    (unsigned long long)phy->identify.sas_address, phy->number,
	    phy_operation));
	init_completion(&ioc->transport_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
	    10*HZ);

	if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
		if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
	    "complete\n", ioc->name));

	if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
		mpi_reply = ioc->transport_cmds.reply;
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_control - reply data transfer size(%d)\n",
		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
		    sizeof(struct phy_control_reply))
			goto out;	/* rc stays -EINVAL */
		phy_control_reply = data_out +
		    sizeof(struct phy_control_request);
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_control - function_result(%d)\n",
		    ioc->name, phy_control_reply->function_result));
		rc = 0;
	} else
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "phy_control - no reply\n", ioc->name));

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
 out:
	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
	if (data_out)
		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);

	mutex_unlock(&ioc->transport_cmds.mutex);
	return rc;
}
/**
* _transport_phy_reset -
* @phy: The sas phy object
* @hard_reset:
*
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
	Mpi2SasIoUnitControlReply_t mpi_reply;
	Mpi2SasIoUnitControlRequest_t mpi_request;
	unsigned long flags;

	/* reject phys that are not part of this adapter's known topology */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	if (_transport_sas_node_find_by_sas_address(ioc,
	    phy->identify.sas_address) == NULL) {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle expander phys */
	if (phy->identify.sas_address != ioc->sas_hba.sas_address)
		return _transport_expander_phy_control(ioc, phy,
		    (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
		    SMP_PHY_CONTROL_LINK_RESET);

	/* handle hba phys */
	/*
	 * BUGFIX: the original cleared sizeof(Mpi2SasIoUnitControlReply_t)
	 * bytes of a Mpi2SasIoUnitControlRequest_t object.  Zero the object
	 * being initialized using its own size so the whole request frame
	 * is cleared regardless of how the two structs differ.
	 */
	memset(&mpi_request, 0, sizeof(mpi_request));
	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request.Operation = hard_reset ?
	    MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
	mpi_request.PhyNum = phy->number;

	if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	/* log any non-zero completion status reported by the firmware */
	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
		printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
		    "(0x%04x), loginfo(0x%08x)\n", ioc->name,
		    phy->number, le16_to_cpu(mpi_reply.IOCStatus),
		    le32_to_cpu(mpi_reply.IOCLogInfo));

	return 0;
}
/**
* _transport_phy_enable - enable/disable phys
* @phy: The sas phy object
* @enable: enable phy when true
*
* Only supports sas_host direct attached phys.
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_phy_enable(struct sas_phy *phy, int enable)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
u16 sz;
int rc = 0;
unsigned long flags;
int i, discovery_active;
/* reject phys that are not part of this adapter's known topology */
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
phy->identify.sas_address) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* handle expander phys: done via an SMP PHY CONTROL to the expander */
if (phy->identify.sas_address != ioc->sas_hba.sas_address)
return _transport_expander_phy_control(ioc, phy,
(enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
SMP_PHY_CONTROL_DISABLE);
/* handle hba phys */
/* read sas_iounit page 0: size depends on the number of host phys */
sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
/* firmware returned the page; still need to check its IOCStatus */
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
/* unable to enable/disable phys when discovery is active */
for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
if (sas_iounit_pg0->PhyData[i].PortFlags &
MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
printk(MPT2SAS_ERR_FMT "discovery is active on "
"port = %d, phy = %d: unable to enable/disable "
"phys, try again later!\n", ioc->name,
sas_iounit_pg0->PhyData[i].Port, i);
discovery_active = 1;
}
}
if (discovery_active) {
rc = -EAGAIN;
goto out;
}
/* read sas_iounit page 1: this is the page that will be written back */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
/* copy Port/PortFlags/PhyFlags from page 0 */
/* (page 1 writes must carry forward the current page 0 settings) */
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
sas_iounit_pg1->PhyData[i].Port =
sas_iounit_pg0->PhyData[i].Port;
sas_iounit_pg1->PhyData[i].PortFlags =
(sas_iounit_pg0->PhyData[i].PortFlags &
MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
sas_iounit_pg1->PhyData[i].PhyFlags =
(sas_iounit_pg0->PhyData[i].PhyFlags &
(MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
}
/* flip only the DISABLE bit of the targeted phy */
if (enable)
sas_iounit_pg1->PhyData[phy->number].PhyFlags
&= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
else
sas_iounit_pg1->PhyData[phy->number].PhyFlags
|= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
/* NOTE(review): the set_pg1 return value is ignored here -- confirm
* whether a write failure should be reported to the caller */
mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
/* link reset so the newly enabled phy renegotiates */
if (enable)
_transport_phy_reset(phy, 0);
out:
kfree(sas_iounit_pg1);
kfree(sas_iounit_pg0);
return rc;
}
/**
* _transport_phy_speed - set phy min/max link rates
* @phy: The sas phy object
* @rates: rates defined in sas_phy_linkrates
*
* Only support sas_host direct attached phys.
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi2SasPhyPage0_t phy_pg0;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
u16 sz;
int i;
int rc = 0;
unsigned long flags;
/* reject phys that are not part of this adapter's known topology */
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
phy->identify.sas_address) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* default unset rates to the current ones, clamp to the hw limits */
if (!rates->minimum_linkrate)
rates->minimum_linkrate = phy->minimum_linkrate;
else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
rates->minimum_linkrate = phy->minimum_linkrate_hw;
if (!rates->maximum_linkrate)
rates->maximum_linkrate = phy->maximum_linkrate;
else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
rates->maximum_linkrate = phy->maximum_linkrate_hw;
/* handle expander phys: record rates, then SMP LINK_RESET */
if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
phy->minimum_linkrate = rates->minimum_linkrate;
phy->maximum_linkrate = rates->maximum_linkrate;
return _transport_expander_phy_control(ioc, phy,
SMP_PHY_CONTROL_LINK_RESET);
}
/* handle hba phys */
/* sas_iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
/* rewrite MaxMinLinkRate (min in low nibble, max in high nibble) for
* every phy: other phys keep their current programmed rates, the
* targeted phy gets the requested ones */
for (i = 0; i < ioc->sas_hba.num_phys; i++) {
if (phy->number != i) {
sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
(ioc->sas_hba.phy[i].phy->minimum_linkrate +
(ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
} else {
sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
(rates->minimum_linkrate +
(rates->maximum_linkrate << 4));
}
}
if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
sz)) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
/* link reset so the new programmed rates take effect */
_transport_phy_reset(phy, 0);
/* read phy page 0, then update the rates in the sas transport phy */
if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
phy->number)) {
phy->minimum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate >> 4);
phy->negotiated_linkrate = _transport_convert_phy_link_rate(
phy_pg0.NegotiatedLinkRate &
MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
}
out:
kfree(sas_iounit_pg1);
return rc;
}
/**
* _transport_smp_handler - transport portal for smp passthru
* @shost: shost object
* @rphy: sas transport rphy object
* @req: BSG request carrying the SMP frame; the response buffer is
*       supplied via req->next_rq
*
* This is used primarily for smp_utils.
* Example:
* smp_rep_general /sys/class/bsg/expander-5:0
*/
static int
_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
	struct request *req)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	Mpi2SmpPassthroughRequest_t *mpi_request;
	Mpi2SmpPassthroughReply_t *mpi_reply;
	int rc;
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	void *psge;
	u32 sgl_flags;
	u8 issue_reset = 0;
	dma_addr_t dma_addr_in = 0;
	dma_addr_t dma_addr_out = 0;
	dma_addr_t pci_dma_in = 0;
	dma_addr_t pci_dma_out = 0;
	void *pci_addr_in = NULL;
	void *pci_addr_out = NULL;
	u16 wait_state_count;
	struct request *rsp = req->next_rq;
	struct bio_vec bvec;
	struct bvec_iter iter;

	if (!rsp) {
		printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
		    "missing\n", ioc->name, __func__);
		return -EINVAL;
	}
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		/*
		 * BUGFIX: ioc->name and __func__ were swapped here;
		 * MPT2SAS_INFO_FMT consumes ioc->name first, followed by
		 * the "%s:" for the function name (see every other call
		 * site in this file).
		 */
		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	/* serialize use of the single transport_cmds context */
	rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
	if (rc)
		return rc;

	if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n", ioc->name,
		    __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->transport_cmds.status = MPT2_CMD_PENDING;

	/* Check if the request is split across multiple segments */
	if (bio_multiple_segments(req->bio)) {
		u32 offset = 0;

		/* Allocate memory and copy the request */
		pci_addr_out = pci_alloc_consistent(ioc->pdev,
		    blk_rq_bytes(req), &pci_dma_out);
		if (!pci_addr_out) {
			printk(MPT2SAS_INFO_FMT "%s(): PCI Addr out = NULL\n",
			    ioc->name, __func__);
			rc = -ENOMEM;
			goto out;
		}
		bio_for_each_segment(bvec, req->bio, iter) {
			memcpy(pci_addr_out + offset,
			    page_address(bvec.bv_page) + bvec.bv_offset,
			    bvec.bv_len);
			offset += bvec.bv_len;
		}
	} else {
		/* single-segment request can be DMA mapped in place */
		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
		    blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
		if (!dma_addr_out) {
			printk(MPT2SAS_INFO_FMT "%s(): DMA Addr out = NULL\n",
			    ioc->name, __func__);
			rc = -ENOMEM;
			goto free_pci;
		}
	}

	/* Check if the response needs to be populated across
	 * multiple segments */
	if (bio_multiple_segments(rsp->bio)) {
		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
		    &pci_dma_in);
		if (!pci_addr_in) {
			printk(MPT2SAS_INFO_FMT "%s(): PCI Addr in = NULL\n",
			    ioc->name, __func__);
			rc = -ENOMEM;
			goto unmap;
		}
	} else {
		dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
		    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
		if (!dma_addr_in) {
			printk(MPT2SAS_INFO_FMT "%s(): DMA Addr in = NULL\n",
			    ioc->name, __func__);
			rc = -ENOMEM;
			goto unmap;
		}
	}

	/* wait (up to 10s) for the IOC to reach the OPERATIONAL state */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto unmap;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto unmap;
	}

	rc = 0;
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->transport_cmds.smid = smid;

	/* build the SMP passthrough request frame */
	memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
	mpi_request->PhysicalPort = 0xFF;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	mpi_request->SASAddress = (rphy) ?
	    cpu_to_le64(rphy->identify.sas_address) :
	    cpu_to_le64(ioc->sas_hba.sas_address);
	/* BSG length includes the 4-byte CRC, the firmware's does not */
	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
	psge = &mpi_request->SGL;

	/* WRITE sgel first */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	if (bio_multiple_segments(req->bio)) {
		ioc->base_add_sg_single(psge, sgl_flags |
		    (blk_rq_bytes(req) - 4), pci_dma_out);
	} else {
		ioc->base_add_sg_single(psge, sgl_flags |
		    (blk_rq_bytes(req) - 4), dma_addr_out);
	}

	/* incr sgel */
	psge += ioc->sge_size;

	/* READ sgel last */
	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST);
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
	if (bio_multiple_segments(rsp->bio)) {
		ioc->base_add_sg_single(psge, sgl_flags |
		    (blk_rq_bytes(rsp) + 4), pci_dma_in);
	} else {
		ioc->base_add_sg_single(psge, sgl_flags |
		    (blk_rq_bytes(rsp) + 4), dma_addr_in);
	}

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
	    "sending smp request\n", ioc->name, __func__));

	init_completion(&ioc->transport_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
	    10*HZ);

	if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
		/* BUGFIX: ioc->name and __func__ were swapped (see above) */
		printk(MPT2SAS_ERR_FMT "%s : timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
		if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
	    "complete\n", ioc->name, __func__));

	if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
		mpi_reply = ioc->transport_cmds.reply;
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "%s - reply data transfer size(%d)\n",
		    ioc->name, __func__,
		    le16_to_cpu(mpi_reply->ResponseDataLength)));

		/* the reply frame itself is returned in the sense buffer */
		memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
		req->sense_len = sizeof(*mpi_reply);
		req->resid_len = 0;
		rsp->resid_len -=
		    le16_to_cpu(mpi_reply->ResponseDataLength);

		/* check if the resp needs to be copied from the allocated
		 * pci mem */
		if (bio_multiple_segments(rsp->bio)) {
			u32 offset = 0;
			u32 bytes_to_copy =
			    le16_to_cpu(mpi_reply->ResponseDataLength);
			bio_for_each_segment(bvec, rsp->bio, iter) {
				if (bytes_to_copy <= bvec.bv_len) {
					memcpy(page_address(bvec.bv_page) +
					    bvec.bv_offset, pci_addr_in +
					    offset, bytes_to_copy);
					break;
				} else {
					memcpy(page_address(bvec.bv_page) +
					    bvec.bv_offset, pci_addr_in +
					    offset, bvec.bv_len);
					bytes_to_copy -= bvec.bv_len;
				}
				offset += bvec.bv_len;
			}
		}
	} else {
		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "%s - no reply\n", ioc->name, __func__));
		rc = -ENXIO;
	}

 issue_host_reset:
	if (issue_reset) {
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		rc = -ETIMEDOUT;
	}

 unmap:
	if (dma_addr_out)
		pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
		    PCI_DMA_BIDIRECTIONAL);
	if (dma_addr_in)
		pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
		    PCI_DMA_BIDIRECTIONAL);

 free_pci:
	if (pci_addr_out)
		pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
		    pci_dma_out);
	if (pci_addr_in)
		pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
		    pci_dma_in);

 out:
	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->transport_cmds.mutex);
	return rc;
}
/* sas_transport callbacks exported to the SAS transport class */
struct sas_function_template mpt2sas_transport_functions = {
.get_linkerrors = _transport_get_linkerrors,
.get_enclosure_identifier = _transport_get_enclosure_identifier,
.get_bay_identifier = _transport_get_bay_identifier,
.phy_reset = _transport_phy_reset,
.phy_enable = _transport_phy_enable,
.set_phy_speed = _transport_phy_speed,
.smp_handler = _transport_smp_handler,
};
/* allocated in the driver's init path via sas_attach_transport() */
struct scsi_transport_template *mpt2sas_transport_template;
| gpl-2.0 |
meta-debian/linux-ltsi | drivers/target/loopback/tcm_loop.c | 177 | 42174 | /*******************************************************************************
*
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
* © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include "tcm_loop.h"
/* Recover the tcm_loop_hba embedding a struct device */
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;
/* Workqueue used to defer queuecommand submission out of IRQ context */
static struct workqueue_struct *tcm_loop_workqueue;
/* Slab cache for struct tcm_loop_cmd descriptors */
static struct kmem_cache *tcm_loop_cmd_cache;
/* Monotonic count of emulated HBAs created so far */
static int tcm_loop_hba_no_cnt;
static int tcm_loop_queue_status(struct se_cmd *se_cmd);
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Descriptors carrying a valid TMR are released directly in
	 * tcm_loop_device_reset() with transport_generic_free_cmd(),
	 * so they must not be released here.
	 */
	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		/*
		 * Dropping the descriptor triggers the fabric callback
		 * that frees the embedding struct tcm_loop_cmd via
		 * tcm_loop_deallocate_core_cmd().
		 */
		transport_generic_free_cmd(se_cmd, 0);
		return 1;
	}
	return 0;
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	/* Recover the embedding descriptor and return it to its slab cache. */
	struct tcm_loop_cmd *loop_cmd;

	loop_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd);
	kmem_cache_free(tcm_loop_cmd_cache, loop_cmd);
}
/* Minimal /proc/scsi show_info hook; only identifies the loopback LLD. */
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
seq_printf(m, "tcm_loop_proc_info()\n");
return 0;
}
static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);
static int pseudo_lld_bus_match(struct device *dev,
		struct device_driver *dev_driver)
{
	/* Every device on the pseudo bus matches the single tcm_loop driver. */
	return 1;
}
/* Pseudo bus on which the emulated tcm_loop adapters are registered */
static struct bus_type tcm_loop_lld_bus = {
.name = "tcm_loop_bus",
.match = pseudo_lld_bus_match,
.probe = tcm_loop_driver_probe,
.remove = tcm_loop_driver_remove,
};
/* The single driver bound to every adapter on tcm_loop_lld_bus */
static struct device_driver tcm_loop_driverfs = {
.name = "tcm_loop",
.bus = &tcm_loop_lld_bus,
};
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
struct device *tcm_loop_primary;
/*
* Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
* drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
*/
static int tcm_loop_change_queue_depth(
	struct scsi_device *sdev,
	int depth,
	int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		/* both cases (re)apply the requested depth unchanged */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
	/* Devices without tagged-queueing support are forced untagged. */
	if (!sdev->tagged_supported)
		return 0;

	scsi_set_tag_type(sdev, tag);
	if (tag)
		scsi_activate_tcq(sdev, sdev->queue_depth);
	else
		scsi_deactivate_tcq(sdev, sdev->queue_depth);
	return tag;
}
/*
* Locate the SAM Task Attr from struct scsi_cmnd *
*/
static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
	/*
	 * Map the SCSI tag message of the incoming command to the
	 * corresponding SAM task attribute; SIMPLE is the fallback.
	 */
	if (sc->device->tagged_supported) {
		if (sc->tag == HEAD_OF_QUEUE_TAG)
			return MSG_HEAD_TAG;
		if (sc->tag == ORDERED_QUEUE_TAG)
			return MSG_ORDERED_TAG;
	}
	return MSG_SIMPLE_TAG;
}
/*
 * Deferred half of tcm_loop_queuecommand(): validate the target port and
 * nexus, then hand the SCSI command to target core.  On any failure the
 * command is completed back to the SCSI midlayer with an error host byte.
 */
static void tcm_loop_submission_work(struct work_struct *work)
{
struct tcm_loop_cmd *tl_cmd =
container_of(work, struct tcm_loop_cmd, work);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
u32 sgl_bidi_count = 0;
int rc;
/* the HBA pointer is stashed in the Scsi_Host private data */
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
/*
* Ensure that this tl_tpg reference from the incoming sc->device->id
* has already been configured via tcm_loop_make_naa_tpg().
*/
if (!tl_tpg->tl_hba) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
" does not exist\n");
set_host_byte(sc, DID_ERROR);
goto out_done;
}
/* for BIDI commands pass the read (in) scatterlist separately */
if (scsi_bidi_cmnd(sc)) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
sgl_bidi_count = sdb->table.nents;
se_cmd->se_cmd_flags |= SCF_BIDI;
}
/* hand off to target core; it owns se_cmd from here on success */
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
sgl_bidi, sgl_bidi_count,
scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
if (rc < 0) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
return;
out_done:
/* complete the command immediately; it never reached target core */
sc->scsi_done(sc);
return;
}
/*
* ->queuecommand can be and usually is called from interrupt context, so
* defer the actual submission to a workqueue.
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	/* GFP_ATOMIC: may be called from interrupt context */
	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (tl_cmd) {
		tl_cmd->sc = sc;
		tl_cmd->sc_cmd_tag = sc->tag;
		/* defer actual submission to process context */
		INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
		queue_work(tcm_loop_workqueue, &tl_cmd->work);
		return 0;
	}

	pr_err("Unable to allocate struct tcm_loop_cmd\n");
	set_host_byte(sc, DID_ERROR);
	sc->scsi_done(sc);
	return 0;
}
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
/*
 * Build, submit and synchronously wait for a task management request
 * (ABORT_TASK or LUN_RESET) on the given TPG/nexus.  Returns a
 * TMR_* response code; TMR_FUNCTION_FAILED on allocation/setup errors.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
return ret;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
/* tcm_loop_queue_tm_rsp() wakes this up when the TMR completes */
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
if (rc < 0)
goto release;
/* only ABORT_TASK carries a reference task tag */
if (tmr == TMR_ABORT_TASK)
se_cmd->se_tmr_req->ref_task_tag = task;
/*
* Locate the underlying TCM struct se_lun
*/
if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
}
/*
* Queue the TMR to TCM Core and sleep waiting for
* tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
/*
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
ret = se_cmd->se_tmr_req->response;
release:
/* before se_cmd is initialized, tl_cmd must be freed by hand */
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
return ret;
}
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/* The HBA pointer is stashed in the Scsi_Host private data. */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);

	/* An active I_T nexus is required for any TMR. */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
			" active I_T Nexus\n");
		return FAILED;
	}

	/* The TargetID in sc->device->id selects the TPG. */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
			sc->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/* The HBA pointer is stashed in the Scsi_Host private data. */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);

	/* An active I_T nexus is required for any TMR. */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
			" active I_T Nexus\n");
		return FAILED;
	}

	/* The TargetID in sc->device->id selects the TPG. */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
			0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without"
			" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id.
	 *
	 * FIX: the address of an array element can never be NULL, so the
	 * original "if (tl_tpg)" check was always true and its FAILED
	 * branch was dead code; mark the port online unconditionally.
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
	return SUCCESS;
}
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
return 0;
}
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
	int qd = sd->host->cmd_per_lun;

	/* Untagged devices only get their depth adjusted. */
	if (!sd->tagged_supported) {
		scsi_adjust_queue_depth(sd, 0, qd);
		return 0;
	}

	scsi_activate_tcq(sd, sd->queue_depth);
	scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, qd);
	return 0;
}
/* SCSI midlayer host template for the emulated loopback initiator */
static struct scsi_host_template tcm_loop_driver_template = {
.show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.change_queue_type = tcm_loop_change_queue_type,
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
.cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.slave_configure = tcm_loop_slave_configure,
.module = THIS_MODULE,
};
/*
 * Bus probe: allocate and register a Scsi_Host for the emulated adapter
 * embedded in @dev, enabling full DIF/DIX protection support.
 */
static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
/*
* Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
*/
*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
/*
* Setup single ID, Channel and LUN for now..
*/
sh->max_id = 2;
sh->max_lun = 0;
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
/* advertise every DIF/DIX protection type with CRC guard tags */
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
scsi_host_set_prot(sh, host_prot);
scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
return 0;
}
static int tcm_loop_driver_remove(struct device *dev)
{
	/* Tear down the emulated host and drop the final host reference. */
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
	struct Scsi_Host *sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}
static void tcm_loop_release_adapter(struct device *dev)
{
	/* Device release callback: the last put frees the whole HBA. */
	kfree(to_tcm_loop_hba(dev));
}
/*
* Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
*/
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	/* Hang the adapter off the pseudo bus, under the root device. */
	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (!ret)
		return 0;

	pr_err("device_register() failed for"
		" tl_hba->dev: %d\n", ret);
	return -ENODEV;
}
/*
* Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
* tcm_loop SCSI bus.
*/
/*
 * Register the root device, pseudo bus and driver that back the emulated
 * tcm_loop SCSI bus, unwinding in reverse order on failure.
 */
static int tcm_loop_alloc_core_bus(void)
{
int ret;
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
pr_err("driver_register() failed for"
"tcm_loop_driverfs\n");
goto bus_unreg;
}
pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
/* error unwind: release in the reverse order of registration */
bus_unreg:
bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
root_device_unregister(tcm_loop_primary);
return ret;
}
static void tcm_loop_release_core_bus(void)
{
	/* Unwind in the reverse order of tcm_loop_alloc_core_bus(). */
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
{
	/* Fabric name exposed under /sys/kernel/config/target/. */
	return "loopback";
}
static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	/*
	 * tl_proto_id is fixed at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
	 * time from the protocol dependent prefix of the configfs group;
	 * dispatch to the matching target_core_fabric_lib.c helper.
	 */
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_SAS:
		break;
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}
	/* SAS is both the explicit case and the fallback emulation. */
	return sas_get_fabric_proto_ident(se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/* Hand back the NAA WWN configured for this emulated target port. */
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;

	return &tl_tpg->tl_hba->tl_wwn_address[0];
}
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * TPG tag used when forming the SCSI Name identifier in EVPD=1
	 * 0x83 to represent the SCSI Target Port.
	 */
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;

	return tl_tpg->tl_tpgt;
}
static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
	/* Fixed default queue depth for every loopback TPG. */
	return 1;
}
static u32 tcm_loop_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	/* Emit the PR TransportID in the emulated fabric's format. */
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
	case SCSI_PROTOCOL_SAS:
		break;
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}
	/* SAS is both the explicit case and the fallback emulation. */
	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
		format_code, buf);
}
/*
 * Return the length a formatted PR TransportID would occupy, using the
 * protocol-specific helper for this HBA's emulated fabric.  Unknown
 * protocol IDs are logged and handled as SAS.
 */
static u32 tcm_loop_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_FCP)
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_ISCSI)
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS)
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
	/* SCSI_PROTOCOL_SAS, and the fallback for unknown IDs */
	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
				format_code);
}
/*
* Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
* Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
*/
/*
 * Parse an incoming PROUT TransportID from @buf with the helper matching
 * this HBA's emulated protocol, returning the initiator port identifier
 * and filling *out_tid_len / *port_nexus_ptr.  Unknown protocol IDs are
 * logged and handled as SAS.
 */
static char *tcm_loop_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_FCP)
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_ISCSI)
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS)
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
	/* SCSI_PROTOCOL_SAS, and the fallback for unknown IDs */
	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
				port_nexus_ptr);
}
/*
* Returning (1) here allows for target_core_mod struct se_node_acl to be generated
* based upon the incoming fabric dependent SCSI Initiator Port
*/
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	/* demo-mode on: node ACLs are generated for any initiator port */
	return 1;
}
/* Demo-mode node ACLs are not cached across sessions for tcm_loop. */
static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}
/*
* Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
* local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
*/
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	/* 0 == demo-mode LUNs are READ-WRITE, never write-protected */
	return 0;
}
/*
* Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
* never be called for TCM_Loop by target_core_fabric_configfs.c code.
* It has been added here as a nop for target_fabric_tf_ops_check()
*/
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	/* nop: explicit ACLs/MappedLUNs are never used by TCM_Loop */
	return 0;
}
/*
 * Allocate a fabric-private node ACL wrapper and hand back the embedded
 * generic struct se_node_acl.  Returns NULL on allocation failure.
 */
static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_loop_nacl *nacl;

	nacl = kzalloc(sizeof(*nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_loop_nacl\n");
		return NULL;
	}
	return &nacl->se_node_acl;
}
/*
 * Free the fabric-private ACL wrapper allocated by
 * tcm_loop_tpg_alloc_fabric_acl(), recovered via container_of().
 */
static void tcm_loop_tpg_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
				struct tcm_loop_nacl, se_node_acl);
	kfree(tl_nacl);
}
/* Single virtual instance: TPG instance index is always 1. */
static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
/* Single I_T nexus per HBA: session index is always 1. */
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}
/* nop: no fabric-specific node attributes exist for tcm_loop. */
static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}
/* Return the tag saved from the originating struct scsi_cmnd. */
static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);
	return tl_cmd->sc_cmd_tag;
}
/* Return the fabric-private command state tracked in tcm_loop_cmd. */
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);
	return tl_cmd->sc_cmd_state;
}
/* nop: no fabric-side session shutdown work is needed; always 0. */
static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}
/*
 * nop: session release for tcm_loop is driven from tcm_loop_drop_nexus()
 * via transport_deregister_session(), so there is nothing to do here.
 *
 * Note: the stray ';' after the closing brace (an empty external
 * declaration, flagged under -Wpedantic) has been removed.
 */
static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
}
/* WRITE data is already mapped; kick the command straight to execution. */
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
/* nop: write-pending state is never queried for tcm_loop; always 0. */
static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
/*
 * Complete a READ back to the SCSI midlayer: mark GOOD status, propagate
 * any over/underflow residual, and invoke the midlayer done callback.
 */
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	/* report a residual when the data transfer over/underflowed */
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
/*
 * Complete a command's SCSI status back to the midlayer, copying sense
 * data into the scsi_cmnd when TCM has any, then calling scsi_done().
 */
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
	/* transport or emulated sense present: surface CHECK CONDITION */
	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;
	set_host_byte(sc, DID_OK);
	/* report a residual when the data transfer over/underflowed */
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
/* Signal TMR completion to the SCSI EH thread blocked on tl_tmr_wait. */
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}
/* Human-readable name of this HBA's emulated protocol, for log messages. */
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_SAS)
		return "SAS";
	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_FCP)
		return "FCP";
	if (tl_hba->tl_proto_id == SCSI_PROTOCOL_ISCSI)
		return "iSCSI";
	return "Unknown";
}
/* Start items for tcm_loop_port_cit */
/*
 * configfs post-link callback: bump the TPG port count and surface the
 * new LUN to the SCSI midlayer as a scsi_device on the emulated host.
 */
static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	atomic_inc(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic_inc();
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}
/*
 * configfs pre-unlink callback: look up and remove the scsi_device that
 * tcm_loop_port_link() created, then drop the TPG port count.
 */
static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;
	/* scsi_device_lookup() takes a reference; dropped below */
	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);
	atomic_dec(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic_dec();
	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
/* Start items for tcm_loop_nexus_cit */
/*
 * Create and register the single I_T nexus (session) for this HBA using
 * the initiator port name parsed from configfs.  Returns 0 on success,
 * -EEXIST if a nexus already exists, -ENOMEM on allocation/ACL failure,
 * or the error from transport_init_session().
 */
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;
	if (tl_tpg->tl_hba->tl_nexus) {
		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;
	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session();
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call with generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		/* ret is still -ENOMEM from its initializer here */
		goto out;
	}
	/*
	 * Now, register the SAS I_T Nexus as active with the call to
	 * transport_register_session()
	 */
	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_hba->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;
out:
	/* error path: free the nexus wrapper; session already released */
	kfree(tl_nexus);
	return ret;
}
/*
 * Tear down the HBA's I_T nexus created by tcm_loop_make_nexus().
 * Refuses with -EPERM while any TPG port (LUN link) is still active;
 * returns -ENODEV when no HBA/nexus/session exists.
 */
static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
	if (!tl_hba)
		return -ENODEV;
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;
	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;
	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}
	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_hba->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}
/* End items for tcm_loop_nexus_cit */
/*
 * configfs "nexus" show handler: print the active initiator port name,
 * or return -ENODEV when no nexus has been established yet.
 */
static ssize_t tcm_loop_tpg_show_nexus(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;
	tl_nexus = tl_tpg->tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;
	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);
	return ret;
}
/*
 * configfs "nexus" store handler.  Writing "NULL" drops the active
 * nexus; otherwise the written WWN must carry a prefix ("naa.", "fc."
 * or "iqn.") matching the HBA's protocol, after which a new nexus is
 * created via tcm_loop_make_nexus().
 */
static ssize_t tcm_loop_tpg_store_nexus(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	/* snprintf() guarantees i_port is NUL-terminated */
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';
	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;
	return count;
}
/* Binds the show/store pair above as the TPG "nexus" configfs attribute */
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
/*
 * configfs "transport_status" show handler: report the administrative
 * state as "online" or "offline", or -EINVAL for an unknown state.
 */
static ssize_t tcm_loop_tpg_show_transport_status(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_ONLINE)
		return snprintf(page, PAGE_SIZE, "online\n");
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE)
		return snprintf(page, PAGE_SIZE, "offline\n");
	return -EINVAL;
}
/*
 * configfs "transport_status" store handler: accepts "online" or
 * "offline" (prefix match via strncmp) and updates the TPG state.
 */
static ssize_t tcm_loop_tpg_store_transport_status(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		return count;
	}
	return -EINVAL;
}
/* Binds the show/store pair above as the "transport_status" attribute */
TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
/* configfs attributes exposed under each tpgt_N directory */
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_nexus.attr,
	&tcm_loop_tpg_transport_status.attr,
	NULL,
};
/* Start items for tcm_loop_naa_cit */
/*
 * configfs callback creating a "tpgt_N" portal group under an emulated
 * HBA WWN.  Parses the tpgt index out of @name, bounds-checks it against
 * TL_TPGS_PER_HBA, and registers the embedded se_portal_group with the
 * target core.  Returns ERR_PTR() on failure.
 *
 * Fix: propagate the actual error code from core_tpg_register() instead
 * of unconditionally returning ERR_PTR(-ENOMEM).
 */
static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	char *tpgt_str, *end_ptr;
	int ret;
	unsigned short int tpgt;
	tpgt_str = strstr(name, "tpgt_");
	if (!tpgt_str) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	tpgt_str += 5; /* Skip ahead of "tpgt_" */
	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
	 */
	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(ret);
	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}
/*
 * configfs callback removing a "tpgt_N" portal group: drops any active
 * I_T nexus, deregisters the TPG from the target core, and clears the
 * per-TPG bookkeeping.
 */
static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;
	/* save for the final pr_debug after the fields are cleared */
	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual SAS link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);
	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;
	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */
/*
 * configfs callback creating an emulated HBA from a WWN directory name.
 * The "naa." / "fc." / "iqn." prefix selects the emulated protocol; the
 * (possibly prefix-stripped) name is recorded as the target port WWN and
 * a virtual Scsi_Host is brought up via tcm_loop_setup_hba_bus().
 * Returns ERR_PTR() on failure.
 */
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;
	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;
	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}
/*
 * configfs callback removing an emulated HBA.  Unregistering the device
 * triggers the release path that frees tl_hba, so nothing is freed here.
 */
static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);
	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
	 * release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}
/* Start items for tcm_loop_cit */
/* configfs "version" show handler: prints the module version string. */
static ssize_t tcm_loop_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}
/* Binds the handler above as a read-only "version" configfs attribute */
TF_WWN_ATTR_RO(tcm_loop, version);
/* configfs attributes exposed at the fabric WWN level */
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_version.attr,
	NULL,
};
/* End items for tcm_loop_cit */
/*
 * Register the "loopback" fabric with the target core: allocate the
 * fabric configfs context, wire up every tf_ops callback to the local
 * implementations above, install the attribute tables, and publish the
 * fabric.  Returns 0 on success, a negative errno / -1 on failure.
 */
static int tcm_loop_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;
	/*
	 * Set the TCM Loop HBA counter to zero
	 */
	tcm_loop_hba_no_cnt = 0;
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
	if (IS_ERR(fabric)) {
		pr_err("tcm_loop_register_configfs() failed!\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup the fabric API of function pointers used by target_core_mod
	 */
	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
	fabric->tf_ops.tpg_get_pr_transport_id_len =
					&tcm_loop_get_pr_transport_id_len;
	fabric->tf_ops.tpg_parse_pr_out_transport_id =
					&tcm_loop_parse_pr_out_transport_id;
	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
	fabric->tf_ops.tpg_check_demo_mode_cache =
					&tcm_loop_check_demo_mode_cache;
	fabric->tf_ops.tpg_check_demo_mode_write_protect =
					&tcm_loop_check_demo_mode_write_protect;
	fabric->tf_ops.tpg_check_prod_mode_write_protect =
					&tcm_loop_check_prod_mode_write_protect;
	/*
	 * The TCM loopback fabric module runs in demo-mode to a local
	 * virtual SCSI device, so fabric dependent initator ACLs are
	 * not required.
	 */
	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
	fabric->tf_ops.tpg_release_fabric_acl =
					&tcm_loop_tpg_release_fabric_acl;
	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
	/*
	 * Used for setting up remaining TCM resources in process context
	 */
	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
	fabric->tf_ops.close_session = &tcm_loop_close_session;
	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
	fabric->tf_ops.sess_get_initiator_sid = NULL;
	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
	/*
	 * Not used for TCM loopback
	 */
	fabric->tf_ops.set_default_node_attributes =
					&tcm_loop_set_default_node_attributes;
	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
	/*
	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
	 */
	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
	/*
	 * fabric_post_link() and fabric_pre_unlink() are used for
	 * registration and release of TCM Loop Virtual SCSI LUNs.
	 */
	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
	fabric->tf_ops.fabric_make_np = NULL;
	fabric->tf_ops.fabric_drop_np = NULL;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	/*
	 * Once fabric->tf_ops has been setup, now register the fabric for
	 * use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() for"
				" TCM_Loop failed!\n");
		target_fabric_configfs_free(fabric);
		return -1;
	}
	/*
	 * Setup our local pointer to *fabric.
	 */
	tcm_loop_fabric_configfs = fabric;
	pr_debug("TCM_LOOP[0] - Set fabric ->"
			" tcm_loop_fabric_configfs\n");
	return 0;
}
/*
 * Deregister the fabric from the target core and clear the module-level
 * pointer.  Safe to call when registration never happened.
 */
static void tcm_loop_deregister_configfs(void)
{
	if (!tcm_loop_fabric_configfs)
		return;
	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
	tcm_loop_fabric_configfs = NULL;
	pr_debug("TCM_LOOP[0] - Cleared"
			" tcm_loop_fabric_configfs\n");
}
/*
 * Module init: create the workqueue, the per-command kmem cache, the
 * virtual LLD bus, and the configfs fabric, unwinding each on failure
 * via the goto-cleanup chain below.
 */
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;
	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;
	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}
	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;
	ret = tcm_loop_register_configfs();
	if (ret)
		goto out_release_core_bus;
	return 0;
out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}
/* Module exit: tear down init's resources in reverse creation order. */
static void __exit tcm_loop_fabric_exit(void)
{
	tcm_loop_deregister_configfs();
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);
| gpl-2.0 |
Lihis/android_huawei_kernel | drivers/staging/msm/mdp_hw_init.c | 433 | 24965 | /* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp.h"
/* mdp primary csc limit vector */
/*
 * Clamp limits {0x10, 0xeb, 0x10, 0xf0} = {16, 235, 16, 240}.
 * NOTE(review): presumably BT.601 video-range luma/chroma min/max
 * pairs — confirm against the MDP hardware documentation.
 */
uint32 mdp_plv[] = { 0x10, 0xeb, 0x10, 0xf0 };
/* Color Coefficient matrix for YUV -> RGB */
/*
 * 3x3 conversion matrix (row-major) followed by three bias values.
 * NOTE(review): coefficients appear to be fixed-point with negative
 * values encoded as 16-bit two's complement (e.g. 0xff38) — confirm
 * the exact fixed-point format against the MDP CSC register spec.
 * The bias values differ for MDP 3.1 hardware.
 */
struct mdp_ccs mdp_ccs_yuv2rgb = {
	MDP_CCS_YUV2RGB,
	{
		0x254,
		0x000,
		0x331,
		0x254,
		0xff38,
		0xfe61,
		0x254,
		0x409,
		0x000,
	},
	{
#ifdef CONFIG_FB_MSM_MDP31
		0x1f0,
		0x180,
		0x180
#else
		0x10,
		0x80,
		0x80
#endif
	}
};
/* Color Coefficient matrix for RGB -> YUV */
/*
 * Inverse-direction CSC table.  NOTE(review): unlike mdp_ccs_yuv2rgb,
 * the bias member is only initialized under CONFIG_FB_MSM_MDP31; on
 * other configs it is zero-initialized by static storage — verify this
 * is intentional for non-MDP31 hardware.
 */
struct mdp_ccs mdp_ccs_rgb2yuv = {
	MDP_CCS_RGB2YUV,
	{
		0x83,
		0x102,
		0x32,
		0xffb5,
		0xff6c,
		0xe1,
		0xe1,
		0xff45,
		0xffdc,
	},
#ifdef CONFIG_FB_MSM_MDP31
	{
		0x10,
		0x80,
		0x80
	}
#endif
};
/*
 * Program the MDP lookup tables with fixed grayscale ramps.
 * Each write stores one 0xRRGGBB entry with R == G == B; both tables are
 * monotonically non-decreasing, so this is presumably a gamma/response
 * curve -- TODO confirm against the MDP LUT register documentation.
 * Values and register order are hardware-specific; do not reorder.
 */
static void mdp_load_lut_param(void)
{
/* LUT bank 1: 256 word entries at MDP_BASE + 0x40800 .. 0x40bfc */
outpdw(MDP_BASE + 0x40800, 0x0);
outpdw(MDP_BASE + 0x40804, 0x151515);
outpdw(MDP_BASE + 0x40808, 0x1d1d1d);
outpdw(MDP_BASE + 0x4080c, 0x232323);
outpdw(MDP_BASE + 0x40810, 0x272727);
outpdw(MDP_BASE + 0x40814, 0x2b2b2b);
outpdw(MDP_BASE + 0x40818, 0x2f2f2f);
outpdw(MDP_BASE + 0x4081c, 0x333333);
outpdw(MDP_BASE + 0x40820, 0x363636);
outpdw(MDP_BASE + 0x40824, 0x393939);
outpdw(MDP_BASE + 0x40828, 0x3b3b3b);
outpdw(MDP_BASE + 0x4082c, 0x3e3e3e);
outpdw(MDP_BASE + 0x40830, 0x404040);
outpdw(MDP_BASE + 0x40834, 0x434343);
outpdw(MDP_BASE + 0x40838, 0x454545);
outpdw(MDP_BASE + 0x4083c, 0x474747);
outpdw(MDP_BASE + 0x40840, 0x494949);
outpdw(MDP_BASE + 0x40844, 0x4b4b4b);
outpdw(MDP_BASE + 0x40848, 0x4d4d4d);
outpdw(MDP_BASE + 0x4084c, 0x4f4f4f);
outpdw(MDP_BASE + 0x40850, 0x515151);
outpdw(MDP_BASE + 0x40854, 0x535353);
outpdw(MDP_BASE + 0x40858, 0x555555);
outpdw(MDP_BASE + 0x4085c, 0x565656);
outpdw(MDP_BASE + 0x40860, 0x585858);
outpdw(MDP_BASE + 0x40864, 0x5a5a5a);
outpdw(MDP_BASE + 0x40868, 0x5b5b5b);
outpdw(MDP_BASE + 0x4086c, 0x5d5d5d);
outpdw(MDP_BASE + 0x40870, 0x5e5e5e);
outpdw(MDP_BASE + 0x40874, 0x606060);
outpdw(MDP_BASE + 0x40878, 0x616161);
outpdw(MDP_BASE + 0x4087c, 0x636363);
outpdw(MDP_BASE + 0x40880, 0x646464);
outpdw(MDP_BASE + 0x40884, 0x666666);
outpdw(MDP_BASE + 0x40888, 0x676767);
outpdw(MDP_BASE + 0x4088c, 0x686868);
outpdw(MDP_BASE + 0x40890, 0x6a6a6a);
outpdw(MDP_BASE + 0x40894, 0x6b6b6b);
outpdw(MDP_BASE + 0x40898, 0x6c6c6c);
outpdw(MDP_BASE + 0x4089c, 0x6e6e6e);
outpdw(MDP_BASE + 0x408a0, 0x6f6f6f);
outpdw(MDP_BASE + 0x408a4, 0x707070);
outpdw(MDP_BASE + 0x408a8, 0x717171);
outpdw(MDP_BASE + 0x408ac, 0x727272);
outpdw(MDP_BASE + 0x408b0, 0x747474);
outpdw(MDP_BASE + 0x408b4, 0x757575);
outpdw(MDP_BASE + 0x408b8, 0x767676);
outpdw(MDP_BASE + 0x408bc, 0x777777);
outpdw(MDP_BASE + 0x408c0, 0x787878);
outpdw(MDP_BASE + 0x408c4, 0x797979);
outpdw(MDP_BASE + 0x408c8, 0x7a7a7a);
outpdw(MDP_BASE + 0x408cc, 0x7c7c7c);
outpdw(MDP_BASE + 0x408d0, 0x7d7d7d);
outpdw(MDP_BASE + 0x408d4, 0x7e7e7e);
outpdw(MDP_BASE + 0x408d8, 0x7f7f7f);
outpdw(MDP_BASE + 0x408dc, 0x808080);
outpdw(MDP_BASE + 0x408e0, 0x818181);
outpdw(MDP_BASE + 0x408e4, 0x828282);
outpdw(MDP_BASE + 0x408e8, 0x838383);
outpdw(MDP_BASE + 0x408ec, 0x848484);
outpdw(MDP_BASE + 0x408f0, 0x858585);
outpdw(MDP_BASE + 0x408f4, 0x868686);
outpdw(MDP_BASE + 0x408f8, 0x878787);
outpdw(MDP_BASE + 0x408fc, 0x888888);
outpdw(MDP_BASE + 0x40900, 0x898989);
outpdw(MDP_BASE + 0x40904, 0x8a8a8a);
outpdw(MDP_BASE + 0x40908, 0x8b8b8b);
outpdw(MDP_BASE + 0x4090c, 0x8c8c8c);
outpdw(MDP_BASE + 0x40910, 0x8d8d8d);
outpdw(MDP_BASE + 0x40914, 0x8e8e8e);
outpdw(MDP_BASE + 0x40918, 0x8f8f8f);
outpdw(MDP_BASE + 0x4091c, 0x8f8f8f);
outpdw(MDP_BASE + 0x40920, 0x909090);
outpdw(MDP_BASE + 0x40924, 0x919191);
outpdw(MDP_BASE + 0x40928, 0x929292);
outpdw(MDP_BASE + 0x4092c, 0x939393);
outpdw(MDP_BASE + 0x40930, 0x949494);
outpdw(MDP_BASE + 0x40934, 0x959595);
outpdw(MDP_BASE + 0x40938, 0x969696);
outpdw(MDP_BASE + 0x4093c, 0x969696);
outpdw(MDP_BASE + 0x40940, 0x979797);
outpdw(MDP_BASE + 0x40944, 0x989898);
outpdw(MDP_BASE + 0x40948, 0x999999);
outpdw(MDP_BASE + 0x4094c, 0x9a9a9a);
outpdw(MDP_BASE + 0x40950, 0x9b9b9b);
outpdw(MDP_BASE + 0x40954, 0x9c9c9c);
outpdw(MDP_BASE + 0x40958, 0x9c9c9c);
outpdw(MDP_BASE + 0x4095c, 0x9d9d9d);
outpdw(MDP_BASE + 0x40960, 0x9e9e9e);
outpdw(MDP_BASE + 0x40964, 0x9f9f9f);
outpdw(MDP_BASE + 0x40968, 0xa0a0a0);
outpdw(MDP_BASE + 0x4096c, 0xa0a0a0);
outpdw(MDP_BASE + 0x40970, 0xa1a1a1);
outpdw(MDP_BASE + 0x40974, 0xa2a2a2);
outpdw(MDP_BASE + 0x40978, 0xa3a3a3);
outpdw(MDP_BASE + 0x4097c, 0xa4a4a4);
outpdw(MDP_BASE + 0x40980, 0xa4a4a4);
outpdw(MDP_BASE + 0x40984, 0xa5a5a5);
outpdw(MDP_BASE + 0x40988, 0xa6a6a6);
outpdw(MDP_BASE + 0x4098c, 0xa7a7a7);
outpdw(MDP_BASE + 0x40990, 0xa7a7a7);
outpdw(MDP_BASE + 0x40994, 0xa8a8a8);
outpdw(MDP_BASE + 0x40998, 0xa9a9a9);
outpdw(MDP_BASE + 0x4099c, 0xaaaaaa);
outpdw(MDP_BASE + 0x409a0, 0xaaaaaa);
outpdw(MDP_BASE + 0x409a4, 0xababab);
outpdw(MDP_BASE + 0x409a8, 0xacacac);
outpdw(MDP_BASE + 0x409ac, 0xadadad);
outpdw(MDP_BASE + 0x409b0, 0xadadad);
outpdw(MDP_BASE + 0x409b4, 0xaeaeae);
outpdw(MDP_BASE + 0x409b8, 0xafafaf);
outpdw(MDP_BASE + 0x409bc, 0xafafaf);
outpdw(MDP_BASE + 0x409c0, 0xb0b0b0);
outpdw(MDP_BASE + 0x409c4, 0xb1b1b1);
outpdw(MDP_BASE + 0x409c8, 0xb2b2b2);
outpdw(MDP_BASE + 0x409cc, 0xb2b2b2);
outpdw(MDP_BASE + 0x409d0, 0xb3b3b3);
outpdw(MDP_BASE + 0x409d4, 0xb4b4b4);
outpdw(MDP_BASE + 0x409d8, 0xb4b4b4);
outpdw(MDP_BASE + 0x409dc, 0xb5b5b5);
outpdw(MDP_BASE + 0x409e0, 0xb6b6b6);
outpdw(MDP_BASE + 0x409e4, 0xb6b6b6);
outpdw(MDP_BASE + 0x409e8, 0xb7b7b7);
outpdw(MDP_BASE + 0x409ec, 0xb8b8b8);
outpdw(MDP_BASE + 0x409f0, 0xb8b8b8);
outpdw(MDP_BASE + 0x409f4, 0xb9b9b9);
outpdw(MDP_BASE + 0x409f8, 0xbababa);
outpdw(MDP_BASE + 0x409fc, 0xbababa);
outpdw(MDP_BASE + 0x40a00, 0xbbbbbb);
outpdw(MDP_BASE + 0x40a04, 0xbcbcbc);
outpdw(MDP_BASE + 0x40a08, 0xbcbcbc);
outpdw(MDP_BASE + 0x40a0c, 0xbdbdbd);
outpdw(MDP_BASE + 0x40a10, 0xbebebe);
outpdw(MDP_BASE + 0x40a14, 0xbebebe);
outpdw(MDP_BASE + 0x40a18, 0xbfbfbf);
outpdw(MDP_BASE + 0x40a1c, 0xc0c0c0);
outpdw(MDP_BASE + 0x40a20, 0xc0c0c0);
outpdw(MDP_BASE + 0x40a24, 0xc1c1c1);
outpdw(MDP_BASE + 0x40a28, 0xc1c1c1);
outpdw(MDP_BASE + 0x40a2c, 0xc2c2c2);
outpdw(MDP_BASE + 0x40a30, 0xc3c3c3);
outpdw(MDP_BASE + 0x40a34, 0xc3c3c3);
outpdw(MDP_BASE + 0x40a38, 0xc4c4c4);
outpdw(MDP_BASE + 0x40a3c, 0xc5c5c5);
outpdw(MDP_BASE + 0x40a40, 0xc5c5c5);
outpdw(MDP_BASE + 0x40a44, 0xc6c6c6);
outpdw(MDP_BASE + 0x40a48, 0xc6c6c6);
outpdw(MDP_BASE + 0x40a4c, 0xc7c7c7);
outpdw(MDP_BASE + 0x40a50, 0xc8c8c8);
outpdw(MDP_BASE + 0x40a54, 0xc8c8c8);
outpdw(MDP_BASE + 0x40a58, 0xc9c9c9);
outpdw(MDP_BASE + 0x40a5c, 0xc9c9c9);
outpdw(MDP_BASE + 0x40a60, 0xcacaca);
outpdw(MDP_BASE + 0x40a64, 0xcbcbcb);
outpdw(MDP_BASE + 0x40a68, 0xcbcbcb);
outpdw(MDP_BASE + 0x40a6c, 0xcccccc);
outpdw(MDP_BASE + 0x40a70, 0xcccccc);
outpdw(MDP_BASE + 0x40a74, 0xcdcdcd);
outpdw(MDP_BASE + 0x40a78, 0xcecece);
outpdw(MDP_BASE + 0x40a7c, 0xcecece);
outpdw(MDP_BASE + 0x40a80, 0xcfcfcf);
outpdw(MDP_BASE + 0x40a84, 0xcfcfcf);
outpdw(MDP_BASE + 0x40a88, 0xd0d0d0);
outpdw(MDP_BASE + 0x40a8c, 0xd0d0d0);
outpdw(MDP_BASE + 0x40a90, 0xd1d1d1);
outpdw(MDP_BASE + 0x40a94, 0xd2d2d2);
outpdw(MDP_BASE + 0x40a98, 0xd2d2d2);
outpdw(MDP_BASE + 0x40a9c, 0xd3d3d3);
outpdw(MDP_BASE + 0x40aa0, 0xd3d3d3);
outpdw(MDP_BASE + 0x40aa4, 0xd4d4d4);
outpdw(MDP_BASE + 0x40aa8, 0xd4d4d4);
outpdw(MDP_BASE + 0x40aac, 0xd5d5d5);
outpdw(MDP_BASE + 0x40ab0, 0xd6d6d6);
outpdw(MDP_BASE + 0x40ab4, 0xd6d6d6);
outpdw(MDP_BASE + 0x40ab8, 0xd7d7d7);
outpdw(MDP_BASE + 0x40abc, 0xd7d7d7);
outpdw(MDP_BASE + 0x40ac0, 0xd8d8d8);
outpdw(MDP_BASE + 0x40ac4, 0xd8d8d8);
outpdw(MDP_BASE + 0x40ac8, 0xd9d9d9);
outpdw(MDP_BASE + 0x40acc, 0xd9d9d9);
outpdw(MDP_BASE + 0x40ad0, 0xdadada);
outpdw(MDP_BASE + 0x40ad4, 0xdbdbdb);
outpdw(MDP_BASE + 0x40ad8, 0xdbdbdb);
outpdw(MDP_BASE + 0x40adc, 0xdcdcdc);
outpdw(MDP_BASE + 0x40ae0, 0xdcdcdc);
outpdw(MDP_BASE + 0x40ae4, 0xdddddd);
outpdw(MDP_BASE + 0x40ae8, 0xdddddd);
outpdw(MDP_BASE + 0x40aec, 0xdedede);
outpdw(MDP_BASE + 0x40af0, 0xdedede);
outpdw(MDP_BASE + 0x40af4, 0xdfdfdf);
outpdw(MDP_BASE + 0x40af8, 0xdfdfdf);
outpdw(MDP_BASE + 0x40afc, 0xe0e0e0);
outpdw(MDP_BASE + 0x40b00, 0xe0e0e0);
outpdw(MDP_BASE + 0x40b04, 0xe1e1e1);
outpdw(MDP_BASE + 0x40b08, 0xe1e1e1);
outpdw(MDP_BASE + 0x40b0c, 0xe2e2e2);
outpdw(MDP_BASE + 0x40b10, 0xe3e3e3);
outpdw(MDP_BASE + 0x40b14, 0xe3e3e3);
outpdw(MDP_BASE + 0x40b18, 0xe4e4e4);
outpdw(MDP_BASE + 0x40b1c, 0xe4e4e4);
outpdw(MDP_BASE + 0x40b20, 0xe5e5e5);
outpdw(MDP_BASE + 0x40b24, 0xe5e5e5);
outpdw(MDP_BASE + 0x40b28, 0xe6e6e6);
outpdw(MDP_BASE + 0x40b2c, 0xe6e6e6);
outpdw(MDP_BASE + 0x40b30, 0xe7e7e7);
outpdw(MDP_BASE + 0x40b34, 0xe7e7e7);
outpdw(MDP_BASE + 0x40b38, 0xe8e8e8);
outpdw(MDP_BASE + 0x40b3c, 0xe8e8e8);
outpdw(MDP_BASE + 0x40b40, 0xe9e9e9);
outpdw(MDP_BASE + 0x40b44, 0xe9e9e9);
outpdw(MDP_BASE + 0x40b48, 0xeaeaea);
outpdw(MDP_BASE + 0x40b4c, 0xeaeaea);
outpdw(MDP_BASE + 0x40b50, 0xebebeb);
outpdw(MDP_BASE + 0x40b54, 0xebebeb);
outpdw(MDP_BASE + 0x40b58, 0xececec);
outpdw(MDP_BASE + 0x40b5c, 0xececec);
outpdw(MDP_BASE + 0x40b60, 0xededed);
outpdw(MDP_BASE + 0x40b64, 0xededed);
outpdw(MDP_BASE + 0x40b68, 0xeeeeee);
outpdw(MDP_BASE + 0x40b6c, 0xeeeeee);
outpdw(MDP_BASE + 0x40b70, 0xefefef);
outpdw(MDP_BASE + 0x40b74, 0xefefef);
outpdw(MDP_BASE + 0x40b78, 0xf0f0f0);
outpdw(MDP_BASE + 0x40b7c, 0xf0f0f0);
outpdw(MDP_BASE + 0x40b80, 0xf1f1f1);
outpdw(MDP_BASE + 0x40b84, 0xf1f1f1);
outpdw(MDP_BASE + 0x40b88, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b8c, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b90, 0xf2f2f2);
outpdw(MDP_BASE + 0x40b94, 0xf3f3f3);
outpdw(MDP_BASE + 0x40b98, 0xf3f3f3);
outpdw(MDP_BASE + 0x40b9c, 0xf4f4f4);
outpdw(MDP_BASE + 0x40ba0, 0xf4f4f4);
outpdw(MDP_BASE + 0x40ba4, 0xf5f5f5);
outpdw(MDP_BASE + 0x40ba8, 0xf5f5f5);
outpdw(MDP_BASE + 0x40bac, 0xf6f6f6);
outpdw(MDP_BASE + 0x40bb0, 0xf6f6f6);
outpdw(MDP_BASE + 0x40bb4, 0xf7f7f7);
outpdw(MDP_BASE + 0x40bb8, 0xf7f7f7);
outpdw(MDP_BASE + 0x40bbc, 0xf8f8f8);
outpdw(MDP_BASE + 0x40bc0, 0xf8f8f8);
outpdw(MDP_BASE + 0x40bc4, 0xf9f9f9);
outpdw(MDP_BASE + 0x40bc8, 0xf9f9f9);
outpdw(MDP_BASE + 0x40bcc, 0xfafafa);
outpdw(MDP_BASE + 0x40bd0, 0xfafafa);
outpdw(MDP_BASE + 0x40bd4, 0xfafafa);
outpdw(MDP_BASE + 0x40bd8, 0xfbfbfb);
outpdw(MDP_BASE + 0x40bdc, 0xfbfbfb);
outpdw(MDP_BASE + 0x40be0, 0xfcfcfc);
outpdw(MDP_BASE + 0x40be4, 0xfcfcfc);
outpdw(MDP_BASE + 0x40be8, 0xfdfdfd);
outpdw(MDP_BASE + 0x40bec, 0xfdfdfd);
outpdw(MDP_BASE + 0x40bf0, 0xfefefe);
outpdw(MDP_BASE + 0x40bf4, 0xfefefe);
outpdw(MDP_BASE + 0x40bf8, 0xffffff);
outpdw(MDP_BASE + 0x40bfc, 0xffffff);
/* LUT bank 2: 256 word entries at MDP_BASE + 0x40c00 .. 0x40ffc
 * (steeper ramp -- starts flat at zero, ends at full white). */
outpdw(MDP_BASE + 0x40c00, 0x0);
outpdw(MDP_BASE + 0x40c04, 0x0);
outpdw(MDP_BASE + 0x40c08, 0x0);
outpdw(MDP_BASE + 0x40c0c, 0x0);
outpdw(MDP_BASE + 0x40c10, 0x0);
outpdw(MDP_BASE + 0x40c14, 0x0);
outpdw(MDP_BASE + 0x40c18, 0x0);
outpdw(MDP_BASE + 0x40c1c, 0x0);
outpdw(MDP_BASE + 0x40c20, 0x0);
outpdw(MDP_BASE + 0x40c24, 0x0);
outpdw(MDP_BASE + 0x40c28, 0x0);
outpdw(MDP_BASE + 0x40c2c, 0x0);
outpdw(MDP_BASE + 0x40c30, 0x0);
outpdw(MDP_BASE + 0x40c34, 0x0);
outpdw(MDP_BASE + 0x40c38, 0x0);
outpdw(MDP_BASE + 0x40c3c, 0x0);
outpdw(MDP_BASE + 0x40c40, 0x10101);
outpdw(MDP_BASE + 0x40c44, 0x10101);
outpdw(MDP_BASE + 0x40c48, 0x10101);
outpdw(MDP_BASE + 0x40c4c, 0x10101);
outpdw(MDP_BASE + 0x40c50, 0x10101);
outpdw(MDP_BASE + 0x40c54, 0x10101);
outpdw(MDP_BASE + 0x40c58, 0x10101);
outpdw(MDP_BASE + 0x40c5c, 0x10101);
outpdw(MDP_BASE + 0x40c60, 0x10101);
outpdw(MDP_BASE + 0x40c64, 0x10101);
outpdw(MDP_BASE + 0x40c68, 0x20202);
outpdw(MDP_BASE + 0x40c6c, 0x20202);
outpdw(MDP_BASE + 0x40c70, 0x20202);
outpdw(MDP_BASE + 0x40c74, 0x20202);
outpdw(MDP_BASE + 0x40c78, 0x20202);
outpdw(MDP_BASE + 0x40c7c, 0x20202);
outpdw(MDP_BASE + 0x40c80, 0x30303);
outpdw(MDP_BASE + 0x40c84, 0x30303);
outpdw(MDP_BASE + 0x40c88, 0x30303);
outpdw(MDP_BASE + 0x40c8c, 0x30303);
outpdw(MDP_BASE + 0x40c90, 0x30303);
outpdw(MDP_BASE + 0x40c94, 0x40404);
outpdw(MDP_BASE + 0x40c98, 0x40404);
outpdw(MDP_BASE + 0x40c9c, 0x40404);
outpdw(MDP_BASE + 0x40ca0, 0x40404);
outpdw(MDP_BASE + 0x40ca4, 0x40404);
outpdw(MDP_BASE + 0x40ca8, 0x50505);
outpdw(MDP_BASE + 0x40cac, 0x50505);
outpdw(MDP_BASE + 0x40cb0, 0x50505);
outpdw(MDP_BASE + 0x40cb4, 0x50505);
outpdw(MDP_BASE + 0x40cb8, 0x60606);
outpdw(MDP_BASE + 0x40cbc, 0x60606);
outpdw(MDP_BASE + 0x40cc0, 0x60606);
outpdw(MDP_BASE + 0x40cc4, 0x70707);
outpdw(MDP_BASE + 0x40cc8, 0x70707);
outpdw(MDP_BASE + 0x40ccc, 0x70707);
outpdw(MDP_BASE + 0x40cd0, 0x70707);
outpdw(MDP_BASE + 0x40cd4, 0x80808);
outpdw(MDP_BASE + 0x40cd8, 0x80808);
outpdw(MDP_BASE + 0x40cdc, 0x80808);
outpdw(MDP_BASE + 0x40ce0, 0x90909);
outpdw(MDP_BASE + 0x40ce4, 0x90909);
outpdw(MDP_BASE + 0x40ce8, 0xa0a0a);
outpdw(MDP_BASE + 0x40cec, 0xa0a0a);
outpdw(MDP_BASE + 0x40cf0, 0xa0a0a);
outpdw(MDP_BASE + 0x40cf4, 0xb0b0b);
outpdw(MDP_BASE + 0x40cf8, 0xb0b0b);
outpdw(MDP_BASE + 0x40cfc, 0xb0b0b);
outpdw(MDP_BASE + 0x40d00, 0xc0c0c);
outpdw(MDP_BASE + 0x40d04, 0xc0c0c);
outpdw(MDP_BASE + 0x40d08, 0xd0d0d);
outpdw(MDP_BASE + 0x40d0c, 0xd0d0d);
outpdw(MDP_BASE + 0x40d10, 0xe0e0e);
outpdw(MDP_BASE + 0x40d14, 0xe0e0e);
outpdw(MDP_BASE + 0x40d18, 0xe0e0e);
outpdw(MDP_BASE + 0x40d1c, 0xf0f0f);
outpdw(MDP_BASE + 0x40d20, 0xf0f0f);
outpdw(MDP_BASE + 0x40d24, 0x101010);
outpdw(MDP_BASE + 0x40d28, 0x101010);
outpdw(MDP_BASE + 0x40d2c, 0x111111);
outpdw(MDP_BASE + 0x40d30, 0x111111);
outpdw(MDP_BASE + 0x40d34, 0x121212);
outpdw(MDP_BASE + 0x40d38, 0x121212);
outpdw(MDP_BASE + 0x40d3c, 0x131313);
outpdw(MDP_BASE + 0x40d40, 0x131313);
outpdw(MDP_BASE + 0x40d44, 0x141414);
outpdw(MDP_BASE + 0x40d48, 0x151515);
outpdw(MDP_BASE + 0x40d4c, 0x151515);
outpdw(MDP_BASE + 0x40d50, 0x161616);
outpdw(MDP_BASE + 0x40d54, 0x161616);
outpdw(MDP_BASE + 0x40d58, 0x171717);
outpdw(MDP_BASE + 0x40d5c, 0x171717);
outpdw(MDP_BASE + 0x40d60, 0x181818);
outpdw(MDP_BASE + 0x40d64, 0x191919);
outpdw(MDP_BASE + 0x40d68, 0x191919);
outpdw(MDP_BASE + 0x40d6c, 0x1a1a1a);
outpdw(MDP_BASE + 0x40d70, 0x1b1b1b);
outpdw(MDP_BASE + 0x40d74, 0x1b1b1b);
outpdw(MDP_BASE + 0x40d78, 0x1c1c1c);
outpdw(MDP_BASE + 0x40d7c, 0x1c1c1c);
outpdw(MDP_BASE + 0x40d80, 0x1d1d1d);
outpdw(MDP_BASE + 0x40d84, 0x1e1e1e);
outpdw(MDP_BASE + 0x40d88, 0x1f1f1f);
outpdw(MDP_BASE + 0x40d8c, 0x1f1f1f);
outpdw(MDP_BASE + 0x40d90, 0x202020);
outpdw(MDP_BASE + 0x40d94, 0x212121);
outpdw(MDP_BASE + 0x40d98, 0x212121);
outpdw(MDP_BASE + 0x40d9c, 0x222222);
outpdw(MDP_BASE + 0x40da0, 0x232323);
outpdw(MDP_BASE + 0x40da4, 0x242424);
outpdw(MDP_BASE + 0x40da8, 0x242424);
outpdw(MDP_BASE + 0x40dac, 0x252525);
outpdw(MDP_BASE + 0x40db0, 0x262626);
outpdw(MDP_BASE + 0x40db4, 0x272727);
outpdw(MDP_BASE + 0x40db8, 0x272727);
outpdw(MDP_BASE + 0x40dbc, 0x282828);
outpdw(MDP_BASE + 0x40dc0, 0x292929);
outpdw(MDP_BASE + 0x40dc4, 0x2a2a2a);
outpdw(MDP_BASE + 0x40dc8, 0x2b2b2b);
outpdw(MDP_BASE + 0x40dcc, 0x2c2c2c);
outpdw(MDP_BASE + 0x40dd0, 0x2c2c2c);
outpdw(MDP_BASE + 0x40dd4, 0x2d2d2d);
outpdw(MDP_BASE + 0x40dd8, 0x2e2e2e);
outpdw(MDP_BASE + 0x40ddc, 0x2f2f2f);
outpdw(MDP_BASE + 0x40de0, 0x303030);
outpdw(MDP_BASE + 0x40de4, 0x313131);
outpdw(MDP_BASE + 0x40de8, 0x323232);
outpdw(MDP_BASE + 0x40dec, 0x333333);
outpdw(MDP_BASE + 0x40df0, 0x333333);
outpdw(MDP_BASE + 0x40df4, 0x343434);
outpdw(MDP_BASE + 0x40df8, 0x353535);
outpdw(MDP_BASE + 0x40dfc, 0x363636);
outpdw(MDP_BASE + 0x40e00, 0x373737);
outpdw(MDP_BASE + 0x40e04, 0x383838);
outpdw(MDP_BASE + 0x40e08, 0x393939);
outpdw(MDP_BASE + 0x40e0c, 0x3a3a3a);
outpdw(MDP_BASE + 0x40e10, 0x3b3b3b);
outpdw(MDP_BASE + 0x40e14, 0x3c3c3c);
outpdw(MDP_BASE + 0x40e18, 0x3d3d3d);
outpdw(MDP_BASE + 0x40e1c, 0x3e3e3e);
outpdw(MDP_BASE + 0x40e20, 0x3f3f3f);
outpdw(MDP_BASE + 0x40e24, 0x404040);
outpdw(MDP_BASE + 0x40e28, 0x414141);
outpdw(MDP_BASE + 0x40e2c, 0x424242);
outpdw(MDP_BASE + 0x40e30, 0x434343);
outpdw(MDP_BASE + 0x40e34, 0x444444);
outpdw(MDP_BASE + 0x40e38, 0x464646);
outpdw(MDP_BASE + 0x40e3c, 0x474747);
outpdw(MDP_BASE + 0x40e40, 0x484848);
outpdw(MDP_BASE + 0x40e44, 0x494949);
outpdw(MDP_BASE + 0x40e48, 0x4a4a4a);
outpdw(MDP_BASE + 0x40e4c, 0x4b4b4b);
outpdw(MDP_BASE + 0x40e50, 0x4c4c4c);
outpdw(MDP_BASE + 0x40e54, 0x4d4d4d);
outpdw(MDP_BASE + 0x40e58, 0x4f4f4f);
outpdw(MDP_BASE + 0x40e5c, 0x505050);
outpdw(MDP_BASE + 0x40e60, 0x515151);
outpdw(MDP_BASE + 0x40e64, 0x525252);
outpdw(MDP_BASE + 0x40e68, 0x535353);
outpdw(MDP_BASE + 0x40e6c, 0x545454);
outpdw(MDP_BASE + 0x40e70, 0x565656);
outpdw(MDP_BASE + 0x40e74, 0x575757);
outpdw(MDP_BASE + 0x40e78, 0x585858);
outpdw(MDP_BASE + 0x40e7c, 0x595959);
outpdw(MDP_BASE + 0x40e80, 0x5b5b5b);
outpdw(MDP_BASE + 0x40e84, 0x5c5c5c);
outpdw(MDP_BASE + 0x40e88, 0x5d5d5d);
outpdw(MDP_BASE + 0x40e8c, 0x5e5e5e);
outpdw(MDP_BASE + 0x40e90, 0x606060);
outpdw(MDP_BASE + 0x40e94, 0x616161);
outpdw(MDP_BASE + 0x40e98, 0x626262);
outpdw(MDP_BASE + 0x40e9c, 0x646464);
outpdw(MDP_BASE + 0x40ea0, 0x656565);
outpdw(MDP_BASE + 0x40ea4, 0x666666);
outpdw(MDP_BASE + 0x40ea8, 0x686868);
outpdw(MDP_BASE + 0x40eac, 0x696969);
outpdw(MDP_BASE + 0x40eb0, 0x6a6a6a);
outpdw(MDP_BASE + 0x40eb4, 0x6c6c6c);
outpdw(MDP_BASE + 0x40eb8, 0x6d6d6d);
outpdw(MDP_BASE + 0x40ebc, 0x6f6f6f);
outpdw(MDP_BASE + 0x40ec0, 0x707070);
outpdw(MDP_BASE + 0x40ec4, 0x717171);
outpdw(MDP_BASE + 0x40ec8, 0x737373);
outpdw(MDP_BASE + 0x40ecc, 0x747474);
outpdw(MDP_BASE + 0x40ed0, 0x767676);
outpdw(MDP_BASE + 0x40ed4, 0x777777);
outpdw(MDP_BASE + 0x40ed8, 0x797979);
outpdw(MDP_BASE + 0x40edc, 0x7a7a7a);
outpdw(MDP_BASE + 0x40ee0, 0x7c7c7c);
outpdw(MDP_BASE + 0x40ee4, 0x7d7d7d);
outpdw(MDP_BASE + 0x40ee8, 0x7f7f7f);
outpdw(MDP_BASE + 0x40eec, 0x808080);
outpdw(MDP_BASE + 0x40ef0, 0x828282);
outpdw(MDP_BASE + 0x40ef4, 0x838383);
outpdw(MDP_BASE + 0x40ef8, 0x858585);
outpdw(MDP_BASE + 0x40efc, 0x868686);
outpdw(MDP_BASE + 0x40f00, 0x888888);
outpdw(MDP_BASE + 0x40f04, 0x898989);
outpdw(MDP_BASE + 0x40f08, 0x8b8b8b);
outpdw(MDP_BASE + 0x40f0c, 0x8d8d8d);
outpdw(MDP_BASE + 0x40f10, 0x8e8e8e);
outpdw(MDP_BASE + 0x40f14, 0x909090);
outpdw(MDP_BASE + 0x40f18, 0x919191);
outpdw(MDP_BASE + 0x40f1c, 0x939393);
outpdw(MDP_BASE + 0x40f20, 0x959595);
outpdw(MDP_BASE + 0x40f24, 0x969696);
outpdw(MDP_BASE + 0x40f28, 0x989898);
outpdw(MDP_BASE + 0x40f2c, 0x9a9a9a);
outpdw(MDP_BASE + 0x40f30, 0x9b9b9b);
outpdw(MDP_BASE + 0x40f34, 0x9d9d9d);
outpdw(MDP_BASE + 0x40f38, 0x9f9f9f);
outpdw(MDP_BASE + 0x40f3c, 0xa1a1a1);
outpdw(MDP_BASE + 0x40f40, 0xa2a2a2);
outpdw(MDP_BASE + 0x40f44, 0xa4a4a4);
outpdw(MDP_BASE + 0x40f48, 0xa6a6a6);
outpdw(MDP_BASE + 0x40f4c, 0xa7a7a7);
outpdw(MDP_BASE + 0x40f50, 0xa9a9a9);
outpdw(MDP_BASE + 0x40f54, 0xababab);
outpdw(MDP_BASE + 0x40f58, 0xadadad);
outpdw(MDP_BASE + 0x40f5c, 0xafafaf);
outpdw(MDP_BASE + 0x40f60, 0xb0b0b0);
outpdw(MDP_BASE + 0x40f64, 0xb2b2b2);
outpdw(MDP_BASE + 0x40f68, 0xb4b4b4);
outpdw(MDP_BASE + 0x40f6c, 0xb6b6b6);
outpdw(MDP_BASE + 0x40f70, 0xb8b8b8);
outpdw(MDP_BASE + 0x40f74, 0xbababa);
outpdw(MDP_BASE + 0x40f78, 0xbbbbbb);
outpdw(MDP_BASE + 0x40f7c, 0xbdbdbd);
outpdw(MDP_BASE + 0x40f80, 0xbfbfbf);
outpdw(MDP_BASE + 0x40f84, 0xc1c1c1);
outpdw(MDP_BASE + 0x40f88, 0xc3c3c3);
outpdw(MDP_BASE + 0x40f8c, 0xc5c5c5);
outpdw(MDP_BASE + 0x40f90, 0xc7c7c7);
outpdw(MDP_BASE + 0x40f94, 0xc9c9c9);
outpdw(MDP_BASE + 0x40f98, 0xcbcbcb);
outpdw(MDP_BASE + 0x40f9c, 0xcdcdcd);
outpdw(MDP_BASE + 0x40fa0, 0xcfcfcf);
outpdw(MDP_BASE + 0x40fa4, 0xd1d1d1);
outpdw(MDP_BASE + 0x40fa8, 0xd3d3d3);
outpdw(MDP_BASE + 0x40fac, 0xd5d5d5);
outpdw(MDP_BASE + 0x40fb0, 0xd7d7d7);
outpdw(MDP_BASE + 0x40fb4, 0xd9d9d9);
outpdw(MDP_BASE + 0x40fb8, 0xdbdbdb);
outpdw(MDP_BASE + 0x40fbc, 0xdddddd);
outpdw(MDP_BASE + 0x40fc0, 0xdfdfdf);
outpdw(MDP_BASE + 0x40fc4, 0xe1e1e1);
outpdw(MDP_BASE + 0x40fc8, 0xe3e3e3);
outpdw(MDP_BASE + 0x40fcc, 0xe5e5e5);
outpdw(MDP_BASE + 0x40fd0, 0xe7e7e7);
outpdw(MDP_BASE + 0x40fd4, 0xe9e9e9);
outpdw(MDP_BASE + 0x40fd8, 0xebebeb);
outpdw(MDP_BASE + 0x40fdc, 0xeeeeee);
outpdw(MDP_BASE + 0x40fe0, 0xf0f0f0);
outpdw(MDP_BASE + 0x40fe4, 0xf2f2f2);
outpdw(MDP_BASE + 0x40fe8, 0xf4f4f4);
outpdw(MDP_BASE + 0x40fec, 0xf6f6f6);
outpdw(MDP_BASE + 0x40ff0, 0xf8f8f8);
outpdw(MDP_BASE + 0x40ff4, 0xfbfbfb);
outpdw(MDP_BASE + 0x40ff8, 0xfdfdfd);
outpdw(MDP_BASE + 0x40ffc, 0xffffff);
}
#define IRQ_EN_1__MDP_IRQ___M 0x00000800
/*
 * One-time MDP hardware bring-up: enable the command block, unmask
 * interrupts, load the LUTs, clear unused layer registers, and program
 * the colour-space-conversion (CSC) limit vectors, matrices and bias
 * vectors for both the RGB->YUV and YUV->RGB directions.
 * Register order matters; do not reorder writes.
 */
void mdp_hw_init(void)
{
int i;
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* debug interface write access */
outpdw(MDP_BASE + 0x60, 1);
outp32(MDP_INTR_ENABLE, MDP_ANY_INTR_MASK);
outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8, 0x0);
outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc, 0x0);
outpdw(MDP_BASE + 0x60, 0x1);
mdp_load_lut_param();
/*
 * clear up unused fg/main registers
 */
/* comp.plane 2&3 ystride */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0120, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x012c, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0130, 0x0);
/* unpacked pattern */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0134, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0158, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x15c, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0160, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0170, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0174, 0x0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x017c, 0x0);
/* comp.plane 2 */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0114, 0x0);
/* comp.plane 3 */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0118, 0x0);
/* clear up unused bg registers */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0, 0);
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
/* Extra setup on MDP revisions newer than 2.2. */
#ifndef CONFIG_FB_MSM_MDP22
MDP_OUTP(MDP_BASE + 0xE0000, 0);
MDP_OUTP(MDP_BASE + 0x100, 0xffffffff);
MDP_OUTP(MDP_BASE + 0x90070, 0);
MDP_OUTP(MDP_BASE + 0x94010, 1);
MDP_OUTP(MDP_BASE + 0x9401c, 2);
#endif
/*
 * limit vector
 * pre gets applied before color matrix conversion
 * post is after ccs
 */
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
writel(mdp_plv[0], MDP_CSC_PRE_LV1n(0));
writel(mdp_plv[1], MDP_CSC_PRE_LV1n(1));
writel(mdp_plv[2], MDP_CSC_PRE_LV1n(2));
writel(mdp_plv[3], MDP_CSC_PRE_LV1n(3));
#ifdef CONFIG_FB_MSM_MDP31
writel(mdp_plv[2], MDP_CSC_PRE_LV1n(4));
writel(mdp_plv[3], MDP_CSC_PRE_LV1n(5));
writel(0, MDP_CSC_POST_LV1n(0));
writel(0xff, MDP_CSC_POST_LV1n(1));
writel(0, MDP_CSC_POST_LV1n(2));
writel(0xff, MDP_CSC_POST_LV1n(3));
writel(0, MDP_CSC_POST_LV1n(4));
writel(0xff, MDP_CSC_POST_LV1n(5));
writel(0, MDP_CSC_PRE_LV2n(0));
writel(0xff, MDP_CSC_PRE_LV2n(1));
writel(0, MDP_CSC_PRE_LV2n(2));
writel(0xff, MDP_CSC_PRE_LV2n(3));
writel(0, MDP_CSC_PRE_LV2n(4));
writel(0xff, MDP_CSC_PRE_LV2n(5));
writel(mdp_plv[0], MDP_CSC_POST_LV2n(0));
writel(mdp_plv[1], MDP_CSC_POST_LV2n(1));
writel(mdp_plv[2], MDP_CSC_POST_LV2n(2));
writel(mdp_plv[3], MDP_CSC_POST_LV2n(3));
writel(mdp_plv[2], MDP_CSC_POST_LV2n(4));
writel(mdp_plv[3], MDP_CSC_POST_LV2n(5));
#endif
/* primary forward matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(mdp_ccs_rgb2yuv.ccs[i], MDP_CSC_PFMVn(i));
#ifdef CONFIG_FB_MSM_MDP31
for (i = 0; i < MDP_BV_SIZE; i++)
writel(mdp_ccs_rgb2yuv.bv[i], MDP_CSC_POST_BV2n(i));
writel(0, MDP_CSC_PRE_BV2n(0));
writel(0, MDP_CSC_PRE_BV2n(1));
writel(0, MDP_CSC_PRE_BV2n(2));
#endif
/* primary reverse matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(mdp_ccs_yuv2rgb.ccs[i], MDP_CSC_PRMVn(i));
for (i = 0; i < MDP_BV_SIZE; i++)
writel(mdp_ccs_yuv2rgb.bv[i], MDP_CSC_PRE_BV1n(i));
#ifdef CONFIG_FB_MSM_MDP31
writel(0, MDP_CSC_POST_BV1n(0));
writel(0, MDP_CSC_POST_BV1n(1));
writel(0, MDP_CSC_POST_BV1n(2));
outpdw(MDP_BASE + 0x30010, 0x03e0);
outpdw(MDP_BASE + 0x30014, 0x0360);
outpdw(MDP_BASE + 0x30018, 0x0120);
outpdw(MDP_BASE + 0x3001c, 0x0140);
#endif
mdp_init_scale_table();
#ifndef CONFIG_FB_MSM_MDP31
MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0104,
((16 << 6) << 16) | (16) << 6);
#endif
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
Davletvm/linux | arch/powerpc/kernel/pci-hotplug.c | 689 | 3291 | /*
* Derived from "arch/powerpc/platforms/pseries/pci_dlpar.c"
*
* Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
* Copyright (C) 2005 International Business Machines
*
* Updates, 2005, John Rose <johnrose@austin.ibm.com>
* Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
* Updates, 2013, Gavin Shan <shangw@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
/**
 * pcibios_release_device - release PCI device
 * @dev: PCI device
 *
 * The function is called before releasing the indicated PCI device.
 * On powerpc, this drops the device from the EEH (Enhanced Error
 * Handling) machinery so no error recovery is attempted on it after
 * it is gone.
 */
void pcibios_release_device(struct pci_dev *dev)
{
eeh_remove_device(dev);
}
/**
 * pcibios_remove_pci_devices - remove all devices under this bus
 * @bus: the indicated PCI bus
 *
 * Remove all of the PCI devices under this bus both from the
 * linux pci device tree, and from the powerpc EEH address cache.
 * Child buses are recursed into first, so leaf devices disappear
 * before their parent bridges.
 */
void pcibios_remove_pci_devices(struct pci_bus *bus)
{
struct pci_dev *dev, *tmp;
struct pci_bus *child_bus;
/* First go down child busses */
list_for_each_entry(child_bus, &bus->children, node)
pcibios_remove_pci_devices(child_bus);
pr_debug("PCI: Removing devices on bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
/* _safe iteration: pci_stop_and_remove_bus_device() unlinks dev
 * from bus->devices while we walk the list. */
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
pr_debug(" Removing %s...\n", pci_name(dev));
pci_stop_and_remove_bus_device(dev);
}
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
/**
 * pcibios_add_pci_devices - adds new pci devices to bus
 * @bus: the indicated PCI bus
 *
 * This routine will find and fixup new pci devices under
 * the indicated bus. This routine presumes that there
 * might already be some devices under this bridge, so
 * it carefully tries to add only new devices. (And that
 * is how this routine differs from other, similar pcibios
 * routines.)
 */
void pcibios_add_pci_devices(struct pci_bus * bus)
{
int slotno, mode, pass, max;
struct pci_dev *dev;
struct device_node *dn = pci_bus_to_OF_node(bus);
/* Register the subtree with EEH before any config accesses. */
eeh_add_device_tree_early(dn);
/* Platform may override the probe method (e.g. pSeries uses devtree). */
mode = PCI_PROBE_NORMAL;
if (ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
of_rescan_bus(dn, bus);
} else if (mode == PCI_PROBE_NORMAL) {
/*
 * Use legacy probe. In the partial hotplug case, we
 * probably have grandchildren devices unplugged. So
 * we don't check the return value from pci_scan_slot() in
 * order for fully rescan all the way down to pick them up.
 * They can have been removed during partial hotplug.
 */
/* NOTE(review): PCI_DN(dn->child) is dereferenced unchecked;
 * presumably callers guarantee dn->child is non-NULL here --
 * confirm before relying on this path for empty slots. */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
pcibios_setup_bus_devices(bus);
/* Two passes so bridges found in pass 0 get scanned in pass 1. */
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev,
max, pass);
}
}
}
pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
| gpl-2.0 |
invisiblek/kernel_808l | drivers/infiniband/hw/mthca/mthca_main.c | 945 | 37280 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */
#ifdef CONFIG_PCI_MSI
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#else /* CONFIG_PCI_MSI */
#define msi_x (0)
#endif /* CONFIG_PCI_MSI */
static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
DEFINE_MUTEX(mthca_device_mutex);
#define MTHCA_DEFAULT_NUM_QP (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE (1 << 18)
/* Default HCA resource sizing; each field is overridable through the
 * module parameters declared below. */
static struct mthca_profile hca_profile = {
.num_qp = MTHCA_DEFAULT_NUM_QP,
.rdb_per_qp = MTHCA_DEFAULT_RDB_PER_QP,
.num_cq = MTHCA_DEFAULT_NUM_CQ,
.num_mcg = MTHCA_DEFAULT_NUM_MCG,
.num_mpt = MTHCA_DEFAULT_NUM_MPT,
.num_mtt = MTHCA_DEFAULT_NUM_MTT,
.num_udav = MTHCA_DEFAULT_NUM_UDAV, /* Tavor only */
.fmr_reserved_mtts = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
.uarc_size = MTHCA_DEFAULT_NUM_UARC_SIZE, /* Arbel only */
};
module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");
module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");
module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");
module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");
module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
"maximum number of memory protection table entries per HCA");
module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
"maximum number of memory translation table segments per HCA");
module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");
module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
/*
 * Optionally raise the HCA's PCI burst sizes above the BIOS defaults.
 * Controlled by the "tune_pci" module parameter; a no-op when it is 0.
 *
 * Returns 0 on success (or when tuning is disabled), -ENODEV when a
 * present capability cannot be programmed.
 */
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	struct pci_dev *pdev = mdev->pdev;
	int is_pcie = !!(mdev->mthca_flags & MTHCA_FLAG_PCIE);

	if (!tune_pci)
		return 0;

	/* Max out the PCI-X Maximum Memory Read Byte Count, if present. */
	if (!pci_find_capability(pdev, PCI_CAP_ID_PCIX)) {
		if (!is_pcie)
			mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
	} else if (pcix_set_mmrbc(pdev, pcix_get_max_mmrbc(pdev))) {
		mthca_err(mdev, "Couldn't set PCI-X max read count, "
			  "aborting.\n");
		return -ENODEV;
	}

	/* Raise the PCI Express Max Read Request Size to 4KB, if present. */
	if (!pci_find_capability(pdev, PCI_CAP_ID_EXP)) {
		if (is_pcie)
			mthca_info(mdev, "No PCI Express capability, "
				   "not setting Max Read Request Size.\n");
	} else if (pcie_set_readrq(pdev, 4096)) {
		mthca_err(mdev, "Couldn't write PCI Express read request, "
			  "aborting.\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Query device limits from firmware (QUERY_DEV_LIM), validate them
 * against what this driver/kernel can support, and cache them in
 * mdev->limits.  Also derives mdev->device_cap_flags from the FW
 * capability bits.  Returns 0, a command-layer error, -EINVAL on a
 * bad FW status, or -ENODEV when a reported limit is incompatible.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;
	u8 status;

	/* MTT segment size in bytes (8 bytes per MTT entry) */
	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	/* The HCA's minimum page size must not exceed the kernel's */
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	/* All UARs must fit inside BAR 2 */
	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports      	= dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len  	= dev_lim->max_gids;
	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg             = min_t(int, dev_lim->max_sg,
					      (dev_lim->max_desc_sz -
					       sizeof (struct mthca_next_seg) -
					       (mthca_is_memfree(mdev) ?
						sizeof (struct mthca_arbel_ud_seg) :
						sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	/* Translate optional FW capability bits into IB verbs flags */
	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	/* IPoIB checksum offload is a memfree-only FW feature */
	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}
/*
 * Tavor (MT23108) bring-up: enable the HCA (SYS_EN), query firmware
 * and HCA-attached DDR, read device limits, build a resource profile
 * sized from the module parameters, and issue INIT_HCA.  On any
 * failure after SYS_EN the HCA is disabled again via SYS_DIS.
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	u8 status;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev, &status);
	if (err) {
		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_QUERY_DDR(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_disable;
	}

	/* Start from the module-parameter profile and apply Tavor quirks:
	 * UARs sized from FW report, no UAR context on Tavor. */
	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	/* A negative result is an errno from profile construction */
	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev, &status);

	return err;
}
/*
 * Memfree (Arbel/Sinai) only: allocate host ICM pages for the firmware
 * image, map them into the HCA (MAP_FA), then start the firmware
 * (RUN_FW).  On failure the FA mapping and ICM are released again.
 */
static int mthca_load_fw(struct mthca_dev *mdev)
{
	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}
	if (status) {
		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free;
	}
	err = mthca_RUN_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}
	if (status) {
		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}
/*
 * Memfree only: size and map all firmware context memory (ICM).
 * Negotiates the total ICM size with FW (SET_ICM_SIZE), allocates and
 * maps the auxiliary ICM, then creates one ICM table per resource type
 * (EQ, MTT, MPT, QP, EQP, RDB, CQ, optionally SRQ, MCG) at the base
 * addresses chosen by mthca_make_profile() in @init_hca.
 * On failure, everything allocated so far is unwound in reverse order
 * via the chained error labels.  Returns 0 or a negative errno.
 */
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	u8 status;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	/* aux_pages is in 4K units, hence the << 2 to print KB */
	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}
	if (status) {
		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	/* RDB table: rdb_shift RDBs per QP, no reserved entries */
	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						    dev_lim->cqc_entry_sz,
						    mdev->limits.num_cqs,
						    mdev->limits.reserved_cqs,
						    0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

/* Unwind in reverse allocation order; labels fall through downward. */
err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
/*
 * Tear down everything mthca_init_icm() set up, in strict reverse
 * order of allocation (MCG table was allocated last, so it is freed
 * first; the aux ICM mapping goes last).
 */
static void mthca_free_icms(struct mthca_dev *mdev)
{
	u8 status;

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
/*
 * Arbel/Sinai (memfree) bring-up: query FW, probe for HCA-attached
 * memory (ENABLE_LAM; its absence means pure MemFree mode), load and
 * start the firmware, read device limits, build a resource profile,
 * map all ICM, and finally issue INIT_HCA.  Unwinds via the chained
 * error labels on failure.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	u8 status;
	int err;

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_ENABLE_LAM(mdev, &status);
	if (err) {
		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
		return err;
	}
	/* LAM_NOT_PRE is not an error: it just means no attached memory */
	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (status) {
		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Failed to start FW, aborting.\n");
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_stop_fw;
	}

	/* Memfree quirks: UARs sized from FW report, no UD AV table in ICM */
	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	/* Returns total ICM size needed, or a negative errno */
	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);

	return err;
}
/*
 * Shut the HCA down: CLOSE_HCA, then undo the generation-specific
 * bring-up — memfree devices release ICM, firmware area and (if
 * present) attached memory; Tavor just gets SYS_DIS.
 */
static void mthca_close_hca(struct mthca_dev *mdev)
{
	u8 status;

	mthca_CLOSE_HCA(mdev, 0, &status);

	if (!mthca_is_memfree(mdev)) {
		/* Tavor path: a single disable command suffices */
		mthca_SYS_DIS(mdev, &status);
		return;
	}

	/* Memfree path: unwind mthca_init_arbel() */
	mthca_free_icms(mdev);
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);
}
/*
 * Common HCA init path: dispatch to the generation-specific bring-up
 * (Arbel/memfree vs. Tavor), then read the adapter description
 * (QUERY_ADAPTER) to record the interrupt pin, revision and board id.
 * On a QUERY_ADAPTER failure the HCA is closed again.
 */
static int mthca_init_hca(struct mthca_dev *mdev)
{
	struct mthca_adapter adapter;
	u8 status;
	int err;

	err = mthca_is_memfree(mdev) ? mthca_init_arbel(mdev)
				     : mthca_init_tavor(mdev);
	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	/* memfree devices report their revision elsewhere */
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}
/*
 * Set up all driver-side resource tables once the HCA is initialized:
 * UARs, PDs, MRs, EQs, CQs, SRQs, QPs, AVs and MCGs, plus the kernel
 * access region mapping and the switch to event-driven FW commands.
 * The NOP command verifies interrupt delivery; if it fails while
 * MSI-X is active, -EBUSY is propagated so the caller can retry in
 * INTx mode (see __mthca_init_one).  Unwinds via chained error
 * labels on failure.  Returns 0 or a negative errno.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;
	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	/* Map the driver's UAR page so the kernel can ring doorbells */
	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP doubles as an interrupt-delivery self-test */
	err = mthca_NOP(dev, &status);
	if (err || status) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

/* Cleanup in reverse order of the setup above */
err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}
/*
 * Try to allocate the three MSI-X vectors (one per EQ: completion,
 * async event, command).  On success, record each vector in the EQ
 * table and return 0.  On failure return the pci_enable_msix() result
 * (a positive shortfall count or a negative errno); the caller then
 * falls back to legacy INTx interrupts.
 */
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}
/* Types of supported HCA; used as index into mthca_hca_table and
 * carried in pci_device_id.driver_data */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204                        */
};

/* Pack a FW version triple into one u64 so versions compare with '<' */
#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

/* Per-HCA-type data: newest FW version known at release time (used
 * only to warn about old firmware) and the device capability flags. */
static struct {
	u64 latest_fw;
	u32 flags;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
			   .flags     = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
			   .flags     = MTHCA_FLAG_PCIE },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE    |
					MTHCA_FLAG_SINAI_OPT }
};
/*
 * Bring up one HCA: enable and map PCI resources, reset the device,
 * initialize the command interface, boot the firmware, set up all
 * resource tables and register with the IB midlayer.  @hca_type
 * indexes mthca_hca_table.  Returns 0 or a negative errno; on failure
 * everything acquired so far is torn down via the chained labels.
 *
 * Fixes vs. previous revision:
 *  - mthca_cmd_init()'s return value was discarded, so a command
 *    interface init failure fell through to err_free_dev with err
 *    still 0 and the probe "succeeded" with a dead device.  The
 *    result is now captured and propagated.
 *  - dropped a redundant MTHCA_FLAG_MSI_X re-check inside a branch
 *    whose condition already guarantees the flag is set.
 */
static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA masks; fall back to 32-bit if unsupported */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_free_res;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_free_res;
		}
	}

	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	/* Capture the result: previously a failure here leaked through
	 * as err == 0 and the probe reported success. */
	err = mthca_cmd_init(mdev);
	if (err) {
		mthca_err(mdev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_cmd;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_cmd;

	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;

	err = mthca_setup_hca(mdev);
	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		/* IRQ test failed under MSI-X: retry in legacy INTx mode.
		 * (The condition above guarantees MSI-X is enabled, so no
		 * re-check is needed before disabling it.) */
		pci_disable_msix(pdev);
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;

		err = mthca_setup_hca(mdev);
	}

	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

	mdev->active = true;

	return 0;

err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_srq_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);
	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mthca_close_hca(mdev);

err_cmd:
	mthca_cmd_cleanup(mdev);

err_free_dev:
	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/*
 * Tear down one HCA, exactly reversing __mthca_init_one(): close IB
 * ports, free all resource tables (in reverse order of setup), shut
 * the HCA down, release the command interface and PCI resources.
 * Safe to call when drvdata is NULL (probe failed or never ran).
 */
static void __mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	u8 status;
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		/* IB port numbers are 1-based */
		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p, &status);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_srq_table(mdev);
		mthca_cleanup_cq_table(mdev);
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);
		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		mthca_cmd_cleanup(mdev);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * Restart a device in place (used e.g. after a catastrophic error):
 * remember its HCA type, tear it down completely, then probe it again.
 * Returns -ENODEV if no device is bound, otherwise the re-init result.
 */
int __mthca_restart_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	int hca_type;

	if (!mdev)
		return -ENODEV;

	hca_type = mdev->hca_type;
	__mthca_remove_one(pdev);
	return __mthca_init_one(pdev, hca_type);
}
/*
 * PCI probe entry point.  Serializes against remove/restart with
 * mthca_device_mutex, prints the driver banner once, validates the
 * table index carried in driver_data, and delegates to
 * __mthca_init_one().  Returns 0 or a negative errno.
 */
static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int ret = -ENODEV;

	mutex_lock(&mthca_device_mutex);

	printk_once(KERN_INFO "%s", mthca_version);

	if (id->driver_data < ARRAY_SIZE(mthca_hca_table))
		ret = __mthca_init_one(pdev, id->driver_data);
	else
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
		       pci_name(pdev), id->driver_data);

	mutex_unlock(&mthca_device_mutex);
	return ret;
}
/* PCI remove entry point: just __mthca_remove_one() under the
 * device mutex (serialized against probe and restart). */
static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
	mutex_lock(&mthca_device_mutex);
	__mthca_remove_one(pdev);
	mutex_unlock(&mthca_device_mutex);
}
/*
 * PCI ID table: each HCA ships under both the Mellanox and the Topspin
 * vendor IDs; driver_data carries the mthca_hca_table index.
 */
static struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }			/* terminator */
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);

static struct pci_driver mthca_driver = {
	.name		= DRV_NAME,
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= __devexit_p(mthca_remove_one)
};
/*
 * Force one profile module parameter to be a positive power of two:
 * non-positive values take the default, others are rounded up to the
 * next power of two.  A warning naming the parameter is printed
 * whenever the value had to be changed.
 */
static void __init __mthca_check_profile_val(const char *name, int *pval,
					     int pval_default)
{
	/* value must be positive and power of 2 */
	int requested = *pval;
	int corrected;

	corrected = (requested <= 0) ? pval_default
				     : roundup_pow_of_two(requested);
	*pval = corrected;

	if (corrected != requested) {
		printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
		       requested, name);
		printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, corrected);
	}
}
/* Validate one hca_profile field: token-pastes the field name so the
 * warning messages can name the offending module parameter. */
#define mthca_check_profile_val(name, default)				\
	__mthca_check_profile_val(#name, &hca_profile.name, default)

/*
 * Sanitize all module-parameter-driven profile values at module init:
 * round each to a positive power of two, keep fmr_reserved_mtts below
 * num_mtt, and clamp log_mtts_per_seg to the supported [1,5] range.
 */
static void __init mthca_validate_profile(void)
{
	mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
	mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
	mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
	mthca_check_profile_val(num_mcg, 	   MTHCA_DEFAULT_NUM_MCG);
	mthca_check_profile_val(num_mpt, 	   MTHCA_DEFAULT_NUM_MPT);
	mthca_check_profile_val(num_mtt, 	   MTHCA_DEFAULT_NUM_MTT);
	mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
	mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);

	/* FMR reservation must leave some MTTs for regular use */
	if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
		printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
		       hca_profile.fmr_reserved_mtts);
		printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
		       hca_profile.num_mtt);
		hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
		       hca_profile.fmr_reserved_mtts);
	}

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
	}
}
/*
 * Module init: sanitize the profile parameters, start the
 * catastrophic-error polling infrastructure, then register the PCI
 * driver.  If driver registration fails, catas support is torn down
 * again.  Returns 0 on success or a negative errno.
 */
static int __init mthca_init(void)
{
	int ret;

	mthca_validate_profile();

	ret = mthca_catas_init();
	if (ret)
		return ret;

	ret = pci_register_driver(&mthca_driver);
	if (ret < 0)
		mthca_catas_cleanup();

	return ret < 0 ? ret : 0;
}
/* Module exit: unregister the PCI driver (detaching all devices),
 * then tear down the catastrophic-error handling. */
static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
	mthca_catas_cleanup();
}

module_init(mthca_init);
module_exit(mthca_cleanup);
| gpl-2.0 |
dankocher/android_kernel_lge_w7ds | drivers/staging/prima/CORE/BAP/src/bapRsn8021xSuppRsnFsm.c | 945 | 33790 | /*
* Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
* $File: //depot/software/projects/feature_branches/gen5_phase1/os/linux/classic/ap/apps/ssm/auth8021x/ani8021xSuppRsnFsm.c $
*
* Contains definitions for the RSN EAPOL-Key FSM on the
* supplicant side. This is based on 802.11i.
*
* Author: Mayank D. Upadhyay
* Date: 19-December-2002
* History:-
* Date Modified by Modification Information
* ------------------------------------------------------
*
*/
#include "bapRsnSsmServices.h"
#include "bapRsnSsmEapol.h"
#include "bapRsnErrors.h"
#include "bapRsn8021xSuppRsnFsm.h"
#include "vos_utils.h"
#include "bapRsnTxRx.h"
#include "btampFsm.h"
// The different states that this FSM transitions through
// (supplicant side of the RSN EAPOL-Key handshake, per 802.11i)
#define INITIALIZE           0
#define AUTHENTICATION       1
#define GOT_PMK              2
#define STA_KEY_START        3
#define STA_KEY_SET          4
#define KEY_UPDATE           5
#define REKEY_MSG            6
#define GROUP_KEY_SET        7
#define NUM_STATES (GROUP_KEY_SET + 1)

// FSM constants: EAPOL-Key response timeout and retransmit limit;
// re-initialized (to the same values) in suppRsnFsmInit()
static tSuppRsnFsmConsts suppConsts = { 2000, 3 };   //timeout, retry limit

// Global flag toggled by the FSM around key installation.
// NOTE(review): consumer is outside this chunk -- verify against the
// key-set path before relying on its semantics.
int gReadToSetKey;
/**************************************
* Static functions in this module
**************************************/
static
int zeroOutPtk(tSuppRsnFsm *fsm);
static
int checkMic(tSuppRsnFsm *fsm,
tAniEapolKeyAvailEventData *data,
v_BOOL_t pwKeyExchange);
static int checkInfoElement(tSuppRsnFsm *fsm,
tAniEapolKeyAvailEventData *data);
static
int checkPeerReplayCounter(tSuppRsnFsm *fsm,
tAniEapolKeyAvailEventData *data,
v_BOOL_t *retransmit,
v_BOOL_t actualMicFlag,
v_BOOL_t reTxMicFlag
);
static
int derivePtk(tSuppRsnFsm *fsm,
tAniEapolKeyAvailEventData *data);
static
int checkTransition(tSuppRsnFsm *fsm, void *arg);
static int
gotoStateInit(tSuppRsnFsm *fsm);
static int suppRsnRxFrameHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket );
static int suppRsnTxCompleteHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket, VOS_STATUS retStatus );
/*************************
* Internal Functions
*************************/
int suppRsnAuthStartEventHandler(tSuppRsnFsm *fsm);
/*************************
* The exported functions
*************************/
/**
* suppRsnFsmInit
*
* FUNCTION:
* Initializes the constants and the callbacks needed by this FSM
* module.
*
* @param cb callbacks to the various procedures needed by this FSM
*
* @return ANI_OK if the operation succeeds
*/
/* One-time module init: seed the shared supplicant FSM constants. */
int
suppRsnFsmInit(void)
{
    // TODO: these should eventually come from the configuration store
    // rather than being hard-coded.
    suppConsts.maxTries = 3;
    suppConsts.timeoutPeriod = 2000;
    return ANI_OK;
}
/**
* suppRsnFsmCreate
*
* FUNCTION
* Allocates and initializes the state of an RSN key FSM instance for
* the given BP context.
*
* @parm ctx the BP context whose instance is being created
* @param pskBased pass in eANI_BOOLEAN_TRUE is this BP is to be
* authenticated based on a pre-shared key as opposed to EAP.
*
* @return ANI_OK if the operation succeeds
*/
/*
 * Allocate and initialize the supplicant RSN key FSM embedded in the
 * given BT-AMP context, register the EAPOL TX/RX callbacks, and kick
 * off the handshake by injecting RSN_FSM_AUTH_START.
 * Returns ANI_OK on success; on any allocation failure the partially
 * built FSM is torn down via suppRsnFsmFree() and an error is returned.
 */
int
suppRsnFsmCreate(tBtampContext *ctx)
{
    int retVal = ANI_OK;
    tSuppRsnFsm *fsm = &ctx->uFsm.suppFsm;
    // First, clear everything out
    vos_mem_zero( fsm, sizeof(tSuppRsnFsm));
    // Register the module-level TX-complete and RX frame handlers.
    if( !VOS_IS_STATUS_SUCCESS( bapRsnRegisterTxRxCallbacks( suppRsnTxCompleteHandler,
                                                             suppRsnRxFrameHandler ) ) )
    {
        return ANI_ERROR;
    }
    if( !VOS_IS_STATUS_SUCCESS( bapRsnRegisterRxCallback( ctx->pvosGCtx ) ) )
    {
        return ANI_ERROR;
    }
    // Allocate the supplicant context
    fsm->suppCtx = (tSuppContext *)vos_mem_malloc( sizeof(tSuppContext) );
    if (fsm->suppCtx == NULL)
    {
        retVal = ANI_E_MALLOC_FAILED;
        VOS_ASSERT( 0 );
        goto error;
    }
    // Clear out the supplicant context
    vos_mem_zero( fsm->suppCtx, sizeof(tSuppContext));
    fsm->ctx = ctx;
    //Only support CCMP
    fsm->suppCtx->pwCipherType = eCSR_ENCRYPT_TYPE_AES;
    // Pre-allocate the buffer used to (re)send the last EAPOL frame,
    // with headroom for the transmit header.
    retVal = aniAsfPacketAllocateExplicit(&fsm->lastEapol,
                                          RSN_MAX_PACKET_SIZE,
                                          EAPOL_TX_HEADER_SIZE);
    if (retVal != ANI_OK)
    {
        VOS_ASSERT( 0 );
        goto error;
    }
    aniAsfPacketAllocate(&fsm->suppCtx->pmk);
    if (fsm->suppCtx->pmk == NULL)
    {
        retVal = ANI_E_MALLOC_FAILED;
        VOS_ASSERT( 0 );
        goto error;
    }
    fsm->suppCtx->ieAp = NULL;
    fsm->cryptHandle = 0;
    // NOTE(review): a vos_crypto_init() failure sets retVal but does NOT
    // jump to error, so the function still returns ANI_OK below with a
    // zero crypt handle -- confirm whether this is intentional.
    if( !VOS_IS_STATUS_SUCCESS( vos_crypto_init( &fsm->cryptHandle ) ) )
    {
        retVal = ANI_E_FAILED;
        VOS_ASSERT( 0 );
    }
    fsm->currentState = INITIALIZE;
    gotoStateInit(fsm);
    // Start the handshake immediately.
    suppRsnFsmProcessEvent( fsm, RSN_FSM_AUTH_START, NULL );
    return ANI_OK;
error:
    suppRsnFsmFree( ctx );
    return retVal;
}
/**
* suppRsnFsmFree
*
* FUNCTION
* Frees a previously allocated RSN Key FSM in a BP context. If the
* RSN Key FSM is not yet allocated, then this is an error.
*
* @param ctx the BP context whose FSM instance is to be freed
*
* @return ANI_OK if the operation succeeds
*/
/*
 * Tear down the supplicant RSN key FSM inside a BT-AMP context:
 * release the crypto handle, unhook TX/RX callbacks, free the cached
 * EAPOL buffer and supplicant context (including the PMK), then zero
 * the FSM structure. Safe to call on a partially initialized FSM
 * (every free is guarded), which is relied on by suppRsnFsmCreate's
 * error path. Always returns ANI_OK.
 */
int
suppRsnFsmFree(tBtampContext *ctx)
{
    tSuppRsnFsm *fsm;
    fsm = &ctx->uFsm.suppFsm;
    if( fsm->cryptHandle )
    {
        vos_crypto_deinit( fsm->cryptHandle );
    }
    // Detach callbacks before freeing buffers they might reference.
    bapRsnClearTxRxCallbacks();
    if (fsm->lastEapol)
        aniAsfPacketFree(fsm->lastEapol);
    if( fsm->suppCtx )
    {
        if ( fsm->suppCtx->pmk )
        {
            aniAsfPacketFree(fsm->suppCtx->pmk);
        }
        vos_mem_free( fsm->suppCtx );
    }
    // Finally, clear everything out
    vos_mem_zero( fsm, sizeof(tSuppRsnFsm));
    return ANI_OK;
}
/**
* suppRsnFsmProcessEvent
*
* FUNCTION
* Passes an event to the RSN key FSM instance for immediate processing.
*
* @param fsm the RSN Key FSM instance
* @param eventId the AAG event to process
* @param arg an optional argument for this event
*
* @return ANI_OK if the operation succeeds
*/
/*
 * Latch the given event into the FSM's pending-event flags and run one
 * pass of the transition function. Unknown events are rejected with
 * ANI_E_ILLEGAL_ARG without touching the state machine.
 */
int
suppRsnFsmProcessEvent(tSuppRsnFsm *fsm, tRsnFsmEvent eventId, void *arg)
{
    if (RSN_FSM_TIMER_EXPIRED == eventId)
    {
        // Nothing to latch; just re-evaluate the state machine.
    }
    else if (RSN_FSM_AUTH_START == eventId)
    {
        fsm->authReq = eANI_BOOLEAN_TRUE;
        suppRsnAuthStartEventHandler(fsm);
    }
    else if (RSN_FSM_EAPOL_FRAME_AVAILABLE == eventId)
    {
        fsm->eapolAvail = eANI_BOOLEAN_TRUE;
    }
    else if (RSN_FSM_INTEG_FAILED == eventId)
    {
        fsm->integFailed = eANI_BOOLEAN_TRUE;
    }
    else
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Supp unknown event for SuppFsm: %d\n",
                   eventId);
        VOS_ASSERT( 0 );
        return ANI_E_ILLEGAL_ARG;
    }
    checkTransition(fsm, arg);
    return ANI_OK;
}
/*
 * Snapshot the peer (authenticator) and local MAC addresses into the
 * supplicant context and stage the pre-shared key material as the PMK.
 */
int
suppRsnAuthStartEventHandler(tSuppRsnFsm *fsm)
{
    // Local address first, then the authenticator's address.
    vos_mem_copy( &fsm->suppCtx->suppMac, fsm->ctx->self_mac_addr, 6);
    vos_mem_copy( &fsm->suppCtx->authMac, fsm->ctx->peer_mac_addr, 6);
    // The PSK provisioned on the context becomes our PMK.
    aniAsfPacketAppendBuffer( fsm->suppCtx->pmk, fsm->ctx->key_material, fsm->ctx->key_length);
    return ANI_OK;
}
/***********************
* The static functions
***********************/
/* Enter INITIALIZE: clear all pending-event flags and (re)create the
 * replay counters used for our requests and for the AP's requests. */
static int
gotoStateInit(tSuppRsnFsm *fsm)
{
    fsm->currentState = INITIALIZE;
    // No events pending in the freshly initialized machine.
    fsm->pmkAvail = eANI_BOOLEAN_FALSE;
    fsm->integFailed = eANI_BOOLEAN_FALSE;
    fsm->eapolAvail = eANI_BOOLEAN_FALSE;
    fsm->authReq = eANI_BOOLEAN_FALSE;
    // One replay counter tracks our own transmissions, the other the
    // STA's; the implementation seeds the first randomly.
    aniSsmReplayCtrCreate(fsm->cryptHandle, &fsm->localReplayCtr, ANI_EAPOL_KEY_RSN_RSC_SIZE, 0);
    aniSsmReplayCtrCreate(fsm->cryptHandle, &fsm->peerReplayCtr, ANI_EAPOL_KEY_RSN_RSC_SIZE, 0);
    return ANI_OK;
}
/* Enter AUTHENTICATION: draw a fresh SNonce for the upcoming 4-way
 * handshake and clear any stale pairwise key state. */
static int
gotoStateAuthentication(tSuppRsnFsm *fsm)
{
    fsm->currentState = AUTHENTICATION;
    if( !VOS_IS_STATUS_SUCCESS( vos_rand_get_bytes( fsm->cryptHandle, fsm->sNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE ) ) )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Supp fail to random number\n" );
        return ANI_ERROR;
    }
    zeroOutPtk(fsm);
    // TODO: Zero out all GTK's
    fsm->authReq = eANI_BOOLEAN_FALSE;
    /////getPmk(fsm->suppCtx);
    return ANI_OK;
}
/* Enter GOT_PMK. The PMK itself was staged at AUTH_START time, so the
 * only work here is recording the state. */
static int
gotoStateGotPmk(tSuppRsnFsm *fsm)
{
    fsm->currentState = GOT_PMK;
    return ANI_OK;
}
/*
 * Enter STA_KEY_START: process message 1 of the 4-way handshake,
 * derive the PTK from (PMK, ANonce, SNonce, MACs), and build and send
 * message 2 carrying our SNonce, a MIC, and our RSN IE.
 * 'retransmit' is currently ignored (the frame is always rebuilt; see
 * the commented-out guard below).
 */
static int
gotoStateStaKeyStart(tSuppRsnFsm *fsm,
                     tAniEapolKeyAvailEventData *data,
                     v_BOOL_t retransmit)
{
    int retVal;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc;
    // Hard-coded RSN IE advertised in Msg2: 0x30 (RSN), CCMP group and
    // pairwise suites (00-0f-ac:4), PSK AKM (00-0f-ac:2).
    static v_U8_t btampRSNIE[] = {0x30, 0x14, 0x01, 0x00, 0x00, 0x0f, 0xac, 0x04, 0x01, 0x00,
                                  0x00, 0x0f, 0xac, 0x04, 0x01, 0x00, 0x00, 0x0f, 0xac, 0x02, 0x00, 0x00 };
    fsm->currentState = STA_KEY_START;
    // Create a new EAPOL frame if we don't have to retransmit
    // if (!retransmit)
    //{
    rxDesc = data->keyDesc;
    if( NULL == rxDesc)
    {
        return ANI_E_NULL_VALUE;
    }
    // Reset the cached TX buffer, keeping headroom for the EAPOL header.
    aniAsfPacketEmptyExplicit( fsm->lastEapol, EAPOL_TX_HEADER_SIZE );
    // PTK = PRF(PMK, ANonce from rxDesc, our SNonce, both MACs).
    retVal = derivePtk(fsm, data);
    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp derivePtk failed with code %d!\n", retVal);
        return retVal;
    }
    vos_mem_zero( &txDesc, sizeof(txDesc) );
    // The Key Information bits...
    if (fsm->suppCtx->pwCipherType == eCSR_ENCRYPT_TYPE_AES)
    {
        txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
    }
    txDesc.info.unicastFlag = eANI_BOOLEAN_TRUE;
    txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
    txDesc.keyLen = 0; //RSN_80211_KEY_LEN;
    // Send back the same replayCtr that the authenticator sent
    vos_mem_copy(txDesc.replayCounter,
                 rxDesc->replayCounter,
                 sizeof(txDesc.replayCounter));
    vos_mem_copy(txDesc.keyNonce, fsm->sNonce, sizeof(txDesc.keyNonce));
    // Key data carries our (static) RSN IE rather than one built from
    // the supplicant context.
    txDesc.keyDataLen = sizeof(btampRSNIE);//aniAsfPacketGetBytes(fsm->suppCtx->ieBp,
    //&txDesc.keyData);
    txDesc.keyData = btampRSNIE;
    // Serialize and MIC the frame with the freshly derived PTK.
    retVal = aniEapolWriteKey(fsm->cryptHandle,
                              fsm->lastEapol,
                              fsm->suppCtx->authMac,
                              fsm->suppCtx->suppMac,
                              ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                              &txDesc,
                              fsm->suppCtx->ptk,
                              CSR_AES_KEY_LEN);
    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Supp gotoStateStaKeyStart fail to write key %d\n",
                   retVal);
        return retVal;
    }
    //}
    if( VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx, fsm->lastEapol ) ) )
    {
        retVal = ANI_OK;
    }
    else
    {
        retVal = ANI_ERROR;
    }
    return retVal;
}
/*
 * Enter STA_KEY_SET: process message 3 of the 4-way handshake and send
 * message 4. On a fresh (non-retransmit) M3 the AP's RSN IE is checked
 * against what it advertised earlier, then a MIC'd confirmation frame
 * is built. gReadToSetKey signals the TX-complete path to install the
 * pairwise key once M4 has actually gone out (see the #if 0 block for
 * the race this avoids). Called with data == NULL when re-entered from
 * GROUP_KEY_SET, in which case nothing needs to be done.
 */
static int
gotoStateStaKeySet(tSuppRsnFsm *fsm,
                   tAniEapolKeyAvailEventData *data,
                   v_BOOL_t retransmit)
{
    int retVal=0;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc = NULL;
    fsm->currentState = STA_KEY_SET;
    if (data == NULL)
    {
        // We don't need to do anything
        return ANI_OK;
    }
    // Create a new EAPOL frame if we don't have to retransmit
    if (!retransmit)
    {
        // First check the IE that the AP sent
        retVal = checkInfoElement(fsm, data);
        if (retVal != ANI_OK)
        {
            //FIX_RSN aagSetSuppFailureAndCleanup(fsm->suppCtx);
            // FSM does not exist after this...
            return retVal;
        }
        // Create a new EAPOL frame
        rxDesc = data->keyDesc;
        if( NULL == rxDesc )
            return ANI_E_NULL_VALUE;
        aniAsfPacketEmptyExplicit(fsm->lastEapol,
                                  EAPOL_TX_HEADER_SIZE );
        vos_mem_zero( &txDesc, sizeof(txDesc) );
        // The Key Information bits...
        if (fsm->suppCtx->pwCipherType == eCSR_ENCRYPT_TYPE_AES)
        {
            txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
        }
        txDesc.info.unicastFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.secureFlag = eANI_BOOLEAN_TRUE;
        txDesc.keyLen = 0; //RSN_80211_KEY_LEN;
        // Send back the same replayCtr that the authenticator sent
        vos_mem_copy(txDesc.replayCounter,
                     rxDesc->replayCounter,
                     sizeof(txDesc.replayCounter));
        retVal = aniEapolWriteKey(fsm->cryptHandle,
                                  fsm->lastEapol,
                                  fsm->suppCtx->authMac,
                                  fsm->suppCtx->suppMac,
                                  ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                                  &txDesc,
                                  fsm->suppCtx->ptk,
                                  CSR_AES_KEY_LEN);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
        {
            return retVal;
        }
    }
    // Tell the TX-complete handler that a successful send of this frame
    // should be followed by installing the pairwise key.
    gReadToSetKey = BAP_SET_RSN_KEY;
    if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx, fsm->lastEapol ) ) )
    {
        /* making it global to access in bapTxRx file */
#if 0
        tCsrRoamSetKey setKeyInfo;
        vos_mem_zero( &setKeyInfo, sizeof( tCsrRoamSetKey ) );
        setKeyInfo.encType = eCSR_ENCRYPT_TYPE_AES;
        setKeyInfo.keyDirection = eSIR_TX_RX;
        vos_mem_copy( setKeyInfo.peerMac, fsm->suppCtx->authMac, sizeof( tAniMacAddr ) );
        setKeyInfo.paeRole = 0; //this is a supplicant
        setKeyInfo.keyId = 0;   //always
        setKeyInfo.keyLength = CSR_AES_KEY_LEN;
        vos_mem_copy( setKeyInfo.Key, (v_U8_t *)fsm->suppCtx->ptk + (2 * CSR_AES_KEY_LEN ), CSR_AES_KEY_LEN );
        //fsm->suppCtx->ptk contains the 3 16-bytes keys. We need the last one.
        /*
          We will move the Set key to EAPOL Completion handler. We found a race condition betweem
          sending EAPOL frame and setting Key */
        if( !VOS_IS_STATUS_SUCCESS( bapSetKey( fsm->ctx->pvosGCtx, &setKeyInfo ) ) )
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, " Supp: gotoStateStaKeySet fail to set key\n" );
            retVal = ANI_ERROR;
        }
#endif
        // Send failed: withdraw the set-key request.
        gReadToSetKey = BAP_RESET_RSN_KEY;
        retVal = ANI_ERROR;
    }
    return retVal;
}
/*
 * Enter GROUP_KEY_SET: process message 1 of the group-key handshake.
 * Validates that a GTK is actually present and AES-wrapped, sends the
 * MIC'd group-key acknowledgment, then immediately re-runs
 * checkTransition (unconditional-transition rule) to fall back into
 * STA_KEY_SET.
 */
static int
gotoStateGroupKeySet(tSuppRsnFsm *fsm,
                     tAniEapolKeyAvailEventData *data)
{
    int retVal;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc;
    int groupKeyLen;
    fsm->currentState = GROUP_KEY_SET;
    do
    {
        rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
        if( NULL == rxDesc)
        {
            retVal = ANI_E_NULL_VALUE;
            break;
        }
        if (rxDesc->keyDataLen == 0 || rxDesc->keyData == NULL)
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: AP sent no group key in group EAPOL-Key message!\n" );
            retVal = ANI_E_ILLEGAL_ARG;
            break;
        }
        if ( rxDesc->info.keyDescVers == ANI_EAPOL_KEY_DESC_VERS_AES )
        {
            // AES key-wrap adds one block of overhead; what remains
            // must be a non-empty GTK.
            groupKeyLen = rxDesc->keyDataLen - ANI_SSM_AES_KEY_WRAP_BLOCK_SIZE;
            if( groupKeyLen <= 0 )
            {
                VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                           "Supp: AP sent GTK too short\n" );
                retVal = ANI_E_ILLEGAL_ARG;
                break;
            }
        }
        else
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: AP sent unsupported keyDescVer %d!\n", rxDesc->info.keyDescVers );
            retVal = ANI_E_ILLEGAL_ARG;
            break;
        }
        // Always create a new EAPOL frame
        aniAsfPacketEmptyExplicit( fsm->lastEapol,
                                   EAPOL_TX_HEADER_SIZE );
        vos_mem_zero( &txDesc, sizeof(txDesc) );
        // The Key Information bits...
        if (fsm->suppCtx->grpCipherType == eCSR_ENCRYPT_TYPE_AES)
        {
            txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
        }
        txDesc.info.unicastFlag = eANI_BOOLEAN_FALSE;
        txDesc.info.keyId = rxDesc->info.keyId;
        txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.secureFlag = eANI_BOOLEAN_TRUE;
        txDesc.keyLen = RSN_80211_KEY_LEN;
        // Send back the same replayCtr that the authenticator sent
        vos_mem_copy(txDesc.replayCounter,
                     rxDesc->replayCounter,
                     sizeof(txDesc.replayCounter));
        retVal = aniEapolWriteKey(fsm->cryptHandle,
                                  fsm->lastEapol,
                                  fsm->suppCtx->authMac,
                                  fsm->suppCtx->suppMac,
                                  ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                                  &txDesc,
                                  fsm->suppCtx->ptk,
                                  CSR_AES_KEY_LEN);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) ) break;
        if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx, fsm->lastEapol ) ) )
        {
            retVal = ANI_ERROR;
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Supp could not send eapol. Disconnect\n" );
            break;
        }
        //FIX_RSN there is no need to set GTK retVal = setGtk(fsm->suppCtx, rxDesc->keyRecvSeqCounter);
        // This is never retransmitted
        aniAsfPacketEmptyExplicit( fsm->lastEapol,
                                   EAPOL_TX_HEADER_SIZE );
        checkTransition(fsm, NULL); // UCT rule
    }while( 0 );
    return retVal;
}
/* KEY_UPDATE entry: key updates are unsupported, so the only sane
 * response is to drop the link. micFailureInfo is accepted for
 * interface symmetry with the other state handlers but unused. */
static int
gotoStateKeyUpdate(tSuppRsnFsm *fsm, tSirMicFailureInfo *micFailureInfo)
{
    bapSuppDisconnect( fsm->ctx );
    return ANI_OK;
}
/* REKEY_MSG entry: rekeying is unsupported, so disconnect instead.
 * micFailureInfo is unused. */
static int
gotoStateRekeyMsg(tSuppRsnFsm *fsm, tSirMicFailureInfo *micFailureInfo)
{
    bapSuppDisconnect( fsm->ctx );
    return ANI_OK;
}
static
int zeroOutPtk(tSuppRsnFsm *fsm)
{
    // NOTE(review): despite its name this is a no-op -- the PTK in
    // fsm->suppCtx->ptk is never actually cleared. Confirm whether the
    // key material should be zeroed here before a new handshake.
    return ANI_OK;
}
/*
 * Derive the pairwise transient key (PTK) into fsm->suppCtx->ptk via
 * the PTK PRF, keyed by the PMK and mixing both MAC addresses, the
 * AP's ANonce (from the received descriptor) and our SNonce.
 * Only CCMP/AES is supported; any other cipher is rejected.
 */
static
int derivePtk(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data)
{
    v_U32_t prfLen;
    tAniEapolRsnKeyDesc *rxDesc;
    switch (fsm->suppCtx->pwCipherType)
    {
    case eCSR_ENCRYPT_TYPE_AES:
        // CCMP PTK length; also record the per-key material length.
        prfLen = AAG_RSN_PTK_PRF_LEN_CCMP;
        fsm->suppCtx->pwKeyLen = AAG_RSN_KEY_MATERIAL_LEN_CCMP;
        break;
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Cannot generate PTK for BP for invalid algorithm %d\n",
                   fsm->suppCtx->pwCipherType);
        return ANI_E_ILLEGAL_ARG;
        break;
    };
    rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
    return aagPtkPrf(fsm->cryptHandle,
                     fsm->suppCtx->ptk,
                     prfLen,
                     fsm->suppCtx->pmk,
                     fsm->suppCtx->authMac,
                     fsm->suppCtx->suppMac,
                     rxDesc->keyNonce,   // ANonce chosen by the AP
                     fsm->sNonce);       // SNonce chosen by us
}
/* Verify the MIC on a received EAPOL-Key frame using the KCK portion
 * of our PTK. pwKeyExchange is unused: the check is identical for
 * pairwise and group messages. */
static
int checkMic(tSuppRsnFsm *fsm,
             tAniEapolKeyAvailEventData *data,
             v_BOOL_t pwKeyExchange)
{
    return aniEapolKeyCheckMic(fsm->cryptHandle,
                               data->eapolFrame,
                               ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                               data->keyDesc,
                               fsm->suppCtx->ptk,
                               CSR_AES_KEY_LEN);
}
/*
 * Compare the RSN IE carried in message 3 against the IE the AP
 * advertised earlier (fsm->suppCtx->ieAp). A mismatch indicates a
 * possible downgrade attack and fails the handshake. If no cached AP
 * IE is available the check is skipped and ANI_OK is returned.
 *
 * BUG FIX: vos_mem_compare() returns VOS_TRUE when the buffers are
 * EQUAL, so the mismatch condition must negate it. The original code
 * treated a successful comparison as a failure, i.e. it rejected a
 * matching IE and accepted a differing one of equal length.
 */
static int checkInfoElement(tSuppRsnFsm *fsm,
                            tAniEapolKeyAvailEventData *data)
{
    tAniEapolRsnKeyDesc *desc;
    v_U8_t *ieApBytes;
    int ieApLen;
    desc = (tAniEapolRsnKeyDesc *) data->keyDesc;
    if( NULL == desc )
    {
        return ANI_E_NULL_VALUE;
    }
    ieApLen = aniAsfPacketGetBytes(fsm->suppCtx->ieAp, &ieApBytes);
    if( ANI_IS_STATUS_SUCCESS( ieApLen ) )
    {
        // Mismatch = differing length OR differing bytes.
        if ((desc->keyDataLen != ieApLen) ||
            ( !vos_mem_compare(desc->keyData, ieApBytes, ieApLen) ))
        {
            // TODO: Send a fault here
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp AP sent inconsistent RSN IE!\n" );
            return ANI_E_FAILED;
        }
    }
    return ANI_OK;
}
/*
 * Replay-counter screening for frames from the AP. Fails with
 * ANI_E_REPLAY_CHECK_FAILED when the received counter is older than
 * the last one recorded. For a current/newer counter, *retransmit is
 * set when the frame's MIC flag matches the expected flag of a
 * retransmitted message (actualMicFlag == reTxMicFlag).
 */
static
int checkPeerReplayCounter(tSuppRsnFsm *fsm,
                           tAniEapolKeyAvailEventData *data,
                           v_BOOL_t *retransmit,
                           v_BOOL_t actualMicFlag,
                           v_BOOL_t reTxMicFlag)
{
    int cmp;
    tAniEapolRsnKeyDesc *rxDesc = data->keyDesc;

    if( NULL == rxDesc )
    {
        return ANI_E_NULL_VALUE;
    }
    *retransmit = eANI_BOOLEAN_FALSE;
    cmp = aniSsmReplayCtrCmp(fsm->peerReplayCtr, rxDesc->replayCounter);
    // The AP should send us a replay counter greater than or equal to
    // the last one it sent
    /*Unless we are forgiving with this we will have interop issues with some vendros like CSR*/
    if (cmp > 0)
    {
        // Our recorded counter is ahead of the received one: stale frame.
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "BP got old EAPOL replay counter from AP" );
        return ANI_E_REPLAY_CHECK_FAILED;
    }
    // Counter accepted; flag a retransmission when the MIC bit looks
    // like the expected re-TX shape.
    if ( actualMicFlag == reTxMicFlag )
    {
        *retransmit = eANI_BOOLEAN_TRUE;
    }
    return ANI_OK;
}
/*
 * Core transition function of the supplicant FSM. Consumes whichever
 * pending-event flags are set (authReq has absolute priority) and
 * dispatches on the current state:
 *   AUTHENTICATION -> GOT_PMK (unconditional, recurses once)
 *   GOT_PMK        -> STA_KEY_START on M1 (ackFlag set)
 *   STA_KEY_START  -> resend M2 on a re-TX'd M1, else verify MIC and
 *                     advance to STA_KEY_SET
 *   STA_KEY_SET    -> handle re-TX'd M3 / group-key message 1
 *   GROUP_KEY_SET  -> fall back to STA_KEY_SET (UCT)
 * 'arg' is a tAniEapolKeyAvailEventData* when an EAPOL frame event is
 * being delivered, NULL otherwise.
 */
static
int checkTransition(tSuppRsnFsm *fsm, void *arg)
{
    tAniEapolKeyAvailEventData *data;
    tAniEapolRsnKeyDesc *rxDesc;
    v_BOOL_t retransmit;
    int retVal;
    // A pending auth request overrides everything else.
    if (fsm->authReq)
    {
        gotoStateAuthentication(fsm);
        return ANI_OK;
    }
    switch (fsm->currentState)
    {
    case INITIALIZE:
        break;
    case AUTHENTICATION:
        gotoStateGotPmk(fsm);
        // Re-run immediately so a latched EAPOL event is handled now.
        checkTransition(fsm, arg);
        break;
    case GOT_PMK:
        if (fsm->eapolAvail) {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            // ackFlag marks message 1 of the 4-way handshake.
            if (rxDesc->info.ackFlag)
            {
                aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                      rxDesc->replayCounter);
                // Going from one state to another cannot be a retransmit
                retVal = gotoStateStaKeyStart(fsm, data, eANI_BOOLEAN_FALSE);
            }
        }
        break;
    case STA_KEY_START:
        if (fsm->eapolAvail) {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            if (rxDesc->info.ackFlag) {
                retVal = checkPeerReplayCounter(
                    fsm,
                    data,
                    &retransmit,
                    rxDesc->info.micFlag,
                    0); // MIC not set means check for re-Tx M1.
                if (retVal != ANI_OK)
                    return ANI_OK; // Caller should not fail
                if (retransmit) {
                    // The AP did not see our M2; send it again.
                    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                               "Resending EAPOL-Key Msg2 from "
                               "supplicant to AP" );
                    retVal = gotoStateStaKeyStart(fsm, data, eANI_BOOLEAN_TRUE);
                }
                else {
                    // Fresh M3: MIC must verify before we accept it.
                    retVal = checkMic(fsm, data, rxDesc->info.unicastFlag);
                    if (retVal != ANI_OK)
                    {
                        bapSuppDisconnect( fsm->ctx );
                        return retVal;
                    }
                    aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                          rxDesc->replayCounter);
                    gotoStateStaKeySet(fsm, data, eANI_BOOLEAN_FALSE);
                }
            }
        }
        break;
    case STA_KEY_SET:
        if (fsm->eapolAvail)
        {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            retVal = checkPeerReplayCounter(
                fsm,
                data,
                &retransmit,
                rxDesc->info.micFlag,
                1); // MIC set means check for re-Tx M3.
            if (retVal != ANI_OK)
                return ANI_OK; // Caller should not fail
            if (!retransmit)
            {
                retVal = checkMic(fsm, data, rxDesc->info.unicastFlag);
                if (retVal != ANI_OK)
                {
                    bapSuppDisconnect( fsm->ctx );
                    return retVal;
                }
                aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                      rxDesc->replayCounter);
            }
            if (rxDesc->info.unicastFlag)
            {
                /*
                 * Handle pairwise key message...in this state
                 * pairwise key messages can only be for retransmissions.
                 */
                if (retransmit)
                {
                    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                               "Resending EAPOL-Key Msg4 from "
                               "supplicant \n" );
                    retVal = gotoStateStaKeySet(fsm, data, eANI_BOOLEAN_TRUE);
                }
            }
            else
            {
                /*
                 * Handle group key message...with group key messages,
                 * the replay counter has to change on
                 * retransmissions.
                 */
                if (!retransmit)
                {
                    retVal = gotoStateGroupKeySet(fsm, data);
                    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
                    {
                        bapSuppDisconnect( fsm->ctx );
                        return retVal;
                    }
                }
            }
        }
        else {
            // No frame: a latched integrity failure triggers teardown.
            if (fsm->integFailed)
            {
                gotoStateKeyUpdate(fsm, arg);
            }
        }
        break;
    case GROUP_KEY_SET:
        // UCT: fall straight back to STA_KEY_SET with no frame data.
        gotoStateStaKeySet(fsm, NULL, eANI_BOOLEAN_FALSE);
        break;
    case KEY_UPDATE:
        gotoStateRekeyMsg(fsm, arg);
        break;
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Illegal state for SuppRsnFsm: %d",
                   fsm->currentState);
        VOS_ASSERT( 0 );
        return ANI_E_FAILED;
    }
    return ANI_OK;
}
/*
 * Parse an incoming EAPOL-Key frame and, for RSN ("new") descriptors,
 * feed it to the FSM as RSN_FSM_EAPOL_FRAME_AVAILABLE. Legacy 802.1x
 * RC4 key descriptors are rejected. The parsed descriptor is freed
 * before returning (aniEapolParseKey allocates it).
 */
static int
suppEapolKeyHandler( tSuppRsnFsm *fsm,
                     tAniPacket *eapolFrame,
                     tAniMacAddr suppMac)
{
    int retVal;
    int descType;
    void *keyDesc;
    tAniEapolRsnKeyDesc *rsnDesc;
    tAniEapolKeyAvailEventData data;
    do
    {
        retVal = aniEapolParseKey(eapolFrame, &descType, &keyDesc);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
        {
            // Parse failed: nothing was allocated, so return directly
            // rather than falling through to the free below.
            return retVal;
        }
        if (descType == ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW)
        {
            rsnDesc = (tAniEapolRsnKeyDesc *) keyDesc;
            /*
             * Pass on the event to the RSN FSM irrespective if it is
             * pairwise or not.
             */
            data.keyDesc = keyDesc;
            data.eapolFrame = eapolFrame;
            retVal = suppRsnFsmProcessEvent(fsm,
                                            RSN_FSM_EAPOL_FRAME_AVAILABLE,
                                            &data);
        }
        else {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: Got unexpected 802.1x RC4 Key message \n" );
            retVal = ANI_E_FAILED;
            break;
        }
    }while( 0 );
    aniEapolKeyFreeDesc(descType, keyDesc);
    return retVal;
}
//
//This function alwasy assume the incoming vos_packet is 802_3 frame.
//
//This function alwasy assume the incoming vos_packet is 802_3 frame.
/*
 * RX entry point registered with the BAP TX/RX layer. Converts the
 * incoming vos packet into a tAniPacket and hands it to the EAPOL
 * handler. A crude single-entry guard (fsm->fReceiving) drops frames
 * that arrive while a previous frame is still being processed.
 *
 * BUG FIX: the original cleared fsm->fReceiving unconditionally on
 * exit, so the invocation that DROPPED a frame (because the flag was
 * already set) wiped out the busy flag owned by the invocation still
 * processing -- defeating the guard. Only the caller that actually
 * claimed the flag may clear it.
 */
static int suppRsnRxFrameHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket )
{
    int retVal = ANI_ERROR;
    tAniPacket *pAniPacket;
    tBtampContext *ctx;
    tSuppRsnFsm *fsm;
    v_BOOL_t fClaimedBusyFlag = VOS_FALSE;   /* did THIS call set fReceiving? */
    /* Validate params */
    if ((pvosGCtx == NULL) || (NULL == pPacket))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "param is NULL in %s", __func__);
        return retVal;
    }
    ctx = (tBtampContext *)VOS_GET_BAP_CB( pvosGCtx );
    if (NULL == ctx)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "ctx is NULL in %s", __func__);
        return retVal;
    }
    fsm = &ctx->uFsm.suppFsm;
    if (NULL == fsm)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "fsm is NULL in %s", __func__);
        return retVal;
    }
    do
    {
        //ToDO: We need to synchronize this. For now, use the simplest form, drop the packet comes later.
        if( fsm->fReceiving )
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       " ******suppRsnRxFrameHandler receive eapol packet while processing. Drop the new comer\n" );
            break;
        }
        fsm->fReceiving = VOS_TRUE;
        fClaimedBusyFlag = VOS_TRUE;
        retVal = bapRsnFormPktFromVosPkt( &pAniPacket, pPacket );
        if( !ANI_IS_STATUS_SUCCESS( retVal ) ) break;
        //Now we can process the eapol frame
        //handler will free the pAniPacket
        bapRsnEapolHandler( fsm, pAniPacket, VOS_FALSE );
    }while( 0 );
    // Release the guard only if we were the ones who took it.
    if( fClaimedBusyFlag )
    {
        fsm->fReceiving = VOS_FALSE;
    }
    vos_pkt_return_packet( pPacket );
    return retVal;
}
/*
 * TX-completion callback for EAPOL frames sent by the supplicant.
 * On a failed transmission, retries up to suppConsts.maxTries times by
 * resending the cached lastEapol frame, then disconnects.
 *
 * NOTE(review): this supplicant-side handler reads ctx->uFsm.authFsm
 * (tAuthRsnFsm) rather than uFsm.suppFsm -- confirm whether the two
 * FSMs alias the fields used here (numTries, lastEapol, ctx) or
 * whether this should be the supplicant FSM.
 * NOTE(review): bapSuppDisconnect() is passed fsm->ctx->pvosGCtx here
 * but fsm->ctx at the other call sites in this file -- verify the
 * expected argument type.
 */
static int suppRsnTxCompleteHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket, VOS_STATUS retStatus )
{
    // NOTE(review): VOS_GET_BAP_CB is evaluated and the packet returned
    // before pvosGCtx is NULL-checked below.
    tBtampContext *ctx = (tBtampContext *)VOS_GET_BAP_CB( pvosGCtx );
    tAuthRsnFsm *fsm;
    vos_pkt_return_packet( pPacket );
    if (pvosGCtx == NULL)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "param is NULL in %s", __func__);
        return ANI_ERROR;
    }
    if (NULL == ctx)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "ctx is NULL in %s", __func__);
        return ANI_ERROR;
    }
    fsm = &ctx->uFsm.authFsm;
    if (NULL == fsm)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "fsm is NULL in %s", __func__);
        return ANI_ERROR;
    }
    //Synchronization needed
    if(!VOS_IS_STATUS_SUCCESS( retStatus ) )
    {
        //This is bad.
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp: TL Tx complete with error %d current state is %d\n", retStatus, fsm->currentState );
        if( fsm->numTries <= suppConsts.maxTries )
        {
            //retransmit
            fsm->numTries++;
            if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx, fsm->lastEapol ) ) )
            {
                bapSuppDisconnect( fsm->ctx->pvosGCtx );
            }
        }
        else
        {
            // Retry budget exhausted.
            bapSuppDisconnect( fsm->ctx->pvosGCtx );
        }
    }
    return ANI_OK;
}
/**
* suppEapolHandler
*
* Handles an incoming EAPOL frame on the supplicant side.
*
* @param eapolFrame the packet containing the EAPOL frame, with the
* head of the packet still at the start of the EAPOL frame
* @param dstMac the dstMac pointing inside the frame
* @param srcMac the srcMac pointing inside the frame
* @param type the type pointing inside the frame at the type field
*
* @return ANI_OK if the operation succeeds
*/
/**
 * suppEapolHandler
 *
 * Dispatch an incoming EAPOL frame on the supplicant side by its type
 * byte. Only EAPOL-Key frames are processed; plain EAP packets are
 * ignored (pure WPA2-PSK operation) and everything else, including
 * ASF alerts, is logged as unimplemented.
 */
void suppEapolHandler( tSuppRsnFsm *fsm, tAniPacket *eapolFrame,
                       tAniMacAddr dstMac,
                       tAniMacAddr srcMac,
                       v_U8_t *type )
{
    if (ANI_EAPOL_TYPE_PACKET == *type)
    {
        // EAP conversations are not used with a pre-shared key; drop.
    }
    else if (ANI_EAPOL_TYPE_KEY == *type)
    {
        suppEapolKeyHandler( fsm, eapolFrame, dstMac );
    }
    else
    {
        // Covers ANI_EAPOL_TYPE_ASF_ALERT and any unknown type.
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp: EAPOL type not implemented: %.2x\n", *type);
    }
}
| gpl-2.0 |
jrfastab/flow-net-next | drivers/nfc/mei_phy.c | 1201 | 3861 | /*
* MEI Library for mei bus nfc device access
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nfc.h>
#include "mei_phy.h"
/* Header prepended to each NFC message carried over the MEI bus. */
struct mei_nfc_hdr {
	u8 cmd;		/* command identifier */
	u8 status;	/* result/status byte */
	u16 req_id;	/* presumably matches a response to its request -- TODO confirm */
	u32 reserved;
	u16 data_size;	/* presumably payload length following the header -- TODO confirm */
} __packed;
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
#define MEI_DUMP_SKB_IN(info, skb) \
do { \
pr_debug("%s:\n", info); \
print_hex_dump_debug("mei in : ", DUMP_PREFIX_OFFSET, \
16, 1, (skb)->data, (skb)->len, false); \
} while (0)
#define MEI_DUMP_SKB_OUT(info, skb) \
do { \
pr_debug("%s:\n", info); \
print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \
16, 1, (skb)->data, (skb)->len, false); \
} while (0)
/*
 * Power up the MEI-attached NFC device and register the RX event
 * callback. Idempotent: returns immediately if already powered.
 * On callback-registration failure the device is disabled again so the
 * phy is left consistently powered off. Returns 0 or a negative errno.
 */
int nfc_mei_phy_enable(void *phy_id)
{
	int r;
	struct nfc_mei_phy *phy = phy_id;

	pr_info("%s\n", __func__);

	if (phy->powered == 1)
		return 0;

	r = mei_cl_enable_device(phy->device);
	if (r < 0) {
		pr_err("Could not enable device\n");
		return r;
	}

	r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
	if (r) {
		pr_err("Event cb registration failed\n");
		/* Roll back so enable can be retried cleanly. */
		mei_cl_disable_device(phy->device);
		phy->powered = 0;
		return r;
	}

	phy->powered = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(nfc_mei_phy_enable);
/* Power down the MEI-attached NFC device and mark the phy unpowered. */
void nfc_mei_phy_disable(void *phy_id)
{
	struct nfc_mei_phy *phy = phy_id;

	pr_info("%s\n", __func__);

	mei_cl_disable_device(phy->device);
	phy->powered = 0;
}
EXPORT_SYMBOL_GPL(nfc_mei_phy_disable);
/*
* Writing a frame must not return the number of written bytes.
* It must return either zero for success, or <0 for error.
* In addition, it must not alter the skb
*/
/*
 * Send one frame over the MEI client. Per the PHY contract, the byte
 * count from mei_cl_send() is collapsed to 0 on success; negative
 * error codes pass through unchanged. The skb is not modified.
 */
static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb)
{
	struct nfc_mei_phy *phy = phy_id;
	int sent;

	MEI_DUMP_SKB_OUT("mei frame sent", skb);

	sent = mei_cl_send(phy->device, skb->data, skb->len);

	return sent > 0 ? 0 : sent;
}
/*
 * MEI client event callback. On an RX event, reads one message from
 * the device into a freshly allocated skb, strips the MEI NFC header
 * and forwards the HCI payload to the NFC core. Frames shorter than
 * the header, allocation failures, and events after a hard fault are
 * silently dropped.
 */
void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
{
	struct nfc_mei_phy *phy = context;

	if (phy->hard_fault != 0)
		return;

	if (events & BIT(MEI_CL_EVENT_RX)) {
		struct sk_buff *skb;
		int reply_size;

		skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
		if (!skb)
			return;

		reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
		if (reply_size < MEI_NFC_HEADER_SIZE) {
			/* Error or runt frame: nothing usable to deliver. */
			kfree_skb(skb);
			return;
		}

		skb_put(skb, reply_size);
		/* Drop the mei_nfc_hdr; the NFC core wants the HCI frame only. */
		skb_pull(skb, MEI_NFC_HEADER_SIZE);

		MEI_DUMP_SKB_IN("mei frame read", skb);

		nfc_hci_recv_frame(phy->hdev, skb);
	}
}
EXPORT_SYMBOL_GPL(nfc_mei_event_cb);
/* PHY operations table exported to NFC HCI drivers using this MEI phy. */
struct nfc_phy_ops mei_phy_ops = {
	.write = nfc_mei_phy_write,
	.enable = nfc_mei_phy_enable,
	.disable = nfc_mei_phy_disable,
};
EXPORT_SYMBOL_GPL(mei_phy_ops);
/*
 * Allocate a zeroed phy for the given MEI client device and stash it
 * in the device's driver data. Returns NULL on allocation failure.
 */
struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device)
{
	struct nfc_mei_phy *phy = kzalloc(sizeof(*phy), GFP_KERNEL);

	if (!phy)
		return NULL;

	phy->device = device;
	mei_cl_set_drvdata(device, phy);

	return phy;
}
EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc);
/* Release a phy obtained from nfc_mei_phy_alloc(). */
void nfc_mei_phy_free(struct nfc_mei_phy *phy)
{
	kfree(phy);
}
EXPORT_SYMBOL_GPL(nfc_mei_phy_free);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mei bus NFC device interface");
| gpl-2.0 |
iamroot12a/kernel | fs/ocfs2/sysfile.c | 1969 | 4836 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* sysfile.c
*
* Initialize, read, write, etc. system files.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "inode.h"
#include "journal.h"
#include "sysfile.h"
#include "buffer_head_io.h"
static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
int type,
u32 slot);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key ocfs2_sysfile_cluster_lock_key[NUM_SYSTEM_INODES];
#endif
/* True when 'type' falls in the range of cluster-global system inodes
 * (shared across slots), as opposed to per-slot local ones. */
static inline int is_global_system_inode(int type)
{
	return (type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE) &&
	       (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE);
}
/*
 * Return the cache slot for a per-slot (local) system inode of the
 * given type, lazily allocating the whole local_system_inodes array on
 * first use. Returns NULL if the array cannot be allocated, in which
 * case the caller falls back to an uncached inode lookup.
 *
 * Improvement: use kcalloc() instead of an open-coded
 * kzalloc(size * count * count) so the element-count multiplication is
 * overflow-checked, per kernel allocation idiom.
 */
static struct inode **get_local_system_inode(struct ocfs2_super *osb,
					     int type,
					     u32 slot)
{
	int index;
	struct inode **local_system_inodes, **free = NULL;

	BUG_ON(slot == OCFS2_INVALID_SLOT);
	BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE ||
	       type > OCFS2_LAST_LOCAL_SYSTEM_INODE);

	spin_lock(&osb->osb_lock);
	local_system_inodes = osb->local_system_inodes;
	spin_unlock(&osb->osb_lock);

	if (unlikely(!local_system_inodes)) {
		/* One pointer per local-inode type, per slot. */
		local_system_inodes = kcalloc(NUM_LOCAL_SYSTEM_INODES *
					      osb->max_slots,
					      sizeof(struct inode *),
					      GFP_NOFS);
		if (!local_system_inodes) {
			mlog_errno(-ENOMEM);
			/*
			 * return NULL here so that ocfs2_get_sytem_file_inodes
			 * will try to create an inode and use it. We will try
			 * to initialize local_system_inodes next time.
			 */
			return NULL;
		}

		spin_lock(&osb->osb_lock);
		if (osb->local_system_inodes) {
			/* Someone has initialized it for us. */
			free = local_system_inodes;
			local_system_inodes = osb->local_system_inodes;
		} else
			osb->local_system_inodes = local_system_inodes;
		spin_unlock(&osb->osb_lock);
		/* Drop our copy if we lost the race (free is NULL otherwise). */
		kfree(free);
	}

	index = (slot * NUM_LOCAL_SYSTEM_INODES) +
		(type - OCFS2_FIRST_LOCAL_SYSTEM_INODE);

	return &local_system_inodes[index];
}
/*
 * Look up a system-file inode, serving it from the global or per-slot
 * cache array when possible. The returned inode carries a reference
 * the caller must drop with iput(); on the first lookup an extra
 * reference is taken for the cache array itself.
 * May return NULL if the underlying lookup fails.
 */
struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
					  int type,
					  u32 slot)
{
	struct inode *inode = NULL;
	struct inode **arr = NULL;

	/* avoid the lookup if cached in local system file array */
	if (is_global_system_inode(type)) {
		arr = &(osb->global_system_inodes[type]);
	} else
		arr = get_local_system_inode(osb, type, slot);

	mutex_lock(&osb->system_file_mutex);
	if (arr && ((inode = *arr) != NULL)) {
		/* get a ref in addition to the array ref */
		inode = igrab(inode);
		mutex_unlock(&osb->system_file_mutex);
		BUG_ON(!inode);

		return inode;
	}

	/* this gets one ref thru iget */
	inode = _ocfs2_get_system_file_inode(osb, type, slot);

	/* add one more if putting into array for first time */
	if (arr && inode) {
		*arr = igrab(inode);
		BUG_ON(!*arr);
	}
	mutex_unlock(&osb->system_file_mutex);
	return inode;
}
/*
 * Slow path for ocfs2_get_system_file_inode(): build the canonical
 * system-file name for (type, slot), resolve it in the system root
 * directory, and iget the inode flagged as a system file.
 * Returns NULL on lookup or iget failure.
 */
static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
						   int type,
						   u32 slot)
{
	char namebuf[40];
	struct inode *inode = NULL;
	u64 blkno;
	int status = 0;

	ocfs2_sprintf_system_inode_name(namebuf,
					sizeof(namebuf),
					type, slot);

	status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					    strlen(namebuf), &blkno);
	if (status < 0) {
		goto bail;
	}

	inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type);
	if (IS_ERR(inode)) {
		mlog_errno(PTR_ERR(inode));
		inode = NULL;
		goto bail;
	}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
	    type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
	    type == JOURNAL_SYSTEM_INODE) {
		/* Ignore inode lock on these inodes as the lock does not
		 * really belong to any process and lockdep cannot handle
		 * that */
		OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
	} else {
		lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
				 l_lockdep_map,
				 ocfs2_system_inodes[type].si_name,
				 &ocfs2_sysfile_cluster_lock_key[type], 0);
	}
#endif
bail:

	return inode;
}
| gpl-2.0 |
Scorpio92/linux_kernel_3.16.1 | drivers/isdn/hisax/w6692.c | 2225 | 29406 | /* $Id: w6692.c,v 1.18.2.4 2004/02/11 13:21:34 keil Exp $
*
* Winbond W6692 specific routines
*
* Author Petr Novak
* Copyright by Petr Novak <petr.novak@i.cz>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "w6692.h"
#include "isdnl1.h"
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
/* table entry in the PCI devices list */
typedef struct {
int vendor_id;
int device_id;
char *vendor_name;
char *card_name;
} PCI_ENTRY;
static const PCI_ENTRY id_list[] =
{
{PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, "Winbond", "W6692"},
{PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, "Dynalink/AsusCom", "IS64PH"},
{0, 0, "U.S.Robotics", "ISDN PCI Card TA"}
};
#define W6692_SV_USR 0x16ec
#define W6692_SD_USR 0x3409
#define W6692_WINBOND 0
#define W6692_DYNALINK 1
#define W6692_USR 2
static const char *w6692_revision = "$Revision: 1.18.2.4 $";
#define DBUSY_TIMER_VALUE 80
static char *W6692Ver[] =
{"W6692 V00", "W6692 V01", "W6692 V10",
"W6692 V11"};
/* Read the W6692 chip revision register and report it via printk. */
static void
W6692Version(struct IsdnCardState *cs, char *s)
{
	int rev;

	rev = cs->readW6692(cs, W_D_RBCH);
	printk(KERN_INFO "%s Winbond W6692 version (%x): %s\n", s, rev,
	       W6692Ver[(rev >> 6) & 3]);
}
/* Issue a layer-1 (physical layer) command via the D-channel CIX register. */
static void
ph_command(struct IsdnCardState *cs, unsigned int cmd)
{
	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "ph_command %x", cmd);
	cs->writeisac(cs, W_CIX, cmd);
}
/*
 * Translate the current W6692 layer-1 indication (dc.w6692.ph_state)
 * into the corresponding HiSax l1_msg() event.
 * Note: the W_L1CMD_RST case intentionally falls through to W_L1IND_CD.
 */
static void
W6692_new_ph(struct IsdnCardState *cs)
{
	switch (cs->dc.w6692.ph_state) {
	case (W_L1CMD_RST):
		ph_command(cs, W_L1CMD_DRC);
		l1_msg(cs, HW_RESET | INDICATION, NULL);
		/* fallthru */
	case (W_L1IND_CD):
		l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
		break;
	case (W_L1IND_DRD):
		l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
		break;
	case (W_L1IND_CE):
		l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
		break;
	case (W_L1IND_LD):
		l1_msg(cs, HW_RSYNC | INDICATION, NULL);
		break;
	case (W_L1IND_ARD):
		l1_msg(cs, HW_INFO2 | INDICATION, NULL);
		break;
	case (W_L1IND_AI8):
		l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
		break;
	case (W_L1IND_AI10):
		l1_msg(cs, HW_INFO4_P10 | INDICATION, NULL);
		break;
	default:
		break;
	}
}
/*
 * Bottom half (workqueue) handler: processes events flagged by the
 * interrupt handler in cs->event outside interrupt context.
 */
static void
W6692_bh(struct work_struct *work)
{
	struct IsdnCardState *cs =
		container_of(work, struct IsdnCardState, tqueue);
	struct PStack *stptr;

	if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) {
		if (cs->debug)
			debugl1(cs, "D-Channel Busy cleared");
		/* notify every attached protocol stack that the pause ended */
		stptr = cs->stlist;
		while (stptr != NULL) {
			stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL);
			stptr = stptr->next;
		}
	}
	if (test_and_clear_bit(D_L1STATECHANGE, &cs->event))
		W6692_new_ph(cs);
	if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
		DChannel_proc_rcv(cs);
	if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
		DChannel_proc_xmt(cs);
	/*
	  if (test_and_clear_bit(D_RX_MON1, &cs->event))
	  arcofi_fsm(cs, ARCOFI_RX_END, NULL);
	  if (test_and_clear_bit(D_TX_MON1, &cs->event))
	  arcofi_fsm(cs, ARCOFI_TX_END, NULL);
	*/
}
/*
 * Drain 'count' bytes from the D-channel receive FIFO into cs->rcvbuf
 * and acknowledge the FIFO to the chip (W_D_CMDR_RACK).
 * On buffer overrun the frame is dropped and rcvidx reset.
 */
static void
W6692_empty_fifo(struct IsdnCardState *cs, int count)
{
	u_char *ptr;

	if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
		debugl1(cs, "W6692_empty_fifo");

	if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692_empty_fifo overrun %d",
				cs->rcvidx + count);
		/* still ack the FIFO so the receiver can continue */
		cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK);
		cs->rcvidx = 0;
		return;
	}
	ptr = cs->rcvbuf + cs->rcvidx;
	cs->rcvidx += count;
	cs->readW6692fifo(cs, ptr, count);
	cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK);
	if (cs->debug & L1_DEB_ISAC_FIFO) {
		char *t = cs->dlog;

		t += sprintf(t, "W6692_empty_fifo cnt %d", count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", cs->dlog);
	}
}
/*
 * Push up to W_D_FIFO_THRESH bytes of cs->tx_skb into the D-channel
 * transmit FIFO.  XME (message end) is only set on the final chunk.
 * Arms the D-channel busy watchdog (dbusytimer) for DBUSY_TIMER_VALUE ms;
 * dbusytimer.function/data are set up in initW6692.
 */
static void
W6692_fill_fifo(struct IsdnCardState *cs)
{
	int count, more;
	u_char *ptr;

	if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
		debugl1(cs, "W6692_fill_fifo");

	if (!cs->tx_skb)
		return;

	count = cs->tx_skb->len;
	if (count <= 0)
		return;

	more = 0;
	if (count > W_D_FIFO_THRESH) {
		more = !0;	/* frame continues beyond this chunk */
		count = W_D_FIFO_THRESH;
	}
	ptr = cs->tx_skb->data;
	skb_pull(cs->tx_skb, count);
	cs->tx_cnt += count;
	cs->writeW6692fifo(cs, ptr, count);
	cs->writeW6692(cs, W_D_CMDR, more ? W_D_CMDR_XMS : (W_D_CMDR_XMS | W_D_CMDR_XME));
	if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
		debugl1(cs, "W6692_fill_fifo dbusytimer running");
		del_timer(&cs->dbusytimer);
	}
	init_timer(&cs->dbusytimer);
	cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
	add_timer(&cs->dbusytimer);
	if (cs->debug & L1_DEB_ISAC_FIFO) {
		char *t = cs->dlog;

		t += sprintf(t, "W6692_fill_fifo cnt %d", count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", cs->dlog);
	}
}
/*
 * Drain 'count' bytes from a B-channel receive FIFO into the channel's
 * rcvbuf and acknowledge the FIFO (RACK, keeping the receiver active).
 * Oversized incoming packets are dropped with rcvidx reset.
 */
static void
W6692B_empty_fifo(struct BCState *bcs, int count)
{
	u_char *ptr;
	struct IsdnCardState *cs = bcs->cs;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "W6692B_empty_fifo");

	if (bcs->hw.w6692.rcvidx + count > HSCX_BUFMAX) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692B_empty_fifo: incoming packet too large");
		cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
		bcs->hw.w6692.rcvidx = 0;
		return;
	}
	ptr = bcs->hw.w6692.rcvbuf + bcs->hw.w6692.rcvidx;
	bcs->hw.w6692.rcvidx += count;
	READW6692BFIFO(cs, bcs->channel, ptr, count);
	cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
			     bcs->channel + '1', count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", bcs->blog);
	}
}
/*
 * Push up to W_B_FIFO_THRESH bytes of bcs->tx_skb into a B-channel
 * transmit FIFO.  In transparent mode 'more' is always set (continuous
 * stream); in HDLC mode XME marks the end of the frame.
 */
static void
W6692B_fill_fifo(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int more, count;
	u_char *ptr;

	if (!bcs->tx_skb)
		return;
	if (bcs->tx_skb->len <= 0)
		return;

	more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
	if (bcs->tx_skb->len > W_B_FIFO_THRESH) {
		more = 1;
		count = W_B_FIFO_THRESH;
	} else
		count = bcs->tx_skb->len;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "W6692B_fill_fifo%s%d", (more ? " " : " last "), count);

	ptr = bcs->tx_skb->data;
	skb_pull(bcs->tx_skb, count);
	bcs->tx_cnt -= count;
	/* remember what was pushed so an aborted frame can be rewound */
	bcs->hw.w6692.count += count;
	WRITEW6692BFIFO(cs, bcs->channel, ptr, count);
	cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACT | W_B_CMDR_XMS | (more ? 0 : W_B_CMDR_XME));
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
			     bcs->channel + '1', count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", bcs->blog);
	}
}
/*
 * Service a B-channel interrupt for hardware channel 'bchan' (0 or 1).
 * Reads W_B_EXIR once and handles, in order: receive message end (RME),
 * receive FIFO threshold (RMR), transmit underrun (XDUN) and transmit
 * FIFO ready (XFR).  Called from W6692_interrupt with cs->lock held.
 */
static void
W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
{
	u_char val;
	u_char r;
	struct BCState *bcs;
	struct sk_buff *skb;
	int count;

	/* map the hardware channel number onto the matching BCState */
	bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs + 1);
	val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR);
	debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val);

	if (!test_bit(BC_FLG_INIT, &bcs->Flag)) {
		debugl1(cs, "W6692B not INIT yet");
		return;
	}
	if (val & W_B_EXI_RME) {	/* RME: complete frame received */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) {
			/* overflow / CRC / abort: discard and reset receiver */
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x", r);
			if ((r & W_B_STAR_RDOV) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B RDOV mode=%d",
						bcs->mode);
			if (r & W_B_STAR_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B CRC error");
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
		} else {
			/* RBCL counts modulo the FIFO size; 0 means a full FIFO */
			count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1);
			if (count == 0)
				count = W_B_FIFO_THRESH;
			W6692B_empty_fifo(bcs, count);
			if ((count = bcs->hw.w6692.rcvidx) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "W6692 Bchan Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.w6692.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & W_B_EXI_RMR) {	/* RMR: receive FIFO threshold reached */
		W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_RDOV) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B RDOV(RMR) mode=%d", bcs->mode);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
			if (bcs->mode != L1_MODE_TRANS)
				bcs->hw.w6692.rcvidx = 0;
		}
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.w6692.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & W_B_EXI_XDUN) {	/* XDUN: transmit data underrun */
		cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692 B EXIR %x Lost TX", val);
		if (bcs->mode == 1)	/* NOTE(review): magic 1 == L1_MODE_TRANS, presumably */
			W6692B_fill_fifo(bcs);
		else {
			/* Here we lost an TX interrupt, so
			 * restart transmitting the whole frame.
			 */
			if (bcs->tx_skb) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		return;	/* do not also process XFR after an underrun */
	}
	if (val & W_B_EXI_XFR) {	/* XFR: transmit FIFO ready */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_XDOW) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x XDOW", r);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
			if (bcs->tx_skb && (bcs->mode != 1)) {
				/* rewind the partly-sent frame for retransmission */
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				W6692B_fill_fifo(bcs);
				return;
			} else {
				/* frame fully transmitted: ack to layer 2 if wanted */
				if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
				    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.w6692.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.w6692.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.w6692.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			W6692B_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
/*
 * Top-level (shared) interrupt handler for the W6692.
 * Reads W_ISTA and dispatches D-channel receive/transmit, extended (EXI)
 * and B-channel interrupts.  Loops up to 5 more times while ISTA stays
 * non-zero; if the chip still signals interrupts after that, all chip
 * interrupts are masked to break a potential IRQ storm.
 */
static irqreturn_t
W6692_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val, exval, v1;
	struct sk_buff *skb;
	u_int count;
	u_long flags;
	int icnt = 5;

	spin_lock_irqsave(&cs->lock, flags);
	val = cs->readW6692(cs, W_ISTA);
	if (!val) {
		/* not our interrupt (the IRQ line is shared) */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
StartW6692:
	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "W6692 ISTA %x", val);

	if (val & W_INT_D_RME) {	/* RME: D-channel frame complete */
		exval = cs->readW6692(cs, W_D_RSTA);
		if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) {
			/* bad frame: log the cause and reset the receiver */
			if (exval & W_D_RSTA_RDOV)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 RDOV");
			if (exval & W_D_RSTA_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 D-channel CRC error");
			if (exval & W_D_RSTA_RMB)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 D-channel ABORT");
			cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST);
		} else {
			/* RBCL counts modulo FIFO size; 0 means a full FIFO */
			count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1);
			if (count == 0)
				count = W_D_FIFO_THRESH;
			W6692_empty_fifo(cs, count);
			if ((count = cs->rcvidx) > 0) {
				cs->rcvidx = 0;
				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
					printk(KERN_WARNING "HiSax: D receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), cs->rcvbuf, count);
					skb_queue_tail(&cs->rq, skb);
				}
			}
		}
		cs->rcvidx = 0;
		schedule_event(cs, D_RCVBUFREADY);
	}
	if (val & W_INT_D_RMR) {	/* RMR: receive FIFO threshold */
		W6692_empty_fifo(cs, W_D_FIFO_THRESH);
	}
	if (val & W_INT_D_XFR) {	/* XFR: transmit FIFO ready */
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len) {
				W6692_fill_fifo(cs);
				goto afterXFR;
			} else {
				dev_kfree_skb_irq(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			}
		}
		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
			cs->tx_cnt = 0;
			W6692_fill_fifo(cs);
		} else
			schedule_event(cs, D_XMTBUFREADY);
	}
afterXFR:
	if (val & (W_INT_XINT0 | W_INT_XINT1)) {	/* XINT0/1 - never */
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "W6692 spurious XINT!");
	}
	if (val & W_INT_D_EXI) {	/* EXI: extended interrupt register */
		exval = cs->readW6692(cs, W_D_EXIR);
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692 D_EXIR %02x", exval);
		if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) {	/* Transmit underrun/collision */
			debugl1(cs, "W6692 D-chan underrun/collision");
			printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n");
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				schedule_event(cs, D_CLEARBUSY);
			if (cs->tx_skb) {	/* Restart frame */
				skb_push(cs->tx_skb, cs->tx_cnt);
				cs->tx_cnt = 0;
				W6692_fill_fifo(cs);
			} else {
				printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n");
				debugl1(cs, "W6692 XDUN/XCOL no skb");
				cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST);
			}
		}
		if (exval & W_D_EXI_RDOV) {	/* RDOV: receive data overflow */
			debugl1(cs, "W6692 D-channel RDOV");
			printk(KERN_WARNING "HiSax: W6692 D-RDOV\n");
			cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST);
		}
		if (exval & W_D_EXI_TIN2) {	/* TIN2 - never */
			debugl1(cs, "W6692 spurious TIN2 interrupt");
		}
		if (exval & W_D_EXI_MOC) {	/* MOC - not supported */
			debugl1(cs, "W6692 spurious MOC interrupt");
			v1 = cs->readW6692(cs, W_MOSR);
			debugl1(cs, "W6692 MOSR %02x", v1);
		}
		if (exval & W_D_EXI_ISC) {	/* ISC - Level1 change */
			v1 = cs->readW6692(cs, W_CIR);
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "W6692 ISC CIR=0x%02X", v1);
			if (v1 & W_CIR_ICC) {
				cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK;
				if (cs->debug & L1_DEB_ISAC)
					debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state);
				schedule_event(cs, D_L1STATECHANGE);
			}
			if (v1 & W_CIR_SCC) {
				v1 = cs->readW6692(cs, W_SQR);
				debugl1(cs, "W6692 SCC SQR=0x%02X", v1);
			}
		}
		if (exval & W_D_EXI_WEXP) {
			debugl1(cs, "W6692 spurious WEXP interrupt!");
		}
		if (exval & W_D_EXI_TEXP) {
			debugl1(cs, "W6692 spurious TEXP interrupt!");
		}
	}
	if (val & W_INT_B1_EXI) {
		debugl1(cs, "W6692 B channel 1 interrupt");
		W6692B_interrupt(cs, 0);
	}
	if (val & W_INT_B2_EXI) {
		debugl1(cs, "W6692 B channel 2 interrupt");
		W6692B_interrupt(cs, 1);
	}
	/* re-check ISTA; loop at most 'icnt' more times */
	val = cs->readW6692(cs, W_ISTA);
	if (val && icnt) {
		icnt--;
		goto StartW6692;
	}
	if (!icnt) {
		printk(KERN_WARNING "W6692 IRQ LOOP\n");
		cs->writeW6692(cs, W_IMASK, 0xff);	/* mask all interrupts */
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * D-channel layer-1 hardware request dispatcher (st->l1.l1hw callback).
 * Handles PH_DATA/PH_PULL transmit requests and HW_* control requests
 * from the HiSax core.  Each case takes cs->lock only around the
 * hardware/state manipulation it needs.
 */
static void
W6692_l1hw(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
	struct sk_buff *skb = arg;
	u_long flags;
	int val;

	switch (pr) {
	case (PH_DATA | REQUEST):
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		spin_lock_irqsave(&cs->lock, flags);
		if (cs->tx_skb) {
			/* transmitter busy: queue for later */
			skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#endif
		} else {
			cs->tx_skb = skb;
			cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA", 0);
#endif
			W6692_fill_fifo(cs);
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (PH_PULL | INDICATION):
		spin_lock_irqsave(&cs->lock, flags);
		if (cs->tx_skb) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
			skb_queue_tail(&cs->sq, skb);
			spin_unlock_irqrestore(&cs->lock, flags);
			break;
		}
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		cs->tx_skb = skb;
		cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
#endif
		W6692_fill_fifo(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (PH_PULL | REQUEST):
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			debugl1(cs, "-> PH_REQUEST_PULL");
#endif
		if (!cs->tx_skb) {
			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		} else
			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case (HW_RESET | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		if ((cs->dc.w6692.ph_state == W_L1IND_DRD)) {
			/* already deactivated: just re-enable the clock */
			ph_command(cs, W_L1CMD_ECK);
			spin_unlock_irqrestore(&cs->lock, flags);
		} else {
			ph_command(cs, W_L1CMD_RST);
			cs->dc.w6692.ph_state = W_L1CMD_RST;
			spin_unlock_irqrestore(&cs->lock, flags);
			W6692_new_ph(cs);
		}
		break;
	case (HW_ENABLE | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		ph_command(cs, W_L1CMD_ECK);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (HW_INFO3 | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		ph_command(cs, W_L1CMD_AR8);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (HW_TESTLOOP | REQUEST):
		val = 0;
		if (1 & (long) arg)
			val |= 0x0c;
		if (2 & (long) arg)
			val |= 0x3;
		/* !!! not implemented yet */
		break;
	case (HW_DEACTIVATE | RESPONSE):
		/* flush all pending D-channel traffic and stop the watchdog */
		skb_queue_purge(&cs->rq);
		skb_queue_purge(&cs->sq);
		if (cs->tx_skb) {
			dev_kfree_skb_any(cs->tx_skb);
			cs->tx_skb = NULL;
		}
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		break;
	default:
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692_l1hw unknown %04x", pr);
		break;
	}
}
/* Install the D-channel layer-1 hardware callback into a protocol stack. */
static void
setstack_W6692(struct PStack *st, struct IsdnCardState *cs)
{
	st->l1.l1hw = W6692_l1hw;
}
/* D-channel close callback: nothing to release for the W6692. */
static void
DC_Close_W6692(struct IsdnCardState *cs)
{
}
/*
 * D-channel busy watchdog, armed by W6692_fill_fifo.  If the transmitter
 * is genuinely busy (XBZ set) the attached stacks get PH_PAUSE; otherwise
 * the stuck frame is discarded, the transmitter is reset and the interrupt
 * handler is invoked once by hand to restart transmission.
 */
static void
dbusy_timer_handler(struct IsdnCardState *cs)
{
	struct PStack *stptr;
	int rbch, star;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
		rbch = cs->readW6692(cs, W_D_RBCH);
		star = cs->readW6692(cs, W_D_STAR);
		if (cs->debug)
			debugl1(cs, "D-Channel Busy D_RBCH %02x D_STAR %02x",
				rbch, star);
		if (star & W_D_STAR_XBZ) {	/* D-Channel Busy */
			test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
			stptr = cs->stlist;
			while (stptr != NULL) {
				stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL);
				stptr = stptr->next;
			}
		} else {
			/* discard frame; reset transceiver */
			test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
			if (cs->tx_skb) {
				dev_kfree_skb_any(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			} else {
				printk(KERN_WARNING "HiSax: W6692 D-Channel Busy no skb\n");
				debugl1(cs, "D-Channel Busy no skb");
			}
			cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST);	/* Transmitter reset */
			/* drop the lock before re-entering the irq handler,
			 * which takes cs->lock itself */
			spin_unlock_irqrestore(&cs->lock, flags);
			cs->irq_func(cs->irq, cs);
			return;
		}
	}
	spin_unlock_irqrestore(&cs->lock, flags);
}
/*
 * Program a B-channel for the requested layer-1 mode
 * (L1_MODE_NULL / L1_MODE_TRANS / L1_MODE_HDLC) and unmask its interrupts.
 */
static void
W6692Bmode(struct BCState *bcs, int mode, int bchan)
{
	struct IsdnCardState *cs = bcs->cs;

	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "w6692 %c mode %d ichan %d",
			'1' + bchan, mode, bchan);
	bcs->mode = mode;
	bcs->channel = bchan;
	bcs->hw.w6692.bchan = bchan;

	switch (mode) {
	case (L1_MODE_NULL):
		cs->BC_Write_Reg(cs, bchan, W_B_MODE, 0);
		break;
	case (L1_MODE_TRANS):
		cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_MMS);
		break;
	case (L1_MODE_HDLC):
		cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_ITF);
		/* no address recognition: accept all frames */
		cs->BC_Write_Reg(cs, bchan, W_B_ADM1, 0xff);
		cs->BC_Write_Reg(cs, bchan, W_B_ADM2, 0xff);
		break;
	}
	if (mode)
		cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RRST |
				 W_B_CMDR_RACT | W_B_CMDR_XRST);
	cs->BC_Write_Reg(cs, bchan, W_B_EXIM, 0x00);	/* unmask B-channel irqs */
}
/*
 * B-channel layer-2 -> layer-1 request dispatcher (st->l2.l2l1 callback).
 * Queues or transmits data and handles channel (de)activation.
 */
static void
W6692_l2l1(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;
	struct BCState *bcs = st->l1.bcs;
	u_long flags;

	switch (pr) {
	case (PH_DATA | REQUEST):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		if (bcs->tx_skb) {
			/* transmitter busy: queue for later */
			skb_queue_tail(&bcs->squeue, skb);
		} else {
			bcs->tx_skb = skb;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			bcs->hw.w6692.count = 0;
			bcs->cs->BC_Send_Data(bcs);
		}
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		break;
	case (PH_PULL | INDICATION):
		if (bcs->tx_skb) {
			printk(KERN_WARNING "W6692_l2l1: this shouldn't happen\n");
			break;
		}
		spin_lock_irqsave(&bcs->cs->lock, flags);
		test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
		bcs->tx_skb = skb;
		bcs->hw.w6692.count = 0;
		bcs->cs->BC_Send_Data(bcs);
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		break;
	case (PH_PULL | REQUEST):
		if (!bcs->tx_skb) {
			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		} else
			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case (PH_ACTIVATE | REQUEST):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
		W6692Bmode(bcs, st->l1.mode, st->l1.bc);
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		l1_msg_b(st, pr, arg);
		break;
	case (PH_DEACTIVATE | REQUEST):
		l1_msg_b(st, pr, arg);
		break;
	case (PH_DEACTIVATE | CONFIRM):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		W6692Bmode(bcs, 0, st->l1.bc);	/* back to L1_MODE_NULL */
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
		break;
	}
}
/*
 * Tear down a B-channel: switch it to L1_MODE_NULL and, if it was
 * initialised, free its buffers and flush pending queues.
 */
static void
close_w6692state(struct BCState *bcs)
{
	W6692Bmode(bcs, 0, bcs->channel);
	if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
		kfree(bcs->hw.w6692.rcvbuf);
		bcs->hw.w6692.rcvbuf = NULL;
		kfree(bcs->blog);
		bcs->blog = NULL;
		skb_queue_purge(&bcs->rqueue);
		skb_queue_purge(&bcs->squeue);
		if (bcs->tx_skb) {
			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = NULL;
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		}
	}
}
/*
 * Allocate receive/log buffers and initialise the queues for a B-channel.
 * Returns 0 on success, non-zero on allocation failure (with any partial
 * allocation rolled back and BC_FLG_INIT cleared).
 */
static int
open_w6692state(struct IsdnCardState *cs, struct BCState *bcs)
{
	if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
		if (!(bcs->hw.w6692.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for w6692.rcvbuf\n");
			test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
			return (1);
		}
		if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for bcs->blog\n");
			test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
			kfree(bcs->hw.w6692.rcvbuf);
			bcs->hw.w6692.rcvbuf = NULL;
			return (2);
		}
		skb_queue_head_init(&bcs->rqueue);
		skb_queue_head_init(&bcs->squeue);
	}
	/* always reset the transfer state, even if already initialised */
	bcs->tx_skb = NULL;
	test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
	bcs->event = 0;
	bcs->hw.w6692.rcvidx = 0;
	bcs->tx_cnt = 0;
	return (0);
}
/*
 * BC_SetStack callback: bind a B-channel state to a protocol stack.
 * Returns 0 on success, -1 if the channel buffers could not be allocated.
 */
static int
setstack_w6692(struct PStack *st, struct BCState *bcs)
{
	bcs->channel = st->l1.bc;
	if (open_w6692state(st->l1.hardware, bcs))
		return (-1);
	st->l1.bcs = bcs;
	st->l2.l2l1 = W6692_l2l1;
	setstack_manager(st);
	bcs->st = st;
	setstack_l1_B(st);
	return (0);
}
/*
 * Hard-reset the W6692 and program the default register set:
 * software reset pulse, mask all interrupts, then enable the D-channel
 * receiver and unmask the B-channel interrupt sources (IMASK 0x18).
 */
static void resetW6692(struct IsdnCardState *cs)
{
	cs->writeW6692(cs, W_D_CTL, W_D_CTL_SRST);
	mdelay(10);
	cs->writeW6692(cs, W_D_CTL, 0x00);
	mdelay(10);
	cs->writeW6692(cs, W_IMASK, 0xff);	/* mask everything during setup */
	cs->writeW6692(cs, W_D_SAM, 0xff);
	cs->writeW6692(cs, W_D_TAM, 0xff);
	cs->writeW6692(cs, W_D_EXIM, 0x00);
	cs->writeW6692(cs, W_D_MODE, W_D_MODE_RACT);
	cs->writeW6692(cs, W_IMASK, 0x18);	/* leave only B-chan irqs masked off */
	if (cs->subtyp == W6692_USR) {
		/* seems that USR implemented some power control features
		 * Pin 79 is connected to the oscillator circuit so we
		 * have to handle it here
		 */
		cs->writeW6692(cs, W_PCTL, 0x80);
		cs->writeW6692(cs, W_XDATA, 0x00);
	}
}
/*
 * Initialise the card.  'part' is a bitmask: bit 0 sets up the software
 * state (callbacks, watchdog timer, both B-channels) and resets the chip,
 * bit 1 re-enables all interrupt sources and resets the D-channel.
 */
static void initW6692(struct IsdnCardState *cs, int part)
{
	if (part & 1) {
		cs->setstack_d = setstack_W6692;
		cs->DC_Close = DC_Close_W6692;
		cs->dbusytimer.function = (void *) dbusy_timer_handler;
		cs->dbusytimer.data = (long) cs;
		init_timer(&cs->dbusytimer);
		resetW6692(cs);
		ph_command(cs, W_L1CMD_RST);
		cs->dc.w6692.ph_state = W_L1CMD_RST;
		W6692_new_ph(cs);
		ph_command(cs, W_L1CMD_ECK);
		cs->bcs[0].BC_SetStack = setstack_w6692;
		cs->bcs[1].BC_SetStack = setstack_w6692;
		cs->bcs[0].BC_Close = close_w6692state;
		cs->bcs[1].BC_Close = close_w6692state;
		W6692Bmode(cs->bcs, 0, 0);
		W6692Bmode(cs->bcs + 1, 0, 0);
	}
	if (part & 2) {
		/* Reenable all IRQ */
		cs->writeW6692(cs, W_IMASK, 0x18);
		cs->writeW6692(cs, W_D_EXIM, 0x00);
		cs->BC_Write_Reg(cs, 0, W_B_EXIM, 0x00);
		cs->BC_Write_Reg(cs, 1, W_B_EXIM, 0x00);
		/* Reset D-chan receiver and transmitter */
		cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST | W_D_CMDR_XRST);
	}
}
/* Interface functions */
/* Read one W6692 D-channel register via port I/O. */
static u_char
ReadW6692(struct IsdnCardState *cs, u_char offset)
{
	unsigned int port = cs->hw.w6692.iobase + offset;

	return inb(port);
}
/* Write one W6692 D-channel register via port I/O. */
static void
WriteW6692(struct IsdnCardState *cs, u_char offset, u_char value)
{
	unsigned int port = cs->hw.w6692.iobase + offset;

	outb(value, port);
}
/* Bulk-read 'size' bytes from the D-channel receive FIFO. */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	unsigned int port = cs->hw.w6692.iobase + W_D_RFIFO;

	insb(port, data, size);
}
/* Bulk-write 'size' bytes to the D-channel transmit FIFO. */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	unsigned int port = cs->hw.w6692.iobase + W_D_XFIFO;

	outsb(port, data, size);
}
/* Read a B-channel register; channel 1's register bank sits at +0x40. */
static u_char
ReadW6692B(struct IsdnCardState *cs, int bchan, u_char offset)
{
	unsigned int port = cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset;

	return inb(port);
}
/* Write a B-channel register; channel 1's register bank sits at +0x40. */
static void
WriteW6692B(struct IsdnCardState *cs, int bchan, u_char offset, u_char value)
{
	unsigned int port = cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset;

	outb(value, port);
}
/*
 * HiSax card-control callback: handles reset, release (mask irqs and
 * free the I/O region), init and self-test requests.  Always returns 0.
 */
static int
w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	switch (mt) {
	case CARD_RESET:
		resetW6692(cs);
		return (0);
	case CARD_RELEASE:
		cs->writeW6692(cs, W_IMASK, 0xff);	/* mask all interrupts */
		release_region(cs->hw.w6692.iobase, 256);
		if (cs->subtyp == W6692_USR) {
			/* power down the USR card's oscillator circuit */
			cs->writeW6692(cs, W_XDATA, 0x04);
		}
		return (0);
	case CARD_INIT:
		initW6692(cs, 3);
		return (0);
	case CARD_TEST:
		return (0);
	}
	return (0);
}
static int id_idx;
static struct pci_dev *dev_w6692 = NULL;
/*
 * Probe for a supported W6692-based PCI card, claim its I/O region and
 * wire up all the driver callbacks.  Returns 1 on success, 0 on failure
 * (HiSax setup convention).
 */
int setup_w6692(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	u_char found = 0;
	u_char pci_irq = 0;
	u_int pci_ioaddr = 0;

	strcpy(tmp, w6692_revision);
	printk(KERN_INFO "HiSax: W6692 driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_W6692)
		return (0);
	/* scan the known vendor/device id table; on enable failure continue
	 * the search from the same table entry with the next device */
	while (id_list[id_idx].vendor_id) {
		dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id,
						  id_list[id_idx].device_id,
						  dev_w6692);
		if (dev_w6692) {
			if (pci_enable_device(dev_w6692))
				continue;
			cs->subtyp = id_idx;
			break;
		}
		id_idx++;
	}
	if (dev_w6692) {
		found = 1;
		pci_irq = dev_w6692->irq;
		/* I think address 0 is always the configuration area */
		/* and address 1 is the real IO space KKe 03.09.99 */
		pci_ioaddr = pci_resource_start(dev_w6692, 1);
		/* USR ISDN PCI card TA need some special handling */
		if (cs->subtyp == W6692_WINBOND) {
			if ((W6692_SV_USR == dev_w6692->subsystem_vendor) &&
			    (W6692_SD_USR == dev_w6692->subsystem_device)) {
				cs->subtyp = W6692_USR;
			}
		}
	}
	if (!found) {
		printk(KERN_WARNING "W6692: No PCI card found\n");
		return (0);
	}
	cs->irq = pci_irq;
	if (!cs->irq) {
		printk(KERN_WARNING "W6692: No IRQ for PCI card found\n");
		return (0);
	}
	if (!pci_ioaddr) {
		printk(KERN_WARNING "W6692: NO I/O Base Address found\n");
		return (0);
	}
	cs->hw.w6692.iobase = pci_ioaddr;
	printk(KERN_INFO "Found: %s %s, I/O base: 0x%x, irq: %d\n",
	       id_list[cs->subtyp].vendor_name, id_list[cs->subtyp].card_name,
	       pci_ioaddr, pci_irq);
	if (!request_region(cs->hw.w6692.iobase, 256, id_list[cs->subtyp].card_name)) {
		printk(KERN_WARNING
		       "HiSax: %s I/O ports %x-%x already in use\n",
		       id_list[cs->subtyp].card_name,
		       cs->hw.w6692.iobase,
		       cs->hw.w6692.iobase + 255);
		return (0);
	}
	printk(KERN_INFO
	       "HiSax: %s config irq:%d I/O:%x\n",
	       id_list[cs->subtyp].card_name, cs->irq,
	       cs->hw.w6692.iobase);
	INIT_WORK(&cs->tqueue, W6692_bh);
	cs->readW6692 = &ReadW6692;
	cs->writeW6692 = &WriteW6692;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadW6692B;
	cs->BC_Write_Reg = &WriteW6692B;
	cs->BC_Send_Data = &W6692B_fill_fifo;
	cs->cardmsg = &w6692_card_msg;
	cs->irq_func = &W6692_interrupt;
	cs->irq_flags |= IRQF_SHARED;	/* PCI interrupt lines may be shared */
	W6692Version(cs, "W6692:");
	printk(KERN_INFO "W6692 ISTA=0x%X\n", ReadW6692(cs, W_ISTA));
	printk(KERN_INFO "W6692 IMASK=0x%X\n", ReadW6692(cs, W_IMASK));
	printk(KERN_INFO "W6692 D_EXIR=0x%X\n", ReadW6692(cs, W_D_EXIR));
	printk(KERN_INFO "W6692 D_EXIM=0x%X\n", ReadW6692(cs, W_D_EXIM));
	printk(KERN_INFO "W6692 D_RSTA=0x%X\n", ReadW6692(cs, W_D_RSTA));
	return (1);
}
| gpl-2.0 |
danshuk/kernel-3.0.4-mini2440 | drivers/xen/xenfs/super.c | 3249 | 3041 | /*
* xenfs.c - a filesystem for passing info between the a domain and
* the hypervisor.
*
* 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem
* and /proc/xen compatibility mount point.
* Turned xenfs into a loadable module.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/magic.h>
#include <xen/xen.h>
#include "xenfs.h"
#include <asm/xen/hypervisor.h>
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
/*
 * Allocate a fresh inode on 'sb' with the given mode, root ownership and
 * current timestamps.  Returns NULL if inode allocation fails.
 */
static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = 0;
		inode->i_gid = 0;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	}
	return inode;
}
/*
 * Create a regular file named 'name' under 'parent' with the given fops,
 * private data and permission bits.  Returns the new dentry, or NULL on
 * allocation failure (the partially created dentry is released).
 */
static struct dentry *xenfs_create_file(struct super_block *sb,
					struct dentry *parent,
					const char *name,
					const struct file_operations *fops,
					void *data,
					int mode)
{
	struct dentry *dentry;
	struct inode *inode;

	dentry = d_alloc_name(parent, name);
	if (!dentry)
		return NULL;

	inode = xenfs_make_inode(sb, S_IFREG | mode);
	if (!inode) {
		dput(dentry);
		return NULL;
	}

	inode->i_fop = fops;
	inode->i_private = data;

	d_add(dentry, inode);
	return dentry;
}
/*
 * Read handler for /proc/xen/capabilities: reports "control_d" when
 * running in the initial (dom0) domain, an empty string otherwise.
 *
 * Fix: the buffer points at string literals, so it must be const-
 * qualified — writing through a non-const pointer to a literal is UB.
 * simple_read_from_buffer takes 'const void *from', so no cast is needed.
 */
static ssize_t capabilities_read(struct file *file, char __user *buf,
				 size_t size, loff_t *off)
{
	const char *tmp = "";

	if (xen_initial_domain())
		tmp = "control_d\n";

	return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
}
/* File operations for the read-only "capabilities" file. */
static const struct file_operations capabilities_file_ops = {
	.read = capabilities_read,
	.llseek = default_llseek,
};
/*
 * Populate the xenfs superblock: always create xenbus, capabilities and
 * privcmd; in dom0 additionally expose the xenstore daemon's kva/port.
 * The xsd_* creation failures are deliberately ignored (best effort).
 */
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct tree_descr xenfs_files[] = {
		[1] = {},	/* slot 0/1 reserved by simple_fill_super */
		{ "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
		{ "capabilities", &capabilities_file_ops, S_IRUGO },
		{ "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
		{""},
	};
	int rc;

	rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
	if (rc < 0)
		return rc;

	if (xen_initial_domain()) {
		xenfs_create_file(sb, sb->s_root, "xsd_kva",
				  &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
		xenfs_create_file(sb, sb->s_root, "xsd_port",
				  &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
	}

	return rc;
}
/* Mount callback: xenfs is a singleton filesystem (one shared superblock). */
static struct dentry *xenfs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name,
				  void *data)
{
	return mount_single(fs_type, flags, data, xenfs_fill_super);
}
/* Filesystem registration record for "xenfs". */
static struct file_system_type xenfs_type = {
	.owner =	THIS_MODULE,
	.name =		"xenfs",
	.mount =	xenfs_mount,
	.kill_sb =	kill_litter_super,
};
/* Module init: register xenfs only when actually running under Xen. */
static int __init xenfs_init(void)
{
	if (!xen_domain()) {
		printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
		return 0;
	}
	return register_filesystem(&xenfs_type);
}
/* Module exit: unregister xenfs (only registered when under Xen). */
static void __exit xenfs_exit(void)
{
	if (xen_domain())
		unregister_filesystem(&xenfs_type);
}
module_init(xenfs_init);
module_exit(xenfs_exit);
| gpl-2.0 |
leyarx/android_kernel_wexler_qc750 | drivers/mtd/chips/map_rom.c | 3505 | 2830 | /*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat. GPL'd.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
unsigned long, unsigned long);
/* Chip-driver registration record for plain memory-mapped ROM devices. */
static struct mtd_chip_driver maprom_chipdrv = {
	.probe	= map_rom_probe,
	.name	= "map_rom",
	.module	= THIS_MODULE
};
/*
 * Probe callback: build an mtd_info describing the whole mapped region as
 * one read-only ROM "erase block".  Takes a module reference on success;
 * returns NULL if the mtd_info allocation fails.
 */
static struct mtd_info *map_rom_probe(struct map_info *map)
{
	struct mtd_info *mtd;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;

	map->fldrv = &maprom_chipdrv;
	mtd->priv = map;
	mtd->name = map->name;
	mtd->type = MTD_ROM;
	mtd->size = map->size;
	mtd->get_unmapped_area = maprom_unmapped_area;
	mtd->read = maprom_read;
	mtd->write = maprom_write;
	mtd->sync = maprom_nop;
	mtd->erase = maprom_erase;
	mtd->flags = MTD_CAP_ROM;
	mtd->erasesize = map->size;	/* whole chip is one "erase block" */
	mtd->writesize = 1;

	__module_get(THIS_MODULE);
	return mtd;
}
/*
* Allow NOMMU mmap() to directly map the device (if not NULL)
* - return the address to which the offset maps
* - return -ENOSYS to indicate refusal to do the mapping
*/
/* NOMMU mmap support: the device is directly addressable, so the mapped
 * address is simply the map's virtual base plus the requested offset. */
static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
					  unsigned long len,
					  unsigned long offset,
					  unsigned long flags)
{
	struct map_info *map = mtd->priv;
	return (unsigned long) map->virt + offset;
}
/* Copy 'len' bytes from the mapped ROM at 'from' into 'buf'; always
 * succeeds and reports the full length as read. */
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;

	map_copy_from(map, buf, from, len);
	*retlen = len;
	return 0;
}
/* sync callback: ROM has nothing to flush. */
static void maprom_nop(struct mtd_info *mtd)
{
	/* Nothing to see here */
}
/* ROM is not writable: log the attempt and fail with -EIO. */
static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	printk(KERN_NOTICE "maprom_write called\n");
	return -EIO;
}
/* ROM is not erasable: always fail with -EROFS. */
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
{
	/* We do our best 8) */
	return -EROFS;
}
/* Module init: register the map_rom chip driver with the MTD core. */
static int __init map_rom_init(void)
{
	register_mtd_chip_driver(&maprom_chipdrv);
	return 0;
}
static void __exit map_rom_exit(void)
{
unregister_mtd_chip_driver(&maprom_chipdrv);
}
module_init(map_rom_init);
module_exit(map_rom_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD chip driver for ROM chips");
| gpl-2.0 |
Renzo-Olivares/android_kernel_htc_vigor | arch/powerpc/kernel/sys_ppc32.c | 4529 | 20487 | /*
* sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/in.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
/* 32-bit select(): sign-extend the fd count, widen the timeval pointer. */
asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
		compat_ulong_t __user *outp, compat_ulong_t __user *exp,
		compat_uptr_t tvp_x)
{
	int nfds = (int)n;	/* sign extend n */

	return compat_sys_select(nfds, inp, outp, exp, compat_ptr(tvp_x));
}
/*
 * option is logically a signed int but arrives as u32; the cast to int
 * sign-extends it into the 64-bit register representation sys_sysfs
 * expects, so negative values survive the 32->64-bit transition.
 */
asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
{
	return sys_sysfs((int)option, arg1, arg2);
}
#ifdef CONFIG_SYSVIPC
/*
 * 32-bit compat entry for the multiplexed ipc() syscall.
 *
 * The upper 16 bits of @call carry an IPC version number (historic ABI
 * quirk); the low 16 bits select the operation.  IDs, keys and message
 * types arrive as u32 and are cast to int so they are sign-extended
 * into the 64-bit registers the native handlers expect.
 *
 * Note: every case of the switch returns, so no code may follow it;
 * the previous trailing "return -ENOSYS;" was unreachable and has been
 * removed.
 */
long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
	       u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid */
			return compat_sys_semtimedop((int)first,
						     compat_ptr(ptr), second,
						     compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign extend semid, semnum */
		return compat_sys_semctl((int)first, (int)second, third,
					 compat_ptr(ptr));

	case MSGSND:
		/* sign extend msqid */
		return compat_sys_msgsnd((int)first, (int)second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		/* sign extend msqid, msgtyp */
		return compat_sys_msgrcv((int)first, second, (int)fifth,
					 third, version, compat_ptr(ptr));
	case MSGGET:
		/* sign extend key */
		return sys_msgget((int)first, second);
	case MSGCTL:
		/* sign extend msqid */
		return compat_sys_msgctl((int)first, second, compat_ptr(ptr));

	case SHMAT:
		/* sign extend shmid */
		return compat_sys_shmat((int)first, second, third, version,
					compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		/* sign extend key_t */
		return sys_shmget((int)first, second, third);
	case SHMCTL:
		/* sign extend shmid */
		return compat_sys_shmctl((int)first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	}
}
#endif
/*
 * out_fd and in_fd are logically signed ints but arrive as u32; the
 * casts to int sign-extend them to the 64-bit register representation.
 * The 32-bit off_t is bounced through a kernel-space temporary so the
 * native sys_sendfile can be called under set_fs(KERNEL_DS).
 */
asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;
	off_t __user *up;

	/* Copy the caller's offset in, if one was supplied. */
	if (offset && get_user(of, offset))
		return -EFAULT;

	/* The __user pointer cast is valid because of the set_fs() */
	set_fs(KERNEL_DS);
	up = offset ? (off_t __user *) &of : NULL;
	ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
	set_fs(old_fs);

	/* Propagate the (possibly updated) offset back to the 32-bit caller. */
	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
/*
 * 64-bit-offset sendfile for 32-bit callers: the user-supplied offset is
 * bounced through a kernel-space loff_t so the native sys_sendfile64 can
 * be invoked under set_fs(KERNEL_DS).
 */
asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;
	loff_t __user *up;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	/* The __user pointer cast is valid because of the set_fs() */
	set_fs(KERNEL_DS);
	up = offset ? (loff_t __user *) &lof : NULL;
	ret = sys_sendfile64(out_fd, in_fd, up, count);
	set_fs(old_fs);

	/* Write the updated offset back to the caller. */
	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
/*
 * 32-bit execve: argv/envp are arrays of 32-bit user pointers, hence the
 * compat_do_execve() helper.  FP and Altivec register state is flushed to
 * the thread struct before the exec replaces the address space.
 */
long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
		       unsigned long a3, unsigned long a4, unsigned long a5,
		       struct pt_regs *regs)
{
	int error;
	char * filename;

	/* a0 is the user pointer to the pathname. */
	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);

	/* a1 = argv, a2 = envp (arrays of compat pointers). */
	error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);

	putname(filename);

out:
	return error;
}
/*
 * prctl() compat entry: option is logically a signed int, so it is
 * sign-extended; the remaining arguments are widened unchanged.
 */
asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
{
	int opt = (int)option;	/* sign extend option */

	return sys_prctl(opt,
			 (unsigned long)arg2, (unsigned long)arg3,
			 (unsigned long)arg4, (unsigned long)arg5);
}
/*
 * pid is logically a signed int but arrives as u32; the cast to int
 * sign-extends it into the 64-bit register representation.
 *
 * The native syscall is called with a kernel-space timespec under
 * set_fs(KERNEL_DS); the result is converted to a compat_timespec on
 * the way out.  The copy-out is done only on success: on failure @t is
 * never written by the native syscall, and copying it would both leak
 * uninitialized kernel stack to userspace and replace the real error
 * with a spurious -EFAULT.
 */
asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	/* The __user pointer cast is valid because of the set_fs() */
	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t);
	set_fs (old_fs);

	if (ret == 0 && put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
/*
 * The wrappers below share one pattern: a parameter that is logically a
 * signed int arrives as u32 and is cast to int, sign-extending it into
 * the 64-bit register representation the native syscall expects (so
 * negative values such as -1 survive the 32->64-bit transition).
 */

/* sign extend mode */
asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
{
	return sys_access(filename, (int)mode);
}

/* sign extend mode */
asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
{
	return sys_creat(pathname, (int)mode);
}

/* sign extend pid and options */
asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
{
	return sys_waitpid((int)pid, stat_addr, (int)options);
}

/* sign extend gidsetsize */
asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
{
	return sys_getgroups((int)gidsetsize, grouplist);
}

/* sign extend pid */
asmlinkage long compat_sys_getpgid(u32 pid)
{
	return sys_getpgid((int)pid);
}

/* sign extend pid */
asmlinkage long compat_sys_getsid(u32 pid)
{
	return sys_getsid((int)pid);
}
/*
 * More u32 -> int sign-extension wrappers: each cast restores the signed
 * 64-bit register representation the native syscall expects.
 */

/* sign extend pid and sig */
asmlinkage long compat_sys_kill(u32 pid, u32 sig)
{
	return sys_kill((int)pid, (int)sig);
}

/* sign extend mode */
asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
{
	return sys_mkdir(pathname, (int)mode);
}

long compat_sys_nice(u32 increment)
{
	/* sign extend increment */
	return sys_nice((int)increment);
}

off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
{
	/* sign extend offset */
	return sys_lseek(fd, (int)offset, origin);
}

long compat_sys_truncate(const char __user * path, u32 length)
{
	/* sign extend length */
	return sys_truncate(path, (int)length);
}

long compat_sys_ftruncate(int fd, u32 length)
{
	/* sign extend length */
	return sys_ftruncate(fd, (int)length);
}
/*
 * u32 -> int sign-extension wrappers: these parameters are signed ints in
 * the native ABI, so each u32 is cast to int to sign-extend it into the
 * 64-bit register representation the native syscall expects.
 */

/* sign extend bufsiz */
asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
{
	return sys_readlink(path, buf, (int)bufsiz);
}

/* sign extend policy */
asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
{
	return sys_sched_get_priority_max((int)policy);
}

/* sign extend policy */
asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
{
	return sys_sched_get_priority_min((int)policy);
}

/* sign extend pid */
asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
{
	return sys_sched_getparam((int)pid, param);
}

/* sign extend pid */
asmlinkage long compat_sys_sched_getscheduler(u32 pid)
{
	return sys_sched_getscheduler((int)pid);
}

/* sign extend pid */
asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
{
	return sys_sched_setparam((int)pid, param);
}

/* sign extend pid and policy */
asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
{
	return sys_sched_setscheduler((int)pid, (int)policy, param);
}
/*
 * u32 -> int sign-extension wrappers: the casts restore the signed
 * 64-bit register representation expected by the native syscalls.
 */

/* sign extend len */
asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
{
	return sys_setdomainname(name, (int)len);
}

/* sign extend gidsetsize */
asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
{
	return sys_setgroups((int)gidsetsize, grouplist);
}

asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
{
	/* sign extend len */
	return sys_sethostname(name, (int)len);
}

/* sign extend pid and pgid */
asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
{
	return sys_setpgid((int)pid, (int)pgid);
}

long compat_sys_getpriority(u32 which, u32 who)
{
	/* sign extend which and who */
	return sys_getpriority((int)which, (int)who);
}

long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
{
	/* sign extend which, who and niceval */
	return sys_setpriority((int)which, (int)who, (int)niceval);
}
/* u32 -> int sign-extension wrappers, continued. */

long compat_sys_ioprio_get(u32 which, u32 who)
{
	/* sign extend which and who */
	return sys_ioprio_get((int)which, (int)who);
}

long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
{
	/* sign extend which, who and ioprio */
	return sys_ioprio_set((int)which, (int)who, (int)ioprio);
}

/* sign extend newmask */
asmlinkage long compat_sys_ssetmask(u32 newmask)
{
	return sys_ssetmask((int) newmask);
}

asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
{
	/* sign extend len */
	return sys_syslog(type, buf, (int)len);
}

/* sign extend mask */
asmlinkage long compat_sys_umask(u32 mask)
{
	return sys_umask((int)mask);
}
/*
 * The 32-bit mmap2 ABI passes the file offset in units of 4096-byte
 * pages; convert it to a byte offset for the native sys_mmap.
 */
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/* This should remain 12 even if PAGE_SIZE changes */
	return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
}

long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
{
	/* sign extend tgid, pid */
	return sys_tgkill((int)tgid, (int)pid, sig);
}
/*
 * long long munging:
 * The 32 bit ABI passes long longs in an odd even register pair.
 * Each 64-bit argument therefore arrives as separate high/low u32 halves
 * that are reassembled here before calling the native syscall.  The
 * reg6/reg4/r4 parameters appear to be pad slots that align the register
 * pair — NOTE(review): confirm against the 32-bit syscall entry code.
 */
compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
			     u32 reg6, u32 poshi, u32 poslo)
{
	return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
}

compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
			      u32 reg6, u32 poshi, u32 poslo)
{
	return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
}

compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
{
	return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
}

asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
				unsigned long high, unsigned long low)
{
	return sys_truncate(path, (high << 32) | low);
}

asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				     u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
			     ((loff_t)lenhi << 32) | lenlo);
}

asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
				 unsigned long low)
{
	return sys_ftruncate(fd, (high << 32) | low);
}
/* Reassemble the 64-bit dcookie from its 32-bit register-pair halves. */
long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
			  size_t len)
{
	return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
				  buf, len);
}

/* Reassemble the 64-bit offset; @unused is presumably register-pair padding. */
long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
		     size_t len, int advice)
{
	return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, len,
			     advice);
}

/* plen and ringid are 32-bit on both ABIs: pass them straight through. */
asmlinkage long compat_sys_add_key(const char __user *_type,
			      const char __user *_description,
			      const void __user *_payload,
			      u32 plen,
			      u32 ringid)
{
	return sys_add_key(_type, _description, _payload, plen, ringid);
}

asmlinkage long compat_sys_request_key(const char __user *_type,
				  const char __user *_description,
				  const char __user *_callout_info,
				  u32 destringid)
{
	return sys_request_key(_type, _description, _callout_info, destringid);
}

/* offset and nbytes each arrive split across a hi/lo register pair. */
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
				   unsigned offset_hi, unsigned offset_lo,
				   unsigned nbytes_hi, unsigned nbytes_lo)
{
	loff_t offset = ((loff_t)offset_hi << 32) | offset_lo;
	loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo;

	return sys_sync_file_range(fd, offset, nbytes, flags);
}

/* The 64-bit event mask arrives split across a hi/lo register pair. */
asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
					 unsigned mask_hi, unsigned mask_lo,
					 int dfd, const char __user *pathname)
{
	u64 mask = ((u64)mask_hi << 32) | mask_lo;
	return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
}
| gpl-2.0 |
spock1104/android_kernel_zte_msm8930 | drivers/spi/spi-ep93xx.c | 4785 | 32107 | /*
* Driver for Cirrus Logic EP93xx SPI controller.
*
* Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
* Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
*
* For more information about the SPI controller see documentation on Cirrus
* Logic web site:
* http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
#include <mach/dma.h>
#include <mach/ep93xx_spi.h>
/* SSP register map: offsets from the controller's register base, plus
 * the bit fields used by this driver. */
#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;	/* protected by @lock */
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;	/* protected by @lock */
	struct spi_message		*current_msg;	/* protected by @lock */
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device	*spi;
	unsigned long		rate;
	u8			div_cpsr;
	u8			div_scr;
	u8			dss;
	struct ep93xx_spi_chip_ops *ops;	/* board-supplied CS hooks */
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
/* Byte/halfword accessors for the memory-mapped SSP registers. */
static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}

/* Enable the controller: clock on first, then set the SSE enable bit. */
static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

/* Disable the controller: clear SSE first, then gate the clock off. */
static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

/* Unmask RX, TX and RX-overrun interrupts in SSPCR1. */
static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/* Mask RX, TX and RX-overrun interrupts in SSPCR1. */
static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}
/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that max value is between values supported by the
	 * controller. Note that minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				/* First hit: the fastest rate not above @rate. */
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
/* Assert/deassert chip select for @spi, honouring SPI_CS_HIGH polarity. */
static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	/* Board code supplies the actual CS toggling via chip ops. */
	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}
/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	/* The controller handles 4..16 bit frames only. */
	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		/* First call for this device: allocate the per-chip state. */
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		/* Let board code prepare its chip-select machinery. */
		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	/* Recompute clock divisors only when the requested rate changed. */
	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			/* Divisors impossible: drop the per-chip state. */
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	/* Leave the device deselected, as documented above. */
	ep93xx_spi_cs_control(spi, false);
	return 0;
}
/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to transfer
 * a new message. It simply puts the message in the queue and schedules
 * workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			/* Per-transfer override must stay within 4..16 bits. */
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * error in transfer and @msg->state is used to hold pointer to the
	 * current transfer (or %NULL if no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	/* @running and @msg_queue are shared with the worker: take the lock. */
	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}
/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		/* Give board code a chance to undo its setup() work. */
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}
/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	/* Pack rate divider, SPI mode (CPHA/CPOL) and word size into CR0. */
	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
/*
 * Effective word size of the transfer currently in flight: the per-transfer
 * override wins, otherwise the device-wide default is used.
 */
static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	if (t->bits_per_word)
		return t->bits_per_word;
	return msg->spi->bits_per_word;
}
/*
 * Push one frame (byte or half-word, depending on the effective word size)
 * into the TX FIFO. When @t->tx_buf is NULL a zero frame is sent, which
 * happens for RX-only transfers.
 *
 * NOTE(review): espi->tx advances in bytes but is also used directly as the
 * element index into the u16 view of tx_buf in 16-bit mode — this looks
 * suspect (it would skip every other element); it matches the historical
 * upstream code, so verify against real 16-bit transfers before changing.
 */
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);	/* byte-granular progress counter */
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}
/*
 * Pull one frame from the RX FIFO. The frame is always drained from the
 * hardware; it is only stored when @t->rx_buf is non-NULL (TX-only
 * transfers discard the received data).
 *
 * NOTE(review): as with ep93xx_do_write(), espi->rx counts bytes yet is used
 * as the u16 element index in 16-bit mode — confirm intent before touching.
 */
static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}
/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;	/* one in-flight frame retired */
	}

	/*
	 * write as long as TX FIFO has room; fifo_level tracks frames queued
	 * in hardware so we never overrun the RX FIFO with the echoes.
	 */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	/* done only when every frame has been received back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
/*
 * Run the current transfer in PIO mode: prime the TX FIFO and, if the whole
 * transfer did not fit in one go, enable interrupts and sleep until the ISR
 * signals completion.
 */
static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	if (!ep93xx_spi_read_write(espi))
		return;	/* everything fit into the FIFO already */

	ep93xx_spi_enable_interrupts(espi);
	wait_for_completion(&espi->wait);
}
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	/* Frame size decides whether the DMA moves bytes or half-words. */
	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	/* Pick channel, buffer and sg table for the requested direction. */
	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			/* NULL buffer: substitute the shared zero page. */
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		/*
		 * Fix: @len is size_t; printing it with %d is a format
		 * specifier mismatch (undefined behavior per the printf
		 * contract). Use %zu and terminate the message properly.
		 */
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
/**
* ep93xx_spi_dma_finish() - finishes with a DMA transfer
* @espi: ep93xx SPI controller struct
* @dir: DMA transfer direction
*
* Function finishes with the DMA transfer. After this, the DMA buffer is
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
enum dma_transfer_direction dir)
{
struct dma_chan *chan;
struct sg_table *sgt;
if (dir == DMA_DEV_TO_MEM) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
chan = espi->dma_tx;
sgt = &espi->tx_sgt;
}
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}
/* DMA completion callback: wakes the waiter (callback_param is &espi->wait). */
static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}
/*
 * Run the current transfer using DMA: prepare RX and TX descriptors, submit
 * both, and sleep until the RX side (the last to finish) completes. On
 * failure @msg->status is set to the negative error code.
 */
static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		/*
		 * Fix: report the TX descriptor's error. The original
		 * copy-pasted PTR_ERR(rxd) here, logging the (valid) RX
		 * descriptor pointer instead of the actual TX error.
		 */
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* Publish the active transfer; the ISR and helpers read it here. */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* Reset per-transfer progress counters read by do_read/do_write. */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* Restore the device's own settings if we overrode them above. */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			/*
			 * Fix: the original returned here with the controller
			 * and its clock still enabled (clk_enable() never
			 * balanced), leaking a clock reference on every
			 * flush timeout. Disable before bailing out.
			 */
			ep93xx_spi_disable(espi);
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	/* Nothing to do if stopped, already busy, or queue is empty. */
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	/* Dequeue the oldest message and mark it in-flight under the lock. */
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* The actual transfer runs unlocked and may sleep. */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
/*
 * SSP interrupt handler: services the FIFOs for the transfer in flight and
 * aborts the current message on receive overrun. When the transfer is done
 * (successfully or not), interrupts are masked and the sleeping worker is
 * woken via @espi->wait.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
/*
 * dmaengine channel filter: reject memory-to-peripheral (M2P) channels and
 * claim any other channel by attaching our channel parameters to it.
 */
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}
/*
 * Acquire the resources needed for DMA operation: a zeroed page used as a
 * dummy buffer for one-directional transfers, plus RX and TX slave channels.
 * Returns 0 on success or a negative error code; on failure nothing is left
 * allocated (goto-based unwind).
 */
static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	/* Shared zero page substitutes for a missing rx_buf/tx_buf. */
	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}
/*
 * Release everything ep93xx_spi_setup_dma() acquired. Also frees the
 * lazily-grown sg tables that ep93xx_spi_dma_prepare() kept cached.
 * Safe to call when DMA setup failed or was never attempted.
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
/*
 * Probe the platform device: allocate an spi_master, map resources, request
 * the IRQ, optionally set up DMA, start the message workqueue and register
 * the master. Returns 0 on success or a negative error code with every
 * acquired resource released.
 */
static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int error;

	info = pdev->dev.platform_data;
	/* Fix: info was dereferenced below without a NULL check. */
	if (!info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->irq = platform_get_irq(pdev, 0);
	if (espi->irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "unable to request iomem resources\n");
		error = -EBUSY;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;
	espi->regs_base = ioremap(res->start, resource_size(res));
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_free_mem;
	}

	error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
			    "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_unmap_regs;
	}

	/* DMA is optional; fall back to PIO when channels are unavailable. */
	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		/*
		 * Fix: the original jumped to the error path without setting
		 * @error, returning the stale 0 left by the successful
		 * request_irq() — the probe "failed" while reporting success.
		 */
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, espi->irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
fail_unmap_regs:
	iounmap(espi->regs_base);
fail_free_mem:
	release_mem_region(res->start, resource_size(res));
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}
/*
 * Tear the driver down: stop accepting new messages, drain the workqueue,
 * fail all still-queued messages with -ESHUTDOWN, then release DMA, IRQ,
 * MMIO and clock resources before unregistering the master.
 */
static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	/* Stop the queue first so no new work gets scheduled. */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/* Drop the lock around the callback — it may sleep/requeue. */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue: matched by name against the "ep93xx-spi" device. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");
| gpl-2.0 |
FrancescoCG/Crazy-Kernel1-CM-Kernel | drivers/spi/spi-ep93xx.c | 4785 | 32107 | /*
* Driver for Cirrus Logic EP93xx SPI controller.
*
* Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
* Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
*
* For more information about the SPI controller see documentation on Cirrus
* Logic web site:
* http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
#include <mach/dma.h>
#include <mach/ep93xx_spi.h>
#define SSPCR0 0x0000
#define SSPCR0_MODE_SHIFT 6
#define SSPCR0_SCR_SHIFT 8
#define SSPCR1 0x0004
#define SSPCR1_RIE BIT(0)
#define SSPCR1_TIE BIT(1)
#define SSPCR1_RORIE BIT(2)
#define SSPCR1_LBM BIT(3)
#define SSPCR1_SSE BIT(4)
#define SSPCR1_MS BIT(5)
#define SSPCR1_SOD BIT(6)
#define SSPDR 0x0008
#define SSPSR 0x000c
#define SSPSR_TFE BIT(0)
#define SSPSR_TNF BIT(1)
#define SSPSR_RNE BIT(2)
#define SSPSR_RFF BIT(3)
#define SSPSR_BSY BIT(4)
#define SSPCPSR 0x0010
#define SSPIIR 0x0014
#define SSPIIR_RIS BIT(0)
#define SSPIIR_TIS BIT(1)
#define SSPIIR_RORIS BIT(2)
#define SSPICR SSPIIR
/* timeout in milliseconds */
#define SPI_TIMEOUT 5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE 8
/**
* struct ep93xx_spi - EP93xx SPI controller structure
* @lock: spinlock that protects concurrent accesses to fields @running,
* @current_msg and @msg_queue
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
* @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
* @running: is the queue running
* @wq: workqueue used by the driver
* @msg_work: work that is queued for the driver
* @wait: wait here until given transfer is completed
* @msg_queue: queue for the messages
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
* @rx: current byte in transfer to receive
* @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
* frame decreases this level and sending one frame increases it.
* @dma_rx: RX DMA channel
* @dma_tx: TX DMA channel
* @dma_rx_data: RX parameters passed to the DMA engine
* @dma_tx_data: TX parameters passed to the DMA engine
* @rx_sgt: sg table for RX transfers
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
* the client
*
* This structure holds EP93xx SPI controller specific information. When
* @running is %true, driver accepts transfer requests from protocol drivers.
* @current_msg is used to hold pointer to the message that is currently
* processed. If @current_msg is %NULL, it means that no processing is going
* on.
*
* Most of the fields are only written once and they can be accessed without
* taking the @lock. Fields that are accessed concurrently are: @current_msg,
* @running, and @msg_queue.
*/
struct ep93xx_spi {
spinlock_t lock;
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
int irq;
unsigned long min_rate;
unsigned long max_rate;
bool running;
struct workqueue_struct *wq;
struct work_struct msg_work;
struct completion wait;
struct list_head msg_queue;
struct spi_message *current_msg;
size_t tx;
size_t rx;
size_t fifo_level;
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
struct ep93xx_dma_data dma_rx_data;
struct ep93xx_dma_data dma_tx_data;
struct sg_table rx_sgt;
struct sg_table tx_sgt;
void *zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations (may be %NULL; hooks are optional)
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};
/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

/* Byte-wide MMIO write to controller register @reg. */
static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

/* Byte-wide MMIO read from controller register @reg. */
static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

/* Half-word MMIO write to controller register @reg. */
static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

/* Half-word MMIO read from controller register @reg. */
static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}
/*
 * Turn the SSP block on: enable its clock, then set the SSE bit in SSPCR1.
 * Returns 0 on success or the clk_enable() error.
 */
static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	int err = clk_enable(espi->clk);

	if (err)
		return err;

	ep93xx_spi_write_u8(espi, SSPCR1,
			    ep93xx_spi_read_u8(espi, SSPCR1) | SSPCR1_SSE);
	return 0;
}
/* Turn the SSP block off (clear SSE) and release its clock. */
static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 cr1 = ep93xx_spi_read_u8(espi, SSPCR1);

	ep93xx_spi_write_u8(espi, SSPCR1, cr1 & ~SSPCR1_SSE);
	clk_disable(espi->clk);
}
/* Unmask receive-overrun, TX and RX interrupts in SSPCR1. */
static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 cr1 = ep93xx_spi_read_u8(espi, SSPCR1);

	cr1 |= SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE;
	ep93xx_spi_write_u8(espi, SSPCR1, cr1);
}
/* Mask receive-overrun, TX and RX interrupts in SSPCR1. */
static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 cr1 = ep93xx_spi_read_u8(espi, SSPCR1);

	cr1 &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, cr1);
}
/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that max value is between values supported by the
	 * controller. Note that minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	/*
	 * Brute-force search from the smallest divisor upward, so the first
	 * hit is the fastest rate that does not exceed @rate.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
/*
 * Drive the chipselect for @spi via the board-provided hook. @control is the
 * logical "selected" state; the electrical level is inverted for the default
 * active-low polarity (i.e. unless SPI_CS_HIGH is set).
 */
static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);

	if (!chip->ops || !chip->ops->cs_control)
		return;

	chip->ops->cs_control(spi,
			      (spi->mode & SPI_CS_HIGH) ? control : !control);
}
/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			/*
			 * Fix: tear the chip state down completely. The
			 * original kfree()d the chip here without invoking
			 * chip->ops->cleanup(), leaking any state the
			 * board's setup() hook had allocated.
			 */
			if (chip->ops && chip->ops->cleanup)
				chip->ops->cleanup(spi);
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	/* Leave the device deselected on successful setup. */
	ep93xx_spi_cs_control(spi, false);
	return 0;
}
/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to transfer
 * a new message. It simply puts the message in the queue and schedules
 * workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		/* reject rates below what the divisors can reach */
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * error in transfer and @msg->state is used to hold pointer to the
	 * current transfer (or %NULL if no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	/* Enqueue and kick the worker atomically w.r.t. shutdown. */
	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}
/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * Frees the per-device chip state created by ep93xx_spi_setup(), giving the
 * board-supplied cleanup hook a chance to run first. A no-op when no state
 * is attached.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);

	if (!chip)
		return;

	if (chip->ops && chip->ops->cleanup)
		chip->ops->cleanup(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}
/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	/* Compose CR0: clock-rate divider, SPI mode bits, data size. */
	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);

	/* Program the prescaler first, then control register 0. */
	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
/*
 * Effective word size for the transfer currently in flight: a non-zero
 * per-transfer bits_per_word overrides the device default.
 */
static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
}
/*
 * Push one frame into the TX FIFO; zero is sent when @t->tx_buf is NULL
 * (RX-only transfer).
 *
 * NOTE(review): espi->tx is a byte counter but is used as the u16 element
 * index in 16-bit mode — looks suspect; matches historical upstream code,
 * verify with real 16-bit transfers before changing.
 */
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}
/*
 * Drain one frame from the RX FIFO; the value is discarded when @t->rx_buf
 * is NULL (TX-only transfer).
 *
 * NOTE(review): same byte-counter-as-element-index concern as in
 * ep93xx_do_write() for the 16-bit path.
 */
static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}
/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;	/* one in-flight frame retired */
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;	/* bound writes to avoid RX overrun */
	}

	/* complete only once every frame has come back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
/*
 * Run the current transfer in PIO mode.  If the first fill of the FIFO
 * does not complete the transfer, interrupts drive the remainder and we
 * sleep until the interrupt handler signals completion.
 */
static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	/* frames wider than 8 bits move across the bus as 16-bit units */
	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			/* no caller buffer: feed/drain the shared zero page */
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		/* fix: 'len' is size_t, so %zu (was %d); also add '\n' */
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
/**
* ep93xx_spi_dma_finish() - finishes with a DMA transfer
* @espi: ep93xx SPI controller struct
* @dir: DMA transfer direction
*
* Function finishes with the DMA transfer. After this, the DMA buffer is
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
enum dma_transfer_direction dir)
{
struct dma_chan *chan;
struct sg_table *sgt;
if (dir == DMA_DEV_TO_MEM) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
chan = espi->dma_tx;
sgt = &espi->tx_sgt;
}
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}
/* DMA completion callback: wake whoever sleeps on the completion passed
 * in as @callback_param (here: &espi->wait). */
static void ep93xx_spi_dma_callback(void *callback_param)
{
	struct completion *done = callback_param;

	complete(done);
}
/*
 * Run the current transfer using DMA.  Prepares RX and TX descriptors,
 * submits both, and sleeps until the RX side completes.  On failure
 * @msg->status is set to the error and any mapped buffers are unmapped.
 */
static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		/* fix: report the TX descriptor's error, not the RX one */
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* the PIO/DMA paths find the active transfer through msg->state */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* reset byte counters before starting the transfer */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
	/* restore the per-device settings if this transfer overrode them */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			/*
			 * Fix: balance the ep93xx_spi_enable() above;
			 * previously the controller and its clock were left
			 * enabled on this error path.
			 */
			ep93xx_spi_disable(espi);
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	/* bail out if we are stopping, already busy, or the queue is empty */
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* may sleep; runs with the lock dropped */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
/*
 * Interrupt handler for PIO transfers: services RX/TX FIFO interrupts and
 * aborts the current message on receive overrun.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
/*
 * dmaengine channel filter: reject M2P channels and attach our channel
 * request data (@filter_param) to the accepted channel.
 */
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}
/*
 * Allocate DMA resources: a zero page (used when a transfer has no
 * tx_buf/rx_buf) and one RX plus one TX slave channel.  Returns 0 on
 * success or a negative error code with everything released.
 */
static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}
/*
 * Release everything ep93xx_spi_setup_dma() acquired.  Safe to call even
 * if DMA setup failed or was never attempted (all pointers are checked).
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
/*
 * Probe: allocate the SPI master, map resources, request the IRQ,
 * optionally set up DMA, create the message workqueue and register the
 * master.  All resources are unwound in reverse order on failure.
 */
static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->irq = platform_get_irq(pdev, 0);
	if (espi->irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "unable to request iomem resources\n");
		error = -EBUSY;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;
	espi->regs_base = ioremap(res->start, resource_size(res));
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_free_mem;
	}

	error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
			    "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_unmap_regs;
	}

	/* DMA is optional: fall back to PIO when it cannot be set up */
	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		/*
		 * Fix: 'error' still held 0 from the successful
		 * request_irq(), making this failure path return success.
		 */
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, espi->irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
fail_unmap_regs:
	iounmap(espi->regs_base);
fail_free_mem:
	release_mem_region(res->start, resource_size(res));
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}
/*
 * Remove: stop the worker, fail all still-queued messages with
 * -ESHUTDOWN, and release every resource acquired in probe.
 */
static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	/* stop accepting work before tearing down the workqueue */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/* drop the lock: ->complete() may sleep or re-enter us */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue for the "ep93xx-spi" platform device. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");
| gpl-2.0 |
alexax66/CM12.1_kernel_serranodsxx | drivers/input/touchscreen/w90p910_ts.c | 5041 | 8562 | /*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* ADC controller bit defines */
#define ADC_DELAY	0xf00
#define ADC_DOWN	0x01
#define ADC_TSC_Y	(0x01 << 8)
#define ADC_TSC_X	(0x00 << 8)
#define TSC_FOURWIRE	(~(0x03 << 1))
#define ADC_CLK_EN	(0x01 << 28)	/* ADC clock enable */
#define ADC_READ_CON	(0x01 << 12)
#define ADC_CONV	(0x01 << 13)
#define ADC_SEMIAUTO	(0x01 << 14)
#define ADC_WAITTRIG	(0x03 << 14)
#define ADC_RST1	(0x01 << 16)
#define ADC_RST0	(0x00 << 16)
#define ADC_EN		(0x01 << 17)
#define ADC_INT		(0x01 << 18)
#define WT_INT		(0x01 << 20)
#define ADC_INT_EN	(0x01 << 21)
#define LVD_INT_EN	(0x01 << 22)
#define WT_INT_EN	(0x01 << 23)
#define ADC_DIV		(0x04 << 1)	/* div = 6 */

/* Sampling state machine, advanced by the ADC interrupt handler. */
enum ts_state {
	TS_WAIT_NEW_PACKET,	/* We are waiting next touch report */
	TS_WAIT_X_COORD,	/* We are waiting for ADC to report X coord */
	TS_WAIT_Y_COORD,	/* We are waiting for ADC to report Y coord */
	TS_IDLE,		/* Input device is closed, don't do anything */
};

/* Per-device state for the touchscreen controller. */
struct w90p910_ts {
	struct input_dev *input;
	struct timer_list timer;	/* pen-up detection timer */
	struct clk *clk;		/* ADC clock */
	int irq_num;
	void __iomem *ts_reg;		/* mapped ADC register base */
	spinlock_t lock;		/* protects 'state' against the IRQ */
	enum ts_state state;
};
/*
 * Push one sample to the input layer.  On pen-down the values at register
 * offsets 0x0c/0x10 are reported as ABS_X/ABS_Y; BTN_TOUCH is reported in
 * both cases.
 */
static void w90p910_report_event(struct w90p910_ts *w90p910_ts, bool down)
{
	struct input_dev *dev = w90p910_ts->input;

	if (down) {
		input_report_abs(dev, ABS_X,
				 __raw_readl(w90p910_ts->ts_reg + 0x0c));
		input_report_abs(dev, ABS_Y,
				 __raw_readl(w90p910_ts->ts_reg + 0x10));
	}

	input_report_key(dev, BTN_TOUCH, down);
	input_sync(dev);
}
/* Select the X channel and start a semi-automatic conversion. */
static void w90p910_prepare_x_reading(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	__raw_writel(ADC_TSC_X, w90p910_ts->ts_reg + 0x04);
	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	/* leave wait-trigger mode, enable the conversion interrupt */
	ctlreg &= ~(ADC_WAITTRIG | WT_INT | WT_INT_EN);
	ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_X_COORD;
}
/* Select the Y channel, clear the pending ADC interrupt status and start
 * the next semi-automatic conversion. */
static void w90p910_prepare_y_reading(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	__raw_writel(ADC_TSC_Y, w90p910_ts->ts_reg + 0x04);
	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	ctlreg &= ~(ADC_WAITTRIG | ADC_INT | WT_INT_EN);
	ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_Y_COORD;
}
/* Re-arm the controller to wait for the next pen-down trigger. */
static void w90p910_prepare_next_packet(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	ctlreg &= ~(ADC_INT | ADC_INT_EN | ADC_SEMIAUTO | ADC_CONV);
	ctlreg |= ADC_WAITTRIG | WT_INT_EN;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_NEW_PACKET;
}
/*
 * IRQ handler: advances the sampling state machine.  A full sample is
 * trigger -> X conversion -> Y conversion -> report, then the pen-up
 * timer is (re)armed and the controller waits for the next trigger.
 */
static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
{
	struct w90p910_ts *w90p910_ts = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&w90p910_ts->lock, flags);

	switch (w90p910_ts->state) {
	case TS_WAIT_NEW_PACKET:
		/*
		 * The controller only generates interrupts when pen
		 * is down.
		 */
		del_timer(&w90p910_ts->timer);
		w90p910_prepare_x_reading(w90p910_ts);
		break;

	case TS_WAIT_X_COORD:
		w90p910_prepare_y_reading(w90p910_ts);
		break;

	case TS_WAIT_Y_COORD:
		w90p910_report_event(w90p910_ts, true);
		w90p910_prepare_next_packet(w90p910_ts);
		/* pen-up is detected by this timer expiring untouched */
		mod_timer(&w90p910_ts->timer, jiffies + msecs_to_jiffies(100));
		break;

	case TS_IDLE:
		break;
	}

	spin_unlock_irqrestore(&w90p910_ts->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Timer callback: if no new trigger arrived and the controller reports
 * the pen as up (ADC_DOWN clear), emit the pen-up event.
 */
static void w90p910_check_pen_up(unsigned long data)
{
	struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
	unsigned long flags;

	spin_lock_irqsave(&w90p910_ts->lock, flags);

	if (w90p910_ts->state == TS_WAIT_NEW_PACKET &&
	    !(__raw_readl(w90p910_ts->ts_reg + 0x04) & ADC_DOWN)) {

		w90p910_report_event(w90p910_ts, false);
	}

	spin_unlock_irqrestore(&w90p910_ts->lock, flags);
}
/*
 * input_dev ->open(): power up and reset the ADC, configure 4-wire
 * operation and arm pen-down (wait-trigger) mode.
 */
static int w90p910_open(struct input_dev *dev)
{
	struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
	unsigned long val;

	/* enable the ADC clock */
	clk_enable(w90p910_ts->clk);

	/* pulse the controller reset bit */
	__raw_writel(ADC_RST1, w90p910_ts->ts_reg);
	msleep(1);
	__raw_writel(ADC_RST0, w90p910_ts->ts_reg);
	msleep(1);

	/* set delay and screen type */
	val = __raw_readl(w90p910_ts->ts_reg + 0x04);
	__raw_writel(val & TSC_FOURWIRE, w90p910_ts->ts_reg + 0x04);
	__raw_writel(ADC_DELAY, w90p910_ts->ts_reg + 0x08);

	w90p910_ts->state = TS_WAIT_NEW_PACKET;
	/* state must be visible before the hardware can raise an IRQ */
	wmb();

	/* set trigger mode */
	val = __raw_readl(w90p910_ts->ts_reg);
	val |= ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN;
	__raw_writel(val, w90p910_ts->ts_reg);

	return 0;
}
/*
 * input_dev ->close(): disable the controller and its interrupts, stop
 * the pen-up timer and gate the ADC clock.
 */
static void w90p910_close(struct input_dev *dev)
{
	struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
	unsigned long val;

	/* disable trigger mode */

	spin_lock_irq(&w90p910_ts->lock);

	w90p910_ts->state = TS_IDLE;

	val = __raw_readl(w90p910_ts->ts_reg);
	val &= ~(ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN | ADC_INT_EN);
	__raw_writel(val, w90p910_ts->ts_reg);

	spin_unlock_irq(&w90p910_ts->lock);

	/* Now that interrupts are shut off we can safely delete timer */
	del_timer_sync(&w90p910_ts->timer);

	/* stop the ADC clock */
	clk_disable(w90p910_ts->clk);
}
/*
 * Probe: allocate driver state and the input device, map the ADC
 * registers, grab the clock and IRQ, and register the input device.
 * Resources are unwound in reverse order on failure.
 */
static int __devinit w90x900ts_probe(struct platform_device *pdev)
{
	struct w90p910_ts *w90p910_ts;
	struct input_dev *input_dev;
	struct resource *res;
	int err;

	w90p910_ts = kzalloc(sizeof(struct w90p910_ts), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!w90p910_ts || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	w90p910_ts->input = input_dev;
	w90p910_ts->state = TS_IDLE;
	spin_lock_init(&w90p910_ts->lock);
	setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
		    (unsigned long)w90p910_ts);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENXIO;
		goto fail1;
	}

	if (!request_mem_region(res->start, resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto fail1;
	}

	w90p910_ts->ts_reg = ioremap(res->start, resource_size(res));
	if (!w90p910_ts->ts_reg) {
		err = -ENOMEM;
		goto fail2;
	}

	w90p910_ts->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(w90p910_ts->clk)) {
		err = PTR_ERR(w90p910_ts->clk);
		goto fail3;
	}

	input_dev->name = "W90P910 TouchScreen";
	input_dev->phys = "w90p910ts/event0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0005;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &pdev->dev;
	input_dev->open = w90p910_open;
	input_dev->close = w90p910_close;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(input_dev, ABS_X, 0, 0x400, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 0x400, 0, 0);

	input_set_drvdata(input_dev, w90p910_ts);

	/*
	 * Fix: platform_get_irq() can fail; previously a negative value
	 * was passed straight to request_irq() and the errno was lost.
	 */
	w90p910_ts->irq_num = platform_get_irq(pdev, 0);
	if (w90p910_ts->irq_num < 0) {
		err = w90p910_ts->irq_num;
		goto fail4;
	}

	if (request_irq(w90p910_ts->irq_num, w90p910_ts_interrupt,
			0, "w90p910ts", w90p910_ts)) {
		err = -EBUSY;
		goto fail4;
	}

	err = input_register_device(w90p910_ts->input);
	if (err)
		goto fail5;

	platform_set_drvdata(pdev, w90p910_ts);

	return 0;

fail5:	free_irq(w90p910_ts->irq_num, w90p910_ts);
fail4:	clk_put(w90p910_ts->clk);
fail3:	iounmap(w90p910_ts->ts_reg);
fail2:	release_mem_region(res->start, resource_size(res));
fail1:	input_free_device(input_dev);
	kfree(w90p910_ts);
	return err;
}
/* Remove: release the IRQ, timer, mappings, clock and input device. */
static int __devexit w90x900ts_remove(struct platform_device *pdev)
{
	struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
	struct resource *res;

	/* no more interrupts, then no more timer re-arms */
	free_irq(w90p910_ts->irq_num, w90p910_ts);
	del_timer_sync(&w90p910_ts->timer);
	iounmap(w90p910_ts->ts_reg);

	clk_put(w90p910_ts->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	input_unregister_device(w90p910_ts->input);
	kfree(w90p910_ts);

	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Platform driver glue for the "nuc900-ts" platform device. */
static struct platform_driver w90x900ts_driver = {
	.probe		= w90x900ts_probe,
	.remove		= __devexit_p(w90x900ts_remove),
	.driver		= {
		.name	= "nuc900-ts",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(w90x900ts_driver);

MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-ts");
| gpl-2.0 |
Loller79/Solid_Kernel-GEEHRC | drivers/input/touchscreen/w90p910_ts.c | 5041 | 8562 | /*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* ADC controller bit defines */
#define ADC_DELAY	0xf00
#define ADC_DOWN	0x01
#define ADC_TSC_Y	(0x01 << 8)
#define ADC_TSC_X	(0x00 << 8)
#define TSC_FOURWIRE	(~(0x03 << 1))
#define ADC_CLK_EN	(0x01 << 28)	/* ADC clock enable */
#define ADC_READ_CON	(0x01 << 12)
#define ADC_CONV	(0x01 << 13)
#define ADC_SEMIAUTO	(0x01 << 14)
#define ADC_WAITTRIG	(0x03 << 14)
#define ADC_RST1	(0x01 << 16)
#define ADC_RST0	(0x00 << 16)
#define ADC_EN		(0x01 << 17)
#define ADC_INT		(0x01 << 18)
#define WT_INT		(0x01 << 20)
#define ADC_INT_EN	(0x01 << 21)
#define LVD_INT_EN	(0x01 << 22)
#define WT_INT_EN	(0x01 << 23)
#define ADC_DIV		(0x04 << 1)	/* div = 6 */

/* Sampling state machine, advanced by the ADC interrupt handler. */
enum ts_state {
	TS_WAIT_NEW_PACKET,	/* We are waiting next touch report */
	TS_WAIT_X_COORD,	/* We are waiting for ADC to report X coord */
	TS_WAIT_Y_COORD,	/* We are waiting for ADC to report Y coord */
	TS_IDLE,		/* Input device is closed, don't do anything */
};

/* Per-device state for the touchscreen controller. */
struct w90p910_ts {
	struct input_dev *input;
	struct timer_list timer;	/* pen-up detection timer */
	struct clk *clk;		/* ADC clock */
	int irq_num;
	void __iomem *ts_reg;		/* mapped ADC register base */
	spinlock_t lock;		/* protects 'state' against the IRQ */
	enum ts_state state;
};
/*
 * Push one sample to the input layer.  On pen-down the values at register
 * offsets 0x0c/0x10 are reported as ABS_X/ABS_Y; BTN_TOUCH is reported in
 * both cases.
 */
static void w90p910_report_event(struct w90p910_ts *w90p910_ts, bool down)
{
	struct input_dev *dev = w90p910_ts->input;

	if (down) {
		input_report_abs(dev, ABS_X,
				 __raw_readl(w90p910_ts->ts_reg + 0x0c));
		input_report_abs(dev, ABS_Y,
				 __raw_readl(w90p910_ts->ts_reg + 0x10));
	}

	input_report_key(dev, BTN_TOUCH, down);
	input_sync(dev);
}
/* Select the X channel and start a semi-automatic conversion. */
static void w90p910_prepare_x_reading(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	__raw_writel(ADC_TSC_X, w90p910_ts->ts_reg + 0x04);
	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	/* leave wait-trigger mode, enable the conversion interrupt */
	ctlreg &= ~(ADC_WAITTRIG | WT_INT | WT_INT_EN);
	ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_X_COORD;
}
/* Select the Y channel, clear the pending ADC interrupt status and start
 * the next semi-automatic conversion. */
static void w90p910_prepare_y_reading(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	__raw_writel(ADC_TSC_Y, w90p910_ts->ts_reg + 0x04);
	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	ctlreg &= ~(ADC_WAITTRIG | ADC_INT | WT_INT_EN);
	ctlreg |= ADC_SEMIAUTO | ADC_INT_EN | ADC_CONV;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_Y_COORD;
}
/* Re-arm the controller to wait for the next pen-down trigger. */
static void w90p910_prepare_next_packet(struct w90p910_ts *w90p910_ts)
{
	unsigned long ctlreg;

	ctlreg = __raw_readl(w90p910_ts->ts_reg);
	ctlreg &= ~(ADC_INT | ADC_INT_EN | ADC_SEMIAUTO | ADC_CONV);
	ctlreg |= ADC_WAITTRIG | WT_INT_EN;
	__raw_writel(ctlreg, w90p910_ts->ts_reg);

	w90p910_ts->state = TS_WAIT_NEW_PACKET;
}
/*
 * IRQ handler: advances the sampling state machine.  A full sample is
 * trigger -> X conversion -> Y conversion -> report, then the pen-up
 * timer is (re)armed and the controller waits for the next trigger.
 */
static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
{
	struct w90p910_ts *w90p910_ts = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&w90p910_ts->lock, flags);

	switch (w90p910_ts->state) {
	case TS_WAIT_NEW_PACKET:
		/*
		 * The controller only generates interrupts when pen
		 * is down.
		 */
		del_timer(&w90p910_ts->timer);
		w90p910_prepare_x_reading(w90p910_ts);
		break;

	case TS_WAIT_X_COORD:
		w90p910_prepare_y_reading(w90p910_ts);
		break;

	case TS_WAIT_Y_COORD:
		w90p910_report_event(w90p910_ts, true);
		w90p910_prepare_next_packet(w90p910_ts);
		/* pen-up is detected by this timer expiring untouched */
		mod_timer(&w90p910_ts->timer, jiffies + msecs_to_jiffies(100));
		break;

	case TS_IDLE:
		break;
	}

	spin_unlock_irqrestore(&w90p910_ts->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Timer callback: if no new trigger arrived and the controller reports
 * the pen as up (ADC_DOWN clear), emit the pen-up event.
 */
static void w90p910_check_pen_up(unsigned long data)
{
	struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
	unsigned long flags;

	spin_lock_irqsave(&w90p910_ts->lock, flags);

	if (w90p910_ts->state == TS_WAIT_NEW_PACKET &&
	    !(__raw_readl(w90p910_ts->ts_reg + 0x04) & ADC_DOWN)) {

		w90p910_report_event(w90p910_ts, false);
	}

	spin_unlock_irqrestore(&w90p910_ts->lock, flags);
}
/*
 * input_dev ->open(): power up and reset the ADC, configure 4-wire
 * operation and arm pen-down (wait-trigger) mode.
 */
static int w90p910_open(struct input_dev *dev)
{
	struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
	unsigned long val;

	/* enable the ADC clock */
	clk_enable(w90p910_ts->clk);

	/* pulse the controller reset bit */
	__raw_writel(ADC_RST1, w90p910_ts->ts_reg);
	msleep(1);
	__raw_writel(ADC_RST0, w90p910_ts->ts_reg);
	msleep(1);

	/* set delay and screen type */
	val = __raw_readl(w90p910_ts->ts_reg + 0x04);
	__raw_writel(val & TSC_FOURWIRE, w90p910_ts->ts_reg + 0x04);
	__raw_writel(ADC_DELAY, w90p910_ts->ts_reg + 0x08);

	w90p910_ts->state = TS_WAIT_NEW_PACKET;
	/* state must be visible before the hardware can raise an IRQ */
	wmb();

	/* set trigger mode */
	val = __raw_readl(w90p910_ts->ts_reg);
	val |= ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN;
	__raw_writel(val, w90p910_ts->ts_reg);

	return 0;
}
/*
 * input_dev ->close(): disable the controller and its interrupts, stop
 * the pen-up timer and gate the ADC clock.
 */
static void w90p910_close(struct input_dev *dev)
{
	struct w90p910_ts *w90p910_ts = input_get_drvdata(dev);
	unsigned long val;

	/* disable trigger mode */

	spin_lock_irq(&w90p910_ts->lock);

	w90p910_ts->state = TS_IDLE;

	val = __raw_readl(w90p910_ts->ts_reg);
	val &= ~(ADC_WAITTRIG | ADC_DIV | ADC_EN | WT_INT_EN | ADC_INT_EN);
	__raw_writel(val, w90p910_ts->ts_reg);

	spin_unlock_irq(&w90p910_ts->lock);

	/* Now that interrupts are shut off we can safely delete timer */
	del_timer_sync(&w90p910_ts->timer);

	/* stop the ADC clock */
	clk_disable(w90p910_ts->clk);
}
/*
 * Probe: allocate driver state and the input device, map the ADC
 * registers, grab the clock and IRQ, and register the input device.
 * Resources are unwound in reverse order on failure.
 */
static int __devinit w90x900ts_probe(struct platform_device *pdev)
{
	struct w90p910_ts *w90p910_ts;
	struct input_dev *input_dev;
	struct resource *res;
	int err;

	w90p910_ts = kzalloc(sizeof(struct w90p910_ts), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!w90p910_ts || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	w90p910_ts->input = input_dev;
	w90p910_ts->state = TS_IDLE;
	spin_lock_init(&w90p910_ts->lock);
	setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
		    (unsigned long)w90p910_ts);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENXIO;
		goto fail1;
	}

	if (!request_mem_region(res->start, resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto fail1;
	}

	w90p910_ts->ts_reg = ioremap(res->start, resource_size(res));
	if (!w90p910_ts->ts_reg) {
		err = -ENOMEM;
		goto fail2;
	}

	w90p910_ts->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(w90p910_ts->clk)) {
		err = PTR_ERR(w90p910_ts->clk);
		goto fail3;
	}

	input_dev->name = "W90P910 TouchScreen";
	input_dev->phys = "w90p910ts/event0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0005;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &pdev->dev;
	input_dev->open = w90p910_open;
	input_dev->close = w90p910_close;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(input_dev, ABS_X, 0, 0x400, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 0x400, 0, 0);

	input_set_drvdata(input_dev, w90p910_ts);

	/*
	 * Fix: platform_get_irq() can fail; previously a negative value
	 * was passed straight to request_irq() and the errno was lost.
	 */
	w90p910_ts->irq_num = platform_get_irq(pdev, 0);
	if (w90p910_ts->irq_num < 0) {
		err = w90p910_ts->irq_num;
		goto fail4;
	}

	if (request_irq(w90p910_ts->irq_num, w90p910_ts_interrupt,
			0, "w90p910ts", w90p910_ts)) {
		err = -EBUSY;
		goto fail4;
	}

	err = input_register_device(w90p910_ts->input);
	if (err)
		goto fail5;

	platform_set_drvdata(pdev, w90p910_ts);

	return 0;

fail5:	free_irq(w90p910_ts->irq_num, w90p910_ts);
fail4:	clk_put(w90p910_ts->clk);
fail3:	iounmap(w90p910_ts->ts_reg);
fail2:	release_mem_region(res->start, resource_size(res));
fail1:	input_free_device(input_dev);
	kfree(w90p910_ts);
	return err;
}
/*
 * w90x900ts_remove - tear down the touchscreen device.
 *
 * Releases resources in the reverse order probe acquired them; the irq
 * is freed first so the pen-up timer cannot be re-armed while we wait
 * for it to finish.
 */
static int __devexit w90x900ts_remove(struct platform_device *pdev)
{
	struct w90p910_ts *ts = platform_get_drvdata(pdev);
	struct resource *mem;

	free_irq(ts->irq_num, ts);
	del_timer_sync(&ts->timer);

	iounmap(ts->ts_reg);
	clk_put(ts->clk);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));

	input_unregister_device(ts->input);
	kfree(ts);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue: binds to the "nuc900-ts" platform device. */
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
.remove = __devexit_p(w90x900ts_remove),
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
},
};
/* Module boilerplate: register/unregister helpers and identification. */
module_platform_driver(w90x900ts_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-ts");
| gpl-2.0 |
ztemt/A476_V1B_5.1_kernel | arch/powerpc/platforms/ps3/interrupt.c | 7601 | 20118 | /*
* PS3 interrupt routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/smp.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#define FAIL udbg_printf
#else
#define DBG pr_devel
#define FAIL pr_debug
#endif
/**
* struct ps3_bmp - a per cpu irq status and mask bitmap structure
* @status: 256 bit status bitmap indexed by plug
* @unused_1: Alignment
* @mask: 256 bit mask bitmap indexed by plug
* @unused_2: Alignment
*
* The HV maintains per SMT thread mappings of HV outlet to HV plug on
* behalf of the guest. These mappings are implemented as 256 bit guest
* supplied bitmaps indexed by plug number. The addresses of the bitmaps
* are registered with the HV through lv1_configure_irq_state_bitmap().
* The HV requires that the 512 bits of status + mask not cross a page
* boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte
* alignment.
*
* The HV supports 256 plugs per thread, assigned as {0..255}, for a total
* of 512 plugs supported on a processor. To simplify the logic this
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
* gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
* The mask is declared as unsigned long so we can use set/clear_bit on it.
*/
/* Minimum alignment keeping the 512-bit status+mask pair inside one page,
 * as the HV requires (see the kernel-doc above). */
#define PS3_BMP_MINALIGN 64
struct ps3_bmp {
struct {
u64 status;
u64 unused_1[3];
unsigned long mask;
u64 unused_2[3];
};
};
/**
* struct ps3_private - a per cpu data structure
* @bmp: ps3_bmp structure
* @bmp_lock: Syncronize access to bmp.
* @ipi_debug_brk_mask: Mask for debug break IPIs
* @ppe_id: HV logical_ppe_id
* @thread_id: HV thread_id
* @ipi_mask: Mask of IPI virqs
*/
struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
spinlock_t bmp_lock;
u64 ppe_id;
u64 thread_id;
unsigned long ipi_debug_brk_mask;
unsigned long ipi_mask;
};
/* One instance per cpu; ps3_init_IRQ() registers each cpu's bmp with the HV. */
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
/**
* ps3_chip_mask - Set an interrupt mask bit in ps3_bmp.
* @virq: The assigned Linux virq.
*
* Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
static void ps3_chip_mask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
/* The bitmap is shared with the HV; update it with local irqs off and
 * notify the HV afterwards.  Clearing the bit disables delivery (see
 * ps3_get_irq(), which ANDs status with this mask). */
local_irq_save(flags);
clear_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_unmask - Clear an interrupt mask bit in ps3_bmp.
* @virq: The assigned Linux virq.
*
* Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
static void ps3_chip_unmask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
/* Mirror of ps3_chip_mask(): setting the bit re-enables delivery of
 * this plug; the HV must be told the bitmap changed. */
local_irq_save(flags);
set_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_eoi - HV end-of-interrupt.
* @virq: The assigned Linux virq.
*
* Calls lv1_end_of_interrupt_ext().
*/
/*
 * ps3_chip_eoi - signal end-of-interrupt to the HV.
 *
 * IPIs are EOIed early in ps3_get_irq(), so they are skipped here;
 * every other plug gets lv1_end_of_interrupt_ext().
 */
static void ps3_chip_eoi(struct irq_data *d)
{
	const struct ps3_private *pd = irq_data_get_irq_chip_data(d);

	if (test_bit(63 - d->irq, &pd->ipi_mask))
		return;

	lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
* ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
*/
/* fasteoi-style chip: mask/unmask toggle the HV bitmap, eoi notifies the HV. */
static struct irq_chip ps3_irq_chip = {
.name = "ps3",
.irq_mask = ps3_chip_mask,
.irq_unmask = ps3_chip_unmask,
.irq_eoi = ps3_chip_eoi,
};
/**
* ps3_virq_setup - virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Calls irq_create_mapping() to get a virq and sets the chip data to
* ps3_private data.
*/
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
/* This defines the default interrupt distribution policy. */
if (cpu == PS3_BINDING_CPU_ANY)
cpu = 0;
pd = &per_cpu(ps3_private, cpu);
/* Plug == virq by design (see the ps3_bmp comment block above). */
*virq = irq_create_mapping(NULL, outlet);
if (*virq == NO_IRQ) {
FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
goto fail_create;
}
DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
result = irq_set_chip_data(*virq, pd);
if (result) {
FAIL("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
/* New interrupts start masked; callers unmask when ready. */
ps3_chip_mask(irq_get_irq_data(*virq));
return result;
fail_set:
irq_dispose_mapping(*virq);
fail_create:
return result;
}
/**
* ps3_virq_destroy - virq related teardown.
* @virq: The assigned Linux virq.
*
* Clears chip data and calls irq_dispose_mapping() for the virq.
*/
/*
 * ps3_virq_destroy - undo ps3_virq_setup().
 *
 * Detaches the chip data and releases the virq mapping.  Always
 * returns 0.
 */
static int ps3_virq_destroy(unsigned int virq)
{
	const struct ps3_private *priv = irq_get_chip_data(virq);

	DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
		__LINE__, priv->ppe_id, priv->thread_id, virq);

	irq_set_chip_data(virq, NULL);
	irq_dispose_mapping(virq);

	DBG("%s:%d <-\n", __func__, __LINE__);
	return 0;
}
/**
* ps3_irq_plug_setup - Generic outlet and virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Sets up virq and connects the irq plug.
*/
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
result = ps3_virq_setup(cpu, outlet, virq);
if (result) {
FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
goto fail_setup;
}
pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
outlet, 0);
if (result) {
FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
/* HV status codes are not errnos; report a generic failure. */
result = -EPERM;
goto fail_connect;
}
return result;
fail_connect:
ps3_virq_destroy(*virq);
fail_setup:
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
/**
* ps3_irq_plug_destroy - Generic outlet and virq related teardown.
* @virq: The assigned Linux virq.
*
* Disconnects the irq plug and tears down virq.
* Do not call for system bus event interrupts setup with
* ps3_sb_event_receive_port_setup().
*/
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
/* Quiesce the interrupt before disconnecting the plug from the HV. */
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
if (result)
FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
/* virq teardown proceeds even if the HV disconnect reported failure. */
ps3_virq_destroy(virq);
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy);
/**
* ps3_event_receive_port_setup - Setup an event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virq: The assigned Linux virq.
*
* The virq can be used with lv1_connect_interrupt_event_receive_port() to
* arrange to receive interrupts from system-bus devices, or with
* ps3_send_event_locally() to signal events.
*/
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_event_receive_port(&outlet);
if (result) {
FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
*virq = NO_IRQ;
return result;
}
/* Plug setup is expected to succeed once the outlet exists. */
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup);
/**
* ps3_event_receive_port_destroy - Destroy an event receive port.
* @virq: The assigned Linux virq.
*
* Since ps3_event_receive_port_destroy destroys the receive port outlet,
* SB devices need to call disconnect_interrupt_event_receive_port() before
* this.
*/
int ps3_event_receive_port_destroy(unsigned int virq)
{
int result;
DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
/* Quiesce before destroying the outlet behind the virq. */
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
if (result)
FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
/*
* Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu()
* calls from interrupt context (smp_call_function) when kexecing.
*/
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
/*
 * ps3_send_event_locally - signal the event outlet mapped to @virq.
 *
 * Thin wrapper translating the Linux virq to its HV outlet number.
 */
int ps3_send_event_locally(unsigned int virq)
{
	unsigned long outlet = virq_to_hw(virq);

	return lv1_send_event_locally(outlet);
}
/**
* ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @dev: The system bus device instance.
* @virq: The assigned Linux virq.
*
* An event irq represents a virtual device interrupt. The interrupt_id
* coresponds to the software interrupt number.
*/
int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
enum ps3_cpu_binding cpu, unsigned int *virq)
{
/* this should go in system-bus.c */
int result;
result = ps3_event_receive_port_setup(cpu, virq);
if (result)
return result;
/* Route the device's interrupt_id to the new receive port. */
result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
if (result) {
FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
/* Unwind the receive port created above. */
ps3_event_receive_port_destroy(*virq);
*virq = NO_IRQ;
return result;
}
DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, *virq);
return 0;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
unsigned int virq)
{
/* this should go in system-bus.c */
int result;
DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, virq);
/* Disconnect the device's route before destroying the receive port. */
result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
if (result)
FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
result = ps3_event_receive_port_destroy(virq);
BUG_ON(result);
/*
* ps3_event_receive_port_destroy() destroys the IRQ plug,
* so don't call ps3_irq_plug_destroy() here.
*/
result = ps3_virq_destroy(virq);
BUG_ON(result);
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
/**
* ps3_io_irq_setup - Setup a system bus io irq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @interrupt_id: The device interrupt id read from the system repository.
* @virq: The assigned Linux virq.
*
* An io irq represents a non-virtualized device interrupt. interrupt_id
* coresponds to the interrupt number of the interrupt controller.
*/
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
unsigned int *virq)
{
int result;
u64 outlet;
/* Ask the HV for an outlet bound to this hardware interrupt id. */
result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
if (result) {
FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_setup);
int ps3_io_irq_destroy(unsigned int virq)
{
int result;
/* Save the outlet number before the virq mapping is torn down. */
unsigned long outlet = virq_to_hw(virq);
ps3_chip_mask(irq_get_irq_data(virq));
/*
* lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
* so call ps3_irq_plug_destroy() first.
*/
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
result = lv1_destruct_io_irq_outlet(outlet);
if (result)
FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
/**
* ps3_vuart_irq_setup - Setup the system virtual uart virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
* @virq: The assigned Linux virq.
*
* The system supports only a single virtual uart, so multiple calls without
* freeing the interrupt will return a wrong state error.
*/
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
unsigned int *virq)
{
int result;
u64 outlet;
u64 lpar_addr;
/* The HV takes an lpar address, so the bitmap must be kernel memory. */
BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
/*
 * ps3_vuart_irq_destroy - release the system virtual uart virq.
 *
 * Masks the interrupt, deconfigures the vuart irq with the HV, then
 * tears down the irq plug.  Returns the HV status on deconfigure
 * failure, otherwise the (BUG_ON-checked) plug destroy result.
 */
int ps3_vuart_irq_destroy(unsigned int virq)
{
	int result;

	ps3_chip_mask(irq_get_irq_data(virq));
	result = lv1_deconfigure_virtual_uart_irq();

	if (result) {
		/* Fixed: the original message blamed
		 * lv1_configure_virtual_uart_irq for this failure. */
		FAIL("%s:%d: lv1_deconfigure_virtual_uart_irq failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		return result;
	}

	result = ps3_irq_plug_destroy(virq);
	BUG_ON(result);

	return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
/**
* ps3_spe_irq_setup - Setup an spe virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @spe_id: The spe_id returned from lv1_construct_logical_spe().
* @class: The spe interrupt class {0,1,2}.
* @virq: The assigned Linux virq.
*
*/
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
unsigned int class, unsigned int *virq)
{
int result;
u64 outlet;
/* Only spe interrupt classes 0, 1 and 2 exist. */
BUG_ON(class > 2);
result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
if (result) {
FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
/*
 * ps3_spe_irq_destroy - release an spe virq set up by ps3_spe_irq_setup().
 *
 * Masks the interrupt, then destroys the plug; a failed destroy is
 * fatal (BUG_ON), so 0 is effectively always returned.
 */
int ps3_spe_irq_destroy(unsigned int virq)
{
	int ret;

	ps3_chip_mask(irq_get_irq_data(virq));

	ret = ps3_irq_plug_destroy(virq);
	BUG_ON(ret);
	return ret;
}
#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
/* Highest usable plug value; plugs map into the first dword of the bitmaps. */
#define PS3_PLUG_MAX 63
/* Debug-only helpers dumping the per-cpu status/mask bitmaps. */
#if defined(DEBUG)
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
const char* func, int line)
{
pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
func, line, header, cpu,
*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
*p & 0xffff);
}
static void __maybe_unused _dump_256_bmp(const char *header,
const u64 *p, unsigned cpu, const char* func, int line)
{
pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
func, line, header, cpu, p[0], p[1], p[2], p[3]);
}
#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
{
unsigned long flags;
/* bmp_lock keeps status and mask output consistent with each other. */
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
static void __maybe_unused _dump_mask(struct ps3_private *pd,
const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#else
/* Non-debug build: dump_bmp() compiles away. */
static void dump_bmp(struct ps3_private* pd) {};
#endif /* defined(DEBUG) */
/* irq_domain map hook: attach the ps3 chip with fasteoi flow to each virq. */
static int ps3_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
/* irq_domain match hook: this is the only domain, so match every node. */
static int ps3_host_match(struct irq_domain *h, struct device_node *np)
{
	return 1;
}
/* Operations for the single, default ps3 irq domain. */
static const struct irq_domain_ops ps3_host_ops = {
.map = ps3_host_map,
.match = ps3_host_match,
};
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
/* Record this virq so ps3_get_irq() can prioritize debug-break IPIs. */
set_bit(63 - virq, &pd->ipi_debug_brk_mask);
DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_debug_brk_mask);
}
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
/* Mark this virq as an IPI; IPIs are EOIed in ps3_get_irq(), not eoi(). */
set_bit(63 - virq, &pd->ipi_mask);
DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_mask);
}
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = &__get_cpu_var(ps3_private);
/* Pending-and-unmasked plugs for this cpu. */
u64 x = (pd->bmp.status & pd->bmp.mask);
unsigned int plug;
/* check for ipi break first to stop this cpu ASAP */
if (x & pd->ipi_debug_brk_mask)
x &= pd->ipi_debug_brk_mask;
/* Count leading zeros: highest set bit n yields plug 63-n, matching
 * the 63 - virq bit numbering used throughout this file. */
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
/* cntlzd of 0 gives 64, which masks to 0 == NO_IRQ: nothing pending. */
if (unlikely(plug == NO_IRQ)) {
DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
return NO_IRQ;
}
#if defined(DEBUG)
if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
}
#endif
/* IPIs are EOIed here. */
if (test_bit(63 - plug, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
return plug;
}
void __init ps3_init_IRQ(void)
{
int result;
unsigned cpu;
struct irq_domain *host;
/* One nomap domain covers all plugs; virq == plug by construction. */
host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_host(host);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
lv1_get_logical_ppe_id(&pd->ppe_id);
pd->thread_id = get_hard_smp_processor_id(cpu);
spin_lock_init(&pd->bmp_lock);
DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
__func__, __LINE__, pd->ppe_id, pd->thread_id,
ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
/* Hand the HV this cpu's status/mask bitmap address. */
result = lv1_configure_irq_state_bitmap(pd->ppe_id,
pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
if (result)
FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
" %s\n", __func__, __LINE__,
ps3_result(result));
}
ppc_md.get_irq = ps3_get_irq;
}
void ps3_shutdown_IRQ(int cpu)
{
int result;
u64 ppe_id;
u64 thread_id = get_hard_smp_processor_id(cpu);
lv1_get_logical_ppe_id(&ppe_id);
/* Bitmap address 0 deregisters the cpu's irq state bitmap from the HV. */
result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
}
| gpl-2.0 |
Ameisentaetowierer/kernel-p9516-2.6.36.3 | net/bridge/netfilter/ebt_nflog.c | 9393 | 1769 | /*
* ebt_nflog
*
* Author:
* Peter Warasin <peter@endian.com>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
*
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>
static unsigned int
ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
nf_log_packet(PF_BRIDGE, par->hooknum, skb, par->in, par->out,
&li, "%s", info->prefix);
return EBT_CONTINUE;
}
/*
 * ebt_nflog_tg_check - validate user-supplied target options.
 *
 * Rejects unknown flag bits and forces NUL-termination of the
 * user-controlled log prefix.
 */
static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
	struct ebt_nflog_info *nflog = par->targinfo;

	if (nflog->flags & ~EBT_NFLOG_MASK)
		return -EINVAL;

	nflog->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';

	return 0;
}
/* xtables registration record for the bridge-family "nflog" target. */
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
.name = "nflog",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_nflog_tg,
.checkentry = ebt_nflog_tg_check,
.targetsize = sizeof(struct ebt_nflog_info),
.me = THIS_MODULE,
};
/* Module init: register the "nflog" target with the xtables core. */
static int __init ebt_nflog_init(void)
{
	int ret;

	ret = xt_register_target(&ebt_nflog_tg_reg);
	return ret;
}
/* Module exit: remove the "nflog" target registration. */
static void __exit ebt_nflog_fini(void)
{
	struct xt_target *tg = &ebt_nflog_tg_reg;

	xt_unregister_target(tg);
}
/* Module boilerplate: entry points and identification. */
module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
| gpl-2.0 |
fransklaver/linux | fs/jffs2/erase.c | 9649 | 14584 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"
/* Context appended to each erase_info so jffs2_erase_callback() can
 * recover the filesystem and eraseblock being erased. */
struct erase_priv_struct {
struct jffs2_eraseblock *jeb;
struct jffs2_sb_info *c;
};
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
/* Kick off an asynchronous erase of @jeb.  On immediate failure the
 * block is either refiled to erase_pending_list (transient errors) or
 * handed to jffs2_erase_failed(). */
static void jffs2_erase_block(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
int ret;
uint32_t bad_offset;
#ifdef __ECOS
ret = jffs2_flash_erase(c, jeb);
if (!ret) {
jffs2_erase_succeeded(c, jeb);
return;
}
bad_offset = jeb->offset;
#else /* Linux */
struct erase_info *instr;
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size);
/* Single allocation: erase_info with our private context right after it. */
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
/* Move the block back to pending and restore the size accounting
 * that jffs2_erase_pending_blocks() adjusted when it picked us. */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
memset(instr, 0, sizeof(*instr));
instr->mtd = c->mtd;
instr->addr = jeb->offset;
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
/* priv points at the erase_priv_struct trailing the erase_info. */
instr->priv = (unsigned long)(&instr[1]);
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
ret = mtd_erase(c->mtd, instr);
if (!ret)
/* Erase queued; the callback frees instr and finishes up. */
return;
bad_offset = instr->fail_addr;
kfree(instr);
#endif /* __ECOS */
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
jeb->offset, ret);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
if (ret == -EROFS)
pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
jeb->offset);
else
pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
jeb->offset, ret);
jffs2_erase_failed(c, jeb, bad_offset);
}
/* Process up to @count completed erases (verify + mark them clean) and
 * start erases for any pending blocks.  Returns the number of completed
 * blocks processed.  Called with no locks held; takes and drops
 * erase_free_sem / erase_completion_lock around each list operation. */
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
int work_done = 0;
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
while (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list)) {
if (!list_empty(&c->erase_complete_list)) {
jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
list_move(&jeb->list, &c->erase_checking_list);
/* Drop the locks: marking the block does flash I/O. */
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
work_done++;
if (!--count) {
jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
goto done;
}
} else if (!list_empty(&c->erase_pending_list)) {
jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
jeb->offset);
/* Move all of the block's space into "erasing" accounting. */
list_del(&jeb->list);
c->erasing_size += c->sector_size;
c->wasted_size -= jeb->wasted_size;
c->free_size -= jeb->free_size;
c->used_size -= jeb->used_size;
c->dirty_size -= jeb->dirty_size;
jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_erase_block(c, jeb);
} else {
BUG();
}
/* Be nice */
cond_resched();
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
done:
jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
return work_done;
}
/* Move a successfully erased block to erase_complete_list and wake the
 * GC thread and any waiters so it gets verified and marked clean. */
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
/* Wake the GC thread to mark them clean */
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
/* Handle a failed erase: retry via the bad-block bookkeeping when
 * possible (NAND), otherwise retire the block to bad_list. */
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
/* For NAND, if the failure did not occur at the device level for a
specific physical page, don't bother updating the bad block table. */
if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
/* We had a device-level failure to erase. Let's see if we've
failed too many times. */
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
}
/* Retire the block permanently: account its space as bad. */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
#ifndef __ECOS
/* MTD completion callback for the async erase started in
 * jffs2_erase_block(); dispatches to succeeded/failed and frees the
 * erase_info allocated there. */
static void jffs2_erase_callback(struct erase_info *instr)
{
struct erase_priv_struct *priv = (void *)instr->priv;
if(instr->state != MTD_ERASE_DONE) {
pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
(unsigned long long)instr->addr, instr->state);
jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
jffs2_erase_succeeded(priv->c, priv->jeb);
}
kfree(instr);
}
#endif /* !__ECOS */
/* Hmmm. Maybe we should accept the extra space it takes and make
this a standard doubly-linked list? */
/* Unlink every raw node ref belonging to the erased block @jeb from the
 * per-inode next_in_ino chain that @ref is on, then release the owning
 * inode_cache / xattr object if it has become unused. */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
struct jffs2_inode_cache *ic = NULL;
struct jffs2_raw_node_ref **prev;
prev = &ref->next_in_ino;
/* Walk the inode's list once, removing any nodes from this eraseblock */
while (1) {
if (!(*prev)->next_in_ino) {
/* We're looking at the jffs2_inode_cache, which is
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
prev = &ic->nodes;
continue;
}
if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
/* It's in the block we're erasing */
struct jffs2_raw_node_ref *this;
this = *prev;
*prev = this->next_in_ino;
this->next_in_ino = NULL;
/* Stop once the ref we started from has been unlinked:
 * the circular list is then fully processed. */
if (this == ref)
break;
continue;
}
/* Not to be deleted. Skip */
prev = &((*prev)->next_in_ino);
}
/* PARANOIA */
if (!ic) {
JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
" not found in remove_node_refs()!!\n");
return;
}
jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
jeb->offset, jeb->offset + c->sector_size, ic->ino);
D2({
int i=0;
struct jffs2_raw_node_ref *this;
printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");
this = ic->nodes;
printk(KERN_DEBUG);
while(this) {
pr_cont("0x%08x(%d)->",
ref_offset(this), ref_flags(this));
if (++i == 5) {
printk(KERN_DEBUG);
i=0;
}
this = this->next_in_ino;
}
pr_cont("\n");
});
/* The list terminator may be an inode cache or an xattr object;
 * release whichever kind it is if nothing references it any more. */
switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
case RAWNODE_CLASS_XATTR_DATUM:
jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
break;
case RAWNODE_CLASS_XATTR_REF:
jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
break;
#endif
default:
if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
jffs2_del_ino_cache(c, ic);
}
}
/* Free all raw node refs of @jeb: refs live in arrays (refblocks)
 * chained by REF_LINK_NODE sentinels; each block is freed once its
 * entries have been detached from their inode lists. */
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *block, *ref;
jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
jeb->offset);
block = ref = jeb->first_node;
while (ref) {
if (ref->flash_offset == REF_LINK_NODE) {
/* End of this refblock: follow the link, free the array. */
ref = ref->next_in_ino;
jffs2_free_refblock(block);
block = ref;
continue;
}
if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
/* else it was a non-inode node or already removed, so don't bother */
ref++;
}
jeb->first_node = jeb->last_node = NULL;
}
/* Verify that an erased block really reads back as all 0xFF.  Tries a
 * zero-copy mtd_point() scan first, falling back to page-sized reads.
 * Returns 0 on success, -EIO with *bad_offset set on a dirty word, or
 * -EAGAIN when the verify buffer could not be allocated. */
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
void *ebuf;
uint32_t ofs;
size_t retlen;
int ret;
unsigned long *wordebuf;
ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
&ebuf, NULL);
if (ret != -EOPNOTSUPP) {
if (ret) {
jffs2_dbg(1, "MTD point failed %d\n", ret);
goto do_flash_read;
}
if (retlen < c->sector_size) {
/* Don't muck about if it won't let us point to the whole erase sector */
jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
retlen);
mtd_unpoint(c->mtd, jeb->offset, retlen);
goto do_flash_read;
}
/* Pre-decrement start so the loop's *++wordebuf walks the buffer;
 * retlen counts the words still unchecked when the loop exits. */
wordebuf = ebuf-sizeof(*wordebuf);
retlen /= sizeof(*wordebuf);
do {
if (*++wordebuf != ~0)
break;
} while(--retlen);
mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
*wordebuf,
jeb->offset +
c->sector_size-retlen * sizeof(*wordebuf));
return -EIO;
}
return 0;
}
do_flash_read:
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ebuf) {
pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
jeb->offset);
return -EAGAIN;
}
jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
int i;
*bad_offset = ofs;
ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
ofs, ret);
ret = -EIO;
goto fail;
}
if (retlen != readlen) {
pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
ofs, readlen, retlen);
ret = -EIO;
goto fail;
}
for (i=0; i<readlen; i += sizeof(unsigned long)) {
/* It's OK. We know it's properly aligned */
unsigned long *datum = ebuf + i;
/* *datum + 1 is nonzero unless the word is all-ones (~0). */
if (*datum + 1) {
*bad_offset += i;
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
*datum, *bad_offset);
ret = -EIO;
goto fail;
}
}
ofs += readlen;
cond_resched();
}
ret = 0;
fail:
kfree(ebuf);
return ret;
}
/*
 * Verify an erased block and bring it onto the free list.
 *
 * First re-checks the erase via jffs2_block_check_erase(); -EAGAIN refiles
 * the block onto erase_complete_list for a later retry, -EIO marks it bad.
 * Then writes the cleanmarker (in OOB for NAND, in-band otherwise, or not
 * at all if cleanmarker_size is 0), updates the space accounting under
 * erase_free_sem + erase_completion_lock, and moves the block to free_list.
 *
 * Fix: the in-band cleanmarker iovec base had been corrupted to a stray
 * placeholder token; it must point at the on-stack marker node, matching
 * iov_len = sizeof(marker) and the CRC computed over &marker.
 */
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	size_t retlen;
	int ret;
	uint32_t uninitialized_var(bad_offset);
	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
	case -EAGAIN: goto refile;
	case -EIO: goto filebad;
	}
	/* Write the erase complete marker */
	jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
	bad_offset = jeb->offset;
	/* Cleanmarker in oob area or no cleanmarker at all ? */
	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
		if (jffs2_cleanmarker_oob(c)) {
			if (jffs2_write_nand_cleanmarker(c, jeb))
				goto filebad;
		}
	} else {
		/* In-band cleanmarker: write a minimal unknown-node header */
		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen = cpu_to_je32(c->cleanmarker_size)
		};
		jffs2_prealloc_raw_node_refs(c, jeb, 1);
		/* CRC covers the header minus the hdr_crc field itself */
		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
		if (ret || retlen != sizeof(marker)) {
			if (ret)
				pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
					jeb->offset, ret);
			else
				pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
					jeb->offset, sizeof(marker), retlen);
			goto filebad;
		}
	}
	/* Everything else got zeroed before the erase */
	jeb->free_size = c->sector_size;
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->free_size += c->sector_size;
	/* Account for cleanmarker now, if it's in-band */
	if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
	list_move_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
	return;
filebad:
	jffs2_erase_failed(c, jeb, bad_offset);
	return;
refile:
	/* Stick it back on the list from whence it came and come back later */
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	list_move(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	return;
}
| gpl-2.0 |
curbthepain/revkernel_titan | drivers/tty/serial/pnx8xxx_uart.c | 9905 | 21362 | /*
* UART driver for PNX8XXX SoCs
*
* Author: Per Hallsmark per.hallsmark@mvista.com
* Ported to 2.6 kernel by EmbeddedAlley
* Reworked by Vitaly Wool <vitalywool@gmail.com>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*
*/
#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/serial_pnx8xxx.h>
#include <asm/io.h>
#include <asm/irq.h>
/* We'll be using StrongARM sa1100 serial port major/minor */
#define SERIAL_PNX8XXX_MAJOR 204
#define MINOR_START 5
#define NR_PORTS 2
#define PNX8XXX_ISR_PASS_LIMIT 256
/*
* Convert from ignore_status_mask or read_status_mask to FIFO
* and interrupt status bits
*/
#define SM_TO_FIFO(x) ((x) >> 10)
#define SM_TO_ISTAT(x) ((x) & 0x000001ff)
#define FIFO_TO_SM(x) ((x) << 10)
#define ISTAT_TO_SM(x) ((x) & 0x000001ff)
/*
* This is the size of our serial port register set.
*/
#define UART_PORT_SIZE 0x1000
/*
* This determines how often we check the modem status signals
* for any change. They generally aren't connected to an IRQ
* so we have to poll them. We also check immediately before
* filling the TX fifo incase CTS has been dropped.
*/
#define MCTRL_TIMEOUT (250*HZ/1000)
extern struct pnx8xxx_port pnx8xxx_ports[];
static inline int serial_in(struct pnx8xxx_port *sport, int offset)
{
return (__raw_readl(sport->port.membase + offset));
}
static inline void serial_out(struct pnx8xxx_port *sport, int offset, int value)
{
__raw_writel(value, sport->port.membase + offset);
}
/*
* Handle any change of modem status signal since we were last called.
*/
static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
{
	unsigned int status, changed;
	status = sport->port.ops->get_mctrl(&sport->port);
	changed = status ^ sport->old_status;	/* bits that flipped since last poll */
	if (changed == 0)
		return;
	sport->old_status = status;
	/* Bump per-signal counters; notify the serial core of DCD/CTS edges */
	if (changed & TIOCM_RI)
		sport->port.icount.rng++;
	if (changed & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (changed & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
	if (changed & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
	/* Wake anyone blocked in TIOCMIWAIT */
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
/*
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
/* Per-port poll timer: sample the modem lines and re-arm while the port
 * is open (port.state non-NULL). */
static void pnx8xxx_timeout(unsigned long data)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
	unsigned long flags;
	if (sport->port.state) {
		spin_lock_irqsave(&sport->port.lock, flags);
		pnx8xxx_mctrl_check(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);
		/* Poll again in MCTRL_TIMEOUT jiffies */
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
	}
}
/*
* interrupts disabled on entry
*/
static void pnx8xxx_stop_tx(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	u32 ien;
	/* Disable TX intr */
	ien = serial_in(sport, PNX8XXX_IEN);
	serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLTX);
	/* Clear all pending TX intr */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
}
/*
* interrupts may not be disabled on entry
*/
static void pnx8xxx_start_tx(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	u32 ien;
	/* Clear all pending TX intr (before enabling, so a stale status
	 * does not fire immediately) */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
	/* Enable TX intr */
	ien = serial_in(sport, PNX8XXX_IEN);
	serial_out(sport, PNX8XXX_IEN, ien | PNX8XXX_UART_INT_ALLTX);
}
/*
* Interrupts enabled
*/
static void pnx8xxx_stop_rx(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	u32 ien;
	/* Disable RX intr */
	ien = serial_in(sport, PNX8XXX_IEN);
	serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLRX);
	/* Clear all pending RX intr */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX);
}
/*
* Set the modem control timer to fire immediately.
*/
/* Kick the modem-status poll timer to fire immediately. */
static void pnx8xxx_enable_ms(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	mod_timer(&sport->timer, jiffies);
}
/*
 * Drain the RX FIFO into the tty layer.
 *
 * "status" combines the FIFO flag bits (shifted up by FIFO_TO_SM) with
 * the low interrupt-status bits (ISTAT_TO_SM), so one word carries both
 * the per-character error flags and the overrun indication.
 */
static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
{
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned int status, ch, flg;
	status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
		 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
	while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
		ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;
		sport->port.icount.rx++;
		flg = TTY_NORMAL;
		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
					PNX8XXX_UART_FIFO_RXPAR |
					PNX8XXX_UART_FIFO_RXBRK) |
			      ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
				/* Break: suppress the accompanying framing/
				 * parity flags, they are side effects */
				status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
					FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
				sport->port.icount.brk++;
				if (uart_handle_break(&sport->port))
					goto ignore_char;
			} else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
				sport->port.icount.parity++;
			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
				sport->port.icount.frame++;
			if (status & ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))
				sport->port.icount.overrun++;
			/* Keep only the conditions the termios setup asked
			 * us to report */
			status &= sport->port.read_status_mask;
			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
				flg = TTY_PARITY;
			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
				flg = TTY_FRAME;
#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}
		if (uart_handle_sysrq_char(&sport->port, ch))
			goto ignore_char;
		uart_insert_char(&sport->port, status,
				ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN), ch, flg);
	ignore_char:
		/* Advance the RX FIFO to the next character and re-sample */
		serial_out(sport, PNX8XXX_LCR, serial_in(sport, PNX8XXX_LCR) |
			PNX8XXX_UART_LCR_RX_NEXT);
		status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
			ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
	}
	tty_flip_buffer_push(tty);
}
/*
 * Feed the TX FIFO from the circular transmit buffer.
 * An XON/XOFF character (x_char) takes priority over buffered data.
 */
static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	if (sport->port.x_char) {
		serial_out(sport, PNX8XXX_FIFO, sport->port.x_char);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}
	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	pnx8xxx_mctrl_check(sport);
	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		pnx8xxx_stop_tx(&sport->port);
		return;
	}
	/*
	 * TX while bytes available
	 * (FIFO level is read from bits 16+ of the FIFO register;
	 * keep writing while fewer than 16 bytes are queued)
	 */
	while (((serial_in(sport, PNX8XXX_FIFO) &
			PNX8XXX_UART_FIFO_TXFIFO) >> 16) < 16) {
		serial_out(sport, PNX8XXX_FIFO, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}
	/* Wake writers once the buffer has drained below the threshold */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);
	if (uart_circ_empty(xmit))
		pnx8xxx_stop_tx(&sport->port);
}
/* UART interrupt handler: dispatch RX/break and TX-empty events, then
 * acknowledge everything that was pending and enabled. */
static irqreturn_t pnx8xxx_int(int irq, void *dev_id)
{
	struct pnx8xxx_port *sport = dev_id;
	unsigned int status;
	spin_lock(&sport->port.lock);
	/* Get the interrupts (only those currently enabled) */
	status = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN);
	/* Byte or break signal received */
	if (status & (PNX8XXX_UART_INT_RX | PNX8XXX_UART_INT_BREAK))
		pnx8xxx_rx_chars(sport);
	/* TX holding register empty - transmit a byte */
	if (status & PNX8XXX_UART_INT_TX)
		pnx8xxx_tx_chars(sport);
	/* Clear the ISTAT register */
	serial_out(sport, PNX8XXX_ICLR, status);
	spin_unlock(&sport->port.lock);
	return IRQ_HANDLED;
}
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
/* Report TIOCSER_TEMT when the transmitter is completely idle. */
static unsigned int pnx8xxx_tx_empty(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	unsigned int fifo_status = serial_in(sport, PNX8XXX_FIFO);

	if (fifo_status & PNX8XXX_UART_FIFO_TXFIFO_STA)
		return 0;
	return TIOCSER_TEMT;
}
/* Sample the modem-control inputs; DSR is always reported asserted. */
static unsigned int pnx8xxx_get_mctrl(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	unsigned int msr = serial_in(sport, PNX8XXX_MCR);	/* REVISIT */
	unsigned int mctrl = TIOCM_DSR;

	if (msr & PNX8XXX_UART_MCR_CTS)
		mctrl |= TIOCM_CTS;
	if (msr & PNX8XXX_UART_MCR_DCD)
		mctrl |= TIOCM_CAR;
	return mctrl;
}
/* Intentionally a no-op: setting modem-control outputs is not
 * implemented for this hardware (see the disabled skeleton below). */
static void pnx8xxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
#if 0	/* FIXME */
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	unsigned int msr;
#endif
}
/*
* Interrupts always disabled.
*/
/*
 * Assert (break_state == -1) or clear the TX break condition.
 * Interrupts always disabled on entry; LCR read-modify-write is done
 * under the port lock.
 */
static void pnx8xxx_break_ctl(struct uart_port *port, int break_state)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	unsigned long flags;
	unsigned int lcr;

	spin_lock_irqsave(&sport->port.lock, flags);
	lcr = serial_in(sport, PNX8XXX_LCR);
	if (break_state == -1)
		serial_out(sport, PNX8XXX_LCR, lcr | PNX8XXX_UART_LCR_TXBREAK);
	else
		serial_out(sport, PNX8XXX_LCR, lcr & ~PNX8XXX_UART_LCR_TXBREAK);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}
/* Open hook: grab the IRQ, clear stale interrupts, enable RX/TX
 * interrupts and kick off modem-status polling.
 * Returns 0 or the request_irq() error. */
static int pnx8xxx_startup(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	int retval;
	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(sport->port.irq, pnx8xxx_int, 0,
			     "pnx8xxx-uart", sport);
	if (retval)
		return retval;
	/*
	 * Finally, clear and enable interrupts
	 */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
			     PNX8XXX_UART_INT_ALLTX);
	serial_out(sport, PNX8XXX_IEN, serial_in(sport, PNX8XXX_IEN) |
			     PNX8XXX_UART_INT_ALLRX |
			     PNX8XXX_UART_INT_ALLTX);
	/*
	 * Enable modem status interrupts
	 */
	spin_lock_irq(&sport->port.lock);
	pnx8xxx_enable_ms(&sport->port);
	spin_unlock_irq(&sport->port.lock);
	return 0;
}
/* Close hook: stop polling, mask and clear all interrupts, reset both
 * FIFOs, drop any break condition, and release the IRQ. */
static void pnx8xxx_shutdown(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	int lcr;
	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);
	/*
	 * Disable all interrupts
	 */
	serial_out(sport, PNX8XXX_IEN, 0);
	/*
	 * Reset the Tx and Rx FIFOS, disable the break condition
	 */
	lcr = serial_in(sport, PNX8XXX_LCR);
	lcr &= ~PNX8XXX_UART_LCR_TXBREAK;
	lcr |= PNX8XXX_UART_LCR_TX_RST | PNX8XXX_UART_LCR_RX_RST;
	serial_out(sport, PNX8XXX_LCR, lcr);
	/*
	 * Clear all interrupts
	 */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
			     PNX8XXX_UART_INT_ALLTX);
	/*
	 * Free the interrupt
	 */
	free_irq(sport->port.irq, sport);
}
/*
 * Apply new termios settings: data size (CS7/CS8 only), stop bits,
 * parity, baud rate, and the read/ignore status masks used by the RX
 * path.  The sequence (mask interrupts, drain TX, reset FIFOs, program
 * LCR/BAUD, clear and re-enable interrupts) follows the hardware's
 * required order.
 */
static void
pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	unsigned long flags;
	unsigned int lcr_fcr, old_ien, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;	/* second pass falls back to CS8 */
	}
	if ((termios->c_cflag & CSIZE) == CS8)
		lcr_fcr = PNX8XXX_UART_LCR_8BIT;
	else
		lcr_fcr = 0;
	if (termios->c_cflag & CSTOPB)
		lcr_fcr |= PNX8XXX_UART_LCR_2STOPB;
	if (termios->c_cflag & PARENB) {
		lcr_fcr |= PNX8XXX_UART_LCR_PAREN;
		if (!(termios->c_cflag & PARODD))
			lcr_fcr |= PNX8XXX_UART_LCR_PAREVN;
	}
	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);
	spin_lock_irqsave(&sport->port.lock, flags);
	/* Conditions that the RX path should report to the tty layer */
	sport->port.read_status_mask = ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN) |
				ISTAT_TO_SM(PNX8XXX_UART_INT_EMPTY) |
				ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |=
			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |=
			ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |=
			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |=
			ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |=
				ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN);
	}
	/*
	 * ignore all characters if CREAD is not set
	 */
	if ((termios->c_cflag & CREAD) == 0)
		sport->port.ignore_status_mask |=
			ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
	del_timer_sync(&sport->timer);
	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);
	/*
	 * disable interrupts and drain transmitter
	 */
	old_ien = serial_in(sport, PNX8XXX_IEN);
	serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
					PNX8XXX_UART_INT_ALLRX));
	/* Busy-wait until the TX FIFO reports empty */
	while (serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA)
		barrier();
	/* then, disable everything */
	serial_out(sport, PNX8XXX_IEN, 0);
	/* Reset the Rx and Tx FIFOs too */
	lcr_fcr |= PNX8XXX_UART_LCR_TX_RST;
	lcr_fcr |= PNX8XXX_UART_LCR_RX_RST;
	/* set the parity, stop bits and data size */
	serial_out(sport, PNX8XXX_LCR, lcr_fcr);
	/* set the baud rate */
	quot -= 1;
	serial_out(sport, PNX8XXX_BAUD, quot);
	/* Clear every pending interrupt, then restore the saved mask */
	serial_out(sport, PNX8XXX_ICLR, -1);
	serial_out(sport, PNX8XXX_IEN, old_ien);
	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		pnx8xxx_enable_ms(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}
/* Human-readable port type, or NULL if the port was never configured. */
static const char *pnx8xxx_type(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;

	if (sport->port.type != PORT_PNX8XXX)
		return NULL;
	return "PNX8XXX";
}
/*
* Release the memory region(s) being used by 'port'.
*/
/* Release the register window claimed by pnx8xxx_request_port(). */
static void pnx8xxx_release_port(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}
/*
* Request the memory region(s) being used by 'port'.
*/
/* Claim the UART's register window; 0 on success, -EBUSY if taken. */
static int pnx8xxx_request_port(struct uart_port *port)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	struct resource *res;

	res = request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
				 "pnx8xxx-uart");
	if (!res)
		return -EBUSY;
	return 0;
}
/*
* Configure/autoconfigure the port.
*/
/* Autoconfiguration: mark the port as PORT_PNX8XXX once its register
 * window has been successfully claimed. */
static void pnx8xxx_config_port(struct uart_port *port, int flags)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	if (flags & UART_CONFIG_TYPE &&
	    pnx8xxx_request_port(&sport->port) == 0)
		sport->port.type = PORT_PNX8XXX;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_PNX8XXX and PORT_UNKNOWN
*/
/*
 * Validate a TIOCSSERIAL request against the fixed hardware parameters.
 * Only PORT_UNKNOWN/PORT_PNX8XXX types are accepted, and irq, I/O type,
 * baud base, iomem base, port and hub6 must all match the current port.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static int
pnx8xxx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_PNX8XXX)
		return -EINVAL;
	if (ser->irq != sport->port.irq)
		return -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		return -EINVAL;
	if (ser->baud_base != sport->port.uartclk / 16)
		return -EINVAL;
	if (ser->iomem_base != (void *)sport->port.mapbase)
		return -EINVAL;
	if (ser->port != sport->port.iobase)
		return -EINVAL;
	if (ser->hub6 != 0)
		return -EINVAL;
	return 0;
}
/* serial_core operations table wiring this driver into the UART layer */
static struct uart_ops pnx8xxx_pops = {
	.tx_empty	= pnx8xxx_tx_empty,
	.set_mctrl	= pnx8xxx_set_mctrl,
	.get_mctrl	= pnx8xxx_get_mctrl,
	.stop_tx	= pnx8xxx_stop_tx,
	.start_tx	= pnx8xxx_start_tx,
	.stop_rx	= pnx8xxx_stop_rx,
	.enable_ms	= pnx8xxx_enable_ms,
	.break_ctl	= pnx8xxx_break_ctl,
	.startup	= pnx8xxx_startup,
	.shutdown	= pnx8xxx_shutdown,
	.set_termios	= pnx8xxx_set_termios,
	.type		= pnx8xxx_type,
	.release_port	= pnx8xxx_release_port,
	.request_port	= pnx8xxx_request_port,
	.config_port	= pnx8xxx_config_port,
	.verify_port	= pnx8xxx_verify_port,
};
/*
* Setup the PNX8XXX serial ports.
*
* Note also that we support "console=ttySx" where "x" is either 0 or 1.
*/
/* One-shot initialization of the static port table: hook up the poll
 * timer and the uart_ops for each port.  Safe to call more than once. */
static void __init pnx8xxx_init_ports(void)
{
	static int first = 1;	/* guards against double init */
	int i;
	if (!first)
		return;
	first = 0;
	for (i = 0; i < NR_PORTS; i++) {
		init_timer(&pnx8xxx_ports[i].timer);
		pnx8xxx_ports[i].timer.function = pnx8xxx_timeout;
		pnx8xxx_ports[i].timer.data     = (unsigned long)&pnx8xxx_ports[i];
		pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
	}
}
#ifdef CONFIG_SERIAL_PNX8XXX_CONSOLE
/* Polled console output: busy-wait until the TX FIFO drains, then
 * write one character. */
static void pnx8xxx_console_putchar(struct uart_port *port, int ch)
{
	struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
	int status;
	do {
		/* Wait for UART_TX register to empty */
		status = serial_in(sport, PNX8XXX_FIFO);
	} while (status & PNX8XXX_UART_FIFO_TXFIFO);
	serial_out(sport, PNX8XXX_FIFO, ch);
}
/*
 * Console write callback.  Interrupts are disabled on entering.
 * Masks RX/TX interrupts, emits the string via polled putchar, waits
 * for the FIFO to drain, then acknowledges and restores the saved
 * interrupt-enable mask.
 */
static void
pnx8xxx_console_write(struct console *co, const char *s, unsigned int count)
{
	struct pnx8xxx_port *sport = &pnx8xxx_ports[co->index];
	unsigned int old_ien, status;
	/*
	 * First, save IEN and then disable interrupts
	 */
	old_ien = serial_in(sport, PNX8XXX_IEN);
	serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
					PNX8XXX_UART_INT_ALLRX));
	uart_console_write(&sport->port, s, count, pnx8xxx_console_putchar);
	/*
	 * Finally, wait for transmitter to become empty
	 * and restore IEN
	 */
	do {
		/* Wait for UART_TX register to empty */
		status = serial_in(sport, PNX8XXX_FIFO);
	} while (status & PNX8XXX_UART_FIFO_TXFIFO);
	/* Clear TX and EMPTY interrupt */
	serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_TX |
			     PNX8XXX_UART_INT_EMPTY);
	serial_out(sport, PNX8XXX_IEN, old_ien);
}
/* Console setup: pick a valid port index, parse any "baud,parity,bits,
 * flow" options (defaults 38400 8n1, no flow) and apply them. */
static int __init
pnx8xxx_console_setup(struct console *co, char *options)
{
	struct pnx8xxx_port *sport;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= NR_PORTS)
		co->index = 0;
	sport = &pnx8xxx_ports[co->index];
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
static struct uart_driver pnx8xxx_reg;
/* Console descriptor registered at console_initcall time */
static struct console pnx8xxx_console = {
	.name		= "ttyS",
	.write		= pnx8xxx_console_write,
	.device		= uart_console_device,
	.setup		= pnx8xxx_console_setup,
	.flags		= CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index		= -1,			/* any port until setup picks one */
	.data		= &pnx8xxx_reg,
};
/* Early console registration; ports must be initialized first. */
static int __init pnx8xxx_rs_console_init(void)
{
	pnx8xxx_init_ports();
	register_console(&pnx8xxx_console);
	return 0;
}
console_initcall(pnx8xxx_rs_console_init);
#define PNX8XXX_CONSOLE &pnx8xxx_console
#else
#define PNX8XXX_CONSOLE NULL
#endif
/* uart_driver descriptor: ttyS device nodes, borrowed SA1100 major/minor */
static struct uart_driver pnx8xxx_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyS",
	.dev_name		= "ttyS",
	.major			= SERIAL_PNX8XXX_MAJOR,
	.minor			= MINOR_START,
	.nr			= NR_PORTS,
	.cons			= PNX8XXX_CONSOLE,
};
/* Platform PM suspend: delegate to the serial core. */
static int pnx8xxx_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
	return uart_suspend_port(&pnx8xxx_reg, &sport->port);
}
/* Platform PM resume: delegate to the serial core. */
static int pnx8xxx_serial_resume(struct platform_device *pdev)
{
	struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
	return uart_resume_port(&pnx8xxx_reg, &sport->port);
}
/*
 * Platform probe: for each MEM resource of the device, find the static
 * port whose mapbase matches and register it with the serial core.
 *
 * Fix: the inner port-matching loop previously reused the outer loop
 * variable `i`, clobbering the resource-iteration index so subsequent
 * resources were scanned from a wrong position (or skipped entirely).
 * The inner loop now uses its own index.
 */
static int pnx8xxx_serial_probe(struct platform_device *pdev)
{
	struct resource *res = pdev->resource;
	int i, j;

	for (i = 0; i < pdev->num_resources; i++, res++) {
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		for (j = 0; j < NR_PORTS; j++) {
			if (pnx8xxx_ports[j].port.mapbase != res->start)
				continue;
			pnx8xxx_ports[j].port.dev = &pdev->dev;
			uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[j].port);
			/* NOTE(review): drvdata holds only the last matched
			 * port, as in the original code */
			platform_set_drvdata(pdev, &pnx8xxx_ports[j]);
			break;
		}
	}
	return 0;
}
/* Platform remove: detach drvdata and unregister the port if one was
 * bound during probe. */
static int pnx8xxx_serial_remove(struct platform_device *pdev)
{
	struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
	platform_set_drvdata(pdev, NULL);
	if (sport)
		uart_remove_one_port(&pnx8xxx_reg, &sport->port);
	return 0;
}
/* Platform driver glue; name must match the board's device name */
static struct platform_driver pnx8xxx_serial_driver = {
	.driver		= {
		.name	= "pnx8xxx-uart",
		.owner	= THIS_MODULE,
	},
	.probe		= pnx8xxx_serial_probe,
	.remove		= pnx8xxx_serial_remove,
	.suspend	= pnx8xxx_serial_suspend,
	.resume		= pnx8xxx_serial_resume,
};
/* Module init: register the uart driver, then the platform driver;
 * roll back the uart registration if the latter fails. */
static int __init pnx8xxx_serial_init(void)
{
	int ret;
	printk(KERN_INFO "Serial: PNX8XXX driver\n");
	pnx8xxx_init_ports();
	ret = uart_register_driver(&pnx8xxx_reg);
	if (ret == 0) {
		ret = platform_driver_register(&pnx8xxx_serial_driver);
		if (ret)
			uart_unregister_driver(&pnx8xxx_reg);
	}
	return ret;
}
/* Module exit: unregister in reverse order of init. */
static void __exit pnx8xxx_serial_exit(void)
{
	platform_driver_unregister(&pnx8xxx_serial_driver);
	uart_unregister_driver(&pnx8xxx_reg);
}
module_init(pnx8xxx_serial_init);
module_exit(pnx8xxx_serial_exit);
MODULE_AUTHOR("Embedded Alley Solutions, Inc.");
MODULE_DESCRIPTION("PNX8XXX SoCs serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_PNX8XXX_MAJOR);
MODULE_ALIAS("platform:pnx8xxx-uart");
| gpl-2.0 |
mtitinger/linux-pm | net/bridge/netfilter/ebt_limit.c | 13745 | 3247 | /*
* ebt_limit
*
* Authors:
* Tom Marshall <tommy@home.tig-grr.com>
*
* Mostly copied from netfilter's ipt_limit.c, see that file for
* more explanation
*
* September, 2003
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_limit.h>
static DEFINE_SPINLOCK(limit_lock);
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
/*
 * Token-bucket rate-limit match.  Returns true while the packet rate
 * stays within the configured limit.  The bucket (info->credit) is
 * refilled at CREDITS_PER_JIFFY per elapsed jiffy — xchg() on prev makes
 * the "time consumed" update atomic — capped at credit_cap, and each
 * matching packet spends info->cost credits.
 */
static bool
ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct ebt_limit_info *info = (void *)par->matchinfo;
	unsigned long now = jiffies;
	spin_lock_bh(&limit_lock);
	/* Refill for the time elapsed since the previous packet */
	info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;
	if (info->credit > info->credit_cap)
		info->credit = info->credit_cap;
	if (info->credit >= info->cost) {
		/* We're not limited. */
		info->credit -= info->cost;
		spin_unlock_bh(&limit_lock);
		return true;
	}
	spin_unlock_bh(&limit_lock);
	return false;
}
/* Precision saver: convert a userspace rate (seconds * EBT_LIMIT_SCALE)
 * into internal credits.  If the straightforward multiply would overflow
 * 32 bits, divide by EBT_LIMIT_SCALE first at the cost of precision. */
static u_int32_t
user2credits(u_int32_t user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
	return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
}
/* checkentry: validate user-supplied avg/burst (reject zero burst and
 * overflowing credit computations), then seed the runtime token-bucket
 * fields.  Returns 0 or -EINVAL. */
static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
{
	struct ebt_limit_info *info = par->matchinfo;
	/* Check for overflow. */
	if (info->burst == 0 ||
	    user2credits(info->avg * info->burst) < user2credits(info->avg)) {
		pr_info("overflow, try lower: %u/%u\n",
			info->avg, info->burst);
		return -EINVAL;
	}
	/* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
	info->prev = jiffies;
	/* Start with a full bucket */
	info->credit = user2credits(info->avg * info->burst);
	info->credit_cap = user2credits(info->avg * info->burst);
	info->cost = user2credits(info->avg);
	return 0;
}
#ifdef CONFIG_COMPAT
/*
* no conversion function needed --
* only avg/burst have meaningful values in userspace.
*/
struct ebt_compat_limit_info {
compat_uint_t avg, burst;
compat_ulong_t prev;
compat_uint_t credit, credit_cap, cost;
};
#endif
/* x_tables registration record for the bridge "limit" match */
static struct xt_match ebt_limit_mt_reg __read_mostly = {
	.name		= "limit",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.match		= ebt_limit_mt,
	.checkentry	= ebt_limit_mt_check,
	.matchsize	= sizeof(struct ebt_limit_info),
#ifdef CONFIG_COMPAT
	/* 32-bit userspace layout; no translation needed, only avg/burst
	 * are meaningful from userspace */
	.compatsize	= sizeof(struct ebt_compat_limit_info),
#endif
	.me		= THIS_MODULE,
};
/* Module init: register the match with x_tables. */
static int __init ebt_limit_init(void)
{
	return xt_register_match(&ebt_limit_mt_reg);
}
/* Module exit: unregister the match. */
static void __exit ebt_limit_fini(void)
{
	xt_unregister_match(&ebt_limit_mt_reg);
}
module_init(ebt_limit_init);
module_exit(ebt_limit_fini);
MODULE_DESCRIPTION("Ebtables: Rate-limit match");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Clust3r/android_kernel_oneplus_msm8994 | drivers/platform/msm/ipa/ipa_uc_wdi.c | 178 | 30881 | /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "ipa_i.h"
#include <linux/dmapool.h>
#include <linux/delay.h>
#define IPA_HOLB_TMR_DIS 0x0
#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
#define IPA_WDI_DMA_POOL_SIZE (max(sizeof(struct IpaHwWdiTxSetUpCmdData_t), \
sizeof(struct IpaHwWdiRxSetUpCmdData_t)))
#define IPA_WDI_DMA_POOL_ALIGNMENT 8
#define IPA_WDI_DMA_POOL_BOUNDARY 1024
#define IPA_WDI_CONNECTED BIT(0)
#define IPA_WDI_ENABLED BIT(1)
#define IPA_WDI_RESUMED BIT(2)
#define IPA_UC_POLL_SLEEP_USEC 100
/**
* enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to CPU.
* @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
* in WDI
*/
enum ipa_hw_2_cpu_wdi_events {
IPA_HW_2_CPU_EVENT_WDI_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
};
/**
* enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
* machine.
* @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
* disabled
* @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
* suspended state
* @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
* SET_UP_COMMAND is processed successfully
* @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
* @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
* operational scenario
*
* These states apply to both Tx and Rx paths. These do not reflect the
* sub-state the state machine may be in.
*/
enum ipa_hw_wdi_channel_states {
IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3,
IPA_HW_WDI_CHANNEL_STATE_ERROR = 4,
IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF
};
/**
* enum ipa_cpu_2_hw_commands - Values that represent the WDI commands from CPU
* @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
* @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
* @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
* @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
* @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
* @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
* @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
* @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
*/
enum ipa_cpu_2_hw_wdi_commands {
IPA_CPU_2_HW_CMD_WDI_TX_SET_UP =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
IPA_CPU_2_HW_CMD_WDI_RX_SET_UP =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
IPA_CPU_2_HW_CMD_WDI_CH_ENABLE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
IPA_CPU_2_HW_CMD_WDI_CH_RESUME =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
};
/**
 * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
 * command response status to be sent to CPU.
 *
 * Only IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS (feature-offset 0) indicates
 * success; all other values are distinct failure reasons reported by the
 * uC for Tx/Rx setup and FSM transitions.  Encoded with FEATURE_ENUM_VAL()
 * like the command enum above.
 */
enum ipa_hw_2_cpu_cmd_resp_status {
	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
};
/**
 * enum ipa_hw_wdi_errors - WDI specific error types.
 * @IPA_HW_WDI_ERROR_NONE : No error persists
 * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
 *
 * Carried in the wdi_error_type field of union IpaHwWdiErrorEventData_t.
 */
enum ipa_hw_wdi_errors {
	IPA_HW_WDI_ERROR_NONE    = 0,
	IPA_HW_WDI_CHANNEL_ERROR = 1
};
/**
 * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
 * in the event param.
 * @IPA_HW_WDI_CH_ERR_NONE : No error persists
 * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
 * Completion ring
 * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
 * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
 * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
 */
enum ipa_hw_wdi_ch_errors {
	IPA_HW_WDI_CH_ERR_NONE = 0,
	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
	IPA_HW_WDI_TX_FSM_ERROR = 2,
	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3,
	IPA_HW_WDI_CH_ERR_RESERVED = 0xFF
};
/**
 * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and
 * WDI section of 128B shared memory located in offset zero of SW Partition in
 * IPA SRAM.
 *
 * The shared memory is used for communication between IPA HW and CPU.
 * Field names of the reserved words encode the byte range they occupy
 * (e.g. reserved_2B_28 covers bytes 0x28..0x2B).  Layout is HW ABI:
 * __packed, do not reorder.
 */
struct IpaHwSharedMemWdiMapping_t {
	struct IpaHwSharedMemCommonMapping_t common; /* shared non-WDI header */
	u32 reserved_2B_28;
	u32 reserved_2F_2C;
	u32 reserved_33_30;
	u32 reserved_37_34;
	u32 reserved_3B_38;
	u32 reserved_3F_3C;
	u16 interfaceVersionWdi;  /* WDI interface version reported by uC */
	u16 reserved_43_42;
	u8  wdi_tx_ch_0_state;    /* enum ipa_hw_wdi_channel_states */
	u8  wdi_rx_ch_0_state;    /* enum ipa_hw_wdi_channel_states */
	u16 reserved_47_46;
} __packed;
/**
 * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
 * @comp_ring_base_pa : This is the physical address of the base of the Tx
 * completion ring
 * @comp_ring_size : This is the size of the Tx completion ring
 * @reserved_comp_ring : Reserved field for expansion of Completion ring params
 * @ce_ring_base_pa : This is the physical address of the base of the Copy
 * Engine Source Ring
 * @ce_ring_size : Copy Engine Ring size
 * @reserved_ce_ring : Reserved field for expansion of CE ring params
 * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
 * IPA uC has to write into to trigger the copy engine
 * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
 * and the Tx completion ring has to be atleast ( num_tx_buffers + 1)
 * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
 * Tx path
 * @reserved : Reserved field
 *
 * Parameters are sent as pointer thus should be reside in address accessible
 * to HW (allocated from the WDI DMA pool in ipa_connect_wdi_pipe()).
 */
struct IpaHwWdiTxSetUpCmdData_t {
	u32 comp_ring_base_pa;
	u16 comp_ring_size;
	u16 reserved_comp_ring;
	u32 ce_ring_base_pa;
	u16 ce_ring_size;
	u16 reserved_ce_ring;
	u32 ce_ring_doorbell_pa;
	u16 num_tx_buffers;
	u8  ipa_pipe_number;
	u8  reserved;
} __packed;
/**
 * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
 * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
 * (containing Rx buffers)
 * @rx_ring_size : This is the size of the Rx ring
 * @rx_ring_rp_pa : This is the physical address of the location through which
 * IPA uc is expected to communicate about the Read pointer into the Rx Ring
 * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
 * Rx path
 *
 * Parameters are sent as pointer thus should be reside in address accessible
 * to HW (allocated from the WDI DMA pool in ipa_connect_wdi_pipe()).
 */
struct IpaHwWdiRxSetUpCmdData_t {
	u32 rx_ring_base_pa;
	u32 rx_ring_size;
	u32 rx_ring_rp_pa;
	u8  ipa_pipe_number;
} __packed;
/**
 * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
 * @ipa_pipe_number : The IPA pipe number for which this config is passed
 * @qmap_id : QMAP ID to be set in the metadata register
 * @reserved : Reserved
 *
 * The parameters are passed as immediate params in the shared memory
 * (the whole 32-bit word goes out via @raw32b, so callers should
 * zero-initialize the union to avoid sending indeterminate reserved bits).
 */
union IpaHwWdiRxExtCfgCmdData_t {
	struct IpaHwWdiRxExtCfgCmdParams_t {
		u32 ipa_pipe_number:8;
		u32 qmap_id:8;
		u32 reserved:16;
	} __packed params;
	u32 raw32b;
} __packed;
/**
 * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
 * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
 * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
 * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
 * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
 * @ipa_pipe_number : The IPA pipe number. This could be Tx or an Rx pipe
 * @reserved : Reserved
 *
 * The parameters are passed as immediate params in the shared memory
 * (the whole 32-bit word goes out via @raw32b, so callers should
 * zero-initialize the union to avoid sending indeterminate reserved bits).
 */
union IpaHwWdiCommonChCmdData_t {
	struct IpaHwWdiCommonChCmdParams_t {
		u32 ipa_pipe_number:8;
		u32 reserved:24;
	} __packed params;
	u32 raw32b;
} __packed;
/**
 * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
 * event.
 * @wdi_error_type : Type of the WDI error (enum ipa_hw_wdi_errors)
 * @reserved : Reserved
 * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
 * only if error type indicates channel error
 * @wdi_ch_err_type : Information about the channel error (if available,
 * enum ipa_hw_wdi_ch_errors)
 *
 * The parameters are passed as immediate params in the shared memory.
 */
union IpaHwWdiErrorEventData_t {
	struct IpaHwWdiErrorEventParams_t {
		u32 wdi_error_type:8;
		u32 reserved:8;
		u32 ipa_pipe_number:8;
		u32 wdi_ch_err_type:8;
	} __packed params;
	u32 raw32b;
} __packed;
/*
 * ipa_uc_wdi_event_log_info_handler() - process the uC "event log info"
 * notification for the WDI feature.
 *
 * Validates that the uC reports the WDI feature and that the advertised
 * stats-block size matches this driver's struct IpaHwStatsWDIInfoData_t,
 * computes the SRAM offset of the WDI stats block, bounds-checks it
 * against the SRAM window and ioremaps it into
 * ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio for later ipa_get_wdi_stats() use.
 */
static void ipa_uc_wdi_event_log_info_handler(
struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
	/* uC must advertise the WDI feature bit before we trust its data */
	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
		IPAERR("WDI feature missing 0x%x\n",
			uc_event_top_mmio->featureMask);
		return;
	}

	/* size mismatch means uC firmware and driver disagree on layout */
	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
		params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
			sizeof(struct IpaHwStatsWDIInfoData_t),
			uc_event_top_mmio->statsInfo.
			featureInfo[IPA_HW_FEATURE_WDI].params.size);
		return;
	}

	/* stats offset = common stats base + WDI feature-specific offset */
	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
		featureInfo[IPA_HW_FEATURE_WDI].params.offset;
	/* NOTE(review): IPAERR used for an informational print — presumably
	 * deliberate so the offset is always logged; confirm intent */
	IPAERR("WDI stats ofst=0x%x\n", ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);

	/* reject a stats block that would extend beyond the SRAM window */
	if (ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
		sizeof(struct IpaHwStatsWDIInfoData_t) >=
		ipa_ctx->ctrl->ipa_reg_base_ofst +
		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
		ipa_ctx->smem_sz) {
		IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
			ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
		return;
	}

	/* map the stats block; on failure stats queries will return -EINVAL */
	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
		ioremap(ipa_ctx->ipa_wrapper_base +
			ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
			sizeof(struct IpaHwStatsWDIInfoData_t));
	if (!ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
		IPAERR("fail to ioremap uc wdi stats\n");
		return;
	}

	return;
}
static void ipa_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
*uc_sram_mmio)
{
union IpaHwWdiErrorEventData_t wdi_evt;
struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
if (uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_WDI_ERROR) {
wdi_evt.raw32b = uc_sram_mmio->eventParams;
IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
wdi_evt.params.wdi_error_type,
wdi_evt.params.ipa_pipe_number,
wdi_evt.params.wdi_ch_err_type);
wdi_sram_mmio_ext =
(struct IpaHwSharedMemWdiMapping_t *)
uc_sram_mmio;
IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
wdi_sram_mmio_ext->wdi_tx_ch_0_state,
wdi_sram_mmio_ext->wdi_rx_ch_0_state);
}
}
/**
 * ipa_get_wdi_stats() - Query WDI statistics from uc
 * @stats: [inout] stats blob from client populated by driver
 *
 * Copies the uC WDI Tx/Rx channel statistics field-by-field out of the
 * ioremapped stats block (set up by ipa_uc_wdi_event_log_info_handler())
 * into the caller's buffer.
 *
 * Returns: 0 on success, negative on failure
 *
 * @note Cannot be called from atomic context
 */
int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
{
/* field-copy helpers, local to this function (see #undef at the end) */
#define TX_STATS(y) stats->tx_ch_stats.y = \
	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
#define RX_STATS(y) stats->rx_ch_stats.y = \
	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y

	/* stats MMIO is only valid after a successful event-log-info setup */
	if (!stats || !ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
		IPAERR("bad parms stats=%p wdi_stats=%p\n",
			stats,
			ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
		return -EINVAL;
	}

	/* keep IPA clocks on while reading the stats MMIO region */
	ipa_inc_client_enable_clks();

	TX_STATS(num_pkts_processed);
	TX_STATS(copy_engine_doorbell_value);
	TX_STATS(num_db_fired);
	TX_STATS(tx_comp_ring_stats.ringFull);
	TX_STATS(tx_comp_ring_stats.ringEmpty);
	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
	TX_STATS(tx_comp_ring_stats.ringUsageLow);
	TX_STATS(bam_stats.bamFifoFull);
	TX_STATS(bam_stats.bamFifoEmpty);
	TX_STATS(bam_stats.bamFifoUsageHigh);
	TX_STATS(bam_stats.bamFifoUsageLow);
	TX_STATS(num_db);
	TX_STATS(num_unexpected_db);
	TX_STATS(num_bam_int_handled);
	TX_STATS(num_bam_int_in_non_runnning_state);
	TX_STATS(num_qmb_int_handled);

	RX_STATS(max_outstanding_pkts);
	RX_STATS(num_pkts_processed);
	RX_STATS(rx_ring_rp_value);
	RX_STATS(rx_ind_ring_stats.ringFull);
	RX_STATS(rx_ind_ring_stats.ringEmpty);
	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
	RX_STATS(rx_ind_ring_stats.ringUsageLow);
	RX_STATS(bam_stats.bamFifoFull);
	RX_STATS(bam_stats.bamFifoEmpty);
	RX_STATS(bam_stats.bamFifoUsageHigh);
	RX_STATS(bam_stats.bamFifoUsageLow);
	RX_STATS(num_bam_int_handled);
	RX_STATS(num_db);
	RX_STATS(num_unexpected_db);
	RX_STATS(reserved1);
	RX_STATS(reserved2);

	ipa_dec_client_disable_clks();

	return 0;

/* do not let the helper macros leak into the rest of this file */
#undef TX_STATS
#undef RX_STATS
}
EXPORT_SYMBOL(ipa_get_wdi_stats);
/*
 * ipa_wdi_init() - one-time WDI sub-module initialization.
 *
 * Creates the DMA pool used for uC setup-command payloads and registers
 * the WDI event and event-log-info handlers with the uC core.
 *
 * Returns 0 on success, -ENOMEM if the DMA pool cannot be created.
 */
int ipa_wdi_init(void)
{
	struct ipa_uc_hdlrs uc_wdi_cbs = { 0 };

	/* pool for HW-accessible command buffers (Tx/Rx set-up params) */
	ipa_ctx->uc_wdi_ctx.wdi_dma_pool = dma_pool_create("ipa_wdi1k",
			ipa_ctx->pdev,
			IPA_WDI_DMA_POOL_SIZE, IPA_WDI_DMA_POOL_ALIGNMENT,
			IPA_WDI_DMA_POOL_BOUNDARY);
	if (!ipa_ctx->uc_wdi_ctx.wdi_dma_pool) {
		IPAERR("fail to setup DMA pool\n");
		return -ENOMEM;
	}

	uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler;
	uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
		ipa_uc_wdi_event_log_info_handler;

	ipa_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);

	return 0;
}
/**
 * ipa_connect_wdi_pipe() - WDI client connect
 * @in: [in] input parameters from client
 * @out: [out] output params to client
 *
 * Allocates an IPA endpoint for the WDI client, builds the Tx or Rx
 * set-up command payload in HW-accessible DMA memory, sends it to the
 * uC, configures the endpoint and reports the uC doorbell address back
 * to the client.  On success the EP enters the IPA_WDI_CONNECTED state.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
		struct ipa_wdi_out_params *out)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa_ep_context *ep;
	struct ipa_mem_buffer cmd;
	struct IpaHwWdiTxSetUpCmdData_t *tx;
	struct IpaHwWdiRxSetUpCmdData_t *rx;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
		IPAERR("bad parm. in=%p out=%p\n", in, out);
		if (in)
			IPAERR("client = %d\n", in->sys.client);
		return -EINVAL;
	}

	/* CONS = Tx (download) path, PROD = Rx (upload) path.
	 * Ring bases must meet the uC alignment requirement.
	 */
	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
		if (in->u.dl.comp_ring_base_pa % IPA_WDI_DMA_POOL_ALIGNMENT ||
			in->u.dl.ce_ring_base_pa % IPA_WDI_DMA_POOL_ALIGNMENT) {
			IPAERR("alignment failure on TX\n");
			return -EINVAL;
		}
	} else {
		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_DMA_POOL_ALIGNMENT) {
			IPAERR("alignment failure on RX\n");
			return -EINVAL;
		}
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	ipa_ep_idx = ipa_get_ep_mapping(in->sys.client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
	/* clocks stay on for the whole setup sequence */
	ipa_inc_client_enable_clks();

	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
		cmd.size = sizeof(*tx);
		IPADBG("comp_ring_base_pa=0x%pa\n",
				&in->u.dl.comp_ring_base_pa);
		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
				&in->u.dl.ce_door_bell_pa);
		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
	} else {
		cmd.size = sizeof(*rx);
		IPADBG("rx_ring_base_pa=0x%pa\n", &in->u.ul.rdy_ring_base_pa);
		IPADBG("rx_ring_size=%d\n", in->u.ul.rdy_ring_size);
		IPADBG("rx_ring_rp_pa=0x%pa\n", &in->u.ul.rdy_ring_rp_pa);
	}

	/* command payload must be in HW-accessible memory (sent by address) */
	cmd.base = dma_pool_alloc(ipa_ctx->uc_wdi_ctx.wdi_dma_pool, GFP_KERNEL,
			&cmd.phys_base);
	if (cmd.base == NULL) {
		IPAERR("fail to get DMA memory.\n");
		result = -ENOMEM;
		goto dma_alloc_fail;
	}

	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
		/* populate Tx set-up params and pick the Tx doorbell, which
		 * depends on the IPA HW version's register map
		 */
		tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
		tx->comp_ring_base_pa = in->u.dl.comp_ring_base_pa;
		tx->comp_ring_size = in->u.dl.comp_ring_size;
		tx->ce_ring_base_pa = in->u.dl.ce_ring_base_pa;
		tx->ce_ring_size = in->u.dl.ce_ring_size;
		tx->ce_ring_doorbell_pa = in->u.dl.ce_door_bell_pa;
		tx->num_tx_buffers = in->u.dl.num_tx_buffers;
		tx->ipa_pipe_number = ipa_ep_idx;
		if (ipa_ctx->ipa_hw_type == IPA_HW_v2_5) {
			out->uc_door_bell_pa =
				ipa_ctx->ipa_wrapper_base +
				IPA_REG_BASE_OFST_v2_5 +
				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
		} else {
			out->uc_door_bell_pa =
				ipa_ctx->ipa_wrapper_base +
				IPA_REG_BASE_OFST_v2_0 +
				IPA_UC_MAILBOX_m_n_OFFS(
				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
		}
	} else {
		/* populate Rx set-up params and pick the Rx doorbell */
		rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
		rx->rx_ring_base_pa = in->u.ul.rdy_ring_base_pa;
		rx->rx_ring_size = in->u.ul.rdy_ring_size;
		rx->rx_ring_rp_pa = in->u.ul.rdy_ring_rp_pa;
		rx->ipa_pipe_number = ipa_ep_idx;
		if (ipa_ctx->ipa_hw_type == IPA_HW_v2_5) {
			out->uc_door_bell_pa =
				ipa_ctx->ipa_wrapper_base +
				IPA_REG_BASE_OFST_v2_5 +
				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
		} else {
			out->uc_door_bell_pa =
				ipa_ctx->ipa_wrapper_base +
				IPA_REG_BASE_OFST_v2_0 +
				IPA_UC_MAILBOX_m_n_OFFS(
				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
		}
	}

	ep->valid = 1;
	ep->client = in->sys.client;
	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
	result = ipa_disable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
				ipa_ep_idx);
		goto uc_timeout;
	}
	/* hold the producer pipe in delay until the client enables it */
	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
		memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = true;
		ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
	}

	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
				IPA_CLIENT_IS_CONS(in->sys.client) ?
				IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
				IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
				false, 10*HZ);

	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
	ep->client_notify = in->sys.notify;
	ep->priv = in->sys.priv;

	if (!ep->skip_ep_cfg) {
		if (ipa_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto ipa_cfg_ep_fail;
		}
		IPADBG("ep configuration successful\n");
	} else {
		IPADBG("Skipping endpoint configuration.\n");
	}

	out->clnt_hdl = ipa_ep_idx;

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
		ipa_install_dflt_flt_rules(ipa_ep_idx);

	if (!ep->keep_ipa_awake)
		ipa_dec_client_disable_clks();

	dma_pool_free(ipa_ctx->uc_wdi_ctx.wdi_dma_pool, cmd.base,
			cmd.phys_base);
	ep->wdi_state |= IPA_WDI_CONNECTED;
	IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);

	return 0;

	/* unwind in reverse acquisition order */
ipa_cfg_ep_fail:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
uc_timeout:
	dma_pool_free(ipa_ctx->uc_wdi_ctx.wdi_dma_pool, cmd.base,
			cmd.phys_base);
dma_alloc_fail:
	ipa_dec_client_disable_clks();
fail:
	return result;
}
EXPORT_SYMBOL(ipa_connect_wdi_pipe);
/**
 * ipa_disconnect_wdi_pipe() - WDI client disconnect
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * Sends the TEAR_DOWN command for the pipe and releases the endpoint.
 * Only legal from the IPA_WDI_CONNECTED (not enabled) state.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved bits of the command word are not sent
	 * to the uC as indeterminate stack data
	 */
	union IpaHwWdiCommonChCmdData_t tear = { .raw32b = 0 };

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	if (ep->wdi_state != IPA_WDI_CONNECTED) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	/* clocks were left on at connect time when keep_ipa_awake was set */
	if (!ep->keep_ipa_awake)
		ipa_inc_client_enable_clks();

	tear.params.ipa_pipe_number = clnt_hdl;

	result = ipa_uc_send_cmd(tear.raw32b,
				IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
				false, 10*HZ);
	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	ipa_delete_dflt_flt_rules(clnt_hdl);
	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	ipa_dec_client_disable_clks();

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

uc_timeout:
	return result;
}
EXPORT_SYMBOL(ipa_disconnect_wdi_pipe);
/**
 * ipa_enable_wdi_pipe() - WDI client enable
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * Sends CH_ENABLE to the uC and, for consumer pipes, disables the
 * head-of-line-blocking timer.  On success the EP gains the
 * IPA_WDI_ENABLED state bit.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_enable_wdi_pipe(u32 clnt_hdl)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved bits of the command word are not sent
	 * to the uC as indeterminate stack data
	 */
	union IpaHwWdiCommonChCmdData_t enable = { .raw32b = 0 };
	struct ipa_ep_cfg_holb holb_cfg;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	/* enable is only legal from CONNECTED (not yet enabled) */
	if (ep->wdi_state != IPA_WDI_CONNECTED) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();
	enable.params.ipa_pipe_number = clnt_hdl;

	result = ipa_uc_send_cmd(enable.raw32b,
				IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
				false, 10*HZ);
	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	if (IPA_CLIENT_IS_CONS(ep->client)) {
		memset(&holb_cfg, 0 , sizeof(holb_cfg));
		holb_cfg.en = IPA_HOLB_TMR_DIS;
		holb_cfg.tmr_val = 0;
		/* NOTE(review): a holb-config failure is returned to the
		 * caller while ENABLED is still set below — confirm this
		 * partial-success behavior is intended
		 */
		result = ipa_cfg_ep_holb(clnt_hdl, &holb_cfg);
	}

	ipa_dec_client_disable_clks();
	ep->wdi_state |= IPA_WDI_ENABLED;
	IPADBG("client (ep: %d) enabled\n", clnt_hdl);

uc_timeout:
	return result;
}
EXPORT_SYMBOL(ipa_enable_wdi_pipe);
/**
 * ipa_disable_wdi_pipe() - WDI client disable
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * Stops the data path, sends CH_DISABLE to the uC and clears the
 * IPA_WDI_ENABLED state bit.  For producer pipes the delay is removed
 * first (and the paired WLAN consumer pipe is drained) to avoid a data
 * stall during repeated SAP on/off, then delay is re-applied after the
 * uC has disabled the channel.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_disable_wdi_pipe(u32 clnt_hdl)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved bits of the command word are not sent
	 * to the uC as indeterminate stack data
	 */
	union IpaHwWdiCommonChCmdData_t disable = { .raw32b = 0 };
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	int prod_idx;
	u32 prod_hdl;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	/* disable is only legal from CONNECTED|ENABLED (not resumed) */
	if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();
	result = ipa_disable_data_path(clnt_hdl);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
				clnt_hdl);
		result = -EPERM;
		goto uc_timeout;
	}

	/**
	 * To avoid data stall during continuous SAP on/off before
	 * setting delay to IPA Consumer pipe, remove delay and enable
	 * holb on IPA Producer pipe
	 */
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);

		/* ipa_get_ep_mapping() can fail (-1); guard against
		 * indexing ipa_ctx->ep[] with the value cast to u32
		 */
		prod_idx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
		if (prod_idx >= 0) {
			prod_hdl = prod_idx;
			if (ipa_ctx->ep[prod_hdl].valid == 1) {
				result = ipa_disable_data_path(prod_hdl);
				if (result) {
					IPAERR("disable data path failed\n");
					IPAERR("res=%d clnt=%d\n",
						result, prod_hdl);
					result = -EPERM;
					goto uc_timeout;
				}
			}
		}
		/* NOTE(review): the sleep interval is the poll interval
		 * squared — confirm the intended delay
		 */
		usleep(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
	}

	disable.params.ipa_pipe_number = clnt_hdl;
	result = ipa_uc_send_cmd(disable.raw32b,
				IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
				false, 10*HZ);
	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	/* Set the delay after disabling IPA Producer pipe */
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = true;
		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
	}

	ipa_dec_client_disable_clks();
	ep->wdi_state &= ~IPA_WDI_ENABLED;
	IPADBG("client (ep: %d) disabled\n", clnt_hdl);

uc_timeout:
	return result;
}
EXPORT_SYMBOL(ipa_disable_wdi_pipe);
/**
 * ipa_resume_wdi_pipe() - WDI client resume
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * Sends CH_RESUME to the uC and clears any suspend/delay on the
 * endpoint.  On success the EP gains the IPA_WDI_RESUMED state bit.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_resume_wdi_pipe(u32 clnt_hdl)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved bits of the command word are not sent
	 * to the uC as indeterminate stack data
	 */
	union IpaHwWdiCommonChCmdData_t resume = { .raw32b = 0 };
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	/* resume is only legal from CONNECTED|ENABLED (not yet resumed) */
	if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();
	resume.params.ipa_pipe_number = clnt_hdl;

	result = ipa_uc_send_cmd(resume.raw32b,
				IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
				false, 10*HZ);
	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	/* an all-zero ctrl config clears both suspend and delay */
	memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
	result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
	if (result)
		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
				clnt_hdl, result);
	else
		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);

	ep->wdi_state |= IPA_WDI_RESUMED;
	IPADBG("client (ep: %d) resumed\n", clnt_hdl);

uc_timeout:
	return result;
}
EXPORT_SYMBOL(ipa_resume_wdi_pipe);
/**
 * ipa_suspend_wdi_pipe() - WDI client suspend
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * Suspends the channel at the uC and applies suspend (consumer) or
 * delay (producer) on the endpoint.  For producers the uC command is
 * sent first; for consumers the endpoint is suspended first.  On
 * success the IPA_WDI_RESUMED state bit is cleared and clocks may gate.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_suspend_wdi_pipe(u32 clnt_hdl)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved bits of the command word are not sent
	 * to the uC as indeterminate stack data
	 */
	union IpaHwWdiCommonChCmdData_t suspend = { .raw32b = 0 };
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	/* suspend is only legal from the fully-running state */
	if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
				IPA_WDI_RESUMED)) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	suspend.params.ipa_pipe_number = clnt_hdl;

	/* producer: tell the uC first, then delay the endpoint */
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		IPADBG("Post suspend event first for IPA Producer\n");
		IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
		result = ipa_uc_send_cmd(suspend.raw32b,
			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
			false, 10*HZ);

		if (result) {
			result = -EFAULT;
			goto uc_timeout;
		}
	}

	memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
	if (IPA_CLIENT_IS_CONS(ep->client)) {
		ep_cfg_ctrl.ipa_ep_suspend = true;
		result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
		if (result)
			IPAERR("client (ep: %d) failed to suspend result=%d\n",
					clnt_hdl, result);
		else
			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
	} else {
		ep_cfg_ctrl.ipa_ep_delay = true;
		result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
		if (result)
			IPAERR("client (ep: %d) failed to delay result=%d\n",
					clnt_hdl, result);
		else
			IPADBG("client (ep: %d) delayed\n", clnt_hdl);
	}

	/* consumer: endpoint suspended above, now tell the uC */
	if (IPA_CLIENT_IS_CONS(ep->client)) {
		result = ipa_uc_send_cmd(suspend.raw32b,
			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
			false, 10*HZ);

		if (result) {
			result = -EFAULT;
			goto uc_timeout;
		}
	}

	/* run tag process before clock gating so in-flight work completes */
	ipa_ctx->tag_process_before_gating = true;
	ipa_dec_client_disable_clks();
	ep->wdi_state &= ~IPA_WDI_RESUMED;
	IPADBG("client (ep: %d) suspended\n", clnt_hdl);

uc_timeout:
	return result;
}
EXPORT_SYMBOL(ipa_suspend_wdi_pipe);
/*
 * ipa_write_qmapid_wdi_pipe() - update the QMAP ID of a connected WDI pipe.
 * @clnt_hdl: opaque client handle assigned by IPA to the client
 * @qmap_id: QMAP ID to program via the RX_EXT_CFG command
 *
 * Legal in any state that includes IPA_WDI_CONNECTED.
 * Returns 0 on success, negative errno on failure.
 */
int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
{
	int result = 0;
	struct ipa_ep_context *ep;
	/* zero-init so the reserved:16 bits of the command word are not
	 * sent to the uC as indeterminate stack data
	 */
	union IpaHwWdiRxExtCfgCmdData_t qmap = { .raw32b = 0 };

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_uc_state_check();
	if (result)
		return result;

	IPADBG("ep=%d\n", clnt_hdl);

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
		IPAERR("WDI channel bad state %d\n", ep->wdi_state);
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();
	qmap.params.ipa_pipe_number = clnt_hdl;
	qmap.params.qmap_id = qmap_id;

	result = ipa_uc_send_cmd(qmap.raw32b,
		IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
		false, 10*HZ);

	if (result) {
		result = -EFAULT;
		goto uc_timeout;
	}

	ipa_dec_client_disable_clks();

	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);

uc_timeout:
	return result;
}
| gpl-2.0 |
sricharanaz/venus | drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 178 | 22305 | /**************************************************************************
*
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
/* Single-slot placement descriptors used to build the ttm_placement
 * objects below.  The "_ne" variants add TTM_PL_FLAG_NO_EVICT so the
 * buffer is pinned; GMR and MOB are VMware-private memory types.
 */
static struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
/* VRAM only, both for normal and memory-pressure (busy) allocation. */
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

/* Preference order VRAM then GMR for normal allocation. */
static struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

/* Preference order GMR then VRAM (used as busy placement below). */
static struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

/* Prefer VRAM, fall back to GMR; under pressure allow GMR only. */
struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

/* Pinned (no-evict) variant of the VRAM-then-GMR preference. */
static struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

/* VRAM normally; spill to system memory under pressure. */
struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/* Pinned VRAM. */
struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

/* System memory only. */
struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/* Pinned system memory. */
struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};
/* Any evictable domain, in preference order SYSTEM, VRAM, GMR, MOB. */
static struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/* Surfaces: GMR normally; under pressure try GMR then VRAM. */
struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

/* Memory objects (MOBs): MOB domain only. */
struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

/* Pinned MOB. */
struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};
/* Driver-private TTM backend state, embedding the generic ttm_dma_tt. */
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;   /* must be first: container_of use */
	struct vmw_private *dev_priv;
	int gmr_id;                  /* GMR id when bound to the GMR domain */
	struct vmw_mob *mob;         /* MOB backing, when in the MOB domain */
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;    /* mode-agnostic view consumed by vmw_piter */
	uint64_t sg_alloc_size;      /* presumably bytes accounted for the sg
				      * table allocation — TODO confirm */
	bool mapped;
};

/* Exported so buffer-object code can size allocations for this backend. */
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	/* step to the next page index, then test against the page count */
	viter->i += 1;

	return viter->i < viter->num_pages;
}
/* sg-list variant: delegate advancement to the core sg page iterator. */
static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}
/**
* Helper functions to return a pointer to the current page.
*
* @viter: Pointer to the iterator
*
* These functions return a pointer to the page currently
* pointed to by @viter. Functions are selected depending on the
* current mapping mode.
*/
/* Current page for a page-array-backed iterator. */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *p)
{
	return p->pages[p->i];
}
/* Current page for an sg-table-backed iterator. */
static struct page *__vmw_piter_sg_page(struct vmw_piter *p)
{
	return sg_page_iter_page(&p->iter);
}
/**
* Helper functions to return the DMA address of the current page.
*
* @viter: Pointer to the iterator
*
* These functions return the DMA address of the page currently
* pointed to by @viter. Functions are selected depending on the
* current mapping mode.
*/
/* DMA address in vmw_dma_phys mode: the page's physical address. */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *p)
{
	struct page *page = p->pages[p->i];

	return page_to_phys(page);
}
/* DMA address in coherent mode: read from the pre-filled address array. */
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *p)
{
	return p->addrs[p->i];
}
/* DMA address in sg-mapped modes: taken from the sg page iterator. */
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *p)
{
	return sg_page_iter_dma_address(&p->iter);
}
/**
* vmw_piter_start - Initialize a struct vmw_piter.
*
* @viter: Pointer to the iterator to initialize
* @vsgt: Pointer to a struct vmw_sg_table to initialize from
*
* Note that we're following the convention of __sg_page_iter_start, so that
* the iterator doesn't point to a valid page after initialization; it has
* to be advanced one step first.
*/
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	/* One before p_offset: the first next() lands on p_offset. */
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	/* Select accessors by DMA mapping mode. */
	switch (vsgt->mode) {
	case vmw_dma_phys:
		/* Page array backed; DMA address == physical address. */
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		/* Page array plus a parallel array of coherent DMA addresses. */
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		/* Backed by an sg table mapped via the DMA API. */
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
/**
* vmw_ttm_unmap_from_dma - unmap device addresses previsouly mapped for
* TTM pages
*
* @vmw_tt: Pointer to a struct vmw_ttm_backend
*
* Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
*/
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->dev->dev;
dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
DMA_BIDIRECTIONAL);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
/**
* vmw_ttm_map_for_dma - map TTM pages to get device addresses
*
* @vmw_tt: Pointer to a struct vmw_ttm_backend
*
* This function is used to get device addresses from the kernel DMA layer.
* However, it's violating the DMA API in that when this operation has been
* performed, it's illegal for the CPU to write to the pages without first
* unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
* therefore only legal to call this function if we know that the function
* dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
* a CPU write buffer flush.
*/
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->dev->dev;
int ret;
ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
DMA_BIDIRECTIONAL);
if (unlikely(ret == 0))
return -ENOMEM;
vmw_tt->sgt.nents = ret;
return 0;
}
/**
* vmw_ttm_map_dma - Make sure TTM pages are visible to the device
*
* @vmw_tt: Pointer to a struct vmw_ttm_tt
*
* Select the correct function for and make sure the TTM pages are
* visible to the device. Allocate storage for the device mappings.
* If a mapping has already been performed, indicated by the storage
* pointer being non NULL, the function returns success.
*/
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	/* Rounded structure sizes, computed once and reused by all callers. */
	static size_t sgl_size;
	static size_t sgt_size;

	/* Mapping already set up: report success. */
	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		/* Account for the worst-case sg table size up front. */
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		/* Contiguous pages were coalesced: give back the
		 * accounting surplus for the unused sg entries. */
		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	/* Count contiguous DMA regions: a new region starts whenever the
	 * current address is not exactly PAGE_SIZE past the previous one.
	 * Starting 'old' at all-ones guarantees the first page opens one. */
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
/**
* vmw_ttm_unmap_dma - Tear down any TTM page device mappings
*
* @vmw_tt: Pointer to a struct vmw_ttm_tt
*
* Tear down any previously set up device DMA mappings and free
* any storage space allocated for them. If there are no mappings set up,
* this function is a NOP.
*/
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	/* No sg table means no mapping was ever set up: NOP. */
	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		/* Undo vmw_ttm_map_dma in reverse order: DMA unmap,
		 * free the sg table, return the accounted memory. */
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
* vmw_bo_map_dma - Make sure buffer object pages are visible to the device
*
* @bo: Pointer to a struct ttm_buffer_object
*
* Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
* instead of a pointer to a struct vmw_ttm_backend as argument.
* Note that the buffer object must be either pinned or reserved before
* calling this function.
*/
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt;

	vmw_tt = container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}
/**
* vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
*
* @bo: Pointer to a struct ttm_buffer_object
*
* Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
* instead of a pointer to a struct vmw_ttm_backend as argument.
*/
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt;

	vmw_tt = container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	vmw_ttm_unmap_dma(vmw_tt);
}
/**
* vmw_bo_sg_table - Return a struct vmw_sg_table object for a
* TTM buffer object
*
* @bo: Pointer to a struct ttm_buffer_object
*
* Returns a pointer to a struct vmw_sg_table object. The object should
* not be freed after use.
* Note that for the device addresses to be valid, the buffer object must
* either be reserved or pinned.
*/
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt;

	vmw_tt = container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
/*
 * TTM backend bind: make sure the pages are DMA-mapped, then bind them
 * either to a GMR or to a (lazily created) MOB, depending on the target
 * memory type.
 */
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	/* The placement's start offset doubles as the GMR/MOB id. */
	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		/* Create the MOB on first bind; it is kept until destroy
		 * or unpopulate. */
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}
/*
 * TTM backend unbind: release the GMR or MOB binding recorded at bind
 * time; in map_bind mode also tear down the DMA mapping.
 */
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		/* The MOB object itself is kept for potential re-bind. */
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	/* In map_bind mode mappings only live across a bind/unbind pair. */
	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}
/*
 * TTM backend destroy: undo DMA mappings, finalize the (dma_)ttm with
 * the finalizer matching how it was initialized in vmw_ttm_tt_create,
 * destroy any MOB, and free the backend object itself.
 */
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	/* Must mirror the init choice made in vmw_ttm_tt_create. */
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
/*
 * TTM backend populate: allocate backing pages. In coherent mode the
 * dma_address array is accounted to the TTM memory global first and
 * released again if population fails; otherwise the regular page pool
 * is used.
 */
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		/* Account for the per-page dma_addr_t array. */
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}
/*
 * TTM backend unpopulate: destroy the MOB (its backing pages are about
 * to go away), drop DMA mappings, then free the pages with the routine
 * matching the populate path, returning accounted memory in coherent
 * mode.
 */
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		/* Same size as was accounted in vmw_ttm_populate. */
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}
/* Backend ops installed on every tt created by vmw_ttm_tt_create. */
static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
/*
 * TTM driver ttm_tt_create: allocate and initialize a vmwgfx tt.
 * Coherent map mode uses the DMA-aware ttm init; all other modes use
 * the plain one. Returns NULL on allocation or init failure.
 */
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
				 unsigned long size, uint32_t page_flags,
				 struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	/* vmw_ttm_destroy must later call the matching fini. */
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}
/* TTM driver invalidate_caches: nothing to do for this device. */
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
/*
 * TTM driver init_mem_type: describe each supported memory type
 * (system, VRAM, GMR, MOB) to TTM. All types use cached caching only.
 */
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 *  one slot per bo. There is an upper limit of the number of
		 *  slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
/* TTM driver evict_flags: evicted BOs always go to system memory. */
static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct ttm_object_file *tfile =
vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
return vmw_user_dmabuf_verify_access(bo, tfile);
}
/*
 * TTM driver io_mem_reserve: fill in bus-address info for mappable
 * memory. Only VRAM is iomem; system/GMR/MOB need no bus setup.
 */
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		/* Physical address = VRAM base + page offset within VRAM. */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* TTM driver io_mem_free: nothing was reserved, so nothing to free. */
static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
/* TTM driver fault_reserve_notify: no special handling needed. */
static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}
/**
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
*/
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	/* Resources first, then the query-bo machinery. */
	vmw_resource_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
/**
* vmw_swap_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to be swapped out.
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	/* Wait for GPU idle before the BO is swapped out.
	 * NOTE(review): return value ignored — presumably best-effort
	 * here; confirm an interrupted/failed wait is acceptable. */
	ttm_bo_wait(bo, false, false);
}
/*
 * vmwgfx TTM buffer-object driver vtable. .move is NULL: TTM's
 * default memcpy move path is used.
 */
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
| gpl-2.0 |
cricard13/linux-raspberry-nfc | drivers/staging/lustre/lustre/ptlrpc/events.c | 434 | 16254 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_RPC
#include "../../include/linux/libcfs/libcfs.h"
# ifdef __mips64__
# include <linux/kernel.h>
# endif
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"
lnet_handle_eq_t ptlrpc_eq_h;
/*
* Client's outgoing request callback
*/
void request_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	sptlrpc_request_out_callback(req);
	spin_lock(&req->rq_lock);
	req->rq_real_sent = get_seconds();
	if (ev->unlinked)
		req->rq_req_unlink = 0;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c does currently... */

		req->rq_net_err = 1;
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&req->rq_lock);

	/* Drop the network's reference on the request. */
	ptlrpc_req_finished(req);
}
/*
* Client's incoming reply callback
*/
void reply_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	   for adaptive timeouts' early reply. */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlink = 0;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		/* Reply buffer too small: flag truncation for the caller. */
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncate = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
			  ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++; /* number received, client side */

		if (req->rq_replied)   /* already got the real reply */
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		   so we must unlink in ptlrpc_unregister_reply */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	req->rq_import->imp_last_reply_time = get_seconds();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
/*
* Client's bulk has been written/read
*/
void client_bulk_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request *req;

	/* Event type must match the bulk direction (PUT for sink,
	 * GET for source) unless this is an unlink. */
	LASSERT((desc->bd_type == BULK_PUT_SINK &&
		 ev->type == LNET_EVENT_PUT) ||
		(desc->bd_type == BULK_GET_SOURCE &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	/* Fault-injection hooks for bulk-IO error paths. */
	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
				 CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_md_count > 0);
	desc->bd_md_count--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
	}

	if (ev->status != 0)
		desc->bd_failure = 1;

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}
/*
* We will have percpt request history list for ptlrpc service in upcoming
* patches because we don't want to be serialized by current per-service
* history operations. So we require history ID can (somehow) show arriving
* order w/o grabbing global lock, and user can sort them in userspace.
*
* This is how we generate history ID for ptlrpc_request:
* ----------------------------------------------------
* | 32 bits | 16 bits | (16 - X)bits | X bits |
* ----------------------------------------------------
* | seconds | usec / 16 | sequence | CPT id |
* ----------------------------------------------------
*
* it might not be precise but should be good enough.
*/
#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits)
#define REQS_SEC_SHIFT 32
#define REQS_USEC_SHIFT 16
#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt)
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	__u64 sec = req->rq_arrival_time.tv_sec;
	__u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
	__u64 new_seq;

	/* set sequence ID for request and add it to history list,
	 * it must be called with hold svcpt::scp_lock */

	/* Pack seconds | usec/16 | (sequence) | CPT id per the layout
	 * described in the comment block above. */
	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* NB: increase sequence number in current usec bucket,
		 * however, it's possible that we used up all bits for
		 * sequence and jumped into the next usec bucket (future time),
		 * then we hope there will be less RPCs per bucket at some
		 * point, and sequence will catch up again */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
/*
* Server's incoming request callback
*/
void request_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context. */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		/* GFP_ATOMIC: we are in LNet callback context. */
		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
		if (req == NULL) {
			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive. */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	do_gettimeofday(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_timed_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	atomic_set(&req->rq_refcount, 1);
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	/* Assign the history sequence id under scp_lock as required. */
	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
/*
* Server's outgoing reply callback
*/
void reply_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_reply_state *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing so I drop the
		 * net's ref on 'rs' */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	LASSERT(rs->rs_on_net);

	if (ev->unlinked) {
		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_on_net = 0;
		/* Schedule final processing unless we still wait for the
		 * transaction to commit (no-ack, uncommitted transno). */
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed)
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}
/*
 * Single LNet event-queue callback: dispatch to the per-operation
 * callback recorded in the MD's user pointer, after sanity-checking it
 * is one of the five known callbacks.
 */
static void ptlrpc_master_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback);

	callback(ev);
}
/*
 * Resolve a UUID to the closest matching LNet peer, filling in *peer
 * and the local NID *self. Returns 0 on success, -ENOENT if no NID
 * matched. "Closest" means smallest LNetDist, ties broken by order.
 */
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			lnet_process_id_t *peer, lnet_nid_t *self)
{
	int best_dist = 0;
	__u32 best_order = 0;
	int count = 0;
	int rc = -ENOENT;
	int portals_compatibility;
	int dist;
	__u32 order;
	lnet_nid_t dst_nid;
	lnet_nid_t src_nid;

	portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

	peer->pid = LUSTRE_SRV_LNET_PID;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) {		/* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		/* First hit (rc < 0) or strictly better candidate. */
		if (rc < 0 ||
		    dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			if (portals_compatibility > 1) {
				/* Strong portals compatibility: Zero the nid's
				 * NET, so if I'm reading new config logs, or
				 * getting configured by (new) lconf I can
				 * still talk to old servers. */
				dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
				src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
			}
			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}
/*
 * Tear down the ptlrpc network interface: retry freeing the event
 * queue until it drains (sleeping 2s between attempts), then shut down
 * LNet. Only returns once LNetNIFini() has run.
 */
void ptlrpc_ni_fini(void)
{
	wait_queue_head_t waitq;
	struct l_wait_info lwi;
	int rc;
	int retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}
/* The LNet PID ptlrpc uses; always the well-known server PID. */
lnet_pid_t ptl_get_pid(void)
{
	return LUSTRE_SRV_LNET_PID;
}
/*
 * Bring up the ptlrpc network interface: initialize LNet with our PID
 * and allocate the callback-driven event queue. Returns 0, -ENOENT if
 * LNet init fails, or -ENOMEM if the EQ cannot be allocated.
 */
int ptlrpc_ni_init(void)
{
	int rc;
	lnet_pid_t pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
		return -ENOENT;
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on... */
	/* kernel LNet calls our master callback when there are new event,
	 * because we are guaranteed to get every event via callback,
	 * so we just set EQ size to 0 to avoid overhead of serializing
	 * enqueue/dequeue operations in LNet. */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;

	CERROR("Failed to allocate event queue: %d\n", rc);
	LNetNIFini();

	return -ENOMEM;
}
/* Initialize the network layer, then the ptlrpcd daemons; on ptlrpcd
 * failure the network layer is torn down again. */
int ptlrpc_init_portals(void)
{
	int rc;

	rc = ptlrpc_ni_init();
	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return -EIO;
	}

	rc = ptlrpcd_addref();
	if (rc != 0) {
		CERROR("rpcd initialisation failed\n");
		ptlrpc_ni_fini();
	}
	return rc;
}
/* Inverse of ptlrpc_init_portals: drop ptlrpcd ref, then network. */
void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}
| gpl-2.0 |
mephistophilis/samsung_nowplus_kernel | drivers/acpi/acpica/psxface.c | 946 | 11342 | /******************************************************************************
*
* Module Name: psxface - Parser external interfaces
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2010, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
#include "actables.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
/*******************************************************************************
 *
 * FUNCTION:    acpi_debug_trace
 *
 * PARAMETERS:  name                - Valid ACPI name string (at least one
 *                                    4-character nameseg; must not be NULL)
 *              debug_level         - Optional level mask. 0 to use default
 *              debug_layer         - Optional layer mask. 0 to use default
 *              flags               - bit 1: one shot(1) or persistent(0)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: External interface to enable debug tracing during control
 *              method execution
 *
 ******************************************************************************/
acpi_status
acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
{
	acpi_status status;

	/*
	 * The name is dereferenced as a 32-bit nameseg below; reject a NULL
	 * pointer here instead of faulting.
	 */
	if (!name) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* TBDs: Validate name further, allow full path or just nameseg */

	acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
	acpi_gbl_trace_flags = flags;

	/* Zero means "keep the current default mask" */

	if (debug_level) {
		acpi_gbl_trace_dbg_level = debug_level;
	}
	if (debug_layer) {
		acpi_gbl_trace_dbg_layer = debug_layer;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_start_trace
 *
 * PARAMETERS:  info                - Method info struct
 *
 * RETURN:      None
 *
 * DESCRIPTION: Start control method execution trace: if the method about to
 *              run matches acpi_gbl_trace_method_name, save the current debug
 *              masks and install the (possibly widened) trace masks.
 *
 ******************************************************************************/
static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Only act when tracing is armed for exactly this method */

	if (acpi_gbl_trace_method_name &&
	    (acpi_gbl_trace_method_name == info->resolved_node->name.integer)) {

		/* Save the current masks so acpi_ps_stop_trace can restore them */

		acpi_gbl_original_dbg_level = acpi_dbg_level;
		acpi_gbl_original_dbg_layer = acpi_dbg_layer;

		/* Use the configured trace masks, or "everything" by default */

		acpi_dbg_level = acpi_gbl_trace_dbg_level ?
		    acpi_gbl_trace_dbg_level : 0x00FFFFFF;
		acpi_dbg_layer = acpi_gbl_trace_dbg_layer ?
		    acpi_gbl_trace_dbg_layer : ACPI_UINT32_MAX;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_stop_trace
 *
 * PARAMETERS:  info                - Method info struct
 *
 * RETURN:      None
 *
 * DESCRIPTION: Stop control method execution trace: if the finished method is
 *              the traced one, restore the debug masks saved at trace start
 *              (and disarm the trace entirely when it was one-shot).
 *
 ******************************************************************************/
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Only act when tracing was armed for exactly this method */

	if (acpi_gbl_trace_method_name &&
	    (acpi_gbl_trace_method_name == info->resolved_node->name.integer)) {

		/* Disable further tracing if type is one-shot */

		if (acpi_gbl_trace_flags & 1) {
			acpi_gbl_trace_method_name = 0;
			acpi_gbl_trace_dbg_level = 0;
			acpi_gbl_trace_dbg_layer = 0;
		}

		/* Restore the masks saved by acpi_ps_start_trace */

		acpi_dbg_level = acpi_gbl_original_dbg_level;
		acpi_dbg_layer = acpi_gbl_original_dbg_layer;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_execute_method
 *
 * PARAMETERS:  info                - Method info block, contains:
 *              resolved_node       - Method Node to execute
 *              obj_desc            - Method object
 *              parameters          - List of parameters to pass to the method,
 *                                    terminated by NULL. May itself be NULL
 *                                    if no parameters are being passed.
 *              return_object       - Where to put method's return value (if
 *                                    any). If NULL, no value is returned.
 *              pass_number         - Parse or execute pass
 *
 * RETURN:      Status. AE_CTRL_RETURN_VALUE signals that the method produced
 *              a return object (in info->return_object).
 *
 * DESCRIPTION: Execute a control method
 *
 ******************************************************************************/
acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
	acpi_status status;
	union acpi_parse_object *op;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE(ps_execute_method);

	/* Quick validation of DSDT header */

	acpi_tb_check_dsdt_header();

	/* Validate the Info and method Node */

	if (!info || !info->resolved_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Init for new method, wait on concurrency semaphore */

	status =
	    acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
					   NULL);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * The caller "owns" the parameters, so give each one an extra
	 * reference for the duration of the execution (dropped again in
	 * the cleanup path below)
	 */
	acpi_ps_update_parameter_list(info, REF_INCREMENT);

	/* Begin tracing if requested */

	acpi_ps_start_trace(info);

	/*
	 * Execute the method. Performs parse simultaneously
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
			  info->resolved_node->name.ascii, info->resolved_node,
			  info->obj_desc));

	/* Create and init a Root Node */

	op = acpi_ps_create_scope_op();
	if (!op) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	info->pass_number = ACPI_IMODE_EXECUTE;
	walk_state =
	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
				      NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
				       info->obj_desc->method.aml_start,
				       info->obj_desc->method.aml_length, info,
				       info->pass_number);
	if (ACPI_FAILURE(status)) {
		/* Not yet handed to the parser, so we must free it here */
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
		walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
	}

	/* Invoke an internal method if necessary */

	if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
		status =
		    info->obj_desc->method.extra.implementation(walk_state);
		info->return_object = walk_state->return_desc;

		/* Cleanup states */

		acpi_ds_scope_stack_clear(walk_state);
		acpi_ps_cleanup_scope(&walk_state->parser_state);
		acpi_ds_terminate_control_method(walk_state->method_desc,
						 walk_state);
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/*
	 * Start method evaluation with an implicit return of zero.
	 * This is done for Windows compatibility.
	 */
	if (acpi_gbl_enable_interpreter_slack) {
		walk_state->implicit_return_obj =
		    acpi_ut_create_integer_object((u64) 0);
		if (!walk_state->implicit_return_obj) {
			status = AE_NO_MEMORY;
			acpi_ds_delete_walk_state(walk_state);
			goto cleanup;
		}
	}

	/* Parse the AML */

	status = acpi_ps_parse_aml(walk_state);

	/* walk_state was deleted by parse_aml (do not free it again) */

cleanup:
	acpi_ps_delete_parse_tree(op);

	/* End optional tracing */

	acpi_ps_stop_trace(info);

	/* Take away the extra reference that we gave the parameters above */

	acpi_ps_update_parameter_list(info, REF_DECREMENT);

	/* Exit now if error above */

	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * If the method has returned an object, signal this to the caller with
	 * a control exception code
	 */
	if (info->return_object) {
		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
				  info->return_object));
		ACPI_DUMP_STACK_ENTRY(info->return_object);

		status = AE_CTRL_RETURN_VALUE;
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_update_parameter_list
 *
 * PARAMETERS:  info                - See struct acpi_evaluate_info
 *                                    (Used: parameters)
 *              action              - REF_INCREMENT or REF_DECREMENT
 *
 * RETURN:      None
 *
 * DESCRIPTION: Update reference count on all method parameter objects
 *
 ******************************************************************************/
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
	u32 i;

	if (!info->parameters) {
		return;
	}

	/* Apply the action to every parameter in the NULL-terminated list */

	for (i = 0; info->parameters[i]; i++) {

		/* Ignore errors, just do them all */

		(void)acpi_ut_update_object_reference(info->parameters[i],
						      action);
	}
}
| gpl-2.0 |
buenajuan300/android_kernel_samsung_grandprimevelte | drivers/usb/atm/speedtch.c | 1202 | 28891 | /******************************************************************************
* speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands
* Copyright (C) 2004, David Woodhouse
*
* Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <asm/page.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/workqueue.h>
#include "usbatm.h"
#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
#define DRIVER_VERSION "1.10"
#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
static const char speedtch_driver_name[] = "speedtch";
#define CTRL_TIMEOUT 2000 /* milliseconds */
#define DATA_TIMEOUT 2000 /* milliseconds */
#define OFFSET_7 0 /* size 1 */
#define OFFSET_b 1 /* size 8 */
#define OFFSET_d 9 /* size 4 */
#define OFFSET_e 13 /* size 1 */
#define OFFSET_f 14 /* size 1 */
#define SIZE_7 1
#define SIZE_b 8
#define SIZE_d 4
#define SIZE_e 1
#define SIZE_f 1
#define MIN_POLL_DELAY 5000 /* milliseconds */
#define MAX_POLL_DELAY 60000 /* milliseconds */
#define RESUBMIT_DELAY 1000 /* milliseconds */
#define DEFAULT_BULK_ALTSETTING 1
#define DEFAULT_ISOC_ALTSETTING 3
#define DEFAULT_DL_512_FIRST 0
#define DEFAULT_ENABLE_ISOC 0
#define DEFAULT_SW_BUFFERING 0
static unsigned int altsetting = 0; /* zero means: use the default */
static bool dl_512_first = DEFAULT_DL_512_FIRST;
static bool enable_isoc = DEFAULT_ENABLE_ISOC;
static bool sw_buffering = DEFAULT_SW_BUFFERING;
#define DEFAULT_B_MAX_DSL 8128
#define DEFAULT_MODEM_MODE 11
#define MODEM_OPTION_LENGTH 16
static const unsigned char DEFAULT_MODEM_OPTION[MODEM_OPTION_LENGTH] = {
0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static unsigned int BMaxDSL = DEFAULT_B_MAX_DSL;
static unsigned char ModemMode = DEFAULT_MODEM_MODE;
static unsigned char ModemOption[MODEM_OPTION_LENGTH];
static unsigned int num_ModemOption;
module_param(altsetting, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(altsetting,
"Alternative setting for data interface (bulk_default: "
__MODULE_STRING(DEFAULT_BULK_ALTSETTING) "; isoc_default: "
__MODULE_STRING(DEFAULT_ISOC_ALTSETTING) ")");
module_param(dl_512_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dl_512_first,
"Read 512 bytes before sending firmware (default: "
__MODULE_STRING(DEFAULT_DL_512_FIRST) ")");
module_param(enable_isoc, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_isoc,
"Use isochronous transfers if available (default: "
__MODULE_STRING(DEFAULT_ENABLE_ISOC) ")");
module_param(sw_buffering, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sw_buffering,
"Enable software buffering (default: "
__MODULE_STRING(DEFAULT_SW_BUFFERING) ")");
module_param(BMaxDSL, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(BMaxDSL,
"default: " __MODULE_STRING(DEFAULT_B_MAX_DSL));
module_param(ModemMode, byte, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ModemMode,
"default: " __MODULE_STRING(DEFAULT_MODEM_MODE));
module_param_array(ModemOption, byte, &num_ModemOption, S_IRUGO);
MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");
#define INTERFACE_DATA 1
#define ENDPOINT_INT 0x81
#define ENDPOINT_BULK_DATA 0x07
#define ENDPOINT_ISOC_DATA 0x07
#define ENDPOINT_FIRMWARE 0x05
/*
 * Snapshot of the module parameters; the live module parameters may change
 * at any moment via sysfs, so a copy is taken once in speedtch_bind().
 */
struct speedtch_params {
	unsigned int altsetting;	/* data-interface altsetting in use */
	unsigned int BMaxDSL;
	unsigned char ModemMode;
	unsigned char ModemOption[MODEM_OPTION_LENGTH];
};

/* Per-modem driver state, hung off usbatm->driver_data */
struct speedtch_instance_data {
	struct usbatm_data *usbatm;

	struct speedtch_params params; /* set in probe, constant afterwards */

	struct timer_list status_check_timer;	/* periodic line-status poll */
	struct work_struct status_check_work;	/* runs speedtch_check_status() */
	unsigned char last_status;	/* last line state byte (buf[OFFSET_7]) */

	int poll_delay; /* milliseconds */

	struct timer_list resubmit_timer;	/* retries int_urb submission */
	struct urb *int_urb;	/* interrupt endpoint URB; NULL = shutting down */
	unsigned char int_data[16];	/* buffer for interrupt packets */

	unsigned char scratch_buffer[16];	/* shared control-transfer buffer */
};
/***************
** firmware **
***************/
/*
 * Enable (state != 0) or disable (state == 0) the modem's software
 * buffering via vendor control request 0x32.  Failure is logged but
 * otherwise ignored.
 */
static void speedtch_set_swbuff(struct speedtch_instance_data *instance, int state)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	const char *prefix = state ? "En" : "Dis";
	int rc;

	rc = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			     0x32, 0x40, state ? 0x01 : 0x00, 0x00,
			     NULL, 0, CTRL_TIMEOUT);
	if (rc < 0)
		usb_warn(usbatm,
			 "%sabling SW buffering: usb_control_msg returned %d\n",
			 prefix, rc);
	else
		usb_dbg(usbatm, "speedtch_set_swbuff: %sbled SW buffering\n", prefix);
}
/*
 * Send the post-firmware initialisation sequence of vendor control
 * requests.  The individual request semantics are undocumented (the URB
 * numbers presumably refer to a USB trace of the vendor driver - TODO
 * confirm).  Each step is attempted regardless of earlier failures;
 * errors are only logged.
 */
static void speedtch_test_sequence(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;

	/* URB 147 */
	buf[0] = 0x1c;
	buf[1] = 0x50;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x0b, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB147: %d\n", __func__, ret);

	/* URB 148 */
	buf[0] = 0x32;
	buf[1] = 0x00;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x02, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB148: %d\n", __func__, ret);

	/* URB 149 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x03, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB149: %d\n", __func__, ret);

	/* URB 150 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x04, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB150: %d\n", __func__, ret);

	/* Extra initialisation in recent drivers - gives higher speeds */

	/* URBext1: send the configured ModemMode */
	buf[0] = instance->params.ModemMode;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x11, 0x00, buf, 1, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext1: %d\n", __func__, ret);

	/* URBext2: send the 16-byte ModemOption block */
	/* This seems to be the one which actually triggers the higher sync
	   rate -- it does require the new firmware too, although it works OK
	   with older firmware */
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x14, 0x00,
			      instance->params.ModemOption,
			      MODEM_OPTION_LENGTH, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext2: %d\n", __func__, ret);

	/* URBext3: send BMaxDSL as a little-endian 16-bit value */
	buf[0] = instance->params.BMaxDSL & 0xff;
	buf[1] = instance->params.BMaxDSL >> 8;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x12, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext3: %d\n", __func__, ret);
}
/*
 * Upload the two-stage firmware to the modem over the firmware bulk
 * endpoint, then switch the data interface to the chosen altsetting and
 * run the magic initialisation sequence.  fw1/fw2 remain owned by the
 * caller.  Returns 0 on success or a negative errno.
 */
static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
				    const struct firmware *fw1,
				    const struct firmware *fw2)
{
	unsigned char *buffer;
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	int actual_length;
	int ret = 0;
	int offset;

	usb_dbg(usbatm, "%s entered\n", __func__);

	/* One page is used as a bounce buffer for all bulk transfers */
	if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
		ret = -ENOMEM;
		usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__);
		goto out;
	}

	/* Sanity check: interface 2 must exist on this device */
	if (!usb_ifnum_to_if(usb_dev, 2)) {
		ret = -ENODEV;
		usb_dbg(usbatm, "%s: interface not found!\n", __func__);
		goto out_free;
	}

	/* URB 7 */
	if (dl_512_first) {	/* some modems need a read before writing the firmware */
		ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, 0x200, &actual_length, 2000);

		/* A timeout here is expected and harmless */
		if (ret < 0 && ret != -ETIMEDOUT)
			usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret);
		else
			usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, ret);
	}

	/* URB 8 : both leds are static green */
	for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
		memcpy(buffer, fw1->data + offset, thislen);

		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);

		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
		/* NOTE(review): logs the total fw1->size on every chunk
		 * (inside the loop), unlike BLOCK3 below which logs once
		 * after its loop - looks unintentional but is log-only */
		usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size);
	}

	/* USB led blinking green, ADSL led off */

	/* URB 11: modem acknowledges stage 1 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);

	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}
	usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length);

	/* URBs 12 to 139 - USB led blinking green, ADSL led off */
	for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
		memcpy(buffer, fw2->data + offset, thislen);

		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);

		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
	}
	usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size);

	/* USB led static green, ADSL led static red */

	/* URB 142: modem acknowledges stage 2 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);

	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}

	/* success */
	usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length);

	/* Delay to allow firmware to start up. We can do this here
	   because we're in our own kernel thread anyway. */
	msleep_interruptible(1000);

	/* Firmware is up; select the negotiated data altsetting */
	if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
		usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret);
		goto out_free;
	}

	/* Enable software buffering, if requested */
	if (sw_buffering)
		speedtch_set_swbuff(instance, 1);

	/* Magic spell; don't ask us what this does */
	speedtch_test_sequence(instance);

	ret = 0;

out_free:
	free_page((unsigned long)buffer);
out:
	return ret;
}
/*
 * Locate the stage-`phase` firmware image, trying progressively less
 * specific file names: first keyed on the device's major and minor
 * revision, then on the major revision only, then the generic name.
 * On success *fw_p holds the firmware (caller releases it) and 0 is
 * returned; otherwise -ENOENT.
 */
static int speedtch_find_firmware(struct usbatm_data *usbatm, struct usb_interface *intf,
				  int phase, const struct firmware **fw_p)
{
	struct device *dev = &intf->dev;
	const u16 bcdDevice = le16_to_cpu(interface_to_usbdev(intf)->descriptor.bcdDevice);
	const u8 major_revision = bcdDevice >> 8;
	const u8 minor_revision = bcdDevice & 0xff;
	char buf[24];

	/* Most specific: revision-qualified name */
	sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
	usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
	if (!request_firmware(fw_p, buf, dev))
		goto found;

	/* Major revision only */
	sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
	usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
	if (!request_firmware(fw_p, buf, dev))
		goto found;

	/* Generic fallback */
	sprintf(buf, "speedtch-%d.bin", phase);
	usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
	if (!request_firmware(fw_p, buf, dev))
		goto found;

	usb_err(usbatm, "%s: no stage %d firmware found!\n", __func__, phase);
	return -ENOENT;

found:
	usb_info(usbatm, "found stage %d firmware %s\n", phase, buf);
	return 0;
}
/*
 * Heavy initialisation: fetch both firmware stages and upload them to
 * the modem.  Both firmware images are released before returning.
 */
static int speedtch_heavy_init(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	const struct firmware *fw1, *fw2;
	struct speedtch_instance_data *instance = usbatm->driver_data;
	int ret;

	ret = speedtch_find_firmware(usbatm, intf, 1, &fw1);
	if (ret < 0)
		return ret;

	ret = speedtch_find_firmware(usbatm, intf, 2, &fw2);
	if (ret < 0) {
		release_firmware(fw1);
		return ret;
	}

	ret = speedtch_upload_firmware(instance, fw1, fw2);
	if (ret < 0)
		usb_err(usbatm, "%s: firmware upload failed (%d)!\n", __func__, ret);

	release_firmware(fw2);
	release_firmware(fw1);
	return ret;
}
/**********
** ATM **
**********/
/*
 * Read the modem's status registers into instance->scratch_buffer via a
 * series of vendor control reads (messages 7, B, D, E and F; the buffer
 * layout follows the OFFSET_x / SIZE_x constants above).  Returns 0 on
 * success or the first failing usb_control_msg result.
 */
static int speedtch_read_status(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;

	memset(buf, 0, 16);

	/* MSG 7: line state byte (see speedtch_check_status) */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG 7 failed\n", __func__);
		return ret;
	}

	/* MSG B: down/up rate words (decoded in speedtch_check_status) */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG B failed\n", __func__);
		return ret;
	}

	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG D failed\n", __func__);
		return ret;
	}

	/* MSG E and F use a different request (0x01 instead of 0x12) */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG E failed\n", __func__);
		return ret;
	}

	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f,
			      CTRL_TIMEOUT);
	if (ret < 0) {
		atm_dbg(usbatm, "%s: MSG F failed\n", __func__);
		return ret;
	}

	return 0;
}
/*
 * Prod the modem into starting ADSL line synchronisation (vendor read
 * 0x12, value 0x04).  Returns the usb_control_msg result: negative on
 * failure, otherwise the number of bytes received.
 */
static int speedtch_start_synchro(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int rc;

	atm_dbg(usbatm, "%s entered\n", __func__);

	buf[0] = buf[1] = 0;

	rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     0x12, 0xc0, 0x04, 0x00,
			     buf, 2, CTRL_TIMEOUT);
	if (rc < 0)
		atm_warn(usbatm, "failed to start ADSL synchronisation: %d\n", rc);
	else
		atm_dbg(usbatm, "%s: modem prodded. %d bytes returned: %02x %02x\n",
			__func__, rc, buf[0], buf[1]);

	return rc;
}
/*
 * Work item: fetch the modem status and, when the line state changed,
 * update the ATM device's signal state and link rate.  The polling
 * interval adapts: doubled (up to MAX_POLL_DELAY) on a read failure,
 * halved (down to MIN_POLL_DELAY) on success.
 */
static void speedtch_check_status(struct work_struct *work)
{
	struct speedtch_instance_data *instance =
		container_of(work, struct speedtch_instance_data,
			     status_check_work);
	struct usbatm_data *usbatm = instance->usbatm;
	struct atm_dev *atm_dev = usbatm->atm_dev;
	unsigned char *buf = instance->scratch_buffer;
	int down_speed, up_speed, ret;
	unsigned char status;

#ifdef VERBOSE_DEBUG
	atm_dbg(usbatm, "%s entered\n", __func__);
#endif

	ret = speedtch_read_status(instance);
	if (ret < 0) {
		atm_warn(usbatm, "error %d fetching device status\n", ret);
		/* back off the poll rate */
		instance->poll_delay = min(2 * instance->poll_delay, MAX_POLL_DELAY);
		return;
	}

	instance->poll_delay = max(instance->poll_delay / 2, MIN_POLL_DELAY);

	status = buf[OFFSET_7];

	/* state 0 (line down) is always re-processed so we keep re-prodding */
	if ((status != instance->last_status) || !status) {
		atm_dbg(usbatm, "%s: line state 0x%02x\n", __func__, status);

		switch (status) {
		case 0:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
			if (instance->last_status)
				atm_info(usbatm, "ADSL line is down\n");
			/* It may never resync again unless we ask it to... */
			ret = speedtch_start_synchro(instance);
			break;

		case 0x08:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
			atm_info(usbatm, "ADSL line is blocked?\n");
			break;

		case 0x10:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
			atm_info(usbatm, "ADSL line is synchronising\n");
			break;

		case 0x20:
			/* Line is up: decode little-endian 32-bit down/up rates
			 * from the MSG B area of the status buffer */
			down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8)
				| (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24);
			up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8)
				| (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24);

			/* presumably some firmware reports the rates in the
			 * upper halfword - TODO confirm */
			if (!(down_speed & 0x0000ffff) && !(up_speed & 0x0000ffff)) {
				down_speed >>= 16;
				up_speed >>= 16;
			}
			/* kbit/s -> ATM cells/s (53-byte cell = 424 bits) */
			atm_dev->link_rate = down_speed * 1000 / 424;
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND);
			atm_info(usbatm,
				 "ADSL line is up (%d kb/s down | %d kb/s up)\n",
				 down_speed, up_speed);
			break;

		default:
			atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
			atm_info(usbatm, "unknown line state %02x\n", status);
			break;
		}

		instance->last_status = status;
	}
}
/*
 * Timer callback: schedule a status check and re-arm the timer, unless
 * repeated failures have pushed the delay to MAX_POLL_DELAY, in which
 * case polling stops (the interrupt URB may still revive it).
 */
static void speedtch_status_poll(unsigned long data)
{
	struct speedtch_instance_data *instance = (void *)data;

	schedule_work(&instance->status_check_work);

	/* The following check is racy, but the race is harmless */
	if (instance->poll_delay >= MAX_POLL_DELAY) {
		atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
		return;
	}

	mod_timer(&instance->status_check_timer,
		  jiffies + msecs_to_jiffies(instance->poll_delay));
}
/*
 * Timer callback: retry submission of the interrupt URB after an
 * earlier failure.  A NULL instance->int_urb means shutdown is in
 * progress (see speedtch_atm_stop) - do nothing and do not re-arm.
 */
static void speedtch_resubmit_int(unsigned long data)
{
	struct speedtch_instance_data *instance = (void *)data;
	struct urb *int_urb = instance->int_urb;
	int ret;

	atm_dbg(instance->usbatm, "%s entered\n", __func__);

	if (!int_urb)
		return;

	ret = usb_submit_urb(int_urb, GFP_ATOMIC);
	if (ret) {
		atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
		mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
		return;
	}

	schedule_work(&instance->status_check_work);
}
/*
 * Completion handler for the interrupt endpoint URB.  Recognises the
 * 6-byte "line up"/"line down" magic packets, kicks the status-check
 * work, and resubmits the URB.  On any failure the resubmit_timer
 * retries later.  instance->int_urb == NULL signals driver shutdown
 * (set in speedtch_atm_stop) and suppresses all rescheduling.
 */
static void speedtch_handle_int(struct urb *int_urb)
{
	struct speedtch_instance_data *instance = int_urb->context;
	struct usbatm_data *usbatm = instance->usbatm;
	unsigned int count = int_urb->actual_length;
	int status = int_urb->status;
	int ret;

	/* The magic interrupt for "up state" */
	static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
	/* The magic interrupt for "down state" */
	static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };

	atm_dbg(usbatm, "%s entered\n", __func__);

	if (status < 0) {
		atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status);
		goto fail;
	}

	if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
		/* Line is up: stop the failure-driven poll timer */
		del_timer(&instance->status_check_timer);
		atm_info(usbatm, "DSL line goes up\n");
	} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
		atm_info(usbatm, "DSL line goes down\n");
	} else {
		int i;

		/* Unrecognised packet: dump it and retry via the timer */
		atm_dbg(usbatm, "%s: unknown interrupt packet of length %d:", __func__, count);
		for (i = 0; i < count; i++)
			printk(" %02x", instance->int_data[i]);
		printk("\n");
		goto fail;
	}

	/* Re-read int_urb: NULL here means shutdown has started */
	if ((int_urb = instance->int_urb)) {
		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
		schedule_work(&instance->status_check_work);
		if (ret < 0) {
			atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
			goto fail;
		}
	}
	return;

fail:
	/* Only re-arm the retry timer if we are not shutting down */
	if ((int_urb = instance->int_urb))
		mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
}
/*
 * ATM device start: set the ESI (MAC address) from the USB serial
 * number, prod the modem into synchronisation, arm the interrupt URB
 * (optional - polling covers for it) and start the status poll timer.
 * Always returns 0.
 */
static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
	struct usb_device *usb_dev = usbatm->usb_dev;
	struct speedtch_instance_data *instance = usbatm->driver_data;
	int i, ret;
	unsigned char mac_str[13];

	atm_dbg(usbatm, "%s entered\n", __func__);

	/* Set MAC address, it is stored in the serial number
	 * (12 hex digits -> 6 ESI bytes; left zeroed on mismatch) */
	memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
	if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
		for (i = 0; i < 6; i++)
			atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
				hex_to_bin(mac_str[i * 2 + 1]);
	}

	/* Start modem synchronisation (result unchecked - the status
	 * polling below will retry if it did not take) */
	ret = speedtch_start_synchro(instance);

	/* Set up interrupt endpoint */
	if (instance->int_urb) {
		ret = usb_submit_urb(instance->int_urb, GFP_KERNEL);
		if (ret < 0) {
			/* Doesn't matter; we'll poll anyway */
			atm_dbg(usbatm, "%s: submission of interrupt URB failed (%d)!\n", __func__, ret);
			usb_free_urb(instance->int_urb);
			instance->int_urb = NULL;
		}
	}

	/* Start status polling */
	mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000));

	return 0;
}
/*
 * ATM device stop: shut down the status poll timer, the interrupt URB
 * and the resubmit timer.  The kill/del pair is deliberately run twice:
 * the first pass cancels anything in flight, and the second catches a
 * handler that raced with the NULL store and rescheduled once more.
 */
static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
	struct speedtch_instance_data *instance = usbatm->driver_data;
	struct urb *int_urb = instance->int_urb;

	atm_dbg(usbatm, "%s entered\n", __func__);

	del_timer_sync(&instance->status_check_timer);

	/*
	 * Since resubmit_timer and int_urb can schedule themselves and
	 * each other, shutting them down correctly takes some care
	 */
	instance->int_urb = NULL; /* signal shutdown */
	mb();
	usb_kill_urb(int_urb);
	del_timer_sync(&instance->resubmit_timer);
	/*
	 * At this point, speedtch_handle_int and speedtch_resubmit_int
	 * can run or be running, but instance->int_urb == NULL means that
	 * they will not reschedule
	 */
	usb_kill_urb(int_urb);
	del_timer_sync(&instance->resubmit_timer);
	usb_free_urb(int_urb);

	flush_work(&instance->status_check_work);
}
/* Pre-reset hook: no driver state needs saving before a USB reset */
static int speedtch_pre_reset(struct usb_interface *intf)
{
	return 0;
}
/* Post-reset hook: nothing to restore after a USB reset */
static int speedtch_post_reset(struct usb_interface *intf)
{
	return 0;
}
/**********
** USB **
**********/
/* Alcatel SpeedTouch USB modem (vendor 0x06b9) */
static struct usb_device_id speedtch_usb_ids[] = {
	{USB_DEVICE(0x06b9, 0x4061)},
	{}
};

MODULE_DEVICE_TABLE(usb, speedtch_usb_ids);

/* forward declaration - referenced by speedtch_usb_driver below */
static int speedtch_usb_probe(struct usb_interface *, const struct usb_device_id *);

static struct usb_driver speedtch_usb_driver = {
	.name		= speedtch_driver_name,
	.probe		= speedtch_usb_probe,
	.disconnect	= usbatm_usb_disconnect,
	.pre_reset	= speedtch_pre_reset,
	.post_reset	= speedtch_post_reset,
	.id_table	= speedtch_usb_ids
};
/*
 * Release the first num_interfaces interfaces that were claimed during
 * bind, clearing each one's driver data first.
 */
static void speedtch_release_interfaces(struct usb_device *usb_dev,
					int num_interfaces)
{
	struct usb_interface *cur_intf;
	int i;

	for (i = 0; i < num_interfaces; i++) {
		cur_intf = usb_ifnum_to_if(usb_dev, i);
		if (!cur_intf)
			continue;
		usb_set_intfdata(cur_intf, NULL);
		usb_driver_release_interface(&speedtch_usb_driver, cur_intf);
	}
}
static int speedtch_bind(struct usbatm_data *usbatm,
struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_interface *cur_intf, *data_intf;
struct speedtch_instance_data *instance;
int ifnum = intf->altsetting->desc.bInterfaceNumber;
int num_interfaces = usb_dev->actconfig->desc.bNumInterfaces;
int i, ret;
int use_isoc;
usb_dbg(usbatm, "%s entered\n", __func__);
/* sanity checks */
if (usb_dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
usb_err(usbatm, "%s: wrong device class %d\n", __func__, usb_dev->descriptor.bDeviceClass);
return -ENODEV;
}
if (!(data_intf = usb_ifnum_to_if(usb_dev, INTERFACE_DATA))) {
usb_err(usbatm, "%s: data interface not found!\n", __func__);
return -ENODEV;
}
/* claim all interfaces */
for (i = 0; i < num_interfaces; i++) {
cur_intf = usb_ifnum_to_if(usb_dev, i);
if ((i != ifnum) && cur_intf) {
ret = usb_driver_claim_interface(&speedtch_usb_driver, cur_intf, usbatm);
if (ret < 0) {
usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, i, ret);
speedtch_release_interfaces(usb_dev, i);
return ret;
}
}
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
usb_err(usbatm, "%s: no memory for instance data!\n", __func__);
ret = -ENOMEM;
goto fail_release;
}
instance->usbatm = usbatm;
/* module parameters may change at any moment, so take a snapshot */
instance->params.altsetting = altsetting;
instance->params.BMaxDSL = BMaxDSL;
instance->params.ModemMode = ModemMode;
memcpy(instance->params.ModemOption, DEFAULT_MODEM_OPTION, MODEM_OPTION_LENGTH);
memcpy(instance->params.ModemOption, ModemOption, num_ModemOption);
use_isoc = enable_isoc;
if (instance->params.altsetting)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, instance->params.altsetting, ret);
instance->params.altsetting = 0; /* fall back to default */
}
if (!instance->params.altsetting && use_isoc)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_ISOC_ALTSETTING)) < 0) {
usb_dbg(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_ISOC_ALTSETTING, ret);
use_isoc = 0; /* fall back to bulk */
}
if (use_isoc) {
const struct usb_host_interface *desc = data_intf->cur_altsetting;
const __u8 target_address = USB_DIR_IN | usbatm->driver->isoc_in;
use_isoc = 0; /* fall back to bulk if endpoint not found */
for (i = 0; i < desc->desc.bNumEndpoints; i++) {
const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
if ((endpoint_desc->bEndpointAddress == target_address)) {
use_isoc =
usb_endpoint_xfer_isoc(endpoint_desc);
break;
}
}
if (!use_isoc)
usb_info(usbatm, "isochronous transfer not supported - using bulk\n");
}
if (!use_isoc && !instance->params.altsetting)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_BULK_ALTSETTING)) < 0) {
usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_BULK_ALTSETTING, ret);
goto fail_free;
}
if (!instance->params.altsetting)
instance->params.altsetting = use_isoc ? DEFAULT_ISOC_ALTSETTING : DEFAULT_BULK_ALTSETTING;
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
init_timer(&instance->status_check_timer);
instance->status_check_timer.function = speedtch_status_poll;
instance->status_check_timer.data = (unsigned long)instance;
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
init_timer(&instance->resubmit_timer);
instance->resubmit_timer.function = speedtch_resubmit_int;
instance->resubmit_timer.data = (unsigned long)instance;
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (instance->int_urb)
usb_fill_int_urb(instance->int_urb, usb_dev,
usb_rcvintpipe(usb_dev, ENDPOINT_INT),
instance->int_data, sizeof(instance->int_data),
speedtch_handle_int, instance, 16);
else
usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__);
/* check whether the modem already seems to be alive */
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x07, 0x00,
instance->scratch_buffer + OFFSET_7, SIZE_7, 500);
usbatm->flags |= (ret == SIZE_7 ? UDSL_SKIP_HEAVY_INIT : 0);
usb_dbg(usbatm, "%s: firmware %s loaded\n", __func__, usbatm->flags & UDSL_SKIP_HEAVY_INIT ? "already" : "not");
if (!(usbatm->flags & UDSL_SKIP_HEAVY_INIT))
if ((ret = usb_reset_device(usb_dev)) < 0) {
usb_err(usbatm, "%s: device reset failed (%d)!\n", __func__, ret);
goto fail_free;
}
usbatm->driver_data = instance;
return 0;
fail_free:
usb_free_urb(instance->int_urb);
kfree(instance);
fail_release:
speedtch_release_interfaces(usb_dev, num_interfaces);
return ret;
}
/*
 * Undo speedtch_bind(): release every interface claimed on the device
 * and free the per-modem state (interrupt URB + instance data).
 */
static void speedtch_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	struct speedtch_instance_data *instance = usbatm->driver_data;
	struct usb_device *dev = interface_to_usbdev(intf);

	usb_dbg(usbatm, "%s entered\n", __func__);

	/* give back all interfaces of the active configuration */
	speedtch_release_interfaces(dev, dev->actconfig->desc.bNumInterfaces);
	usb_free_urb(instance->int_urb);	/* NULL-safe */
	kfree(instance);
}
/***********
** init **
***********/
/* Operations table handed to the usbatm core by speedtch_usb_probe(). */
static struct usbatm_driver speedtch_usbatm_driver = {
	.driver_name	= speedtch_driver_name,
	.bind		= speedtch_bind,
	.heavy_init	= speedtch_heavy_init,
	.unbind		= speedtch_unbind,
	.atm_start	= speedtch_atm_start,
	.atm_stop	= speedtch_atm_stop,
	/* the same endpoint number is used for bulk in both directions */
	.bulk_in	= ENDPOINT_BULK_DATA,
	.bulk_out	= ENDPOINT_BULK_DATA,
	.isoc_in	= ENDPOINT_ISOC_DATA
};
/* Thin probe hook: delegate everything to the usbatm core with our ops. */
static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usbatm_driver *ops = &speedtch_usbatm_driver;

	return usbatm_usb_probe(intf, id, ops);
}
/* Standard module registration boilerplate (expands to module init/exit
 * that call usb_register()/usb_deregister() on speedtch_usb_driver). */
module_usb_driver(speedtch_usb_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| gpl-2.0 |
titanxxh/xengt-ha-kernel | kernel/power/block_io.c | 1202 | 2436 | /*
* This file provides functions for block I/O operations on swap/file.
*
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*/
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "power.h"
/**
 *	submit - submit BIO request.
 *	@rw:	READ or WRITE.
 *	@bdev:	block device to read from or write to.
 *	@sector:	physical sector offset on the device.
 *	@page:	page we're reading or writing.
 *	@bio_chain:	list of pending bios (for async reading)
 *
 *	Straight from the textbook - allocate and initialize the bio.
 *	If we're reading, make sure the page is marked as dirty.
 *	Then submit it and, if @bio_chain == NULL, wait.
 */
static int submit(int rw, struct block_device *bdev, sector_t sector,
		struct page *page, struct bio **bio_chain)
{
	const int bio_rw = rw | REQ_SYNC;
	struct bio *bio;

	/* NOTE(review): return value not checked -- a __GFP_WAIT bio
	 * allocation is presumably assumed never to fail here; confirm. */
	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	/* NOTE(review): the read completion handler is installed for writes
	 * too -- looks intentional (it just unlocks/ends the page I/O) but
	 * worth confirming against end_swap_bio_read()'s implementation. */
	bio->bi_end_io = end_swap_bio_read;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
			(unsigned long long)sector);
		bio_put(bio);
		return -EFAULT;
	}

	/* the page lock is the completion signal waited on below */
	lock_page(page);
	bio_get(bio);	/* extra ref so the bio outlives its completion */

	if (bio_chain == NULL) {
		/* synchronous path: submit and wait for the page unlock */
		submit_bio(bio_rw, bio);
		wait_on_page_locked(page);
		if (rw == READ)
			bio_set_pages_dirty(bio);
		bio_put(bio);
	} else {
		if (rw == READ)
			get_page(page);	/* These pages are freed later */
		/* chain through bi_private; newest bio becomes list head */
		bio->bi_private = *bio_chain;
		*bio_chain = bio;
		submit_bio(bio_rw, bio);
	}
	return 0;
}
/* Read one page at @page_off (in PAGE_SIZE units) from the resume device. */
int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
	sector_t sector = page_off * (PAGE_SIZE >> 9);

	return submit(READ, hib_resume_bdev, sector, virt_to_page(addr),
		      bio_chain);
}
/* Write one page at @page_off (in PAGE_SIZE units) to the resume device. */
int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
	sector_t sector = page_off * (PAGE_SIZE >> 9);

	return submit(WRITE, hib_resume_bdev, sector, virt_to_page(addr),
		      bio_chain);
}
/*
 * Wait for every bio queued on *bio_chain, remember the first I/O error
 * (if any), drop the page and bio references taken at submit time, and
 * reset the chain head.  Returns 0 on success or -EIO on any failure.
 */
int hib_wait_on_bio_chain(struct bio **bio_chain)
{
	struct bio *cur, *next;
	int ret = 0;

	if (bio_chain == NULL)
		return 0;
	if (*bio_chain == NULL)
		return 0;

	for (cur = *bio_chain; cur != NULL; cur = next) {
		struct page *page = cur->bi_io_vec[0].bv_page;

		/* next link was stored in bi_private by submit() */
		next = cur->bi_private;
		wait_on_page_locked(page);
		if (!PageUptodate(page) || PageError(page))
			ret = -EIO;
		put_page(page);
		bio_put(cur);
	}
	*bio_chain = NULL;
	return ret;
}
| gpl-2.0 |
ChronoMonochrome/Chrono_Kernel | net/mac80211/wep.c | 2994 | 9035 | /*
* Software WEP encryption implementation
* Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright 2003, Instant802 Networks, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wep.h"
/*
 * Allocate the TX and RX RC4 transforms used for WEP and seed the IV
 * counter.  On failure the tfm pointers hold error values so callers
 * (and ieee80211_wep_free()) can tell nothing valid was allocated.
 */
int ieee80211_wep_init(struct ieee80211_local *local)
{
	/* start WEP IV from a random value */
	get_random_bytes(&local->wep_iv, WEP_IV_LEN);

	local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(local->wep_tx_tfm)) {
		local->wep_rx_tfm = ERR_PTR(-EINVAL);
		return PTR_ERR(local->wep_tx_tfm);
	}

	local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	if (!IS_ERR(local->wep_rx_tfm))
		return 0;

	/* RX allocation failed: undo the TX transform and mark it invalid */
	crypto_free_cipher(local->wep_tx_tfm);
	local->wep_tx_tfm = ERR_PTR(-EINVAL);
	return PTR_ERR(local->wep_rx_tfm);
}
/* Release the RC4 transforms allocated by ieee80211_wep_init(). */
void ieee80211_wep_free(struct ieee80211_local *local)
{
	struct crypto_cipher *tx = local->wep_tx_tfm;
	struct crypto_cipher *rx = local->wep_rx_tfm;

	/* error-valued pointers mean the transform was never allocated */
	if (!IS_ERR(tx))
		crypto_free_cipher(tx);
	if (!IS_ERR(rx))
		crypto_free_cipher(rx);
}
/*
 * Fluhrer, Mantin, and Shamir reported weaknesses in RC4's key
 * scheduling: IVs of the form (KeyByte + 3, 0xff, N) speed up attacks,
 * so such IVs must be avoided on TX and can be flagged on RX.
 */
static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
{
	u8 first_byte = (iv >> 16) & 0xff;

	/* only IVs whose middle byte is 0xff are of the weak form */
	if (((iv >> 8) & 0xff) != 0xff)
		return false;

	return first_byte >= 3 && first_byte < 3 + keylen;
}
/*
 * Advance the per-device IV counter (skipping FMS-weak values) and,
 * when @iv is non-NULL, write the 3 IV bytes plus the key index byte.
 */
static void ieee80211_wep_get_iv(struct ieee80211_local *local,
				 int keylen, int keyidx, u8 *iv)
{
	u32 val;

	local->wep_iv++;
	if (ieee80211_wep_weak_iv(local->wep_iv, keylen))
		local->wep_iv += 0x0100;	/* step past the weak class */

	if (!iv)
		return;

	val = local->wep_iv;
	iv[0] = (val >> 16) & 0xff;
	iv[1] = (val >> 8) & 0xff;
	iv[2] = val & 0xff;
	iv[3] = keyidx << 6;	/* key index in the two top bits */
}
/*
 * Open a 4-byte gap between the 802.11 header and the payload, set the
 * "protected" bit in frame control, and write a fresh IV + key index
 * into the gap.  Returns a pointer to the IV inside the skb, or NULL
 * when the skb lacks the required head/tailroom.
 */
static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
				struct sk_buff *skb,
				int keylen, int keyidx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen;
	u8 *newhdr;

	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	/* need 4 head bytes for the IV now and 4 tail bytes for the ICV
	 * that the encrypt step appends later */
	if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
		    skb_headroom(skb) < WEP_IV_LEN))
		return NULL;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	newhdr = skb_push(skb, WEP_IV_LEN);
	/* slide the header toward the new head; the gap holds the IV */
	memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
	ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
	return newhdr + hdrlen;
}
/* Strip the 4-byte WEP IV that sits right after the 802.11 header. */
static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
				    struct sk_buff *skb,
				    struct ieee80211_key *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* slide the header over the IV, then drop the 4 freed bytes */
	memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
	skb_pull(skb, WEP_IV_LEN);
}
/* Perform WEP encryption using given key. data buffer must have tailroom
* for 4-byte ICV. data_len must not include this ICV. Note: this function
* does _not_ add IV. data = RC4(data | CRC32(data)) */
/*
 * In-place WEP payload encryption: append ICV = ~CRC32(data) after the
 * payload, then RC4 the payload and ICV together.  The buffer must have
 * WEP_ICV_LEN bytes of room past data_len.  Returns 0, or -1 when the
 * transform was never allocated.
 */
int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
			       size_t klen, u8 *data, size_t data_len)
{
	__le32 icv;
	int pos;

	if (IS_ERR(tfm))
		return -1;

	/* little-endian one's-complement CRC32 forms the ICV */
	icv = cpu_to_le32(~crc32_le(~0, data, data_len));
	put_unaligned(icv, (__le32 *)(data + data_len));

	crypto_cipher_setkey(tfm, rc4key, klen);
	/* RC4 is a stream cipher: process one byte per "block" */
	for (pos = 0; pos < data_len + WEP_ICV_LEN; pos++)
		crypto_cipher_encrypt_one(tfm, data + pos, data + pos);

	return 0;
}
/* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the
* beginning of the buffer 4 bytes of extra space (ICV) in the end of the
* buffer will be added. Both IV and ICV will be transmitted, so the
* payload length increases with 8 bytes.
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
/*
 * Add IV + key index, append ICV room, and RC4-encrypt the payload of
 * @skb with the 24-bit IV prepended to the @keylen-byte secret key.
 * Returns 0 on success, -1 when headroom/tailroom or the transform is
 * missing.
 */
int ieee80211_wep_encrypt(struct ieee80211_local *local,
			  struct sk_buff *skb,
			  const u8 *key, int keylen, int keyidx)
{
	u8 *iv;
	size_t len;
	u8 rc4key[3 + WLAN_KEY_LEN_WEP104];

	iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
	if (!iv)
		return -1;

	/* payload length = bytes after the freshly inserted IV */
	len = skb->len - (iv + WEP_IV_LEN - skb->data);

	/* Prepend 24-bit IV to RC4 key */
	memcpy(rc4key, iv, 3);

	/* Copy rest of the WEP key (the secret part) */
	memcpy(rc4key + 3, key, keylen);

	/* Add room for ICV */
	skb_put(skb, WEP_ICV_LEN);

	return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
					  iv + WEP_IV_LEN, len);
}
/* Perform WEP decryption using given key. data buffer includes encrypted
* payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
* Return 0 on success and -1 on ICV mismatch. */
/*
 * In-place WEP payload decryption: RC4 the payload + trailing ICV, then
 * verify that ICV == ~CRC32(payload).  Returns 0 on success, -1 on ICV
 * mismatch or missing transform.
 */
int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
			       size_t klen, u8 *data, size_t data_len)
{
	__le32 expected;
	int pos;

	if (IS_ERR(tfm))
		return -1;

	crypto_cipher_setkey(tfm, rc4key, klen);
	/* stream-cipher style: decrypt payload and ICV byte by byte */
	for (pos = 0; pos < data_len + WEP_ICV_LEN; pos++)
		crypto_cipher_decrypt_one(tfm, data + pos, data + pos);

	/* recompute the CRC and compare against the decrypted ICV */
	expected = cpu_to_le32(~crc32_le(~0, data, data_len));
	if (memcmp(&expected, data + data_len, WEP_ICV_LEN) != 0)
		return -1;	/* ICV mismatch */

	return 0;
}
/* Perform WEP decryption on given skb. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). skb->len includes both IV and ICV.
*
* Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
* failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload
* is moved to the beginning of the skb and skb length will be reduced.
*/
static int ieee80211_wep_decrypt(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_key *key)
{
	u32 klen;
	u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
	u8 keyidx;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen;
	size_t len;
	int ret = 0;

	/* only protected frames carry an IV */
	if (!ieee80211_has_protected(hdr->frame_control))
		return -1;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* frame must at least hold header + IV + ICV */
	if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
		return -1;

	len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;

	/* the key index occupies the top two bits of the 4th IV byte */
	keyidx = skb->data[hdrlen + 3] >> 6;

	if (!key || keyidx != key->conf.keyidx)
		return -1;

	klen = 3 + key->conf.keylen;

	/* Prepend 24-bit IV to RC4 key */
	memcpy(rc4key, skb->data + hdrlen, 3);

	/* Copy rest of the WEP key (the secret part) */
	memcpy(rc4key + 3, key->conf.key, key->conf.keylen);

	/* decrypt in place; non-zero means the ICV check failed */
	if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
				       skb->data + hdrlen + WEP_IV_LEN,
				       len))
		ret = -1;

	/* Trim ICV */
	skb_trim(skb, skb->len - WEP_ICV_LEN);

	/* Remove IV */
	memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
	skb_pull(skb, WEP_IV_LEN);

	return ret;
}
/* Report whether a received protected frame carries an FMS-weak IV. */
bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const u8 *iv_bytes;
	u32 iv;

	if (!ieee80211_has_protected(hdr->frame_control))
		return false;

	/* the 3 IV bytes follow the 802.11 header, big-endian-style */
	iv_bytes = skb->data + ieee80211_hdrlen(hdr->frame_control);
	iv = (iv_bytes[0] << 16) | (iv_bytes[1] << 8) | iv_bytes[2];

	return ieee80211_wep_weak_iv(iv, key->conf.keylen);
}
/*
 * RX handler: software-decrypt WEP frames, or strip the IV/ICV that
 * hardware decryption left in place.  Drops frames that fail the
 * IV/key-index/ICV checks.
 */
ieee80211_rx_result
ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* only data and authentication frames are WEP-handled here */
	if (!ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control))
		return RX_CONTINUE;

	if (!(status->flag & RX_FLAG_DECRYPTED)) {
		/* software path: decrypt (also strips IV/ICV on success) */
		if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
			return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/* hardware decrypted but left IV/ICV in the frame */
		ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
		/* remove ICV */
		skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
	}

	return RX_CONTINUE;
}
/*
 * Encrypt one TX frame: full software WEP when there is no hardware
 * key, or just IV generation when hardware encrypts but asks the stack
 * to supply the IV.  Returns 0 on success, -1 on failure.
 */
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (!info->control.hw_key)
		return ieee80211_wep_encrypt(tx->local, skb,
					     tx->key->conf.key,
					     tx->key->conf.keylen,
					     tx->key->conf.keyidx) ? -1 : 0;

	/* hardware does the crypto; add the IV ourselves if requested */
	if ((info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
	    !ieee80211_wep_add_iv(tx->local, skb, tx->key->conf.keylen,
				  tx->key->conf.keyidx))
		return -1;

	return 0;
}
/*
 * TX handler: mark the frame chain protected and WEP-encrypt every
 * fragment; a single failure drops the whole transmission.
 */
ieee80211_tx_result
ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
{
	struct sk_buff *frame;

	ieee80211_tx_set_protected(tx);

	frame = tx->skb;
	do {
		if (wep_encrypt_skb(tx, frame) < 0) {
			I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
			return TX_DROP;
		}
		frame = frame->next;
	} while (frame);

	return TX_CONTINUE;
}
| gpl-2.0 |
Insswer/kernel_imx | arch/arm/mach-pxa/reset.c | 4530 | 1852 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
#include <mach/regs-ost.h>
#include <mach/reset.h>
/* Cause of the last reset; cleared via clear_reset_status() in arch_reset(). */
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);

/* forward declaration: do_gpio_reset() falls back to the watchdog reset */
static void do_hw_reset(void);

/* GPIO wired to the reset logic; -1 until init_gpio_reset() succeeds */
static int reset_gpio = -1;
/*
 * Claim @gpio as the board's reset generator and configure it as an
 * output driven to @level (when @output) or as an input.  On success
 * the pin is remembered in reset_gpio for do_gpio_reset().  Returns 0
 * or a negative gpiolib error code.
 */
int init_gpio_reset(int gpio, int output, int level)
{
	int err = gpio_request(gpio, "reset generator");

	if (err) {
		printk(KERN_ERR "Can't request reset_gpio\n");
		return err;
	}

	err = output ? gpio_direction_output(gpio, level)
		     : gpio_direction_input(gpio);
	if (err) {
		printk(KERN_ERR "Can't configure reset_gpio\n");
		gpio_free(gpio);
		return err;
	}

	reset_gpio = gpio;
	return 0;
}
/*
* Trigger GPIO reset.
* This covers various types of logic connecting gpio pin
* to RESET pins (nRESET or GPIO_RESET):
*/
static void do_gpio_reset(void)
{
	/* init_gpio_reset() must have run before a 'g' mode reset */
	BUG_ON(reset_gpio == -1);

	/* drive it low */
	gpio_direction_output(reset_gpio, 0);
	mdelay(2);
	/* rising edge or drive high */
	gpio_set_value(reset_gpio, 1);
	mdelay(2);
	/* falling edge */
	gpio_set_value(reset_gpio, 0);

	/* give it some time */
	mdelay(10);

	/* still alive: the GPIO pulse failed to reset the machine */
	WARN_ON(1);
	/* fallback */
	do_hw_reset();
}
static void do_hw_reset(void)
{
	/* Initialize the watchdog and let it fire */
	OWER = OWER_WME;	/* enable watchdog match -> reset */
	OSSR = OSSR_M3;		/* clear any stale match-3 status */
	OSMR3 = OSCR + 368640;	/* ... in 100 ms */
}
/*
 * Machine reset entry point.  @mode selects the method: 's' jumps to
 * the reset vector, 'g' pulses the board reset GPIO, 'h' (and any
 * unrecognized mode) arms the watchdog.  @cmd is unused.
 */
void arch_reset(char mode, const char *cmd)
{
	clear_reset_status(RESET_STATUS_ALL);

	if (mode == 's') {
		/* Jump into ROM at address 0 */
		cpu_reset(0);
	} else if (mode == 'g') {
		do_gpio_reset();
	} else {
		/* 'h' and everything else: hardware watchdog reset */
		do_hw_reset();
	}
}
| gpl-2.0 |
burakgon/E7_Elite_kernel | kernel/drivers/usb/host/ohci-hcd.c | 4530 | 34280 | /*
* Open Host Controller Interface (OHCI) driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
*
* [ Initialisation is based on Linus' ]
* [ uhci code and gregs ohci fragments ]
* [ (C) Copyright 1999 Linus Torvalds ]
* [ (C) Copyright 1999 Gregory P. Smith]
*
*
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
* interfaces (though some non-x86 Intel chips use it). It supports
* smarter hardware than UHCI. A download link for the spec available
* through the http://www.usb.org website.
*
* This file is licenced under the GPL.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
/*-------------------------------------------------------------------------*/
#undef OHCI_VERBOSE_DEBUG /* not always helpful */
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
(OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE \
| OHCI_INTR_RD | OHCI_INTR_WDH)
#ifdef __hppa__
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#define IR_DISABLE
#endif
#ifdef CONFIG_ARCH_OMAP
/* OMAP doesn't support IR (no SMM; not needed) */
#define IR_DISABLE
#endif
/*-------------------------------------------------------------------------*/
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#include "ohci.h"
#include "pci-quirks.h"
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
static int ohci_init (struct ohci_hcd *ohci);
static void ohci_stop (struct usb_hcd *hcd);
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
static int ohci_restart (struct ohci_hcd *ohci);
#endif
#ifdef CONFIG_PCI
static void sb800_prefetch(struct ohci_hcd *ohci, int on);
#else
/* Non-PCI stub: the SB800 prefetch quirk only exists on PCI builds. */
static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
	return;
}
#endif
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
#include "ohci-q.c"
/*
* On architectures with edge-triggered interrupts we must never return
* IRQ_NONE.
*/
#if defined(CONFIG_SA1111) /* ... or other edge-triggered systems */
#define IRQ_NOTMINE IRQ_HANDLED
#else
#define IRQ_NOTMINE IRQ_NONE
#endif
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);	/* perm 0: no sysfs entry */
MODULE_PARM_DESC (distrust_firmware,
	"true to distrust firmware power/overcurrent setup");

/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
/*-------------------------------------------------------------------------*/
/*
* queue up an urb for anything except the root hub
*/
/*
 * Queue an URB on its endpoint descriptor (ED): size the TD array,
 * allocate per-URB state, and hand the TDs to the HC schedule under
 * ohci->lock.  Returns 0 or a negative errno (-ENOMEM, -ENODEV,
 * -EMSGSIZE).
 */
static int ohci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	gfp_t		mem_flags
) {
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	struct ed	*ed;
	urb_priv_t	*urb_priv;
	unsigned int	pipe = urb->pipe;
	int		i, size = 0;
	unsigned long	flags;
	int		retval = 0;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
#endif

	/* every endpoint has a ed, locate and maybe (re)initialize it */
	if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
		return -ENOMEM;

	/* for the private part of the URB we need the number of TDs (size) */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* td_submit_urb() doesn't yet handle these */
		if (urb->transfer_buffer_length > 4096)
			return -EMSGSIZE;

		/* 1 TD for setup, 1 for ACK, plus ... */
		size = 2;
		/* FALLTHROUGH */
	// case PIPE_INTERRUPT:
	// case PIPE_BULK:
	default:
		/* one TD for every 4096 Bytes (can be up to 8K) */
		size += urb->transfer_buffer_length / 4096;
		/* ... and for any remaining bytes ... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ... and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			&& (urb->transfer_buffer_length
				% usb_maxpacket (urb->dev, pipe,
					usb_pipeout (pipe))) == 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS: /* number of packets from URB */
		size = urb->number_of_packets;
		break;
	}

	/* allocate the private part of the URB */
	urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
			mem_flags);
	if (!urb_priv)
		return -ENOMEM;
	INIT_LIST_HEAD (&urb_priv->pending);
	urb_priv->length = size;
	urb_priv->ed = ed;

	/* allocate the TDs (deferring hash chain updates) */
	for (i = 0; i < size; i++) {
		urb_priv->td [i] = td_alloc (ohci, mem_flags);
		if (!urb_priv->td [i]) {
			/* shrink length so urb_free_priv() frees only
			 * the TDs actually allocated */
			urb_priv->length = i;
			urb_free_priv (ohci, urb_priv);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave (&ohci->lock, flags);

	/* don't submit to a dead HC */
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		retval = -ENODEV;
		goto fail;
	}
	if (ohci->rh_state != OHCI_RH_RUNNING) {
		retval = -ENODEV;
		goto fail;
	}
	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval)
		goto fail;

	/* schedule the ed if needed */
	if (ed->state == ED_IDLE) {
		retval = ed_schedule (ohci, ed);
		if (retval < 0) {
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			goto fail;
		}
		if (ed->type == PIPE_ISOCHRONOUS) {
			u16 frame = ohci_frame_no(ohci);

			/* delay a few frames before the first TD */
			frame += max_t (u16, 8, ed->interval);
			frame &= ~(ed->interval - 1);
			frame |= ed->branch;
			urb->start_frame = frame;

			/* yes, only URB_ISO_ASAP is supported, and
			 * urb->start_frame is never used as input.
			 */
		}
	} else if (ed->type == PIPE_ISOCHRONOUS)
		urb->start_frame = ed->last_iso + ed->interval;

	/* fill the TDs and link them to the ed; and
	 * enable that part of the schedule, if needed
	 * and update count of queued periodic urbs
	 */
	urb->hcpriv = urb_priv;
	td_submit_urb (ohci, urb);

fail:
	if (retval)
		urb_free_priv (ohci, urb_priv);
	spin_unlock_irqrestore (&ohci->lock, flags);
	return retval;
}
/*
* decouple the URB from the HC queues (TDs, urb_priv).
* reporting is always done
* asynchronously, and we might be dealing with an urb that's
* partially transferred, or an ED with other urbs being unlinked.
*/
static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	unsigned long	flags;
	int		rc;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print(urb, "UNLINK", 1, status);
#endif

	spin_lock_irqsave (&ohci->lock, flags);
	/* let the USB core veto unlinks of URBs it no longer tracks */
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc) {
		;	/* Do nothing */
	} else if (ohci->rh_state == OHCI_RH_RUNNING) {
		urb_priv_t  *urb_priv;

		/* Unless an IRQ completed the unlink while it was being
		 * handed to us, flag it for unlink and giveback, and force
		 * some upcoming INTR_SF to call finish_unlinks()
		 */
		urb_priv = urb->hcpriv;
		if (urb_priv) {
			if (urb_priv->ed->state == ED_OPER)
				start_ed_unlink (ohci, urb_priv->ed);
		}
	} else {
		/*
		 * with HC dead, we won't respect hc queue pointers
		 * any more ... just clean up every urb's memory.
		 */
		if (urb->hcpriv)
			finish_urb(ohci, urb, status);
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return rc;
}
/*-------------------------------------------------------------------------*/
/* frees config/altsetting state for endpoints,
* including ED memory, dummy TD, and bulk/intr data toggle
*/
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	unsigned long		flags;
	struct ed		*ed = ep->hcpriv;
	unsigned		limit = 1000;	/* rescan iterations before giving up */

	/* ASSERT: any requests/urbs are being unlinked */
	/* ASSERT: nobody can be submitting urbs for this any more */

	if (!ed)
		return;

rescan:
	spin_lock_irqsave (&ohci->lock, flags);

	if (ohci->rh_state != OHCI_RH_RUNNING) {
		/* HC not running: force the ED idle and flush unlinks */
sanitize:
		ed->state = ED_IDLE;
		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
			ohci->eds_scheduled--;
		finish_unlinks (ohci, 0);
	}

	switch (ed->state) {
	case ED_UNLINK:		/* wait for hw to finish? */
		/* major IRQ delivery trouble loses INTR_SF too... */
		if (limit-- == 0) {
			ohci_warn(ohci, "ED unlink timeout\n");
			if (quirk_zfmicro(ohci)) {
				ohci_warn(ohci, "Attempting ZF TD recovery\n");
				ohci->ed_to_check = ed;
				ohci->zf_delay = 2;
			}
			goto sanitize;
		}
		/* drop the lock, let IRQ/SOF processing run, re-check */
		spin_unlock_irqrestore (&ohci->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case ED_IDLE:		/* fully unlinked */
		if (list_empty (&ed->td_list)) {
			td_free (ohci, ed->dummy);
			ed_free (ohci, ed);
			break;
		}
		/* else FALL THROUGH */
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  can't recover; must leak ed.
		 */
		ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
			ed, ep->desc.bEndpointAddress, ed->state,
			list_empty (&ed->td_list) ? "" : " (has tds)");
		td_free (ohci, ed->dummy);
		break;
	}
	ep->hcpriv = NULL;
	spin_unlock_irqrestore (&ohci->lock, flags);
}
/* Report the current USB frame number for this controller. */
static int ohci_get_frame (struct usb_hcd *hcd)
{
	return ohci_frame_no(hcd_to_ohci(hcd));
}
/* Halt the controller, keeping only the RemoteWakeupConnected (RWC) bit. */
static void ohci_usb_reset (struct ohci_hcd *ohci)
{
	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
	ohci->hc_control &= OHCI_CTRL_RWC;	/* drop all bits except RWC */
	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
static void
ohci_shutdown (struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci;

	ohci = hcd_to_ohci (hcd);
	/* mask every interrupt source before resetting */
	ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);

	/* Software reset, after which the controller goes into SUSPEND */
	ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
	ohci_readl(ohci, &ohci->regs->cmdstatus);	/* flush the writes */
	udelay(10);

	/* restore fmInterval, which the software reset cleared */
	ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
{
return (hc32_to_cpu(ohci, ed->hwINFO) & ED_IN) != 0
&& (hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK)
== (hc32_to_cpu(ohci, ed->hwTailP) & TD_MASK)
&& !list_empty(&ed->td_list);
}
/* ZF Micro watchdog timer callback. The ZF Micro chipset sometimes completes
* an interrupt TD but neglects to add it to the donelist. On systems with
* this chipset, we need to periodically check the state of the queues to look
* for such "lost" TDs.
*/
/*
 * Timer callback (fires about once per second while interrupt EDs are
 * scheduled): walk the periodic tree looking for a "stuck" ED per
 * check_ed(); if found, arm the INTR_SF path to re-examine it.
 */
static void unlink_watchdog_func(unsigned long _ohci)
{
	unsigned long	flags;
	unsigned	max;
	unsigned	seen_count = 0;
	unsigned	i;
	struct ed	**seen = NULL;	/* EDs visited, to skip shared branches */
	struct ohci_hcd	*ohci = (struct ohci_hcd *) _ohci;

	spin_lock_irqsave(&ohci->lock, flags);
	max = ohci->eds_scheduled;
	if (!max)
		goto done;	/* nothing scheduled: let the timer lapse */

	if (ohci->ed_to_check)
		goto out;	/* a recovery is already pending */

	seen = kcalloc(max, sizeof *seen, GFP_ATOMIC);
	if (!seen)
		goto out;

	for (i = 0; i < NUM_INTS; i++) {
		struct ed *ed = ohci->periodic[i];

		while (ed) {
			unsigned temp;

			/* scan this branch of the periodic schedule tree */
			for (temp = 0; temp < seen_count; temp++) {
				if (seen[temp] == ed) {
					/* we've checked it and what's after */
					ed = NULL;
					break;
				}
			}
			if (!ed)
				break;
			seen[seen_count++] = ed;
			if (!check_ed(ohci, ed)) {
				ed = ed->ed_next;
				continue;
			}

			/* HC's TD list is empty, but HCD sees at least one
			 * TD that's not been sent through the donelist.
			 */
			ohci->ed_to_check = ed;
			ohci->zf_delay = 2;

			/* The HC may wait until the next frame to report the
			 * TD as done through the donelist and INTR_WDH.  (We
			 * just *assume* it's not a multi-TD interrupt URB;
			 * those could defer the IRQ more than one frame, using
			 * DI...)  Check again after the next INTR_SF.
			 */
			ohci_writel(ohci, OHCI_INTR_SF,
					&ohci->regs->intrstatus);
			ohci_writel(ohci, OHCI_INTR_SF,
					&ohci->regs->intrenable);

			/* flush those writes */
			(void) ohci_readl(ohci, &ohci->regs->control);

			goto out;
		}
	}
out:
	kfree(seen);
	/* re-arm only while interrupt EDs remain scheduled */
	if (ohci->eds_scheduled)
		mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
done:
	spin_unlock_irqrestore(&ohci->lock, flags);
}
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
/* init memory, and kick BIOS/SMM off */
static int ohci_init (struct ohci_hcd *ohci)
{
	int ret;
	struct usb_hcd *hcd = ohci_to_hcd(ohci);

	if (distrust_firmware)
		ohci->flags |= OHCI_QUIRK_HUB_POWER;

	ohci->rh_state = OHCI_RH_HALTED;
	ohci->regs = hcd->regs;

	/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
	 * was never needed for most non-PCI systems ... remove the code?
	 */

#ifndef IR_DISABLE
	/* SMM owns the HC?  not for long! */
	if (!no_handshake && ohci_readl (ohci,
			&ohci->regs->control) & OHCI_CTRL_IR) {
		u32 temp;

		ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");

		/* this timeout is arbitrary.  we make it long, so systems
		 * depending on usb keyboards may be usable even if the
		 * BIOS/SMM code seems pretty broken.
		 */
		temp = 500;	/* arbitrary: five seconds */
		ohci_writel (ohci, OHCI_INTR_OC, &ohci->regs->intrenable);
		ohci_writel (ohci, OHCI_OCR, &ohci->regs->cmdstatus);
		while (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_IR) {
			msleep (10);
			if (--temp == 0) {
				ohci_err (ohci, "USB HC takeover failed!"
					" (BIOS/SMM bug)\n");
				return -EBUSY;
			}
		}
		ohci_usb_reset (ohci);
	}
#endif

	/* Disable HC interrupts */
	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);

	/* flush the writes, and save key bits like RWC */
	if (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_RWC)
		ohci->hc_control |= OHCI_CTRL_RWC;

	/* Read the number of ports unless overridden */
	if (ohci->num_ports == 0)
		ohci->num_ports = roothub_a(ohci) & RH_A_NDP;

	/* on a restart the HCCA and memory pools already exist */
	if (ohci->hcca)
		return 0;

	ohci->hcca = dma_alloc_coherent (hcd->self.controller,
			sizeof *ohci->hcca, &ohci->hcca_dma, 0);
	if (!ohci->hcca)
		return -ENOMEM;

	if ((ret = ohci_mem_init (ohci)) < 0)
		ohci_stop (hcd);
	else {
		create_debug_files (ohci);
	}

	return ret;
}
/*-------------------------------------------------------------------------*/
/* Start an OHCI controller, set the BUS operational
* resets USB and controller
* enable interrupts
*/
/*
 * ohci_run - take the controller to the OPERATIONAL (running) state.
 * @ohci: controller state; ohci->regs must be mapped and ohci->hcca
 *        allocated before this is called.
 *
 * Resets the host controller "by the book" (OHCI spec), programs the
 * frame interval, list-head and HCCA registers, enables the initial
 * interrupt set, and powers the root-hub ports (honoring per-chip
 * quirks).  Returns 0 on success, -1 if the HC reset times out, or
 * -EOVERFLOW when the controller keeps bogus fmInterval/PeriodicStart
 * values even after one retry with the INITRESET quirk.
 * Runs in task context and may sleep (msleep/mdelay).
 */
static int ohci_run (struct ohci_hcd *ohci)
{
	u32 mask, val;
	/* first is set on the initial call only, while fminterval is unset */
	int first = ohci->fminterval == 0;
	struct usb_hcd *hcd = ohci_to_hcd(ohci);

	ohci->rh_state = OHCI_RH_HALTED;

	/* boot firmware should have set this up (5.1.1.3.1) */
	if (first) {
		val = ohci_readl (ohci, &ohci->regs->fminterval);
		ohci->fminterval = val & 0x3fff;
		if (ohci->fminterval != FI)
			ohci_dbg (ohci, "fminterval delta %d\n",
				ohci->fminterval - FI);
		ohci->fminterval |= FSMP (ohci->fminterval) << 16;
		/* also: power/overcurrent flags in roothub.a */
	}

	/* Reset USB nearly "by the book". RemoteWakeupConnected has
	 * to be checked in case boot firmware (BIOS/SMM/...) has set up
	 * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
	 * If the bus glue detected wakeup capability then it should
	 * already be enabled; if so we'll just enable it again.
	 */
	if ((ohci->hc_control & OHCI_CTRL_RWC) != 0)
		device_set_wakeup_capable(hcd->self.controller, 1);

	/* Pick the settle delay required before reset, based on the
	 * functional state the controller is currently in. */
	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_OPER:
		val = 0;
		break;
	case OHCI_USB_SUSPEND:
	case OHCI_USB_RESUME:
		ohci->hc_control &= OHCI_CTRL_RWC;
		ohci->hc_control |= OHCI_USB_RESUME;
		val = 10 /* msec wait */;
		break;
	// case OHCI_USB_RESET:
	default:
		ohci->hc_control &= OHCI_CTRL_RWC;
		ohci->hc_control |= OHCI_USB_RESET;
		val = 50 /* msec wait */;
		break;
	}
	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	// flush the writes
	(void) ohci_readl (ohci, &ohci->regs->control);
	msleep(val);

	memset (ohci->hcca, 0, sizeof (struct ohci_hcca));

	/* 2msec timelimit here means no irqs/preempt */
	spin_lock_irq (&ohci->lock);

retry:
	/* HC Reset requires max 10 us delay */
	ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
	val = 30; /* ... allow extra time */
	while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
		if (--val == 0) {
			spin_unlock_irq (&ohci->lock);
			ohci_err (ohci, "USB HC reset timed out!\n");
			return -1;
		}
		udelay (1);
	}

	/* now we're in the SUSPEND state ... must go OPERATIONAL
	 * within 2msec else HC enters RESUME
	 *
	 * ... but some hardware won't init fmInterval "by the book"
	 * (SiS, OPTi ...), so reset again instead. SiS doesn't need
	 * this if we write fmInterval after we're OPERATIONAL.
	 * Unclear about ALi, ServerWorks, and others ... this could
	 * easily be a longstanding bug in chip init on Linux.
	 */
	if (ohci->flags & OHCI_QUIRK_INITRESET) {
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
		// flush those writes
		(void) ohci_readl (ohci, &ohci->regs->control);
	}

	/* Tell the controller where the control and bulk lists are
	 * The lists are empty now. */
	ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
	ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);

	/* a reset clears this */
	ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);

	periodic_reinit (ohci);

	/* some OHCI implementations are finicky about how they init.
	 * bogus values here mean not even enumeration could work.
	 */
	if ((ohci_readl (ohci, &ohci->regs->fminterval) & 0x3fff0000) == 0
			|| !ohci_readl (ohci, &ohci->regs->periodicstart)) {
		if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
			/* one retry with the INITRESET quirk before giving up */
			ohci->flags |= OHCI_QUIRK_INITRESET;
			ohci_dbg (ohci, "enabling initreset quirk\n");
			goto retry;
		}
		spin_unlock_irq (&ohci->lock);
		ohci_err (ohci, "init err (%08x %04x)\n",
			ohci_readl (ohci, &ohci->regs->fminterval),
			ohci_readl (ohci, &ohci->regs->periodicstart));
		return -EOVERFLOW;
	}

	/* use rhsc irqs after khubd is fully initialized */
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	hcd->uses_new_polling = 1;

	/* start controller operations */
	ohci->hc_control &= OHCI_CTRL_RWC;
	ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	ohci->rh_state = OHCI_RH_RUNNING;

	/* wake on ConnectStatusChange, matching external hubs */
	ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);

	/* Choose the interrupts we care about now, others later on demand */
	mask = OHCI_INTR_INIT;
	ohci_writel (ohci, ~0, &ohci->regs->intrstatus);
	ohci_writel (ohci, mask, &ohci->regs->intrenable);

	/* handle root hub init quirks ... */
	val = roothub_a (ohci);
	val &= ~(RH_A_PSM | RH_A_OCPM);
	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
		/* NSC 87560 and maybe others */
		val |= RH_A_NOCP;
		val &= ~(RH_A_POTPGT | RH_A_NPS);
		ohci_writel (ohci, val, &ohci->regs->roothub.a);
	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
		/* hub power always on; required for AMD-756 and some
		 * Mac platforms. ganged overcurrent reporting, if any.
		 */
		val |= RH_A_NPS;
		ohci_writel (ohci, val, &ohci->regs->roothub.a);
	}
	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
	ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
						&ohci->regs->roothub.b);
	// flush those writes
	(void) ohci_readl (ohci, &ohci->regs->control);

	ohci->next_statechange = jiffies + STATECHANGE_DELAY;
	spin_unlock_irq (&ohci->lock);

	// POTPGT delay is bits 24-31, in 2 ms units.
	mdelay ((val >> 23) & 0x1fe);

	if (quirk_zfmicro(ohci)) {
		/* Create timer to watch for bad queue state on ZF Micro */
		setup_timer(&ohci->unlink_watchdog, unlink_watchdog_func,
				(unsigned long) ohci);

		ohci->eds_scheduled = 0;
		ohci->ed_to_check = NULL;
	}

	ohci_dump (ohci, 1);

	return 0;
}
/*-------------------------------------------------------------------------*/
/* an interrupt happens */
/*
 * ohci_irq - top-half interrupt handler for the OHCI controller.
 * @hcd: the HCD whose IRQ fired (possibly shared).
 *
 * Reads and acknowledges HcInterruptStatus, dispatches unrecoverable
 * errors, root-hub status changes, resume detect, writeback-done-head
 * and start-of-frame events, then re-enables the master interrupt.
 * Returns IRQ_NOTMINE when the interrupt belongs to another device on
 * a shared line, IRQ_HANDLED otherwise.
 *
 * Fix: every "&regs->..." expression in this copy had been corrupted
 * into the mojibake token "(R)s->..." (an HTML-entity collapse of
 * "&reg"), which does not compile; the address-of expressions are
 * restored here.  No logic is changed.
 */
static irqreturn_t ohci_irq (struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci (hcd);
	struct ohci_regs __iomem *regs = ohci->regs;
	int ints;

	/* Read interrupt status (and flush pending writes). We ignore the
	 * optimization of checking the LSB of hcca->done_head; it doesn't
	 * work on all systems (edge triggering for OHCI can be a factor).
	 */
	ints = ohci_readl(ohci, &regs->intrstatus);

	/* Check for an all 1's result which is a typical consequence
	 * of dead, unclocked, or unplugged (CardBus...) devices
	 */
	if (ints == ~(u32)0) {
		ohci->rh_state = OHCI_RH_HALTED;
		ohci_dbg (ohci, "device removed!\n");
		usb_hc_died(hcd);
		return IRQ_HANDLED;
	}

	/* We only care about interrupts that are enabled */
	ints &= ohci_readl(ohci, &regs->intrenable);

	/* interrupt for some other device? */
	if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
		return IRQ_NOTMINE;

	if (ints & OHCI_INTR_UE) {
		// e.g. due to PCI Master/Target Abort
		if (quirk_nec(ohci)) {
			/* Workaround for a silicon bug in some NEC chips used
			 * in Apple's PowerBooks. Adapted from Darwin code.
			 */
			ohci_err (ohci, "OHCI Unrecoverable Error, scheduling NEC chip restart\n");
			ohci_writel (ohci, OHCI_INTR_UE, &regs->intrdisable);
			schedule_work (&ohci->nec_work);
		} else {
			ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
			ohci->rh_state = OHCI_RH_HALTED;
			usb_hc_died(hcd);
		}

		ohci_dump (ohci, 1);
		ohci_usb_reset (ohci);
	}

	if (ints & OHCI_INTR_RHSC) {
		ohci_vdbg(ohci, "rhsc\n");
		ohci->next_statechange = jiffies + STATECHANGE_DELAY;
		ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
				&regs->intrstatus);

		/* NOTE: Vendors didn't always make the same implementation
		 * choices for RHSC. Many followed the spec; RHSC triggers
		 * on an edge, like setting and maybe clearing a port status
		 * change bit. With others it's level-triggered, active
		 * until khubd clears all the port status change bits. We'll
		 * always disable it here and rely on polling until khubd
		 * re-enables it.
		 */
		ohci_writel(ohci, OHCI_INTR_RHSC, &regs->intrdisable);
		usb_hcd_poll_rh_status(hcd);
	}

	/* For connect and disconnect events, we expect the controller
	 * to turn on RHSC along with RD. But for remote wakeup events
	 * this might not happen.
	 */
	else if (ints & OHCI_INTR_RD) {
		ohci_vdbg(ohci, "resume detect\n");
		ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
		if (ohci->autostop) {
			spin_lock (&ohci->lock);
			ohci_rh_resume (ohci);
			spin_unlock (&ohci->lock);
		} else
			usb_hcd_resume_root_hub(hcd);
	}

	if (ints & OHCI_INTR_WDH) {
		spin_lock (&ohci->lock);
		dl_done_list (ohci);
		spin_unlock (&ohci->lock);
	}

	if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
		spin_lock(&ohci->lock);
		if (ohci->ed_to_check) {
			struct ed *ed = ohci->ed_to_check;

			if (check_ed(ohci, ed)) {
				/* HC thinks the TD list is empty; HCD knows
				 * at least one TD is outstanding
				 */
				if (--ohci->zf_delay == 0) {
					struct td *td = list_entry(
						ed->td_list.next,
						struct td, td_list);
					ohci_warn(ohci,
						"Reclaiming orphan TD %p\n",
						td);
					takeback_td(ohci, td);
					ohci->ed_to_check = NULL;
				}
			} else
				ohci->ed_to_check = NULL;
		}
		spin_unlock(&ohci->lock);
	}

	/* could track INTR_SO to reduce available PCI/... bandwidth */

	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
	 * when there's still unlinking to be done (next frame).
	 */
	spin_lock (&ohci->lock);
	if (ohci->ed_rm_list)
		finish_unlinks (ohci, ohci_frame_no(ohci));
	if ((ints & OHCI_INTR_SF) != 0
			&& !ohci->ed_rm_list
			&& !ohci->ed_to_check
			&& ohci->rh_state == OHCI_RH_RUNNING)
		ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
	spin_unlock (&ohci->lock);

	if (ohci->rh_state == OHCI_RH_RUNNING) {
		/* ack what we handled and re-enable the master interrupt */
		ohci_writel (ohci, ints, &regs->intrstatus);
		ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
		// flush those writes
		(void) ohci_readl (ohci, &ohci->regs->control);
	}

	return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
 * ohci_stop - shut the controller down and release its resources.
 * @hcd: the HCD being removed.
 *
 * Order matters: the NEC restart worker is flushed first so it cannot
 * touch the hardware after the reset; interrupts are masked before the
 * IRQ line is freed; debugfs entries and TD/ED pools go before the HCCA
 * DMA memory they may reference.
 */
static void ohci_stop (struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci (hcd);

	ohci_dump (ohci, 1);

	/* make sure the deferred NEC restart cannot run during teardown */
	if (quirk_nec(ohci))
		flush_work_sync(&ohci->nec_work);

	ohci_usb_reset (ohci);
	/* mask all HC interrupts before giving the IRQ line back */
	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
	free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	if (quirk_zfmicro(ohci))
		del_timer(&ohci->unlink_watchdog);
	if (quirk_amdiso(ohci))
		usb_amd_dev_put();

	remove_debug_files (ohci);
	ohci_mem_cleanup (ohci);
	if (ohci->hcca) {
		dma_free_coherent (hcd->self.controller,
				sizeof *ohci->hcca,
				ohci->hcca, ohci->hcca_dma);
		ohci->hcca = NULL;
		ohci->hcca_dma = 0;
	}
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
/* must not be called from interrupt context */
/*
 * ohci_restart - rebuild controller state after power loss (resume or
 * PCI error recovery).
 * @ohci: controller whose schedule must be discarded and re-started.
 *
 * Unlinks every ED that was live before the power loss, marks the
 * pending URBs -ESHUTDOWN, wipes the periodic schedule and the
 * control/bulk list tails, then calls ohci_run() to reinitialize the
 * hardware.  Returns 0 on success or ohci_run()'s error code.
 * Must not be called from interrupt context (takes ohci->lock with irqs
 * enabled and ohci_run() sleeps).
 */
static int ohci_restart (struct ohci_hcd *ohci)
{
	int temp;
	int i;
	struct urb_priv *priv;

	spin_lock_irq(&ohci->lock);
	ohci->rh_state = OHCI_RH_HALTED;

	/* Recycle any "live" eds/tds (and urbs). */
	if (!list_empty (&ohci->pending))
		ohci_dbg(ohci, "abort schedule...\n");
	list_for_each_entry (priv, &ohci->pending, pending) {
		struct urb *urb = priv->td[0]->urb;
		struct ed *ed = priv->ed;

		switch (ed->state) {
		case ED_OPER:
			/* move the ED onto the removal list for
			 * finish_unlinks() below */
			ed->state = ED_UNLINK;
			ed->hwINFO |= cpu_to_hc32(ohci, ED_DEQUEUE);
			ed_deschedule (ohci, ed);

			ed->ed_next = ohci->ed_rm_list;
			ed->ed_prev = NULL;
			ohci->ed_rm_list = ed;
			/* FALLTHROUGH */
		case ED_UNLINK:
			break;
		default:
			ohci_dbg(ohci, "bogus ed %p state %d\n",
					ed, ed->state);
		}

		if (!urb->unlinked)
			urb->unlinked = -ESHUTDOWN;
	}
	finish_unlinks (ohci, 0);
	spin_unlock_irq(&ohci->lock);

	/* paranoia, in case that didn't work: */

	/* empty the interrupt branches */
	for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;

	/* no EDs to remove */
	ohci->ed_rm_list = NULL;

	/* empty control and bulk lists */
	ohci->ed_controltail = NULL;
	ohci->ed_bulktail = NULL;

	if ((temp = ohci_run (ohci)) < 0) {
		ohci_err (ohci, "can't restart, %d\n", temp);
		return temp;
	}
	ohci_dbg(ohci, "restart complete\n");
	return 0;
}
#endif
/*-------------------------------------------------------------------------*/
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE ("GPL");
#ifdef CONFIG_PCI
#include "ohci-pci.c"
#define PCI_DRIVER ohci_pci_driver
#endif
#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
#include "ohci-sa1111.c"
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
#include "ohci-s3c2410.c"
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
#ifdef CONFIG_USB_OHCI_EXYNOS
#include "ohci-exynos.c"
#define PLATFORM_DRIVER exynos_ohci_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_OMAP3
#include "ohci-omap3.c"
#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
#endif
#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
#include "ohci-pxa27x.c"
#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
#endif
#ifdef CONFIG_ARCH_EP93XX
#include "ohci-ep93xx.c"
#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
#endif
#ifdef CONFIG_MIPS_ALCHEMY
#include "ohci-au1xxx.c"
#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
#endif
#ifdef CONFIG_PNX8550
#include "ohci-pnx8550.c"
#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
#include "ohci-ppc-soc.c"
#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
#endif
#ifdef CONFIG_ARCH_AT91
#include "ohci-at91.c"
#define PLATFORM_DRIVER ohci_hcd_at91_driver
#endif
#if defined(CONFIG_ARCH_PNX4008) || defined(CONFIG_ARCH_LPC32XX)
#include "ohci-nxp.c"
#define PLATFORM_DRIVER usb_hcd_nxp_driver
#endif
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#include "ohci-da8xx.c"
#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
#endif
#ifdef CONFIG_USB_OHCI_SH
#include "ohci-sh.c"
#define PLATFORM_DRIVER ohci_hcd_sh_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_PLAT_SPEAR
#include "ohci-spear.c"
#define PLATFORM_DRIVER spear_ohci_hcd_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ohci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_SSB
#include "ohci-ssb.c"
#define SSB_OHCI_DRIVER ssb_ohci_driver
#endif
#ifdef CONFIG_MFD_SM501
#include "ohci-sm501.c"
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
#endif
#ifdef CONFIG_MFD_TC6393XB
#include "ohci-tmio.c"
#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
#endif
#ifdef CONFIG_MACH_JZ4740
#include "ohci-jz4740.c"
#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
#endif
#ifdef CONFIG_USB_OCTEON_OHCI
#include "ohci-octeon.c"
#define PLATFORM_DRIVER ohci_octeon_driver
#endif
#ifdef CONFIG_USB_CNS3XXX_OHCI
#include "ohci-cns3xxx.c"
#define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver
#endif
#ifdef CONFIG_CPU_XLR
#include "ohci-xls.c"
#define PLATFORM_DRIVER ohci_xls_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
#include "ohci-platform.c"
#define PLATFORM_DRIVER ohci_platform_driver
#endif
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
!defined(OMAP3_PLATFORM_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
!defined(SA1111_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
!defined(SM501_OHCI_DRIVER) && \
!defined(TMIO_OHCI_DRIVER) && \
!defined(SSB_OHCI_DRIVER)
#error "missing bus glue for ohci-hcd"
#endif
/*
 * ohci_hcd_mod_init - register every configured OHCI bus-glue driver.
 *
 * Registration order: debugfs, PS3, generic platform, OMAP1, OMAP3, OF,
 * SA1111, PCI, SSB, SM501, TMIO.  On failure the labels below unwind in
 * strict reverse registration order, so only drivers that were actually
 * registered get unregistered.
 *
 * Fix: the original error path unwound PLATFORM before OMAP1/OMAP3, so
 * a failed PLATFORM (or OMAP1) registration fell through and
 * unregistered OMAP1/OMAP3 drivers that had never been registered,
 * while leaking registrations made earlier.  The unwind sequence is now
 * the exact reverse of the registration sequence.
 */
static int __init ohci_hcd_mod_init(void)
{
	int retval = 0;

	if (usb_disabled())
		return -ENODEV;

	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
	pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
		sizeof (struct ed), sizeof (struct td));
	set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);

#ifdef DEBUG
	ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
	if (!ohci_debug_root) {
		retval = -ENOENT;
		goto error_debug;
	}
#endif

#ifdef PS3_SYSTEM_BUS_DRIVER
	retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
	if (retval < 0)
		goto error_ps3;
#endif

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		goto error_platform;
#endif

#ifdef OMAP1_PLATFORM_DRIVER
	retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
	if (retval < 0)
		goto error_omap1_platform;
#endif

#ifdef OMAP3_PLATFORM_DRIVER
	retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
	if (retval < 0)
		goto error_omap3_platform;
#endif

#ifdef OF_PLATFORM_DRIVER
	retval = platform_driver_register(&OF_PLATFORM_DRIVER);
	if (retval < 0)
		goto error_of_platform;
#endif

#ifdef SA1111_DRIVER
	retval = sa1111_driver_register(&SA1111_DRIVER);
	if (retval < 0)
		goto error_sa1111;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0)
		goto error_pci;
#endif

#ifdef SSB_OHCI_DRIVER
	retval = ssb_driver_register(&SSB_OHCI_DRIVER);
	if (retval)
		goto error_ssb;
#endif

#ifdef SM501_OHCI_DRIVER
	retval = platform_driver_register(&SM501_OHCI_DRIVER);
	if (retval < 0)
		goto error_sm501;
#endif

#ifdef TMIO_OHCI_DRIVER
	retval = platform_driver_register(&TMIO_OHCI_DRIVER);
	if (retval < 0)
		goto error_tmio;
#endif

	return retval;

	/* Error path: each label undoes everything registered before the
	 * step that failed, falling through in reverse registration order.
	 */
#ifdef TMIO_OHCI_DRIVER
	platform_driver_unregister(&TMIO_OHCI_DRIVER);
 error_tmio:
#endif
#ifdef SM501_OHCI_DRIVER
	platform_driver_unregister(&SM501_OHCI_DRIVER);
 error_sm501:
#endif
#ifdef SSB_OHCI_DRIVER
	ssb_driver_unregister(&SSB_OHCI_DRIVER);
 error_ssb:
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
 error_pci:
#endif
#ifdef SA1111_DRIVER
	sa1111_driver_unregister(&SA1111_DRIVER);
 error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
	platform_driver_unregister(&OF_PLATFORM_DRIVER);
 error_of_platform:
#endif
#ifdef OMAP3_PLATFORM_DRIVER
	platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
 error_omap3_platform:
#endif
#ifdef OMAP1_PLATFORM_DRIVER
	platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
 error_omap1_platform:
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
 error_platform:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
 error_ps3:
#endif
#ifdef DEBUG
	debugfs_remove(ohci_debug_root);
	ohci_debug_root = NULL;
 error_debug:
#endif

	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
	return retval;
}
module_init(ohci_hcd_mod_init);
/*
 * ohci_hcd_mod_exit - unregister all bus-glue drivers, in reverse order
 * of their registration in ohci_hcd_mod_init().
 *
 * Fix: OMAP1_PLATFORM_DRIVER is registered in ohci_hcd_mod_init() but
 * was never unregistered here, leaving a dangling platform driver after
 * module unload; it is now unregistered in its reverse-order slot
 * (after OMAP3, before the generic PLATFORM driver).
 */
static void __exit ohci_hcd_mod_exit(void)
{
#ifdef TMIO_OHCI_DRIVER
	platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
#ifdef SM501_OHCI_DRIVER
	platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
#ifdef SSB_OHCI_DRIVER
	ssb_driver_unregister(&SSB_OHCI_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
#ifdef SA1111_DRIVER
	sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
	platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef OMAP3_PLATFORM_DRIVER
	platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
#endif
#ifdef OMAP1_PLATFORM_DRIVER
	platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
#ifdef DEBUG
	debugfs_remove(ohci_debug_root);
#endif
	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
| gpl-2.0 |
syhost/android_kernel_zte_nx503a | arch/s390/boot/compressed/misc.c | 4786 | 3788 | /*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/ipl.h>
#include "sizes.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
extern char _text, _end;
extern char _bss, _ebss;
static void error(char *m);
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
/*
 * _sclp_print_early() is implemented in assembler and writes a string
 * to the SCLP firmware console.  Fix: the old declaration relied on an
 * implicit 'int' return type, which has been invalid since C99 - give
 * it an explicit prototype.  (Its return value - presumably the SCLP
 * call's status - is ignored here; confirm against the asm source.)
 */
extern int _sclp_print_early(const char *);

/* Minimal puts() for the decompressor's progress and error messages. */
static int puts(const char *s)
{
	_sclp_print_early(s);
	return 0;
}
void *memset(void *s, int c, size_t n)
{
char *xs;
if (c == 0)
return __builtin_memset(s, 0, n);
xs = (char *) s;
if (n > 0)
do {
*xs++ = c;
} while (--n > 0);
return s;
}
/*
 * Freestanding memcpy for the decompressor: forwards straight to the
 * compiler builtin.  Regions must not overlap; use memmove() for that.
 */
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
	return __builtin_memcpy(__dest, __src, __n);
}
/*
 * Freestanding, overlap-safe memmove for the decompressor.  When the
 * destination does not start past the source a forward copy via the
 * compiler builtin is safe; otherwise the bytes are copied
 * back-to-front so an overlapping tail is not clobbered.
 */
void *memmove(void *__dest, __const void *__src, size_t __n)
{
	unsigned char *dst;
	const unsigned char *src;

	if (__dest <= __src)
		return __builtin_memcpy(__dest, __src, __n);
	dst = __dest;
	src = __src;
	while (__n--)
		dst[__n] = src[__n];
	return __dest;
}
/*
 * error - report a fatal decompression failure and halt the machine.
 * @x: message to print on the SCLP console.
 *
 * Never returns: after printing, a disabled-wait PSW is loaded via
 * lpsw.  The 0xdeadbeef in the PSW address field makes the stop reason
 * recognizable on the operator console or in a dump.
 */
static void error(char *x)
{
	/* disabled-wait PSW: loading it stops the CPU */
	unsigned long long psw = 0x000a0000deadbeefULL;

	puts("\n\n");
	puts(x);
	puts("\n\n -- System halted");
	asm volatile("lpsw %0" : : "Q" (psw));
}
/*
* Safe guard the ipl parameter block against a memory area that will be
* overwritten. The validity check for the ipl parameter block is complex
* (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
* the ipl parameter block intersects with the passed memory area we can
* safely assume that we can read from that memory. In that case just copy
* the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
* block.
*/
static void check_ipl_parmblock(void *start, unsigned long size)
{
	void *src, *dst;

	/* firmware-provided pointer to the ipl parameter block */
	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	/* nothing to do unless the block intersects [start, start+size) */
	if (src + PAGE_SIZE <= start || src >= start + size)
		return;
	/* rescue one page to the architected origin before it is clobbered,
	 * then repoint lowcore at the copy */
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
/*
 * decompress_kernel - unpack the embedded kernel image.
 *
 * Chooses a page-aligned output address just past the decompressor and
 * its heap, preserves the ipl parameter block if it would be
 * overwritten, clears .bss, sets up the decompressor heap, optionally
 * relocates the initrd out of the target area, and runs the selected
 * decompress() implementation.  Returns the address the kernel was
 * decompressed to (where the boot code will jump).
 */
unsigned long decompress_kernel(void)
{
	unsigned long output_addr;
	unsigned char *output;

	/* place the output after the decompressor + heap, page aligned */
	output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
	check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
	memset(&_bss, 0, &_ebss - &_bss);
	free_mem_ptr = (unsigned long)&_end;
	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
	output = (unsigned char *) output_addr;

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd right behind the end of the decompressed
	 * kernel image.
	 */
	if (INITRD_START && INITRD_SIZE &&
	    INITRD_START < (unsigned long) output + SZ__bss_start) {
		check_ipl_parmblock(output + SZ__bss_start,
				    INITRD_START + INITRD_SIZE);
		memmove(output + SZ__bss_start,
			(void *) INITRD_START, INITRD_SIZE);
		INITRD_START = (unsigned long) output + SZ__bss_start;
	}
#endif

	puts("Uncompressing Linux... ");
	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
	puts("Ok, booting the kernel.\n");
	return (unsigned long) output;
}
| gpl-2.0 |
Euphoria-OS-Devices/android_kernel_lge_msm8974 | drivers/xen/xen-acpi-processor.c | 4786 | 16724 | /*
* Copyright 2012 by Oracle Inc
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*
* This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
* so many thanks go to Kevin Tian <kevin.tian@intel.com>
* and Yu Ke <ke.yu@intel.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
#define DRV_NAME "xen-acpi-processor: "
static int no_hypercall;
MODULE_PARM_DESC(off, "Inhibit the hypercall.");
module_param_named(off, no_hypercall, int, 0400);
/*
* Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit
* - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which
* can be less than what we want to put in. Instead use the 'nr_acpi_bits'
* which is dynamically computed based on the MADT or x2APIC table.
*/
static unsigned int nr_acpi_bits;
/* Mutex to protect the acpi_ids_done - for CPU hotplug use. */
static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI ID we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
static unsigned long __initdata *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
static unsigned long __initdata *acpi_id_cst_present;
/*
 * push_cxx_to_hypervisor - upload one processor's ACPI C-state table.
 * @_pr: processor whose power (C-state) data has been parsed.
 *
 * Converts every valid acpi_processor_cx entry into the Xen wire format
 * and issues the XENPF_set_processor_pminfo hypercall (skipped when the
 * 'off' module parameter is set).  Returns 0 on success, -ENOMEM on
 * allocation failure, -EINVAL when no valid C-state exists, or the
 * hypercall's error code.
 */
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id = _pr->acpi_id,
		.u.set_pminfo.type = XEN_PM_CX,
	};
	struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
	struct acpi_processor_cx *cx;
	unsigned int i, ok;
	int ret = 0;

	dst_cx_states = kcalloc(_pr->power.count,
				sizeof(struct xen_processor_cx), GFP_KERNEL);
	if (!dst_cx_states)
		return -ENOMEM;

	/* ACPI C-state tables are 1-based; 'ok' counts the valid entries */
	for (ok = 0, i = 1; i <= _pr->power.count; i++) {
		cx = &_pr->power.states[i];
		if (!cx->valid)
			continue;

		dst_cx = &(dst_cx_states[ok++]);

		dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
		if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			dst_cx->reg.bit_width = 8;
			dst_cx->reg.bit_offset = 0;
			dst_cx->reg.access_size = 1;
		} else {
			dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			if (cx->entry_method == ACPI_CSTATE_FFH) {
				/* NATIVE_CSTATE_BEYOND_HALT */
				dst_cx->reg.bit_offset = 2;
				dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */
			}
			dst_cx->reg.access_size = 0;
		}
		dst_cx->reg.address = cx->address;
		dst_cx->type = cx->type;
		dst_cx->latency = cx->latency;
		dst_cx->power = cx->power;
		dst_cx->dpcnt = 0;
		set_xen_guest_handle(dst_cx->dp, NULL);
	}
	if (!ok) {
		pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id);
		kfree(dst_cx_states);
		return -EINVAL;
	}
	op.u.set_pminfo.power.count = ok;
	op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
	op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
	op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
	op.u.set_pminfo.power.flags.power_setup_done =
		_pr->flags.power_setup_done;

	set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);

	if (!no_hypercall)
		ret = HYPERVISOR_dom0_op(&op);

	if (!ret) {
		pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
		for (i = 1; i <= _pr->power.count; i++) {
			cx = &_pr->power.states[i];
			if (!cx->valid)
				continue;
			pr_debug(" C%d: %s %d uS\n",
				cx->type, cx->desc, (u32)cx->latency);
		}
	} else if (ret != -EINVAL)
		/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
		 * table is referencing a non-existing CPU - which can happen
		 * with broken ACPI tables. */
		pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
			ret, _pr->acpi_id);

	/* dst_cx_states is only needed for the hypercall's duration */
	kfree(dst_cx_states);

	return ret;
}
/*
 * xen_copy_pss_data - duplicate the ACPI _PSS state table for Xen.
 * @_pr:      processor with a populated performance->states array.
 * @dst_perf: Xen performance record whose state_count is filled in.
 *
 * Allocates and returns a xen_processor_px array mirroring the ACPI
 * P-state entries, or ERR_PTR(-ENOMEM).  The caller owns (and frees)
 * the returned array.
 */
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
		  struct xen_processor_performance *dst_perf)
{
	struct xen_processor_px *pxx;
	unsigned int idx;

	/* The two layouts must stay bit-for-bit identical. */
	BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
		     sizeof(struct acpi_processor_px));

	pxx = kcalloc(_pr->performance->state_count,
		      sizeof(struct xen_processor_px), GFP_KERNEL);
	if (pxx == NULL)
		return ERR_PTR(-ENOMEM);

	dst_perf->state_count = _pr->performance->state_count;
	/* Identical layout (see BUILD_BUG_ON): copy state by state. */
	for (idx = 0; idx < _pr->performance->state_count; idx++)
		memcpy(&pxx[idx], &_pr->performance->states[idx],
		       sizeof(struct acpi_processor_px));
	return pxx;
}
/*
 * xen_copy_psd_data - copy the ACPI _PSD (dependency domain) info into
 * the Xen performance record.
 * @_pr: processor whose performance data was preregistered.
 * @dst: destination Xen performance record.
 *
 * Always returns 0.  Valid only after
 * acpi_processor_preregister_performance() has run.
 */
static int xen_copy_psd_data(struct acpi_processor *_pr,
			     struct xen_processor_performance *dst)
{
	struct acpi_psd_package *pdomain;

	BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
		     sizeof(struct acpi_psd_package));

	/* Enumerated by acpi_processor_preregister_performance(). */
	dst->shared_type = _pr->performance->shared_type;

	pdomain = &(_pr->performance->domain_info);

	/* 'acpi_processor_preregister_performance' does not parse if the
	 * num_processors <= 1, but Xen still requires it. Do it manually
	 * here by deriving shared_type from the coordination type.
	 */
	if (pdomain->num_processors <= 1) {
		switch (pdomain->coord_type) {
		case DOMAIN_COORD_TYPE_SW_ALL:
			dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
			break;
		case DOMAIN_COORD_TYPE_HW_ALL:
			dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
			break;
		case DOMAIN_COORD_TYPE_SW_ANY:
			dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;
			break;
		default:
			/* unrecognized coordination: keep ACPI's value */
			break;
		}
	}
	memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));
	return 0;
}
/*
 * xen_copy_pct_data - translate an ACPI _PCT register description into
 * the Xen wire format, field by field.
 * @pct:     source ACPI performance-control/status register description.
 * @dst_pct: destination Xen register description.
 *
 * Always returns 0.
 */
static int xen_copy_pct_data(struct acpi_pct_register *pct,
			     struct xen_pct_register *dst_pct)
{
	/* It would be nice if you could just do 'memcpy(pct, dst_pct') but
	 * sadly the Xen structure did not have the proper padding so the
	 * descriptor field takes two (dst_pct) bytes instead of one (pct).
	 */
	dst_pct->descriptor = pct->descriptor;
	dst_pct->length = pct->length;
	dst_pct->space_id = pct->space_id;
	dst_pct->bit_width = pct->bit_width;
	dst_pct->bit_offset = pct->bit_offset;
	dst_pct->reserved = pct->reserved;
	dst_pct->address = pct->address;
	return 0;
}
/*
 * push_pxx_to_hypervisor - upload one processor's ACPI P-state data.
 * @_pr: processor with parsed performance (_PPC/_PCT/_PSS/_PSD) data.
 *
 * Assembles the four P-state components into a
 * XENPF_set_processor_pminfo hypercall; all four must be present or
 * the upload is skipped with -ENODEV.  Returns 0 on success, -ENODEV
 * on incomplete data, or the hypercall's error code.
 */
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
	int ret = 0;
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id = _pr->acpi_id,
		.u.set_pminfo.type = XEN_PM_PX,
	};
	struct xen_processor_performance *dst_perf;
	struct xen_processor_px *dst_states = NULL;

	dst_perf = &op.u.set_pminfo.perf;

	/* _PPC: platform-imposed performance limit */
	dst_perf->platform_limit = _pr->performance_platform_limit;
	dst_perf->flags |= XEN_PX_PPC;
	/* _PCT: control and status registers */
	xen_copy_pct_data(&(_pr->performance->control_register),
			  &dst_perf->control_register);
	xen_copy_pct_data(&(_pr->performance->status_register),
			  &dst_perf->status_register);
	dst_perf->flags |= XEN_PX_PCT;
	/* _PSS: the P-state table itself */
	dst_states = xen_copy_pss_data(_pr, dst_perf);
	if (!IS_ERR_OR_NULL(dst_states)) {
		set_xen_guest_handle(dst_perf->states, dst_states);
		dst_perf->flags |= XEN_PX_PSS;
	}
	/* _PSD: dependency-domain information */
	if (!xen_copy_psd_data(_pr, dst_perf))
		dst_perf->flags |= XEN_PX_PSD;

	if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
		pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n",
			_pr->acpi_id, dst_perf->flags);
		ret = -ENODEV;
		goto err_free;
	}

	if (!no_hypercall)
		ret = HYPERVISOR_dom0_op(&op);

	if (!ret) {
		struct acpi_processor_performance *perf;
		unsigned int i;

		perf = _pr->performance;
		pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
		for (i = 0; i < perf->state_count; i++) {
			pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
				(i == perf->state ? '*' : ' '), i,
				(u32) perf->states[i].core_frequency,
				(u32) perf->states[i].power,
				(u32) perf->states[i].transition_latency);
		}
	} else if (ret != -EINVAL)
		/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
		 * table is referencing a non-existing CPU - which can happen
		 * with broken ACPI tables. */
		pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
			ret, _pr->acpi_id);
err_free:
	/* dst_states may be a real allocation or an ERR_PTR - only
	 * kfree() the former */
	if (!IS_ERR_OR_NULL(dst_states))
		kfree(dst_states);

	return ret;
}
/*
 * upload_pm_data - push one processor's C- and P-state data to Xen,
 * at most once per ACPI ID.
 * @_pr: processor whose data should be uploaded.
 *
 * Returns -EBUSY when this ACPI ID was already processed (e.g. via CPU
 * hotplug re-entry), otherwise the combined result of the C-state and
 * P-state uploads.
 */
static int upload_pm_data(struct acpi_processor *_pr)
{
	int err = 0;

	mutex_lock(&acpi_ids_mutex);
	/* acpi_ids_done guards against double uploads of the same ID */
	if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
		mutex_unlock(&acpi_ids_mutex);
		return -EBUSY;
	}
	if (_pr->flags.power)
		err = push_cxx_to_hypervisor(_pr);

	/* NOTE(review): OR-ing two negative errnos can blend error codes;
	 * callers appear to test only for non-zero, so this is tolerated */
	if (_pr->performance && _pr->performance->states)
		err |= push_pxx_to_hypervisor(_pr);

	mutex_unlock(&acpi_ids_mutex);
	return err;
}
/*
 * get_max_acpi_id - ask Xen for the highest physical ACPI CPU ID.
 *
 * Iterates the hypervisor's per-pcpu records (which may cover more
 * CPUs than the initial domain's vcpus) and returns twice the largest
 * ACPI ID seen, leaving slack for CPU hotplug.  Falls back to NR_CPUS
 * when the initial hypercall fails.
 */
static unsigned int __init get_max_acpi_id(void)
{
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	int ret = 0;
	unsigned int i, last_cpu, max_acpi_id = 0;

	info = &op.u.pcpu_info;
	info->xen_cpuid = 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return NR_CPUS;

	/* The max_present is the same irregardless of the xen_cpuid */
	last_cpu = op.u.pcpu_info.max_present;
	for (i = 0; i <= last_cpu; i++) {
		info->xen_cpuid = i;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			continue; /* skip pcpus the hypervisor won't report */
		max_acpi_id = max(info->acpi_id, max_acpi_id);
	}
	max_acpi_id *= 2; /* Slack for CPU hotplug support. */
	pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id);
	return max_acpi_id;
}
/*
* The read_acpi_id and check_acpi_ids are there to support the Xen
* oddity of virtual CPUs != physical CPUs in the initial domain.
* The user can supply 'xen_max_vcpus=X' on the Xen hypervisor line
* which will band the amount of CPUs the initial domain can see.
* In general that is OK, except it plays havoc with any of the
* for_each_[present|online]_cpu macros which are banded to the virtual
* CPU amount.
*/
/*
 * read_acpi_id - namespace-walk callback recording which ACPI processor
 * IDs (and which of them have C-states) exist in the DSDT/SSDT.
 * @handle:  handle of the Processor object or ACPI0007 Device.
 * @lvl:     unused (walk depth).
 * @context: unused.
 * @rv:      unused.
 *
 * Sets the matching bits in acpi_id_present and - when a _CST method or
 * a PBLK address exists - in acpi_id_cst_present.  Always returns AE_OK
 * so the walk continues past malformed entries.
 */
static acpi_status __init
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	acpi_io_address pblk = 0;

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return AE_OK;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		/* classic Processor() declaration: ID and PBLK are inline */
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return AE_OK;
		acpi_id = object.processor.proc_id;
		pblk = object.processor.pblk_address;
		break;
	case ACPI_TYPE_DEVICE:
		/* ACPI0007 processor Device: the ID lives in _UID */
		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return AE_OK;
		acpi_id = tmp;
		break;
	default:
		return AE_OK;
	}
	/* There are more ACPI Processor objects than in x2APIC or MADT.
	 * This can happen with incorrect ACPI SSDT declerations. */
	if (acpi_id > nr_acpi_bits) {
		pr_debug(DRV_NAME "We only have %u, trying to set %u\n",
			nr_acpi_bits, acpi_id);
		return AE_OK;
	}
	/* OK, There is a ACPI Processor object */
	__set_bit(acpi_id, acpi_id_present);

	pr_debug(DRV_NAME "ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id,
		(unsigned long)pblk);

	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* no _CST - a PBLK address alone still implies C-states */
		if (!pblk)
			return AE_OK;
	}
	/* .. and it has a C-state */
	__set_bit(acpi_id, acpi_id_cst_present);

	return AE_OK;
}
/*
 * Cross-check "online vCPUs" against the physical processors the ACPI
 * firmware declares, and upload PM data for any processor Linux itself
 * never brought up.
 *
 * @pr_backup: a template acpi_processor captured from a real CPU; its
 *             acpi_id and power flag are patched per missing processor
 *             before each upload_pm_data() call.
 *
 * Returns 0 on success, -ENODEV if no template is available, or
 * -ENOMEM if the tracking bitmaps cannot be allocated.
 */
static int __init check_acpi_ids(struct acpi_processor *pr_backup)
{
	if (!pr_backup)
		return -ENODEV;

	/* All online CPUs have been processed at this stage. Now verify
	 * whether in fact "online CPUs" == physical CPUs.
	 */
	acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_id_present)
		return -ENOMEM;

	acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_id_cst_present) {
		kfree(acpi_id_present);
		return -ENOMEM;
	}

	/* Collect every processor the firmware declares, both as classic
	 * Processor objects and as ACPI0007 Device objects. */
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    read_acpi_id, NULL, NULL, NULL);
	acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);

	if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
		unsigned int i;
		for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
			pr_backup->acpi_id = i;
			/* Mask out C-states if there are no _CST or PBLK */
			pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
			(void)upload_pm_data(pr_backup);
		}
	}
	kfree(acpi_id_present);
	acpi_id_present = NULL;
	kfree(acpi_id_cst_present);
	acpi_id_cst_present = NULL;
	return 0;
}
/*
 * Gate module loading: we must be the Xen initial domain, the FADT must
 * advertise an SMI command port, and the boot CPU must support hardware
 * P-state control (Intel EST, or AMD hardware pstates per CPUID
 * 0x80000007).  Returns 0 when all prerequisites hold, -ENODEV
 * otherwise.
 */
static int __init check_prereq(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!xen_initial_domain())
		return -ENODEV;

	if (!acpi_gbl_FADT.smi_command)
		return -ENODEV;

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if (!cpu_has(c, X86_FEATURE_EST))
			return -ENODEV;

		return 0;
	}
	if (c->x86_vendor == X86_VENDOR_AMD) {
		/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
		 * as we get compile warnings for the static functions.
		 */
#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
#define USE_HW_PSTATE 0x00000080
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
			return -ENODEV;
		return 0;
	}
	return -ENODEV;
}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static void free_acpi_perf_data(void)
{
unsigned int i;
/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
for_each_possible_cpu(i)
free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
->shared_cpu_map);
free_percpu(acpi_perf_data);
}
/*
 * Module init: allocate tracking bitmaps and per-CPU performance data,
 * register every possible CPU's performance object with the ACPI core,
 * upload the PM data for each processor to the hypervisor, and finally
 * let check_acpi_ids() cover processors Linux itself never onlined.
 *
 * Fixes over the original:
 *  - pr_backup's kzalloc() result was used unchecked, so an allocation
 *    failure would memcpy() into NULL;
 *  - pr_backup leaked on the check_acpi_ids() failure path.
 */
static int __init xen_acpi_processor_init(void)
{
	struct acpi_processor *pr_backup = NULL;
	unsigned int i;
	int rc = check_prereq();

	if (rc)
		return rc;

	nr_acpi_bits = get_max_acpi_id() + 1;
	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_ids_done)
		return -ENOMEM;

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug(DRV_NAME "Memory allocation error for acpi_perf_data.\n");
		kfree(acpi_ids_done);
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {
			rc = -ENOMEM;
			goto err_out;
		}
	}

	/* Do initialization in ACPI core. It is OK to fail here. */
	(void)acpi_processor_preregister_performance(acpi_perf_data);

	for_each_possible_cpu(i) {
		struct acpi_processor_performance *perf;

		perf = per_cpu_ptr(acpi_perf_data, i);
		rc = acpi_processor_register_performance(perf, i);
		if (rc)
			goto err_out;
	}

	rc = acpi_processor_notify_smm(THIS_MODULE);
	if (rc)
		goto err_unregister;

	for_each_possible_cpu(i) {
		struct acpi_processor *_pr;

		_pr = per_cpu(processors, i /* APIC ID */);
		if (!_pr)
			continue;

		if (!pr_backup) {
			pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
			/* kzalloc can fail: only copy into a live buffer.
			 * check_acpi_ids() tolerates a NULL template. */
			if (pr_backup)
				memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
		}
		(void)upload_pm_data(_pr);
	}

	rc = check_acpi_ids(pr_backup);
	if (rc)
		goto err_unregister;

	kfree(pr_backup);
	return 0;

err_unregister:
	for_each_possible_cpu(i) {
		struct acpi_processor_performance *perf;

		perf = per_cpu_ptr(acpi_perf_data, i);
		acpi_processor_unregister_performance(perf, i);
	}
err_out:
	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
	kfree(pr_backup);
	free_acpi_perf_data();
	kfree(acpi_ids_done);
	return rc;
}
/*
 * Module unload: drop the done-bitmap, unregister every possible CPU's
 * performance object from the ACPI core, then release the per-CPU
 * performance data (including the shared_cpu_map masks) via
 * free_acpi_perf_data().
 */
static void __exit xen_acpi_processor_exit(void)
{
	int i;

	kfree(acpi_ids_done);
	for_each_possible_cpu(i) {
		struct acpi_processor_performance *perf;

		perf = per_cpu_ptr(acpi_perf_data, i);
		acpi_processor_unregister_performance(perf, i);
	}
	free_acpi_perf_data();
}
MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>");
MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor");
MODULE_LICENSE("GPL");
/* We want to be loaded before the CPU freq scaling drivers are loaded.
* They are loaded in late_initcall. */
device_initcall(xen_acpi_processor_init);
module_exit(xen_acpi_processor_exit);
| gpl-2.0 |
drowningchild/msm-2.6.38 | drivers/rtc/rtc-ds1672.c | 4786 | 5179 | /*
* An rtc/i2c driver for the Dallas DS1672
* Copyright 2005-06 Tower Technologies
*
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/i2c.h>
#include <linux/rtc.h>
#define DRV_VERSION "0.4"
/* Registers */
#define DS1672_REG_CNT_BASE 0
#define DS1672_REG_CONTROL 4
#define DS1672_REG_TRICKLE 5
#define DS1672_REG_CONTROL_EOSC 0x80
static struct i2c_driver ds1672_driver;
/*
* In the routines that deal directly with the ds1672 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch
* Epoch is initialized as 2000. Time is set to UTC.
*/
/*
 * Read the chip's 32-bit seconds counter and convert it to broken-down
 * time via rtc_time_to_tm().
 *
 * Uses a two-message I2C transfer: first a write that positions the
 * chip's register pointer at DS1672_REG_CNT_BASE, then a 4-byte read of
 * the counter.  The counter bytes arrive least-significant first.
 *
 * Returns 0 on success, -EIO on I2C transfer failure.
 */
static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
	unsigned long time;
	unsigned char addr = DS1672_REG_CNT_BASE;
	unsigned char buf[4];

	struct i2c_msg msgs[] = {
		{client->addr, 0, 1, &addr},		/* setup read ptr */
		{client->addr, I2C_M_RD, 4, buf},	/* read date */
	};

	/* read date registers */
	if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}

	dev_dbg(&client->dev,
		"%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
		__func__, buf[0], buf[1], buf[2], buf[3]);

	/* Assemble the little-endian counter bytes into seconds. */
	time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];

	rtc_time_to_tm(time, tm);

	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__, tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	return 0;
}
/*
 * Program the 32-bit seconds counter and re-enable counting.
 *
 * A single 6-byte write: the register address, the four counter bytes
 * (least-significant first), and a zeroed control register so the
 * oscillator keeps running.  Returns 0 on success, -EIO on failure.
 */
static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
{
	unsigned char msg[6];
	int i, ret;

	msg[0] = DS1672_REG_CNT_BASE;
	for (i = 0; i < 4; i++)
		msg[1 + i] = (secs >> (8 * i)) & 0xFF;
	msg[5] = 0;	/* set control reg to enable counting */

	ret = i2c_master_send(client, msg, 6);
	if (ret != 6) {
		dev_err(&client->dev, "%s: send: %d\n", __func__, ret);
		return -EIO;
	}

	return 0;
}
static int ds1672_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return ds1672_get_datetime(to_i2c_client(dev), tm);
}
static int ds1672_rtc_set_mmss(struct device *dev, unsigned long secs)
{
return ds1672_set_mmss(to_i2c_client(dev), secs);
}
static int ds1672_get_control(struct i2c_client *client, u8 *status)
{
unsigned char addr = DS1672_REG_CONTROL;
struct i2c_msg msgs[] = {
{client->addr, 0, 1, &addr}, /* setup read ptr */
{client->addr, I2C_M_RD, 1, status}, /* read control */
};
/* read control register */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
return 0;
}
/* following are the sysfs callback functions */
static ssize_t show_control(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
u8 control;
int err;
err = ds1672_get_control(client, &control);
if (err)
return err;
return sprintf(buf, "%s\n", (control & DS1672_REG_CONTROL_EOSC)
? "disabled" : "enabled");
}
static DEVICE_ATTR(control, S_IRUGO, show_control, NULL);
static const struct rtc_class_ops ds1672_rtc_ops = {
.read_time = ds1672_rtc_read_time,
.set_mmss = ds1672_rtc_set_mmss,
};
static int ds1672_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
if (rtc)
rtc_device_unregister(rtc);
return 0;
}
/*
 * Probe: verify plain-I2C capability, register the RTC class device,
 * read the control register (warning if the oscillator is stopped —
 * ds1672_set_mmss() re-enables it when the time is set), and create the
 * "control" sysfs attribute.  Any failure after registration unwinds
 * through exit_devreg.
 */
static int ds1672_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int err = 0;
	u8 control;
	struct rtc_device *rtc;

	dev_dbg(&client->dev, "%s\n", __func__);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = rtc_device_register(ds1672_driver.driver.name, &client->dev,
				  &ds1672_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	/* read control register */
	err = ds1672_get_control(client, &control);
	if (err)
		goto exit_devreg;

	if (control & DS1672_REG_CONTROL_EOSC)
		dev_warn(&client->dev, "Oscillator not enabled. "
			 "Set time to enable.\n");

	/* Register sysfs hooks */
	err = device_create_file(&client->dev, &dev_attr_control);
	if (err)
		goto exit_devreg;

	return 0;

exit_devreg:
	rtc_device_unregister(rtc);
	return err;
}
static struct i2c_device_id ds1672_id[] = {
{ "ds1672", 0 },
{ }
};
static struct i2c_driver ds1672_driver = {
.driver = {
.name = "rtc-ds1672",
},
.probe = &ds1672_probe,
.remove = &ds1672_remove,
.id_table = ds1672_id,
};
static int __init ds1672_init(void)
{
return i2c_add_driver(&ds1672_driver);
}
static void __exit ds1672_exit(void)
{
i2c_del_driver(&ds1672_driver);
}
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Dallas/Maxim DS1672 timekeeper driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(ds1672_init);
module_exit(ds1672_exit);
| gpl-2.0 |
adis1313/android_kernel_samsung_ks01lte | arch/arm/mach-sa1100/leds-lart.c | 4786 | 2001 | /*
* linux/arch/arm/mach-sa1100/leds-lart.c
*
* (C) Erik Mouw (J.A.K.Mouw@its.tudelft.nl), April 21, 2000
*
* LART uses the LED as follows:
* - GPIO23 is the LED, on if system is not idle
* You can use both CONFIG_LEDS_CPU and CONFIG_LEDS_TIMER at the same
* time, but in that case the timer events will still dictate the
* pace of the LED.
*/
#include <linux/init.h>
#include <mach/hardware.h>
#include <asm/leds.h>
#include "leds.h"
#define LED_STATE_ENABLED 1
#define LED_STATE_CLAIMED 2
static unsigned int led_state;
static unsigned int hw_led_state;
#define LED_23 GPIO_GPIO23
#define LED_MASK (LED_23)
/*
 * Single entry point for all LED events on the LART board.
 *
 * The board has one LED on GPIO23.  led_state tracks whether the LED
 * machinery is enabled and whether a user has claimed the LED;
 * hw_led_state mirrors what should be driven onto the pin.  Runs with
 * interrupts disabled since events arrive from timer/idle context.
 */
void lart_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch(evt) {
	case led_start:
		/* pin 23 is output pin */
		GPDR |= LED_23;
		hw_led_state = LED_MASK;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		/* A user takes over; reset to the default pattern. */
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		/* Toggle on each timer tick while unclaimed. */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= LED_23;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/* The LART people like the LED to be off when the
		   system is idle... */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_23;
		break;

	case led_idle_end:
		/* ... and on if the system is not idle */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_23;
		break;
#endif

	case led_red_on:
		/* NOTE(review): red_on CLEARS the bit while claimed, so the
		 * LED looks active-low here — confirm against board docs. */
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~LED_23;
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= LED_23;
		break;

	default:
		break;
	}

	/* Now set the GPIO state, or nothing will happen at all */
	if (led_state & LED_STATE_ENABLED) {
		GPSR = hw_led_state;
		GPCR = hw_led_state ^ LED_MASK;
	}

	local_irq_restore(flags);
}
| gpl-2.0 |
poondog/kangaroo-m7-mkII | drivers/input/touchscreen/pcap_ts.c | 5042 | 6720 | /*
* Driver for Motorola PCAP2 touchscreen as found in the EZX phone platform.
*
* Copyright (C) 2006 Harald Welte <laforge@openezx.org>
* Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/mfd/ezx-pcap.h>
struct pcap_ts {
struct pcap_chip *pcap;
struct input_dev *input;
struct delayed_work work;
u16 x, y;
u16 pressure;
u8 read_state;
};
#define SAMPLE_DELAY 20 /* msecs */
#define X_AXIS_MIN 0
#define X_AXIS_MAX 1023
#define Y_AXIS_MAX X_AXIS_MAX
#define Y_AXIS_MIN X_AXIS_MIN
#define PRESSURE_MAX X_AXIS_MAX
#define PRESSURE_MIN X_AXIS_MIN
/*
 * ADC completion callback: drives the measurement state machine.
 *
 * In PRESSURE mode it stores a plausible pressure sample and schedules
 * an XY conversion next.  In XY mode it either reports pen-up (when a
 * coordinate sits at an axis limit) and returns to STANDBY, or reports
 * position+pressure to the input core and schedules the next pressure
 * sample after SAMPLE_DELAY.  @res holds the two ADC channel results.
 */
static void pcap_ts_read_xy(void *data, u16 res[2])
{
	struct pcap_ts *pcap_ts = data;

	switch (pcap_ts->read_state) {
	case PCAP_ADC_TS_M_PRESSURE:
		/* pressure reading is unreliable */
		if (res[0] > PRESSURE_MIN && res[0] < PRESSURE_MAX)
			pcap_ts->pressure = res[0];
		pcap_ts->read_state = PCAP_ADC_TS_M_XY;
		schedule_delayed_work(&pcap_ts->work, 0);
		break;
	case PCAP_ADC_TS_M_XY:
		pcap_ts->y = res[0];
		pcap_ts->x = res[1];
		if (pcap_ts->x <= X_AXIS_MIN || pcap_ts->x >= X_AXIS_MAX ||
		    pcap_ts->y <= Y_AXIS_MIN || pcap_ts->y >= Y_AXIS_MAX) {
			/* pen has been released */
			input_report_abs(pcap_ts->input, ABS_PRESSURE, 0);
			input_report_key(pcap_ts->input, BTN_TOUCH, 0);

			pcap_ts->read_state = PCAP_ADC_TS_M_STANDBY;
			schedule_delayed_work(&pcap_ts->work, 0);
		} else {
			/* pen is touching the screen */
			input_report_abs(pcap_ts->input, ABS_X, pcap_ts->x);
			input_report_abs(pcap_ts->input, ABS_Y, pcap_ts->y);
			input_report_key(pcap_ts->input, BTN_TOUCH, 1);
			input_report_abs(pcap_ts->input, ABS_PRESSURE,
					 pcap_ts->pressure);

			/* switch back to pressure read mode */
			pcap_ts->read_state = PCAP_ADC_TS_M_PRESSURE;
			schedule_delayed_work(&pcap_ts->work,
					      msecs_to_jiffies(SAMPLE_DELAY));
		}
		input_sync(pcap_ts->input);
		break;
	default:
		dev_warn(&pcap_ts->input->dev,
			 "pcap_ts: Warning, unhandled read_state %d\n",
			 pcap_ts->read_state);
		break;
	}
}
static void pcap_ts_work(struct work_struct *work)
{
struct delayed_work *dw = container_of(work, struct delayed_work, work);
struct pcap_ts *pcap_ts = container_of(dw, struct pcap_ts, work);
u8 ch[2];
pcap_set_ts_bits(pcap_ts->pcap,
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
if (pcap_ts->read_state == PCAP_ADC_TS_M_STANDBY)
return;
/* start adc conversion */
ch[0] = PCAP_ADC_CH_TS_X1;
ch[1] = PCAP_ADC_CH_TS_Y1;
pcap_adc_async(pcap_ts->pcap, PCAP_ADC_BANK_1, 0, ch,
pcap_ts_read_xy, pcap_ts);
}
static irqreturn_t pcap_ts_event_touch(int pirq, void *data)
{
struct pcap_ts *pcap_ts = data;
if (pcap_ts->read_state == PCAP_ADC_TS_M_STANDBY) {
pcap_ts->read_state = PCAP_ADC_TS_M_PRESSURE;
schedule_delayed_work(&pcap_ts->work, 0);
}
return IRQ_HANDLED;
}
static int pcap_ts_open(struct input_dev *dev)
{
struct pcap_ts *pcap_ts = input_get_drvdata(dev);
pcap_ts->read_state = PCAP_ADC_TS_M_STANDBY;
schedule_delayed_work(&pcap_ts->work, 0);
return 0;
}
static void pcap_ts_close(struct input_dev *dev)
{
struct pcap_ts *pcap_ts = input_get_drvdata(dev);
cancel_delayed_work_sync(&pcap_ts->work);
pcap_ts->read_state = PCAP_ADC_TS_M_NONTS;
pcap_set_ts_bits(pcap_ts->pcap,
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
}
static int __devinit pcap_ts_probe(struct platform_device *pdev)
{
struct input_dev *input_dev;
struct pcap_ts *pcap_ts;
int err = -ENOMEM;
pcap_ts = kzalloc(sizeof(*pcap_ts), GFP_KERNEL);
if (!pcap_ts)
return err;
pcap_ts->pcap = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, pcap_ts);
input_dev = input_allocate_device();
if (!input_dev)
goto fail;
INIT_DELAYED_WORK(&pcap_ts->work, pcap_ts_work);
pcap_ts->read_state = PCAP_ADC_TS_M_NONTS;
pcap_set_ts_bits(pcap_ts->pcap,
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
pcap_ts->input = input_dev;
input_set_drvdata(input_dev, pcap_ts);
input_dev->name = "pcap-touchscreen";
input_dev->phys = "pcap_ts/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0002;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &pdev->dev;
input_dev->open = pcap_ts_open;
input_dev->close = pcap_ts_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN,
PRESSURE_MAX, 0, 0);
err = input_register_device(pcap_ts->input);
if (err)
goto fail_allocate;
err = request_irq(pcap_to_irq(pcap_ts->pcap, PCAP_IRQ_TS),
pcap_ts_event_touch, 0, "Touch Screen", pcap_ts);
if (err)
goto fail_register;
return 0;
fail_register:
input_unregister_device(input_dev);
goto fail;
fail_allocate:
input_free_device(input_dev);
fail:
kfree(pcap_ts);
return err;
}
static int __devexit pcap_ts_remove(struct platform_device *pdev)
{
struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
free_irq(pcap_to_irq(pcap_ts->pcap, PCAP_IRQ_TS), pcap_ts);
cancel_delayed_work_sync(&pcap_ts->work);
input_unregister_device(pcap_ts->input);
kfree(pcap_ts);
return 0;
}
#ifdef CONFIG_PM
static int pcap_ts_suspend(struct device *dev)
{
struct pcap_ts *pcap_ts = dev_get_drvdata(dev);
pcap_set_ts_bits(pcap_ts->pcap, PCAP_ADC_TS_REF_LOWPWR);
return 0;
}
static int pcap_ts_resume(struct device *dev)
{
struct pcap_ts *pcap_ts = dev_get_drvdata(dev);
pcap_set_ts_bits(pcap_ts->pcap,
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
return 0;
}
static const struct dev_pm_ops pcap_ts_pm_ops = {
.suspend = pcap_ts_suspend,
.resume = pcap_ts_resume,
};
#define PCAP_TS_PM_OPS (&pcap_ts_pm_ops)
#else
#define PCAP_TS_PM_OPS NULL
#endif
static struct platform_driver pcap_ts_driver = {
.probe = pcap_ts_probe,
.remove = __devexit_p(pcap_ts_remove),
.driver = {
.name = "pcap-ts",
.owner = THIS_MODULE,
.pm = PCAP_TS_PM_OPS,
},
};
module_platform_driver(pcap_ts_driver);
MODULE_DESCRIPTION("Motorola PCAP2 touchscreen driver");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcap_ts");
| gpl-2.0 |
HeydayGuan/android-kernel-3.0 | drivers/input/mousedev.c | 7602 | 26378 | /*
* Input driver to ExplorerPS/2 device driver module.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 2004 Dmitry Torokhov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MOUSEDEV_MINOR_BASE 32
#define MOUSEDEV_MINORS 32
#define MOUSEDEV_MIX 31
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/kernel.h>
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
#include <linux/miscdevice.h>
#endif
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces");
MODULE_LICENSE("GPL");
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X
#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
#endif
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y
#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
#endif
static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X;
module_param(xres, uint, 0644);
MODULE_PARM_DESC(xres, "Horizontal screen resolution");
static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y;
module_param(yres, uint, 0644);
MODULE_PARM_DESC(yres, "Vertical screen resolution");
static unsigned tap_time = 200;
module_param(tap_time, uint, 0644);
MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)");
struct mousedev_hw_data {
int dx, dy, dz;
int x, y;
int abs_event;
unsigned long buttons;
};
struct mousedev {
int open;
int minor;
struct input_handle handle;
wait_queue_head_t wait;
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
struct device dev;
bool exist;
struct list_head mixdev_node;
int mixdev_open;
struct mousedev_hw_data packet;
unsigned int pkt_count;
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
};
enum mousedev_emul {
MOUSEDEV_EMUL_PS2,
MOUSEDEV_EMUL_IMPS,
MOUSEDEV_EMUL_EXPS
};
struct mousedev_motion {
int dx, dy, dz;
unsigned long buttons;
};
#define PACKET_QUEUE_LEN 16
struct mousedev_client {
struct fasync_struct *fasync;
struct mousedev *mousedev;
struct list_head node;
struct mousedev_motion packets[PACKET_QUEUE_LEN];
unsigned int head, tail;
spinlock_t packet_lock;
int pos_x, pos_y;
signed char ps2[6];
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
unsigned long last_buttons;
};
#define MOUSEDEV_SEQ_LEN 6
static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct input_handler mousedev_handler;
static struct mousedev *mousedev_table[MOUSEDEV_MINORS];
static DEFINE_MUTEX(mousedev_table_mutex);
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
static void mixdev_open_devices(void);
static void mixdev_close_devices(void);
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
static void mousedev_touchpad_event(struct input_dev *dev,
struct mousedev *mousedev,
unsigned int code, int value)
{
int size, tmp;
enum { FRACTION_DENOM = 128 };
switch (code) {
case ABS_X:
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
size = input_abs_get_max(dev, ABS_X) -
input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dx;
mousedev->packet.dx = tmp / FRACTION_DENOM;
mousedev->frac_dx =
tmp - mousedev->packet.dx * FRACTION_DENOM;
}
break;
case ABS_Y:
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
/* use X size for ABS_Y to keep the same scale */
size = input_abs_get_max(dev, ABS_X) -
input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dy;
mousedev->packet.dy = tmp / FRACTION_DENOM;
mousedev->frac_dy = tmp -
mousedev->packet.dy * FRACTION_DENOM;
}
break;
}
}
static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
unsigned int code, int value)
{
int min, max, size;
switch (code) {
case ABS_X:
min = input_abs_get_min(dev, ABS_X);
max = input_abs_get_max(dev, ABS_X);
size = max - min;
if (size == 0)
size = xres ? : 1;
value = clamp(value, min, max);
mousedev->packet.x = ((value - min) * xres) / size;
mousedev->packet.abs_event = 1;
break;
case ABS_Y:
min = input_abs_get_min(dev, ABS_Y);
max = input_abs_get_max(dev, ABS_Y);
size = max - min;
if (size == 0)
size = yres ? : 1;
value = clamp(value, min, max);
mousedev->packet.y = yres - ((value - min) * yres) / size;
mousedev->packet.abs_event = 1;
break;
}
}
static void mousedev_rel_event(struct mousedev *mousedev,
unsigned int code, int value)
{
switch (code) {
case REL_X:
mousedev->packet.dx += value;
break;
case REL_Y:
mousedev->packet.dy -= value;
break;
case REL_WHEEL:
mousedev->packet.dz -= value;
break;
}
}
static void mousedev_key_event(struct mousedev *mousedev,
unsigned int code, int value)
{
int index;
switch (code) {
case BTN_TOUCH:
case BTN_0:
case BTN_LEFT: index = 0; break;
case BTN_STYLUS:
case BTN_1:
case BTN_RIGHT: index = 1; break;
case BTN_2:
case BTN_FORWARD:
case BTN_STYLUS2:
case BTN_MIDDLE: index = 2; break;
case BTN_3:
case BTN_BACK:
case BTN_SIDE: index = 3; break;
case BTN_4:
case BTN_EXTRA: index = 4; break;
default: return;
}
if (value) {
set_bit(index, &mousedev->packet.buttons);
set_bit(index, &mousedev_mix->packet.buttons);
} else {
clear_bit(index, &mousedev->packet.buttons);
clear_bit(index, &mousedev_mix->packet.buttons);
}
}
/*
 * Fan a completed hardware packet out to every reader of this device.
 *
 * Per client: open a fresh queue slot when the button state changed
 * since the client last saw it (so button transitions are never merged
 * away), convert absolute coordinates into relative motion against the
 * client's tracked screen position, clamp that position to the screen,
 * accumulate the deltas, and mark the client ready.  Finally wake
 * poll/SIGIO waiters.  Called with interrupts disabled; the client
 * list is RCU-protected, each client's queue by its packet_lock.
 */
static void mousedev_notify_readers(struct mousedev *mousedev,
				    struct mousedev_hw_data *packet)
{
	struct mousedev_client *client;
	struct mousedev_motion *p;
	unsigned int new_head;
	int wake_readers = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(client, &mousedev->client_list, node) {
		/* Just acquire the lock, interrupts already disabled */
		spin_lock(&client->packet_lock);

		p = &client->packets[client->head];
		if (client->ready && p->buttons != mousedev->packet.buttons) {
			/* Advance only if the ring has room; otherwise keep
			 * merging into the current slot. */
			new_head = (client->head + 1) % PACKET_QUEUE_LEN;
			if (new_head != client->tail) {
				p = &client->packets[client->head = new_head];
				memset(p, 0, sizeof(struct mousedev_motion));
			}
		}

		if (packet->abs_event) {
			/* Absolute position becomes a delta from where this
			 * client last placed the pointer. */
			p->dx += packet->x - client->pos_x;
			p->dy += packet->y - client->pos_y;
			client->pos_x = packet->x;
			client->pos_y = packet->y;
		}

		client->pos_x += packet->dx;
		client->pos_x = client->pos_x < 0 ?
			0 : (client->pos_x >= xres ? xres : client->pos_x);
		client->pos_y += packet->dy;
		client->pos_y = client->pos_y < 0 ?
			0 : (client->pos_y >= yres ? yres : client->pos_y);

		p->dx += packet->dx;
		p->dy += packet->dy;
		p->dz += packet->dz;
		p->buttons = mousedev->packet.buttons;

		if (p->dx || p->dy || p->dz ||
		    p->buttons != client->last_buttons)
			client->ready = 1;

		spin_unlock(&client->packet_lock);

		if (client->ready) {
			kill_fasync(&client->fasync, SIGIO, POLL_IN);
			wake_readers = 1;
		}
	}
	rcu_read_unlock();

	if (wake_readers)
		wake_up_interruptible(&mousedev->wait);
}
/*
 * Track touchpad contact state and emulate tap-to-click.
 *
 * On release within tap_time msecs of the initial touch, pulse the
 * left button through the mix device (whose packet carries no motion,
 * so the pointer position is unaffected).  Release also resets the
 * averaging state (pkt_count, fractional remainders); a fresh touch
 * just records its timestamp.
 */
static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
{
	if (!value) {
		if (mousedev->touch &&
		    time_before(jiffies,
				mousedev->touch + msecs_to_jiffies(tap_time))) {
			/*
			 * Toggle left button to emulate tap.
			 * We rely on the fact that mousedev_mix always has 0
			 * motion packet so we won't mess current position.
			 */
			set_bit(0, &mousedev->packet.buttons);
			set_bit(0, &mousedev_mix->packet.buttons);
			mousedev_notify_readers(mousedev, &mousedev_mix->packet);
			mousedev_notify_readers(mousedev_mix,
						&mousedev_mix->packet);
			clear_bit(0, &mousedev->packet.buttons);
			clear_bit(0, &mousedev_mix->packet.buttons);
		}
		mousedev->touch = mousedev->pkt_count = 0;
		mousedev->frac_dx = 0;
		mousedev->frac_dy = 0;

	} else if (!mousedev->touch)
		mousedev->touch = jiffies;
}
static void mousedev_event(struct input_handle *handle,
unsigned int type, unsigned int code, int value)
{
struct mousedev *mousedev = handle->private;
switch (type) {
case EV_ABS:
/* Ignore joysticks */
if (test_bit(BTN_TRIGGER, handle->dev->keybit))
return;
if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
mousedev_touchpad_event(handle->dev,
mousedev, code, value);
else
mousedev_abs_event(handle->dev, mousedev, code, value);
break;
case EV_REL:
mousedev_rel_event(mousedev, code, value);
break;
case EV_KEY:
if (value != 2) {
if (code == BTN_TOUCH &&
test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
mousedev_touchpad_touch(mousedev, value);
else
mousedev_key_event(mousedev, code, value);
}
break;
case EV_SYN:
if (code == SYN_REPORT) {
if (mousedev->touch) {
mousedev->pkt_count++;
/*
* Input system eats duplicate events,
* but we need all of them to do correct
* averaging so apply present one forward
*/
fx(0) = fx(1);
fy(0) = fy(1);
}
mousedev_notify_readers(mousedev, &mousedev->packet);
mousedev_notify_readers(mousedev_mix, &mousedev->packet);
mousedev->packet.dx = mousedev->packet.dy =
mousedev->packet.dz = 0;
mousedev->packet.abs_event = 0;
}
break;
}
}
static int mousedev_fasync(int fd, struct file *file, int on)
{
struct mousedev_client *client = file->private_data;
return fasync_helper(fd, file, on, &client->fasync);
}
static void mousedev_free(struct device *dev)
{
struct mousedev *mousedev = container_of(dev, struct mousedev, dev);
input_put_device(mousedev->handle.dev);
kfree(mousedev);
}
static int mousedev_open_device(struct mousedev *mousedev)
{
int retval;
retval = mutex_lock_interruptible(&mousedev->mutex);
if (retval)
return retval;
if (mousedev->minor == MOUSEDEV_MIX)
mixdev_open_devices();
else if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
if (retval)
mousedev->open--;
}
mutex_unlock(&mousedev->mutex);
return retval;
}
static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
if (mousedev->minor == MOUSEDEV_MIX)
mixdev_close_devices();
else if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
}
/*
* Open all available devices so they can all be multiplexed in one.
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_open_devices(void)
{
struct mousedev *mousedev;
if (mousedev_mix->open++)
return;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (!mousedev->mixdev_open) {
if (mousedev_open_device(mousedev))
continue;
mousedev->mixdev_open = 1;
}
}
}
/*
* Close all devices that were opened as part of multiplexed
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_close_devices(void)
{
struct mousedev *mousedev;
if (--mousedev_mix->open)
return;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (mousedev->mixdev_open) {
mousedev->mixdev_open = 0;
mousedev_close_device(mousedev);
}
}
}
static void mousedev_attach_client(struct mousedev *mousedev,
struct mousedev_client *client)
{
spin_lock(&mousedev->client_lock);
list_add_tail_rcu(&client->node, &mousedev->client_list);
spin_unlock(&mousedev->client_lock);
}
static void mousedev_detach_client(struct mousedev *mousedev,
struct mousedev_client *client)
{
spin_lock(&mousedev->client_lock);
list_del_rcu(&client->node);
spin_unlock(&mousedev->client_lock);
synchronize_rcu();
}
static int mousedev_release(struct inode *inode, struct file *file)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
mousedev_detach_client(mousedev, client);
kfree(client);
mousedev_close_device(mousedev);
put_device(&mousedev->dev);
return 0;
}
static int mousedev_open(struct inode *inode, struct file *file)
{
struct mousedev_client *client;
struct mousedev *mousedev;
int error;
int i;
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
if (imajor(inode) == MISC_MAJOR)
i = MOUSEDEV_MIX;
else
#endif
i = iminor(inode) - MOUSEDEV_MINOR_BASE;
if (i >= MOUSEDEV_MINORS)
return -ENODEV;
error = mutex_lock_interruptible(&mousedev_table_mutex);
if (error) {
return error;
}
mousedev = mousedev_table[i];
if (mousedev)
get_device(&mousedev->dev);
mutex_unlock(&mousedev_table_mutex);
if (!mousedev) {
return -ENODEV;
}
client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
if (!client) {
error = -ENOMEM;
goto err_put_mousedev;
}
spin_lock_init(&client->packet_lock);
client->pos_x = xres / 2;
client->pos_y = yres / 2;
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
error = mousedev_open_device(mousedev);
if (error)
goto err_free_client;
file->private_data = client;
return 0;
err_free_client:
mousedev_detach_client(mousedev, client);
kfree(client);
err_put_mousedev:
put_device(&mousedev->dev);
return error;
}
/* Clamp a motion delta to the symmetric range [-limit, limit]. */
static inline int mousedev_limit_delta(int delta, int limit)
{
	if (delta > limit)
		return limit;
	if (delta < -limit)
		return -limit;
	return delta;
}
static void mousedev_packet(struct mousedev_client *client,
signed char *ps2_data)
{
struct mousedev_motion *p = &client->packets[client->tail];
ps2_data[0] = 0x08 |
((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
ps2_data[1] = mousedev_limit_delta(p->dx, 127);
ps2_data[2] = mousedev_limit_delta(p->dy, 127);
p->dx -= ps2_data[1];
p->dy -= ps2_data[2];
switch (client->mode) {
case MOUSEDEV_EMUL_EXPS:
ps2_data[3] = mousedev_limit_delta(p->dz, 7);
p->dz -= ps2_data[3];
ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_IMPS:
ps2_data[0] |=
((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
ps2_data[3] = mousedev_limit_delta(p->dz, 127);
p->dz -= ps2_data[3];
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_PS2:
default:
ps2_data[0] |=
((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
p->dz = 0;
client->bufsiz = 3;
break;
}
if (!p->dx && !p->dy && !p->dz) {
if (client->tail == client->head) {
client->ready = 0;
client->last_buttons = p->buttons;
} else
client->tail = (client->tail + 1) % PACKET_QUEUE_LEN;
}
}
/*
 * Emulate a PS/2 mouse's reply to command byte 'command'.  The
 * response (always starting with an 0xfa ACK) is placed in
 * client->ps2[] and its length in client->bufsiz/client->buffer.
 */
static void mousedev_generate_response(struct mousedev_client *client,
				       int command)
{
	client->ps2[0] = 0xfa; /* ACK */

	switch (command) {
	case 0xeb: /* Poll */
		mousedev_packet(client, &client->ps2[1]);
		client->bufsiz++; /* account for leading ACK */
		break;

	case 0xf2: /* Get ID */
		/* Device ID encodes the emulation protocol in use. */
		switch (client->mode) {
		case MOUSEDEV_EMUL_PS2:
			client->ps2[1] = 0;
			break;
		case MOUSEDEV_EMUL_IMPS:
			client->ps2[1] = 3;
			break;
		case MOUSEDEV_EMUL_EXPS:
			client->ps2[1] = 4;
			break;
		}
		client->bufsiz = 2;
		break;

	case 0xe9: /* Get info */
		client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200;
		client->bufsiz = 4;
		break;

	case 0xff: /* Reset */
		/* Fall back to basic PS/2 and restart protocol detection. */
		client->impsseq = client->imexseq = 0;
		client->mode = MOUSEDEV_EMUL_PS2;
		client->ps2[1] = 0xaa; client->ps2[2] = 0x00;
		client->bufsiz = 3;
		break;

	default:
		/* Unknown commands get a bare ACK. */
		client->bufsiz = 1;
		break;
	}
	client->buffer = client->bufsiz;
}
/*
 * Writes are PS/2 command bytes from userspace (e.g. gpm/X): track the
 * magic ImPS/2 and ExplorerPS/2 init sequences to switch emulation
 * mode, and queue an emulated response for the next read.
 */
static ssize_t mousedev_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct mousedev_client *client = file->private_data;
	unsigned char c;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (get_user(c, buffer + i))
			return -EFAULT;

		spin_lock_irq(&client->packet_lock);

		/* Advance (or reset) the ExplorerPS/2 knock sequence. */
		if (c == mousedev_imex_seq[client->imexseq]) {
			if (++client->imexseq == MOUSEDEV_SEQ_LEN) {
				client->imexseq = 0;
				client->mode = MOUSEDEV_EMUL_EXPS;
			}
		} else
			client->imexseq = 0;

		/* Advance (or reset) the ImPS/2 knock sequence. */
		if (c == mousedev_imps_seq[client->impsseq]) {
			if (++client->impsseq == MOUSEDEV_SEQ_LEN) {
				client->impsseq = 0;
				client->mode = MOUSEDEV_EMUL_IMPS;
			}
		} else
			client->impsseq = 0;

		mousedev_generate_response(client, c);

		spin_unlock_irq(&client->packet_lock);
	}

	/* Tell readers a response is waiting. */
	kill_fasync(&client->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&client->mousedev->wait);

	return count;
}
/*
 * Return buffered PS/2 response bytes to userspace, first generating a
 * fresh movement packet if queued motion data is ready.
 */
static ssize_t mousedev_read(struct file *file, char __user *buffer,
			     size_t count, loff_t *ppos)
{
	struct mousedev_client *client = file->private_data;
	struct mousedev *mousedev = client->mousedev;
	signed char data[sizeof(client->ps2)];
	int retval = 0;

	if (!client->ready && !client->buffer && mousedev->exist &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	retval = wait_event_interruptible(mousedev->wait,
			!mousedev->exist || client->ready || client->buffer);
	if (retval)
		return retval;

	if (!mousedev->exist)
		return -ENODEV;

	spin_lock_irq(&client->packet_lock);

	if (!client->buffer && client->ready) {
		mousedev_packet(client, client->ps2);
		client->buffer = client->bufsiz;
	}

	if (count > client->buffer)
		count = client->buffer;

	/* Stage into a local buffer so copy_to_user runs without the lock. */
	memcpy(data, client->ps2 + client->bufsiz - client->buffer, count);
	client->buffer -= count;

	spin_unlock_irq(&client->packet_lock);

	if (copy_to_user(buffer, data, count))
		return -EFAULT;

	return count;
}
/* No kernel lock - fine */
static unsigned int mousedev_poll(struct file *file, poll_table *wait)
{
	struct mousedev_client *client = file->private_data;
	struct mousedev *mousedev = client->mousedev;
	unsigned int mask;

	poll_wait(file, &mousedev->wait, wait);

	/* Writable while the device exists; hang up once it is gone. */
	mask = mousedev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
	if (client->ready || client->buffer)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
/*
 * File operations for the mousedev character nodes; seeking is a no-op
 * (noop_llseek) since the byte stream has no meaningful position.
 */
static const struct file_operations mousedev_fops = {
	.owner = THIS_MODULE,
	.read = mousedev_read,
	.write = mousedev_write,
	.poll = mousedev_poll,
	.open = mousedev_open,
	.release = mousedev_release,
	.fasync = mousedev_fasync,
	.llseek = noop_llseek,
};
static int mousedev_install_chrdev(struct mousedev *mousedev)
{
mousedev_table[mousedev->minor] = mousedev;
return 0;
}
/* Unpublish the mousedev so no new open can find it in the table. */
static void mousedev_remove_chrdev(struct mousedev *mousedev)
{
	mutex_lock(&mousedev_table_mutex);
	mousedev_table[mousedev->minor] = NULL;
	mutex_unlock(&mousedev_table_mutex);
}
/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will stay, however new ones will fail.
 */
static void mousedev_mark_dead(struct mousedev *mousedev)
{
	/* The mutex serializes us against open/close in progress. */
	mutex_lock(&mousedev->mutex);
	mousedev->exist = false;
	mutex_unlock(&mousedev->mutex);
}
/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void mousedev_hangup(struct mousedev *mousedev)
{
	struct mousedev_client *client;

	/* Send SIGIO/POLL_HUP to every async-notification subscriber. */
	spin_lock(&mousedev->client_lock);
	list_for_each_entry(client, &mousedev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&mousedev->client_lock);

	wake_up_interruptible(&mousedev->wait);
}
/* Tear down a departing mousedev: kill pending I/O and unpublish it. */
static void mousedev_cleanup(struct mousedev *mousedev)
{
	struct input_handle *handle = &mousedev->handle;

	mousedev_mark_dead(mousedev);
	mousedev_hangup(mousedev);
	mousedev_remove_chrdev(mousedev);

	/* mousedev is marked dead so no one else accesses mousedev->open */
	if (mousedev->open)
		input_close_device(handle);
}
/*
 * Allocate, initialize and register one mousedev node.  For the
 * MOUSEDEV_MIX minor ("mice") no input handle is registered - that
 * node only multiplexes the individual devices.  Returns the new
 * mousedev or an ERR_PTR() on failure.
 */
static struct mousedev *mousedev_create(struct input_dev *dev,
					struct input_handler *handler,
					int minor)
{
	struct mousedev *mousedev;
	int error;

	mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL);
	if (!mousedev) {
		error = -ENOMEM;
		goto err_out;
	}

	INIT_LIST_HEAD(&mousedev->client_list);
	INIT_LIST_HEAD(&mousedev->mixdev_node);
	spin_lock_init(&mousedev->client_lock);
	mutex_init(&mousedev->mutex);
	/* The mix device's mutex nests differently, hence the subclass. */
	lockdep_set_subclass(&mousedev->mutex,
			     minor == MOUSEDEV_MIX ? SINGLE_DEPTH_NESTING : 0);
	init_waitqueue_head(&mousedev->wait);

	if (minor == MOUSEDEV_MIX)
		dev_set_name(&mousedev->dev, "mice");
	else
		dev_set_name(&mousedev->dev, "mouse%d", minor);

	mousedev->minor = minor;
	mousedev->exist = true;
	mousedev->handle.dev = input_get_device(dev);
	mousedev->handle.name = dev_name(&mousedev->dev);
	mousedev->handle.handler = handler;
	mousedev->handle.private = mousedev;

	mousedev->dev.class = &input_class;
	if (dev)
		mousedev->dev.parent = &dev->dev;
	mousedev->dev.devt = MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor);
	mousedev->dev.release = mousedev_free;
	device_initialize(&mousedev->dev);

	if (minor != MOUSEDEV_MIX) {
		error = input_register_handle(&mousedev->handle);
		if (error)
			goto err_free_mousedev;
	}

	error = mousedev_install_chrdev(mousedev);
	if (error)
		goto err_unregister_handle;

	error = device_add(&mousedev->dev);
	if (error)
		goto err_cleanup_mousedev;

	return mousedev;

err_cleanup_mousedev:
	mousedev_cleanup(mousedev);
err_unregister_handle:
	if (minor != MOUSEDEV_MIX)
		input_unregister_handle(&mousedev->handle);
err_free_mousedev:
	/* dev.release == mousedev_free, so the final put frees the struct. */
	put_device(&mousedev->dev);
err_out:
	return ERR_PTR(error);
}
/* Reverse of mousedev_create(); the final put may free the mousedev. */
static void mousedev_destroy(struct mousedev *mousedev)
{
	device_del(&mousedev->dev);
	mousedev_cleanup(mousedev);
	if (mousedev->minor != MOUSEDEV_MIX)
		input_unregister_handle(&mousedev->handle);
	put_device(&mousedev->dev);
}
/*
 * Attach a newly connected mouse to the "mice" multiplexer.  If the
 * mixed device is currently open, also open the underlying device so
 * its events start flowing immediately.
 */
static int mixdev_add_device(struct mousedev *mousedev)
{
	int retval;

	retval = mutex_lock_interruptible(&mousedev_mix->mutex);
	if (retval)
		return retval;

	if (mousedev_mix->open) {
		retval = mousedev_open_device(mousedev);
		if (retval)
			goto out;

		mousedev->mixdev_open = 1;
	}

	/* Hold a device reference for the mixdev list membership. */
	get_device(&mousedev->dev);
	list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list);

out:
	mutex_unlock(&mousedev_mix->mutex);
	return retval;
}
/*
 * Detach a mouse from the "mice" multiplexer: close it if the mixdev
 * had opened it, unlink it and drop the list's device reference.
 */
static void mixdev_remove_device(struct mousedev *mousedev)
{
	mutex_lock(&mousedev_mix->mutex);

	if (mousedev->mixdev_open) {
		mousedev->mixdev_open = 0;
		mousedev_close_device(mousedev);
	}

	list_del_init(&mousedev->mixdev_node);
	mutex_unlock(&mousedev_mix->mutex);

	put_device(&mousedev->dev);
}
/*
 * Input-core callback: a matching device appeared.  Find a free minor,
 * create its mousedev node and plug it into the "mice" multiplexer.
 */
static int mousedev_connect(struct input_handler *handler,
			    struct input_dev *dev,
			    const struct input_device_id *id)
{
	struct mousedev *mousedev;
	int minor;
	int error;

	for (minor = 0; minor < MOUSEDEV_MINORS; minor++)
		if (!mousedev_table[minor])
			break;

	if (minor == MOUSEDEV_MINORS) {
		pr_err("no more free mousedev devices\n");
		return -ENFILE;
	}

	mousedev = mousedev_create(dev, handler, minor);
	if (IS_ERR(mousedev))
		return PTR_ERR(mousedev);

	error = mixdev_add_device(mousedev);
	if (error) {
		mousedev_destroy(mousedev);
		return error;
	}

	return 0;
}
/* Input-core callback: device went away; detach from "mice" and destroy. */
static void mousedev_disconnect(struct input_handle *handle)
{
	struct mousedev *mousedev = handle->private;

	mixdev_remove_device(mousedev);
	mousedev_destroy(mousedev);
}
/*
 * Input device match table: classic mice, lone scroll wheels, tablets,
 * touchpads and absolute-coordinate "mice" are all claimed by mousedev.
 */
static const struct input_device_id mousedev_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) },
	},	/* A mouse like device, at least one button,
		   two relative axes */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.relbit = { BIT_MASK(REL_WHEEL) },
	},	/* A separate scrollwheel */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},	/* A tablet like device, at least touch detection,
		   two absolute axes */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_TOOL_FINGER)] =
				BIT_MASK(BTN_TOOL_FINGER) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
				BIT_MASK(ABS_PRESSURE) |
				BIT_MASK(ABS_TOOL_WIDTH) },
	},	/* A touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},	/* Mouse-like device with absolute X and Y but ordinary
		   clicks, like hp ILO2 High Performance mouse */
	{ },	/* Terminating entry */
};

MODULE_DEVICE_TABLE(input, mousedev_ids);
/* Glue between the input core and the mousedev character interface. */
static struct input_handler mousedev_handler = {
	.event = mousedev_event,
	.connect = mousedev_connect,
	.disconnect = mousedev_disconnect,
	.fops = &mousedev_fops,
	.minor = MOUSEDEV_MINOR_BASE,
	.name = "mousedev",
	.id_table = mousedev_ids,
};
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
static struct miscdevice psaux_mouse = {
PSMOUSE_MINOR, "psaux", &mousedev_fops
};
static int psaux_registered;
#endif
/*
 * Module init: create the shared "mice" multiplexer first, then let the
 * input core call mousedev_connect() for each matching device.  The
 * legacy /dev/psaux node is optional; failure to register it only
 * produces a warning.
 */
static int __init mousedev_init(void)
{
	int error;

	mousedev_mix = mousedev_create(NULL, &mousedev_handler, MOUSEDEV_MIX);
	if (IS_ERR(mousedev_mix))
		return PTR_ERR(mousedev_mix);

	error = input_register_handler(&mousedev_handler);
	if (error) {
		mousedev_destroy(mousedev_mix);
		return error;
	}

#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
	error = misc_register(&psaux_mouse);
	if (error)
		pr_warning("could not register psaux device, error: %d\n",
			   error);
	else
		psaux_registered = 1;
#endif

	pr_info("PS/2 mouse device common for all mice\n");

	return 0;
}
/* Module exit: unwind mousedev_init() in reverse order. */
static void __exit mousedev_exit(void)
{
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
	if (psaux_registered)
		misc_deregister(&psaux_mouse);
#endif
	input_unregister_handler(&mousedev_handler);
	mousedev_destroy(mousedev_mix);
}
module_init(mousedev_init);
module_exit(mousedev_exit);
| gpl-2.0 |
InsomniaAOSP/android_kernel_samsung_d2 | arch/mips/pci/ops-titan-ht.c | 7858 | 3429 | /*
* Copyright 2003 PMC-Sierra
* Author: Manish Lachwani (lachwani@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/titan_dep.h>
/*
 * Read one aligned 32-bit word from HyperTransport config space.
 *
 * The 0xfb00xxxx constants are RM9000-mapped control registers:
 * 0xfb0006f8/0xfb0006fc look like the config address/data pair and
 * 0xfb0000f0 the BIU sync trigger - NOTE(review): confirm against the
 * PMC-Sierra RM9000/Titan documentation.
 */
static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn,
				      int offset, u32 *val)
{
	volatile uint32_t address;
	int busno;

	busno = bus->number;

	/* Config cycle: bus in [23:16], devfn in [15:8], dword offset. */
	address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000;
	if (busno != 0)
		address |= 1;	/* set for non-zero (downstream) buses */

	/*
	 * RM9000 HT Errata: Issue back to back HT config
	 * transcations. Issue a BIU sync before and
	 * after the HT cycle
	 */
	*(volatile int32_t *) 0xfb0000f0 |= 0x2;
	udelay(30);

	*(volatile int32_t *) 0xfb0006f8 = address;
	*(val) = *(volatile int32_t *) 0xfb0006fc;

	udelay(30);
	*(volatile int32_t *) 0xfb0000f0 |= 0x2;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Read 'size' bytes (1, 2 or 4) from HT config space.
 *
 * Fixes two defects in the original:
 *  - the extracted value was never stored through 'val', so callers
 *    received uninitialized data;
 *  - the byte-count shift used '<< 8' instead of '<< 3' (bits per
 *    byte), producing shifts >= 32, which is undefined behavior.
 */
static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn,
				int offset, int size, u32 *val)
{
	uint32_t dword;

	titan_ht_config_read_dword(bus, devfn, offset, &dword);

	/* Move the addressed bytes down, then mask to the requested width. */
	dword >>= ((offset & 3) << 3);
	dword &= 0xffffffffU >> ((4 - size) << 3);

	*val = dword;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write one aligned 32-bit word to HT config space, with the same
 * BIU-sync bracketing as the read path (RM9000 errata workaround).
 */
static inline int titan_ht_config_write_dword(struct pci_bus *bus,
					      unsigned int devfn, int offset, u32 val)
{
	volatile uint32_t address;
	int busno;

	busno = bus->number;

	/* Same config-cycle encoding as titan_ht_config_read_dword(). */
	address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000;
	if (busno != 0)
		address |= 1;

	*(volatile int32_t *) 0xfb0000f0 |= 0x2;
	udelay(30);

	*(volatile int32_t *) 0xfb0006f8 = address;
	*(volatile int32_t *) 0xfb0006fc = val;

	udelay(30);
	*(volatile int32_t *) 0xfb0000f0 |= 0x2;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write 'size' bytes (1, 2 or 4) to HT config space by read-modify-
 * writing the containing 32-bit word.
 *
 * Fixes the original mask computation, which used '<< 8' instead of
 * '<< 3' (undefined shifts >= 32) and inverted the mask so the wrong
 * bytes of the merged word were cleared.
 */
static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn,
				 int offset, int size, u32 val)
{
	uint32_t val1, val2, mask;

	titan_ht_config_read_dword(bus, devfn, offset, &val2);

	/* Mask of the low 'size' bytes, before shifting into place. */
	mask = 0xffffffffU >> ((4 - size) << 3);
	/* New bytes moved to their byte lane... */
	val1 = (val & mask) << ((offset & 3) << 3);
	/* ...and the same lanes cleared in the word read back. */
	val2 &= ~(mask << ((offset & 3) << 3));

	titan_ht_config_write_dword(bus, devfn, offset, val1 | val2);

	return PCIBIOS_SUCCESSFUL;
}
/* pci_ops plugged into the Titan HyperTransport pci_controller. */
struct pci_ops titan_ht_pci_ops = {
	.read = titan_ht_config_read,
	.write = titan_ht_config_write,
};
| gpl-2.0 |
houzhenggang/linux-rlx-upstream | drivers/staging/winbond/wb35reg.c | 8114 | 20746 | #include "wb35reg_f.h"
#include <linux/usb.h>
#include <linux/slab.h>
extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
/*
* true : read command process successfully
* false : register not support
* RegisterNo : start base
* pRegisterData : data point
* NumberOfData : number of register data
* Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
* NO_INCREMENT - Function will write data into the same register
*/
/*
 * Queue an asynchronous EP0 burst write of NumberOfData consecutive
 * 32-bit registers starting at RegisterNo.  Returns true if queued,
 * false on allocation failure or a removed device.
 *
 * Fixes: restores "&reg->" expressions that had been corrupted into
 * "®->" (HTML-entity mangling) and drops the unreachable trailing
 * "return false;" (both branches of the if/else already return).
 */
unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterData, u8 NumberOfData, u8 Flag)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct urb *urb = NULL;
	struct wb35_reg_queue *reg_queue = NULL;
	u16 UrbSize;
	struct usb_ctrlrequest *dr;
	u16 i, DataSize = NumberOfData * 4;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	/* Trying to use burst write function if use new hardware */
	UrbSize = sizeof(struct wb35_reg_queue) + DataSize + sizeof(struct usb_ctrlrequest);
	reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (urb && reg_queue) {
		reg_queue->DIRECT = 2; /* burst write register */
		reg_queue->INDEX = RegisterNo;
		reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
		memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
		/* the function for reversing register data from little endian to big endian */
		for (i = 0; i < NumberOfData ; i++)
			reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);

		dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
		dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
		dr->bRequest = 0x04; /* USB or vendor-defined request code, burst mode */
		dr->wValue = cpu_to_le16(Flag); /* 0: Register number auto-increment, 1: No auto increment */
		dr->wIndex = cpu_to_le16(RegisterNo);
		dr->wLength = cpu_to_le16(DataSize);

		/* Append to the EP0 request queue; EP0VM drains it async. */
		reg_queue->Next = NULL;
		reg_queue->pUsbReq = dr;
		reg_queue->urb = urb;

		spin_lock_irq(&reg->EP0VM_spin_lock);
		if (reg->reg_first == NULL)
			reg->reg_first = reg_queue;
		else
			reg->reg_last->Next = reg_queue;
		reg->reg_last = reg_queue;
		spin_unlock_irq(&reg->EP0VM_spin_lock);

		/* Start EP0VM */
		Wb35Reg_EP0VM_start(pHwData);

		return true;
	} else {
		/* Partial allocation: release whichever object we did get. */
		if (urb)
			usb_free_urb(urb);
		kfree(reg_queue);
		return false;
	}
}
/*
 * Mirror a freshly written register value into the driver's cached
 * copy (struct wb35_reg) so later code can consult it without a USB
 * round-trip.  Registers not listed here are simply not cached.
 */
void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
	struct wb35_reg *reg = &pHwData->reg;

	switch (RegisterNo) {
	case 0x3b0: reg->U1B0 = RegisterValue; break;
	case 0x3bc: reg->U1BC_LEDConfigure = RegisterValue; break;
	case 0x400: reg->D00_DmaControl = RegisterValue; break;
	case 0x800: reg->M00_MacControl = RegisterValue; break;
	case 0x804: reg->M04_MulticastAddress1 = RegisterValue; break;
	case 0x808: reg->M08_MulticastAddress2 = RegisterValue; break;
	case 0x824: reg->M24_MacControl = RegisterValue; break;
	case 0x828: reg->M28_MacControl = RegisterValue; break;
	case 0x82c: reg->M2C_MacControl = RegisterValue; break;
	case 0x838: reg->M38_MacControl = RegisterValue; break;
	case 0x840: reg->M40_MacControl = RegisterValue; break;
	case 0x844: reg->M44_MacControl = RegisterValue; break;
	case 0x848: reg->M48_MacControl = RegisterValue; break;
	case 0x84c: reg->M4C_MacStatus = RegisterValue; break;
	case 0x860: reg->M60_MacControl = RegisterValue; break;
	case 0x868: reg->M68_MacControl = RegisterValue; break;
	case 0x870: reg->M70_MacControl = RegisterValue; break;
	case 0x874: reg->M74_MacControl = RegisterValue; break;
	case 0x878: reg->M78_ERPInformation = RegisterValue; break;
	case 0x87C: reg->M7C_MacControl = RegisterValue; break;
	case 0x880: reg->M80_MacControl = RegisterValue; break;
	case 0x884: reg->M84_MacControl = RegisterValue; break;
	case 0x888: reg->M88_MacControl = RegisterValue; break;
	case 0x898: reg->M98_MacControl = RegisterValue; break;
	case 0x100c: reg->BB0C = RegisterValue; break;
	case 0x102c: reg->BB2C = RegisterValue; break;
	case 0x1030: reg->BB30 = RegisterValue; break;
	case 0x103c: reg->BB3C = RegisterValue; break;
	case 0x1048: reg->BB48 = RegisterValue; break;
	case 0x104c: reg->BB4C = RegisterValue; break;
	case 0x1050: reg->BB50 = RegisterValue; break;
	case 0x1054: reg->BB54 = RegisterValue; break;
	case 0x1058: reg->BB58 = RegisterValue; break;
	case 0x105c: reg->BB5C = RegisterValue; break;
	case 0x1060: reg->BB60 = RegisterValue; break;
	}
}
/*
* true : read command process successfully
* false : register not support
*/
/*
 * Synchronously write one 32-bit register over EP0: pause the async
 * EP0 state machine, issue a blocking control transfer, then restart
 * the queue.  Marks the adapter surprise-removed on USB failure.
 * Returns true on success, false otherwise.
 */
unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
	struct wb35_reg *reg = &pHwData->reg;
	int ret = -1;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	RegisterValue = cpu_to_le32(RegisterValue);

	/* update the register by send usb message */
	reg->SyncIoPause = 1;

	/* Wait until EP0VM stop */
	while (reg->EP0vm_state != VM_STOP)
		msleep(10);

	/* Sync IoCallDriver */
	reg->EP0vm_state = VM_RUNNING;
	ret = usb_control_msg(pHwData->udev,
			      usb_sndctrlpipe(pHwData->udev, 0),
			      0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			      0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
	reg->EP0vm_state = VM_STOP;
	reg->SyncIoPause = 0;

	/* Restart the queued async requests we held off above. */
	Wb35Reg_EP0VM_start(pHwData);

	if (ret < 0) {
		pr_debug("EP0 Write register usb message sending error\n");
		pHwData->SurpriseRemove = 1;
		return false;
	}
	return true;
}
/*
* true : read command process successfully
* false : register not support
*/
/*
 * Queue an asynchronous EP0 write of one 32-bit register.
 * Returns true if queued, false on allocation failure or dead device.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->" and
 * corrects the copy-pasted "burst write" comment (DIRECT == 1 is a
 * single-register write; burst is DIRECT == 2).
 */
unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct usb_ctrlrequest *dr;
	struct urb *urb = NULL;
	struct wb35_reg_queue *reg_queue = NULL;
	u16 UrbSize;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	/* update the register by send urb request */
	UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
	reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (urb && reg_queue) {
		reg_queue->DIRECT = 1; /* single write register */
		reg_queue->INDEX = RegisterNo;
		reg_queue->VALUE = cpu_to_le32(RegisterValue);
		reg_queue->RESERVED_VALID = false;
		dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
		dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
		dr->bRequest = 0x03; /* vendor-defined write request */
		dr->wValue = cpu_to_le16(0x0);
		dr->wIndex = cpu_to_le16(RegisterNo);
		dr->wLength = cpu_to_le16(4);

		/* Enter the sending queue */
		reg_queue->Next = NULL;
		reg_queue->pUsbReq = dr;
		reg_queue->urb = urb;

		spin_lock_irq(&reg->EP0VM_spin_lock);
		if (reg->reg_first == NULL)
			reg->reg_first = reg_queue;
		else
			reg->reg_last->Next = reg_queue;
		reg->reg_last = reg_queue;
		spin_unlock_irq(&reg->EP0VM_spin_lock);

		/* Start EP0VM */
		Wb35Reg_EP0VM_start(pHwData);

		return true;
	} else {
		if (urb)
			usb_free_urb(urb);
		kfree(reg_queue);
		return false;
	}
}
/*
* This command will be executed with a user defined value. When it completes,
* this value is useful. For example, hal_set_current_channel will use it.
* true : read command process successfully
* false : register not support
*/
/*
 * Queue an asynchronous EP0 register write carrying an extra
 * user-defined value (up to sizeof(reg_queue->RESERVED) bytes) that
 * completion code can consume - e.g. hal_set_current_channel.
 * Returns true if queued, false on allocation failure or dead device.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->".
 */
unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
					     u16 RegisterNo,
					     u32 RegisterValue,
					     s8 *pValue,
					     s8 Len)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct usb_ctrlrequest *dr;
	struct urb *urb = NULL;
	struct wb35_reg_queue *reg_queue = NULL;
	u16 UrbSize;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	/* update the register by send urb request */
	UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
	reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (urb && reg_queue) {
		reg_queue->DIRECT = 1; /* single write register */
		reg_queue->INDEX = RegisterNo;
		reg_queue->VALUE = cpu_to_le32(RegisterValue);
		/* NOTE : Users must guarantee the size of value will not exceed the buffer size. */
		memcpy(reg_queue->RESERVED, pValue, Len);
		reg_queue->RESERVED_VALID = true;
		dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
		dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
		dr->bRequest = 0x03; /* vendor-defined write request */
		dr->wValue = cpu_to_le16(0x0);
		dr->wIndex = cpu_to_le16(RegisterNo);
		dr->wLength = cpu_to_le16(4);

		/* Enter the sending queue */
		reg_queue->Next = NULL;
		reg_queue->pUsbReq = dr;
		reg_queue->urb = urb;

		spin_lock_irq(&reg->EP0VM_spin_lock);
		if (reg->reg_first == NULL)
			reg->reg_first = reg_queue;
		else
			reg->reg_last->Next = reg_queue;
		reg->reg_last = reg_queue;
		spin_unlock_irq(&reg->EP0VM_spin_lock);

		/* Start EP0VM */
		Wb35Reg_EP0VM_start(pHwData);
		return true;
	} else {
		if (urb)
			usb_free_urb(urb);
		kfree(reg_queue);
		return false;
	}
}
/*
* true : read command process successfully
* false : register not support
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
/*
 * Synchronously read one 32-bit register over EP0 (blocking control
 * transfer), update the cached copy via Wb35Reg_Update(), and mark
 * the adapter surprise-removed on USB error.
 * pRegisterValue must be a resident buffer (asynchronous-read rule
 * shared with Wb35Reg_Read()).
 */
unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
	struct wb35_reg *reg = &pHwData->reg;
	u32 *pltmp = pRegisterValue;
	int ret = -1;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	/* Read the register by send usb message */
	reg->SyncIoPause = 1;

	/* Wait until EP0VM stop */
	while (reg->EP0vm_state != VM_STOP)
		msleep(10);

	reg->EP0vm_state = VM_RUNNING;
	ret = usb_control_msg(pHwData->udev,
			      usb_rcvctrlpipe(pHwData->udev, 0),
			      0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			      0x0, RegisterNo, pltmp, 4, HZ * 100);

	/* NOTE(review): le32_to_cpu() is the intended direction here;
	 * cpu_to_le32() performs the identical byte swap so behavior is
	 * unchanged, but the name should be corrected upstream. */
	*pRegisterValue = cpu_to_le32(*pltmp);

	reg->EP0vm_state = VM_STOP;

	Wb35Reg_Update(pHwData, RegisterNo, *pRegisterValue);
	reg->SyncIoPause = 0;

	Wb35Reg_EP0VM_start(pHwData);

	if (ret < 0) {
		pr_debug("EP0 Read register usb message sending error\n");
		pHwData->SurpriseRemove = 1;
		return false;
	}
	return true;
}
/*
* true : read command process successfully
* false : register not support
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
/*
 * Queue an asynchronous EP0 read of one 32-bit register.
 * pRegisterValue must stay valid until the URB completes - the result
 * is written there by the completion path.
 * Returns true if queued, false on allocation failure or dead device.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->".
 */
unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	struct wb35_reg_queue *reg_queue;
	u16 UrbSize;

	/* Module shutdown */
	if (pHwData->SurpriseRemove)
		return false;

	/* update the variable by send Urb to read register */
	UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
	reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (urb && reg_queue) {
		reg_queue->DIRECT = 0; /* read register */
		reg_queue->INDEX = RegisterNo;
		reg_queue->pBuffer = pRegisterValue;
		dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
		dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN;
		dr->bRequest = 0x01; /* vendor-defined read request */
		dr->wValue = cpu_to_le16(0x0);
		dr->wIndex = cpu_to_le16(RegisterNo);
		dr->wLength = cpu_to_le16(4);

		/* Enter the sending queue */
		reg_queue->Next = NULL;
		reg_queue->pUsbReq = dr;
		reg_queue->urb = urb;
		spin_lock_irq(&reg->EP0VM_spin_lock);
		if (reg->reg_first == NULL)
			reg->reg_first = reg_queue;
		else
			reg->reg_last->Next = reg_queue;
		reg->reg_last = reg_queue;
		spin_unlock_irq(&reg->EP0VM_spin_lock);

		/* Start EP0VM */
		Wb35Reg_EP0VM_start(pHwData);

		return true;
	} else {
		if (urb)
			usb_free_urb(urb);
		kfree(reg_queue);
		return false;
	}
}
/*
 * Kick the EP0 "virtual machine": if no cycle is in flight (the
 * counter goes 0 -> 1) start draining the queue; otherwise undo the
 * increment and let the running cycle pick up the new entry.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->".
 */
void Wb35Reg_EP0VM_start(struct hw_data *pHwData)
{
	struct wb35_reg *reg = &pHwData->reg;

	if (atomic_inc_return(&reg->RegFireCount) == 1) {
		reg->EP0vm_state = VM_RUNNING;
		Wb35Reg_EP0VM(pHwData);
	} else
		atomic_dec(&reg->RegFireCount);
}
/*
 * Submit the control request at the head of the EP0 queue; the
 * completion handler chains to the next entry.
 *
 * Fixes: restores "&reg->" / "&reg_queue->" expressions corrupted
 * into "®->" / "®_queue->", and removes a duplicated dead
 * assignment to 'urb' (it was fetched twice, once via a useless cast).
 */
void Wb35Reg_EP0VM(struct hw_data *pHwData)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u32 *pBuffer;
	int ret = -1;
	struct wb35_reg_queue *reg_queue;

	if (reg->SyncIoPause)
		goto cleanup;

	if (pHwData->SurpriseRemove)
		goto cleanup;

	/* Get the register data and send to USB through Irp */
	spin_lock_irq(&reg->EP0VM_spin_lock);
	reg_queue = reg->reg_first;
	spin_unlock_irq(&reg->EP0VM_spin_lock);

	if (!reg_queue)
		goto cleanup;

	/* Get an Urb, send it */
	urb = reg_queue->urb;
	dr = reg_queue->pUsbReq;
	pBuffer = reg_queue->pBuffer;
	if (reg_queue->DIRECT == 1) /* output */
		pBuffer = &reg_queue->VALUE;

	usb_fill_control_urb(urb, pHwData->udev,
			     REG_DIRECTION(pHwData->udev, reg_queue),
			     (u8 *)dr, pBuffer, cpu_to_le16(dr->wLength),
			     Wb35Reg_EP0VM_complete, (void *)pHwData);

	reg->EP0vm_state = VM_RUNNING;

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret < 0) {
		pr_debug("EP0 Irp sending error\n");
		goto cleanup;
	}
	return;

cleanup:
	reg->EP0vm_state = VM_STOP;
	atomic_dec(&reg->RegFireCount);
}
/*
 * EP0 URB completion: pop the finished request off the queue and, on
 * success, kick off the next queued transfer.  On error (or surprise
 * removal) the state machine stops and the device is marked removed.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->".
 */
void Wb35Reg_EP0VM_complete(struct urb *urb)
{
	struct hw_data *pHwData = (struct hw_data *)urb->context;
	struct wb35_reg *reg = &pHwData->reg;
	struct wb35_reg_queue *reg_queue;

	/* Variable setting */
	reg->EP0vm_state = VM_COMPLETED;
	reg->EP0VM_status = urb->status;

	if (pHwData->SurpriseRemove) { /* Let WbWlanHalt to handle surprise remove */
		reg->EP0vm_state = VM_STOP;
		atomic_dec(&reg->RegFireCount);
	} else {
		/* Complete to send, remove the URB from the first */
		spin_lock_irq(&reg->EP0VM_spin_lock);
		reg_queue = reg->reg_first;
		if (reg_queue == reg->reg_last)
			reg->reg_last = NULL;
		reg->reg_first = reg->reg_first->Next;
		spin_unlock_irq(&reg->EP0VM_spin_lock);

		if (reg->EP0VM_status) {
			pr_debug("EP0 IoCompleteRoutine return error\n");
			reg->EP0vm_state = VM_STOP;
			pHwData->SurpriseRemove = 1;
		} else {
			/* Success. Update the result */
			/* Start the next send */
			Wb35Reg_EP0VM(pHwData);
		}

		kfree(reg_queue);
	}

	usb_free_urb(urb);
}
/*
 * Power the chip down and free every request still sitting on the EP0
 * queue once the state machine has stopped.
 *
 * Fixes: restores "&reg->" expressions corrupted into "®->", and
 * frees the queue node even when it has no urb attached (the original
 * leaked reg_queue on that path).
 */
void Wb35Reg_destroy(struct hw_data *pHwData)
{
	struct wb35_reg *reg = &pHwData->reg;
	struct urb *urb;
	struct wb35_reg_queue *reg_queue;

	Uxx_power_off_procedure(pHwData);

	/* Wait for Reg operation completed */
	do {
		msleep(10); /* Delay for waiting function enter */
	} while (reg->EP0vm_state != VM_STOP);
	msleep(10); /* Delay for waiting function enter */

	/* Release all the data in RegQueue */
	spin_lock_irq(&reg->EP0VM_spin_lock);
	reg_queue = reg->reg_first;
	while (reg_queue) {
		if (reg_queue == reg->reg_last)
			reg->reg_last = NULL;
		reg->reg_first = reg->reg_first->Next;

		urb = reg_queue->urb;
		/* Drop the lock while freeing; the VM is stopped by now. */
		spin_unlock_irq(&reg->EP0VM_spin_lock);
		if (urb)
			usb_free_urb(urb);
		else
			pr_debug("EP0 queue release error\n");
		kfree(reg_queue); /* free the node even if it lacked an urb */
		spin_lock_irq(&reg->EP0VM_spin_lock);

		reg_queue = reg->reg_first;
	}
	spin_unlock_irq(&reg->EP0VM_spin_lock);
}
/*
* =======================================================================
* The function can be run in passive-level only.
* =========================================================================
*/
/*
 * One-time hardware bring-up: read the RF module type, MAC address and
 * radio parameters from the EEPROM (via indirect register 0x03b4),
 * then run synthesizer/baseband/MAC initialization.
 * Returns true on success, false if the device vanished mid-way.
 *
 * Fixes: restores "&reg->" / "&ltmp" expressions corrupted into
 * "®->" / "<mp", and moves the mis-placed "Initial fail" comment
 * to the failure branch it describes.
 */
unsigned char Wb35Reg_initial(struct hw_data *pHwData)
{
	struct wb35_reg *reg = &pHwData->reg;
	u32 ltmp;
	u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;

	/* Spin lock is acquired for read and write IRP command */
	spin_lock_init(&reg->EP0VM_spin_lock);

	/* Getting RF module type from EEPROM */
	Wb35Reg_WriteSync(pHwData, 0x03b4, 0x080d0000); /* Start EEPROM access + Read + address(0x0d) */
	Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);

	/* Update RF module type and determine the PHY type by inf or EEPROM */
	reg->EEPROMPhyType = (u8)(ltmp & 0xff);
	/*
	 * 0 V MAX2825, 1 V MAX2827, 2 V MAX2828, 3 V MAX2829
	 * 16V AL2230, 17 - AL7230, 18 - AL2230S
	 * 32 Reserved
	 * 33 - W89RF242(TxVGA 0~19), 34 - W89RF242(TxVGA 0~34)
	 */
	if (reg->EEPROMPhyType != RF_DECIDE_BY_INF) {
		if ((reg->EEPROMPhyType == RF_MAXIM_2825) ||
		    (reg->EEPROMPhyType == RF_MAXIM_2827) ||
		    (reg->EEPROMPhyType == RF_MAXIM_2828) ||
		    (reg->EEPROMPhyType == RF_MAXIM_2829) ||
		    (reg->EEPROMPhyType == RF_MAXIM_V1) ||
		    (reg->EEPROMPhyType == RF_AIROHA_2230) ||
		    (reg->EEPROMPhyType == RF_AIROHA_2230S) ||
		    (reg->EEPROMPhyType == RF_AIROHA_7230) ||
		    (reg->EEPROMPhyType == RF_WB_242) ||
		    (reg->EEPROMPhyType == RF_WB_242_1))
			pHwData->phy_type = reg->EEPROMPhyType;
	}

	/* Power On procedure running. The relative parameter will be set according to phy_type */
	Uxx_power_on_procedure(pHwData);

	/* Reading MAC address */
	Uxx_ReadEthernetAddress(pHwData);

	/* Read VCO trim for RF parameter */
	Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08200000);
	Wb35Reg_ReadSync(pHwData, 0x03b4, &VCO_trim);

	/* Read Antenna On/Off of software flag */
	Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08210000);
	Wb35Reg_ReadSync(pHwData, 0x03b4, &SoftwareSet);

	/* Read TXVGA */
	Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08100000);
	Wb35Reg_ReadSync(pHwData, 0x03b4, &TxVga);

	/* Get Scan interval setting from EEPROM offset 0x1c */
	Wb35Reg_WriteSync(pHwData, 0x03b4, 0x081d0000);
	Wb35Reg_ReadSync(pHwData, 0x03b4, &Region_ScanInterval);

	/* Update Ethernet address */
	memcpy(pHwData->CurrentMacAddress, pHwData->PermanentMacAddress, ETH_ALEN);

	/* Update software variable */
	pHwData->SoftwareSet = (u16)(SoftwareSet & 0xffff);
	TxVga &= 0x000000ff;
	pHwData->PowerIndexFromEEPROM = (u8)TxVga;
	pHwData->VCO_trim = (u8)VCO_trim & 0xff;
	if (pHwData->VCO_trim == 0xff)
		pHwData->VCO_trim = 0x28;	/* EEPROM unprogrammed: use default */

	reg->EEPROMRegion = (u8)(Region_ScanInterval >> 8);
	if (reg->EEPROMRegion < 1 || reg->EEPROMRegion > 6)
		reg->EEPROMRegion = REGION_AUTO;

	/* For Get Tx VGA from EEPROM */
	GetTxVgaFromEEPROM(pHwData);

	/* Set Scan Interval */
	pHwData->Scan_Interval = (u8)(Region_ScanInterval & 0xff) * 10;
	if ((pHwData->Scan_Interval == 2550) || (pHwData->Scan_Interval < 10)) /* Is default setting 0xff * 10 */
		pHwData->Scan_Interval = SCAN_MAX_CHNL_TIME;

	/* Initial register */
	RFSynthesizer_initial(pHwData);

	BBProcessor_initial(pHwData); /* Async write, must wait until complete */

	Wb35Reg_phy_calibration(pHwData);

	Mxx_initial(pHwData);
	Dxx_initial(pHwData);

	if (pHwData->SurpriseRemove)
		return false; /* Initial fail */
	else
		return true;
}
/*
 * CardComputeCrc - run the AUTODIN II CRC algorithm over a buffer.
 * @Buffer: input bytes
 * @Length: number of bytes in Buffer
 *
 * Bit-serial implementation, processing each byte LSB first; returns
 * the raw 32-bit CRC accumulator (no final inversion is applied).
 */
u32 CardComputeCrc(u8 *Buffer, u32 Length)
{
	u32 crc = 0xffffffff;
	u32 byte_idx, bit_idx;

	for (byte_idx = 0; byte_idx < Length; byte_idx++) {
		u8 cur = Buffer[byte_idx];

		for (bit_idx = 0; bit_idx < 8; bit_idx++) {
			/* Feedback = MSB of the accumulator XOR next data bit. */
			u32 feedback = ((crc >> 31) & 1) ^ (cur & 0x01);

			crc <<= 1;
			cur >>= 1;
			if (feedback)
				crc = (crc ^ 0x04c11db6) | feedback;
		}
	}

	return crc;
}
/*
 * ==================================================================
 * BitReverse --
 *	Reverse the bits in the input argument, dwData, which is
 *	regarded as a string of bits with the length, DataLength.
 *
 * Arguments:
 *	dwData : value whose low DataLength bits are mirrored
 *	DataLength : number of significant bits
 *
 * Return:
 *	The converted value.
 * ==================================================================
 */
u32 BitReverse(u32 dwData, u32 DataLength)
{
	u32 HalfLength, i, j;
	u32 BitA, BitB;

	/* DataLength is unsigned, so this only rejects 0; it also keeps
	 * the (32 - DataLength) shift below well-defined. */
	if (DataLength <= 0)
		return 0; /* No conversion is done. */
	dwData = dwData & (0xffffffff >> (32 - DataLength));

	/* Swap bit i with bit (DataLength-1-i) from both ends inward.
	 * GetBit/SetBit/ClearBit are presumably driver-local bit helpers
	 * from wb35reg_f.h - confirm. */
	HalfLength = DataLength / 2;
	for (i = 0, j = DataLength - 1; i < HalfLength; i++, j--) {
		BitA = GetBit(dwData, i);
		BitB = GetBit(dwData, j);
		if (BitA && !BitB) {
			dwData = ClearBit(dwData, i);
			dwData = SetBit(dwData, j);
		} else if (!BitA && BitB) {
			dwData = SetBit(dwData, i);
			dwData = ClearBit(dwData, j);
		} else {
			/* Do nothing since these two bits are of the save values. */
		}
	}
	return dwData;
}
/*
 * Run PHY calibration for the WB-242 RF parts: calibrate on channel
 * 2412, save baseband registers 0x103c/0x1054, reinitialise RF/BB,
 * then restore the saved register values.  Other RF types are a no-op.
 */
void Wb35Reg_phy_calibration(struct hw_data *pHwData)
{
	u32 bb3c_val, bb54_val;

	if ((pHwData->phy_type != RF_WB_242) &&
	    (pHwData->phy_type != RF_WB_242_1))
		return;

	phy_calibration_winbond(pHwData, 2412); /* Sync operation */

	/* Capture the calibrated baseband register values. */
	Wb35Reg_ReadSync(pHwData, 0x103c, &bb3c_val);
	Wb35Reg_ReadSync(pHwData, 0x1054, &bb54_val);
	pHwData->BB3c_cal = bb3c_val;
	pHwData->BB54_cal = bb54_val;

	/* Reinitialise the synthesizer and baseband processor... */
	RFSynthesizer_initial(pHwData);
	BBProcessor_initial(pHwData); /* Async operation */

	/* ...then restore the calibrated values. */
	Wb35Reg_WriteSync(pHwData, 0x103c, bb3c_val);
	Wb35Reg_WriteSync(pHwData, 0x1054, bb54_val);
}
| gpl-2.0 |
Droid-Concepts/android_kernel_lge_f320k | drivers/staging/vt6655/wroute.c | 8370 | 6204 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: wroute.c
*
* Purpose: handle WMAC frame relay & filtering
*
* Author: Lyndon Chen
*
* Date: May 20, 2003
*
* Functions:
* ROUTEbRelay - Relay packet
*
* Revision History:
*
*/
#include "mac.h"
#include "tcrc.h"
#include "rxtx.h"
#include "wroute.h"
#include "card.h"
#include "baseband.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*
* Description:
* Relay packet. Return true if packet is copy to DMA1
*
* Parameters:
* In:
* pDevice -
* pbySkbData - rx packet skb data
* Out:
* true, false
*
* Return Value: true if packet duplicate; otherwise false
*
*/
/*
 * Relay a received Ethernet frame back out through the AC0 DMA TX ring
 * (used for intra-BSS forwarding).  Returns true when the frame was
 * queued for transmission, false when no TX descriptors are available.
 */
bool ROUTEbRelay (PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
unsigned int cbFrameBodySize;
unsigned int uMACfragNum;
unsigned char byPktType;
bool bNeedEncryption = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
unsigned int cbHeaderSize;
unsigned int ii;
unsigned char *pbyBSSID;
/* Need at least one free TX descriptor on the AC0 DMA ring. */
if (AVAIL_TD(pDevice, TYPE_AC0DMA)<=0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Relay can't allocate TD1..\n");
return false;
}
pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
/* Copy the Ethernet header out of the rx buffer (dst/src/type). */
memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)pbySkbData, ETH_HLEN);
cbFrameBodySize = uDataLen - ETH_HLEN;
/* Ethertype frames (type > 1500) gain an 8-byte SNAP header. */
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
if (pDevice->bEncryptionEnable == true) {
bNeedEncryption = true;
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
}
}
/* Host-managed WEP: build a temporary key from the node database. */
if (pDevice->bEnableHostWEP) {
if (uNodeIndex < MAX_NODE_NUM + 1) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
}
/* Give up if the frame needs more descriptors than are free. */
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if (uMACfragNum > AVAIL_TD(pDevice,TYPE_AC0DMA)) {
return false;
}
byPktType = (unsigned char)pDevice->byPacketType;
/* Pick the TX rate: clamp a fixed rate to the current PHY's range,
 * otherwise use the per-node auto-rate result. */
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
(pDevice->uConnectionRate <= RATE_6M)) {
pDevice->wCurrentRate = RATE_6M;
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
}
else {
pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
}
/* CCK rates always go out as 11B packets. */
if (pDevice->wCurrentRate <= RATE_11M)
byPktType = PK_TYPE_11B;
/* Build the FIFO header + fragments into the TX descriptors. */
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
&pDevice->sTxEthHeader, pbySkbData, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
/* Hand ownership of each fragment's descriptor to the NIC.  The
 * wmb() pairs keep the descriptor contents visible before OWNED_BY_NIC
 * is set. */
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
// Poll Transmit the adapter
wmb();
pHeadTD->m_td0TD0.f1Owner=OWNED_BY_NIC;
wmb();
if (ii == (uMACfragNum - 1))
pLastTD = pHeadTD;
pHeadTD = pHeadTD->next;
}
/* No skb to free on completion — the data came from an rx buffer. */
pLastTD->pTDInfo->skb = 0;
pLastTD->pTDInfo->byFlags = 0;
pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
/* Kick the AC0 DMA engine. */
MACvTransmitAC0(pDevice->PortOffset);
return true;
}
| gpl-2.0 |
HTCKernels/One-SV-international-k2u | drivers/staging/vt6655/datarate.c | 8370 | 12409 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: datarate.c
*
* Purpose: Handles the auto fallback & data rates functions
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
*
* Functions:
* RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame
* RATEvTxRateFallBack - Rate fallback Algorithm Implementation
* RATEuSetIE- Set rate IE field.
*
* Revision History:
*
*/
#include "ttype.h"
#include "tmacro.h"
#include "mac.h"
#include "80211mgr.h"
#include "bssdb.h"
#include "datarate.h"
#include "card.h"
#include "baseband.h"
#include "srom.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
extern unsigned short TxRate_iwconfig; //2008-5-8 <add> by chester
/*--------------------- Static Variables --------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
/*
 * Rate values as carried in 802.11 Supported Rates IEs, in units of
 * 500 kbps (0x02 = 1 Mbps ... 0x6C = 54 Mbps), indexed by rate index.
 */
const unsigned char acbyIERate[MAX_RATE] =
{0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
#define AUTORATE_TXFAIL_CNT 0x0064
#define AUTORATE_TIMEOUT 10
/*--------------------- Static Functions --------------------------*/
void s_vResetCounter (
PKnownNodeDB psNodeDBTable
);
/*
 * Zero the per-rate TX ok/fail statistics ahead of a new auto-rate
 * measurement period.  Entry MAX_RATE is used by the caller as an
 * aggregate counter, so MAX_RATE + 1 slots are cleared.
 */
void
s_vResetCounter (
	PKnownNodeDB psNodeDBTable
	)
{
	unsigned char idx;

	for (idx = 0; idx < MAX_RATE + 1; idx++) {
		psNodeDBTable->uTxOk[idx] = 0;
		psNodeDBTable->uTxFail[idx] = 0;
	}
}
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
*
* Description:
* Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
*
* Parameters:
* In:
* unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
/*
 * Map a rate value from a SuppRates/ExtSuppRates IE to its rate index.
 * The basic-rate flag (bit 7) is ignored; unknown rates map to index 0.
 */
unsigned char
DATARATEbyGetRateIdx (
	unsigned char byRate
	)
{
	unsigned char idx = 0;

	byRate &= 0x7F; /* strip the basic-rate flag */

	while (idx < MAX_RATE) {
		if (acbyIERate[idx] == byRate)
			return idx;
		idx++;
	}
	return 0;
}
/*+
*
* Routine Description:
* Rate fallback Algorithm Implementation
*
* Parameters:
* In:
* pDevice - Pointer to the adapter
* psNodeDBTable - Pointer to Node Data Base
* Out:
* none
*
* Return Value: none
*
-*/
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
/*+
*
* Description:
* Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
*
* Parameters:
* In:
* unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
/*
 * Map a rate value from a SuppRates/ExtSuppRates IE to its rate index
 * (wide variant of DATARATEbyGetRateIdx).  The basic-rate flag (bit 7)
 * is ignored; unknown rates map to index 0.
 */
unsigned short
wGetRateIdx(
	unsigned char byRate
	)
{
	unsigned short idx;

	byRate &= 0x7F; /* strip the basic-rate flag */

	for (idx = 0; idx < MAX_RATE; idx++)
		if (acbyIERate[idx] == byRate)
			break;

	return (idx < MAX_RATE) ? idx : 0;
}
/*+
*
* Description:
* Parsing the highest basic & support rate in rate field of frame.
*
* Parameters:
* In:
* pDevice - Pointer to the adapter
* pItemRates - Pointer to Rate field defined in 802.11 spec.
* pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec.
* Out:
* pwMaxBasicRate - Maximum Basic Rate
* pwMaxSuppRate - Maximum Supported Rate
* pbyTopCCKRate - Maximum Basic Rate in CCK mode
* pbyTopOFDMRate - Maximum Basic Rate in OFDM mode
*
* Return Value: none
*
-*/
/*
 * Parse the (Ext)SuppRates IEs of a frame: record every supported rate
 * in *pwSuppRate, optionally fold basic rates into the device's basic
 * rate set, and report the top CCK/OFDM basic rates and the maximum
 * basic/supported rate indices.  May flip byPacketType 11GB -> 11GA
 * and refresh the RSPINF registers when the basic rate set changed.
 */
void
RATEvParseMaxRate (
void *pDeviceHandler,
PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates,
bool bUpdateBasicRate,
unsigned short *pwMaxBasicRate,
unsigned short *pwMaxSuppRate,
unsigned short *pwSuppRate,
unsigned char *pbyTopCCKRate,
unsigned char *pbyTopOFDMRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
unsigned int ii;
unsigned char byHighSuppRate = 0;
unsigned char byRate = 0;
/* Remember the basic-rate bitmap so we can detect changes below. */
unsigned short wOldBasicRate = pDevice->wBasicRate;
unsigned int uRateLen;
if (pItemRates == NULL)
return;
*pwSuppRate = 0;
uRateLen = pItemRates->len;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen);
/* Clamp the IE length to the PHY-appropriate maximum. */
if (pDevice->eCurrentPHYType != PHY_TYPE_11B) {
if (uRateLen > WLAN_RATES_MAXLEN)
uRateLen = WLAN_RATES_MAXLEN;
} else {
if (uRateLen > WLAN_RATES_MAXLEN_11B)
uRateLen = WLAN_RATES_MAXLEN_11B;
}
/* Walk the SuppRates IE: collect basic rates and track the highest
 * supported rate value. */
for (ii = 0; ii < uRateLen; ii++) {
byRate = (unsigned char)(pItemRates->abyRates[ii]);
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
(bUpdateBasicRate == true)) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
byRate = (unsigned char)(pItemRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
byHighSuppRate = byRate;
*pwSuppRate |= (1<<wGetRateIdx(byRate));
}
/* Repeat for the ExtSuppRates IE (non-11B PHYs only). */
if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
(pDevice->eCurrentPHYType != PHY_TYPE_11B)) {
unsigned int uExtRateLen = pItemExtRates->len;
if (uExtRateLen > WLAN_RATES_MAXLEN)
uExtRateLen = WLAN_RATES_MAXLEN;
for (ii = 0; ii < uExtRateLen ; ii++) {
byRate = (unsigned char)(pItemExtRates->abyRates[ii]);
// select highest basic rate
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
byRate = (unsigned char)(pItemExtRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
byHighSuppRate = byRate;
*pwSuppRate |= (1<<wGetRateIdx(byRate));
//DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", wGetRateIdx(byRate), byRate));
}
} //if(pItemExtRates != NULL)
/* 11GB with an OFDM basic rate is effectively 11GA. */
if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice)) {
pDevice->byPacketType = PK_TYPE_11GA;
}
*pbyTopCCKRate = pDevice->byTopCCKBasicRate;
*pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
*pwMaxSuppRate = wGetRateIdx(byHighSuppRate);
if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB))
*pwMaxBasicRate = pDevice->byTopCCKBasicRate;
else
*pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
/* Re-program response-info registers if the basic rate set changed. */
if (wOldBasicRate != pDevice->wBasicRate)
CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}
/*+
*
* Routine Description:
* Rate fallback Algorithm Implementation
*
* Parameters:
* In:
* pDevice - Pointer to the adapter
* psNodeDBTable - Pointer to Node Data Base
* Out:
* none
*
* Return Value: none
*
-*/
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
/*
 * Auto-rate fallback: periodically pick a new TX rate for a node from
 * its per-rate ok/fail statistics.  Index MAX_RATE of uTxOk/uTxFail
 * holds the aggregate counters; per-rate entries estimate achievable
 * throughput, and the rate is stepped down to the best-scoring rate or
 * up to the highest auto-capable supported rate when the aggregate
 * success ratio is good.  Statistics are reset after each evaluation.
 */
void
RATEvTxRateFallBack (
void *pDeviceHandler,
PKnownNodeDB psNodeDBTable
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
unsigned short wIdxDownRate = 0;
unsigned int ii;
//unsigned long dwRateTable[MAX_RATE] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54};
/* Which rate indices auto-rate may select (9M/6M-index slots excluded). */
bool bAutoRate[MAX_RATE] = {true,true,true,true,false,false,true,true,true,true,true,true};
/* Nominal throughput per rate in 100 kbps units; scaled below by the
 * observed success ratio. */
unsigned long dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
unsigned long dwThroughput = 0;
unsigned short wIdxUpRate = 0;
unsigned long dwTxDiff = 0;
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
// Don't do Fallback when scanning Channel
return;
}
psNodeDBTable->uTimeCount ++;
if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
/* Wait until enough traffic or enough time has accumulated. */
if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
(dwTxDiff < AUTORATE_TXFAIL_CNT) &&
(psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
return;
}
if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) {
psNodeDBTable->uTimeCount = 0;
}
/* Highest supported, auto-capable rate = candidate for stepping up;
 * unsupported rates are removed from the auto set. */
for(ii=0;ii<MAX_RATE;ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
if (bAutoRate[ii] == true) {
wIdxUpRate = (unsigned short) ii;
}
} else {
bAutoRate[ii] = false;
}
}
/* Scale each rate's nominal throughput by its success ratio; CCK
 * failures below 11M are weighted 4x. */
for(ii=0;ii<=psNodeDBTable->wTxDataRate;ii++) {
if ( (psNodeDBTable->uTxOk[ii] != 0) ||
(psNodeDBTable->uTxFail[ii] != 0) ) {
dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
if (ii < RATE_11M) {
psNodeDBTable->uTxFail[ii] *= 4;
}
dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n",
// ii, psNodeDBTable->uTxOk[ii], psNodeDBTable->uTxFail[ii], dwThroughputTbl[ii]);
}
/* Step down to the best-scoring auto rate at or below the current one. */
dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
wIdxDownRate = psNodeDBTable->wTxDataRate;
for(ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
if ( (dwThroughputTbl[ii] > dwThroughput) &&
(bAutoRate[ii]==true) ) {
dwThroughput = dwThroughputTbl[ii];
wIdxDownRate = (unsigned short) ii;
}
}
psNodeDBTable->wTxDataRate = wIdxDownRate;
/* Step up instead when the aggregate success ratio exceeds 4:1. */
if (psNodeDBTable->uTxOk[MAX_RATE]) {
if (psNodeDBTable->uTxOk[MAX_RATE] >
(psNodeDBTable->uTxFail[MAX_RATE] * 4) ) {
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
}else { // adhoc, if uTxOk =0 & uTxFail = 0
if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
//2008-5-8 <add> by chester
TxRate_iwconfig=psNodeDBTable->wTxDataRate;
s_vResetCounter(psNodeDBTable);
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", psNodeDBTable->wTxDataRate, wIdxUpRate, wIdxDownRate);
return;
}
/*+
*
* Description:
* This routine is used to assemble available Rate IE.
*
* Parameters:
* In:
* pDevice
* Out:
*
* Return Value: None
*
-*/
/*
 * Assemble a rate IE: copy into pDstRates, in canonical acbyIERate
 * order, every rate (scanning at most uRateLen canonical entries)
 * that also appears in pSrcRates.  The basic-rate flag of the source
 * entry is preserved.  Returns the number of rates copied.
 */
unsigned char
RATEuSetIE (
	PWLAN_IE_SUPP_RATES pSrcRates,
	PWLAN_IE_SUPP_RATES pDstRates,
	unsigned int uRateLen
	)
{
	unsigned int rate_pos, src_pos, copied = 0;

	if ((pSrcRates == NULL) || (pDstRates == NULL) || (pSrcRates->len == 0))
		return 0;

	for (rate_pos = 0; rate_pos < uRateLen; rate_pos++) {
		for (src_pos = 0; src_pos < pSrcRates->len; src_pos++) {
			/* Compare with the basic-rate flag masked off. */
			if ((pSrcRates->abyRates[src_pos] & 0x7F) == acbyIERate[rate_pos]) {
				pDstRates->abyRates[copied++] = pSrcRates->abyRates[src_pos];
				break;
			}
		}
	}
	return (unsigned char)copied;
}
| gpl-2.0 |
scottellis/linux-pansenti | drivers/net/wireless/ath/ath6kl/htc.c | 179 | 63442 | /*
* Copyright (c) 2007-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "htc_hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
/*
 * Ensure *buf is 4-byte aligned: when it is not, slide the 'len'
 * payload bytes down to the nearest lower 4-byte boundary and update
 * the caller's pointer.
 */
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *aligned;

	if (IS_ALIGNED((unsigned long) *buf, 4))
		return;

	aligned = PTR_ALIGN(*buf - 4, 4);
	memmove(aligned, *buf, len);
	*buf = aligned;
}
/*
 * Fill in the HTC frame header for an outgoing packet.  The buffer
 * pointer is moved back by HTC_HDR_LENGTH so the header occupies the
 * space reserved in front of the payload.
 */
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *htc_hdr;

	packet->buf -= HTC_HDR_LENGTH;
	htc_hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianess? (field may be unaligned, hence put_unaligned) */
	put_unaligned((u16)packet->act_len, &htc_hdr->payld_len);
	htc_hdr->flags = flags;
	htc_hdr->eid = packet->endpoint;
	htc_hdr->ctrl[0] = ctrl0;
	htc_hdr->ctrl[1] = ctrl1;
}
/* Return a control TX packet to the target's free list (htc_lock held briefly). */
static void htc_reclaim_txctrl_buf(struct htc_target *target,
struct htc_packet *pkt)
{
spin_lock_bh(&target->htc_lock);
list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
spin_unlock_bh(&target->htc_lock);
}
/*
 * Pop one control packet from the TX or RX control free list.
 * Returns NULL when the list is empty.  For TX packets the buffer
 * pointer is advanced past the space reserved for the HTC header.
 */
static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *ctrl_pkt = NULL;
	struct list_head *src_list;

	src_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (!list_empty(src_list)) {
		ctrl_pkt = list_first_entry(src_list, struct htc_packet, list);
		list_del(&ctrl_pkt->list);
	}

	spin_unlock_bh(&target->htc_lock);

	if (ctrl_pkt && tx)
		ctrl_pkt->buf = ctrl_pkt->buf_start + HTC_HDR_LENGTH;

	return ctrl_pkt;
}
/*
 * Post-send bookkeeping for one TX packet: clear its completion hook,
 * restore the buffer pointer past the HTC header, and — only when the
 * send failed — return the consumed credits to the distribution pool.
 */
static void htc_tx_comp_update(struct htc_target *target,
struct htc_endpoint *endpoint,
struct htc_packet *packet)
{
packet->completion = NULL;
/* Undo the header prepend done by ath6kl_htc_tx_prep_pkt(). */
packet->buf += HTC_HDR_LENGTH;
if (!packet->status)
return;
ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
packet->status, packet->endpoint, packet->act_len,
packet->info.tx.cred_used);
/* on failure to submit, reclaim credits for this packet */
spin_lock_bh(&target->tx_lock);
endpoint->cred_dist.cred_to_dist +=
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
target->cred_dist_cntxt, &target->cred_dist_list);
/* Redistribute the reclaimed credits among all endpoints. */
ath6k_credit_distribute(target->cred_dist_cntxt,
&target->cred_dist_list,
HTC_CREDIT_DIST_SEND_COMPLETE);
spin_unlock_bh(&target->tx_lock);
}
/* Hand a list of completed TX packets back to the upper layer (no-op when empty). */
static void htc_tx_complete(struct htc_endpoint *endpoint,
struct list_head *txq)
{
if (list_empty(txq))
return;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"send complete ep %d, (%d pkts)\n",
endpoint->eid, get_queue_depth(txq));
ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}
static void htc_tx_comp_handler(struct htc_target *target,
struct htc_packet *packet)
{
struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
struct list_head container;
htc_tx_comp_update(target, endpoint, packet);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
/* do completion */
htc_tx_complete(endpoint, &container);
}
/*
 * Completion callback for an async scatter (bundled) send: propagate
 * the request status to every packet in the bundle, update credit
 * accounting, free the scatter request, and complete all packets.
 */
static void htc_async_tx_scat_complete(struct htc_target *target,
struct hif_scatter_req *scat_req)
{
struct htc_endpoint *endpoint;
struct htc_packet *packet;
struct list_head tx_compq;
int i;
INIT_LIST_HEAD(&tx_compq);
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"htc_async_tx_scat_complete total len: %d entries: %d\n",
scat_req->len, scat_req->scat_entries);
if (scat_req->status)
ath6kl_err("send scatter req failed: %d\n", scat_req->status);
/* All packets in one bundle share a single endpoint. */
packet = scat_req->scat_list[0].packet;
endpoint = &target->endpoint[packet->endpoint];
/* walk through the scatter list and process */
for (i = 0; i < scat_req->scat_entries; i++) {
packet = scat_req->scat_list[i].packet;
if (!packet) {
WARN_ON(1);
/*
 * NOTE(review): returning here skips hif_scatter_req_add(),
 * apparently leaking scat_req and stranding earlier packets —
 * confirm this path is truly unreachable.
 */
return;
}
packet->status = scat_req->status;
htc_tx_comp_update(target, endpoint, packet);
list_add_tail(&packet->list, &tx_compq);
}
/* free scatter request */
hif_scatter_req_add(target->dev->ar, scat_req);
/* complete all packets */
htc_tx_complete(endpoint, &tx_compq);
}
/*
 * Write one HTC packet (header already prepended) to the device mbox.
 * A packet without a completion callback is sent synchronously and its
 * buffer pointer restored here; otherwise the write is queued async
 * and completion handling restores it.  Returns the HIF write status.
 */
static int ath6kl_htc_tx_issue(struct htc_target *target,
struct htc_packet *packet)
{
int status;
bool sync = false;
u32 padded_len, send_len;
if (!packet->completion)
sync = true;
send_len = packet->act_len + HTC_HDR_LENGTH;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
__func__, send_len, sync ? "sync" : "async");
/* Pad to the device's block size for mbox transfers. */
padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
padded_len,
target->dev->ar->mbox_info.htc_addr,
sync ? "sync" : "async");
if (sync) {
status = hif_read_write_sync(target->dev->ar,
target->dev->ar->mbox_info.htc_addr,
packet->buf, padded_len,
HIF_WR_SYNC_BLOCK_INC);
packet->status = status;
/* Undo the header prepend for the synchronous caller. */
packet->buf += HTC_HDR_LENGTH;
} else
status = hif_write_async(target->dev->ar,
target->dev->ar->mbox_info.htc_addr,
packet->buf, padded_len,
HIF_WR_ASYNC_BLOCK_INC, packet);
return status;
}
/*
 * Reserve the TX credits needed for a 'len'-byte transfer on endpoint
 * 'eid', seeking more from the distribution context when the endpoint
 * is short.  On success *req_cred holds the credits consumed and
 * HTC_FLAGS_NEED_CREDIT_UPDATE may be set in *flags when the endpoint
 * is running low.  Returns 0 on success, -EINVAL when credits cannot
 * be obtained (caller leaves the packet queued).
 */
static int htc_check_credits(struct htc_target *target,
struct htc_endpoint *ep, u8 *flags,
enum htc_endpoint_id eid, unsigned int len,
int *req_cred)
{
/* One credit per tgt_cred_sz bytes, minimum one. */
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
*req_cred, ep->cred_dist.credits);
if (ep->cred_dist.credits < *req_cred) {
/* Control endpoint cannot borrow credits. */
if (eid == ENDPOINT_0)
return -EINVAL;
/* Seek more credits */
ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
target->cred_dist_cntxt, &ep->cred_dist);
ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
ep->cred_dist.seek_cred = 0;
if (ep->cred_dist.credits < *req_cred) {
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"not enough credits for ep %d - leaving packet in queue\n",
eid);
return -EINVAL;
}
}
ep->cred_dist.credits -= *req_cred;
ep->ep_st.cred_cosumd += *req_cred;
/* When we are getting low on credits, ask for more */
if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
ep->cred_dist.seek_cred =
ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
target->cred_dist_cntxt, &ep->cred_dist);
ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
/* see if we were successful in getting more */
if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
/* tell the target we need credits ASAP! */
*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
}
}
return 0;
}
/*
 * Move packets from the endpoint's TX queue to the caller's queue for
 * as long as credits can be reserved; each moved packet is stamped
 * with its credit cost, send flags, sequence number and completion
 * callback.  Caller holds target->tx_lock.
 */
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
struct htc_endpoint *endpoint,
struct list_head *queue)
{
int req_cred;
u8 flags;
struct htc_packet *packet;
unsigned int len;
while (true) {
flags = 0;
if (list_empty(&endpoint->txq))
break;
packet = list_first_entry(&endpoint->txq, struct htc_packet,
list);
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"got head pkt:0x%p , queue depth: %d\n",
packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
packet->act_len + HTC_HDR_LENGTH);
/* Stop at the first packet we cannot fund with credits. */
if (htc_check_credits(target, endpoint, &flags,
packet->endpoint, len, &req_cred))
break;
/* now we can fully move onto caller's queue */
packet = list_first_entry(&endpoint->txq, struct htc_packet,
list);
list_move_tail(&packet->list, queue);
/* save the number of credits this packet consumed */
packet->info.tx.cred_used = req_cred;
/* all TX packets are handled asynchronously */
packet->completion = htc_tx_comp_handler;
packet->context = target;
endpoint->ep_st.tx_issued += 1;
/* save send flags */
packet->info.tx.flags = flags;
packet->info.tx.seqno = endpoint->seqno;
endpoint->seqno++;
}
}
/* See if the padded tx length falls on a credit boundary */
/*
 * If the padded TX length does not land on a credit boundary, try to
 * grow it with dummy padding (at most 255 bytes) so bundling can use
 * whole credits.
 *
 * Returns the number of pad bytes added (0 when *len already falls on
 * a boundary), or -1 when the endpoint has bundle padding disabled or
 * the required padding is out of range (send non-bundled instead).
 */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int partial = *len % cred_sz;
	int pad;

	if (partial == 0)
		return 0;	/* already on a credit boundary */

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * A frame shorter than one credit is padded up to a full credit;
	 * otherwise pad by the size of the partially used final credit.
	 */
	pad = *len < cred_sz ? (cred_sz - *len) : partial;

	if (pad <= 0 || pad > 255)
		return -1;	/* too much padding; send as non-bundled */

	*len += pad;
	return pad;
}
/*
 * Pull up to n_scat packets off 'queue' into the scatter request,
 * preparing each HTC header and 4-byte-aligning its buffer.  Stops
 * when the bundle byte budget is exhausted or a packet needs invalid
 * credit padding.  If fewer than HTC_MIN_HTC_MSGS_TO_BUNDLE packets
 * end up in the list the setup is rolled back and -EAGAIN returned.
 */
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
struct htc_endpoint *endpoint,
struct hif_scatter_req *scat_req,
int n_scat,
struct list_head *queue)
{
struct htc_packet *packet;
int i, len, rem_scat, cred_pad;
int status = 0;
rem_scat = target->max_tx_bndl_sz;
for (i = 0; i < n_scat; i++) {
scat_req->scat_list[i].packet = NULL;
if (list_empty(queue))
break;
packet = list_first_entry(queue, struct htc_packet, list);
len = CALC_TXRX_PADDED_LEN(target,
packet->act_len + HTC_HDR_LENGTH);
/* Pad so the entry consumes whole credits (may grow len). */
cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
&len, endpoint);
if (cred_pad < 0 || rem_scat < len) {
status = -ENOSPC;
break;
}
rem_scat -= len;
/* now remove it from the queue */
list_del(&packet->list);
scat_req->scat_list[i].packet = packet;
/* prepare packet and flag message as part of a send bundle */
ath6kl_htc_tx_prep_pkt(packet,
packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
cred_pad, packet->info.tx.seqno);
/* Make sure the buffer is 4-byte aligned */
ath6kl_htc_tx_buf_align(&packet->buf,
packet->act_len + HTC_HDR_LENGTH);
scat_req->scat_list[i].buf = packet->buf;
scat_req->scat_list[i].len = len;
scat_req->len += len;
scat_req->scat_entries++;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
i, packet, len, rem_scat);
}
/* Roll back scatter setup in case of any failure */
if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
for (i = scat_req->scat_entries - 1; i >= 0; i--) {
packet = scat_req->scat_list[i].packet;
if (packet) {
/* Undo the header prepend and requeue at the head. */
packet->buf += HTC_HDR_LENGTH;
list_add(&packet->list, queue);
}
}
return -EAGAIN;
}
return status;
}
/*
* Drain a queue and send as bundles this function may return without fully
* draining the queue when
*
* 1. scatter resources are exhausted
* 2. a message that will consume a partial credit will stop the
* bundling process early
* 3. we drop below the minimum number of messages for a bundle
*/
/*
 * Drain 'queue' by submitting async scatter (bundled) requests until a
 * stop condition in the comment block above applies.  On return
 * *sent_bundle / *n_bundle_pkts report how many bundles and packets
 * were submitted.
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
struct list_head *queue,
int *sent_bundle, int *n_bundle_pkts)
{
struct htc_target *target = endpoint->target;
struct hif_scatter_req *scat_req = NULL;
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
int status;
while (true) {
status = 0;
n_scat = get_queue_depth(queue);
n_scat = min(n_scat, target->msg_per_bndl_max);
if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
/* not enough to bundle */
break;
scat_req = hif_scatter_req_get(target->dev->ar);
if (!scat_req) {
/* no scatter resources */
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"no more scatter resources\n");
break;
}
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
n_scat);
scat_req->len = 0;
scat_req->scat_entries = 0;
status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
scat_req, n_scat,
queue);
if (status == -EAGAIN) {
/* Too few packets for a bundle; return the request. */
hif_scatter_req_add(target->dev->ar, scat_req);
break;
}
/* send path is always asynchronous */
scat_req->complete = htc_async_tx_scat_complete;
n_sent_bundle++;
tot_pkts_bundle += scat_req->scat_entries;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"send scatter total bytes: %d , entries: %d\n",
scat_req->len, scat_req->scat_entries);
ath6kldev_submit_scat_req(target->dev, scat_req, false);
/*
 * NOTE(review): the submit return value is discarded, so
 * 'status' is still 0 here and this break can never trigger.
 * Consider assigning the return of ath6kldev_submit_scat_req()
 * to status — confirm against its error semantics first.
 */
if (status)
break;
}
*sent_bundle = n_sent_bundle;
*n_bundle_pkts = tot_pkts_bundle;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
__func__, n_sent_bundle);
return;
}
/*
 * Drain the endpoint's TX queue while credits are available, sending
 * bundles when possible and single packets otherwise.  tx_proc_cnt
 * guards against re-entrancy: a nested caller just bumps the count and
 * leaves, letting the active drainer continue.
 */
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
struct htc_endpoint *endpoint)
{
struct list_head txq;
struct htc_packet *packet;
int bundle_sent;
int n_pkts_bundle;
spin_lock_bh(&target->tx_lock);
endpoint->tx_proc_cnt++;
if (endpoint->tx_proc_cnt > 1) {
/* Another context is already draining this endpoint. */
endpoint->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock);
ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
return;
}
/*
 * drain the endpoint TX queue for transmission as long
 * as we have enough credits.
 */
INIT_LIST_HEAD(&txq);
while (true) {
if (list_empty(&endpoint->txq))
break;
/* Reserve credits and pull sendable packets onto txq. */
ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
if (list_empty(&txq))
break;
spin_unlock_bh(&target->tx_lock);
bundle_sent = 0;
n_pkts_bundle = 0;
while (true) {
/* try to send a bundle on each pass */
if ((target->tx_bndl_enable) &&
(get_queue_depth(&txq) >=
HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
int temp1 = 0, temp2 = 0;
ath6kl_htc_tx_bundle(endpoint, &txq,
&temp1, &temp2);
bundle_sent += temp1;
n_pkts_bundle += temp2;
}
if (list_empty(&txq))
break;
/* Leftovers go out as individual packets. */
packet = list_first_entry(&txq, struct htc_packet,
list);
list_del(&packet->list);
ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
0, packet->info.tx.seqno);
ath6kl_htc_tx_issue(target, packet);
}
spin_lock_bh(&target->tx_lock);
endpoint->ep_st.tx_bundles += bundle_sent;
endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
}
endpoint->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock);
}
/*
 * Queue one packet on the endpoint's TX queue and kick transmission.
 * When the queue is already at max depth the endpoint's tx_full
 * callback may veto the packet (HTC_SEND_FULL_DROP), in which case
 * false is returned and the packet is not queued.
 */
static bool ath6kl_htc_tx_try(struct htc_target *target,
struct htc_endpoint *endpoint,
struct htc_packet *tx_pkt)
{
struct htc_ep_callbacks ep_cb;
int txq_depth;
bool overflow = false;
ep_cb = endpoint->ep_cb;
spin_lock_bh(&target->tx_lock);
txq_depth = get_queue_depth(&endpoint->txq);
spin_unlock_bh(&target->tx_lock);
if (txq_depth >= endpoint->max_txq_depth)
overflow = true;
if (overflow)
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
endpoint->eid, overflow, txq_depth,
endpoint->max_txq_depth);
if (overflow && ep_cb.tx_full) {
/* Let the endpoint owner decide whether to drop. */
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"indicating overflowed tx packet: 0x%p\n", tx_pkt);
if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
HTC_SEND_FULL_DROP) {
endpoint->ep_st.tx_dropped += 1;
return false;
}
}
spin_lock_bh(&target->tx_lock);
list_add_tail(&tx_pkt->list, &endpoint->txq);
spin_unlock_bh(&target->tx_lock);
ath6kl_htc_tx_from_queue(target, endpoint);
return true;
}
/*
 * Walk the credit distribution list (priority order) and restart
 * transmission on any endpoint that still has packets queued — used
 * after credits have been redistributed.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
struct htc_endpoint *endpoint;
struct htc_endpoint_credit_dist *cred_dist;
/*
 * Run through the credit distribution list to see if there are
 * packets queued. NOTE: no locks need to be taken since the
 * distribution list is not dynamic (cannot be re-ordered) and we
 * are not modifying any state.
 */
list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
spin_lock_bh(&target->tx_lock);
if (!list_empty(&endpoint->txq)) {
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"ep %d has %d credits and %d packets in tx queue\n",
cred_dist->endpoint,
endpoint->cred_dist.credits,
get_queue_depth(&endpoint->txq));
/* Drop the lock: tx_from_queue takes it itself. */
spin_unlock_bh(&target->tx_lock);
/*
 * Try to start the stalled queue, this list is
 * ordered by priority. If there are credits
 * available the highest priority queue will get a
 * chance to reclaim credits from lower priority
 * ones.
 */
ath6kl_htc_tx_from_queue(target, endpoint);
spin_lock_bh(&target->tx_lock);
}
spin_unlock_bh(&target->tx_lock);
}
}
/*
 * Send the HTC "setup complete" control message to the target on
 * endpoint 0, ending the setup phase.
 *
 * HTC v2.1+ targets get the extended message, which also advertises
 * host-side RX bundling when the target reported a non-zero
 * message-per-bundle capability; older targets get the legacy message.
 * The transfer is fully synchronous and the control buffer is always
 * reclaimed before returning.
 *
 * Returns 0 on success, -ENOMEM if no control buffer is available, or
 * a negative error from the TX issue path.
 */
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt;
	int status;

	send_pkt = htc_get_control_buf(target, true);
	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
						target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;

		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	/*
	 * send_pkt is provably non-NULL here (checked right after
	 * allocation above), so the former "if (send_pkt != NULL)"
	 * guard was redundant and has been removed.
	 */
	htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}
/*
 * Build the credit distribution list in the caller-specified priority
 * order.  Endpoint 0 (control) always heads the list; the remaining
 * entries are appended by matching each service ID in srvc_pri_order[]
 * against the connected endpoints.  A service ID with no matching
 * endpoint is a setup error (WARN and bail out).
 */
void ath6kl_htc_set_credit_dist(struct htc_target *target,
				struct htc_credit_state_info *cred_dist_cntxt,
				u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *ep;
	int idx, eid;

	target->cred_dist_cntxt = cred_dist_cntxt;

	/* the control endpoint is always first on the list */
	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (idx = 0; idx < list_len; idx++) {
		for (eid = ENDPOINT_1; eid < ENDPOINT_MAX; eid++) {
			ep = &target->endpoint[eid];
			if (ep->svc_id == srvc_pri_order[idx]) {
				list_add_tail(&ep->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		/* loop ran off the end: no endpoint serves this ID */
		if (eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}
int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
{
struct htc_endpoint *endpoint;
struct list_head queue;
ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
"htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
packet->endpoint, packet->buf, packet->act_len);
if (packet->endpoint >= ENDPOINT_MAX) {
WARN_ON(1);
return -EINVAL;
}
endpoint = &target->endpoint[packet->endpoint];
if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
-ECANCELED : -ENOSPC;
INIT_LIST_HEAD(&queue);
list_add(&packet->list, &queue);
htc_tx_complete(endpoint, &queue);
}
return 0;
}
/* flush endpoint TX queue */
/* flush endpoint TX queue */
/*
 * Remove packets matching @tag (or all, with HTC_TX_PACKET_TAG_ALL)
 * from the endpoint's TX queue and complete them to their owners with
 * -ECANCELED.  Completion callbacks run without tx_lock held.
 */
void ath6kl_htc_flush_txep(struct htc_target *target,
			   enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	/* move matching packets off the live queue under the lock */
	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	/* complete each flushed packet individually, outside the lock */
	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_TRC,
			"flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
			packet, packet->act_len,
			packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}
/*
 * Flush the TX queues of every connected endpoint, completing all
 * pending packets with -ECANCELED.
 */
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *ep;
	int eid;

	dump_cred_dist_stats(target);

	for (eid = ENDPOINT_0; eid < ENDPOINT_MAX; eid++) {
		ep = &target->endpoint[eid];
		/* skip endpoints with no service bound to them */
		if (ep->svc_id == 0)
			continue;
		ath6kl_htc_flush_txep(target, eid, HTC_TX_PACKET_TAG_ALL);
	}
}
/*
 * Mark an endpoint active or inactive for credit distribution.  When
 * the flag actually changes, the credit distribution function is rerun
 * (under tx_lock, per the distribution contract) and, on deactivation,
 * stalled queues are given a chance to drain with reclaimed credits.
 */
void ath6kl_htc_indicate_activity_change(struct htc_target *target,
					 enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	/* only redistribute when the active state really changes */
	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* deactivation may have freed credits for other endpoints */
	if (dist && !active)
		htc_chk_ep_txq(target);
}
/* HTC Rx */
/*
 * Bump per-endpoint RX statistics: every call counts one received
 * packet; one lookahead counts as a plain lookahead hit, more than one
 * as a bundled lookahead report.
 */
static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;

	if (n_look_ahds <= 0)
		return;

	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else
		endpoint->ep_st.rx_bundle_lkahd++;
}
/*
 * Check a received frame length against the per-endpoint limit: the
 * control endpoint uses small buffers, data endpoints may receive up
 * to an A-MSDU sized frame.
 */
static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	int limit;

	if (eid == target->dev->ar->ctrl_ep)
		limit = ATH6KL_BUFFER_SIZE;
	else
		limit = ATH6KL_AMSDU_BUFFER_SIZE;

	return len <= limit;
}
static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
struct list_head queue;
INIT_LIST_HEAD(&queue);
list_add_tail(&packet->list, &queue);
return ath6kl_htc_add_rxbuf_multiple(target, &queue);
}
/*
 * Return a finished RX packet either to its owner (packets flagged
 * HTC_RX_PKT_NO_RECYCLE were allocated by the endpoint's rx_allocthresh
 * callback and must be handed back through the rx callback with
 * -ECANCELED) or back onto the endpoint's free RX buffer pool.
 */
static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		/*
		 * target already has the correct type; the former
		 * (void *) cast only defeated type checking.
		 */
		htc_add_rxbuf(target, packet);
	}
}
/*
 * Put a control RX buffer back on the dedicated free list; the list is
 * guarded by htc_lock.
 */
static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}
/*
 * Synchronously read one packet of rx_len bytes from the HTC mailbox
 * into packet->buf.  The mailbox transfer is always padded, so the
 * buffer must be able to hold the padded length.  Returns 0 on success
 * or a negative error, which is also recorded in packet->status.
 */
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
	int status;

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr, "sync");

	/* blocking fixed-address read from the HTC mailbox */
	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);
	packet->status = status;

	return status;
}
/*
 * RX optimization: if the lookahead shows another packet destined for
 * the same endpoint and we still hold free RX buffers for it, hint to
 * the callback that more single packets can be fetched.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid != packet->endpoint)
		return;

	if (!list_empty(&endpoint->rx_bufq))
		packet->info.rx.indicat_flags |=
			HTC_RX_FLAGS_INDICATE_MORE_PKTS;
}
static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
if (ep_cb.rx_refill_thresh > 0) {
spin_lock_bh(&endpoint->target->rx_lock);
if (get_queue_depth(&endpoint->rx_bufq)
< ep_cb.rx_refill_thresh) {
spin_unlock_bh(&endpoint->target->rx_lock);
ep_cb.rx_refill(endpoint->target, endpoint->eid);
return;
}
spin_unlock_bh(&endpoint->target->rx_lock);
}
}
/*
 * Allocate/claim RX buffers for n_msg messages described by the
 * lookahead header and append them to @queue.  rx_lock is dropped
 * around user callbacks (rx_allocthresh / rx_refill) and reacquired.
 * Returns 0, -EINVAL on a bad frame length, -ENOSPC when buffers run
 * out (the receiver is flagged as waiting), or -ECANCELED while HTC is
 * stopping.  Packets already placed on @queue must be freed by the
 * caller on failure.
 */
/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	/* padded on-the-wire length of one message, from the lookahead */
	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* drop rx_lock around the user allocation callback */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			/* re-check: refill may or may not have helped */
			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* out of buffers: stall the receiver until refilled */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/* only the first packet of a bundle has a known
			 * header; the rest are refreshed after the fetch */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}
/*
 * Validate each lookahead in lk_ahds[0..msg) and allocate RX packets
 * for the message (or message bundle) it describes, appending them to
 * @queue.  On validation failure everything already queued is
 * reclaimed and the error returned; an -ENOSPC from the setup step is
 * converted to success so that partially allocated buffers can still
 * be used to receive partial data.
 */
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailabilty of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	/* on error, reclaim everything that was queued so far */
	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
/*
 * RX callback for the pseudo control endpoint (endpoint 0).  After the
 * setup phase no payload is expected here, so any non-empty message is
 * logged as unexpected.  The buffer is always returned to the control
 * buffer pool.
 */
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		/* never consumed; put it straight back on the free list */
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
				"Unexpected ENDPOINT 0 Message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
/*
 * Process a credit report from the target: account each entry's
 * returned credits against per-endpoint statistics, give endpoint 0
 * its credits directly, mark all other credits for redistribution, and
 * rerun the credit distribution (under tx_lock) when anything was
 * returned.  Finally, restart any stalled TX queues.
 */
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		/* track where the report came from, for statistics */
		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operations note, this is done with the lock held
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* fresh credits may unblock stalled endpoint queues */
	if (tot_credits)
		htc_chk_ep_txq(target);
}
/*
 * Dispatch one trailer record: credit reports are fed to credit
 * processing, lookahead reports (single or bundled) are copied into
 * next_lk_ahds / *n_lk_ahds when the caller asked for them.  Unknown
 * record IDs are logged and skipped.  Returns 0 or -EINVAL on a
 * malformed record.
 */
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		/* pre_valid must be the bitwise complement of post_valid
		 * for the lookahead bytes to be trusted */
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					"", record_buf, record->len);

			/* one 4-byte lookahead per bundled message */
			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}
/*
 * Walk the trailer area at the end of a received HTC frame and parse
 * each record it contains via htc_parse_trailer().  Records are
 * byte-aligned (header followed by record->len bytes of payload).
 * Returns 0, or -ENOMEM / the parser's error on a truncated or
 * malformed trailer (the raw bytes are dumped for diagnosis).
 */
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
			buf, len);

	/* remember the full span for the error dump below */
	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
				"", orig_buf, orig_len);

	return status;
}
/*
 * Validate and strip the HTC header of a received packet: refresh the
 * expected header/length for packets fetched blindly as part of a
 * bundle, verify the header matches the recorded lookahead, process an
 * appended trailer (which may yield new lookaheads), and finally
 * advance packet->buf past the header.  Returns 0 or -ENOMEM on any
 * header/lookahead/trailer inconsistency.
 */
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
			packet->buf, packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	/* first 4 bytes of the frame double as the lookahead value */
	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	/* the header must match the lookahead the target advertised */
	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		/* ctrl[0] holds the trailer length, at the frame's tail */
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		/* the trailer is not part of the application payload */
		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				"", packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg", "",
					packet->buf, packet->act_len);
	}

	return status;
}
/*
 * Deliver a completed RX packet to the endpoint's registered receive
 * callback.
 */
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc calling ep %d recv callback on packet 0x%p\n",
		   endpoint->eid, packet);

	endpoint->ep_cb.rx(endpoint->target, packet);
}
/*
 * Fetch several RX packets from @rxq in one scatter request.  Packets
 * are packed until the bundle size limit is hit; all but the very last
 * packet of a complete bundle have their lookaheads ignored.  Fetched
 * packets move to @sync_compq and *n_pkt_fetched is set on success.
 * If no scatter request is available, returns 0 with *n_pkt_fetched
 * untouched so the caller falls back to single-packet fetches.
 */
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "%s(): (numpackets: %d , actual : %d)\n",
		   __func__, get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	/* no scatter resources: report success with zero packets fetched
	 * so the caller uses the synchronous single-packet path */
	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		/* bundle is full; put the packet back for the next round */
		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packet 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle the last packet
			 * however can have it's lookahead used
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}
/*
 * Process every fetched packet on @comp_pktq: strip/validate its HTC
 * header (which may refresh lk_ahds / *n_lk_ahd from a trailer), set
 * the "more packets" indication flag, update statistics, and deliver
 * the packet to its endpoint's RX callback.  Stops and returns the
 * error on the first bad header.
 */
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		list_del(&packet->list);
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}
/*
 * Fetch every packet on @rx_pktq from the device, preferring bundled
 * scatter transfers when enabled and more than one packet is queued,
 * and falling back to synchronous single-packet reads.  Fetched
 * packets are moved to @comp_pktq.  Returns 0 or the first fetch
 * error.
 */
static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      comp_pktq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				return status;

			/* leftovers mean the bundle was split */
			if (!list_empty(rx_pktq))
				part_bundle = true;
		}

		if (!fetched_pkts) {
			struct htc_packet *packet;

			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			list_del(&packet->list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_empty(rx_pktq))
				/*
				 * look_aheads in all packet
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);
			if (status)
				return status;

			list_add_tail(&packet->list, comp_pktq);
		}
	}

	return status;
}
/*
 * Main RX pump, driven from IRQ processing: starting from the supplied
 * lookahead(s), allocate buffers, fetch the pending message(s), and
 * process them, looping while the processed trailers yield fresh
 * lookaheads.  *num_pkts is set to the total number of packets
 * fetched.  On failure, any allocated-but-unfetched packets are
 * reclaimed; the receiver is blocked when HTC is stopping or the host
 * has run out of RX buffers.
 */
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead[], int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	*num_pkts = 0;

	/*
	 * Initialize the work queues up front: the error path below walks
	 * both lists, and the first loop iteration could previously break
	 * out (bad endpoint id) before ever reaching the per-iteration
	 * INIT_LIST_HEAD calls, leaving the error path to traverse
	 * uninitialized stack list heads.
	 */
	INIT_LIST_HEAD(&rx_pktq);
	INIT_LIST_HEAD(&comp_pktq);

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing
	 */
	memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));

	while (true) {

		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;

		/* validate the id before using it to index the endpoint
		 * array (previously the address was computed first) */
		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		endpoint = &target->endpoint[id];

		/* reset the per-iteration work queues */
		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets indicated by the
		 * look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check again
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);
		/*
		 * Cleanup any packets we allocated but didn't use to
		 * actually fetch any packets.
		 */
		list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kldev_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kldev_rx_control(target->dev, false);
	}

	*num_pkts = n_fetched;

	return status;
}
/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY. At init messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	/* poll the mailbox until a lookahead appears or the wait times out */
	if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
				      HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	/* only control-endpoint traffic is valid during init */
	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
/*
 * Queue a list of RX buffers on the endpoint identified by the first
 * packet's endpoint field (all packets are assumed to target the same
 * endpoint).  If the receiver was stalled waiting for buffers on that
 * endpoint, it is unblocked.  While HTC is stopping, the buffers are
 * instead completed back to the owner with -ECANCELED.
 *
 * NOTE(review): an empty list returns -ENOMEM (arguably -EINVAL), and
 * an out-of-range endpoint returns 0 (success) while the buffers are
 * silently ignored — verify against callers before changing either.
 */
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "receiver was blocked on ep:%d, unblocking.\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kldev_rx_control(target->dev, true);

	return status;
}
/*
 * Drop all free RX buffers queued on every connected endpoint, freeing
 * the underlying skb of each.
 *
 * NOTE(review): rx_lock is dropped and reacquired inside a
 * list_for_each_entry_safe() walk; the cached next pointer (tmp_pkt)
 * is only safe if no one else can touch rx_bufq here — presumably this
 * runs at teardown with RX quiesced, but confirm before relying on it.
 */
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			/* drop the lock around the debug print and free */
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/* pkt_cntxt holds the skb backing this buffer */
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}
/*
 * Connect an HTC service on the target and set up the matching
 * host-side endpoint.
 *
 * HTC_CTRL_RSVD_SVC is a pseudo service handled locally on ENDPOINT_0
 * with no message exchange.  Every other service sends a connect
 * message synchronously and polls the reply via htc_wait_for_ctrl_msg().
 *
 * Returns 0 on success and fills in @conn_resp.  On failure a negative
 * errno is returned (note: every failure is currently reported as
 * -ENOMEM, even protocol errors).
 */
int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc_conn_service, target:0x%p service id:0x%X\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);
		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		/* svc_meta_len is zero after the memset above, so no
		 * service metadata is appended */
		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);
		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);
		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
		    || (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* sanity-check what the target reported */
	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		/* endpoint already claimed by another service */
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_rsvd = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override cred_per_msg calculation, this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the Send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	/* control buffers are reclaimed on both success and failure */
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}
/*
 * Return every endpoint to its power-on state and empty the credit
 * distribution list.  Used before HTC is (re)started.
 */
static void reset_ep_state(struct htc_target *target)
{
	int idx;

	for (idx = ENDPOINT_0; idx < ENDPOINT_MAX; idx++) {
		struct htc_endpoint *ep = &target->endpoint[idx];

		/* clear service binding, limits and statistics */
		ep->svc_id = 0;
		ep->len_max = 0;
		ep->max_txq_depth = 0;
		memset(&ep->cred_dist, 0, sizeof(ep->cred_dist));
		memset(&ep->ep_st, 0, sizeof(ep->ep_st));

		INIT_LIST_HEAD(&ep->rx_bufq);
		INIT_LIST_HEAD(&ep->txq);
		ep->target = target;
	}

	/* reset distribution list */
	INIT_LIST_HEAD(&target->cred_dist_list);
}
/* Return the number of rx buffers currently queued on @endpoint. */
int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int depth;

	spin_lock_bh(&target->rx_lock);
	depth = get_queue_depth(&target->endpoint[endpoint].rx_bufq);
	spin_unlock_bh(&target->rx_lock);

	return depth;
}
/*
 * Negotiate HTC message bundling limits against the host cap, the HIF
 * scatter capability and the extended mailbox width.  Tx bundling is
 * disabled when the credit size is not block aligned.
 */
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	/* HIF has no scatter support: no bundling at all */
	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit bundle what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc bundling allowed. max msg per htc bundle: %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;

	/* Max tx bundle size if limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size the I/O block
		 * padding will spill into the next credit buffer
		 * which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}
/*
 * Wait for the target's HTC READY message, record the credit setup and
 * protocol version it reports, enable message bundling when supported,
 * then connect the pseudo control service on endpoint 0.
 * Returns 0 on success or a negative errno.
 */
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);
	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "target ready: credits: %d credit size: %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
	if (status)
		/* undo the scatter setup done by htc_setup_msg_bndl() */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	/* the READY packet always goes back to the control rx pool */
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}
/*
 * Start HTC, enable interrupts and let the target know
 * host has finished setup.
 *
 * Returns 0 on success or a negative errno; on an interrupt unmask
 * failure HTC is stopped again before returning.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	/* Disable interrupts at the chip level */
	ath6kldev_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
			  target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate to the target of the setup completion */
	status = htc_setup_tx_complete(target);
	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kldev_unmask_intrs(target->dev);
	if (status)
		/* roll back on failure */
		ath6kl_htc_stop(target);

	return status;
}
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	/* set the stopping flag first so in-flight paths stop queueing */
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation, when this
	 * function returns all pending HIF I/O has completed, we can
	 * safely flush the queues.
	 */
	ath6kldev_mask_intrs(target->dev);
	ath6kl_htc_flush_txep_all(target);
	ath6kl_htc_flush_rx_buf(target);
	reset_ep_state(target);
}
/*
 * Allocate and initialise an HTC target instance together with its
 * pool of control tx/rx buffers.  Returns the target, or NULL on any
 * failure (partially built state is torn down via ath6kl_htc_cleanup()).
 */
void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int status = 0, i = 0;
	u32 block_size, ctrl_bufsz;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_create_htc;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	reset_ep_state(target);

	status = ath6kldev_setup(target->dev);
	if (status)
		goto fail_create_htc;

	/* a control buffer must hold a full block or a full control msg,
	 * whichever is larger, plus the HTC header */
	block_size = ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			break;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			break;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

fail_create_htc:
	/* i != NUM_CONTROL_BUFFERS means a buffer allocation failed above */
	if (i != NUM_CONTROL_BUFFERS || status) {
		if (target) {
			ath6kl_htc_cleanup(target);
			target = NULL;
		}
	}

	return target;
}
/* cleanup the HTC instance: free both control buffer pools, the HIF
 * scatter resources, then the device wrapper and the target itself */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}
| gpl-2.0 |
FXITech/u-boot | arch/powerpc/cpu/mpc8220/interrupts.c | 179 | 2015 | /*
* (C) Copyright -2003
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* (C) Copyright 2001
* Josh Huber <huber@mclx.com>, Mission Critical Linux, Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* interrupts.c - just enough support for the decrementer/timer
*/
#include <common.h>
#include <asm/processor.h>
#include <command.h>
/*
 * Program the decrementer reload value so it underflows CONFIG_SYS_HZ
 * times per second.  Always succeeds.
 */
int interrupt_init_cpu (ulong * decrementer_count)
{
	*decrementer_count = get_tbclk () / CONFIG_SYS_HZ;

	return 0;
}
/****************************************************************************/
/*
 * Handle external interrupts
 *
 * No external interrupt sources are serviced on this CPU; just log
 * that one unexpectedly fired.
 */
void external_interrupt (struct pt_regs *regs)
{
	puts ("external_interrupt (oops!)\n");
}
/*
 * Per-tick hook called from the common PowerPC timer interrupt.
 * Nothing CPU-specific to do on MPC8220.  (The redundant bare
 * `return;` at the end of the void function was dropped.)
 */
void timer_interrupt_cpu (struct pt_regs *regs)
{
	/* nothing to do here */
}
/****************************************************************************/
/*
 * Install and free an interrupt handler.
 *
 * Stub: per-vector IRQ handlers are not supported on this CPU.
 */
void irq_install_handler (int vec, interrupt_handler_t * handler, void *arg)
{
}
/* Stub: nothing was installed, so nothing to free. */
void irq_free_handler (int vec)
{
}
/****************************************************************************/
/* 'irqinfo' shell command handler: IRQ bookkeeping is not implemented. */
void
do_irqinfo (cmd_tbl_t * cmdtp, bd_t * bd, int flag, int argc, char * const argv[])
{
	puts ("IRQ related functions are unimplemented currently.\n");
}
| gpl-2.0 |
caglar10ur/linux-2.6.27.y | arch/powerpc/platforms/cell/spufs/context.c | 179 | 4773 | /*
* SPU file system -- SPU context management
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
atomic_t nr_spu_contexts = ATOMIC_INIT(0);
/*
 * Allocate and initialise a new SPU context in the SAVED state.
 * If @gang is non-NULL the context is added to that gang.
 * Returns the context or NULL on allocation/CSA-init failure.
 */
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	/* hold a reference on the creating task's mm; dropped by
	 * spu_forget() */
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
/*
 * kref release callback: tear down a context after its last reference
 * is dropped.  Deactivates it, frees the CSA, detaches it from its
 * gang and releases any profiling private data.
 */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	/* a context must not be destroyed while still on the runqueue */
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx->switch_log);
	kfree(ctx);
}
/* Take an extra reference on @ctx; returned for call chaining. */
struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
/*
 * Drop a reference on @ctx; destroy_spu_context() runs when it was the
 * last one.  Returns nonzero when the context was released.
 */
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}
/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	mm = ctx->owner;
	ctx->owner = NULL;
	/* drop the reference taken via get_task_mm() at creation time */
	mmput(mm);
	spu_release(ctx);
}
/*
 * Zap every userspace mapping of the context's register areas and
 * local store, forcing subsequent accesses to fault and re-resolve.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
	mutex_unlock(&ctx->mapping_lock);
}
/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 *
 * Returns 0 on success or the error from spu_acquire().  If the
 * context was active it is deactivated, and SPU_SCHED_WAS_ACTIVE is
 * recorded so spu_release_saved() can reactivate it later.
 */
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}
/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx: context to unlock
 *
 * Reactivates the context only when spu_acquire_saved() deactivated it
 * and the context is still marked as running spu code.
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
	    test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}
| gpl-2.0 |
wimpknocker/android_kernel_samsung_viennalte | sound/soc/codecs/audience/es705-i2c.c | 435 | 7721 | /*
* es705-i2c.c -- Audience eS705 I2C interface
*
* Copyright 2011 Audience, Inc.
*
* Author: Greg Clemson <gclemson@audience.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <linux/kthread.h>
#include <linux/esxxx.h>
#include "es705.h"
#include "es705-platform.h"
#include "es705-i2c.h"
static int es705_i2c_read(struct es705_priv *es705, void *buf, int len)
{
struct i2c_msg msg[] = {
{
.addr = es705->i2c_client->addr,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
},
};
int rc = 0;
rc = i2c_transfer(es705->i2c_client->adapter, msg, 1);
/*
* i2c_transfer returns number of messages executed. Since we
* are always sending only 1 msg, return value should be 1 for
* success case
*/
if (rc != 1) {
pr_err("%s(): i2c_transfer() failed, rc = %d, msg_len = %d\n",
__func__, rc, len);
return -EIO;
} else {
return 0;
}
}
#define I2C_BUF_SIZE 512
/*
 * Write @len bytes from @buf to the eS705, splitting the payload into
 * I2C messages of at most I2C_BUF_SIZE bytes each.
 * Returns 0 on success or -EIO on the first failed transfer.
 */
static int es705_i2c_write(struct es705_priv *es705, const void *buf, int len)
{
	struct i2c_msg msg[] = {
		{
			.addr = es705->i2c_client->addr,
			.flags = 0,
		},
	};
	int rc = 0;
	int pos = 0;

	while (pos < len) {
		msg[0].len = min(len - pos, I2C_BUF_SIZE);
		/*
		 * The function i2c_master_send() indicates we
		 * can trust the i2c bus interface not to
		 * write in this buffer.
		 */
		msg[0].buf = (void *)(buf + pos);
		rc = i2c_transfer(es705->i2c_client->adapter, msg, 1);
		/* i2c_transfer() returns the number of messages executed */
		if (rc != 1) {
			dev_err(es705->dev, "%s(): i2c_transfer() failed, rc = %d, msg_len = %d\n",
				__func__, rc, len);
			return -EIO;
		}
		pos += msg[0].len;
	}

	return 0;
}
/*
 * Write @len bytes from @buf, then read @match bytes of response into
 * @rspn.  Returns 0 on success or the first failing step's errno.
 */
static int es705_i2c_write_then_read(struct es705_priv *es705,
				     const void *buf, int len,
				     u32 *rspn, int match)
{
	int rc = es705_i2c_write(es705, buf, len);

	if (rc)
		return rc;

	return es705_i2c_read(es705, rspn, match);
}
/*
 * Send one 32-bit command word (big-endian on the wire).  When @sr
 * ("suppress response") is clear, wait out the chip's maximum response
 * time and read the 32-bit reply into *@resp.
 * Returns 0 on success; *@resp is only written on a successful read.
 * NOTE(review): the trailing dev_dbg prints *@resp even when the read
 * failed, i.e. possibly uninitialised caller storage - confirm intent.
 */
static int es705_i2c_cmd(struct es705_priv *es705, u32 cmd, int sr, u32 *resp)
{
	int err;
	u32 rv;

	dev_dbg(es705->dev, "%s(): cmd=0x%08x sr=%i\n", __func__, cmd, sr);

	cmd = cpu_to_be32(cmd);
	err = es705_i2c_write(es705, &cmd, sizeof(cmd));
	if (err || sr)
		return err;

	/* The response must be actively read. Maximum response time
	 * is 10ms.
	 */
	usleep_range(10000, 10500);
	err = es705_i2c_read(es705, &rv, sizeof(rv));
	if (!err)
		*resp = be32_to_cpu(rv);
	dev_dbg(es705->dev, "%s(): resp=0x%08x\n", __func__, *resp);

	return err;
}
/*
 * Put the chip into firmware-download mode: send the 2-byte I2C boot
 * command (big-endian) and verify the expected 2-byte acknowledge.
 * Returns 0 on success or a negative errno.
 *
 * Fixes: usleep_range(1000, 1000) gave the scheduler no slack for
 * timer coalescing (min must be < max per Documentation/timers);
 * the "boot ack pattern" error message was missing its newline.
 */
static int es705_i2c_boot_setup(struct es705_priv *es705)
{
	u16 boot_cmd = ES705_I2C_BOOT_CMD;
	u16 boot_ack = 0;
	char msg[2];
	int rc;

	dev_dbg(es705->dev, "%s(): write ES705_BOOT_CMD = 0x%04x\n",
		__func__, boot_cmd);
	cpu_to_be16s(&boot_cmd);
	memcpy(msg, (char *)&boot_cmd, 2);
	rc = es705->dev_write(es705, msg, 2);
	if (rc < 0) {
		dev_err(es705->dev, "%s(): firmware load failed boot write\n",
			__func__);
		goto es705_boot_setup_failed;
	}

	/* give the chip time to answer; a real range allows coalescing */
	usleep_range(1000, 1100);

	memset(msg, 0, 2);
	rc = es705->dev_read(es705, msg, 2);
	if (rc < 0) {
		dev_err(es705->dev, "%s(): firmware load failed boot ack\n",
			__func__);
		goto es705_boot_setup_failed;
	}

	memcpy((char *)&boot_ack, msg, 2);
	dev_dbg(es705->dev, "%s(): boot_ack = 0x%04x\n", __func__, boot_ack);
	if (boot_ack != ES705_I2C_BOOT_ACK) {
		dev_err(es705->dev, "%s(): firmware load failed boot ack pattern\n",
			__func__);
		rc = -EIO;
		goto es705_boot_setup_failed;
	}

es705_boot_setup_failed:
	return rc;
}
/*
 * Complete firmware download: choose the SYNC command variant matching
 * the loaded firmware type (voicesense overlay vs. standard), give the
 * chip time to come up, then write the SYNC command and read back a
 * response.
 * NOTE(review): @match (1) is passed to dev_read as the byte count and
 * no comparison against sync_rspn is performed - confirm this is the
 * intended readiness check.
 */
static int es705_i2c_boot_finish(struct es705_priv *es705)
{
	u32 sync_cmd;
	u32 sync_rspn;
	int match = 1;
	int rc = 0;

	dev_dbg(es705->dev, "%s(): finish fw download\n", __func__);
	if (es705->es705_power_state == ES705_SET_POWER_STATE_VS_OVERLAY) {
		sync_cmd = (ES705_SYNC_CMD << 16) | ES705_SYNC_INTR_RISING_EDGE;
		dev_dbg(es705->dev, "%s(): FW type : VOICESENSE\n", __func__);
	} else {
		sync_cmd = (ES705_SYNC_CMD << 16) | ES705_SYNC_POLLING;
		dev_dbg(es705->dev, "%s(): fw type : STANDARD\n", __func__);
	}
	sync_rspn = sync_cmd;

	/* Give the chip some time to become ready after firmware download. */
	msleep(20);

	/* finish es705 boot, check es705 readiness */
	rc = es705_i2c_write_then_read(es705, &sync_cmd, sizeof(sync_cmd),
				       &sync_rspn, match);
	if (rc)
		dev_err(es705->dev, "%s(): SYNC fail\n", __func__);

	return rc;
}
/*
 * I2C probe: wire the global es705_priv up with the I2C transport ops,
 * run the common core probe, download firmware and register the codec.
 * Returns 0 on success or a negative errno.
 *
 * Fixes the "snd_soc_regsiter_codec" typo in the debug log message.
 */
static int es705_i2c_probe(struct i2c_client *i2c,
			   const struct i2c_device_id *id)
{
	struct esxxx_platform_data *pdata = i2c->dev.platform_data;
	int rc;

	dev_dbg(&i2c->dev, "%s(): i2c->name = %s\n", __func__, i2c->name);
	es705_priv.i2c_client = i2c;
	if (pdata == NULL) {
		dev_err(&i2c->dev, "%s(): pdata is NULL", __func__);
		rc = -EIO;
		goto pdata_error;
	}

	i2c_set_clientdata(i2c, &es705_priv);

	/* route all device I/O through the I2C transport */
	es705_priv.intf = ES705_I2C_INTF;
	es705_priv.dev_read = es705_i2c_read;
	es705_priv.dev_write = es705_i2c_write;
	es705_priv.dev_write_then_read = es705_i2c_write_then_read;
	es705_priv.boot_setup = es705_i2c_boot_setup;
	es705_priv.boot_finish = es705_i2c_boot_finish;
	es705_priv.cmd = es705_i2c_cmd;
	es705_priv.dev = &i2c->dev;
	es705_priv.streamdev = i2c_streamdev;

	rc = es705_core_probe(&i2c->dev);
	if (rc) {
		dev_err(&i2c->dev, "%s(): es705_core_probe() failed %d\n",
			__func__, rc);
		goto es705_core_probe_error;
	}

	rc = es705_bootup(&es705_priv);
	if (rc) {
		dev_err(&i2c->dev, "%s(): es705_bootup failed %d\n",
			__func__, rc);
		goto bootup_error;
	}

	rc = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_es705, es705_dai,
				    ES705_NUM_CODEC_DAIS);
	dev_dbg(&i2c->dev, "%s(): rc = snd_soc_register_codec() = %d\n",
		__func__, rc);

	return rc;

bootup_error:
es705_core_probe_error:
pdata_error:
	dev_dbg(&i2c->dev, "%s(): exit with error\n", __func__);
	return rc;
}
/*
 * I2C remove: release GPIOs and unregister the codec.
 *
 * Fix: the old code called kfree(i2c_get_clientdata(i2c)), but the
 * client data is the global es705_priv (set in es705_i2c_probe()),
 * not a heap allocation - passing it to kfree() corrupts the
 * allocator.  The kfree() is therefore removed.
 */
static int es705_i2c_remove(struct i2c_client *i2c)
{
	struct esxxx_platform_data *pdata = i2c->dev.platform_data;

	es705_gpio_free(pdata);
	snd_soc_unregister_codec(&i2c->dev);

	return 0;
}
/* Stream device descriptor: streaming reads go over the raw I2C read. */
struct es_stream_device i2c_streamdev = {
	.read = es705_i2c_read,
	.intf = ES705_I2C_INTF,
};
/*
 * Register the eS705 I2C driver and record the active interface type.
 * Returns 0 on success or the i2c_add_driver() error.
 */
int es705_i2c_init(struct es705_priv *es705)
{
	int rc = i2c_add_driver(&es705_i2c_driver);

	if (rc) {
		dev_err(es705->dev, "%s(): i2c_add_driver failed, rc = %d",
			__func__, rc);
		return rc;
	}

	dev_dbg(es705->dev, "%s(): registered as I2C", __func__);
	es705_priv.intf = ES705_I2C_INTF;

	return rc;
}
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id es705_i2c_id[] = {
	{ "es705", 0},
	{ }
};
MODULE_DEVICE_TABLE(i2c, es705_i2c_id);

/* Driver glue; registered from es705_i2c_init(). */
struct i2c_driver es705_i2c_driver = {
	.driver = {
		.name = "es705-codec",
		.owner = THIS_MODULE,
	},
	.probe = es705_i2c_probe,
	.remove = es705_i2c_remove,
	.id_table = es705_i2c_id,
};
MODULE_DESCRIPTION("ASoC ES705 driver");
MODULE_AUTHOR("Greg Clemson <gclemson@audience.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:es705-codec");
| gpl-2.0 |
nowster/linux-ubnt-e200 | drivers/power/reset/xgene-reboot.c | 947 | 2938 | /*
* AppliedMicro X-Gene SoC Reboot Driver
*
* Copyright (c) 2013, Applied Micro Circuits Corporation
* Author: Feng Kan <fkan@apm.com>
* Author: Loc Ho <lho@apm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
* This driver provides system reboot functionality for APM X-Gene SoC.
* For system shutdown, this is board specify. If a board designer
* implements GPIO shutdown, use the gpio-poweroff.c driver.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/stat.h>
#include <linux/slab.h>
/*
 * Per-device driver state.
 * @dev:  bound device, used for logging
 * @csr:  iomapped reboot control register
 * @mask: value written to @csr to trigger the reset
 * @restart_handler: entry in the kernel restart notifier chain
 */
struct xgene_reboot_context {
	struct device *dev;
	void *csr;
	u32 mask;
	struct notifier_block restart_handler;
};
/*
 * Restart-notifier callback: write the reset mask to the SoC reboot
 * CSR and give the hardware up to a second to act.  If we are still
 * running afterwards the reset failed; report it and let the next
 * handler in the chain try.
 */
static int xgene_restart_handler(struct notifier_block *this,
				 unsigned long mode, void *cmd)
{
	struct xgene_reboot_context *ctx =
		container_of(this, struct xgene_reboot_context,
			     restart_handler);

	/* Issue the reboot */
	writel(ctx->mask, ctx->csr);

	mdelay(1000);

	dev_emerg(ctx->dev, "Unable to restart system\n");

	return NOTIFY_DONE;
}
/*
 * Probe: map the reboot CSR, read the optional "mask" property
 * (defaults to all-ones) and hook into the restart notifier chain.
 *
 * Fix: the of_iomap() mapping leaked when register_restart_handler()
 * failed; it is now unmapped on that error path (the context itself
 * is devm-managed).
 */
static int xgene_reboot_probe(struct platform_device *pdev)
{
	struct xgene_reboot_context *ctx;
	struct device *dev = &pdev->dev;
	int err;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->csr = of_iomap(dev->of_node, 0);
	if (!ctx->csr) {
		dev_err(dev, "can not map resource\n");
		return -ENODEV;
	}

	if (of_property_read_u32(dev->of_node, "mask", &ctx->mask))
		ctx->mask = 0xFFFFFFFF;

	ctx->dev = dev;
	ctx->restart_handler.notifier_call = xgene_restart_handler;
	ctx->restart_handler.priority = 128;
	err = register_restart_handler(&ctx->restart_handler);
	if (err) {
		/* don't leak the CSR mapping on failure */
		iounmap(ctx->csr);
		dev_err(dev, "cannot register restart handler (err=%d)\n", err);
	}

	return err;
}
/* Device-tree match table. */
static const struct of_device_id xgene_reboot_of_match[] = {
	{ .compatible = "apm,xgene-reboot" },
	{}
};

/* Platform driver glue; registered from xgene_reboot_init(). */
static struct platform_driver xgene_reboot_driver = {
	.probe = xgene_reboot_probe,
	.driver = {
		.name = "xgene-reboot",
		.of_match_table = xgene_reboot_of_match,
	},
};
/* Register the driver early, at device initcall time. */
static int __init xgene_reboot_init(void)
{
	return platform_driver_register(&xgene_reboot_driver);
}
device_initcall(xgene_reboot_init);
| gpl-2.0 |
Hardslog/grimlock_kernel_asus_tegra3_unified | drivers/hwmon/adt7475.c | 3251 | 47807 | /*
* adt7475 - Thermal sensor driver for the ADT7475 chip and derivatives
* Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
* Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
* Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2009 Jean Delvare <khali@linux-fr.org>
*
* Derived from the lm83 driver by Jean Delvare
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
/* Indexes for the sysfs hooks */
#define INPUT 0
#define MIN 1
#define MAX 2
#define CONTROL 3
#define OFFSET 3
#define AUTOMIN 4
#define THERM 5
#define HYSTERSIS 6
/* These are unique identifiers for the sysfs functions - unlike the
numbers above, these are not also indexes into an array
*/
#define ALARM 9
#define FAULT 10
/* 7475 Common Registers */
#define REG_DEVREV2 0x12 /* ADT7490 only */
#define REG_VTT 0x1E /* ADT7490 only */
#define REG_EXTEND3 0x1F /* ADT7490 only */
#define REG_VOLTAGE_BASE 0x20
#define REG_TEMP_BASE 0x25
#define REG_TACH_BASE 0x28
#define REG_PWM_BASE 0x30
#define REG_PWM_MAX_BASE 0x38
#define REG_DEVID 0x3D
#define REG_VENDID 0x3E
#define REG_DEVID2 0x3F
#define REG_STATUS1 0x41
#define REG_STATUS2 0x42
#define REG_VID 0x43 /* ADT7476 only */
#define REG_VOLTAGE_MIN_BASE 0x44
#define REG_VOLTAGE_MAX_BASE 0x45
#define REG_TEMP_MIN_BASE 0x4E
#define REG_TEMP_MAX_BASE 0x4F
#define REG_TACH_MIN_BASE 0x54
#define REG_PWM_CONFIG_BASE 0x5C
#define REG_TEMP_TRANGE_BASE 0x5F
#define REG_PWM_MIN_BASE 0x64
#define REG_TEMP_TMIN_BASE 0x67
#define REG_TEMP_THERM_BASE 0x6A
#define REG_REMOTE1_HYSTERSIS 0x6D
#define REG_REMOTE2_HYSTERSIS 0x6E
#define REG_TEMP_OFFSET_BASE 0x70
#define REG_CONFIG2 0x73
#define REG_EXTEND1 0x76
#define REG_EXTEND2 0x77
#define REG_CONFIG3 0x78
#define REG_CONFIG5 0x7C
#define REG_CONFIG4 0x7D
#define REG_STATUS4 0x81 /* ADT7490 only */
#define REG_VTT_MIN 0x84 /* ADT7490 only */
#define REG_VTT_MAX 0x86 /* ADT7490 only */
#define VID_VIDSEL 0x80 /* ADT7476 only */
#define CONFIG2_ATTN 0x20
#define CONFIG3_SMBALERT 0x01
#define CONFIG3_THERM 0x02
#define CONFIG4_PINFUNC 0x03
#define CONFIG4_MAXDUTY 0x08
#define CONFIG4_ATTN_IN10 0x30
#define CONFIG4_ATTN_IN43 0xC0
#define CONFIG5_TWOSCOMP 0x01
#define CONFIG5_TEMPOFFSET 0x02
#define CONFIG5_VIDGPIO 0x10 /* ADT7476 only */
/* ADT7475 Settings */
#define ADT7475_VOLTAGE_COUNT 5 /* Not counting Vtt */
#define ADT7475_TEMP_COUNT 3
#define ADT7475_TACH_COUNT 4
#define ADT7475_PWM_COUNT 3
/* Macro to read the registers */
#define adt7475_read(reg) i2c_smbus_read_byte_data(client, (reg))
/* Macros to easily index the registers */
#define TACH_REG(idx) (REG_TACH_BASE + ((idx) * 2))
#define TACH_MIN_REG(idx) (REG_TACH_MIN_BASE + ((idx) * 2))
#define PWM_REG(idx) (REG_PWM_BASE + (idx))
#define PWM_MAX_REG(idx) (REG_PWM_MAX_BASE + (idx))
#define PWM_MIN_REG(idx) (REG_PWM_MIN_BASE + (idx))
#define PWM_CONFIG_REG(idx) (REG_PWM_CONFIG_BASE + (idx))
#define VOLTAGE_REG(idx) (REG_VOLTAGE_BASE + (idx))
#define VOLTAGE_MIN_REG(idx) (REG_VOLTAGE_MIN_BASE + ((idx) * 2))
#define VOLTAGE_MAX_REG(idx) (REG_VOLTAGE_MAX_BASE + ((idx) * 2))
#define TEMP_REG(idx) (REG_TEMP_BASE + (idx))
#define TEMP_MIN_REG(idx) (REG_TEMP_MIN_BASE + ((idx) * 2))
#define TEMP_MAX_REG(idx) (REG_TEMP_MAX_BASE + ((idx) * 2))
#define TEMP_TMIN_REG(idx) (REG_TEMP_TMIN_BASE + (idx))
#define TEMP_THERM_REG(idx) (REG_TEMP_THERM_BASE + (idx))
#define TEMP_OFFSET_REG(idx) (REG_TEMP_OFFSET_BASE + (idx))
#define TEMP_TRANGE_REG(idx) (REG_TEMP_TRANGE_BASE + (idx))
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips { adt7473, adt7475, adt7476, adt7490 };
static const struct i2c_device_id adt7475_id[] = {
{ "adt7473", adt7473 },
{ "adt7475", adt7475 },
{ "adt7476", adt7476 },
{ "adt7490", adt7490 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7475_id);
/*
 * Per-client driver state.  The arrays below shadow raw chip register
 * values; conversion to natural units happens in the sysfs handlers.
 * 'lock' serializes register read/modify/write cycles on the chip.
 */
struct adt7475_data {
	struct device *hwmon_dev;
	struct mutex lock;

	/* Cache bookkeeping consumed by adt7475_update_device()
	 * (body not visible here) — presumably jiffies timestamps
	 * plus a valid flag; TODO confirm against that function. */
	unsigned long measure_updated;
	unsigned long limits_updated;
	char valid;

	u8 config4;		/* cached REG_CONFIG4 (pin function bits) */
	u8 config5;		/* cached REG_CONFIG5 (temp format bits) */
	u8 has_voltage;		/* bitmask of present voltage inputs in0..in5 */
	u8 bypass_attn;		/* Bypass voltage attenuator */
	u8 has_pwm2:1;		/* PWM2 pin usable (not routed to SMBALERT) */
	u8 has_fan4:1;		/* TACH4 pin usable (not routed to THERM) */
	u8 has_vid:1;		/* VID pins usable (ADT7476, not GPIO mode) */
	u32 alarms;		/* cached alarm/fault status bits */
	u16 voltage[3][6];	/* raw values, first index is INPUT/MIN/MAX */
	u16 temp[7][3];		/* raw values, first index is the limit kind */
	u16 tach[2][4];		/* raw tach counts, first index INPUT/MIN */
	u8 pwm[4][3];		/* raw duty/config bytes per PWM output */
	u8 range[3];		/* TRANGE regs: autorange (hi nibble) + freq */
	u8 pwmctl[3];		/* pwmN_enable value exported to userspace */
	u8 pwmchan[3];		/* pwmN_auto_channels_temp value */
	u8 vid;			/* cached VID input */
	u8 vrm;			/* VRM version for vid_from_reg() */
};
static struct i2c_driver adt7475_driver;
static struct adt7475_data *adt7475_update_device(struct device *dev);
static void adt7475_read_hystersis(struct i2c_client *client);
static void adt7475_read_pwm(struct i2c_client *client, int index);
/* Given a temp value, convert it to register value */
/* Given a temp value, convert it to register value */
static inline u16 temp2reg(struct adt7475_data *data, long val)
{
	u16 ret;

	if (!(data->config5 & CONFIG5_TWOSCOMP)) {
		/* Offset-64 format: 64500 = 64000 degree offset plus 500
		 * for round-to-nearest-degree */
		val = SENSORS_LIMIT(val, -64000, 191000);
		ret = (val + 64500) / 1000;
	} else {
		/* Twos-complement format: round to the nearest degree and
		 * wrap negative values into the 8-bit register range */
		val = SENSORS_LIMIT(val, -128000, 127000);
		if (val < -500)
			ret = (256500 + val) / 1000;
		else
			ret = (val + 500) / 1000;
	}

	/* Driver-internal values keep 2 extra LSBs of precision */
	return ret << 2;
}
/* Given a register value, convert it to a real temp value */
/*
 * Given a register value (10-bit, driver-internal format), convert it
 * to millidegrees C, honouring the chip's configured temperature format
 * (twos complement vs offset-64).
 */
static inline int reg2temp(struct adt7475_data *data, u16 reg)
{
	int val = reg;

	if (data->config5 & CONFIG5_TWOSCOMP) {
		/* 10-bit twos complement: 512..1023 encode negatives */
		if (val >= 512)
			val -= 1024;
	} else {
		/* Offset-64 binary: remove the offset (256 quarter-degrees) */
		val -= 256;
	}

	/* Each LSB is 0.25 degrees C => 250 millidegrees */
	return val * 250;
}
/*
 * Convert a raw tachometer period count into RPM.
 * 0 and 0xFFFF are sentinel readings (fan stalled or absent) and map
 * to 0 RPM.  The tach clock is 90 kHz; 60 converts per-second to RPM.
 */
static inline int tach2rpm(u16 tach)
{
	switch (tach) {
	case 0:
	case 0xFFFF:
		return 0;
	default:
		return (90000 * 60) / tach;
	}
}
/* Convert RPM back to a raw tach period count (inverse of tach2rpm);
 * 0 RPM maps to the 0 sentinel, everything else is clamped to 1..0xFFFF */
static inline u16 rpm2tach(unsigned long rpm)
{
	if (rpm == 0)
		return 0;

	return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
}
/* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */
/* Each pair is used as {r[0], r[1]} by reg2volt()/volt2reg(); presumably
 * the internal attenuator's divider resistor values — confirm against the
 * datasheet's input-scaling table. */
static const int adt7473_in_scaling[ADT7475_VOLTAGE_COUNT + 1][2] = {
	{ 45, 94 },	/* +2.5V */
	{ 175, 525 },	/* Vccp */
	{ 68, 71 },	/* Vcc */
	{ 93, 47 },	/* +5V */
	{ 120, 20 },	/* +12V */
	{ 45, 45 },	/* Vtt */
};
/* Convert a raw 10-bit voltage reading to millivolts (2250 mV full scale);
 * channels with the attenuator bypassed skip the divider scaling */
static inline int reg2volt(int channel, u16 reg, u8 bypass_attn)
{
	const int *r = adt7473_in_scaling[channel];

	if (bypass_attn & (1 << channel))
		return DIV_ROUND_CLOSEST(reg * 2250, 1024);
	return DIV_ROUND_CLOSEST(reg * (r[0] + r[1]) * 2250, r[1] * 1024);
}
/* Convert millivolts back to the raw 10-bit register value
 * (inverse of reg2volt) */
static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
{
	const int *r = adt7473_in_scaling[channel];
	long reg;

	if (bypass_attn & (1 << channel))
		reg = (volt * 1024) / 2250;
	else
		reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);

	/* Clamp to 10 bits and clear the two sub-LSBs: the chip register
	 * only holds the top 8 bits (see the >> 2 at the write sites) */
	return SENSORS_LIMIT(reg, 0, 1023) & (0xff << 2);
}
/* Read a 16-bit value from a register pair: low byte at 'reg', high byte
 * at 'reg' + 1.  SMBus errors (negative returns) are not detected here. */
static u16 adt7475_read_word(struct i2c_client *client, int reg)
{
	u16 val;

	val = i2c_smbus_read_byte_data(client, reg);
	val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);

	return val;
}
/* Write a 16-bit value to a register pair, high byte ('reg' + 1) first
 * (presumably the order the chip latches the pair in — confirm against
 * the datasheet) */
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
{
	i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
	i2c_smbus_write_byte_data(client, reg, val & 0xFF);
}
/* Find the nearest value in a table - used for pwm frequency and
auto temp range */
/*
 * Return the index of the entry in a sorted (ascending) table that is
 * closest to val.  Values below the first entry clamp to index 0, values
 * above the last clamp to size - 1, and exact midpoints round down to
 * the lower index.  Used for pwm frequency and auto temp range lookups.
 */
static int find_nearest(long val, const int *array, int size)
{
	int i;

	if (val < array[0])
		return 0;
	if (val > array[size - 1])
		return size - 1;

	/* Advance to the interval [array[i], array[i + 1]] holding val */
	for (i = 0; i < size - 1 && val > array[i + 1]; i++)
		;

	if (val - array[i] <= array[i + 1] - val)
		return i;
	return i + 1;
}
/* sysfs: show a voltage input/limit in millivolts, or its alarm bit */
static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	unsigned short val;

	switch (sattr->nr) {
	case ALARM:
		/* For alarm attributes, 'index' is the bit position in the
		 * cached alarm word, not the channel number */
		return sprintf(buf, "%d\n",
			       (data->alarms >> sattr->index) & 1);
	default:
		val = data->voltage[sattr->nr][sattr->index];
		return sprintf(buf, "%d\n",
			       reg2volt(sattr->index, val, data->bypass_attn));
	}
}
/* sysfs: set a voltage MIN/MAX limit (millivolts) and write it to the chip */
static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	unsigned char reg;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);

	data->voltage[sattr->nr][sattr->index] =
				volt2reg(sattr->index, val, data->bypass_attn);

	if (sattr->index < ADT7475_VOLTAGE_COUNT) {
		if (sattr->nr == MIN)
			reg = VOLTAGE_MIN_REG(sattr->index);
		else
			reg = VOLTAGE_MAX_REG(sattr->index);
	} else {
		/* The index past the regular inputs is Vtt (ADT7490 only) */
		if (sattr->nr == MIN)
			reg = REG_VTT_MIN;
		else
			reg = REG_VTT_MAX;
	}

	/* The chip register holds only the top 8 bits of the 10-bit value */
	i2c_smbus_write_byte_data(client, reg,
				  data->voltage[sattr->nr][sattr->index] >> 2);
	mutex_unlock(&data->lock);

	return count;
}
/* sysfs: show a temperature value/limit/alarm, converting the raw cached
 * register value to millidegrees where applicable */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int out;

	switch (sattr->nr) {
	case HYSTERSIS:
		mutex_lock(&data->lock);
		out = data->temp[sattr->nr][sattr->index];
		/* Two hysteresis values share one register: channels 0 and 2
		 * use the high nibble, channel 1 the low nibble */
		if (sattr->index != 1)
			out = (out >> 4) & 0xF;
		else
			out = (out & 0xF);
		/* Show the value as an absolute number tied to
		 * THERM */
		out = reg2temp(data, data->temp[THERM][sattr->index]) -
			out * 1000;
		mutex_unlock(&data->lock);
		break;
	case OFFSET:
		/* Offset is always 2's complement, regardless of the
		 * setting in CONFIG5 */
		mutex_lock(&data->lock);
		out = (s8)data->temp[sattr->nr][sattr->index];
		/* CONFIG5 selects the offset step: 1 or 0.5 degrees/LSB */
		if (data->config5 & CONFIG5_TEMPOFFSET)
			out *= 1000;
		else
			out *= 500;
		mutex_unlock(&data->lock);
		break;
	case ALARM:
		/* Temperature alarm bits start at bit 4 of the alarm word */
		out = (data->alarms >> (sattr->index + 4)) & 1;
		break;
	case FAULT:
		/* Note - only for remote1 and remote2 */
		out = !!(data->alarms & (sattr->index ? 0x8000 : 0x4000));
		break;
	default:
		/* All other temp values are in the configured format */
		out = reg2temp(data, data->temp[sattr->nr][sattr->index]);
	}

	return sprintf(buf, "%d\n", out);
}
/* sysfs: set a temperature limit/offset/hysteresis and write it to the
 * matching chip register */
static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	unsigned char reg = 0;
	u8 out;
	int temp;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);

	/* We need the config register in all cases for temp <-> reg conv. */
	data->config5 = adt7475_read(REG_CONFIG5);

	switch (sattr->nr) {
	case OFFSET:
		/* Step size depends on CONFIG5: 1 or 0.5 degrees per LSB */
		if (data->config5 & CONFIG5_TEMPOFFSET) {
			val = SENSORS_LIMIT(val, -63000, 127000);
			out = data->temp[OFFSET][sattr->index] = val / 1000;
		} else {
			val = SENSORS_LIMIT(val, -63000, 64000);
			out = data->temp[OFFSET][sattr->index] = val / 500;
		}
		break;

	case HYSTERSIS:
		/* The value will be given as an absolute value, turn it
		   into an offset based on THERM */

		/* Read fresh THERM and HYSTERSIS values from the chip */
		data->temp[THERM][sattr->index] =
			adt7475_read(TEMP_THERM_REG(sattr->index)) << 2;
		adt7475_read_hystersis(client);

		temp = reg2temp(data, data->temp[THERM][sattr->index]);
		/* Hysteresis is a 4-bit offset below THERM: 0..15 degrees */
		val = SENSORS_LIMIT(val, temp - 15000, temp);
		val = (temp - val) / 1000;

		/* Channels 0 and 2 use the high nibble of the shared
		 * register, channel 1 the low nibble */
		if (sattr->index != 1) {
			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
		} else {
			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
		}

		out = data->temp[HYSTERSIS][sattr->index];
		break;

	default:
		data->temp[sattr->nr][sattr->index] = temp2reg(data, val);

		/* We maintain an extra 2 digits of precision for simplicity
		 * - shift those back off before writing the value */
		out = (u8) (data->temp[sattr->nr][sattr->index] >> 2);
	}

	/* Pick the destination register for the limit kind being set */
	switch (sattr->nr) {
	case MIN:
		reg = TEMP_MIN_REG(sattr->index);
		break;
	case MAX:
		reg = TEMP_MAX_REG(sattr->index);
		break;
	case OFFSET:
		reg = TEMP_OFFSET_REG(sattr->index);
		break;
	case AUTOMIN:
		reg = TEMP_TMIN_REG(sattr->index);
		break;
	case THERM:
		reg = TEMP_THERM_REG(sattr->index);
		break;
	case HYSTERSIS:
		/* Channels 0 and 1 share the Remote1 hysteresis register */
		if (sattr->index != 2)
			reg = REG_REMOTE1_HYSTERSIS;
		else
			reg = REG_REMOTE2_HYSTERSIS;
		break;
	}

	i2c_smbus_write_byte_data(client, reg, out);

	mutex_unlock(&data->lock);
	return count;
}
/* Table of autorange values - the user will write the value in millidegrees,
   and we'll convert it */
/* Indexed by the 4-bit range field in the high nibble of the TRANGE
 * register; values are the span in millidegrees above the AUTOMIN point */
static const int autorange_table[] = {
	2000, 2500, 3330, 4000, 5000, 6670, 8000,
	10000, 13330, 16000, 20000, 26670, 32000, 40000,
	53330, 80000
};
/* sysfs: show the auto point2 temperature as an absolute value:
 * AUTOMIN plus the autorange span encoded in the TRANGE high nibble */
static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int out, val;

	mutex_lock(&data->lock);
	out = (data->range[sattr->index] >> 4) & 0x0F;
	val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
	mutex_unlock(&data->lock);

	return sprintf(buf, "%d\n", val + autorange_table[out]);
}
/* sysfs: set the auto point2 temperature; stored on the chip as a range
 * above AUTOMIN, quantized to the nearest autorange_table entry */
static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int temp;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);

	/* Get a fresh copy of the needed registers */
	data->config5 = adt7475_read(REG_CONFIG5);
	data->temp[AUTOMIN][sattr->index] =
		adt7475_read(TEMP_TMIN_REG(sattr->index)) << 2;
	data->range[sattr->index] =
		adt7475_read(TEMP_TRANGE_REG(sattr->index));

	/* The user will write an absolute value, so subtract the start point
	   to figure the range */
	temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
	val = SENSORS_LIMIT(val, temp + autorange_table[0],
		temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
	val -= temp;

	/* Find the nearest table entry to what the user wrote */
	val = find_nearest(val, autorange_table, ARRAY_SIZE(autorange_table));

	/* The range index lives in the high nibble of the TRANGE register */
	data->range[sattr->index] &= ~0xF0;
	data->range[sattr->index] |= val << 4;

	i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
				  data->range[sattr->index]);

	mutex_unlock(&data->lock);
	return count;
}
/* sysfs: show a fan speed in RPM, or its alarm bit */
static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int out;

	/* Fan alarm bits start at bit 10 of the cached alarm word */
	if (sattr->nr == ALARM)
		out = (data->alarms >> (sattr->index + 10)) & 1;
	else
		out = tach2rpm(data->tach[sattr->nr][sattr->index]);

	return sprintf(buf, "%d\n", out);
}
/* sysfs: set a fan's minimum speed (RPM), written as a raw tach count */
static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	unsigned long val;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);

	/* Only the MIN limit is writable for fans */
	data->tach[MIN][sattr->index] = rpm2tach(val);

	adt7475_write_word(client, TACH_MIN_REG(sattr->index),
			   data->tach[MIN][sattr->index]);

	mutex_unlock(&data->lock);
	return count;
}
/* sysfs: show a raw PWM duty-cycle byte (0..255) */
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);

	return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
}
/* sysfs: show which temperature channels drive this PWM (bitmask) */
static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);

	return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
}
/* sysfs: show the PWM enable mode (0=full on, 1=manual, 2=automatic) */
static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);

	return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
}
/* sysfs: set a PWM duty-cycle byte (current value, auto MIN or auto MAX);
 * the current value is only writable while the channel is in manual mode */
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	unsigned char reg = 0;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);

	switch (sattr->nr) {
	case INPUT:
		/* Get a fresh value for CONTROL */
		data->pwm[CONTROL][sattr->index] =
			adt7475_read(PWM_CONFIG_REG(sattr->index));

		/* If we are not in manual mode, then we shouldn't allow
		 * the user to set the pwm speed */
		/* Behavior field is bits 7:5; 7 == manual mode */
		if (((data->pwm[CONTROL][sattr->index] >> 5) & 7) != 7) {
			mutex_unlock(&data->lock);
			return count;
		}

		reg = PWM_REG(sattr->index);
		break;

	case MIN:
		reg = PWM_MIN_REG(sattr->index);
		break;

	case MAX:
		reg = PWM_MAX_REG(sattr->index);
		break;
	}

	data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
	i2c_smbus_write_byte_data(client, reg,
				  data->pwm[sattr->nr][sattr->index]);

	mutex_unlock(&data->lock);
	return count;
}
/* Called by set_pwmctrl and set_pwmchan */
static int hw_set_pwm(struct i2c_client *client, int index,
unsigned int pwmctl, unsigned int pwmchan)
{
struct adt7475_data *data = i2c_get_clientdata(client);
long val = 0;
switch (pwmctl) {
case 0:
val = 0x03; /* Run at full speed */
break;
case 1:
val = 0x07; /* Manual mode */
break;
case 2:
switch (pwmchan) {
case 1:
/* Remote1 controls PWM */
val = 0x00;
break;
case 2:
/* local controls PWM */
val = 0x01;
break;
case 4:
/* remote2 controls PWM */
val = 0x02;
break;
case 6:
/* local/remote2 control PWM */
val = 0x05;
break;
case 7:
/* All three control PWM */
val = 0x06;
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
data->pwmctl[index] = pwmctl;
data->pwmchan[index] = pwmchan;
data->pwm[CONTROL][index] &= ~0xE0;
data->pwm[CONTROL][index] |= (val & 7) << 5;
i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
data->pwm[CONTROL][index]);
return 0;
}
/* sysfs: set which temp channels drive this PWM, keeping the current
 * enable mode; returns -EINVAL (via count) for unsupported masks */
static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	int r;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);
	/* Read Modify Write PWM values */
	adt7475_read_pwm(client, sattr->index);
	r = hw_set_pwm(client, sattr->index, data->pwmctl[sattr->index], val);
	if (r)
		count = r;	/* propagate the error code to the caller */
	mutex_unlock(&data->lock);

	return count;
}
/* sysfs: set the PWM enable mode, keeping the current channel mask;
 * returns -EINVAL (via count) for unsupported modes */
static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	int r;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	mutex_lock(&data->lock);
	/* Read Modify Write PWM values */
	adt7475_read_pwm(client, sattr->index);
	r = hw_set_pwm(client, sattr->index, val, data->pwmchan[sattr->index]);
	if (r)
		count = r;	/* propagate the error code to the caller */
	mutex_unlock(&data->lock);

	return count;
}
/* List of frequencies for the PWM */
/* In Hz, indexed by the low 3 bits of the TRANGE register */
static const int pwmfreq_table[] = {
	11, 14, 22, 29, 35, 44, 58, 88
};
/* sysfs: show the PWM output frequency in Hz */
static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);

	/* The frequency selector is the low 3 bits of the TRANGE register */
	return sprintf(buf, "%d\n",
		       pwmfreq_table[data->range[sattr->index] & 7]);
}
/* sysfs: set the PWM frequency, snapped to the nearest supported value */
static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	int out;
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));

	mutex_lock(&data->lock);

	/* Read-modify-write: only the low 3 bits select the frequency */
	data->range[sattr->index] =
		adt7475_read(TEMP_TRANGE_REG(sattr->index));
	data->range[sattr->index] &= ~7;
	data->range[sattr->index] |= out;

	i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
				  data->range[sattr->index]);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs: show whether fans jump to max duty when THERM trips (CONFIG4) */
static ssize_t show_pwm_at_crit(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);

	return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
}
/* sysfs: set the max-duty-at-critical flag (boolean) in CONFIG4 */
static ssize_t set_pwm_at_crit(struct device *dev,
			       struct device_attribute *devattr,
			       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&data->lock);

	/* Read-modify-write so the other CONFIG4 bits are preserved */
	data->config4 = i2c_smbus_read_byte_data(client, REG_CONFIG4);
	if (val)
		data->config4 |= CONFIG4_MAXDUTY;
	else
		data->config4 &= ~CONFIG4_MAXDUTY;
	i2c_smbus_write_byte_data(client, REG_CONFIG4, data->config4);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs: show the VRM version used for VID decoding */
static ssize_t show_vrm(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct adt7475_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", (int)data->vrm);
}
/* sysfs: set the VRM version (0..255); driver state only, no chip access */
static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct adt7475_data *data = dev_get_drvdata(dev);
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;
	if (val < 0 || val > 255)
		return -EINVAL;
	data->vrm = val;

	return count;
}
/* sysfs: show the CPU core voltage decoded from the VID pins */
static ssize_t show_vid(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct adt7475_data *data = adt7475_update_device(dev);

	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_voltage, NULL, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 0);
static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 0);
static SENSOR_DEVICE_ATTR_2(in0_alarm, S_IRUGO, show_voltage, NULL, ALARM, 0);
static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_voltage, NULL, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 1);
static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 1);
static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, show_voltage, NULL, ALARM, 1);
static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_voltage, NULL, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 2);
static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 2);
static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, show_voltage, NULL, ALARM, 2);
static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_voltage, NULL, INPUT, 3);
static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 3);
static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 3);
static SENSOR_DEVICE_ATTR_2(in3_alarm, S_IRUGO, show_voltage, NULL, ALARM, 3);
static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_voltage, NULL, INPUT, 4);
static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 4);
static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 4);
static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, show_voltage, NULL, ALARM, 8);
static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_voltage, NULL, INPUT, 5);
static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MAX, 5);
static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_voltage,
set_voltage, MIN, 5);
static SENSOR_DEVICE_ATTR_2(in5_alarm, S_IRUGO, show_voltage, NULL, ALARM, 31);
static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, show_temp, NULL, ALARM, 0);
static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, show_temp, NULL, FAULT, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
MAX, 0);
static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
MIN, 0);
static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
set_temp, OFFSET, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
show_temp, set_temp, AUTOMIN, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IRUGO | S_IWUSR,
show_point2, set_point2, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
THERM, 0);
static SENSOR_DEVICE_ATTR_2(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
set_temp, HYSTERSIS, 0);
static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(temp2_alarm, S_IRUGO, show_temp, NULL, ALARM, 1);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
MAX, 1);
static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
MIN, 1);
static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
set_temp, OFFSET, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IRUGO | S_IWUSR,
show_temp, set_temp, AUTOMIN, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IRUGO | S_IWUSR,
show_point2, set_point2, 0, 1);
static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
THERM, 1);
static SENSOR_DEVICE_ATTR_2(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
set_temp, HYSTERSIS, 1);
static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(temp3_alarm, S_IRUGO, show_temp, NULL, ALARM, 2);
static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_temp, NULL, FAULT, 2);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
MAX, 2);
static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
MIN, 2);
static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
set_temp, OFFSET, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_point1_temp, S_IRUGO | S_IWUSR,
show_temp, set_temp, AUTOMIN, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_point2_temp, S_IRUGO | S_IWUSR,
show_point2, set_point2, 0, 2);
static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
THERM, 2);
static SENSOR_DEVICE_ATTR_2(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
set_temp, HYSTERSIS, 2);
static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_tach, NULL, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
MIN, 0);
static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, show_tach, NULL, ALARM, 0);
static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_tach, NULL, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
MIN, 1);
static SENSOR_DEVICE_ATTR_2(fan2_alarm, S_IRUGO, show_tach, NULL, ALARM, 1);
static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_tach, NULL, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
MIN, 2);
static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_tach, NULL, ALARM, 2);
static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_tach, NULL, INPUT, 3);
static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
MIN, 3);
static SENSOR_DEVICE_ATTR_2(fan4_alarm, S_IRUGO, show_tach, NULL, ALARM, 3);
static SENSOR_DEVICE_ATTR_2(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
0);
static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
set_pwmfreq, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
set_pwmctrl, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
show_pwmchan, set_pwmchan, INPUT, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MIN, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MAX, 0);
static SENSOR_DEVICE_ATTR_2(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
1);
static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
set_pwmfreq, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
set_pwmctrl, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
show_pwmchan, set_pwmchan, INPUT, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MIN, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MAX, 1);
static SENSOR_DEVICE_ATTR_2(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
2);
static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
set_pwmfreq, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
set_pwmctrl, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
show_pwmchan, set_pwmchan, INPUT, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MIN, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MAX, 2);
/* Non-standard name, might need revisiting */
static DEVICE_ATTR(pwm_use_point2_pwm_at_crit, S_IWUSR | S_IRUGO,
show_pwm_at_crit, set_pwm_at_crit);
static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, set_vrm);
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
static struct attribute *adt7475_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp3_crit.dev_attr.attr,
&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_freq.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
&sensor_dev_attr_pwm3_freq.dev_attr.attr,
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
&dev_attr_pwm_use_point2_pwm_at_crit.attr,
NULL,
};
static struct attribute *fan4_attrs[] = {
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
NULL
};
static struct attribute *pwm2_attrs[] = {
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm2_freq.dev_attr.attr,
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
NULL
};
static struct attribute *in0_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
NULL
};
static struct attribute *in3_attrs[] = {
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
NULL
};
static struct attribute *in4_attrs[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
NULL
};
static struct attribute *in5_attrs[] = {
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
NULL
};
static struct attribute *vid_attrs[] = {
&dev_attr_cpu0_vid.attr,
&dev_attr_vrm.attr,
NULL
};
static struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs };
static struct attribute_group fan4_attr_group = { .attrs = fan4_attrs };
static struct attribute_group pwm2_attr_group = { .attrs = pwm2_attrs };
static struct attribute_group in0_attr_group = { .attrs = in0_attrs };
static struct attribute_group in3_attr_group = { .attrs = in3_attrs };
static struct attribute_group in4_attr_group = { .attrs = in4_attrs };
static struct attribute_group in5_attr_group = { .attrs = in5_attrs };
static struct attribute_group vid_attr_group = { .attrs = vid_attrs };
/*
 * i2c detect callback: probe the vendor/device ID registers and, on a
 * match, fill info->type with the chip name.  Returns 0 on success or
 * -ENODEV when the adapter lacks SMBus byte-data support or no
 * supported chip is found.
 */
static int adt7475_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int vendid, devid, devid2;
	const char *name;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* All supported parts share vendor ID 0x41 and a 0x68-family
	 * secondary device ID */
	vendid = adt7475_read(REG_VENDID);
	devid2 = adt7475_read(REG_DEVID2);
	if (vendid != 0x41 ||		/* Analog Devices */
	    (devid2 & 0xf8) != 0x68)
		return -ENODEV;

	devid = adt7475_read(REG_DEVID);
	if (devid == 0x73)
		name = "adt7473";
	else if (devid == 0x75 && client->addr == 0x2e)
		name = "adt7475";
	else if (devid == 0x76)
		name = "adt7476";
	else if ((devid2 & 0xfc) == 0x6c)
		name = "adt7490";
	else {
		dev_dbg(&adapter->dev,
			"Couldn't detect an ADT7473/75/76/90 part at "
			"0x%02x\n", (unsigned int)client->addr);
		return -ENODEV;
	}

	strlcpy(info->type, name, I2C_NAME_SIZE);

	return 0;
}
/* Remove every sysfs group created at probe time; the optional groups
 * are only removed when the matching feature flag says they were added */
static void adt7475_remove_files(struct i2c_client *client,
				 struct adt7475_data *data)
{
	sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
	if (data->has_fan4)
		sysfs_remove_group(&client->dev.kobj, &fan4_attr_group);
	if (data->has_pwm2)
		sysfs_remove_group(&client->dev.kobj, &pwm2_attr_group);
	if (data->has_voltage & (1 << 0))
		sysfs_remove_group(&client->dev.kobj, &in0_attr_group);
	if (data->has_voltage & (1 << 3))
		sysfs_remove_group(&client->dev.kobj, &in3_attr_group);
	if (data->has_voltage & (1 << 4))
		sysfs_remove_group(&client->dev.kobj, &in4_attr_group);
	if (data->has_voltage & (1 << 5))
		sysfs_remove_group(&client->dev.kobj, &in5_attr_group);
	if (data->has_vid)
		sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
}
/*
 * i2c probe callback: allocate per-client state, detect the variant's
 * optional features from its configuration registers (extra voltage
 * inputs, fan4, pwm2, VID, attenuator bypass), create the corresponding
 * sysfs attribute groups and register with the hwmon core.
 *
 * Returns 0 on success or a negative errno; on any failure all groups
 * created so far and the state allocation are rolled back.
 */
static int adt7475_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	static const char *names[] = {
		[adt7473] = "ADT7473",
		[adt7475] = "ADT7475",
		[adt7476] = "ADT7476",
		[adt7490] = "ADT7490",
	};

	struct adt7475_data *data;
	int i, ret = 0, revision;
	u8 config2, config3;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	mutex_init(&data->lock);
	i2c_set_clientdata(client, data);

	/* Initialize device-specific values */
	switch (id->driver_data) {
	case adt7476:
		data->has_voltage = 0x0e;	/* in1 to in3 */
		revision = adt7475_read(REG_DEVID2) & 0x07;
		break;
	case adt7490:
		data->has_voltage = 0x3e;	/* in1 to in5 */
		revision = adt7475_read(REG_DEVID2) & 0x03;
		if (revision == 0x03)
			revision += adt7475_read(REG_DEVREV2);
		break;
	default:
		data->has_voltage = 0x06;	/* in1, in2 */
		revision = adt7475_read(REG_DEVID2) & 0x07;
	}

	config3 = adt7475_read(REG_CONFIG3);
	/* Pin PWM2 may alternatively be used for ALERT output */
	if (!(config3 & CONFIG3_SMBALERT))
		data->has_pwm2 = 1;
	/* Meaning of this bit is inverted for the ADT7473-1 */
	if (id->driver_data == adt7473 && revision >= 1)
		data->has_pwm2 = !data->has_pwm2;

	data->config4 = adt7475_read(REG_CONFIG4);
	/* Pin TACH4 may alternatively be used for THERM */
	if ((data->config4 & CONFIG4_PINFUNC) == 0x0)
		data->has_fan4 = 1;

	/* THERM configuration is more complex on the ADT7476 and ADT7490,
	   because 2 different pins (TACH4 and +2.5 Vin) can be used for
	   this function */
	if (id->driver_data == adt7490) {
		if ((data->config4 & CONFIG4_PINFUNC) == 0x1 &&
		    !(config3 & CONFIG3_THERM))
			data->has_fan4 = 1;
	}
	if (id->driver_data == adt7476 || id->driver_data == adt7490) {
		if (!(config3 & CONFIG3_THERM) ||
		    (data->config4 & CONFIG4_PINFUNC) == 0x1)
			data->has_voltage |= (1 << 0);	/* in0 */
	}

	/* On the ADT7476, the +12V input pin may instead be used as VID5,
	   and VID pins may alternatively be used as GPIO */
	if (id->driver_data == adt7476) {
		u8 vid = adt7475_read(REG_VID);
		if (!(vid & VID_VIDSEL))
			data->has_voltage |= (1 << 4);	/* in4 */
		data->has_vid = !(adt7475_read(REG_CONFIG5) & CONFIG5_VIDGPIO);
	}

	/* Voltage attenuators can be bypassed, globally or individually */
	config2 = adt7475_read(REG_CONFIG2);
	if (config2 & CONFIG2_ATTN) {
		data->bypass_attn = (0x3 << 3) | 0x3;
	} else {
		data->bypass_attn = ((data->config4 & CONFIG4_ATTN_IN10) >> 4) |
				    ((data->config4 & CONFIG4_ATTN_IN43) >> 3);
	}
	/* Only inputs that actually exist can have a bypassed attenuator */
	data->bypass_attn &= data->has_voltage;

	/* Call adt7475_read_pwm for all pwm's as this will reprogram any
	   pwm's which are disabled to manual mode with 0% duty cycle */
	for (i = 0; i < ADT7475_PWM_COUNT; i++)
		adt7475_read_pwm(client, i);

	ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
	if (ret)
		goto efree;

	/* Features that can be disabled individually */
	if (data->has_fan4) {
		ret = sysfs_create_group(&client->dev.kobj, &fan4_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_pwm2) {
		ret = sysfs_create_group(&client->dev.kobj, &pwm2_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_voltage & (1 << 0)) {
		ret = sysfs_create_group(&client->dev.kobj, &in0_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_voltage & (1 << 3)) {
		ret = sysfs_create_group(&client->dev.kobj, &in3_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_voltage & (1 << 4)) {
		ret = sysfs_create_group(&client->dev.kobj, &in4_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_voltage & (1 << 5)) {
		ret = sysfs_create_group(&client->dev.kobj, &in5_attr_group);
		if (ret)
			goto eremove;
	}
	if (data->has_vid) {
		data->vrm = vid_which_vrm();
		ret = sysfs_create_group(&client->dev.kobj, &vid_attr_group);
		if (ret)
			goto eremove;
	}

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		goto eremove;
	}

	dev_info(&client->dev, "%s device, revision %d\n",
		 names[id->driver_data], revision);
	/* NOTE(review): the 0x11 mask covers in0/in4 but not has_vid, even
	 * though " vid" is printed below; a vid-only configuration would
	 * skip this message -- confirm whether that is intentional. */
	if ((data->has_voltage & 0x11) || data->has_fan4 || data->has_pwm2)
		dev_info(&client->dev, "Optional features:%s%s%s%s%s\n",
			 (data->has_voltage & (1 << 0)) ? " in0" : "",
			 (data->has_voltage & (1 << 4)) ? " in4" : "",
			 data->has_fan4 ? " fan4" : "",
			 data->has_pwm2 ? " pwm2" : "",
			 data->has_vid ? " vid" : "");
	if (data->bypass_attn)
		dev_info(&client->dev, "Bypassing attenuators on:%s%s%s%s\n",
			 (data->bypass_attn & (1 << 0)) ? " in0" : "",
			 (data->bypass_attn & (1 << 1)) ? " in1" : "",
			 (data->bypass_attn & (1 << 3)) ? " in3" : "",
			 (data->bypass_attn & (1 << 4)) ? " in4" : "");

	return 0;

eremove:
	adt7475_remove_files(client, data);
efree:
	kfree(data);
	return ret;
}
/*
 * i2c remove callback: unregister from hwmon, remove the sysfs groups
 * and free the per-client state (reverse order of probe).
 */
static int adt7475_remove(struct i2c_client *client)
{
	struct adt7475_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	adt7475_remove_files(client, data);
	kfree(data);

	return 0;
}
/* i2c driver glue: probe/remove plus auto-detection on the hwmon class */
static struct i2c_driver adt7475_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "adt7475",
	},
	.probe		= adt7475_probe,
	.remove		= adt7475_remove,
	.id_table	= adt7475_id,
	.detect		= adt7475_detect,
	.address_list	= normal_i2c,
};
/*
 * Refresh the cached hysteresis values.  Only the REMOTE1 and REMOTE2
 * hysteresis registers are read; the REMOTE1 register value is reused
 * for the middle temperature channel (index 1).
 */
static void adt7475_read_hystersis(struct i2c_client *client)
{
	struct adt7475_data *data = i2c_get_clientdata(client);

	data->temp[HYSTERSIS][0] = (u16) adt7475_read(REG_REMOTE1_HYSTERSIS);
	data->temp[HYSTERSIS][1] = data->temp[HYSTERSIS][0];
	data->temp[HYSTERSIS][2] = (u16) adt7475_read(REG_REMOTE2_HYSTERSIS);
}
/*
 * Read back the PWM configuration register for channel @index and derive
 * the driver's internal pwmctl/pwmchan state from the behaviour field in
 * bits 7:5.  A disabled fan (field value 4) is reprogrammed to manual
 * mode with 0% duty cycle as a side effect.
 */
static void adt7475_read_pwm(struct i2c_client *client, int index)
{
	struct adt7475_data *data = i2c_get_clientdata(client);
	unsigned int v;

	data->pwm[CONTROL][index] = adt7475_read(PWM_CONFIG_REG(index));

	/* Figure out the internal value for pwmctrl and pwmchan
	   based on the current settings */
	v = (data->pwm[CONTROL][index] >> 5) & 7;

	if (v == 3)
		data->pwmctl[index] = 0;
	else if (v == 7)
		data->pwmctl[index] = 1;
	else if (v == 4) {
		/* The fan is disabled - we don't want to
		   support that, so change to manual mode and
		   set the duty cycle to 0 instead
		*/
		data->pwm[INPUT][index] = 0;
		data->pwm[CONTROL][index] &= ~0xE0;
		data->pwm[CONTROL][index] |= (7 << 5);

		/* NOTE(review): both writes below target PWM_CONFIG_REG();
		 * the first one carries the duty cycle value and looks like
		 * it was meant for the PWM duty register instead -- verify
		 * against the datasheet / upstream driver. */
		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
					  data->pwm[INPUT][index]);

		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
					  data->pwm[CONTROL][index]);

		data->pwmctl[index] = 1;
	} else {
		data->pwmctl[index] = 2;

		/* Map the behaviour field onto the pwmchan bitmask */
		switch (v) {
		case 0:
			data->pwmchan[index] = 1;
			break;
		case 1:
			data->pwmchan[index] = 2;
			break;
		case 2:
			data->pwmchan[index] = 4;
			break;
		case 5:
			data->pwmchan[index] = 6;
			break;
		case 6:
			data->pwmchan[index] = 7;
			break;
		}
	}
}
/*
 * Refresh the cached register state under data->lock: measurement values
 * at most every 2 seconds, limit/configuration registers at most every
 * 60 seconds (or immediately while the cache is not yet valid).  Returns
 * the per-client cache for the sysfs show handlers to read.
 */
static struct adt7475_data *adt7475_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adt7475_data *data = i2c_get_clientdata(client);
	u16 ext;
	int i;

	mutex_lock(&data->lock);

	/* Measurement values update every 2 seconds */
	if (time_after(jiffies, data->measure_updated + HZ * 2) ||
	    !data->valid) {
		data->alarms = adt7475_read(REG_STATUS2) << 8;
		data->alarms |= adt7475_read(REG_STATUS1);

		/* The extension registers supply the two extra low-order
		   bits of each voltage/temperature reading */
		ext = (adt7475_read(REG_EXTEND2) << 8) |
			adt7475_read(REG_EXTEND1);
		for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
			if (!(data->has_voltage & (1 << i)))
				continue;
			data->voltage[INPUT][i] =
				(adt7475_read(VOLTAGE_REG(i)) << 2) |
				((ext >> (i * 2)) & 3);
		}

		for (i = 0; i < ADT7475_TEMP_COUNT; i++)
			data->temp[INPUT][i] =
				(adt7475_read(TEMP_REG(i)) << 2) |
				((ext >> ((i + 5) * 2)) & 3);

		if (data->has_voltage & (1 << 5)) {
			data->alarms |= adt7475_read(REG_STATUS4) << 24;
			ext = adt7475_read(REG_EXTEND3);
			data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
				((ext >> 4) & 3);
		}

		for (i = 0; i < ADT7475_TACH_COUNT; i++) {
			if (i == 3 && !data->has_fan4)
				continue;
			data->tach[INPUT][i] =
				adt7475_read_word(client, TACH_REG(i));
		}

		/* Updated by hw when in auto mode */
		for (i = 0; i < ADT7475_PWM_COUNT; i++) {
			if (i == 1 && !data->has_pwm2)
				continue;
			data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
		}

		if (data->has_vid)
			data->vid = adt7475_read(REG_VID) & 0x3f;

		data->measure_updated = jiffies;
	}

	/* Limits and settings, should never change update every 60 seconds */
	if (time_after(jiffies, data->limits_updated + HZ * 60) ||
	    !data->valid) {
		data->config4 = adt7475_read(REG_CONFIG4);
		data->config5 = adt7475_read(REG_CONFIG5);

		for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
			if (!(data->has_voltage & (1 << i)))
				continue;
			/* Adjust values so they match the input precision */
			data->voltage[MIN][i] =
				adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
			data->voltage[MAX][i] =
				adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
		}

		if (data->has_voltage & (1 << 5)) {
			data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
			data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
		}

		for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
			/* Adjust values so they match the input precision */
			data->temp[MIN][i] =
				adt7475_read(TEMP_MIN_REG(i)) << 2;
			data->temp[MAX][i] =
				adt7475_read(TEMP_MAX_REG(i)) << 2;
			data->temp[AUTOMIN][i] =
				adt7475_read(TEMP_TMIN_REG(i)) << 2;
			data->temp[THERM][i] =
				adt7475_read(TEMP_THERM_REG(i)) << 2;
			data->temp[OFFSET][i] =
				adt7475_read(TEMP_OFFSET_REG(i));
		}
		adt7475_read_hystersis(client);

		for (i = 0; i < ADT7475_TACH_COUNT; i++) {
			if (i == 3 && !data->has_fan4)
				continue;
			data->tach[MIN][i] =
				adt7475_read_word(client, TACH_MIN_REG(i));
		}

		for (i = 0; i < ADT7475_PWM_COUNT; i++) {
			if (i == 1 && !data->has_pwm2)
				continue;
			data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
			data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
			/* Set the channel and control information */
			adt7475_read_pwm(client, i);
		}

		data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
		data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
		data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));

		data->limits_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->lock);

	return data;
}
/* Module entry point: register the i2c driver with the i2c core */
static int __init sensors_adt7475_init(void)
{
	return i2c_add_driver(&adt7475_driver);
}

/* Module exit point: unregister the i2c driver */
static void __exit sensors_adt7475_exit(void)
{
	i2c_del_driver(&adt7475_driver);
}
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("adt7475 driver");
MODULE_LICENSE("GPL");
module_init(sensors_adt7475_init);
module_exit(sensors_adt7475_exit);
| gpl-2.0 |
storm31/android_kernel_samsung_aries | net/mac80211/rc80211_pid_algo.c | 4275 | 14945 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
* Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "rate.h"
#include "mesh.h"
#include "rc80211_pid.h"
/* This is an implementation of a TX rate control algorithm that uses a PID
* controller. Given a target failed frames rate, the controller decides about
* TX rate changes to meet the target failed frames rate.
*
* The controller basically computes the following:
*
* adj = CP * err + CI * err_avg + CD * (err - last_err) * (1 + sharpening)
*
* where
* adj adjustment value that is used to switch TX rate (see below)
* err current error: target vs. current failed frames percentage
* last_err last error
* err_avg average (i.e. poor man's integral) of recent errors
* sharpening non-zero when fast response is needed (i.e. right after
* association or no frames sent for a long time), heading
* to zero over time
* CP Proportional coefficient
* CI Integral coefficient
* CD Derivative coefficient
*
* CP, CI, CD are subject to careful tuning.
*
* The integral component uses a exponential moving average approach instead of
* an actual sliding window. The advantage is that we don't need to keep an
* array of the last N error values and computation is easier.
*
* Once we have the adj value, we map it to a rate by means of a learning
* algorithm. This algorithm keeps the state of the percentual failed frames
* difference between rates. The behaviour of the lowest available rate is kept
* as a reference value, and every time we switch between two rates, we compute
* the difference between the failed frames each rate exhibited. By doing so,
* we compare behaviours which different rates exhibited in adjacent timeslices,
* thus the comparison is minimally affected by external conditions. This
* difference gets propagated to the whole set of measurements, so that the
* reference is always the same. Periodically, we normalize this set so that
* recent events weigh the most. By comparing the adj value with this set, we
* avoid pejorative switches to lower rates and allow for switches to higher
* rates if they behaved well.
*
* Note that for the computations we use a fixed-point representation to avoid
* floating point arithmetic. Hence, all values are shifted left by
* RC_PID_ARITH_SHIFT.
*/
/* Adjust the rate while ensuring that we won't switch to a lower rate if it
* exhibited a worse failed frames behaviour and we'll choose the highest rate
* whose failed frames behaviour is not worse than the one of the original rate
* target. While at it, check that the new rate is valid. */
/*
 * Map the PID controller output @adj (a step count in sorted-rate space)
 * onto a concrete, supported bitrate index and store it in
 * spinfo->txrate_idx.
 */
static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband,
					 struct ieee80211_sta *sta,
					 struct rc_pid_sta_info *spinfo, int adj,
					 struct rc_pid_rateinfo *rinfo)
{
	int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
	int cur = spinfo->txrate_idx;

	band = sband->band;
	n_bitrates = sband->n_bitrates;

	/* Map passed arguments to sorted values. */
	cur_sorted = rinfo[cur].rev_index;
	new_sorted = cur_sorted + adj;

	/* Check limits. */
	if (new_sorted < 0)
		new_sorted = rinfo[0].rev_index;
	else if (new_sorted >= n_bitrates)
		new_sorted = rinfo[n_bitrates - 1].rev_index;

	tmp = new_sorted;

	if (adj < 0) {
		/* Ensure that the rate decrease isn't disadvantageous. */
		for (probe = cur_sorted; probe >= new_sorted; probe--)
			if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
			    rate_supported(sta, band, rinfo[probe].index))
				tmp = probe;
	} else {
		/* Look for rate increase with zero (or below) cost. */
		for (probe = new_sorted + 1; probe < n_bitrates; probe++)
			if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
			    rate_supported(sta, band, rinfo[probe].index))
				tmp = probe;
	}

	/* Fit the rate found to the nearest supported rate. */
	do {
		if (rate_supported(sta, band, rinfo[tmp].index)) {
			spinfo->txrate_idx = rinfo[tmp].index;
			break;
		}
		if (adj < 0)
			tmp--;
		else
			tmp++;
	} while (tmp < n_bitrates && tmp >= 0);

#ifdef CONFIG_MAC80211_DEBUGFS
	rate_control_pid_event_rate_change(&spinfo->events,
		spinfo->txrate_idx,
		sband->bitrates[spinfo->txrate_idx].bitrate);
#endif
}
/* Normalize the failed frames per-rate differences. */
/*
 * Periodic normalization of the per-rate failed-frames differences: the
 * reference (first) entry drifts towards zero by norm_offset, and every
 * following entry is pulled towards its predecessor, so that recent
 * measurements dominate the accumulated state.
 */
static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l)
{
	struct rc_pid_rateinfo *rates = pinfo->rinfo;
	int offset = pinfo->norm_offset;
	int idx;

	/* Drift the reference entry's diff towards zero. */
	if (rates[0].diff > offset)
		rates[0].diff -= offset;
	else if (rates[0].diff < -offset)
		rates[0].diff += offset;

	/* Pull each subsequent entry towards the one before it. */
	for (idx = 1; idx < l; idx++) {
		if (rates[idx].diff > rates[idx - 1].diff + offset)
			rates[idx].diff -= offset;
		else if (rates[idx].diff <= rates[idx - 1].diff)
			rates[idx].diff += offset;
	}
}
/*
 * One PID control interval: compute the failed-frames percentage for the
 * interval, update the per-rate diff table if the rate just changed,
 * evaluate the P/I/D error terms and, when the controller output is
 * non-zero, adjust the TX rate accordingly.
 */
static void rate_control_pid_sample(struct rc_pid_info *pinfo,
				    struct ieee80211_supported_band *sband,
				    struct ieee80211_sta *sta,
				    struct rc_pid_sta_info *spinfo)
{
	struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
	u32 pf;
	s32 err_avg;
	u32 err_prop;
	u32 err_int;
	u32 err_der;
	int adj, i, j, tmp;
	unsigned long period;

	/* In case nothing happened during the previous control interval, turn
	 * the sharpening factor on. */
	period = msecs_to_jiffies(pinfo->sampling_period);
	if (jiffies - spinfo->last_sample > 2 * period)
		spinfo->sharp_cnt = pinfo->sharpen_duration;

	spinfo->last_sample = jiffies;

	/* This should never happen, but in case, we assume the old sample is
	 * still a good measurement and copy it. */
	if (unlikely(spinfo->tx_num_xmit == 0))
		pf = spinfo->last_pf;
	else
		pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;

	spinfo->tx_num_xmit = 0;
	spinfo->tx_num_failed = 0;

	/* If we just switched rate, update the rate behaviour info. */
	if (pinfo->oldrate != spinfo->txrate_idx) {
		i = rinfo[pinfo->oldrate].rev_index;
		j = rinfo[spinfo->txrate_idx].rev_index;

		tmp = (pf - spinfo->last_pf);
		tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);

		rinfo[j].diff = rinfo[i].diff + tmp;
		pinfo->oldrate = spinfo->txrate_idx;
	}
	rate_control_pid_normalize(pinfo, sband->n_bitrates);

	/* Compute the proportional, integral and derivative errors. */
	err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;

	/* Exponential moving average stands in for a true integral term */
	err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
	spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
	err_int = spinfo->err_avg_sc >> pinfo->smoothing_shift;

	err_der = (pf - spinfo->last_pf) *
		  (1 + pinfo->sharpen_factor * spinfo->sharp_cnt);
	spinfo->last_pf = pf;
	if (spinfo->sharp_cnt)
		spinfo->sharp_cnt--;

#ifdef CONFIG_MAC80211_DEBUGFS
	rate_control_pid_event_pf_sample(&spinfo->events, pf, err_prop, err_int,
					 err_der);
#endif

	/* Compute the controller output. */
	adj = (err_prop * pinfo->coeff_p + err_int * pinfo->coeff_i
	      + err_der * pinfo->coeff_d);
	adj = RC_PID_DO_ARITH_RIGHT_SHIFT(adj, 2 * RC_PID_ARITH_SHIFT);

	/* Change rate. */
	if (adj)
		rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo);
}
/*
 * rate_control_ops tx_status callback: account each transmitted frame's
 * success/failure for the currently advised rate, and run a control
 * interval when the sampling period has elapsed.
 */
static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband,
				       struct ieee80211_sta *sta, void *priv_sta,
				       struct sk_buff *skb)
{
	struct rc_pid_info *pinfo = priv;
	struct rc_pid_sta_info *spinfo = priv_sta;
	unsigned long period;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (!spinfo)
		return;

	/* Ignore all frames that were sent with a different rate than the rate
	 * we currently advise mac80211 to use. */
	if (info->status.rates[0].idx != spinfo->txrate_idx)
		return;

	spinfo->tx_num_xmit++;

#ifdef CONFIG_MAC80211_DEBUGFS
	rate_control_pid_event_tx_status(&spinfo->events, info);
#endif

	/* We count frames that totally failed to be transmitted as two bad
	 * frames, those that made it out but had some retries as one good and
	 * one bad frame. */
	if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
		spinfo->tx_num_failed += 2;
		spinfo->tx_num_xmit++;
	} else if (info->status.rates[0].count > 1) {
		spinfo->tx_num_failed++;
		spinfo->tx_num_xmit++;
	}

	/* Update PID controller state. */
	period = msecs_to_jiffies(pinfo->sampling_period);
	if (time_after(jiffies, spinfo->last_sample + period))
		rate_control_pid_sample(pinfo, sband, sta, spinfo);
}
/*
 * rate_control_ops get_rate callback: fill in the rate and retry count
 * for an outgoing frame.  Management / no-ack frames go out at the
 * lowest rate via rate_control_send_low(); everything else uses the
 * controller's current txrate_idx, clamped to the band's rate count.
 */
static void
rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
			  void *priv_sta,
			  struct ieee80211_tx_rate_control *txrc)
{
	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rc_pid_sta_info *spinfo = priv_sta;
	int rateidx;

	/* RTS-protected frames get the long retry limit, others the short */
	if (txrc->rts)
		info->control.rates[0].count =
			txrc->hw->conf.long_frame_max_tx_count;
	else
		info->control.rates[0].count =
			txrc->hw->conf.short_frame_max_tx_count;

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rateidx = spinfo->txrate_idx;

	if (rateidx >= sband->n_bitrates)
		rateidx = sband->n_bitrates - 1;

	info->control.rates[0].idx = rateidx;

#ifdef CONFIG_MAC80211_DEBUGFS
	rate_control_pid_event_tx_rate(&spinfo->events,
		rateidx, sband->bitrates[rateidx].bitrate);
#endif
}
/*
 * rate_control_ops rate_init callback: build the sorted rate table
 * (bubble sort with a reverse mapping) and start the station at the
 * lowest supported rate.
 */
static void
rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
			   struct ieee80211_sta *sta, void *priv_sta)
{
	struct rc_pid_sta_info *spinfo = priv_sta;
	struct rc_pid_info *pinfo = priv;
	struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
	int i, j, tmp;
	bool s;

	/* TODO: This routine should consider using RSSI from previous packets
	 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
	 * Until that method is implemented, we will use the lowest supported
	 * rate as a workaround. */

	/* Sort the rates. This is optimized for the most common case (i.e.
	 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
	 * mapping too. */
	for (i = 0; i < sband->n_bitrates; i++) {
		rinfo[i].index = i;
		rinfo[i].rev_index = i;
		if (RC_PID_FAST_START)
			rinfo[i].diff = 0;
		else
			rinfo[i].diff = i * pinfo->norm_offset;
	}
	for (i = 1; i < sband->n_bitrates; i++) {
		s = 0;
		for (j = 0; j < sband->n_bitrates - i; j++)
			if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
				     sband->bitrates[rinfo[j + 1].index].bitrate)) {
				tmp = rinfo[j].index;
				rinfo[j].index = rinfo[j + 1].index;
				rinfo[j + 1].index = tmp;
				rinfo[rinfo[j].index].rev_index = j;
				rinfo[rinfo[j + 1].index].rev_index = j + 1;
				s = 1;
			}
		if (!s)
			break;
	}

	spinfo->txrate_idx = rate_lowest_index(sband, sta);
}
/*
 * rate_control_ops alloc callback: allocate the per-hardware controller
 * state, size the rate table for the largest band, seed the tunables
 * from the RC_PID_* defaults and expose them through debugfs.  Returns
 * NULL on allocation failure.
 */
static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
				    struct dentry *debugfsdir)
{
	struct rc_pid_info *pinfo;
	struct rc_pid_rateinfo *rinfo;
	struct ieee80211_supported_band *sband;
	int i, max_rates = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
	struct rc_pid_debugfs_entries *de;
#endif

	pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
	if (!pinfo)
		return NULL;

	/* The rate table must hold the largest band's rate count */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
	if (!rinfo) {
		kfree(pinfo);
		return NULL;
	}

	pinfo->target = RC_PID_TARGET_PF;
	pinfo->sampling_period = RC_PID_INTERVAL;
	pinfo->coeff_p = RC_PID_COEFF_P;
	pinfo->coeff_i = RC_PID_COEFF_I;
	pinfo->coeff_d = RC_PID_COEFF_D;
	pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT;
	pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR;
	pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION;
	pinfo->norm_offset = RC_PID_NORM_OFFSET;
	pinfo->rinfo = rinfo;
	pinfo->oldrate = 0;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* Each tunable gets a read/write debugfs file; failures are not
	 * checked, which is the usual convention for debugfs */
	de = &pinfo->dentries;
	de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
					debugfsdir, &pinfo->target);
	de->sampling_period = debugfs_create_u32("sampling_period",
						 S_IRUSR | S_IWUSR, debugfsdir,
						 &pinfo->sampling_period);
	de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_p);
	de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_i);
	de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_d);
	de->smoothing_shift = debugfs_create_u32("smoothing_shift",
						 S_IRUSR | S_IWUSR, debugfsdir,
						 &pinfo->smoothing_shift);
	de->sharpen_factor = debugfs_create_u32("sharpen_factor",
						S_IRUSR | S_IWUSR, debugfsdir,
						&pinfo->sharpen_factor);
	de->sharpen_duration = debugfs_create_u32("sharpen_duration",
						  S_IRUSR | S_IWUSR, debugfsdir,
						  &pinfo->sharpen_duration);
	de->norm_offset = debugfs_create_u32("norm_offset",
					     S_IRUSR | S_IWUSR, debugfsdir,
					     &pinfo->norm_offset);
#endif

	return pinfo;
}
/*
 * rate_control_ops free callback: tear down the debugfs entries created
 * by rate_control_pid_alloc() and release the controller state.
 */
static void rate_control_pid_free(void *priv)
{
	struct rc_pid_info *pinfo = priv;
#ifdef CONFIG_MAC80211_DEBUGFS
	struct rc_pid_debugfs_entries *de = &pinfo->dentries;

	debugfs_remove(de->norm_offset);
	debugfs_remove(de->sharpen_duration);
	debugfs_remove(de->sharpen_factor);
	debugfs_remove(de->smoothing_shift);
	debugfs_remove(de->coeff_d);
	debugfs_remove(de->coeff_i);
	debugfs_remove(de->coeff_p);
	debugfs_remove(de->sampling_period);
	debugfs_remove(de->target);
#endif

	kfree(pinfo->rinfo);
	kfree(pinfo);
}
/*
 * rate_control_ops alloc_sta callback: allocate zeroed per-station
 * controller state; last_sample starts at "now" so the first control
 * interval is measured from association time.
 */
static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta,
					gfp_t gfp)
{
	struct rc_pid_sta_info *spinfo;

	spinfo = kzalloc(sizeof(*spinfo), gfp);
	if (spinfo == NULL)
		return NULL;

	spinfo->last_sample = jiffies;

#ifdef CONFIG_MAC80211_DEBUGFS
	spin_lock_init(&spinfo->events.lock);
	init_waitqueue_head(&spinfo->events.waitqueue);
#endif

	return spinfo;
}
/* rate_control_ops free_sta callback: release the per-station state */
static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
				      void *priv_sta)
{
	kfree(priv_sta);
}
/* mac80211 rate-control algorithm registration table for "pid" */
static struct rate_control_ops mac80211_rcpid = {
	.name = "pid",
	.tx_status = rate_control_pid_tx_status,
	.get_rate = rate_control_pid_get_rate,
	.rate_init = rate_control_pid_rate_init,
	.alloc = rate_control_pid_alloc,
	.free = rate_control_pid_free,
	.alloc_sta = rate_control_pid_alloc_sta,
	.free_sta = rate_control_pid_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = rate_control_pid_add_sta_debugfs,
	.remove_sta_debugfs = rate_control_pid_remove_sta_debugfs,
#endif
};
/* Register the "pid" algorithm with the mac80211 rate-control core */
int __init rc80211_pid_init(void)
{
	return ieee80211_rate_control_register(&mac80211_rcpid);
}

/* Unregister the "pid" algorithm */
void rc80211_pid_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_rcpid);
}
| gpl-2.0 |
raja161287/kernel_samsung_msm8930-common | arch/arm/mach-msm/board-sapphire-rfkill.c | 4531 | 2657 | /* linux/arch/arm/mach-msm/board-sapphire-rfkill.c
* Copyright (C) 2007-2009 HTC Corporation.
* Author: Thomas Tsai <thomas_tsai@htc.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Control bluetooth power for sapphire platform */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/rfkill.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include "gpio_chip.h"
#include "board-sapphire.h"
static struct rfkill *bt_rfk;
static const char bt_name[] = "brf6300";
extern int sapphire_bt_fastclock_power(int on);
/*
 * rfkill set_block callback: power the bluetooth chip up or down.
 * Power-up raises the fast clock and 32K clock before GPIO 101
 * (presumably the BT enable line -- TODO confirm against board docs),
 * with a short settle delay; power-down reverses that order.
 */
static int bluetooth_set_power(void *data, bool blocked)
{
	if (blocked) {
		/* Power down: drop the enable line first, then the clocks. */
		gpio_direction_output(101, 0);
		gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 0);
		sapphire_bt_fastclock_power(0);
	} else {
		/* Power up: clocks first, let them settle, then enable. */
		sapphire_bt_fastclock_power(1);
		gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 1);
		udelay(10);
		gpio_direction_output(101, 1);
	}
	return 0;
}
/* rfkill core callbacks; only block/unblock is implemented */
static struct rfkill_ops sapphire_rfkill_ops = {
	.set_block = bluetooth_set_power,
};
/*
 * Platform probe: start with bluetooth powered off (blocked), then
 * allocate and register the rfkill switch.  The rfkill object is
 * destroyed again if registration fails.
 */
static int sapphire_rfkill_probe(struct platform_device *pdev)
{
	int rc = 0;
	bool default_state = true;  /* off */

	bluetooth_set_power(NULL, default_state);

	bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
			      &sapphire_rfkill_ops, NULL);
	if (!bt_rfk)
		return -ENOMEM;

	/* userspace cannot take exclusive control */
	rfkill_set_states(bt_rfk, default_state, false);

	rc = rfkill_register(bt_rfk);
	if (rc)
		rfkill_destroy(bt_rfk);

	return rc;
}
/* Platform remove: unregister and free the rfkill switch */
static int sapphire_rfkill_remove(struct platform_device *dev)
{
	rfkill_unregister(bt_rfk);
	rfkill_destroy(bt_rfk);

	return 0;
}
/* Platform driver glue; matched by name against the board device */
static struct platform_driver sapphire_rfkill_driver = {
	.probe = sapphire_rfkill_probe,
	.remove = sapphire_rfkill_remove,
	.driver = {
		.name = "sapphire_rfkill",
		.owner = THIS_MODULE,
	},
};
/* Module entry point: register the platform driver */
static int __init sapphire_rfkill_init(void)
{
	return platform_driver_register(&sapphire_rfkill_driver);
}

/* Module exit point: unregister the platform driver */
static void __exit sapphire_rfkill_exit(void)
{
	platform_driver_unregister(&sapphire_rfkill_driver);
}
module_init(sapphire_rfkill_init);
module_exit(sapphire_rfkill_exit);
MODULE_DESCRIPTION("sapphire rfkill");
MODULE_AUTHOR("Nick Pelly <npelly@google.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
euqove/evitaul-3.4.10-g5f4de050 | drivers/tty/hvc/hvc_dcc.c | 4531 | 2346 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/processor.h>
#include "hvc_console.h"
/* DCC Status Bits */
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
/*
 * Read the DCC comms control/status register via CP14; callers test the
 * DCC_STATUS_RX/TX bits to see whether data is pending in each direction.
 */
static inline u32 __dcc_getstatus(void)
{
	u32 __ret;
	asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
		: "=r" (__ret) : : "cc");

	return __ret;
}
/*
 * Read one byte from the DCC comms data register via CP14.  Callers must
 * have checked DCC_STATUS_RX first; the isb() orders the read before
 * subsequent status polls.
 */
static inline char __dcc_getchar(void)
{
	char __c;

	asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
		: "=r" (__c));
	isb();

	return __c;
}
/*
 * Write one byte to the DCC comms data register via CP14.  Callers must
 * have waited for DCC_STATUS_TX to clear; the isb() orders the write
 * before subsequent status polls.
 */
static inline void __dcc_putchar(char c)
{
	asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
		: /* no output register */
		: "r" (c));
	isb();
}
/*
 * hv_ops put_chars: busy-wait until the TX register is free, then emit
 * each byte in turn.  Always reports the full count as written.
 */
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		while (__dcc_getstatus() & DCC_STATUS_TX)
			cpu_relax();

		__dcc_putchar(buf[i]);
	}

	return count;
}
/*
 * hv_ops get_chars: non-blocking read.  Drain bytes from the DCC RX
 * register into @buf until the RX status bit clears or @count bytes
 * have been read; returns the number of bytes actually stored.
 */
static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
{
	int got = 0;

	while (got < count && (__dcc_getstatus() & DCC_STATUS_RX))
		buf[got++] = __dcc_getchar();

	return got;
}
/* hvc console backend operations implemented on top of DCC */
static const struct hv_ops hvc_dcc_get_put_ops = {
	.get_chars = hvc_dcc_get_chars,
	.put_chars = hvc_dcc_put_chars,
};
/* Register DCC as a candidate hvc console (index 0) early in boot */
static int __init hvc_dcc_console_init(void)
{
	hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);

	return 0;
}
console_initcall(hvc_dcc_console_init);
/* Allocate the hvc tty device backed by DCC with a 128-byte buffer */
static int __init hvc_dcc_init(void)
{
	hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);

	return 0;
}
device_initcall(hvc_dcc_init);
| gpl-2.0 |
cooldudezach/android_kernel_zte_hera | drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c | 8371 | 9220 | /*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include "usbip_common.h"
#include "usbip_host_driver.h"
#undef PROGNAME
#define PROGNAME "libusbip"
struct usbip_host_driver *host_driver;
#define SYSFS_OPEN_RETRIES 100
/* only the first interface value is true! */
/*
 * Read the usbip_status sysfs attribute of @udev's first interface (per
 * the note above, only the first interface's value is meaningful) and
 * return it as an integer, or -1 on error.  Polls for the attribute to
 * appear to work around the detach/re-create race described below.
 */
static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{
	char attrpath[SYSFS_PATH_MAX];
	struct sysfs_attribute *attr;
	int value = 0;
	int rc;
	struct stat s;
	int retries = SYSFS_OPEN_RETRIES;

	/* This access is racy!
	 *
	 * Just after detach, our driver removes the sysfs
	 * files and recreates them.
	 *
	 * We may try and fail to open the usbip_status of
	 * an exported device in the (short) window where
	 * it has been removed and not yet recreated.
	 *
	 * This is a bug in the interface. Nothing we can do
	 * except work around it here by polling for the sysfs
	 * usbip_status to reappear.
	 */

	snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status",
		 udev->path, udev->busid, udev->bConfigurationValue, 0);

	while (retries > 0) {
		if (stat(attrpath, &s) == 0)
			break;

		/* ENOENT means "not recreated yet" -- keep polling */
		if (errno != ENOENT) {
			dbg("stat failed: %s", attrpath);
			return -1;
		}

		usleep(10000); /* 10ms */
		retries--;
	}

	if (retries == 0)
		dbg("usbip_status not ready after %d retries",
		    SYSFS_OPEN_RETRIES);
	else if (retries < SYSFS_OPEN_RETRIES)
		dbg("warning: usbip_status ready after %d retries",
		    SYSFS_OPEN_RETRIES - retries);

	attr = sysfs_open_attribute(attrpath);
	if (!attr) {
		dbg("sysfs_open_attribute failed: %s", attrpath);
		return -1;
	}

	rc = sysfs_read_attribute(attr);
	if (rc) {
		dbg("sysfs_read_attribute failed: %s", attrpath);
		sysfs_close_attribute(attr);
		return -1;
	}

	value = atoi(attr->value);

	sysfs_close_attribute(attr);

	return value;
}
/*
 * Build a usbip_exported_device from its sysfs device path: open the
 * sysfs device, read the USB device data and the usbip_status attribute,
 * then grow the allocation to append one usbip_usb_interface record per
 * interface and fill those in.
 *
 * Returns a heap-allocated device (caller frees) or NULL on error.
 *
 * Bug fix: the original did `edev = realloc(edev, size)` -- on realloc
 * failure the only pointer to the old block was overwritten with NULL,
 * leaking the allocation AND leaving edev->sudev open because the error
 * path saw edev == NULL.  The old pointer is now preserved so the
 * cleanup path can close and free it.
 */
static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
{
	struct usbip_exported_device *edev = NULL;
	struct usbip_exported_device *edev_old;
	size_t size;
	int i;

	edev = calloc(1, sizeof(*edev));
	if (!edev) {
		dbg("calloc failed");
		return NULL;
	}

	edev->sudev = sysfs_open_device_path(sdevpath);
	if (!edev->sudev) {
		dbg("sysfs_open_device_path failed: %s", sdevpath);
		goto err;
	}

	read_usb_device(edev->sudev, &edev->udev);

	edev->status = read_attr_usbip_status(&edev->udev);
	if (edev->status < 0)
		goto err;

	/* reallocate buffer to include usb interface data */
	size = sizeof(*edev) + edev->udev.bNumInterfaces *
		sizeof(struct usbip_usb_interface);

	edev_old = edev;
	edev = realloc(edev, size);
	if (!edev) {
		dbg("realloc failed");
		/* keep the old block so the cleanup path can release it */
		edev = edev_old;
		goto err;
	}

	for (i = 0; i < edev->udev.bNumInterfaces; i++)
		read_usb_interface(&edev->udev, i, &edev->uinf[i]);

	return edev;
err:
	if (edev && edev->sudev)
		sysfs_close_device(edev->sudev);
	if (edev)
		free(edev);

	return NULL;
}
/*
 * Return 1 if @target is not yet present in @dlist (matched by
 * bus_id), 0 if a device with the same bus_id is already listed.
 * Used to reduce a list of interfaces to a set of unique devices.
 */
static int check_new(struct dlist *dlist, struct sysfs_device *target)
{
struct sysfs_device *dev;
dlist_for_each_data(dlist, dev, struct sysfs_device) {
if (!strncmp(dev->bus_id, target->bus_id, SYSFS_BUS_ID_SIZE))
/* device found and is not new */
return 0;
}
return 1;
}
/*
 * Intentionally empty dlist element destructor.  The list container
 * itself is reclaimed by dlist_destroy(); the payloads are owned by
 * libsysfs and must not be freed here.
 */
static void delete_nothing(void *unused_data)
{
	(void)unused_data;
}
/*
 * Rebuild host_driver->edev_list from sysfs: walk every interface
 * bound to the usbip host driver, reduce the interface list to the
 * set of unique parent USB devices, and append an exported-device
 * entry for each.  Returns 0 on success (including "no devices bound
 * yet"), -1 on allocation failure.
 *
 * Fixes vs. original: the temporary sudev_list allocation was never
 * checked, and it leaked on the early `return 0` taken when no
 * device is bound to the driver.
 */
static int refresh_exported_devices(void)
{
	/* sysfs_device of usb_interface */
	struct sysfs_device *suintf;
	struct dlist *suintf_list;
	/* sysfs_device of usb_device */
	struct sysfs_device *sudev;
	struct dlist *sudev_list;
	struct usbip_exported_device *edev;

	/* payloads are owned by libsysfs, hence the no-op destructor */
	sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device),
					   delete_nothing);
	if (!sudev_list) {
		dbg("dlist_new_with_delete failed");
		return -1;
	}

	suintf_list = sysfs_get_driver_devices(host_driver->sysfs_driver);
	if (!suintf_list) {
		/*
		 * Not an error condition. There are simply no devices bound to
		 * the driver yet.
		 */
		dbg("bind " USBIP_HOST_DRV_NAME ".ko to a usb device to be "
		    "exportable!");
		dlist_destroy(sudev_list);	/* was leaked before */
		return 0;
	}

	/* collect unique USB devices (not interfaces) */
	dlist_for_each_data(suintf_list, suintf, struct sysfs_device) {
		/* get usb device of this usb interface */
		sudev = sysfs_get_device_parent(suintf);
		if (!sudev) {
			dbg("sysfs_get_device_parent failed: %s", suintf->name);
			continue;
		}
		if (check_new(sudev_list, sudev)) {
			/* insert item at head of list */
			dlist_unshift(sudev_list, sudev);
		}
	}

	dlist_for_each_data(sudev_list, sudev, struct sysfs_device) {
		edev = usbip_exported_device_new(sudev->path);
		if (!edev) {
			dbg("usbip_exported_device_new failed");
			continue;
		}
		dlist_unshift(host_driver->edev_list, edev);
		host_driver->ndevs++;
	}

	dlist_destroy(sudev_list);

	return 0;
}
/*
 * Locate and open the usbip host driver entry under
 * <sysfs mount>/bus/usb/drivers/.  Returns an open sysfs_driver
 * handle, or NULL when sysfs is unavailable or the driver is not
 * loaded.
 */
static struct sysfs_driver *open_sysfs_host_driver(void)
{
	char bus_type[] = "usb";
	char mntpath[SYSFS_PATH_MAX];
	char drvpath[SYSFS_PATH_MAX];
	struct sysfs_driver *drv;

	if (sysfs_get_mnt_path(mntpath, SYSFS_PATH_MAX) < 0) {
		dbg("sysfs_get_mnt_path failed");
		return NULL;
	}

	/* <mnt>/bus/usb/drivers/<usbip-host> */
	snprintf(drvpath, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s",
		 mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
		 USBIP_HOST_DRV_NAME);

	drv = sysfs_open_driver_path(drvpath);
	if (!drv)
		dbg("sysfs_open_driver_path failed");

	return drv;
}
/*
 * dlist element destructor for exported devices: close the sysfs
 * device handle, then free the entry itself.
 */
static void usbip_exported_device_delete(void *dev)
{
	struct usbip_exported_device *edev = dev;

	sysfs_close_device(edev->sudev);
	free(edev);
}
/*
 * Initialize the global host_driver state: allocate it, create the
 * exported-device list, open the sysfs driver entry and populate the
 * list.  Returns 0 on success, -1 on failure (with host_driver reset
 * to NULL).  Counterpart of usbip_host_driver_close().
 */
int usbip_host_driver_open(void)
{
int rc;
host_driver = calloc(1, sizeof(*host_driver));
if (!host_driver) {
dbg("calloc failed");
return -1;
}
host_driver->ndevs = 0;
/* list owns its entries; destructor closes sudev and frees them */
host_driver->edev_list =
dlist_new_with_delete(sizeof(struct usbip_exported_device),
usbip_exported_device_delete);
if (!host_driver->edev_list) {
dbg("dlist_new_with_delete failed");
goto err_free_host_driver;
}
host_driver->sysfs_driver = open_sysfs_host_driver();
if (!host_driver->sysfs_driver)
goto err_destroy_edev_list;
rc = refresh_exported_devices();
if (rc < 0)
goto err_close_sysfs_driver;
return 0;
/* unwind in reverse order of acquisition */
err_close_sysfs_driver:
sysfs_close_driver(host_driver->sysfs_driver);
err_destroy_edev_list:
dlist_destroy(host_driver->edev_list);
err_free_host_driver:
free(host_driver);
host_driver = NULL;
return -1;
}
/*
 * Tear down the global host_driver state created by
 * usbip_host_driver_open().  Safe to call when open failed or was
 * never attempted (host_driver == NULL).
 */
void usbip_host_driver_close(void)
{
if (!host_driver)
return;
if (host_driver->edev_list)
dlist_destroy(host_driver->edev_list);
if (host_driver->sysfs_driver)
sysfs_close_driver(host_driver->sysfs_driver);
free(host_driver);
host_driver = NULL;
}
/*
 * Discard and rebuild the exported-device list from current sysfs
 * state.  Returns 0 on success, -1 on failure; on failure
 * host_driver->edev_list may be NULL, so callers should not reuse the
 * handle without reopening.
 */
int usbip_host_refresh_device_list(void)
{
int rc;
if (host_driver->edev_list)
dlist_destroy(host_driver->edev_list);
host_driver->ndevs = 0;
host_driver->edev_list =
dlist_new_with_delete(sizeof(struct usbip_exported_device),
usbip_exported_device_delete);
if (!host_driver->edev_list) {
dbg("dlist_new_with_delete failed");
return -1;
}
rc = refresh_exported_devices();
if (rc < 0)
return -1;
return 0;
}
/*
 * Hand an established TCP connection to the kernel: write @sockfd into
 * the device's usbip_sockfd sysfs attribute, which makes the usbip
 * host driver start serving @edev over that socket.  The device must
 * be in SDEV_ST_AVAILABLE state.  Returns the (non-negative) result of
 * the sysfs write on success, negative on error.
 */
int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
{
char attr_name[] = "usbip_sockfd";
char attr_path[SYSFS_PATH_MAX];
struct sysfs_attribute *attr;
char sockfd_buff[30];
int ret;
if (edev->status != SDEV_ST_AVAILABLE) {
dbg("device not available: %s", edev->udev.busid);
switch (edev->status) {
case SDEV_ST_ERROR:
dbg("status SDEV_ST_ERROR");
break;
case SDEV_ST_USED:
dbg("status SDEV_ST_USED");
break;
default:
dbg("status unknown: 0x%x", edev->status);
}
return -1;
}
/* only the first interface is true */
snprintf(attr_path, sizeof(attr_path), "%s/%s:%d.%d/%s",
edev->udev.path, edev->udev.busid,
edev->udev.bConfigurationValue, 0, attr_name);
attr = sysfs_open_attribute(attr_path);
if (!attr) {
dbg("sysfs_open_attribute failed: %s", attr_path);
return -1;
}
snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
dbg("write: %s", sockfd_buff);
ret = sysfs_write_attribute(attr, sockfd_buff, strlen(sockfd_buff));
if (ret < 0) {
dbg("sysfs_write_attribute failed: sockfd %s to %s",
sockfd_buff, attr_path);
goto err_write_sockfd;
}
dbg("connect: %s", edev->udev.busid);
/* attribute is closed on both the success and the error path */
err_write_sockfd:
sysfs_close_attribute(attr);
return ret;
}
/*
 * Return the @num'th entry (0-based) of the exported-device list, or
 * NULL when the index is out of range.
 */
struct usbip_exported_device *usbip_host_get_device(int num)
{
	struct usbip_exported_device *edev;
	struct dlist *dlist = host_driver->edev_list;
	int idx = 0;

	dlist_for_each_data(dlist, edev, struct usbip_exported_device) {
		if (idx++ == num)
			return edev;
	}

	return NULL;
}
| gpl-2.0 |
bergwolf/redpatch | arch/powerpc/platforms/ps3/time.c | 9395 | 2507 | /*
* PS3 time and rtc routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/firmware.h>
#include <asm/rtc.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#include "platform.h"
#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__)
/*
 * Dump the fields of @tm at pr_debug level, tagged with the calling
 * function and line (use via the dump_tm() macro above).  tm_yday and
 * tm_isdst are deliberately not printed.
 */
static void _dump_tm(const struct rtc_time *tm, const char* func, int line)
{
pr_debug("%s:%d tm_sec %d\n", func, line, tm->tm_sec);
pr_debug("%s:%d tm_min %d\n", func, line, tm->tm_min);
pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour);
pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday);
pr_debug("%s:%d tm_mon %d\n", func, line, tm->tm_mon);
pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year);
pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday);
}
#define dump_time(_a) _dump_time(_a, __func__, __LINE__)
/*
 * Debug helper: convert a raw @time value to broken-down rtc_time via
 * to_tm() and dump both (use via the dump_time() macro above).
 * __maybe_unused: only referenced from debug call sites.
 */
static void __maybe_unused _dump_time(int time, const char *func,
int line)
{
struct rtc_time tm;
to_tm(time, &tm);
pr_debug("%s:%d time %d\n", func, line, time);
_dump_tm(&tm, func, line);
}
/*
 * Set the global timebase and processor frequencies from the PS3
 * firmware repository.  The processor frequency is derived as 40x the
 * timebase frequency.  A repository read failure is fatal (BUG).
 */
void __init ps3_calibrate_decr(void)
{
int result;
u64 tmp;
result = ps3_repository_read_be_tb_freq(0, &tmp);
BUG_ON(result);
ppc_tb_freq = tmp;
ppc_proc_freq = ppc_tb_freq * 40;
}
/*
 * Fetch the current RTC counter from the LV1 hypervisor.  The
 * timebase value lv1_get_rtc() returns alongside it is not needed
 * here.  A hypercall failure is fatal (BUG).
 */
static u64 read_rtc(void)
{
	u64 rtc_val;
	u64 tb_val;
	int status;

	status = lv1_get_rtc(&rtc_val, &tb_val);
	BUG_ON(status);

	return rtc_val;
}
/*
 * Boot-time wall clock: hypervisor RTC plus the user-set offset kept
 * in the OS area.
 */
unsigned long __init ps3_get_boot_time(void)
{
return read_rtc() + ps3_os_area_get_rtc_diff();
}
/*
 * Register the "rtc-ps3" platform device so the matching RTC driver
 * can bind.  Bails out with -ENODEV when not running on PS3/LV1.
 */
static int __init ps3_rtc_init(void)
{
struct platform_device *pdev;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return 0;
}
module_init(ps3_rtc_init);
| gpl-2.0 |
aznrice/android_kernel_samsung_afyonltetmo | drivers/uwb/uwbd.c | 9651 | 10482 | /*
* Ultra Wide Band
* Neighborhood Management Daemon
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* This daemon takes care of maintaing information that describes the
* UWB neighborhood that the radios in this machine can see. It also
* keeps a tab of which devices are visible, makes sure each HC sits
* on a different channel to avoid interfering, etc.
*
* Different drivers (radio controller, device, any API in general)
* communicate with this daemon through an event queue. Daemon wakes
* up, takes a list of events and handles them one by one; handling
* function is extracted from a table based on the event's type and
* subtype. Events are freed only if the handling function says so.
*
* . Lock protecting the event list has to be an spinlock and locked
* with IRQSAVE because it might be called from an interrupt
* context (ie: when events arrive and the notification drops
* down from the ISR).
*
* . UWB radio controller drivers queue events to the daemon using
* uwbd_event_queue(). They just get the event, chew it to make it
* look like UWBD likes it and pass it in a buffer allocated with
* uwb_event_alloc().
*
* EVENTS
*
* Events have a type, a subtype, a length, some other stuff and the
* data blob, which depends on the event. The header is 'struct
* uwb_event'; for payloads, see 'struct uwbd_evt_*'.
*
* EVENT HANDLER TABLES
*
* To find a handling function for an event, the type is used to index
* a subtype-table in the type-table. The subtype-table is indexed
* with the subtype to get the function that handles the event. Start
* with the main type-table 'uwbd_evt_type_handler'.
*
* DEVICES
*
* Devices are created when a bunch of beacons have been received and
* it is stablished that the device has stable radio presence. CREATED
* only, not configured. Devices are ONLY configured when an
* Application-Specific IE Probe is receieved, in which the device
* declares which Protocol ID it groks. Then the device is CONFIGURED
* (and the driver->probe() stuff of the device model is invoked).
*
* Devices are considered disconnected when a certain number of
* beacons are not received in an amount of time.
*
* Handler functions are called normally uwbd_evt_handle_*().
*/
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "uwb-internal.h"
/*
* UWBD Event handler function signature
*
* Return !0 if the event needs not to be freed (ie the handler
* takes/took care of it). 0 means the daemon code will free the
* event.
*
* @evt->rc is already referenced and guaranteed to exist. See
* uwb_evt_handle().
*/
typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
/**
* Properties of a UWBD event
*
* @handler: the function that will handle this event
* @name: text name of event
*/
struct uwbd_event {
uwbd_evt_handler_f handler;
const char *name;
};
/* Table of handlers for and properties of the UWBD Radio Control Events.
 * Indexed by the UWB_RC_EVT_* event code; entries with a NULL handler
 * (gaps in the designated initializers) are rejected by
 * uwbd_event_handle_urc(). */
static struct uwbd_event uwbd_urc_events[] = {
[UWB_RC_EVT_IE_RCV] = {
.handler = uwbd_evt_handle_rc_ie_rcv,
.name = "IE_RECEIVED"
},
[UWB_RC_EVT_BEACON] = {
.handler = uwbd_evt_handle_rc_beacon,
.name = "BEACON_RECEIVED"
},
[UWB_RC_EVT_BEACON_SIZE] = {
.handler = uwbd_evt_handle_rc_beacon_size,
.name = "BEACON_SIZE_CHANGE"
},
[UWB_RC_EVT_BPOIE_CHANGE] = {
.handler = uwbd_evt_handle_rc_bpoie_change,
.name = "BPOIE_CHANGE"
},
[UWB_RC_EVT_BP_SLOT_CHANGE] = {
.handler = uwbd_evt_handle_rc_bp_slot_change,
.name = "BP_SLOT_CHANGE"
},
[UWB_RC_EVT_DRP_AVAIL] = {
.handler = uwbd_evt_handle_rc_drp_avail,
.name = "DRP_AVAILABILITY_CHANGE"
},
[UWB_RC_EVT_DRP] = {
.handler = uwbd_evt_handle_rc_drp,
.name = "DRP"
},
[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
.handler = uwbd_evt_handle_rc_dev_addr_conflict,
.name = "DEV_ADDR_CONFLICT",
},
};
/* One per event *type*: names the group and points at the per-subtype
 * handler table plus its length (used for bounds checking). */
struct uwbd_evt_type_handler {
const char *name;
struct uwbd_event *uwbd_events;
size_t size;
};
/* Table of handlers for each UWBD Event type. */
static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = {
[UWB_RC_CET_GENERAL] = {
.name = "URC",
.uwbd_events = uwbd_urc_events,
.size = ARRAY_SIZE(uwbd_urc_events),
},
};
/* Handlers for internal UWB_EVT_TYPE_MSG messages (not radio events),
 * indexed by evt->message. */
static const struct uwbd_event uwbd_message_handlers[] = {
[UWB_EVT_MSG_RESET] = {
.handler = uwbd_msg_handle_reset,
.name = "reset",
},
};
/*
* Handle an URC event passed to the UWB Daemon
*
* @evt: the event to handle
* @returns: 0 if the event can be kfreed, !0 on the contrary
* (somebody else took ownership) [coincidentally, returning
* a <0 errno code will free it :)].
*
* Looks up the two indirection tables (one for the type, one for the
* subtype) to decide which function handles it and then calls the
* handler.
*
* The event structure passed to the event handler has the radio
* controller in @evt->rc referenced. The reference will be dropped
* once the handler returns, so if it needs it for longer (async),
* it'll need to take another one.
*/
static
int uwbd_event_handle_urc(struct uwb_event *evt)
{
int result = -EINVAL;
struct uwbd_evt_type_handler *type_table;
uwbd_evt_handler_f handler;
u8 type, context;
u16 event;
type = evt->notif.rceb->bEventType;
event = le16_to_cpu(evt->notif.rceb->wEvent);
context = evt->notif.rceb->bEventContext;
/* two-level lookup: type selects the subtype table, event selects
 * the handler; every step is bounds/NULL checked because the codes
 * come from the radio controller */
if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers))
goto out;
type_table = &uwbd_urc_evt_type_handlers[type];
if (type_table->uwbd_events == NULL)
goto out;
if (event >= type_table->size)
goto out;
handler = type_table->uwbd_events[event].handler;
if (handler == NULL)
goto out;
result = (*handler)(evt);
out:
if (result < 0)
dev_err(&evt->rc->uwb_dev.dev,
"UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n",
type, event, context, result);
return result;
}
/*
 * Dispatch an internal UWB_EVT_TYPE_MSG event to its handler in
 * uwbd_message_handlers[], bounds-checking the message code first.
 * Failures are only logged; the caller frees the event.
 */
static void uwbd_event_handle_message(struct uwb_event *evt)
{
struct uwb_rc *rc;
int result;
rc = evt->rc;
if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
return;
}
result = uwbd_message_handlers[evt->message].handler(evt);
if (result < 0)
dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
uwbd_message_handlers[evt->message].name, result);
}
/*
 * Handle one dequeued event: notifications go through the URC dispatch
 * tables, messages through the message table.  Events are ignored
 * while the radio controller is not ready.  Drops the rc reference
 * taken when the event was queued; the caller frees @evt itself.
 */
static void uwbd_event_handle(struct uwb_event *evt)
{
struct uwb_rc *rc;
int should_keep;
rc = evt->rc;
if (rc->ready) {
switch (evt->type) {
case UWB_EVT_TYPE_NOTIF:
should_keep = uwbd_event_handle_urc(evt);
/* handler returning >0 took ownership of the rceb */
if (should_keep <= 0)
kfree(evt->notif.rceb);
break;
case UWB_EVT_TYPE_MSG:
uwbd_event_handle_message(evt);
break;
default:
dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
break;
}
}
__uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
}
/**
* UWB Daemon
*
* Listens to all UWB notifications and takes care to track the state
* of the UWB neighbourhood for the kernel. When we do a run, we
* spinlock, move the list to a private copy and release the
* lock. Hold it as little as possible. Not a conflict: it is
* guaranteed we own the events in the private list.
*
* FIXME: should change so we don't have a 1HZ timer all the time, but
* only if there are devices.
*/
/*
 * Daemon thread body (started per radio controller by uwbd_start()).
 * Waits up to 1s for queued events or a stop request, dequeues one
 * event at a time under the IRQ-safe list lock, handles and frees it,
 * then purges beacon-cache entries for devices that disappeared.
 */
static int uwbd(void *param)
{
struct uwb_rc *rc = param;
unsigned long flags;
struct uwb_event *evt;
int should_stop = 0;
while (1) {
wait_event_interruptible_timeout(
rc->uwbd.wq,
!list_empty(&rc->uwbd.event_list)
|| (should_stop = kthread_should_stop()),
HZ);
if (should_stop)
break;
try_to_freeze();
/* pop a single event; hold the lock as briefly as possible */
spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
if (!list_empty(&rc->uwbd.event_list)) {
evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node);
list_del(&evt->list_node);
} else
evt = NULL;
spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
if (evt) {
uwbd_event_handle(evt);
kfree(evt);
}
uwb_beca_purge(rc); /* Purge devices that left */
}
return 0;
}
/**
 * uwbd_start - start the UWB management daemon for a radio controller
 * @rc: radio controller the daemon thread serves
 *
 * Fix vs. original: kthread_run() returns an ERR_PTR() on failure,
 * never NULL, so the old NULL check could not fire and a failed start
 * left an invalid task pointer behind.  Use IS_ERR() and store NULL on
 * failure so uwbd_stop() can recognize the unstarted state.
 */
void uwbd_start(struct uwb_rc *rc)
{
	struct task_struct *task = kthread_run(uwbd, rc, "uwbd");

	if (IS_ERR(task)) {
		rc->uwbd.task = NULL;
		printk(KERN_ERR "UWB: Cannot start management daemon; "
		       "UWB won't work\n");
	} else {
		rc->uwbd.task = task;
		rc->uwbd.pid = rc->uwbd.task->pid;
	}
}
/*
 * Stop the UWB daemon and free any unprocessed events.
 *
 * Fix vs. original: kthread_stop() was called unconditionally, which
 * oopses when uwbd_start() failed (task NULL or, before the start fix,
 * an ERR_PTR).  Guard with IS_ERR_OR_NULL; the event queue is flushed
 * either way.
 */
void uwbd_stop(struct uwb_rc *rc)
{
	if (!IS_ERR_OR_NULL(rc->uwbd.task))
		kthread_stop(rc->uwbd.task);
	uwbd_flush(rc);
}
/*
* Queue an event for the management daemon
*
* When some lower layer receives an event, it uses this function to
* push it forward to the UWB daemon.
*
* Once you pass the event, you don't own it any more, but the daemon
* does. It will uwb_event_free() it when done, so make sure you
* uwb_event_alloc()ed it or bad things will happen.
*
* If the daemon is not running, we just free the event.
*/
void uwbd_event_queue(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
unsigned long flags;
/* IRQ-safe lock: events can be queued from interrupt context */
spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
if (rc->uwbd.pid != 0) {
/* daemon is running: hand over ownership and wake it */
list_add(&evt->list_node, &rc->uwbd.event_list);
wake_up_all(&rc->uwbd.wq);
} else {
/* no daemon: drop the rc reference and discard the event */
__uwb_rc_put(evt->rc);
if (evt->type == UWB_EVT_TYPE_NOTIF)
kfree(evt->notif.rceb);
kfree(evt);
}
spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
return;
}
/*
 * Discard every still-queued event belonging to @rc, dropping the rc
 * reference each event holds and freeing its payload.  Called with the
 * daemon already stopped, hence the plain (non IRQ-save) lock.
 */
void uwbd_flush(struct uwb_rc *rc)
{
struct uwb_event *evt, *nxt;
spin_lock_irq(&rc->uwbd.event_list_lock);
list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) {
if (evt->rc == rc) {
__uwb_rc_put(rc);
list_del(&evt->list_node);
if (evt->type == UWB_EVT_TYPE_NOTIF)
kfree(evt->notif.rceb);
kfree(evt);
}
}
spin_unlock_irq(&rc->uwbd.event_list_lock);
}
| gpl-2.0 |
systemdaemon/systemd | src/linux/arch/unicore32/kernel/stacktrace.c | 11955 | 3350 | /*
* linux/arch/unicore32/kernel/stacktrace.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#if defined(CONFIG_FRAME_POINTER)
/*
* Unwind the current stack frame and store the new register values in the
* structure passed as argument. Unwinding is equivalent to a function return,
* hence the new PC value rather than LR should be used for backtrace.
*
* With framepointer enabled, a simple function prologue looks like this:
* mov ip, sp
* stmdb sp!, {fp, ip, lr, pc}
* sub fp, ip, #4
*
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
/* only go to a higher address on the stack */
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds: the saved
 * {fp, sp, pc} triple lives at fp-12..fp-4 (see prologue in the
 * comment above), so fp must leave room for 12 bytes below and
 * stay under the top of the kernel stack */
if (fp < (low + 12) || fp + 4 >= high)
return -EINVAL;
/* restore the registers from the stack frame */
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
return 0;
}
#endif
/*
 * Walk the stack starting at @frame, invoking @fn(frame, data) for
 * each frame.  Stops when @fn returns non-zero or when unwinding
 * fails (frame pointer out of bounds / end of stack).
 */
void notrace walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
{
while (1) {
int ret;
if (fn(frame, data))
break;
ret = unwind_frame(frame);
if (ret < 0)
break;
}
}
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
/* Context threaded through walk_stackframe() by save_stack_trace_tsk(). */
struct stack_trace_data {
struct stack_trace *trace;
unsigned int no_sched_functions;	/* skip scheduler internals */
unsigned int skip;			/* frames left to discard */
};
/*
 * Per-frame callback: record frame->pc into the trace, honouring the
 * skip count and the scheduler-function filter.  Returns non-zero
 * (stopping the walk) once the trace buffer is full.
 */
static int save_trace(struct stackframe *frame, void *d)
{
struct stack_trace_data *data = d;
struct stack_trace *trace = data->trace;
unsigned long addr = frame->pc;
if (data->no_sched_functions && in_sched_functions(addr))
return 0;
if (data->skip) {
data->skip--;
return 0;
}
trace->entries[trace->nr_entries++] = addr;
return trace->nr_entries >= trace->max_entries;
}
/*
 * Capture a stack trace for @tsk into @trace.  For another task the
 * start frame comes from its saved thread context (and scheduler
 * functions are filtered out); for the current task the frame is
 * built from the live fp/sp/lr/pc.  The trace is terminated with
 * ULONG_MAX when there is room.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
if (tsk != current) {
data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(tsk);
} else {
/* bind the live stack pointer via a register variable */
register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)save_stack_trace_tsk;
}
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/* Convenience wrapper: trace the current task. */
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
| gpl-2.0 |
0xD34D/kernel_omap | fs/dlm/midcomms.c | 14771 | 3813 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/*
* midcomms.c
*
* This is the appallingly named "mid-level" comms layer.
*
* Its purpose is to take packets from the "real" comms layer,
* split them up into packets and pass them to the interested
* part of the locking mechanism.
*
* It also takes messages from the locking layer, formats them
* into packets and sends them to the comms layer.
*/
#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
#include "lock.h"
#include "midcomms.h"
/*
 * Copy @len bytes into @dst from a circular buffer of @limit bytes at
 * @base, starting at @offset.  A read that would run past the end of
 * the buffer wraps around to its start.
 */
static void copy_from_cb(void *dst, const void *base, unsigned offset,
			 unsigned len, unsigned limit)
{
	unsigned first = len;

	/* clamp the first chunk to the bytes before the wrap point */
	if (first + offset > limit)
		first = limit - offset;
	memcpy(dst, (const char *)base + offset, first);

	/* anything left over comes from the start of the buffer */
	if (len > first)
		memcpy((char *)dst + first, base, len - first);
}
/*
* Called from the low-level comms layer to process a buffer of
* commands.
*
* Only complete messages are processed here, any "spare" bytes from
* the end of a buffer are saved and tacked onto the front of the next
* message that comes in. I doubt this will happen very often but we
* need to be able to cope with it and I don't want the task to be waiting
* for packets to come in when there is useful work to be done.
*/
/*
 * Parse complete DLM messages out of the circular receive buffer
 * (@base/@limit, data starting at @offset, @len valid bytes) from node
 * @nodeid and dispatch each to dlm_receive_buffer().  Returns the
 * number of bytes consumed (possibly 0 when only a partial message is
 * available), or a negative errno on a malformed message.
 */
int dlm_process_incoming_buffer(int nodeid, const void *base,
unsigned offset, unsigned len, unsigned limit)
{
union {
unsigned char __buf[DLM_INBUF_LEN];
/* this is to force proper alignment on some arches */
union dlm_packet p;
} __tmp;
union dlm_packet *p = &__tmp.p;
int ret = 0;
int err = 0;
uint16_t msglen;
uint32_t lockspace;
while (len > sizeof(struct dlm_header)) {
/* Copy just the header to check the total length. The
message may wrap around the end of the buffer back to the
start, so we need to use a temp buffer and copy_from_cb. */
copy_from_cb(p, base, offset, sizeof(struct dlm_header),
limit);
msglen = le16_to_cpu(p->header.h_length);
lockspace = p->header.h_lockspace;
/* sanity-check the advertised length against the minimum
size for the command type and the configured maximum */
err = -EINVAL;
if (msglen < sizeof(struct dlm_header))
break;
if (p->header.h_cmd == DLM_MSG) {
if (msglen < sizeof(struct dlm_message))
break;
} else {
if (msglen < sizeof(struct dlm_rcom))
break;
}
err = -E2BIG;
if (msglen > dlm_config.ci_buffer_size) {
log_print("message size %d from %d too big, buf len %d",
msglen, nodeid, len);
break;
}
err = 0;
/* If only part of the full message is contained in this
buffer, then do nothing and wait for lowcomms to call
us again later with more data. We return 0 meaning
we've consumed none of the input buffer. */
if (msglen > len)
break;
/* Allocate a larger temp buffer if the full message won't fit
in the buffer on the stack (which should work for most
ordinary messages). */
if (msglen > sizeof(__tmp) && p == &__tmp.p) {
p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (p == NULL)
return ret;
}
copy_from_cb(p, base, offset, msglen, limit);
BUG_ON(lockspace != p->header.h_lockspace);
ret += msglen;
offset += msglen;
offset &= (limit - 1);	/* limit is a power of two: wrap */
len -= msglen;
dlm_receive_buffer(p, nodeid);
}
/* release the oversized temp buffer if one was allocated */
if (p != &__tmp.p)
kfree(p);
return err ? err : ret;
}
| gpl-2.0 |
unusual-thoughts/linux-xps13 | drivers/media/platform/s5p-tv/hdmi_drv.c | 180 | 28448 | /*
* Samsung HDMI interface driver
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
*
* Tomasz Stanislawski, <t.stanislaws@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundiation. either version 2 of the License,
* or (at your option) any later version
*/
#define pr_fmt(fmt) "s5p-tv (hdmi_drv): " fmt
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
#define DEBUG
#endif
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <media/v4l2-subdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/platform_data/media/s5p_hdmi.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dv-timings.h>
#include "regs-hdmi.h"
MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
MODULE_DESCRIPTION("Samsung HDMI");
MODULE_LICENSE("GPL");
/* One pulse/active region expressed as start/end pixel (or line)
 * positions within the total frame. */
struct hdmi_pulse {
u32 beg;
u32 end;
};
/* Complete horizontal/vertical timing description for one video mode;
 * vact/vsyn have two entries to cover both fields of interlaced modes. */
struct hdmi_timings {
struct hdmi_pulse hact;
u32 hsyn_pol; /* 0 - high, 1 - low */
struct hdmi_pulse hsyn;
u32 interlaced;
struct hdmi_pulse vact[2];
u32 vsyn_pol; /* 0 - high, 1 - low */
u32 vsyn_off;
struct hdmi_pulse vsyn[2];
};
/* Clocks and regulators the HDMI block depends on. */
struct hdmi_resources {
struct clk *hdmi;
struct clk *sclk_hdmi;
struct clk *sclk_pixel;
struct clk *sclk_hdmiphy;
struct clk *hdmiphy;
struct regulator_bulk_data *regul_bulk;
int regul_count;
};
struct hdmi_device {
/** base address of HDMI registers */
void __iomem *regs;
/** HDMI interrupt */
unsigned int irq;
/** pointer to device parent */
struct device *dev;
/** subdev generated by HDMI device */
struct v4l2_subdev sd;
/** V4L2 device structure */
struct v4l2_device v4l2_dev;
/** subdev of HDMIPHY interface */
struct v4l2_subdev *phy_sd;
/** subdev of MHL interface */
struct v4l2_subdev *mhl_sd;
/** configuration of current graphic mode */
const struct hdmi_timings *cur_conf;
/** flag indicating that timings are dirty */
int cur_conf_dirty;
/** current timings */
struct v4l2_dv_timings cur_timings;
/** other resources */
struct hdmi_resources res;
};
/* Platform device IDs this driver binds against. */
static const struct platform_device_id hdmi_driver_types[] = {
{
.name = "s5pv210-hdmi",
}, {
.name = "exynos4-hdmi",
}, {
/* end node */
}
};
static const struct v4l2_subdev_ops hdmi_sd_ops;
/* Map an embedded v4l2_subdev back to its owning hdmi_device. */
static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
{
return container_of(sd, struct hdmi_device, sd);
}
/* 32-bit register write. */
static inline
void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
{
writel(value, hdev->regs + reg_id);
}
/* Read-modify-write: update only the bits selected by @mask. */
static inline
void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
{
u32 old = readl(hdev->regs + reg_id);
value = (value & mask) | (old & ~mask);
writel(value, hdev->regs + reg_id);
}
/* 8-bit register write. */
static inline
void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
{
writeb(value, hdev->regs + reg_id);
}
/*
 * Write the low @n bytes of @value, one byte per register, to @n
 * consecutive byte-wide registers spaced 4 apart starting at @reg_id
 * (LSB at the lowest address).  The switch intentionally falls
 * through so higher cases also write the lower bytes.
 */
static inline
void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
{
switch (n) {
default:
writeb(value >> 24, hdev->regs + reg_id + 12);
/* fall through */
case 3:
writeb(value >> 16, hdev->regs + reg_id + 8);
/* fall through */
case 2:
writeb(value >> 8, hdev->regs + reg_id + 4);
/* fall through */
case 1:
writeb(value >> 0, hdev->regs + reg_id + 0);
}
}
/* 32-bit register read. */
static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
{
return readl(hdev->regs + reg_id);
}
/*
 * HDMI interrupt handler: acknowledge hot-plug detect plug/unplug
 * interrupts by writing their flag bits back.  Other flag bits are
 * left untouched.
 */
static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
{
struct hdmi_device *hdev = dev_data;
u32 intc_flag;
(void)irq;
intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
/* clearing flags for HPD plug/unplug */
if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
pr_info("unplugged\n");
hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_UNPLUG);
}
if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
pr_info("plugged\n");
hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_PLUG);
}
return IRQ_HANDLED;
}
/*
 * Baseline register setup after a core reset: enable HPD interrupts,
 * select DVI output mode and disable the bluescreen generator.
 */
static void hdmi_reg_init(struct hdmi_device *hdev)
{
/* enable HPD interrupts */
hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
/* choose DVI mode */
hdmi_write_mask(hdev, HDMI_MODE_SEL,
HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
hdmi_write_mask(hdev, HDMI_CON_2, ~0,
HDMI_DVI_PERAMBLE_EN | HDMI_DVI_BAND_EN);
/* disable bluescreen */
hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
/* program an arbitrary placeholder bluescreen colour anyway */
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
}
/*
 * Program the HDMI core sync registers and the timing generator from
 * the mode description @t.  Interlaced modes additionally program the
 * second-field blanking/sync registers; progressive modes clear the
 * field-mode bits.
 */
static void hdmi_timing_apply(struct hdmi_device *hdev,
const struct hdmi_timings *t)
{
/* setting core registers */
hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
(t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
(t->vact[0].beg << 11) | t->vact[0].end);
hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
(t->vsyn[0].beg << 12) | t->vsyn[0].end);
if (t->interlaced) {
u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
(t->hact.end << 12) | t->vact[1].end);
hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
(t->vact[1].end << 11) | t->vact[1].beg);
hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
(t->vsyn[1].beg << 12) | t->vsyn[1].end);
hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
(vsyn_trans << 12) | vsyn_trans);
} else {
hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
(t->hact.end << 12) | t->vact[0].end);
}
/* Timing generator registers */
hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
t->vact[0].end - t->vact[0].beg);
hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
if (t->interlaced) {
hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
} else {
hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
}
}
/*
 * Push the currently selected timing configuration to the hardware.
 * Performs a full HDMIPHY + HDMI core reset before programming, so the
 * whole sequence is skipped when cur_conf is already in sync with HW.
 * Returns 0 on success or a negative errno from the phy subdev call.
 */
static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
{
	struct device *dev = hdmi_dev->dev;
	const struct hdmi_timings *conf = hdmi_dev->cur_conf;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	/* skip if conf is already synchronized with HW */
	if (!hdmi_dev->cur_conf_dirty)
		return 0;

	/* reset hdmiphy: assert then deassert SW reset with settle delays */
	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
	mdelay(10);
	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
	mdelay(10);

	/* configure timings on the phy before touching the core */
	ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_timings,
				&hdmi_dev->cur_timings);
	if (ret) {
		dev_err(dev, "failed to set timings\n");
		return ret;
	}

	/* resetting HDMI core (note: write order is inverted vs phy reset) */
	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);
	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);

	hdmi_reg_init(hdmi_dev);

	/* setting core registers */
	hdmi_timing_apply(hdmi_dev, conf);

	/* HW now matches cur_conf */
	hdmi_dev->cur_conf_dirty = 0;
	return 0;
}
/*
 * Dump all HDMI control/core/timing-generator registers at dev_dbg level,
 * tagging each line with @prefix so call sites can be told apart.
 */
static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
{
/* local helper: print one register's name and raw 32-bit value */
#define DUMPREG(reg_id) \
	dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
		readl(hdev->regs + reg_id))

	dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
	DUMPREG(HDMI_INTC_FLAG);
	DUMPREG(HDMI_INTC_CON);
	DUMPREG(HDMI_HPD_STATUS);
	DUMPREG(HDMI_PHY_RSTOUT);
	DUMPREG(HDMI_PHY_VPLL);
	DUMPREG(HDMI_PHY_CMU);
	DUMPREG(HDMI_CORE_RSTOUT);

	dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
	DUMPREG(HDMI_CON_0);
	DUMPREG(HDMI_CON_1);
	DUMPREG(HDMI_CON_2);
	DUMPREG(HDMI_SYS_STATUS);
	DUMPREG(HDMI_PHY_STATUS);
	DUMPREG(HDMI_STATUS_EN);
	DUMPREG(HDMI_HPD);
	DUMPREG(HDMI_MODE_SEL);
	DUMPREG(HDMI_HPD_GEN);
	DUMPREG(HDMI_DC_CONTROL);
	DUMPREG(HDMI_VIDEO_PATTERN_GEN);

	dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
	DUMPREG(HDMI_H_BLANK_0);
	DUMPREG(HDMI_H_BLANK_1);
	DUMPREG(HDMI_V_BLANK_0);
	DUMPREG(HDMI_V_BLANK_1);
	DUMPREG(HDMI_V_BLANK_2);
	DUMPREG(HDMI_H_V_LINE_0);
	DUMPREG(HDMI_H_V_LINE_1);
	DUMPREG(HDMI_H_V_LINE_2);
	DUMPREG(HDMI_VSYNC_POL);
	DUMPREG(HDMI_INT_PRO_MODE);
	DUMPREG(HDMI_V_BLANK_F_0);
	DUMPREG(HDMI_V_BLANK_F_1);
	DUMPREG(HDMI_V_BLANK_F_2);
	DUMPREG(HDMI_H_SYNC_GEN_0);
	DUMPREG(HDMI_H_SYNC_GEN_1);
	DUMPREG(HDMI_H_SYNC_GEN_2);
	DUMPREG(HDMI_V_SYNC_GEN_1_0);
	DUMPREG(HDMI_V_SYNC_GEN_1_1);
	DUMPREG(HDMI_V_SYNC_GEN_1_2);
	DUMPREG(HDMI_V_SYNC_GEN_2_0);
	DUMPREG(HDMI_V_SYNC_GEN_2_1);
	DUMPREG(HDMI_V_SYNC_GEN_2_2);
	DUMPREG(HDMI_V_SYNC_GEN_3_0);
	DUMPREG(HDMI_V_SYNC_GEN_3_1);
	DUMPREG(HDMI_V_SYNC_GEN_3_2);

	dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
	DUMPREG(HDMI_TG_CMD);
	DUMPREG(HDMI_TG_H_FSZ_L);
	DUMPREG(HDMI_TG_H_FSZ_H);
	DUMPREG(HDMI_TG_HACT_ST_L);
	DUMPREG(HDMI_TG_HACT_ST_H);
	DUMPREG(HDMI_TG_HACT_SZ_L);
	DUMPREG(HDMI_TG_HACT_SZ_H);
	DUMPREG(HDMI_TG_V_FSZ_L);
	DUMPREG(HDMI_TG_V_FSZ_H);
	DUMPREG(HDMI_TG_VSYNC_L);
	DUMPREG(HDMI_TG_VSYNC_H);
	DUMPREG(HDMI_TG_VSYNC2_L);
	DUMPREG(HDMI_TG_VSYNC2_H);
	DUMPREG(HDMI_TG_VACT_ST_L);
	DUMPREG(HDMI_TG_VACT_ST_H);
	DUMPREG(HDMI_TG_VACT_SZ_L);
	DUMPREG(HDMI_TG_VACT_SZ_H);
	DUMPREG(HDMI_TG_FIELD_CHG_L);
	DUMPREG(HDMI_TG_FIELD_CHG_H);
	DUMPREG(HDMI_TG_VACT_ST2_L);
	DUMPREG(HDMI_TG_VACT_ST2_H);
	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
#undef DUMPREG
}
/*
 * Hardware timing presets consumed by hdmi_timing_apply().  All values
 * are in pixels/lines counted from the start of the total (blanked)
 * frame, i.e. .beg/.end include the blanking offsets added below.
 * Interlaced modes additionally fill vact[1]/vsyn[1] and vsyn_off.
 */

/* 720x480 progressive */
static const struct hdmi_timings hdmi_timings_480p = {
	.hact = { .beg = 138, .end = 858 },
	.hsyn_pol = 1,
	.hsyn = { .beg = 16, .end = 16 + 62 },
	.interlaced = 0,
	.vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
	.vsyn_pol = 1,
	.vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
};

/* 720x576 progressive, 50 Hz */
static const struct hdmi_timings hdmi_timings_576p50 = {
	.hact = { .beg = 144, .end = 864 },
	.hsyn_pol = 1,
	.hsyn = { .beg = 12, .end = 12 + 64 },
	.interlaced = 0,
	.vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
	.vsyn_pol = 1,
	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};

/* 1280x720 progressive, 60 Hz */
static const struct hdmi_timings hdmi_timings_720p60 = {
	.hact = { .beg = 370, .end = 1650 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 110, .end = 110 + 40 },
	.interlaced = 0,
	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
	.vsyn_pol = 0,
	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};

/* 1280x720 progressive, 50 Hz */
static const struct hdmi_timings hdmi_timings_720p50 = {
	.hact = { .beg = 700, .end = 1980 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 440, .end = 440 + 40 },
	.interlaced = 0,
	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
	.vsyn_pol = 0,
	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};

/* 1920x1080 progressive, 24 Hz */
static const struct hdmi_timings hdmi_timings_1080p24 = {
	.hact = { .beg = 830, .end = 2750 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 638, .end = 638 + 44 },
	.interlaced = 0,
	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
	.vsyn_pol = 0,
	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
};

/* 1920x1080 progressive, 60 Hz (also used for 30 Hz, see lookup table) */
static const struct hdmi_timings hdmi_timings_1080p60 = {
	.hact = { .beg = 280, .end = 2200 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 88, .end = 88 + 44 },
	.interlaced = 0,
	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
	.vsyn_pol = 0,
	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
};

/* 1920x1080 interlaced, 60 Hz: vact[]/vsyn[] hold per-field extents */
static const struct hdmi_timings hdmi_timings_1080i60 = {
	.hact = { .beg = 280, .end = 2200 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 88, .end = 88 + 44 },
	.interlaced = 1,
	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
	.vsyn_pol = 0,
	.vsyn_off = 1100,
	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
};

/* 1920x1080 interlaced, 50 Hz */
static const struct hdmi_timings hdmi_timings_1080i50 = {
	.hact = { .beg = 720, .end = 2640 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 528, .end = 528 + 44 },
	.interlaced = 1,
	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
	.vsyn_pol = 0,
	.vsyn_off = 1320,
	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
};

/* 1920x1080 progressive, 50 Hz */
static const struct hdmi_timings hdmi_timings_1080p50 = {
	.hact = { .beg = 720, .end = 2640 },
	.hsyn_pol = 0,
	.hsyn = { .beg = 528, .end = 528 + 44 },
	.interlaced = 0,
	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
	.vsyn_pol = 0,
	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
};
/* default hdmi_timings index of the timings configured on probe */
#define HDMI_DEFAULT_TIMINGS_IDX (0)

/*
 * Lookup table mapping supported V4L2 DV timings onto the hardware
 * presets above.  reduced_fps marks entries allowed to keep the
 * V4L2_DV_FL_CAN_REDUCE_FPS flag (see hdmi_s_dv_timings()).
 * NOTE(review): 1080p30 and 1080p60 both map to hdmi_timings_1080p60 —
 * presumably only the pixel clock differs; confirm.
 */
static const struct {
	bool reduced_fps;
	const struct v4l2_dv_timings dv_timings;
	const struct hdmi_timings *hdmi_timings;
} hdmi_timings[] = {
	{ false, V4L2_DV_BT_CEA_720X480P59_94, &hdmi_timings_480p },
	{ false, V4L2_DV_BT_CEA_720X576P50, &hdmi_timings_576p50 },
	{ false, V4L2_DV_BT_CEA_1280X720P50, &hdmi_timings_720p50 },
	{ true, V4L2_DV_BT_CEA_1280X720P60, &hdmi_timings_720p60 },
	{ false, V4L2_DV_BT_CEA_1920X1080P24, &hdmi_timings_1080p24 },
	{ false, V4L2_DV_BT_CEA_1920X1080P30, &hdmi_timings_1080p60 },
	{ false, V4L2_DV_BT_CEA_1920X1080P50, &hdmi_timings_1080p50 },
	{ false, V4L2_DV_BT_CEA_1920X1080I50, &hdmi_timings_1080i50 },
	{ false, V4L2_DV_BT_CEA_1920X1080I60, &hdmi_timings_1080i60 },
	{ false, V4L2_DV_BT_CEA_1920X1080P60, &hdmi_timings_1080p60 },
};
/*
 * Start the HDMI output path: apply pending timing config, start the
 * phy and wait for its PLL to lock, start MHL (if present), switch
 * sclk_hdmi over to the phy clock and finally enable core + timing
 * generator.  Returns 0 on success or a negative errno.
 */
static int hdmi_streamon(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;
	int ret, tries;

	dev_dbg(dev, "%s\n", __func__);

	ret = hdmi_conf_apply(hdev);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
	if (ret)
		return ret;

	/* waiting for HDMIPHY's PLL to get to steady state (~100 ms max) */
	for (tries = 100; tries; --tries) {
		u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
		if (val & HDMI_PHY_STATUS_READY)
			break;
		mdelay(1);
	}
	/* steady state not achieved */
	if (tries == 0) {
		dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
		hdmi_dumpregs(hdev, "hdmiphy - s_stream");
		return -EIO;
	}

	/* starting MHL; the extra mhl_sd test ignores the error the subdev
	 * call yields when no MHL subdev is present */
	ret = v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 1);
	if (hdev->mhl_sd && ret) {
		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
		hdmi_dumpregs(hdev, "mhl - s_stream");
		return -EIO;
	}

	/* hdmiphy clock is used for HDMI in streaming mode */
	clk_disable(res->sclk_hdmi);
	clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
	clk_enable(res->sclk_hdmi);

	/* enable HDMI and timing generator */
	hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
	hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
	hdmi_dumpregs(hdev, "streamon");
	return 0;
}
/*
 * Stop the HDMI output: disable core and timing generator, move
 * sclk_hdmi back to the pixel clock for config mode, then stop the
 * MHL and phy subdevs.  Always returns 0.
 */
static int hdmi_streamoff(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;

	dev_dbg(dev, "%s\n", __func__);

	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
	hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);

	/* pixel(vpll) clock is used for HDMI in config mode */
	clk_disable(res->sclk_hdmi);
	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
	clk_enable(res->sclk_hdmi);

	v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 0);
	v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);

	hdmi_dumpregs(hdev, "streamoff");
	return 0;
}
/* V4L2 s_stream entry point: route to the stream-on/off helpers. */
static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);

	dev_dbg(hdev->dev, "%s(%d)\n", __func__, enable);

	return enable ? hdmi_streamon(hdev) : hdmi_streamoff(hdev);
}
/*
 * Enable regulators and the clocks needed before register access.
 * NOTE(review): clk_enable() return values are ignored here — confirm
 * that is acceptable on this platform.
 */
static int hdmi_resource_poweron(struct hdmi_resources *res)
{
	int ret;

	/* turn HDMI power on */
	ret = regulator_bulk_enable(res->regul_count, res->regul_bulk);
	if (ret < 0)
		return ret;
	/* power-on hdmi physical interface */
	clk_enable(res->hdmiphy);
	/* use VPP as parent clock; HDMIPHY is not working yet */
	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
	/* turn clocks on */
	clk_enable(res->sclk_hdmi);
	return 0;
}
/* Reverse of hdmi_resource_poweron(): clocks off, then regulators. */
static void hdmi_resource_poweroff(struct hdmi_resources *res)
{
	/* turn clocks off */
	clk_disable(res->sclk_hdmi);
	/* power-off hdmiphy */
	clk_disable(res->hdmiphy);
	/* turn HDMI power off */
	regulator_bulk_disable(res->regul_count, res->regul_bulk);
}
/* V4L2 s_power: translate power requests into runtime-PM get/put. */
static int hdmi_s_power(struct v4l2_subdev *sd, int on)
{
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
	int ret = on ? pm_runtime_get_sync(hdev->dev)
		     : pm_runtime_put_sync(hdev->dev);

	/* pm_runtime_* may return positive values; only < 0 is an error */
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * V4L2 s_dv_timings: select the hardware preset matching @timings and
 * mark the configuration dirty so it is programmed on next stream-on.
 */
static int hdmi_s_dv_timings(struct v4l2_subdev *sd,
			struct v4l2_dv_timings *timings)
{
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
	int found = -1;
	int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_timings) && found < 0; i++) {
		if (v4l2_match_dv_timings(&hdmi_timings[i].dv_timings,
					  timings, 0, false))
			found = i;
	}
	if (found < 0) {
		dev_err(hdev->dev, "timings not supported\n");
		return -EINVAL;
	}

	hdev->cur_conf = hdmi_timings[found].hdmi_timings;
	hdev->cur_conf_dirty = 1;
	hdev->cur_timings = *timings;
	/* strip the reduce-fps flag unless the preset supports it */
	if (!hdmi_timings[found].reduced_fps)
		hdev->cur_timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
	return 0;
}
static int hdmi_g_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
*timings = sd_to_hdmi_dev(sd)->cur_timings;
return 0;
}
static int hdmi_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
const struct hdmi_timings *t = hdev->cur_conf;
dev_dbg(hdev->dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
if (format->pad)
return -EINVAL;
memset(fmt, 0, sizeof(*fmt));
fmt->width = t->hact.end - t->hact.beg;
fmt->height = t->vact[0].end - t->vact[0].beg;
fmt->code = MEDIA_BUS_FMT_FIXED; /* means RGB888 */
fmt->colorspace = V4L2_COLORSPACE_SRGB;
if (t->interlaced) {
fmt->field = V4L2_FIELD_INTERLACED;
fmt->height *= 2;
} else {
fmt->field = V4L2_FIELD_NONE;
}
return 0;
}
/* V4L2 enum_dv_timings: report the @index-th supported timing set. */
static int hdmi_enum_dv_timings(struct v4l2_subdev *sd,
			struct v4l2_enum_dv_timings *timings)
{
	unsigned int idx = timings->index;

	if (timings->pad != 0 || idx >= ARRAY_SIZE(hdmi_timings))
		return -EINVAL;

	timings->timings = hdmi_timings[idx].dv_timings;
	/* advertise reduced-fps only where the preset supports it */
	if (!hdmi_timings[idx].reduced_fps)
		timings->timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
	return 0;
}
/*
 * V4L2 dv_timings_cap: the phy contributes the pixelclock range, then
 * the fixed geometry limits of the HDMI core are filled in on top.
 */
static int hdmi_dv_timings_cap(struct v4l2_subdev *sd,
			struct v4l2_dv_timings_cap *cap)
{
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);

	if (cap->pad != 0)
		return -EINVAL;

	/* Let the phy fill in the pixelclock range */
	v4l2_subdev_call(hdev->phy_sd, pad, dv_timings_cap, cap);
	cap->type = V4L2_DV_BT_656_1120;
	cap->bt.min_width = 720;
	cap->bt.max_width = 1920;
	cap->bt.min_height = 480;
	cap->bt.max_height = 1080;
	cap->bt.standards = V4L2_DV_BT_STD_CEA861;
	cap->bt.capabilities = V4L2_DV_BT_CAP_INTERLACED |
	V4L2_DV_BT_CAP_PROGRESSIVE;
	return 0;
}
/* subdev core ops: only power management is implemented */
static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
	.s_power = hdmi_s_power,
};

/* subdev video ops: timing selection and stream start/stop */
static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
	.s_dv_timings = hdmi_s_dv_timings,
	.g_dv_timings = hdmi_g_dv_timings,
	.s_stream = hdmi_s_stream,
};

/* subdev pad ops: timing enumeration/capabilities and format readout */
static const struct v4l2_subdev_pad_ops hdmi_sd_pad_ops = {
	.enum_dv_timings = hdmi_enum_dv_timings,
	.dv_timings_cap = hdmi_dv_timings_cap,
	.get_fmt = hdmi_get_fmt,
};

static const struct v4l2_subdev_ops hdmi_sd_ops = {
	.core = &hdmi_sd_core_ops,
	.video = &hdmi_sd_video_ops,
	.pad = &hdmi_sd_pad_ops,
};
/*
 * Runtime-PM suspend: power the MHL subdev down, release HDMI clocks
 * and regulators, and flag that register state will be lost so the next
 * stream-on reprograms everything.
 */
static int hdmi_runtime_suspend(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);

	dev_dbg(dev, "%s\n", __func__);
	v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
	hdmi_resource_poweroff(&hdev->res);
	/* flag that device context is lost */
	hdev->cur_conf_dirty = 1;
	return 0;
}
/*
 * Runtime-PM resume: power HDMI resources back up and re-enable the MHL
 * subdev.  On MHL failure the resources are powered off again.
 */
static int hdmi_runtime_resume(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = hdmi_resource_poweron(&hdev->res);
	if (ret < 0)
		return ret;

	/* starting MHL; the mhl_sd test ignores the error returned for a
	 * missing (NULL) MHL subdev */
	ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
	if (hdev->mhl_sd && ret)
		goto fail;

	dev_dbg(dev, "poweron succeed\n");

	return 0;

fail:
	hdmi_resource_poweroff(&hdev->res);
	dev_err(dev, "poweron failed\n");

	return ret;
}
/* runtime PM only; no system sleep callbacks are provided here */
static const struct dev_pm_ops hdmi_pm_ops = {
	.runtime_suspend = hdmi_runtime_suspend,
	.runtime_resume = hdmi_runtime_resume,
};
/*
 * Mark every clock handle invalid (ERR_PTR) so that
 * hdmi_resources_cleanup() can use IS_ERR() to tell which clocks were
 * actually acquired.
 */
static void hdmi_resource_clear_clocks(struct hdmi_resources *res)
{
	res->hdmi = ERR_PTR(-EINVAL);
	res->sclk_hdmi = ERR_PTR(-EINVAL);
	res->sclk_pixel = ERR_PTR(-EINVAL);
	res->sclk_hdmiphy = ERR_PTR(-EINVAL);
	res->hdmiphy = ERR_PTR(-EINVAL);
}
/*
 * Release whatever hdmi_resources_init() managed to acquire.  Safe to
 * call on a partially initialized state: regul_count gates the
 * regulator free, kfree(NULL) is fine, and unacquired clocks are
 * ERR_PTRs filtered by IS_ERR().
 */
static void hdmi_resources_cleanup(struct hdmi_device *hdev)
{
	struct hdmi_resources *res = &hdev->res;

	dev_dbg(hdev->dev, "HDMI resource cleanup\n");
	/* put clocks, power */
	if (res->regul_count)
		regulator_bulk_free(res->regul_count, res->regul_bulk);
	/* kfree is NULL-safe */
	kfree(res->regul_bulk);
	if (!IS_ERR(res->hdmiphy))
		clk_put(res->hdmiphy);
	if (!IS_ERR(res->sclk_hdmiphy))
		clk_put(res->sclk_hdmiphy);
	if (!IS_ERR(res->sclk_pixel))
		clk_put(res->sclk_pixel);
	if (!IS_ERR(res->sclk_hdmi))
		clk_put(res->sclk_hdmi);
	if (!IS_ERR(res->hdmi))
		clk_put(res->hdmi);
	/* wipe, then restore the ERR_PTR markers so a repeated cleanup
	 * (or a later re-init) sees consistent state */
	memset(res, 0, sizeof(*res));
	hdmi_resource_clear_clocks(res);
}
/*
 * Acquire all clocks and regulators the HDMI block needs.
 * On any failure, everything acquired so far is released via
 * hdmi_resources_cleanup() and -ENODEV is returned.
 */
static int hdmi_resources_init(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;
	/* table of string literals: keep it fully const */
	static const char * const supply[] = {
		"hdmi-en",
		"vdd",
		"vdd_osc",
		"vdd_pll",
	};
	int i, ret;

	dev_dbg(dev, "HDMI resource init\n");

	memset(res, 0, sizeof(*res));
	/* mark all clocks invalid so cleanup knows what was acquired */
	hdmi_resource_clear_clocks(res);

	/* get clocks, power */
	res->hdmi = clk_get(dev, "hdmi");
	if (IS_ERR(res->hdmi)) {
		dev_err(dev, "failed to get clock 'hdmi'\n");
		goto fail;
	}
	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
	if (IS_ERR(res->sclk_hdmi)) {
		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
		goto fail;
	}
	res->sclk_pixel = clk_get(dev, "sclk_pixel");
	if (IS_ERR(res->sclk_pixel)) {
		dev_err(dev, "failed to get clock 'sclk_pixel'\n");
		goto fail;
	}
	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
	if (IS_ERR(res->sclk_hdmiphy)) {
		dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
		goto fail;
	}
	res->hdmiphy = clk_get(dev, "hdmiphy");
	if (IS_ERR(res->hdmiphy)) {
		dev_err(dev, "failed to get clock 'hdmiphy'\n");
		goto fail;
	}
	res->regul_bulk = kcalloc(ARRAY_SIZE(supply),
				  sizeof(res->regul_bulk[0]), GFP_KERNEL);
	if (!res->regul_bulk) {
		dev_err(dev, "failed to get memory for regulators\n");
		goto fail;
	}
	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
		/* .consumer is already NULL thanks to kcalloc() */
		res->regul_bulk[i].supply = supply[i];
	}
	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
	if (ret) {
		dev_err(dev, "failed to get regulators\n");
		goto fail;
	}
	/* a non-zero regul_count tells cleanup to free the regulators */
	res->regul_count = ARRAY_SIZE(supply);

	return 0;
fail:
	dev_err(dev, "HDMI resource init - failed\n");
	hdmi_resources_cleanup(hdev);
	return -ENODEV;
}
/*
 * Probe: allocate device state, acquire clocks/regulators, map registers,
 * hook the IRQ, register the v4l2 device plus the hdmiphy (and optional
 * MHL) i2c subdevs, then publish ourselves as a v4l2 subdev via drvdata.
 * Memory, register mapping and IRQ are devm-managed, so the error paths
 * only unwind the v4l2 device and the clock/regulator resources.
 */
static int hdmi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct i2c_adapter *adapter;
	struct v4l2_subdev *sd;
	struct hdmi_device *hdmi_dev = NULL;
	struct s5p_hdmi_platform_data *pdata = dev->platform_data;
	int ret;

	dev_dbg(dev, "probe start\n");

	if (!pdata) {
		dev_err(dev, "platform data is missing\n");
		ret = -ENODEV;
		goto fail;
	}

	hdmi_dev = devm_kzalloc(&pdev->dev, sizeof(*hdmi_dev), GFP_KERNEL);
	if (!hdmi_dev) {
		dev_err(dev, "out of memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	hdmi_dev->dev = dev;

	ret = hdmi_resources_init(hdmi_dev);
	if (ret)
		goto fail;

	/* mapping HDMI registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail_init;
	}

	hdmi_dev->regs = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (hdmi_dev->regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail_init;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(dev, "get interrupt resource failed.\n");
		ret = -ENXIO;
		goto fail_init;
	}

	ret = devm_request_irq(&pdev->dev, res->start, hdmi_irq_handler, 0,
			       "hdmi", hdmi_dev);
	if (ret) {
		dev_err(dev, "request interrupt failed.\n");
		goto fail_init;
	}
	hdmi_dev->irq = res->start;

	/* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
	strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
		sizeof(hdmi_dev->v4l2_dev.name));
	/* passing NULL owner prevents driver from erasing drvdata */
	ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
	if (ret) {
		dev_err(dev, "could not register v4l2 device.\n");
		goto fail_init;
	}

	/* testing if hdmiphy info is present */
	if (!pdata->hdmiphy_info) {
		dev_err(dev, "hdmiphy info is missing in platform data\n");
		ret = -ENXIO;
		goto fail_vdev;
	}

	adapter = i2c_get_adapter(pdata->hdmiphy_bus);
	if (adapter == NULL) {
		dev_err(dev, "hdmiphy adapter request failed\n");
		ret = -ENXIO;
		goto fail_vdev;
	}

	hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
						     adapter, pdata->hdmiphy_info, NULL);
	/* on failure or not adapter is no longer useful */
	i2c_put_adapter(adapter);
	if (hdmi_dev->phy_sd == NULL) {
		dev_err(dev, "missing subdev for hdmiphy\n");
		ret = -ENODEV;
		goto fail_vdev;
	}

	/* initialization of MHL interface if present */
	if (pdata->mhl_info) {
		adapter = i2c_get_adapter(pdata->mhl_bus);
		if (adapter == NULL) {
			dev_err(dev, "MHL adapter request failed\n");
			ret = -ENXIO;
			goto fail_vdev;
		}

		hdmi_dev->mhl_sd = v4l2_i2c_new_subdev_board(
				&hdmi_dev->v4l2_dev, adapter,
				pdata->mhl_info, NULL);
		/* on failure or not adapter is no longer useful */
		i2c_put_adapter(adapter);
		if (hdmi_dev->mhl_sd == NULL) {
			dev_err(dev, "missing subdev for MHL\n");
			ret = -ENODEV;
			goto fail_vdev;
		}
	}

	/* keep the always-on bus clock running for the device's lifetime */
	clk_enable(hdmi_dev->res.hdmi);

	pm_runtime_enable(dev);

	sd = &hdmi_dev->sd;
	v4l2_subdev_init(sd, &hdmi_sd_ops);
	sd->owner = THIS_MODULE;
	strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));

	/* default configuration; programmed to HW on first stream-on */
	hdmi_dev->cur_timings =
		hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].dv_timings;
	/* FIXME: missing fail timings is not supported */
	hdmi_dev->cur_conf =
		hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].hdmi_timings;
	hdmi_dev->cur_conf_dirty = 1;

	/* storing subdev for call that have only access to struct device */
	dev_set_drvdata(dev, sd);

	dev_info(dev, "probe successful\n");

	return 0;

fail_vdev:
	v4l2_device_unregister(&hdmi_dev->v4l2_dev);

fail_init:
	hdmi_resources_cleanup(hdmi_dev);

fail:
	dev_err(dev, "probe failed\n");
	return ret;
}
/*
 * Undo probe: stop runtime PM, drop the bus clock held since probe,
 * unregister the v4l2 device, mask our IRQ and release
 * clocks/regulators.  devm-managed memory/mapping/IRQ are freed by the
 * core.
 */
static int hdmi_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);

	pm_runtime_disable(dev);
	clk_disable(hdmi_dev->res.hdmi);
	v4l2_device_unregister(&hdmi_dev->v4l2_dev);
	disable_irq(hdmi_dev->irq);
	hdmi_resources_cleanup(hdmi_dev);
	dev_info(dev, "remove successful\n");

	return 0;
}
/*
 * NOTE(review): __refdata suppresses section-mismatch warnings —
 * presumably because hdmi_driver_types (defined elsewhere) lives in an
 * init section; confirm.
 */
static struct platform_driver hdmi_driver __refdata = {
	.probe = hdmi_probe,
	.remove = hdmi_remove,
	.id_table = hdmi_driver_types,
	.driver = {
		.name = "s5p-hdmi",
		.pm = &hdmi_pm_ops,
	}
};

module_platform_driver(hdmi_driver);
| gpl-2.0 |
vcgato29/linux | drivers/usb/chipidea/ci_hdrc_msm.c | 948 | 2962 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/usb/ulpi.h>
#include <linux/usb/gadget.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
#define MSM_USB_BASE (ci->hw_bank.abs)
/*
 * Chipidea core event hook.  On controller reset the MSM-specific AHB
 * burst/mode registers are cleared and the phy re-initialized; when the
 * controller stops, the phy is told about the disconnect.
 */
static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
{
	struct device *dev = ci->gadget.dev.parent;

	switch (event) {
	case CI_HDRC_CONTROLLER_RESET_EVENT:
		dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
		/* USB_AHB* expand against MSM_USB_BASE (see #define above) */
		writel(0, USB_AHBBURST);
		writel(0, USB_AHBMODE);
		usb_phy_init(ci->usb_phy);
		break;
	case CI_HDRC_CONTROLLER_STOPPED_EVENT:
		dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
		/*
		 * Put the phy in non-driving mode. Otherwise host
		 * may not detect soft-disconnection.
		 */
		usb_phy_notify_disconnect(ci->usb_phy, USB_SPEED_UNKNOWN);
		break;
	default:
		dev_dbg(dev, "unknown ci_hdrc event\n");
		break;
	}
}
/* platform data handed to the common chipidea core by this glue layer;
 * .usb_phy is filled in at probe time */
static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
	.name = "ci_hdrc_msm",
	.capoffset = DEF_CAPOFFSET,
	.flags = CI_HDRC_REGS_SHARED |
	CI_HDRC_DISABLE_STREAMING,

	.notify_event = ci_hdrc_msm_notify_event,
};
/*
 * Glue probe: fetch the external PHY by phandle, then let the common
 * chipidea core create the actual controller device on top of our
 * resources.
 */
static int ci_hdrc_msm_probe(struct platform_device *pdev)
{
	struct platform_device *plat_ci;
	struct usb_phy *phy;

	dev_dbg(&pdev->dev, "ci_hdrc_msm_probe\n");

	/*
	 * OTG(PHY) driver takes care of PHY initialization, clock management,
	 * powering up VBUS, mapping of registers address space and power
	 * management.
	 */
	phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* NOTE(review): mutating file-scope platdata limits this glue to a
	 * single controller instance — confirm that is intended */
	ci_hdrc_msm_platdata.usb_phy = phy;

	plat_ci = ci_hdrc_add_device(&pdev->dev,
				pdev->resource, pdev->num_resources,
				&ci_hdrc_msm_platdata);
	if (IS_ERR(plat_ci)) {
		dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
		return PTR_ERR(plat_ci);
	}

	platform_set_drvdata(pdev, plat_ci);

	/* PM is handled elsewhere; no runtime callbacks at this level */
	pm_runtime_no_callbacks(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}
/* Tear down in reverse probe order: disable PM, then remove the core. */
static int ci_hdrc_msm_remove(struct platform_device *pdev)
{
	struct platform_device *plat_ci = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	ci_hdrc_remove_device(plat_ci);

	return 0;
}
/* OF match: binds to DT nodes with compatible = "qcom,ci-hdrc" */
static const struct of_device_id msm_ci_dt_match[] = {
	{ .compatible = "qcom,ci-hdrc", },
	{ }
};
MODULE_DEVICE_TABLE(of, msm_ci_dt_match);

static struct platform_driver ci_hdrc_msm_driver = {
	.probe = ci_hdrc_msm_probe,
	.remove = ci_hdrc_msm_remove,
	.driver = {
		.name = "msm_hsusb",
		.of_match_table = msm_ci_dt_match,
	},
};

module_platform_driver(ci_hdrc_msm_driver);

/* keep old platform-bus names working for module autoloading */
MODULE_ALIAS("platform:msm_hsusb");
MODULE_ALIAS("platform:ci13xxx_msm");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jmztaylor/kernel_gb_marvelc | arch/blackfin/kernel/nmi.c | 1204 | 6013 | /*
* Blackfin nmi_watchdog Driver
*
* Originally based on bfin_wdt.c
* Copyright 2010-2010 Analog Devices Inc.
* Graff Yang <graf.yang@analog.com>
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Licensed under the GPL-2 or later.
*/
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/sysdev.h>
#include <linux/pm.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/timer.h>
#include <asm/blackfin.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/bfin_watchdog.h>
#define DRV_NAME "nmi-wdt"
#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
/* CPU that owns the watchdog NMI (the code below calls it CoreB) */
static int nmi_wdt_cpu = 1;

/* watchdog period in seconds; programmed via nmi_wdt_set_timeout() */
static unsigned int timeout = NMI_WDT_TIMEOUT;
static int nmi_active;

/* saved WDOGA state while it is borrowed to cross-trigger CoreA */
static unsigned short wdoga_ctl;
static unsigned int wdoga_cnt;
static struct corelock_slot saved_corelock;

/* per-cpu progress flags set by touch_nmi_watchdog() */
static atomic_t nmi_touched[NR_CPUS];
static struct timer_list ntimer;

/* handshake events exchanged between the two cores in do_nmi() */
enum {
	COREA_ENTER_NMI = 0,
	COREA_EXIT_NMI,
	COREB_EXIT_NMI,
	NMI_EVENT_NR,
};
/* placed in on-chip L2 (.l2.bss) — presumably so both cores can share
 * it; see the section attribute */
static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
/* we are in nmi, non-atomic bit ops is safe */
static inline void set_nmi_event(int event)
{
	__set_bit(event, &nmi_event);
}

/* busy-wait until the other core raises @event, then consume it */
static inline void wait_nmi_event(int event)
{
	while (!test_bit(event, &nmi_event))
		barrier();
	__clear_bit(event, &nmi_event);
}

/* borrow watchdog A to raise an NMI on CoreA; old state is saved first */
static inline void send_corea_nmi(void)
{
	wdoga_ctl = bfin_read_WDOGA_CTL();
	wdoga_cnt = bfin_read_WDOGA_CNT();

	bfin_write_WDOGA_CTL(WDEN_DISABLE);
	bfin_write_WDOGA_CNT(0);
	bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
}

/* undo send_corea_nmi(): clear the expired status, restore WDOGA */
static inline void restore_corea_nmi(void)
{
	bfin_write_WDOGA_CTL(WDEN_DISABLE);
	bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);

	bfin_write_WDOGA_CNT(wdoga_cnt);
	bfin_write_WDOGA_CTL(wdoga_ctl);
}

/* take over the corelock — presumably so the dump path cannot deadlock
 * on a lock the interrupted code was holding; see do_nmi() */
static inline void save_corelock(void)
{
	saved_corelock = corelock;
	corelock.lock = 0;
}

static inline void restore_corelock(void)
{
	corelock = saved_corelock;
}

/* writing WDOG_STAT reloads the count, i.e. keeps the watchdog alive */
static inline void nmi_wdt_keepalive(void)
{
	bfin_write_WDOGB_STAT(0);
}

static inline void nmi_wdt_stop(void)
{
	bfin_write_WDOGB_CTL(WDEN_DISABLE);
}

/* before calling this function, you must stop the WDT */
static inline void nmi_wdt_clear(void)
{
	/* clear TRO bit, disable event generation */
	bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
}

static inline void nmi_wdt_start(void)
{
	bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
}

static inline int nmi_wdt_running(void)
{
	return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
}
/*
 * Program the watchdog period to @t seconds.  The count register is
 * 32-bit, so the largest representable period is 0xffffffff / sclk;
 * larger values are rejected with -EINVAL.  If the watchdog was running
 * it is restarted with the new period.
 *
 * The range check now precedes the multiplication, so the count is
 * never computed from a value that would wrap (the original code
 * multiplied first and relied on the wrapped result being discarded).
 */
static inline int nmi_wdt_set_timeout(unsigned long t)
{
	u32 cnt, max_t, sclk;
	int run;

	sclk = get_sclk();
	/* largest timeout (in seconds) that fits the 32-bit count reg */
	max_t = 0xffffffffu / sclk;
	if (t > max_t) {
		pr_warning("NMI: timeout value is too large\n");
		return -EINVAL;
	}
	cnt = t * sclk;

	run = nmi_wdt_running();
	nmi_wdt_stop();
	bfin_write_WDOGB_CNT(cnt);
	if (run)
		nmi_wdt_start();

	timeout = t;

	return 0;
}
/*
 * Returns 1 iff every online CPU has touched the watchdog since the
 * last check; the flags are consumed (reset to 0) along the way.  The
 * dcache invalidate forces a fresh read of the other core's flag.
 */
int check_nmi_wdt_touched(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	cpumask_t mask = cpu_online_map;

	if (!atomic_read(&nmi_touched[this_cpu]))
		return 0;

	atomic_set(&nmi_touched[this_cpu], 0);

	cpu_clear(this_cpu, mask);
	for_each_cpu_mask(cpu, mask) {
		invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
				(unsigned long)(&nmi_touched[cpu]));
		if (!atomic_read(&nmi_touched[cpu]))
			return 0;
		atomic_set(&nmi_touched[cpu], 0);
	}

	return 1;
}
/* Periodic check: feed the hardware watchdog only while every online
 * CPU keeps reporting progress, then re-arm this timer. */
static void nmi_wdt_timer(unsigned long data)
{
	if (check_nmi_wdt_touched())
		nmi_wdt_keepalive();

	mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
}
/*
 * Boot-time setup: program the default period, start the watchdog and
 * arm the periodic software check.
 *
 * Fix: the return value of nmi_wdt_set_timeout() was previously
 * ignored, so the watchdog could be armed with whatever count happened
 * to be in the register even when the timeout was rejected; now we bail
 * out instead.
 */
static int __init init_nmi_wdt(void)
{
	int ret;

	ret = nmi_wdt_set_timeout(timeout);
	if (ret)
		return ret;
	nmi_wdt_start();
	nmi_active = true;

	init_timer(&ntimer);
	ntimer.function = nmi_wdt_timer;
	ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
	add_timer(&ntimer);

	pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
	return 0;
}
device_initcall(init_nmi_wdt);
/* Record that the calling CPU has made progress since the last check. */
void touch_nmi_watchdog(void)
{
	unsigned int cpu = smp_processor_id();

	atomic_set(&nmi_touched[cpu], 1);
}
/* Suspend/resume support */
#ifdef CONFIG_PM
/* stop the hardware watchdog across suspend so it cannot fire */
static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_wdt_stop();
	return 0;
}

/* restart only if the watchdog was successfully armed at boot */
static int nmi_wdt_resume(struct sys_device *dev)
{
	if (nmi_active)
		nmi_wdt_start();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	.name = DRV_NAME,
	.resume = nmi_wdt_resume,
	.suspend = nmi_wdt_suspend,
};

static struct sys_device device_nmi_wdt = {
	.id = 0,
	.cls = &nmi_sysclass,
};

/* register the sysdev so the PM callbacks above are actually invoked */
static int __init init_nmi_wdt_sysfs(void)
{
	int error;

	if (!nmi_active)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_nmi_wdt);
	return error;
}
late_initcall(init_nmi_wdt_sysfs);

#endif	/* CONFIG_PM */
/*
 * NMI entry for both cores.  The watchdog NMI arrives on nmi_wdt_cpu
 * (CoreB), which cross-triggers CoreA via WDOGA, and the two cores then
 * hand-shake through nmi_event while each dumps its own state.  This
 * path never recovers the system (see the "not recoverable" message).
 */
asmlinkage notrace void do_nmi(struct pt_regs *fp)
{
	unsigned int cpu = smp_processor_id();
	nmi_enter();

	cpu_pda[cpu].__nmi_count += 1;

	if (cpu == nmi_wdt_cpu) {
		/* CoreB goes here first */

		/* reload the WDOG_STAT */
		nmi_wdt_keepalive();

		/* clear nmi interrupt for CoreB */
		nmi_wdt_stop();
		nmi_wdt_clear();

		/* trigger NMI interrupt of CoreA */
		send_corea_nmi();

		/* waiting CoreB to enter NMI */
		wait_nmi_event(COREA_ENTER_NMI);

		/* recover WDOGA's settings */
		restore_corea_nmi();

		save_corelock();

		/* corelock is save/cleared, CoreA is dummping messages */

		wait_nmi_event(COREA_EXIT_NMI);
	} else {
		/* OK, CoreA entered NMI */
		set_nmi_event(COREA_ENTER_NMI);
	}

	pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
	dump_bfin_process(fp);
	dump_bfin_mem(fp);
	show_regs(fp);
	dump_bfin_trace_buffer();
	show_stack(current, (unsigned long *)fp);

	if (cpu == nmi_wdt_cpu) {
		pr_emerg("This fault is not recoverable, sorry!\n");

		/* CoreA dump finished, restore the corelock */
		restore_corelock();

		set_nmi_event(COREB_EXIT_NMI);
	} else {
		/* CoreB dump finished, notice the CoreA we are done */
		set_nmi_event(COREA_EXIT_NMI);

		/* synchronize with CoreA */
		wait_nmi_event(COREB_EXIT_NMI);
	}

	nmi_exit();
}
| gpl-2.0 |
techomancer/kernel-galaxytab | arch/mips/vr41xx/common/siu.c | 1460 | 3377 | /*
* NEC VR4100 series SIU platform device.
*
* Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/vr41xx/siu.h>
/* VR4111/VR4121 layout: a single SIU, no DSIU */
static unsigned int siu_type1_ports[SIU_PORTS_MAX] __initdata = {
	PORT_VR41XX_SIU,
	PORT_UNKNOWN,
};

/* one MMIO window plus the SIU interrupt */
static struct resource siu_type1_resource[] __initdata = {
	{
		.start	= 0x0c000000,
		.end	= 0x0c00000a,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= SIU_IRQ,
		.end	= SIU_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* VR4122/VR4131/VR4133 layout: SIU plus a debug SIU (DSIU) */
static unsigned int siu_type2_ports[SIU_PORTS_MAX] __initdata = {
	PORT_VR41XX_SIU,
	PORT_VR41XX_DSIU,
};

/* two MMIO windows and two interrupts, ordered SIU then DSIU */
static struct resource siu_type2_resource[] __initdata = {
	{
		.start	= 0x0f000800,
		.end	= 0x0f00080a,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x0f000820,
		.end	= 0x0f000829,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= SIU_IRQ,
		.end	= SIU_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= DSIU_IRQ,
		.end	= DSIU_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};
/*
 * Register the "SIU" platform device with the port/resource layout
 * matching the detected CPU.  Returns 0 on success or a negative errno;
 * unsupported CPUs yield -ENODEV.
 */
static int __init vr41xx_siu_add(void)
{
	struct platform_device *pdev;
	struct resource *res;
	unsigned int num;
	int retval;

	pdev = platform_device_alloc("SIU", -1);
	if (!pdev)
		return -ENOMEM;

	switch (current_cpu_type()) {
	case CPU_VR4111:
	case CPU_VR4121:
		pdev->dev.platform_data = siu_type1_ports;
		res = siu_type1_resource;
		num = ARRAY_SIZE(siu_type1_resource);
		break;
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4133:
		pdev->dev.platform_data = siu_type2_ports;
		res = siu_type2_resource;
		num = ARRAY_SIZE(siu_type2_resource);
		break;
	default:
		retval = -ENODEV;
		goto err_free_device;
	}

	retval = platform_device_add_resources(pdev, res, num);
	if (retval)
		goto err_free_device;

	retval = platform_device_add(pdev);
	if (retval)
		goto err_free_device;

	return 0;

err_free_device:
	/* drops the reference taken by platform_device_alloc() */
	platform_device_put(pdev);

	return retval;
}
/*
 * Early console setup: hand each SIU/DSIU port's address info to the
 * serial driver before the platform device exists.
 *
 * Fix: the uart_port struct is now zeroed on every iteration.  The
 * original left it uninitialized on the stack, so all fields not
 * explicitly assigned below reached vr41xx_siu_early_setup() with
 * indeterminate values (and, from the second loop iteration on, with
 * leftovers of the previous port).
 */
void __init vr41xx_siu_setup(void)
{
	struct uart_port port;
	struct resource *res;
	unsigned int *type;
	int i;

	switch (current_cpu_type()) {
	case CPU_VR4111:
	case CPU_VR4121:
		type = siu_type1_ports;
		res = siu_type1_resource;
		break;
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4133:
		type = siu_type2_ports;
		res = siu_type2_resource;
		break;
	default:
		/* unknown CPU: no SIU to set up */
		return;
	}

	for (i = 0; i < SIU_PORTS_MAX; i++) {
		/* start from a clean port on every iteration */
		memset(&port, 0, sizeof(port));
		port.line = i;
		port.type = type[i];
		if (port.type == PORT_UNKNOWN)
			break;
		port.mapbase = res[i].start;
		port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start);
		vr41xx_siu_early_setup(&port);
	}
}
OPNay/android_kernel_samsung_palladio | arch/x86/oprofile/nmi_timer_int.c | 1716 | 1441 | /**
* @file nmi_timer_int.c
*
* @remark Copyright 2003 OProfile authors
* @remark Read the file COPYING
*
* @author Zwane Mwaikambo <zwane@linuxpower.ca>
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/rcupdate.h>
#include <linux/kdebug.h>
#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/ptrace.h>
/*
 * Die-chain callback: feed every NMI into oprofile as a timer sample and
 * consume it; all other die events pass through untouched.
 */
static int profile_timer_exceptions_notify(struct notifier_block *self,
					   unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_NMI)
		return NOTIFY_DONE;

	oprofile_add_sample(args->regs, 0);
	return NOTIFY_STOP;
}
/* Registered on the die chain while profiling is running. */
static struct notifier_block profile_timer_exceptions_nb = {
	.notifier_call = profile_timer_exceptions_notify,
	.next = NULL,
	.priority = 0
};
/*
 * oprofile ->start hook: hook the die chain.  Returns nonzero (1) on
 * failure, matching the oprofile_operations convention.
 */
static int timer_start(void)
{
	return register_die_notifier(&profile_timer_exceptions_nb) ? 1 : 0;
}
/* oprofile ->stop hook: unhook the die chain and drain in-flight NMIs. */
static void timer_stop(void)
{
	unregister_die_notifier(&profile_timer_exceptions_nb);
	synchronize_sched();  /* Allow already-started NMIs to complete. */
}
/*
 * Wire up the NMI-timer profiling mode.  Only usable while the NMI
 * watchdog is ticking through the IO-APIC; otherwise report -ENODEV so
 * oprofile falls back to another mode.
 */
int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
	if (nmi_watchdog != NMI_IO_APIC || atomic_read(&nmi_active) <= 0)
		return -ENODEV;

	ops->cpu_type = "timer";
	ops->start = timer_start;
	ops->stop = timer_stop;
	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
	return 0;
}
| gpl-2.0 |
Troj80/android_kernel_htc_vivo | drivers/usb/otg/otg-wakelock.c | 4276 | 3725 | /*
* otg-wakelock.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/wakelock.h>
#include <linux/spinlock.h>
#include <linux/usb/otg.h>
/* How long (ms) the VBUS wakelock persists after a disconnect-type event. */
#define TEMPORARY_HOLD_TIME	2000

/* Module parameter; when false the VBUS wakelock is never held. */
static bool enabled = true;
static struct usb_phy *otgwl_xceiv;
static struct notifier_block otgwl_nb;

/*
 * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
 * held field is updated to match.
 */
static DEFINE_SPINLOCK(otgwl_spinlock);

/*
 * Only one lock, but since these 3 fields are associated with each other...
 */
struct otgwl_lock {
	char name[40];
	struct wake_lock wakelock;
	bool held;	/* whether we currently own the wakelock; guarded by otgwl_spinlock */
};

/*
 * VBUS present lock.  Also used as a timed lock on charger
 * connect/disconnect and USB host disconnect, to allow the system
 * to react to the change in power.
 */
static struct otgwl_lock vbus_lock;
/* Take the wakelock unless we already own it.  Caller holds otgwl_spinlock. */
static void otgwl_hold(struct otgwl_lock *lock)
{
	if (lock->held)
		return;

	wake_lock(&lock->wakelock);
	lock->held = true;
}
/*
 * Arm a self-expiring hold.  Because the kernel releases it on timeout,
 * the lock is recorded as not held.  Caller holds otgwl_spinlock.
 */
static void otgwl_temporary_hold(struct otgwl_lock *lock)
{
	long timeout = msecs_to_jiffies(TEMPORARY_HOLD_TIME);

	wake_lock_timeout(&lock->wakelock, timeout);
	lock->held = false;
}
/* Release the wakelock if we own it.  Caller holds otgwl_spinlock. */
static void otgwl_drop(struct otgwl_lock *lock)
{
	if (!lock->held)
		return;

	wake_unlock(&lock->wakelock);
	lock->held = false;
}
/*
 * Map a transceiver event onto the VBUS wakelock state under
 * otgwl_spinlock.  VBUS/enumeration keeps the system awake; disconnect,
 * ID and charger events get a short timed hold so userspace can react;
 * anything else leaves the lock as-is.
 */
static void otgwl_handle_event(unsigned long event)
{
	unsigned long irqflags;

	spin_lock_irqsave(&otgwl_spinlock, irqflags);

	if (!enabled) {
		/* Wakelock support disabled via module parameter. */
		otgwl_drop(&vbus_lock);
	} else if (event == USB_EVENT_VBUS ||
		   event == USB_EVENT_ENUMERATED) {
		otgwl_hold(&vbus_lock);
	} else if (event == USB_EVENT_NONE ||
		   event == USB_EVENT_ID ||
		   event == USB_EVENT_CHARGER) {
		otgwl_temporary_hold(&vbus_lock);
	}

	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
}
/* Transceiver notifier: translate events into wakelock state, never veto. */
static int otgwl_otg_notifications(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	otgwl_handle_event(event);
	return NOTIFY_OK;
}
/*
 * "enabled" module-parameter setter: after the bool is updated, replay the
 * transceiver's last event so the wakelock state immediately matches the
 * new setting.
 */
static int set_enabled(const char *val, const struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	if (err)
		return err;

	if (otgwl_xceiv)
		otgwl_handle_event(otgwl_xceiv->last_event);

	return 0;
}
/* Custom ops so writes to /sys/module/.../enabled re-sync the wakelock. */
static struct kernel_param_ops enabled_param_ops = {
	.set = set_enabled,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
/*
 * Bind to the system USB transceiver, create the VBUS wakelock and register
 * for transceiver events.  Runs at late_initcall so the transceiver driver
 * has had a chance to probe first.  Returns -ENODEV when no transceiver is
 * present; on notifier-registration failure all local state is undone.
 */
static int __init otg_wakelock_init(void)
{
	int ret;

	otgwl_xceiv = usb_get_transceiver();

	if (!otgwl_xceiv) {
		pr_err("%s: No USB transceiver found\n", __func__);
		return -ENODEV;
	}

	snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
		 dev_name(otgwl_xceiv->dev));
	wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
		       vbus_lock.name);

	otgwl_nb.notifier_call = otgwl_otg_notifications;
	ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);

	if (ret) {
		/* Roll back: clear the cached phy and destroy the wakelock. */
		pr_err("%s: usb_register_notifier on transceiver %s"
		       " failed\n", __func__,
		       dev_name(otgwl_xceiv->dev));
		otgwl_xceiv = NULL;
		wake_lock_destroy(&vbus_lock.wakelock);
		return ret;
	}

	/* Seed the lock state from whatever event happened before we hooked. */
	otgwl_handle_event(otgwl_xceiv->last_event);

	return ret;
}

late_initcall(otg_wakelock_init);
| gpl-2.0 |
FrozenCow/msm | net/ipv4/netfilter/nf_nat_core.c | 4276 | 21584 | /* NAT for netfilter; shared with compatibility layer. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h> /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
/* Serializes writes to nf_nat_protos[] and the by-source hash chains. */
static DEFINE_SPINLOCK(nf_nat_lock);

/* Cached IPv4 l3proto; taken in nf_nat_init(), put in nf_nat_cleanup(). */
static struct nf_conntrack_l3proto *l3proto __read_mostly;

/* One slot per IP protocol number (8-bit protocol field). */
#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
						__read_mostly;

/* Per-protocol NAT ops lookup; caller must be inside an RCU read section. */
static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all ^ zone,
			    tuple->dst.protonum, nf_conntrack_hash_rnd);

	/* Scale the 32-bit hash into [0, nat_htable_size) without a modulo. */
	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple reply;

	/*
	 * Conntrack only indexes incoming tuples and NAT means outgoing
	 * tuples have no fixed mapping, so invert the tuple and probe for
	 * the reply an existing connection would generate.  A separate
	 * hash could be kept if this ever proves too slow.
	 */
	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
/* If we source map this tuple so reply looks like reply_tuple, will
* that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_ipv4_range *range)
{
const struct nf_nat_protocol *proto;
int ret = 0;
/* If we are supposed to map IPs, then we must be in the
range specified, otherwise let this drag us onto a new src IP. */
if (range->flags & NF_NAT_RANGE_MAP_IPS) {
if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
return 0;
}
rcu_read_lock();
proto = __nf_nat_proto_find(tuple->dst.protonum);
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
proto->in_range(tuple, NF_NAT_MANIP_SRC,
&range->min, &range->max))
ret = 1;
rcu_read_unlock();
return ret;
}
/* Does ct's original direction share protocol and full source identity
 * (address + port/id) with tuple? */
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *orig =
		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

	return orig->dst.protonum == tuple->dst.protonum &&
	       orig->src.u3.ip == tuple->src.u3.ip &&
	       orig->src.u.all == tuple->src.u.all;
}
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_ipv4_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;
	const struct hlist_node *n;

	/*
	 * Walk the by-source chain under RCU looking for an existing
	 * connection from the same source in the same zone; if its
	 * established mapping also satisfies "range", reuse it so the
	 * new connection keeps the same translated source.  Returns 1
	 * with *result filled in, or 0 when nothing fits.
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}
/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_ipv4_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	/* Points at the tuple field this manip rewrites (src or dst IP). */
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping? Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 range->flags & NF_NAT_RANGE_PERSISTENT ?
				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
	/* Scale the hash into [minip, maxip] without a modulo. */
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}
/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_ipv4_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_nat_protocol *proto;
	u16 zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips not an issue. */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone, orig_tuple, tuple,
						range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			/* A single-value proto range may be reused even if
			 * "taken": there is nothing else to pick. */
			if (proto->in_range(tuple, maniptype, &range->min,
					    &range->max) &&
			    (range->min.all == range->max.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last change: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}
/*
 * Establish the NAT binding for one direction of a connection: pick a
 * unique translated tuple, teach conntrack to recognize the replies, and
 * (for SRC manips) index the connection in the by-source hash.  Always
 * returns NF_ACCEPT; on extension-allocation failure the packet passes
 * untranslated.
 */
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_ipv4_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;

	/* nat helper or nfctnetlink also setup binding */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ipv4.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
/* Returns true if succeeded. */
/*
 * Rewrite one packet: first let the per-protocol handler fix ports/ids and
 * transport checksums, then rewrite the IP address for the requested manip
 * and patch the IP header checksum incrementally.
 */
static bool
manip_pkt(u_int16_t proto,
	  struct sk_buff *skb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	const struct nf_nat_protocol *p;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;

	/* Manipulate protcol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(skb, iphdroff, target, maniptype))
		return false;

	/* Re-read: skb data may have been reallocated by the proto handler. */
	iph = (void *)skb->data + iphdroff;

	if (maniptype == NF_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		/* Rewrite failed (e.g. non-writable skb): drop the packet. */
		if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
/*
 * Translate an ICMP error and the packet embedded inside it so both match
 * the NAT mapping of the connection the error refers to.  Returns 1 when
 * the packet was handled (or needed no work), 0 to drop it.
 */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff *skb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_tuple target;
	int hdrlen = ip_hdrlen(skb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)skb->data + hdrlen;

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
		     skb->nfctinfo == IP_CT_RELATED_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Nothing to translate in this direction. */
	if (!(ct->status & statusbit))
		return 1;

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", skb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt inner. */
		inside = (void *)skb->data + hdrlen;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
	if (!manip_pkt(0, skb, 0, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
/* Protocol registration. */
/*
 * Claim the slot for proto->protonum.  Fails with -EBUSY when another
 * module already installed a handler (the slot is not the "unknown"
 * placeholder).  nf_nat_lock serializes updates to nf_nat_protos[].
 */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
	const struct nf_nat_protocol *cur;
	int ret = 0;

	spin_lock_bh(&nf_nat_lock);
	cur = rcu_dereference_protected(nf_nat_protos[proto->protonum],
					lockdep_is_held(&nf_nat_lock));
	if (cur != &nf_nat_unknown_protocol)
		ret = -EBUSY;
	else
		RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
	spin_unlock_bh(&nf_nat_lock);

	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
	spin_lock_bh(&nf_nat_lock);
	/* Restore the "unknown" placeholder for this protocol number. */
	RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
			 &nf_nat_unknown_protocol);
	spin_unlock_bh(&nf_nat_lock);
	/* Wait until no RCU reader can still hold the old ops pointer. */
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
/* No one using conntrack by the time this called. */
/* Extension ->destroy hook: unhook a dying SRC-NATed ct from the
 * by-source hash. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	/* nat->ct is only set once the ct was added to the hash. */
	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}
/* Extension ->move hook: when the ct extension area is reallocated, fix up
 * the by-source hash node to point at the new storage. */
static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	/* Only hashed (SRC-NAT-done) entries need relinking. */
	if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
		return;

	spin_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}
/* Conntrack extension descriptor for per-connection NAT state. */
static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
/* Netlink attribute layout for the per-protocol (port range) NAT attrs. */
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};
/*
 * Parse the nested CTA_NAT_PROTO attribute and let the connection's NAT
 * protocol handler (if it implements ->nlattr_to_range) fill the proto
 * part of *range.  Returns 0 or a negative parse error.
 */
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_ipv4_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_protocol *npt;
	int err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
				   protonat_nla_policy);

	if (err < 0)
		return err;

	rcu_read_lock();
	npt = __nf_nat_proto_find(nf_ct_protonum(ct));
	if (npt->nlattr_to_range)
		err = npt->nlattr_to_range(tb, range);
	rcu_read_unlock();

	return err;
}
/* Netlink attribute layout for the top-level CTA_NAT_* attributes. */
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};
/*
 * Build an nf_nat_ipv4_range from a userspace CTA_NAT attribute.  A
 * missing MAXIP means a single-address range; a nonzero MINIP enables IP
 * mapping.  Returns 0 on success or a negative parse error.
 */
static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	if (tb[CTA_NAT_MINIP])
		range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);

	range->max_ip = tb[CTA_NAT_MAXIP] ?
		nla_get_be32(tb[CTA_NAT_MAXIP]) : range->min_ip;

	if (range->min_ip)
		range->flags |= NF_NAT_RANGE_MAP_IPS;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
	if (err < 0)
		return err;

	return 0;
}
/*
 * ctnetlink entry point: decode the userspace NAT request and install the
 * binding.  -EINVAL on malformed attributes, -EEXIST when this manip
 * direction is already set up.
 */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_ipv4_range range;

	if (nfnetlink_parse_nat(attr, ct, &range) < 0)
		return -EINVAL;
	if (nf_nat_initialized(ct, manip))
		return -EEXIST;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
/* Built without ctnetlink support: NAT setup via netlink is unavailable. */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
/* Per-netns init: allocate the by-source NAT hash, sized like the
 * conntrack hash for now. */
static int __net_init nf_nat_net_init(struct net *net)
{
	/* Leave them the same for the moment. */
	net->ipv4.nat_htable_size = net->ct.htable_size;
	net->ipv4.nat_bysource =
		nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);

	return net->ipv4.nat_bysource ? 0 : -ENOMEM;
}
/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (nat) {
		/* Wipe the extension and every NAT-related status bit. */
		memset(nat, 0, sizeof(*nat));
		i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
			       IPS_SEQ_ADJUST);
	}
	return 0;
}
/* Per-netns teardown: strip NAT state from remaining conntracks, wait out
 * RCU readers, then release the by-source hash. */
static void __net_exit nf_nat_net_exit(struct net *net)
{
	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
}
/* Per-network-namespace lifecycle hooks. */
static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

/* Expectation callback used by NAT helpers that follow the master ct. */
static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};
/*
 * Module init: register the conntrack extension and pernet ops, seed the
 * protocol table with the builtin TCP/UDP/ICMP handlers, and publish the
 * function hooks other modules call into.  Ordering matters: the extension
 * must exist before any conntrack can be NATed.
 */
static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	need_ipv4_conntrack();

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	/* Sew in builtin protocols. */
	spin_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	spin_unlock_bh(&nf_nat_lock);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

	nf_ct_helper_expectfn_register(&follow_master_nat);

	/* Publish the cross-module hooks; each must be unclaimed. */
	BUG_ON(nf_nat_seq_adjust_hook != NULL);
	RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
	BUG_ON(nf_ct_nat_offset != NULL);
	RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
	return 0;

cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}
/* Module exit: undo nf_nat_init() registrations, clear the published
 * hooks, and wait for in-flight readers before unload. */
static void __exit nf_nat_cleanup(void)
{
	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
	RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
	synchronize_net();
}
MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-ipv4");
module_init(nf_nat_init);
module_exit(nf_nat_cleanup);
| gpl-2.0 |
defconoi/Unleashed-N5 | drivers/acpi/processor_throttling.c | 4276 | 32294 | /*
* processor_throttling.c - Throttling submodule of the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* - Added processor hotplug support
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/* ignore_tpc:
 *  0 -> acpi processor driver doesn't ignore _TPC values
 *  1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");

/* Payload passed through the throttling notifier chain. */
struct throttling_tstate {
	unsigned int cpu;		/* cpu nr */
	int target_state;		/* target T-state */
};

#define THROTTLING_PRECHANGE      (1)
#define THROTTLING_POSTCHANGE      (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr,
						int state, bool force);
/*
 * Validate the _TSD throttling-domain data collected from every CPU and
 * compute each processor's shared_cpu_map.  Any inconsistency (missing
 * _TSD, mismatched processor counts or coordination types within a
 * domain) invalidates coordination for ALL CPUs: each then gets a
 * one-CPU map with SW_ALL coordination.  Returns 0 or a negative errno.
 */
static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, lets setup T-state
	 * coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If tsd package for one cpu is invalid, the coordination
		 * among all CPUs is thought as invalid.
		 * Maybe it is ugly.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Already assigned to a domain by an earlier iteration. */
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processor in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		/* First pass: collect the domain members and cross-check
		 * their _TSD packages against this CPU's. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_porcessors and
			 * coordination type. Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		/* Second pass: propagate the finished map to every member. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUS have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced as SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}
/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Assume no T-state coordination\n"));
}
/*
 * Throttling notifier.  PRECHANGE clamps the requested T-state against the
 * thermal, user and _TPC (platform) limits and writes the result back into
 * the event payload; POSTCHANGE records the state that was actually set.
 * Always returns 0.
 */
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				"unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * Prechange event is used to choose one proper t-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		/* Clamp to the highest valid T-state index. */
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
				"Exceed the limit of T-state \n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
				"target T-state of CPU %d is T%d\n",
				cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * Postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
				"CPU %d is switched to T%d\n",
				cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
			"Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}
/*
 * _TPC - Throttling Present Capabilities
 *
 * Evaluate _TPC and cache the platform's minimum allowed T-state in
 * pr->throttling_platform_limit.  When the "ignore_tpc" module option
 * is set, the limit is forced to 0.  Returns 0 on success, -EINVAL for
 * a NULL processor, -ENODEV when _TPC cannot be evaluated.
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	unsigned long long tpc = 0;
	acpi_status status;

	if (!pr)
		return -EINVAL;

	if (!ignore_tpc) {
		status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
		if (ACPI_FAILURE(status)) {
			if (status != AE_NOT_FOUND)
				ACPI_EXCEPTION((AE_INFO, status,
						"Evaluating _TPC"));
			return -ENODEV;
		}
	}

	pr->throttling_platform_limit = (int)tpc;
	return 0;
}
/*
 * Re-read _TPC and, if the platform limit moved, switch to a T-state
 * that satisfies it (also honouring the thermal and user limits when
 * relaxing throttling).  No-op when ignore_tpc is set or the current
 * state already equals the limit.
 */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
int result = 0;
int throttling_limit;
int current_state;
struct acpi_processor_limit *limit;
int target_state;
if (ignore_tpc)
return 0;
result = acpi_processor_get_platform_limit(pr);
if (result) {
/* Throttling Limit is unsupported */
return result;
}
throttling_limit = pr->throttling_platform_limit;
if (throttling_limit >= pr->throttling.state_count) {
/* Uncorrect Throttling Limit */
return -EINVAL;
}
current_state = pr->throttling.state;
if (current_state > throttling_limit) {
/*
 * The current state can meet the requirement of
 * _TPC limit. But it is reasonable that OSPM changes
 * t-states from high to low for better performance.
 * Of course the limit condition of thermal
 * and user should be considered.
 */
limit = &pr->limit;
target_state = throttling_limit;
if (limit->thermal.tx > target_state)
target_state = limit->thermal.tx;
if (limit->user.tx > target_state)
target_state = limit->user.tx;
} else if (current_state == throttling_limit) {
/*
 * Unnecessary to change the throttling state
 */
return 0;
} else {
/*
 * If the current state is lower than the limit of _TPC, it
 * will be forced to switch to the throttling state defined
 * by throttling_platfor_limit.
 * Because the previous state meets with the limit condition
 * of thermal and user, it is unnecessary to check it again.
 */
target_state = throttling_limit;
}
return acpi_processor_set_throttling(pr, target_state, false);
}
/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * It is noted that it won't reevaluate the following properties for
 * the T-state.
 * 1. Control method.
 * 2. the number of supported T-state
 * 3. TSD domain
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
unsigned long action)
{
int result = 0;
if (action == CPU_DEAD) {
/* When one CPU is offline, the T-state throttling
 * will be invalidated.
 */
pr->flags.throttling = 0;
return;
}
/* the following is to recheck whether the T-state is valid for
 * the online CPU
 */
if (!pr->throttling.state_count) {
/* If the number of T-state is invalid, it is
 * invalidated.
 */
pr->flags.throttling = 0;
return;
}
pr->flags.throttling = 1;
/* Disable throttling (if enabled). We'll let subsequent
 * policy (e.g.thermal) decide to lower performance if it
 * so chooses, but for now we'll crank up the speed.
 */
result = acpi_processor_get_throttling(pr);
if (result)
goto end;
if (pr->throttling.state) {
/* Reset to T0 (no throttling); failure clears the flag below. */
result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
end:
if (result)
pr->flags.throttling = 0;
}
/*
 * _PTC - Processor Throttling Control (and status) register location
 */
/*
 * Evaluate _PTC and cache its two register descriptions
 * (control_register, status_register) in pr->throttling.  Both are
 * validated to fit in 32 bits (bit_width + bit_offset <= 32).
 * Returns 0 on success, -ENODEV when _PTC is absent/unevaluable,
 * -EFAULT for malformed package data.
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *ptc = NULL;
union acpi_object obj = { 0 };
struct acpi_processor_throttling *throttling;
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
}
return -ENODEV;
}
ptc = (union acpi_object *)buffer.pointer;
/* _PTC must be a package of exactly two buffer elements. */
if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
|| (ptc->package.count != 2)) {
printk(KERN_ERR PREFIX "Invalid _PTC data\n");
result = -EFAULT;
goto end;
}
/*
 * control_register
 */
obj = ptc->package.elements[0];
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
printk(KERN_ERR PREFIX
"Invalid _PTC data (control_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->throttling.control_register, obj.buffer.pointer,
sizeof(struct acpi_ptc_register));
/*
 * status_register
 */
obj = ptc->package.elements[1];
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->throttling.status_register, obj.buffer.pointer,
sizeof(struct acpi_ptc_register));
throttling = &pr->throttling;
/* Readers/writers below assume the fields fit in a 32-bit access. */
if ((throttling->control_register.bit_width +
throttling->control_register.bit_offset) > 32) {
printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
result = -EFAULT;
goto end;
}
if ((throttling->status_register.bit_width +
throttling->status_register.bit_offset) > 32) {
printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
result = -EFAULT;
goto end;
}
end:
kfree(buffer.pointer);
return result;
}
/*
 * _TSS - Throttling Supported States
 *
 * Evaluate _TSS and cache one acpi_processor_tx_tss entry per supported
 * T-state in pr->throttling.states_tss (count in state_count).
 * Returns 0 on success, -ENODEV when _TSS is absent/unevaluable,
 * -ENOMEM on allocation failure, -EFAULT for malformed data.
 *
 * Fix vs. original: all failure paths now also reset states_tss to NULL
 * and state_count to 0.  The original kfree()d the array but left both
 * the dangling pointer and the stale count behind, and helpers such as
 * acpi_get_throttling_state() index states_tss whenever state_count is
 * non-zero.
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
		}
		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  tss->package.count));

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
		    GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		pr->throttling.state_count = 0;
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
			result = -EFAULT;
			goto fail;
		}

		/* A zero frequency percentage is meaningless per ACPI. */
		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			goto fail;
		}
	}
	goto end;

fail:
	/* Don't leave a dangling pointer / stale count behind. */
	kfree(pr->throttling.states_tss);
	pr->throttling.states_tss = NULL;
	pr->throttling.state_count = 0;
end:
	kfree(buffer.pointer);
	return result;
}
/*
 * _TSD - T-State Dependencies
 */
/*
 * Evaluate _TSD and cache the single acpi_tsd_package it must contain
 * in pr->throttling.domain_info.  On success, tsd_valid_flag is set,
 * shared_type takes the package's coordination type, and this CPU is
 * added to shared_cpu_map.  An unknown coordination type clears the
 * valid flag and forces SW_ALL.  Returns 0 on success, -ENODEV when
 * _TSD is absent/unevaluable, -EFAULT for malformed data.
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
struct acpi_buffer state = { 0, NULL };
union acpi_object *tsd = NULL;
struct acpi_tsd_package *pdomain;
struct acpi_processor_throttling *pthrottling;
pthrottling = &pr->throttling;
/* Invalidate first; it is only set back once parsing succeeds. */
pthrottling->tsd_valid_flag = 0;
status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
}
return -ENODEV;
}
tsd = buffer.pointer;
if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (tsd->package.count != 1) {
printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
pdomain = &(pr->throttling.domain_info);
state.length = sizeof(struct acpi_tsd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(tsd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
result = -EFAULT;
goto end;
}
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 1;
pthrottling->shared_type = pdomain->coord_type;
cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
/*
 * If the coordination type is not defined in ACPI spec,
 * the tsd_valid_flag will be clear and coordination type
 * will be forecd as DOMAIN_COORD_TYPE_SW_ALL.
 */
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
pthrottling->tsd_valid_flag = 0;
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
end:
kfree(buffer.pointer);
return result;
}
/* --------------------------------------------------------------------------
Throttling Control
-------------------------------------------------------------------------- */
/*
 * Read the current T-state via the legacy FADT P_BLK duty-cycle
 * register (port I/O) and store it in pr->throttling.state.
 * Bit 4 of the register is the throttle-enable bit; when set, the
 * duty value field encodes the state.  Returns 0, or -EINVAL/-ENODEV
 * for a missing processor / unsupported throttling.
 */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
int state = 0;
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
pr->throttling.state = 0;
duty_mask = pr->throttling.state_count - 1;
duty_mask <<= pr->throttling.duty_offset;
/* IRQs off around the raw port read so the value is consistent. */
local_irq_disable();
value = inl(pr->throttling.address);
/*
 * Compute the current throttling state when throttling is enabled
 * (bit 4 is on).
 */
if (value & 0x10) {
duty_value = value & duty_mask;
duty_value >>= pr->throttling.duty_offset;
if (duty_value)
state = pr->throttling.state_count - duty_value;
}
pr->throttling.state = state;
local_irq_enable();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Throttling state is T%d (%d%% throttling applied)\n",
state, pr->throttling.states[state].performance));
return 0;
}
#ifdef CONFIG_X86
/*
 * Read the current throttle value from MSR_IA32_THERM_CONTROL.
 * Only meaningful on Intel CPUs with the ACPI feature flag; other
 * vendors get an error message and -1.
 *
 * Fix vs. original: rdmsr_safe() takes two u32 pointers.  The original
 * declared u64 locals and punned them through (u32 *) casts, which
 * violates strict aliasing and only produced the right value on
 * little-endian by accident.  Use real u32 halves and combine them.
 */
static int acpi_throttling_rdmsr(u64 *value)
{
	u32 msr_low, msr_high;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL, &msr_low, &msr_high);
		*value = ((u64)msr_high << 32) | msr_low;
		ret = 0;
	}
	return ret;
}

/*
 * Write a throttle value to MSR_IA32_THERM_CONTROL (Intel-only, see
 * above).  Returns 0 on success, -1 when unsupported.
 */
static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32)(value & 0xffffffff), (u32)(value >> 32));
		ret = 0;
	}
	return ret;
}
#else
/* Non-x86 stubs: FIXED_HARDWARE throttling is not implemented. */
static int acpi_throttling_rdmsr(u64 *value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space,NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space,NOT supported yet\n");
	return -1;
}
#endif
/*
 * Read the raw throttling status value via the _PTC status register,
 * either from port I/O (SYSTEM_IO) or the THERM_CONTROL MSR
 * (FIXED_HARDWARE).  Returns 0 on success, -1 on failure/unknown space.
 */
static int acpi_read_throttling_status(struct acpi_processor *pr,
				       u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, &ptc_value,
				  (u32) (bit_width + bit_offset));
		/*
		 * Use a 64-bit shift: _PTC validation only guarantees
		 * bit_width + bit_offset <= 32, so bit_width may be up
		 * to 32 and "1 << 32" on an int is undefined behavior.
		 */
		ptc_mask = (1ULL << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}
/*
 * Write a raw throttling control value via the _PTC control register,
 * either to port I/O (SYSTEM_IO) or the THERM_CONTROL MSR
 * (FIXED_HARDWARE).  Returns 0 on success, -1 on failure/unknown space.
 */
static int acpi_write_throttling_state(struct acpi_processor *pr,
				       u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		/*
		 * 64-bit shift: bit_width may legitimately be 32
		 * (validated only as width + offset <= 32), and
		 * "1 << 32" on an int is undefined behavior.
		 */
		ptc_mask = (1ULL << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
				   control_register.address,
				   (u32) (ptc_value << bit_offset),
				   (u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}
/*
 * Map a raw _PTC status value back to its T-state index by scanning
 * the cached _TSS table.  Returns the matching index, or -1 when no
 * entry's "control" field equals the value.
 */
static int acpi_get_throttling_state(struct acpi_processor *pr,
				     u64 value)
{
	struct acpi_processor_tx_tss *entry;
	int idx;

	for (idx = 0; idx < pr->throttling.state_count; idx++) {
		entry = (struct acpi_processor_tx_tss *)
		    &(pr->throttling.states_tss[idx]);
		if (entry->control == value)
			return idx;
	}
	return -1;
}
/*
 * Look up the _TSS "control" value for T-state index <state>.
 * Fills *value and returns 0 on success, -1 for an out-of-range state.
 *
 * Fix vs. original: valid indices are 0 .. state_count - 1.  The
 * original bound was "state <= state_count", which would read one
 * element past the end of states_tss[] for state == state_count.
 * All current callers pass state <= state_count - 1, so behavior is
 * unchanged for valid inputs.
 */
static int acpi_get_throttling_value(struct acpi_processor *pr,
				     int state, u64 *value)
{
	int ret = -1;

	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}
/*
 * _PTC-based "get current T-state": read the status register, map the
 * raw value back to a _TSS index and cache it in pr->throttling.state.
 * An unrecognized raw value resets the hardware to T0.  Returns 0, or
 * -EINVAL/-ENODEV for a missing processor / unsupported throttling,
 * or the error from the corrective set_throttling call.
 */
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
int state = 0;
int ret;
u64 value;
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
pr->throttling.state = 0;
value = 0;
ret = acpi_read_throttling_status(pr, &value);
if (ret >= 0) {
state = acpi_get_throttling_state(pr, value);
if (state == -1) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
/* force=true: write T0 even though cached state already matches. */
ret = acpi_processor_set_throttling(pr, state, true);
if (ret)
return ret;
}
pr->throttling.state = state;
}
return 0;
}
/*
 * Dispatch to the FADT- or PTC-based "get throttling" handler, after
 * temporarily migrating the current task to pr's CPU (the handlers
 * access per-CPU registers).  The previous affinity is restored before
 * returning.
 *
 * Fix vs. original: "&current->cpus_allowed" had been corrupted to the
 * mojibake "¤t->cpus_allowed" (an HTML-entity-damaged "&current"),
 * which does not compile; restored.
 */
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_var_t saved_mask;
	int ret;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Migrate task to the cpu pointed by pr.
	 */
	cpumask_copy(saved_mask, &current->cpus_allowed);
	/* FIXME: use work_on_cpu() */
	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
		/* Can't migrate to the target pr->id CPU. Exit */
		free_cpumask_var(saved_mask);
		return -ENODEV;
	}
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(saved_mask);

	return ret;
}
/*
 * Derive the legacy FADT throttling parameters: number of states from
 * the FADT duty width, and a linear performance/power table (values in
 * 1/10 percent).  Returns 0 or -EINVAL when the FADT information is
 * unusable.
 *
 * NOTE(review): the sanity checks read pr->throttling.duty_width but
 * state_count is computed from acpi_gbl_FADT.duty_width — presumably
 * these are always set from the same FADT field by the caller; confirm
 * before relying on it.
 */
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
int i, step;
if (!pr->throttling.address) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
return -EINVAL;
} else if (!pr->throttling.duty_width) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
return -EINVAL;
}
/* TBD: Support duty_cycle values that span bit 4. */
else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
return -EINVAL;
}
pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
/*
 * Compute state values. Note that throttling displays a linear power
 * performance relationship (at 50% performance the CPU will consume
 * 50% power). Values are in 1/10th of a percent to preserve accuracy.
 */
step = (1000 / pr->throttling.state_count);
for (i = 0; i < pr->throttling.state_count; i++) {
pr->throttling.states[i].performance = 1000 - step * i;
pr->throttling.states[i].power = 1000 - step * i;
}
return 0;
}
/*
 * Legacy FADT P_BLK "set T-state": program the duty-cycle field via
 * port I/O.  Throttling must be disabled (bit 4 cleared) before the
 * duty value is changed; state 0 leaves it disabled.  Returns 0, or
 * -EINVAL/-ENODEV/-EPERM for bad state, unsupported throttling, or a
 * state below the _TPC platform limit.
 *
 * NOTE(review): duty_value is shifted by pr->throttling.duty_offset but
 * duty_mask by acpi_gbl_FADT.duty_offset — presumably identical values;
 * confirm before touching either.
 */
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
int state, bool force)
{
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
if (!pr)
return -EINVAL;
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
/* Unless forced, writing the already-current state is a no-op. */
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
return -EPERM;
/*
 * Calculate the duty_value and duty_mask.
 */
if (state) {
duty_value = pr->throttling.state_count - state;
duty_value <<= pr->throttling.duty_offset;
/* Used to clear all duty_value bits */
duty_mask = pr->throttling.state_count - 1;
duty_mask <<= acpi_gbl_FADT.duty_offset;
duty_mask = ~duty_mask;
}
local_irq_disable();
/*
 * Disable throttling by writing a 0 to bit 4. Note that we must
 * turn it off before you can change the duty_value.
 */
value = inl(pr->throttling.address);
if (value & 0x10) {
value &= 0xFFFFFFEF;
outl(value, pr->throttling.address);
}
/*
 * Write the new duty_value and then enable throttling. Note
 * that a state value of 0 leaves throttling disabled.
 */
if (state) {
value &= duty_mask;
value |= duty_value;
outl(value, pr->throttling.address);
value |= 0x00000010;
outl(value, pr->throttling.address);
}
pr->throttling.state = state;
local_irq_enable();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Throttling state set to T%d (%d%%)\n", state,
(pr->throttling.states[state].performance ? pr->
throttling.states[state].performance / 10 : 0)));
return 0;
}
/*
 * _PTC-based "set T-state": look up the _TSS control value for <state>
 * and write it through the control register.  Returns 0, or
 * -EINVAL/-ENODEV/-EPERM for bad state, unsupported throttling, or a
 * state below the _TPC platform limit.
 *
 * NOTE(review): a failed acpi_get_throttling_value() lookup is silently
 * ignored and 0 is still returned — the cached state is simply left
 * unchanged.  Looks intentional (best-effort), but worth confirming.
 */
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int state, bool force)
{
int ret;
u64 value;
if (!pr)
return -EINVAL;
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
/* Unless forced, writing the already-current state is a no-op. */
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
return -EPERM;
value = 0;
ret = acpi_get_throttling_value(pr, state, &value);
if (ret >= 0) {
acpi_write_throttling_state(pr, value);
pr->throttling.state = state;
}
return 0;
}
/*
 * Switch CPU pr (and, for SW_ALL/HW_ALL coordination, every online CPU
 * in its _TSD domain) to T-state <state>.  The PRECHANGE notifier may
 * raise the target state per-CPU; the POSTCHANGE notifier records the
 * result.  The task is migrated to each target CPU for the register
 * access and its original affinity is restored at the end.
 *
 * Fixes vs. original:
 *  - the cpu_is_offline() early return happened *after* both cpumasks
 *    were allocated, leaking them; the check now precedes allocation
 *    (it depends on nothing allocated here, so behavior is unchanged);
 *  - "&current->cpus_allowed" had been corrupted to the mojibake
 *    "¤t->cpus_allowed", which does not compile; restored.
 */
int acpi_processor_set_throttling(struct acpi_processor *pr,
				  int state, bool force)
{
	cpumask_var_t saved_mask;
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct throttling_tstate t_state;
	cpumask_var_t online_throttling_cpus;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * the cpu pointed by pr->id is offline. Unnecessary to change
		 * the throttling state any more.
		 */
		return -ENODEV;
	}

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
		free_cpumask_var(saved_mask);
		return -ENOMEM;
	}

	cpumask_copy(saved_mask, &current->cpus_allowed);
	t_state.target_state = state;
	p_throttling = &(pr->throttling);
	cpumask_and(online_throttling_cpus, cpu_online_mask,
		    p_throttling->shared_cpu_map);
	/*
	 * The throttling notifier will be called for every
	 * affected cpu in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
						   &t_state);
	}
	/*
	 * The function of acpi_processor_set_throttling will be called
	 * to switch T-state. If the coordination type is SW_ALL or HW_ALL,
	 * it is necessary to call it for every affected cpu. Otherwise
	 * it can be called only for the cpu pointed by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		/* FIXME: use work_on_cpu() */
		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
			/* Can't migrate to the pr->id CPU. Exit */
			ret = -ENODEV;
			goto exit;
		}
		ret = p_throttling->acpi_processor_set_throttling(pr,
						t_state.target_state, force);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set T-state for every affected
		 * cpus.
		 */
		for_each_cpu(i, online_throttling_cpus) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Invalid Pointer for CPU %d\n", i));
				continue;
			}
			/*
			 * If the throttling control is unsupported on CPU i,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Throttling Control is unsupported "
					"on CPU %d\n", i));
				continue;
			}
			t_state.cpu = i;
			/* FIXME: use work_on_cpu() */
			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
				continue;
			ret = match_pr->throttling.
				acpi_processor_set_throttling(
				match_pr, t_state.target_state, force);
		}
	}
	/*
	 * After the set_throttling is called, the
	 * throttling notifier is called for every
	 * affected cpu to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
						   &t_state);
	}
	/* restore the previous state */
	/* FIXME: use work_on_cpu() */
	set_cpus_allowed_ptr(current, saved_mask);
exit:
	free_cpumask_var(online_throttling_cpus);
	free_cpumask_var(saved_mask);
	return ret;
}
/*
 * Initialize throttling support for one processor: pick the _PTC/_TSS
 * handlers when all of _PTC/_TSS/_TPC evaluate, otherwise fall back to
 * the legacy FADT handlers; parse _TSD (defaulting to SW_ALL on
 * failure); then disable any throttling currently in effect.
 * Sets pr->flags.throttling on success (cleared again on late failure).
 */
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
struct acpi_processor_throttling *pthrottling;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
pr->throttling.address,
pr->throttling.duty_offset,
pr->throttling.duty_width));
/*
 * Evaluate _PTC, _TSS and _TPC
 * They must all be present or none of them can be used.
 */
if (acpi_processor_get_throttling_control(pr) ||
acpi_processor_get_throttling_states(pr) ||
acpi_processor_get_platform_limit(pr))
{
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
&acpi_processor_set_throttling_fadt;
/* No usable FADT info either: leave throttling disabled. */
if (acpi_processor_get_fadt_info(pr))
return 0;
} else {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_ptc;
pr->throttling.acpi_processor_set_throttling =
&acpi_processor_set_throttling_ptc;
}
/*
 * If TSD package for one CPU can't be parsed successfully, it means
 * that this CPU will have no coordination with other CPUs.
 */
if (acpi_processor_get_tsd(pr)) {
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0;
cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
/*
 * PIIX4 Errata: We don't support throttling on the original PIIX4.
 * This shouldn't be an issue as few (if any) mobile systems ever
 * used this part.
 */
if (errata.piix4.throttle) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Throttling not supported on PIIX4 A- or B-step\n"));
return 0;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
pr->throttling.state_count));
pr->flags.throttling = 1;
/*
 * Disable throttling (if enabled). We'll let subsequent policy (e.g.
 * thermal) decide to lower performance if it so chooses, but for now
 * we'll crank up the speed.
 */
result = acpi_processor_get_throttling(pr);
if (result)
goto end;
if (pr->throttling.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Disabling throttling (was T%d)\n",
pr->throttling.state));
result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
end:
if (result)
pr->flags.throttling = 0;
return result;
}
/* (dataset metadata removed: license gpl-2.0; next file: drivers/video/arkfb.c) */
/*
* linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
* with ICS 5342 dac (it is easy to add support for different dacs).
*
* Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Code is based on s3fb
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
/* Per-device private state for the ARK 2000PV framebuffer driver. */
struct arkfb_info {
int mclk_freq;		/* memory clock frequency; units not shown here — see probe code */
int mtrr_reg;		/* MTRR register handle (only used with CONFIG_MTRR) */
struct dac_info *dac;	/* attached RAMDAC (e.g. ICS 5342) */
struct vgastate state;	/* saved VGA register state (provides vgabase) */
struct mutex open_lock;	/* presumably serializes open/release around ref_count — confirm */
unsigned int ref_count;
u32 pseudo_palette[16];	/* fbdev pseudo palette for truecolor modes */
};
/* ------------------------------------------------------------------------- */
static const struct svga_fb_format arkfb_formats[] = {
{ 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
{ 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
{ 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
{ 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
{16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
{16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
{24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
{32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
SVGA_FORMAT_END
};
/* CRT timing register sets */
static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
static const struct svga_timing_regs ark_timing_regs = {
ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
};
/* ------------------------------------------------------------------------- */
/* Module parameters */
static char *mode_option __devinitdata = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;
#endif
MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
module_param(mode_option, charp, 0444);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0444);
MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
#endif
static int threshold = 4;
module_param(threshold, int, 0644);
MODULE_PARM_DESC(threshold, "FIFO threshold");
/* ------------------------------------------------------------------------- */
/*
 * fb_tile_ops.fb_settile: upload a text-mode font into video memory.
 * Only 8x16, 1-bit-deep, 256-glyph fonts are accepted; anything else is
 * rejected with an error message.  Each glyph row is written twice, 1KB
 * apart (the exact on-card font layout is hardware-specific — the
 * offsets below mirror the card's plane arrangement; confirm against
 * the ARK 2000PV datasheet before changing).
 */
static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
{
const u8 *font = map->data;
u8 __iomem *fb = (u8 __iomem *)info->screen_base;
int i, c;
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
"height %d, depth %d, length %d\n", info->node,
map->width, map->height, map->depth, map->length);
return;
}
fb += 2;
for (c = 0; c < map->length; c++) {
for (i = 0; i < map->height; i++) {
fb_writeb(font[i], &fb[i * 4]);
fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
}
fb += 128;
if ((c % 8) == 7)
fb += 128*8;
font += map->height;
}
}
/* fb_tile_ops.fb_tilecursor: delegate to the generic SVGA cursor helper. */
static void arkfb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
{
struct arkfb_info *par = info->par;
svga_tilecursor(par->state.vgabase, info, cursor);
}
/* Tile (text-mode) operations: mostly generic svga helpers, with
 * card-specific settile and tilecursor implementations above. */
static struct fb_tile_ops arkfb_tile_ops = {
.fb_settile = arkfb_settile,
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
.fb_tilecursor = arkfb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
/* ------------------------------------------------------------------------- */
/* image data is MSB-first, fb structure is MSB-first too */
/*
 * Expand a 4-bit color index into a 32-bit interleaved-plane mask:
 * plane bit N of the color becomes 0xFF in byte N of the result.
 */
static inline u32 expand_color(u32 c)
{
	u32 out = 0;
	int plane;

	for (plane = 0; plane < 4; plane++)
		if (c & (1u << plane))
			out |= 0xFFu << (8 * plane);
	return out;
}
/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
/*
 * Blit a 1-bit image into 4bpp interleaved-plane memory.  Each source
 * byte (8 pixels) expands to one 32-bit word: the mono pattern is
 * replicated across the four planes and used to select between the
 * expanded fg/bg colors.  Caller (arkfb_imageblit) guarantees width
 * and dx are multiples of 8.
 */
static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
{
u32 fg = expand_color(image->fg_color);
u32 bg = expand_color(image->bg_color);
const u8 *src1, *src;
u8 __iomem *dst1;
u32 __iomem *dst;
u32 val;
int x, y;
src1 = image->data;
dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ ((image->dx / 8) * 4);
for (y = 0; y < image->height; y++) {
src = src1;
dst = (u32 __iomem *) dst1;
for (x = 0; x < image->width; x += 8) {
/* replicate the 8-pixel mono pattern into all 4 byte lanes */
val = *(src++) * 0x01010101;
val = (val & fg) | (~val & bg);
fb_writel(val, dst++);
}
src1 += image->width / 8;
dst1 += info->fix.line_length;
}
}
/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
/*
 * Fill a rectangle in 4bpp interleaved-plane memory with a solid color:
 * one 32-bit write covers 8 pixels.  Caller (arkfb_fillrect) guarantees
 * width and dx are multiples of 8.
 */
static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
u32 fg = expand_color(rect->color);
u8 __iomem *dst1;
u32 __iomem *dst;
int x, y;
dst1 = info->screen_base + (rect->dy * info->fix.line_length)
+ ((rect->dx / 8) * 4);
for (y = 0; y < rect->height; y++) {
dst = (u32 __iomem *) dst1;
for (x = 0; x < rect->width; x += 8) {
fb_writel(fg, dst++);
}
dst1 += info->fix.line_length;
}
}
/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
/*
 * Scatter the 8 source bits into the chip's nibble order (high nibble in
 * low byte first) and widen each bit to a full nibble (0x0 or 0xF).
 */
static inline u32 expand_pixel(u32 c)
{
        u32 nibbles = ((c & 0x01) << 24) | ((c & 0x02) << 27)
                    | ((c & 0x04) << 14) | ((c & 0x08) << 17)
                    | ((c & 0x10) << 4)  | ((c & 0x20) << 7)
                    | ((c & 0x40) >> 6)  | ((c & 0x80) >> 3);

        return nibbles * 0xF;
}
/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
/*
 * Blit a 1-bit source image onto the packed 4bpp framebuffer: colours are
 * replicated to every nibble, the bitmap byte is expanded by expand_pixel,
 * and one 32-bit write covers 8 pixels.
 * arkfb_imageblit only routes here when width and dx are multiples of 8.
 */
static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
{
        u32 fg = image->fg_color * 0x11111111;
        u32 bg = image->bg_color * 0x11111111;
        const u8 *src1, *src;
        u8 __iomem *dst1;
        u32 __iomem *dst;
        u32 val;
        int x, y;

        src1 = image->data;
        dst1 = info->screen_base + (image->dy * info->fix.line_length)
                 + ((image->dx / 8) * 4);
        for (y = 0; y < image->height; y++) {
                src = src1;
                dst = (u32 __iomem *) dst1;
                for (x = 0; x < image->width; x += 8) {
                        val = expand_pixel(*(src++));
                        val = (val & fg) | (~val & bg); /* 1-bits pick fg, 0-bits pick bg */
                        fb_writel(val, dst++);
                }
                src1 += image->width / 8;
                dst1 += info->fix.line_length;
        }
}
/*
 * fb_ops.fb_imageblit: take the accelerated 4bpp paths only for 1-bit
 * sources whose width and x offset are 8-pixel aligned; everything else
 * falls back to the generic cfb_imageblit.
 */
static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
            && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
                if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
                        arkfb_iplan_imageblit(info, image);
                else
                        arkfb_cfb4_imageblit(info, image);
        } else
                cfb_imageblit(info, image);
}
/*
 * fb_ops.fb_fillrect: use the interleaved-planes fast fill only for
 * 8-pixel-aligned rectangles in 4bpp planar mode, otherwise fall back
 * to the generic cfb_fillrect.
 */
static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        if ((info->var.bits_per_pixel == 4)
            && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
            && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
                arkfb_iplan_fillrect(info, rect);
        else
                cfb_fillrect(info, rect);
}
/* ------------------------------------------------------------------------- */
/* RAMDAC output modes: pixel format plus DAC pixel-path width
 * (suffix _8 = 8-bit path, _16 = 16-bit/multiplexed path — see the
 * "pixel path" register writes in arkfb_set_par). */
enum
{
        DAC_PSEUDO8_8,
        DAC_RGB1555_8,
        DAC_RGB0565_8,
        DAC_RGB0888_8,
        DAC_RGB8888_8,
        DAC_PSEUDO8_16,
        DAC_RGB1555_16,
        DAC_RGB0565_16,
        DAC_RGB0888_16,
        DAC_RGB8888_16,
        DAC_MAX
};
/* Virtual table for a RAMDAC implementation (only set_mode/set_freq/release
 * are used by this driver; the get_* slots may be left NULL). */
struct dac_ops {
        int (*dac_get_mode)(struct dac_info *info);
        int (*dac_set_mode)(struct dac_info *info, int mode);
        int (*dac_get_freq)(struct dac_info *info, int channel);
        int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
        void (*dac_release)(struct dac_info *info);
};

/* Low-level register accessors supplied by the board code; `code` holds
 * (register, value) byte pairs, `count` is the number of pairs. */
typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);

/* Generic RAMDAC handle; concrete DACs embed this as their first member. */
struct dac_info
{
        struct dac_ops *dacops;
        dac_read_regs_t dac_read_regs;
        dac_write_regs_t dac_write_regs;
        void *data;             /* opaque cookie passed back to the accessors */
};
/* Read one DAC register; code[] carries a (register, value) pair and the
 * value slot is filled in by the board's read callback. */
static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
{
        u8 code[2] = {reg, 0};
        info->dac_read_regs(info->data, code, 1);
        return code[1];
}

/* Read `count` (register, value) pairs in one call. */
static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
{
        info->dac_read_regs(info->data, code, count);
}

/* Write one DAC register. */
static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
{
        u8 code[2] = {reg, val};
        info->dac_write_regs(info->data, code, 1);
}

/* Write `count` (register, value) pairs in one call. */
static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
{
        info->dac_write_regs(info->data, code, count);
}

/* Thin dispatchers into the DAC's ops table. */
static inline int dac_set_mode(struct dac_info *info, int mode)
{
        return info->dacops->dac_set_mode(info, mode);
}

static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
{
        return info->dacops->dac_set_freq(info, channel, freq);
}

static inline void dac_release(struct dac_info *info)
{
        info->dacops->dac_release(info);
}
/* ------------------------------------------------------------------------- */
/* ICS5342 DAC */
/* ICS5342 state: generic dac_info must stay the first member so the
 * DAC_PAR() downcast below is valid. */
struct ics5342_info
{
        struct dac_info dac;
        u8 mode;                /* last mode programmed by ics5342_set_mode */
};

#define DAC_PAR(info) ((struct ics5342_info *) info)

/* LSB is set to distinguish unused slots */
static const u8 ics5342_mode_table[DAC_MAX] = {
        [DAC_PSEUDO8_8]  = 0x01, [DAC_RGB1555_8]  = 0x21, [DAC_RGB0565_8]  = 0x61,
        [DAC_RGB0888_8]  = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
        [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
};
/*
 * Program the ICS5342 output mode. Table slots that are zero are invalid;
 * the LSB marker is masked off (code & 0xF0) before writing register 6.
 * The chosen mode is cached for ics5342_set_freq's PLL selection.
 * Returns 0 on success, -EINVAL for an unknown/unsupported mode.
 */
static int ics5342_set_mode(struct dac_info *info, int mode)
{
        u8 code;

        if (mode >= DAC_MAX)
                return -EINVAL;

        code = ics5342_mode_table[mode];

        if (! code)
                return -EINVAL;

        dac_write_reg(info, 6, code & 0xF0);
        DAC_PAR(info)->mode = mode;

        return 0;
}
/* PLL parameter limits for svga_compute_pll (see struct svga_pll in
 * <linux/svga.h> for field meanings — assumed m/n/r ranges and the
 * 14318 kHz reference clock; confirm against the header). */
static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
        60000, 250000, 14318};

/* pd4 - allow only posdivider 4 (r=2) */
static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
        60000, 335000, 14318};

/* 270 MHz should be upper bound for VCO clock according to specs,
   but that is too restrictive in pd4 case */
/*
 * Program the pixel clock PLL. In DAC_PSEUDO8_16 (multiplexed) mode only
 * postdivider 4 is usable, so the restricted PLL limits are selected.
 * The code[] array holds three (register, value) pairs for dac_write_regs:
 * reg 4 <- 3 (PLL index), reg 5 <- M-2, reg 5 <- (N-2) | (R << 5).
 * Returns 0 on success, -EINVAL if no PLL setting matches `freq`.
 */
static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
{
        u16 m, n, r;

        /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
        int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
                                  ? &ics5342_pll_pd4 : &ics5342_pll,
                                  freq, &m, &n, &r, 0);

        if (rv < 0) {
                return -EINVAL;
        } else {
                u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
                dac_write_regs(info, code, 3);
                return 0;
        }
}
/* Restore a safe default mode, then free the dac_info allocated by
 * ics5342_init. The handle is invalid after this call. */
static void ics5342_release(struct dac_info *info)
{
        ics5342_set_mode(info, DAC_PSEUDO8_8);
        kfree(info);
}

/* Ops table for the ICS5342; the unused get_* slots stay NULL. */
static struct dac_ops ics5342_ops = {
        .dac_set_mode   = ics5342_set_mode,
        .dac_set_freq   = ics5342_set_freq,
        .dac_release    = ics5342_release
};
/*
 * Allocate and initialise an ICS5342 handle (an ics5342_info, returned as
 * its embedded dac_info). `drr`/`dwr` are the board's register accessors,
 * `data` is passed back to them verbatim.
 * Returns NULL on allocation failure; release with dac_release().
 */
static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
{
        struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);

        if (! info)
                return NULL;

        info->dacops = &ics5342_ops;
        info->dac_read_regs = drr;
        info->dac_write_regs = dwr;
        info->data = data;
        DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* estimation */
        return info;
}
/* ------------------------------------------------------------------------- */
/* Standard VGA DAC I/O ports, indexed by code[0] & 3 in the accessors
 * below: 0 = write index, 1 = data, 2 = pixel mask, 3 = read index. */
static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
/*
 * Board-level DAC read callback. For each (register, value) pair, bit 2 of
 * the register code selects the extended register set via bit 7 of
 * sequencer register 0x1C; bits 0-1 pick the VGA DAC port. The original
 * SR1C value is restored when done.
 */
static void ark_dac_read_regs(void *data, u8 *code, int count)
{
        struct fb_info *info = data;
        struct arkfb_info *par;
        u8 regval;

        par = info->par;
        regval = vga_rseq(par->state.vgabase, 0x1C);
        while (count != 0)
        {
                vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
                code[1] = vga_r(par->state.vgabase, dac_regs[code[0] & 3]);
                count--;
                code += 2;
        }

        vga_wseq(par->state.vgabase, 0x1C, regval);
}
/*
 * Board-level DAC write callback; mirror image of ark_dac_read_regs.
 * code[] holds (register, value) pairs; bit 2 of the register code selects
 * the extended set via SR1C bit 7, bits 0-1 pick the VGA DAC port.
 */
static void ark_dac_write_regs(void *data, u8 *code, int count)
{
        struct fb_info *info = data;
        struct arkfb_info *par;
        u8 regval;

        par = info->par;
        regval = vga_rseq(par->state.vgabase, 0x1C);
        while (count != 0)
        {
                vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
                vga_w(par->state.vgabase, dac_regs[code[0] & 3], code[1]);
                count--;
                code += 2;
        }

        vga_wseq(par->state.vgabase, 0x1C, regval);
}
/*
 * Program the pixel clock. `pixclock` is a period (fbdev convention is
 * picoseconds — assumed here), so 1000000000 / pixclock yields a frequency
 * in kHz for the DAC. On failure the previous clock is kept; afterwards
 * the PLL-load enable bit is set in the VGA misc register.
 */
static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
{
        struct arkfb_info *par = info->par;
        u8 regval;

        int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
        if (rv < 0) {
                printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
                return;
        }

        /* Set VGA misc register */
        regval = vga_r(par->state.vgabase, VGA_MIS_R);
        vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
}
/* Open framebuffer */
/* Open framebuffer: on the first open, save the full VGA state so it can
 * be restored on the last release; further opens just bump the refcount. */
static int arkfb_open(struct fb_info *info, int user)
{
        struct arkfb_info *par = info->par;

        mutex_lock(&(par->open_lock));
        if (par->ref_count == 0) {
                void __iomem *vgabase = par->state.vgabase;

                /* reset vgastate but keep the register base we already mapped */
                memset(&(par->state), 0, sizeof(struct vgastate));
                par->state.vgabase = vgabase;
                par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
                par->state.num_crtc = 0x60;
                par->state.num_seq = 0x30;
                save_vga(&(par->state));
        }

        par->ref_count++;
        mutex_unlock(&(par->open_lock));

        return 0;
}
/* Close framebuffer */
/* Close framebuffer: on the last close, restore the VGA state saved in
 * arkfb_open and put the DAC back into its default 8bpp mode. */
static int arkfb_release(struct fb_info *info, int user)
{
        struct arkfb_info *par = info->par;

        mutex_lock(&(par->open_lock));
        if (par->ref_count == 0) {
                /* release without a matching open */
                mutex_unlock(&(par->open_lock));
                return -EINVAL;
        }

        if (par->ref_count == 1) {
                restore_vga(&(par->state));
                dac_set_mode(par->dac, DAC_PSEUDO8_8);
        }

        par->ref_count--;
        mutex_unlock(&(par->open_lock));

        return 0;
}
/* Validate passed in var */
/*
 * Validate (and normalise) a mode request: match it against the supported
 * pixel formats, clamp virtual to at least real resolution, align line
 * length, and verify memory and CRT timing limits.
 * Returns 0 on success or a negative errno.
 */
static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
        int rv, mem, step;

        /* Find appropriate format */
        rv = svga_match_format (arkfb_formats, var, NULL);
        if (rv < 0)
        {
                printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
                return rv;
        }

        /* Do not allow to have real resolution larger than virtual */
        if (var->xres > var->xres_virtual)
                var->xres_virtual = var->xres;

        if (var->yres > var->yres_virtual)
                var->yres_virtual = var->yres;

        /* Round up xres_virtual to have proper alignment of lines */
        step = arkfb_formats[rv].xresstep - 1;
        var->xres_virtual = (var->xres_virtual+step) & ~step;

        /* Check whether have enough memory */
        mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
        if (mem > info->screen_size)
        {
                printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
                return -EINVAL;
        }

        rv = svga_check_timings (&ark_timing_regs, var, info->node);
        if (rv < 0)
        {
                printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
                return rv;
        }

        /* Interlaced mode is broken */
        if (var->vmode & FB_VMODE_INTERLACED)
                return -EINVAL;

        return 0;
}
/* Set video mode from par */
/*
 * Set video mode from info->var. bpp == 0 means hardware text mode (tile
 * blitting); any other bpp is a graphics mode. The sequence is: blank the
 * screen, load default SVGA registers, do ARK-specific setup, program the
 * per-format registers and DAC mode, set the pixel clock and CRT timings,
 * clear the framebuffer, then unblank. Register write order matters here.
 */
static int arkfb_set_par(struct fb_info *info)
{
        struct arkfb_info *par = info->par;
        u32 value, mode, hmul, hdiv, offset_value, screen_size;
        u32 bpp = info->var.bits_per_pixel;
        u8 regval;

        if (bpp != 0) {
                info->fix.ypanstep = 1;
                info->fix.line_length = (info->var.xres_virtual * bpp) / 8;

                info->flags &= ~FBINFO_MISC_TILEBLITTING;
                info->tileops = NULL;

                /* in 4bpp supports 8p wide tiles only, any tiles otherwise */
                info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
                info->pixmap.blit_y = ~(u32)0;

                offset_value = (info->var.xres_virtual * bpp) / 64;
                screen_size = info->var.yres_virtual * info->fix.line_length;
        } else {
                /* text mode */
                info->fix.ypanstep = 16;
                info->fix.line_length = 0;

                info->flags |= FBINFO_MISC_TILEBLITTING;
                info->tileops = &arkfb_tile_ops;

                /* supports 8x16 tiles only */
                info->pixmap.blit_x = 1 << (8 - 1);
                info->pixmap.blit_y = 1 << (16 - 1);

                offset_value = info->var.xres_virtual / 16;
                screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
        }

        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;

        /* Unlock registers */
        svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);

        /* Blank screen and turn off sync */
        svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
        svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);

        /* Set default values */
        svga_set_default_gfx_regs(par->state.vgabase);
        svga_set_default_atc_regs(par->state.vgabase);
        svga_set_default_seq_regs(par->state.vgabase);
        svga_set_default_crt_regs(par->state.vgabase);
        svga_wcrt_multi(par->state.vgabase, ark_line_compare_regs, 0xFFFFFFFF);
        svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, 0);

        /* ARK specific initialization */
        svga_wseq_mask(par->state.vgabase, 0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
        svga_wseq_mask(par->state.vgabase, 0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */

        vga_wseq(par->state.vgabase, 0x13, info->fix.smem_start >> 16);
        vga_wseq(par->state.vgabase, 0x14, info->fix.smem_start >> 24);
        vga_wseq(par->state.vgabase, 0x15, 0);
        vga_wseq(par->state.vgabase, 0x16, 0);

        /* Set the FIFO threshold register */
        /* It is fascinating way to store 5-bit value in 8-bit register */
        regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
        vga_wseq(par->state.vgabase, 0x18, regval);

        /* Set the offset register */
        pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
        svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);

        /* fix for hi-res textmode */
        svga_wcrt_mask(par->state.vgabase, 0x40, 0x08, 0x08);

        if (info->var.vmode & FB_VMODE_DOUBLE)
                svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
        else
                svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);

        if (info->var.vmode & FB_VMODE_INTERLACED)
                svga_wcrt_mask(par->state.vgabase, 0x44, 0x04, 0x04);
        else
                svga_wcrt_mask(par->state.vgabase, 0x44, 0x00, 0x04);

        /* hmul/hdiv scale the horizontal timings for multiplexed/packed modes */
        hmul = 1;
        hdiv = 1;
        mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));

        /* Set mode-specific register values */
        switch (mode) {
        case 0:
                pr_debug("fb%d: text mode\n", info->node);
                svga_set_textmode_vga_regs(par->state.vgabase);

                vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
                dac_set_mode(par->dac, DAC_PSEUDO8_8);

                break;
        case 1:
                pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
                vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);

                vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
                dac_set_mode(par->dac, DAC_PSEUDO8_8);

                break;
        case 2:
                pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
                dac_set_mode(par->dac, DAC_PSEUDO8_8);

                break;
        case 3:
                pr_debug("fb%d: 8 bit pseudocolor\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */

                if (info->var.pixclock > 20000) {
                        pr_debug("fb%d: not using multiplex\n", info->node);
                        svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
                        dac_set_mode(par->dac, DAC_PSEUDO8_8);
                } else {
                        pr_debug("fb%d: using multiplex\n", info->node);
                        svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
                        dac_set_mode(par->dac, DAC_PSEUDO8_16);
                        hdiv = 2;
                }

                break;
        case 4:
                pr_debug("fb%d: 5/5/5 truecolor\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
                dac_set_mode(par->dac, DAC_RGB1555_16);

                break;
        case 5:
                pr_debug("fb%d: 5/6/5 truecolor\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
                dac_set_mode(par->dac, DAC_RGB0565_16);

                break;
        case 6:
                pr_debug("fb%d: 8/8/8 truecolor\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
                dac_set_mode(par->dac, DAC_RGB0888_16);
                hmul = 3;
                hdiv = 2;

                break;
        case 7:
                pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);

                vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
                svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
                dac_set_mode(par->dac, DAC_RGB8888_16);
                hmul = 2;

                break;
        default:
                printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
                return -EINVAL;
        }

        ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
        svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
                         (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
                         (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
                         hmul, info->node);

        /* Set interlaced mode start/end register */
        value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
        value = ((value * hmul / hdiv) / 8) - 5;
        vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);

        memset_io(info->screen_base, 0x00, screen_size);

        /* Device and screen back on */
        svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
        svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);

        return 0;
}
/* Set a colour register */
/*
 * Set a colour register. Pseudocolor depths (0/4/8 bpp) go straight to
 * the VGA palette DAC; truecolor depths only fill the 16-entry
 * pseudo_palette used by the generic drawing functions. Colour components
 * arrive as 16-bit values, hence the >> 10 (to 6-bit DAC) and the
 * masking for 5/6/8-bit fields.
 */
static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                                u_int transp, struct fb_info *fb)
{
        switch (fb->var.bits_per_pixel) {
        case 0:
        case 4:
                if (regno >= 16)
                        return -EINVAL;

                if ((fb->var.bits_per_pixel == 4) &&
                    (fb->var.nonstd == 0)) {
                        /* packed 4bpp: palette entries live 16 apart */
                        outb(0xF0, VGA_PEL_MSK);
                        outb(regno*16, VGA_PEL_IW);
                } else {
                        outb(0x0F, VGA_PEL_MSK);
                        outb(regno, VGA_PEL_IW);
                }

                outb(red >> 10, VGA_PEL_D);
                outb(green >> 10, VGA_PEL_D);
                outb(blue >> 10, VGA_PEL_D);

                break;
        case 8:
                if (regno >= 256)
                        return -EINVAL;

                outb(0xFF, VGA_PEL_MSK);
                outb(regno, VGA_PEL_IW);
                outb(red >> 10, VGA_PEL_D);
                outb(green >> 10, VGA_PEL_D);
                outb(blue >> 10, VGA_PEL_D);

                break;
        case 16:
                if (regno >= 16)
                        return 0;

                if (fb->var.green.length == 5)
                        /* RGB 1:5:5:5 */
                        ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
                                ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
                else if (fb->var.green.length == 6)
                        /* RGB 5:6:5 */
                        ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
                                ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
                else
                        return -EINVAL;

                break;
        case 24:
        case 32:
                if (regno >= 16)
                        return 0;

                ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
                        (green & 0xFF00) | ((blue & 0xFF00) >> 8);

                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/* Set the display blanking state */
/*
 * Set the display blanking state. NORMAL blanks the screen but keeps sync
 * running; the suspend/powerdown modes also drop sync (CR17 bit 7).
 */
static int arkfb_blank(int blank_mode, struct fb_info *info)
{
        struct arkfb_info *par = info->par;

        switch (blank_mode) {
        case FB_BLANK_UNBLANK:
                pr_debug("fb%d: unblank\n", info->node);
                svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
                svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
                break;
        case FB_BLANK_NORMAL:
                pr_debug("fb%d: blank\n", info->node);
                svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
                svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
                break;
        case FB_BLANK_POWERDOWN:
        case FB_BLANK_HSYNC_SUSPEND:
        case FB_BLANK_VSYNC_SUSPEND:
                pr_debug("fb%d: sync down\n", info->node);
                svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
                svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
                break;
        }
        return 0;
}
/* Pan the display */
/*
 * Pan the display by reprogramming the CRT start address. Text mode
 * (bpp == 0) addresses character cells; graphics modes convert the
 * byte offset to the unit the start-address registers expect
 * (shift by 2 for 4bpp, by 3 otherwise).
 */
static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
        struct arkfb_info *par = info->par;
        unsigned int offset;

        /* Calculate the offset */
        if (info->var.bits_per_pixel == 0) {
                offset = (var->yoffset / 16) * (info->var.xres_virtual / 2)
                       + (var->xoffset / 2);
                offset = offset >> 2;
        } else {
                offset = (var->yoffset * info->fix.line_length) +
                         (var->xoffset * info->var.bits_per_pixel / 8);
                offset = offset >> ((info->var.bits_per_pixel == 4) ? 2 : 3);
        }

        /* Set the offset */
        svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, offset);

        return 0;
}
/* ------------------------------------------------------------------------- */
/* Frame buffer operations */
/* Frame buffer operations: accelerated fill/imageblit wrappers plus the
 * generic cfb copyarea; everything else is implemented above. */
static struct fb_ops arkfb_ops = {
        .owner          = THIS_MODULE,
        .fb_open        = arkfb_open,
        .fb_release     = arkfb_release,
        .fb_check_var   = arkfb_check_var,
        .fb_set_par     = arkfb_set_par,
        .fb_setcolreg   = arkfb_setcolreg,
        .fb_blank       = arkfb_blank,
        .fb_pan_display = arkfb_pan_display,
        .fb_fillrect    = arkfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = arkfb_imageblit,
        .fb_get_caps    = svga_get_caps,
};
/* ------------------------------------------------------------------------- */
/* PCI probe */
/*
 * PCI probe: bring up one ARK 2000PV adapter — allocate fb_info, enable
 * the PCI device, initialise the ICS5342 RAMDAC, map the framebuffer,
 * pick the startup mode and register the framebuffer. All acquired
 * resources are unwound in reverse order on the error path.
 *
 * Fix: error message said "cannot register framebugger" — corrected to
 * "cannot register framebuffer".
 */
static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        struct pci_bus_region bus_reg;
        struct resource vga_res;
        struct fb_info *info;
        struct arkfb_info *par;
        int rc;
        u8 regval;

        /* Ignore secondary VGA device because there is no VGA arbitration */
        if (! svga_primary_device(dev)) {
                dev_info(&(dev->dev), "ignoring secondary device\n");
                return -ENODEV;
        }

        /* Allocate and fill driver data structure */
        info = framebuffer_alloc(sizeof(struct arkfb_info), &(dev->dev));
        if (! info) {
                dev_err(&(dev->dev), "cannot allocate memory\n");
                return -ENOMEM;
        }

        par = info->par;
        mutex_init(&par->open_lock);

        info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
        info->fbops = &arkfb_ops;

        /* Prepare PCI device */
        rc = pci_enable_device(dev);
        if (rc < 0) {
                dev_err(info->device, "cannot enable PCI device\n");
                goto err_enable_device;
        }

        rc = pci_request_regions(dev, "arkfb");
        if (rc < 0) {
                dev_err(info->device, "cannot reserve framebuffer region\n");
                goto err_request_regions;
        }

        par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
        if (! par->dac) {
                rc = -ENOMEM;
                dev_err(info->device, "RAMDAC initialization failed\n");
                goto err_dac;
        }

        info->fix.smem_start = pci_resource_start(dev, 0);
        info->fix.smem_len = pci_resource_len(dev, 0);

        /* Map physical IO memory address into kernel space */
        info->screen_base = pci_iomap(dev, 0, 0);
        if (! info->screen_base) {
                rc = -ENOMEM;
                dev_err(info->device, "iomap for framebuffer failed\n");
                goto err_iomap;
        }

        /* Legacy VGA register ports, translated through the host bridge */
        bus_reg.start = 0;
        bus_reg.end = 64 * 1024;

        vga_res.flags = IORESOURCE_IO;

        pcibios_bus_to_resource(dev, &vga_res, &bus_reg);

        par->state.vgabase = (void __iomem *) vga_res.start;

        /* FIXME get memsize */
        regval = vga_rseq(par->state.vgabase, 0x10);
        info->screen_size = (1 << (regval >> 6)) << 20;
        info->fix.smem_len = info->screen_size;

        strcpy(info->fix.id, "ARK 2000PV");
        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
        info->fix.ypanstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->pseudo_palette = (void*) (par->pseudo_palette);

        /* Prepare startup mode */
        rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
        if (! ((rc == 1) || (rc == 2))) {
                rc = -EINVAL;
                dev_err(info->device, "mode %s not found\n", mode_option);
                goto err_find_mode;
        }

        rc = fb_alloc_cmap(&info->cmap, 256, 0);
        if (rc < 0) {
                dev_err(info->device, "cannot allocate colormap\n");
                goto err_alloc_cmap;
        }

        rc = register_framebuffer(info);
        if (rc < 0) {
                dev_err(info->device, "cannot register framebuffer\n");
                goto err_reg_fb;
        }

        printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
                 pci_name(dev), info->fix.smem_len >> 20);

        /* Record a reference to the driver data */
        pci_set_drvdata(dev, info);

#ifdef CONFIG_MTRR
        if (mtrr) {
                par->mtrr_reg = -1;
                par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
        }
#endif

        return 0;

        /* Error handling */
err_reg_fb:
        fb_dealloc_cmap(&info->cmap);
err_alloc_cmap:
err_find_mode:
        pci_iounmap(dev, info->screen_base);
err_iomap:
        dac_release(par->dac);
err_dac:
        pci_release_regions(dev);
err_request_regions:
/*      pci_disable_device(dev); */
err_enable_device:
        framebuffer_release(info);
        return rc;
}
/* PCI remove */
/* PCI remove: tear down everything acquired in ark_pci_probe, in reverse
 * order (MTRR, DAC, framebuffer registration, cmap, iomap, PCI regions). */
static void __devexit ark_pci_remove(struct pci_dev *dev)
{
        struct fb_info *info = pci_get_drvdata(dev);

        if (info) {
                struct arkfb_info *par = info->par;

#ifdef CONFIG_MTRR
                if (par->mtrr_reg >= 0) {
                        mtrr_del(par->mtrr_reg, 0, 0);
                        par->mtrr_reg = -1;
                }
#endif

                dac_release(par->dac);
                unregister_framebuffer(info);
                fb_dealloc_cmap(&info->cmap);

                pci_iounmap(dev, info->screen_base);
                pci_release_regions(dev);
/*              pci_disable_device(dev); */

                pci_set_drvdata(dev, NULL);
                framebuffer_release(info);
        }
}
#ifdef CONFIG_PM
/* PCI suspend */
/* PCI suspend: skip the hardware power-down for freeze events or when the
 * device is not opened; otherwise suspend fbcon and power the device down. */
static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
{
        struct fb_info *info = pci_get_drvdata(dev);
        struct arkfb_info *par = info->par;

        dev_info(info->device, "suspend\n");

        console_lock();
        mutex_lock(&(par->open_lock));

        if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
                mutex_unlock(&(par->open_lock));
                console_unlock();
                return 0;
        }

        fb_set_suspend(info, 1);

        pci_save_state(dev);
        pci_disable_device(dev);
        pci_set_power_state(dev, pci_choose_state(dev, state));

        mutex_unlock(&(par->open_lock));
        console_unlock();

        return 0;
}
/* PCI resume */
/* PCI resume: re-enable the device, reprogram the current mode via
 * arkfb_set_par and wake fbcon. No-op when the device is not opened. */
static int ark_pci_resume (struct pci_dev* dev)
{
        struct fb_info *info = pci_get_drvdata(dev);
        struct arkfb_info *par = info->par;

        dev_info(info->device, "resume\n");

        console_lock();
        mutex_lock(&(par->open_lock));

        if (par->ref_count == 0)
                goto fail;

        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);

        if (pci_enable_device(dev))
                goto fail;

        pci_set_master(dev);

        arkfb_set_par(info);
        fb_set_suspend(info, 0);

fail:
        mutex_unlock(&(par->open_lock));
        console_unlock();
        return 0;
}
#else
#define ark_pci_suspend NULL
#define ark_pci_resume NULL
#endif /* CONFIG_PM */
/* List of boards that we are trying to support */
/* List of boards that we are trying to support (ARK Logic 2000PV) */
static struct pci_device_id ark_devices[] __devinitdata = {
        {PCI_DEVICE(0xEDD8, 0xA099)},
        {0, 0, 0, 0, 0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, ark_devices);

/* PCI driver glue; suspend/resume compile to NULL without CONFIG_PM. */
static struct pci_driver arkfb_pci_driver = {
        .name           = "arkfb",
        .id_table       = ark_devices,
        .probe          = ark_pci_probe,
        .remove         = __devexit_p(ark_pci_remove),
        .suspend        = ark_pci_suspend,
        .resume         = ark_pci_resume,
};
/* Cleanup */
/* Module cleanup: unregister the PCI driver (triggers ark_pci_remove). */
static void __exit arkfb_cleanup(void)
{
        pr_debug("arkfb: cleaning up\n");
        pci_unregister_driver(&arkfb_pci_driver);
}
/* Driver Initialisation */
/* Module init: when built in, pick up the "arkfb" kernel command-line
 * option as the default mode, then register the PCI driver. */
static int __init arkfb_init(void)
{

#ifndef MODULE
        char *option = NULL;

        if (fb_get_options("arkfb", &option))
                return -ENODEV;

        if (option && *option)
                mode_option = option;
#endif

        pr_debug("arkfb: initializing\n");
        return pci_register_driver(&arkfb_pci_driver);
}
module_init(arkfb_init);
module_exit(arkfb_cleanup);
| gpl-2.0 |
TeamWin/android_kernel_huawei_mt2l03 | drivers/video/arkfb.c | 4788 | 33254 | /*
* linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
* with ICS 5342 dac (it is easy to add support for different dacs).
*
* Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Code is based on s3fb
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
/* Per-device driver state, stored in fb_info->par. */
struct arkfb_info {
        int mclk_freq;          /* memory clock — appears unused here; TODO confirm */
        int mtrr_reg;           /* MTRR handle for the framebuffer, -1 if none */

        struct dac_info *dac;   /* ICS5342 RAMDAC handle */
        struct vgastate state;  /* saved VGA state + register base */
        struct mutex open_lock; /* protects ref_count and state save/restore */
        unsigned int ref_count; /* number of opens */
        u32 pseudo_palette[16]; /* truecolor palette for the cfb helpers */
};
/* ------------------------------------------------------------------------- */
/* Supported pixel formats for svga_match_format; the index of the matched
 * entry is the `mode` number used in arkfb_set_par's switch (0 = text,
 * 1/2 = 4bpp packed/planar, 3 = 8bpp, 4-7 = truecolor variants). */
static const struct svga_fb_format arkfb_formats[] = {
        { 0,  {0, 6, 0},  {0, 6, 0},  {0, 6, 0}, {0, 0, 0}, 0,
                FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4,   FB_VISUAL_PSEUDOCOLOR, 8, 8},
        { 4,  {0, 6, 0},  {0, 6, 0},  {0, 6, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_PSEUDOCOLOR, 8, 16},
        { 4,  {0, 6, 0},  {0, 6, 0},  {0, 6, 0}, {0, 0, 0}, 1,
                FB_TYPE_INTERLEAVED_PLANES, 1,          FB_VISUAL_PSEUDOCOLOR, 8, 16},
        { 8,  {0, 6, 0},  {0, 6, 0},  {0, 6, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_PSEUDOCOLOR, 8, 8},
        {16,  {10, 5, 0}, {5, 5, 0},  {0, 5, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_TRUECOLOR, 4, 4},
        {16,  {11, 5, 0}, {5, 6, 0},  {0, 5, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_TRUECOLOR, 4, 4},
        {24,  {16, 8, 0}, {8, 8, 0},  {0, 8, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_TRUECOLOR, 8, 8},
        {32,  {16, 8, 0}, {8, 8, 0},  {0, 8, 0}, {0, 0, 0}, 0,
                FB_TYPE_PACKED_PIXELS, 0,               FB_VISUAL_TRUECOLOR, 2, 2},
        SVGA_FORMAT_END
};
/* CRT timing register sets */
/* CRT timing register sets: each vga_regset lists {CRT register, low bit,
 * high bit} slices that together hold one timing value (ARK extends the
 * standard VGA fields with bits in CR40/CR41). */
static const struct vga_regset ark_h_total_regs[]        = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
static const struct vga_regset ark_h_display_regs[]      = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_h_blank_start_regs[]  = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
static const struct vga_regset ark_h_blank_end_regs[]    = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
static const struct vga_regset ark_h_sync_start_regs[]   = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
static const struct vga_regset ark_h_sync_end_regs[]     = {{0x05, 0, 4}, VGA_REGSET_END};

static const struct vga_regset ark_v_total_regs[]        = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
static const struct vga_regset ark_v_display_regs[]      = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_v_blank_start_regs[]  = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
// const struct vga_regset ark_v_blank_end_regs[]        = {{0x16, 0, 6}, VGA_REGSET_END};
static const struct vga_regset ark_v_blank_end_regs[]    = {{0x16, 0, 7}, VGA_REGSET_END};
static const struct vga_regset ark_v_sync_start_regs[]   = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
static const struct vga_regset ark_v_sync_end_regs[]     = {{0x11, 0, 3}, VGA_REGSET_END};

static const struct vga_regset ark_line_compare_regs[]   = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
static const struct vga_regset ark_start_address_regs[]  = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
static const struct vga_regset ark_offset_regs[]         = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};

/* Bundle consumed by svga_check_timings / svga_set_timings. */
static const struct svga_timing_regs ark_timing_regs     = {
        ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
        ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
        ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
        ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
};
/* ------------------------------------------------------------------------- */
/* Module parameters */
/* Default video mode; overridable via the "mode"/"mode_option" module
 * parameters or the "arkfb" kernel command-line option (see arkfb_init). */
static char *mode_option __devinitdata = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;            /* enable write-combining on the framebuffer */
#endif

MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");

module_param(mode_option, charp, 0444);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0444);
MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");

#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
#endif

/* FIFO threshold written (bit-scattered) into SR18 by arkfb_set_par */
static int threshold = 4;
module_param(threshold, int, 0644);
MODULE_PARM_DESC(threshold, "FIFO threshold");
/* ------------------------------------------------------------------------- */
/*
 * Upload a text-mode font into video memory (tile_ops.fb_settile). Only
 * 8x16, 1-bit, 256-glyph fonts are accepted; each scanline is written to
 * two plane locations 128*8 bytes apart, and glyphs are laid out in
 * groups of eight.
 */
static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
{
        const u8 *font = map->data;
        u8 __iomem *fb = (u8 __iomem *)info->screen_base;
        int i, c;

        if ((map->width != 8) || (map->height != 16) ||
            (map->depth != 1) || (map->length != 256)) {
                printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
                       "height %d, depth %d, length %d\n", info->node,
                       map->width, map->height, map->depth, map->length);
                return;
        }

        fb += 2;
        for (c = 0; c < map->length; c++) {
                for (i = 0; i < map->height; i++) {
                        fb_writeb(font[i], &fb[i * 4]);
                        fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
                }
                fb += 128;

                if ((c % 8) == 7)
                        fb += 128*8;

                font += map->height;
        }
}
/* Move/shape the text-mode hardware cursor (delegates to the svga core). */
static void arkfb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
{
        struct arkfb_info *par = info->par;

        svga_tilecursor(par->state.vgabase, info, cursor);
}

/* Tile-blitting operations for text mode; only font upload and the cursor
 * are chip-specific, the rest uses the generic svga helpers. */
static struct fb_tile_ops arkfb_tile_ops = {
        .fb_settile     = arkfb_settile,
        .fb_tilecopy    = svga_tilecopy,
        .fb_tilefill    = svga_tilefill,
        .fb_tileblit    = svga_tileblit,
        .fb_tilecursor  = arkfb_tilecursor,
        .fb_get_tilemax = svga_get_tilemax,
};
/* ------------------------------------------------------------------------- */
/* image data is MSB-first, fb structure is MSB-first too */
/*
 * Widen each of the four low colour bits to a full byte: bit k of c
 * becomes byte k of the result (0x00 or 0xFF).
 */
static inline u32 expand_color(u32 c)
{
        u32 bytes = 0;

        bytes |= (c & 1);
        bytes |= (c & 2) << 7;
        bytes |= (c & 4) << 14;
        bytes |= (c & 8) << 21;

        return bytes * 0xFF;
}
/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
/*
 * Blit a 1-bit source image onto the 4bpp interleaved-planes framebuffer;
 * expanded fg/bg words let one source byte (8 pixels) be resolved with a
 * single 32-bit write. Routed here only for 8-pixel-aligned requests.
 */
static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
{
        u32 fg = expand_color(image->fg_color);
        u32 bg = expand_color(image->bg_color);
        const u8 *src1, *src;
        u8 __iomem *dst1;
        u32 __iomem *dst;
        u32 val;
        int x, y;

        src1 = image->data;
        /* each 8-pixel group takes 4 bytes, one per plane */
        dst1 = info->screen_base + (image->dy * info->fix.line_length)
                 + ((image->dx / 8) * 4);

        for (y = 0; y < image->height; y++) {
                src = src1;
                dst = (u32 __iomem *) dst1;
                for (x = 0; x < image->width; x += 8) {
                        val = *(src++) * 0x01010101;    /* replicate bitmap byte into all planes */
                        val = (val & fg) | (~val & bg);
                        fb_writel(val, dst++);
                }
                src1 += image->width / 8;
                dst1 += info->fix.line_length;
        }
}
/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
/*
 * Solid fill on the 4bpp interleaved-planes framebuffer: the expanded
 * colour word covers 8 pixels per 32-bit write. Routed here only for
 * 8-pixel-aligned rectangles.
 */
static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        u32 fg = expand_color(rect->color);
        u8 __iomem *dst1;
        u32 __iomem *dst;
        int x, y;

        dst1 = info->screen_base + (rect->dy * info->fix.line_length)
                 + ((rect->dx / 8) * 4);

        for (y = 0; y < rect->height; y++) {
                dst = (u32 __iomem *) dst1;
                for (x = 0; x < rect->width; x += 8) {
                        fb_writel(fg, dst++);
                }
                dst1 += info->fix.line_length;
        }
}
/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
/*
 * Scatter the 8 source bits into the chip's nibble order (high nibble in
 * the low byte first), widening each bit to a full nibble (0x0 or 0xF).
 */
static inline u32 expand_pixel(u32 c)
{
        u32 mask = ((c & 1) << 24)
                 | ((c & 2) << 27)
                 | ((c & 4) << 14)
                 | ((c & 8) << 17)
                 | ((c & 16) << 4)
                 | ((c & 32) << 7)
                 | ((c & 64) >> 6)
                 | ((c & 128) >> 3);

        return mask * 0xF;
}
/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
/*
 * Mono->4bpp image blit for packed-pixel (chunky) layout.  Each source
 * byte covers 8 pixels; expand_pixel() spreads it into eight nibbles,
 * which then select between fg and bg nibble patterns.
 * Caller (arkfb_imageblit) guarantees width and dx are multiples of 8.
 */
static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
{
/* replicate the 4-bit colour index into every nibble */
u32 fg = image->fg_color * 0x11111111;
u32 bg = image->bg_color * 0x11111111;
const u8 *src1, *src;
u8 __iomem *dst1;
u32 __iomem *dst;
u32 val;
int x, y;
src1 = image->data;
dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ ((image->dx / 8) * 4);
for (y = 0; y < image->height; y++) {
src = src1;
dst = (u32 __iomem *) dst1;
for (x = 0; x < image->width; x += 8) {
val = expand_pixel(*(src++));
val = (val & fg) | (~val & bg);
fb_writel(val, dst++);
}
src1 += image->width / 8;
dst1 += info->fix.line_length;
}
}
/*
 * Imageblit dispatcher: use the accelerated 4bpp expansion routines
 * only for 8-pixel-aligned monochrome blits; everything else goes to
 * the generic cfb implementation.
 */
static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	int accel = (info->var.bits_per_pixel == 4)
		&& (image->depth == 1)
		&& ((image->width % 8) == 0)
		&& ((image->dx % 8) == 0);

	if (!accel) {
		cfb_imageblit(info, image);
		return;
	}

	if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
		arkfb_iplan_imageblit(info, image);
	else
		arkfb_cfb4_imageblit(info, image);
}
/*
 * Fillrect dispatcher: the interleaved-plane fast path applies only to
 * 8-pixel-aligned fills in 4bpp planar modes; otherwise fall back to
 * the generic cfb routine.
 */
static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	int use_iplan = (info->var.bits_per_pixel == 4)
		&& ((rect->width % 8) == 0)
		&& ((rect->dx % 8) == 0)
		&& (info->fix.type == FB_TYPE_INTERLEAVED_PLANES);

	if (use_iplan)
		arkfb_iplan_fillrect(info, rect);
	else
		cfb_fillrect(info, rect);
}
/* ------------------------------------------------------------------------- */
/* RAMDAC pixel formats: colour layout + width of the pixel path
 * (8- or 16-bit transfers between chip and DAC). */
enum
{
DAC_PSEUDO8_8,
DAC_RGB1555_8,
DAC_RGB0565_8,
DAC_RGB0888_8,
DAC_RGB8888_8,
DAC_PSEUDO8_16,
DAC_RGB1555_16,
DAC_RGB0565_16,
DAC_RGB0888_16,
DAC_RGB8888_16,
DAC_MAX
};
/* Per-DAC-chip operations; implementations may leave members NULL
 * (callers below invoke them unconditionally, so a given driver is
 * expected to fill in everything it uses). */
struct dac_ops {
int (*dac_get_mode)(struct dac_info *info);
int (*dac_set_mode)(struct dac_info *info, int mode);
int (*dac_get_freq)(struct dac_info *info, int channel);
int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
void (*dac_release)(struct dac_info *info);
};
/* Register access callbacks supplied by the board driver.
 * code[] holds (register, value) pairs, 2 bytes per transfer. */
typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
/* Generic DAC handle: ops table, raw register accessors and an opaque
 * cookie (here: the owning fb_info) passed back to the accessors. */
struct dac_info
{
struct dac_ops *dacops;
dac_read_regs_t dac_read_regs;
dac_write_regs_t dac_write_regs;
void *data;
};
/* Read one DAC register: buf[0] selects the register, the accessor
 * fills buf[1] with the value read. */
static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
{
	u8 buf[2];

	buf[0] = reg;
	buf[1] = 0;
	info->dac_read_regs(info->data, buf, 1);
	return buf[1];
}
/* Read several DAC registers; code[] holds count (reg, value) pairs. */
static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
{
info->dac_read_regs(info->data, code, count);
}
/* Write one DAC register: buf[0] selects the register, buf[1] is the
 * value to store. */
static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
{
	u8 buf[2];

	buf[0] = reg;
	buf[1] = val;
	info->dac_write_regs(info->data, buf, 1);
}
/* Write several DAC registers; code[] holds count (reg, value) pairs. */
static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
{
info->dac_write_regs(info->data, code, count);
}
/* Select a DAC pixel format (one of the DAC_* enum values). */
static inline int dac_set_mode(struct dac_info *info, int mode)
{
return info->dacops->dac_set_mode(info, mode);
}
/* Program the pixel clock of the given channel, freq in kHz. */
static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
{
return info->dacops->dac_set_freq(info, channel, freq);
}
/* Shut the DAC down and free the dac_info allocated by its init. */
static inline void dac_release(struct dac_info *info)
{
info->dacops->dac_release(info);
}
/* ------------------------------------------------------------------------- */
/* ICS5342 DAC */
/* ICS5342 private state: embeds the generic dac_info (must be the
 * first member so the DAC_PAR cast below is valid) plus the currently
 * programmed mode, needed by ics5342_set_freq. */
struct ics5342_info
{
struct dac_info dac;
u8 mode;
};
/* Downcast generic dac_info to the ICS5342 private structure. */
#define DAC_PAR(info) ((struct ics5342_info *) info)
/* LSB is set to distinguish unused slots */
static const u8 ics5342_mode_table[DAC_MAX] = {
[DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
[DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
[DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
};
/* note: DAC_RGB8888_8 has no table entry, so its slot stays 0 (unsupported) */
/*
 * Select an ICS5342 pixel format.  Returns -EINVAL for out-of-range or
 * unsupported modes (table entry 0).  Only the upper nibble of the
 * table value is a register bit pattern; the set LSB merely marks the
 * slot as valid and is masked off before the write to register 6.
 */
static int ics5342_set_mode(struct dac_info *info, int mode)
{
u8 code;
if (mode >= DAC_MAX)
return -EINVAL;
code = ics5342_mode_table[mode];
if (! code)
return -EINVAL;
dac_write_reg(info, 6, code & 0xF0);
/* remember the mode - ics5342_set_freq needs it for PLL selection */
DAC_PAR(info)->mode = mode;
return 0;
}
/* PLL limits for the ICS5342 (m/n ranges, post-divider range,
 * VCO frequency window in kHz, 14.318 MHz reference). */
static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
60000, 250000, 14318};
/* pd4 - allow only posdivider 4 (r=2) */
static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
60000, 335000, 14318};
/* 270 MHz should be upper bound for VCO clock according to specs,
but that is too restrictive in pd4 case */
/*
 * Program the ICS5342 pixel clock PLL (freq in kHz).  The channel
 * argument is unused - this DAC is always programmed via register set
 * 4/5.  Returns 0 on success, -EINVAL if no m/n/r combination exists.
 */
static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
{
u16 m, n, r;
/* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
? &ics5342_pll_pd4 : &ics5342_pll,
freq, &m, &n, &r, 0);
if (rv < 0) {
return -EINVAL;
} else {
/* reg 4 <- 3 (select clock f3), reg 5 <- M-2, reg 5 <- (N-2)|(r<<5) */
u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
dac_write_regs(info, code, 3);
return 0;
}
}
/* Put the DAC back into plain 8bpp pseudocolor and free the handle
 * allocated by ics5342_init(). */
static void ics5342_release(struct dac_info *info)
{
ics5342_set_mode(info, DAC_PSEUDO8_8);
kfree(info);
}
/* ops table; get_mode/get_freq are intentionally left NULL (unused) */
static struct dac_ops ics5342_ops = {
.dac_set_mode = ics5342_set_mode,
.dac_set_freq = ics5342_set_freq,
.dac_release = ics5342_release
};
/*
 * Allocate and initialize an ICS5342 handle.  drr/dwr are the board
 * driver's raw register accessors, data is the cookie passed back to
 * them.  Returns NULL on allocation failure; the result is freed by
 * dac_release() -> ics5342_release().
 */
static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
{
/* allocate the larger ics5342_info; dac_info is its first member */
struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
if (! info)
return NULL;
info->dacops = &ics5342_ops;
info->dac_read_regs = drr;
info->dac_write_regs = dwr;
info->data = data;
DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* estimation */
return info;
}
/* ------------------------------------------------------------------------- */
/* VGA palette/DAC I/O ports, indexed by the low 2 bits of code[0]:
 * write index, data, pixel mask, read index. */
static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
/*
 * DAC register read callback for the ICS5342 layer.  Bit 2 of code[0]
 * selects the extended register bank via bit 7 of sequencer reg 0x1C;
 * the original value of that register is restored afterwards.
 */
static void ark_dac_read_regs(void *data, u8 *code, int count)
{
struct fb_info *info = data;
struct arkfb_info *par;
u8 regval;
par = info->par;
regval = vga_rseq(par->state.vgabase, 0x1C);
while (count != 0)
{
vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
code[1] = vga_r(par->state.vgabase, dac_regs[code[0] & 3]);
count--;
code += 2;
}
vga_wseq(par->state.vgabase, 0x1C, regval);
}
/*
 * DAC register write callback, mirror of ark_dac_read_regs: bank
 * selection through sequencer reg 0x1C, then a write of code[1] to the
 * VGA DAC port chosen by the low bits of code[0].
 */
static void ark_dac_write_regs(void *data, u8 *code, int count)
{
struct fb_info *info = data;
struct arkfb_info *par;
u8 regval;
par = info->par;
regval = vga_rseq(par->state.vgabase, 0x1C);
while (count != 0)
{
vga_wseq(par->state.vgabase, 0x1C, regval | (code[0] & 4 ? 0x80 : 0));
vga_w(par->state.vgabase, dac_regs[code[0] & 3], code[1]);
count--;
code += 2;
}
vga_wseq(par->state.vgabase, 0x1C, regval);
}
/*
 * Program the pixel clock.  pixclock is a period in picoseconds (fbdev
 * convention), so 1000000000 / pixclock yields the frequency in kHz
 * expected by dac_set_freq.  On failure the old clock is kept.
 */
static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
{
struct arkfb_info *par = info->par;
u8 regval;
int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
if (rv < 0) {
printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
return;
}
/* Set VGA misc register */
regval = vga_r(par->state.vgabase, VGA_MIS_R);
vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
}
/* Open framebuffer */
/* Open framebuffer: on the first open, save the complete VGA state so
 * the last release can restore the pre-fbdev (text) mode.  ref_count
 * is protected by open_lock.  Always succeeds. */
static int arkfb_open(struct fb_info *info, int user)
{
struct arkfb_info *par = info->par;
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
/* preserve vgabase across the memset that clears old save data */
void __iomem *vgabase = par->state.vgabase;
memset(&(par->state), 0, sizeof(struct vgastate));
par->state.vgabase = vgabase;
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
/* extended register counts of the ARK chip */
par->state.num_crtc = 0x60;
par->state.num_seq = 0x30;
save_vga(&(par->state));
}
par->ref_count++;
mutex_unlock(&(par->open_lock));
return 0;
}
/* Close framebuffer */
/* Close framebuffer: on the last release, restore the VGA state saved
 * at first open and park the DAC in 8bpp pseudocolor.  Returns -EINVAL
 * on an unbalanced release. */
static int arkfb_release(struct fb_info *info, int user)
{
struct arkfb_info *par = info->par;
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
return -EINVAL;
}
if (par->ref_count == 1) {
restore_vga(&(par->state));
dac_set_mode(par->dac, DAC_PSEUDO8_8);
}
par->ref_count--;
mutex_unlock(&(par->open_lock));
return 0;
}
/* Validate passed in var */
/*
 * fb_ops.fb_check_var: validate and, where permitted, adjust the
 * requested mode.  Checks pixel format, virtual-vs-real resolution,
 * line alignment, memory requirement and CRT timing limits.
 * Returns 0 on success or a negative errno.
 */
static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
int rv, mem, step;
/* Find appropriate format */
rv = svga_match_format (arkfb_formats, var, NULL);
if (rv < 0)
{
printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
return rv;
}
/* Do not allow to have real resolution larger than virtual */
if (var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres > var->yres_virtual)
var->yres_virtual = var->yres;
/* Round up xres_virtual to have proper alignment of lines */
step = arkfb_formats[rv].xresstep - 1;
var->xres_virtual = (var->xres_virtual+step) & ~step;
/* Check whether have enough memory */
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
rv = svga_check_timings (&ark_timing_regs, var, info->node);
if (rv < 0)
{
printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
return rv;
}
/* Interlaced mode is broken */
if (var->vmode & FB_VMODE_INTERLACED)
return -EINVAL;
return 0;
}
/* Set video mode from par */
/*
 * fb_ops.fb_set_par: program the hardware for the mode in info->var.
 * bpp == 0 denotes text (tile-blitting) mode.  The sequence is:
 * derive layout values, blank+unlock, load default VGA/SVGA registers,
 * apply ARK-specific setup, pick mode-specific register values and the
 * pixel-path width/multiplex (hmul/hdiv scale the horizontal timings),
 * set the pixel clock and timings, clear the screen and unblank.
 */
static int arkfb_set_par(struct fb_info *info)
{
struct arkfb_info *par = info->par;
u32 value, mode, hmul, hdiv, offset_value, screen_size;
u32 bpp = info->var.bits_per_pixel;
u8 regval;
if (bpp != 0) {
/* graphics mode: packed or planar pixels */
info->fix.ypanstep = 1;
info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
info->flags &= ~FBINFO_MISC_TILEBLITTING;
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
info->pixmap.blit_y = ~(u32)0;
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
} else {
/* text mode: rendered via tile blitting */
info->fix.ypanstep = 16;
info->fix.line_length = 0;
info->flags |= FBINFO_MISC_TILEBLITTING;
info->tileops = &arkfb_tile_ops;
/* supports 8x16 tiles only */
info->pixmap.blit_x = 1 << (8 - 1);
info->pixmap.blit_y = 1 << (16 - 1);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
}
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
/* Unlock registers */
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);
/* Blank screen and turn off sync */
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
/* Set default values */
svga_set_default_gfx_regs(par->state.vgabase);
svga_set_default_atc_regs(par->state.vgabase);
svga_set_default_seq_regs(par->state.vgabase);
svga_set_default_crt_regs(par->state.vgabase);
svga_wcrt_multi(par->state.vgabase, ark_line_compare_regs, 0xFFFFFFFF);
svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, 0);
/* ARK specific initialization */
svga_wseq_mask(par->state.vgabase, 0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
svga_wseq_mask(par->state.vgabase, 0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
/* linear aperture base address, bits 16..31 */
vga_wseq(par->state.vgabase, 0x13, info->fix.smem_start >> 16);
vga_wseq(par->state.vgabase, 0x14, info->fix.smem_start >> 24);
vga_wseq(par->state.vgabase, 0x15, 0);
vga_wseq(par->state.vgabase, 0x16, 0);
/* Set the FIFO threshold register */
/* It is fascinating way to store 5-bit value in 8-bit register */
/* NOTE: 'threshold' is a module parameter defined earlier in this file */
regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
vga_wseq(par->state.vgabase, 0x18, regval);
/* Set the offset register */
pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);
/* fix for hi-res textmode */
svga_wcrt_mask(par->state.vgabase, 0x40, 0x08, 0x08);
if (info->var.vmode & FB_VMODE_DOUBLE)
svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
else
svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);
if (info->var.vmode & FB_VMODE_INTERLACED)
svga_wcrt_mask(par->state.vgabase, 0x44, 0x04, 0x04);
else
svga_wcrt_mask(par->state.vgabase, 0x44, 0x00, 0x04);
hmul = 1;
hdiv = 1;
mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
/* Set mode-specific register values */
switch (mode) {
case 0:
pr_debug("fb%d: text mode\n", info->node);
svga_set_textmode_vga_regs(par->state.vgabase);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 1:
pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 2:
pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */
/* slow clocks go through the 8-bit path; fast ones are multiplexed */
if (info->var.pixclock > 20000) {
pr_debug("fb%d: not using multiplex\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
} else {
pr_debug("fb%d: using multiplex\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_16);
hdiv = 2;
}
break;
case 4:
pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB1555_16);
break;
case 5:
pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0565_16);
break;
case 6:
pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0888_16);
/* 24bpp over a 16-bit path: 3 bytes per 2 transfers */
hmul = 3;
hdiv = 2;
break;
case 7:
pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB8888_16);
hmul = 2;
break;
default:
printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
return -EINVAL;
}
ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
hmul, info->node);
/* Set interlaced mode start/end register */
value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
/* clear the whole visible framebuffer */
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
return 0;
}
/* Set a colour register */
/*
 * fb_ops.fb_setcolreg: load one palette entry.  In <=8bpp modes the
 * VGA DAC is programmed directly (16-bit fbdev colour components are
 * shifted down to the DAC's 6 bits); in truecolor modes the value is
 * stored into the 16-entry software pseudo palette used by the console.
 */
static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *fb)
{
switch (fb->var.bits_per_pixel) {
case 0:
case 4:
if (regno >= 16)
return -EINVAL;
/* standard 4bpp: palette entries are spaced 16 apart */
if ((fb->var.bits_per_pixel == 4) &&
(fb->var.nonstd == 0)) {
outb(0xF0, VGA_PEL_MSK);
outb(regno*16, VGA_PEL_IW);
} else {
outb(0x0F, VGA_PEL_MSK);
outb(regno, VGA_PEL_IW);
}
outb(red >> 10, VGA_PEL_D);
outb(green >> 10, VGA_PEL_D);
outb(blue >> 10, VGA_PEL_D);
break;
case 8:
if (regno >= 256)
return -EINVAL;
outb(0xFF, VGA_PEL_MSK);
outb(regno, VGA_PEL_IW);
outb(red >> 10, VGA_PEL_D);
outb(green >> 10, VGA_PEL_D);
outb(blue >> 10, VGA_PEL_D);
break;
case 16:
if (regno >= 16)
return 0;
/* pack into RGB555 or RGB565 depending on the green width */
if (fb->var.green.length == 5)
((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
else if (fb->var.green.length == 6)
((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
else
return -EINVAL;
break;
case 24:
case 32:
if (regno >= 16)
return 0;
((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
(green & 0xFF00) | ((blue & 0xFF00) >> 8);
break;
default:
return -EINVAL;
}
return 0;
}
/* Set the display blanking state */
/*
 * fb_ops.fb_blank: sequencer reg 0x01 bit 5 blanks the screen, CRT
 * reg 0x17 bit 7 gates sync output.  All suspend levels are treated
 * identically (blank + sync off).
 */
static int arkfb_blank(int blank_mode, struct fb_info *info)
{
struct arkfb_info *par = info->par;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("fb%d: unblank\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_NORMAL:
pr_debug("fb%d: blank\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_POWERDOWN:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
pr_debug("fb%d: sync down\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
break;
}
return 0;
}
/* Pan the display */
/*
 * fb_ops.fb_pan_display: translate x/y offsets into a display start
 * address in hardware units (character cells in text mode, 4- or
 * 8-byte units in graphics modes) and write it to the start-address
 * registers.  Always succeeds (arkfb_check_var validated the range).
 */
static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct arkfb_info *par = info->par;
unsigned int offset;
/* Calculate the offset */
if (info->var.bits_per_pixel == 0) {
/* text mode: 16-pixel rows of 2-byte character cells */
offset = (var->yoffset / 16) * (info->var.xres_virtual / 2)
+ (var->xoffset / 2);
offset = offset >> 2;
} else {
/* graphics: byte offset scaled to 4-byte (4bpp) or 8-byte units */
offset = (var->yoffset * info->fix.line_length) +
(var->xoffset * info->var.bits_per_pixel / 8);
offset = offset >> ((info->var.bits_per_pixel == 4) ? 2 : 3);
}
/* Set the offset */
svga_wcrt_multi(par->state.vgabase, ark_start_address_regs, offset);
return 0;
}
/* ------------------------------------------------------------------------- */
/* Frame buffer operations */
/* Frame buffer operations: accelerated fillrect/imageblit dispatchers
 * defined above, generic cfb copyarea, svga tile-capability helper. */
static struct fb_ops arkfb_ops = {
.owner = THIS_MODULE,
.fb_open = arkfb_open,
.fb_release = arkfb_release,
.fb_check_var = arkfb_check_var,
.fb_set_par = arkfb_set_par,
.fb_setcolreg = arkfb_setcolreg,
.fb_blank = arkfb_blank,
.fb_pan_display = arkfb_pan_display,
.fb_fillrect = arkfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = arkfb_imageblit,
.fb_get_caps = svga_get_caps,
};
/* ------------------------------------------------------------------------- */
/* PCI probe */
/*
 * PCI probe: bring up one ARK Logic adapter.  Allocates the fb_info,
 * enables the PCI device, initializes the ICS5342 RAMDAC, maps the
 * linear framebuffer, probes the memory size from sequencer reg 0x10,
 * selects a startup mode and registers the framebuffer.  Errors unwind
 * in reverse order via the goto chain.  Returns 0 or a negative errno.
 */
static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct pci_bus_region bus_reg;
	struct resource vga_res;
	struct fb_info *info;
	struct arkfb_info *par;
	int rc;
	u8 regval;

	/* Ignore secondary VGA device because there is no VGA arbitration */
	if (!svga_primary_device(dev)) {
		dev_info(&(dev->dev), "ignoring secondary device\n");
		return -ENODEV;
	}

	/* Allocate and fill driver data structure */
	info = framebuffer_alloc(sizeof(struct arkfb_info), &(dev->dev));
	if (!info) {
		dev_err(&(dev->dev), "cannot allocate memory\n");
		return -ENOMEM;
	}
	par = info->par;
	mutex_init(&par->open_lock);
	info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
	info->fbops = &arkfb_ops;

	/* Prepare PCI device */
	rc = pci_enable_device(dev);
	if (rc < 0) {
		dev_err(info->device, "cannot enable PCI device\n");
		goto err_enable_device;
	}
	rc = pci_request_regions(dev, "arkfb");
	if (rc < 0) {
		dev_err(info->device, "cannot reserve framebuffer region\n");
		goto err_request_regions;
	}

	par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
	if (!par->dac) {
		rc = -ENOMEM;
		dev_err(info->device, "RAMDAC initialization failed\n");
		goto err_dac;
	}

	info->fix.smem_start = pci_resource_start(dev, 0);
	info->fix.smem_len = pci_resource_len(dev, 0);

	/* Map physical IO memory address into kernel space */
	info->screen_base = pci_iomap(dev, 0, 0);
	if (!info->screen_base) {
		rc = -ENOMEM;
		dev_err(info->device, "iomap for framebuffer failed\n");
		goto err_iomap;
	}

	/* VGA I/O registers are reached through legacy port space */
	bus_reg.start = 0;
	bus_reg.end = 64 * 1024;
	vga_res.flags = IORESOURCE_IO;
	pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
	par->state.vgabase = (void __iomem *) vga_res.start;

	/* FIXME get memsize */
	regval = vga_rseq(par->state.vgabase, 0x10);
	info->screen_size = (1 << (regval >> 6)) << 20;
	info->fix.smem_len = info->screen_size;

	strcpy(info->fix.id, "ARK 2000PV");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
	info->fix.ypanstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->pseudo_palette = (void *) (par->pseudo_palette);

	/* Prepare startup mode */
	rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
	if (!((rc == 1) || (rc == 2))) {
		rc = -EINVAL;
		dev_err(info->device, "mode %s not found\n", mode_option);
		goto err_find_mode;
	}

	rc = fb_alloc_cmap(&info->cmap, 256, 0);
	if (rc < 0) {
		dev_err(info->device, "cannot allocate colormap\n");
		goto err_alloc_cmap;
	}

	rc = register_framebuffer(info);
	if (rc < 0) {
		/* fixed typo: was "cannot register framebugger" */
		dev_err(info->device, "cannot register framebuffer\n");
		goto err_reg_fb;
	}

	printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
	       pci_name(dev), info->fix.smem_len >> 20);

	/* Record a reference to the driver data */
	pci_set_drvdata(dev, info);

#ifdef CONFIG_MTRR
	if (mtrr) {
		par->mtrr_reg = -1;
		par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
	}
#endif
	return 0;

	/* Error handling */
err_reg_fb:
	fb_dealloc_cmap(&info->cmap);
err_alloc_cmap:
err_find_mode:
	pci_iounmap(dev, info->screen_base);
err_iomap:
	dac_release(par->dac);
err_dac:
	pci_release_regions(dev);
err_request_regions:
	/* pci_disable_device(dev); */
err_enable_device:
	framebuffer_release(info);
	return rc;
}
/* PCI remove */
/* PCI remove: undo ark_pci_probe in reverse order (MTRR, DAC,
 * framebuffer registration, cmap, iomap, PCI regions, fb_info). */
static void __devexit ark_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
if (info) {
struct arkfb_info *par = info->par;
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
#endif
dac_release(par->dac);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
pci_iounmap(dev, info->screen_base);
pci_release_regions(dev);
/* pci_disable_device(dev); */
pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
#ifdef CONFIG_PM
/* PCI suspend: skip the hardware power-down for hibernation freeze or
 * when the device is not opened; otherwise suspend the fbdev layer and
 * put the PCI device into the requested low-power state.  Runs under
 * the console lock and the driver's open_lock. */
static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(dev);
struct arkfb_info *par = info->par;
dev_info(info->device, "suspend\n");
console_lock();
mutex_lock(&(par->open_lock));
if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
fb_set_suspend(info, 1);
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
/* PCI resume: re-enable the PCI device, reprogram the mode via
 * arkfb_set_par and wake the fbdev layer.  Nothing to do when the
 * device was not opened at suspend time.  Always returns 0 (resume
 * failures are only reflected by skipping the restore). */
static int ark_pci_resume (struct pci_dev* dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct arkfb_info *par = info->par;
dev_info(info->device, "resume\n");
console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0)
goto fail;
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
if (pci_enable_device(dev))
goto fail;
pci_set_master(dev);
arkfb_set_par(info);
fb_set_suspend(info, 0);
fail:
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
#else
#define ark_pci_suspend NULL
#define ark_pci_resume NULL
#endif /* CONFIG_PM */
/* List of boards that we are trying to support */
/* List of boards that we are trying to support:
 * 0xEDD8 = ARK Logic, 0xA099 = 2000PV */
static struct pci_device_id ark_devices[] __devinitdata = {
{PCI_DEVICE(0xEDD8, 0xA099)},
{0, 0, 0, 0, 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, ark_devices);
/* PCI driver glue; suspend/resume are NULL when CONFIG_PM is off. */
static struct pci_driver arkfb_pci_driver = {
.name = "arkfb",
.id_table = ark_devices,
.probe = ark_pci_probe,
.remove = __devexit_p(ark_pci_remove),
.suspend = ark_pci_suspend,
.resume = ark_pci_resume,
};
/* Cleanup */
/* Module exit: unregister the PCI driver (removes all bound devices). */
static void __exit arkfb_cleanup(void)
{
pr_debug("arkfb: cleaning up\n");
pci_unregister_driver(&arkfb_pci_driver);
}
/* Driver Initialisation */
static int __init arkfb_init(void)
{
#ifndef MODULE
/* built-in: pick up "video=arkfb:..." kernel command line options */
char *option = NULL;
if (fb_get_options("arkfb", &option))
return -ENODEV;
if (option && *option)
mode_option = option;
#endif
pr_debug("arkfb: initializing\n");
return pci_register_driver(&arkfb_pci_driver);
}
module_init(arkfb_init);
module_exit(arkfb_cleanup);
| gpl-2.0 |
MassStash/htc_m8wl_kernel_sense_4.4.4 | drivers/scsi/ibmmca.c | 4788 | 87152 | /*
Low Level Linux Driver for the IBM Microchannel SCSI Subsystem for
Linux Kernel >= 2.4.0.
Copyright (c) 1995 Strom Systems, Inc. under the terms of the GNU
General Public License. Written by Martin Kolinek, December 1995.
Further development by: Chris Beauregard, Klaus Kudielka, Michael Lang
See the file Documentation/scsi/ibmmca.txt for a detailed description
of this driver, the commandline arguments and the history of its
development.
See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
updates, info and ADF-files for adapters supported by this driver.
Alan Cox <alan@lxorguk.ukuu.org.uk>
Updated for Linux 2.5.45 to use the new error handler, cleaned up the
lock macros and did a few unavoidable locking tweaks, plus one locking
fix in the irq and completion path.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mca.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
/* Common forward declarations for all Linux-versions: */
/* (entry points referenced by the scsi_host_template defined later) */
static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *);
static int ibmmca_abort (Scsi_Cmnd *);
static int ibmmca_host_reset (Scsi_Cmnd *);
static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
/* driver configuration */
#define IM_MAX_HOSTS 8 /* maximum number of host adapters */
#define IM_RESET_DELAY 60 /* seconds allowed for a reset */
/* driver debugging - #undef all for normal operation */
/* if defined: count interrupts and ignore this special one: */
#undef IM_DEBUG_TIMEOUT //50
#define TIMEOUT_PUN 0
#define TIMEOUT_LUN 0
/* verbose interrupt: */
#undef IM_DEBUG_INT
/* verbose queuecommand: */
#undef IM_DEBUG_CMD
/* verbose queuecommand for specific SCSI-device type: */
#undef IM_DEBUG_CMD_SPEC_DEV
/* verbose device probing */
#undef IM_DEBUG_PROBE
/* device type that shall be displayed on syslog (only during debugging): */
#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
/* relative addresses of hardware registers on a subsystem */
#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
/* basic I/O-port of first adapter */
#define IM_IO_PORT 0x3540
/* maximum number of hosts that can be found */
#define IM_N_IO_PORT 8
/*requests going into the upper nibble of the Attention register */
/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
#define IM_IMM_CMD 0x10 /*immediate command */
#define IM_SCB 0x30 /*Subsystem Control Block command */
#define IM_LONG_SCB 0x40 /*long Subsystem Control Block command */
#define IM_EOI 0xe0 /*end-of-interrupt request */
/*values for bits 7,1,0 of Basic Control reg. (bits 6-2 reserved) */
#define IM_HW_RESET 0x80 /*hardware reset */
#define IM_ENABLE_DMA 0x02 /*enable subsystem's busmaster DMA */
#define IM_ENABLE_INTR 0x01 /*enable interrupts to the system */
/*to interpret the upper nibble of Interrupt Status register */
/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
#define IM_SCB_CMD_COMPLETED 0x10
#define IM_SCB_CMD_COMPLETED_WITH_RETRIES 0x50
#define IM_LOOP_SCATTER_BUFFER_FULL 0x60
#define IM_ADAPTER_HW_FAILURE 0x70
#define IM_IMMEDIATE_CMD_COMPLETED 0xa0
#define IM_CMD_COMPLETED_WITH_FAILURE 0xc0
#define IM_CMD_ERROR 0xe0
#define IM_SOFTWARE_SEQUENCING_ERROR 0xf0
/*to interpret bits 3-0 of Basic Status register (bits 7-4 reserved) */
#define IM_CMD_REG_FULL 0x08
#define IM_CMD_REG_EMPTY 0x04
#define IM_INTR_REQUEST 0x02
#define IM_BUSY 0x01
/*immediate commands (word written into low 2 bytes of command reg) */
#define IM_RESET_IMM_CMD 0x0400
#define IM_FEATURE_CTR_IMM_CMD 0x040c
#define IM_DMA_PACING_IMM_CMD 0x040d
#define IM_ASSIGN_IMM_CMD 0x040e
#define IM_ABORT_IMM_CMD 0x040f
#define IM_FORMAT_PREP_IMM_CMD 0x0417
/*SCB (Subsystem Control Block) structure */
/* (command descriptor handed to the subsystem; layout is fixed by the
 * IBM SCSI adapter hardware - do not reorder fields) */
struct im_scb {
unsigned short command; /*command word (read, etc.) */
unsigned short enable; /*enable word, modifies cmd */
union {
unsigned long log_blk_adr; /*block address on SCSI device */
unsigned char scsi_cmd_length; /*6,10,12, for other scsi cmd */
} u1;
unsigned long sys_buf_adr; /*physical system memory adr */
unsigned long sys_buf_length; /*size of sys mem buffer */
unsigned long tsb_adr; /*Termination Status Block adr */
unsigned long scb_chain_adr; /*optional SCB chain address */
union {
struct {
unsigned short count; /*block count, on SCSI device */
unsigned short length; /*block length, on SCSI device */
} blk;
unsigned char scsi_command[12]; /*other scsi command */
} u2;
};
/*structure scatter-gather element (for list of system memory areas) */
struct im_sge {
void *address;
unsigned long byte_length;
};
/*structure returned by a get_pos_info command: */
/* (adapter capability/configuration record; layout fixed by hardware) */
struct im_pos_info {
unsigned short pos_id; /* adapter id */
unsigned char pos_3a; /* pos 3 (if pos 6 = 0) */
unsigned char pos_2; /* pos 2 */
unsigned char int_level; /* interrupt level IRQ 11 or 14 */
unsigned char pos_4a; /* pos 4 (if pos 6 = 0) */
unsigned short connector_size; /* MCA connector size: 16 or 32 Bit */
unsigned char num_luns; /* number of supported luns per device */
unsigned char num_puns; /* number of supported puns */
unsigned char pacing_factor; /* pacing factor */
unsigned char num_ldns; /* number of ldns available */
unsigned char eoi_off; /* time EOI and interrupt inactive */
unsigned char max_busy; /* time between reset and busy on */
unsigned short cache_stat; /* ldn cachestat. Bit=1 = not cached */
unsigned short retry_stat; /* retry status of ldns. Bit=1=disabled */
unsigned char pos_4b; /* pos 4 (if pos 6 = 1) */
unsigned char pos_3b; /* pos 3 (if pos 6 = 1) */
unsigned char pos_6; /* pos 6 */
unsigned char pos_5; /* pos 5 */
unsigned short max_overlap; /* maximum overlapping requests */
unsigned short num_bus; /* number of SCSI-busses */
};
/*values for SCB command word */
#define IM_NO_SYNCHRONOUS 0x0040 /*flag for any command */
#define IM_NO_DISCONNECT 0x0080 /*flag for any command */
#define IM_READ_DATA_CMD 0x1c01
#define IM_WRITE_DATA_CMD 0x1c02
#define IM_READ_VERIFY_CMD 0x1c03
#define IM_WRITE_VERIFY_CMD 0x1c04
#define IM_REQUEST_SENSE_CMD 0x1c08
#define IM_READ_CAPACITY_CMD 0x1c09
#define IM_DEVICE_INQUIRY_CMD 0x1c0b
#define IM_READ_LOGICAL_CMD 0x1c2a
#define IM_OTHER_SCSI_CMD_CMD 0x241f
/* unused, but supported, SCB commands */
#define IM_GET_COMMAND_COMPLETE_STATUS_CMD 0x1c07 /* command status */
#define IM_GET_POS_INFO_CMD 0x1c0a /* returns neat stuff */
#define IM_READ_PREFETCH_CMD 0x1c31 /* caching controller only */
#define IM_FOMAT_UNIT_CMD 0x1c16 /* format unit */
#define IM_REASSIGN_BLOCK_CMD 0x1c18 /* in case of error */
/*values to set bits in the enable word of SCB */
#define IM_READ_CONTROL 0x8000
#define IM_REPORT_TSB_ONLY_ON_ERROR 0x4000
#define IM_RETRY_ENABLE 0x2000
#define IM_POINTER_TO_LIST 0x1000
#define IM_SUPRESS_EXCEPTION_SHORT 0x0400
#define IM_BYPASS_BUFFER 0x0200
#define IM_CHAIN_ON_NO_ERROR 0x0001
/*TSB (Termination Status Block) structure */
/* (completion/status record written back by the subsystem; layout
 * fixed by hardware - do not reorder fields) */
struct im_tsb {
unsigned short end_status;
unsigned short reserved1;
unsigned long residual_byte_count;
unsigned long sg_list_element_adr;
unsigned short status_length;
unsigned char dev_status;
unsigned char cmd_status;
unsigned char dev_error;
unsigned char cmd_error;
unsigned short reserved2;
unsigned short reserved3;
unsigned short low_of_last_scb_adr;
unsigned short high_of_last_scb_adr;
};
/*subsystem uses interrupt request level 14 */
#define IM_IRQ 14
/*SCSI-2 F/W may evade to interrupt 11 */
#define IM_IRQ_FW 11
/* Model 95 has an additional alphanumeric display, which can be used
to display SCSI-activities. 8595 models do not have any disk led, which
makes this feature quite useful.
The regular PS/2 disk led is turned on/off by bits 6,7 of system
control port. */
/* LED display-port (actually, last LED on display) */
#define MOD95_LED_PORT 0x108
/* system-control-register of PS/2s with diskindicator */
#define PS2_SYS_CTR 0x92
/* activity displaying methods */
#define LED_DISP 1
#define LED_ADISP 2
#define LED_ACTIVITY 4
/* failed intr */
#define CMD_FAIL 255
/* The SCSI-ID(!) of the accessed SCSI-device is shown on PS/2-95 machines' LED
displays. ldn is no longer displayed here, because the ldn mapping is now
done dynamically and the ldn <-> pun,lun maps can be looked-up at boottime
or during uptime in /proc/scsi/ibmmca/<host_no> in case of trouble,
interest, debugging or just for having fun. The left number gives the
host-adapter number and the right shows the accessed SCSI-ID. */
/* display_mode is set by the ibmmcascsi= command line arg */
static int display_mode = 0;
/* set default adapter timeout */
static unsigned int adapter_timeout = 45;
/* for probing on feature-command: */
static unsigned int global_command_error_excuse = 0;
/* global setting by command line for adapter_speed */
static int global_adapter_speed = 0; /* full speed by default */
/* Panel / LED on, do it right for F/W addressing, too. adisplay will
 * just ignore ids>7, as the panel has only 7 digits available */
#define PS2_DISK_LED_ON(ad,id) { if (display_mode & LED_DISP) { if (id>9) \
outw((ad+48)|((id+55)<<8), MOD95_LED_PORT ); else \
outw((ad+48)|((id+48)<<8), MOD95_LED_PORT ); } else \
if (display_mode & LED_ADISP) { if (id<7) outb((char)(id+48),MOD95_LED_PORT+1+id); \
outb((char)(ad+48), MOD95_LED_PORT); } \
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR); }
/* Panel / LED off */
/* bug fixed, Dec 15, 1997, where | was replaced by & here */
#define PS2_DISK_LED_OFF() { if (display_mode & LED_DISP) \
outw(0x2020, MOD95_LED_PORT ); else if (display_mode & LED_ADISP) { \
outl(0x20202020,MOD95_LED_PORT); outl(0x20202020,MOD95_LED_PORT+4); } \
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
/* types of different supported hardware that goes to hostdata special */
#define IBM_SCSI2_FW 0
#define IBM_7568_WCACHE 1
#define IBM_EXP_UNIT 2
#define IBM_SCSI_WCACHE 3
#define IBM_SCSI 4
#define IBM_INTEGSCSI 5
/* other special flags for hostdata structure */
#define FORCED_DETECTION 100
#define INTEGRATED_SCSI 101
/* List of possible IBM-SCSI-adapters */
static short ibmmca_id_table[] = {
0x8efc,
0x8efd,
0x8ef8,
0x8eff,
0x8efe,
/* No entry for integrated SCSI, that's part of the register */
0
};
/* Human-readable adapter names, indexed by the per-host 'special' value
   (see the IBM_* defines above). */
static const char *ibmmca_description[] = {
	"IBM SCSI-2 F/W Adapter",	/* special = 0 */
	"IBM 7568 Industrial Computer SCSI Adapter w/Cache",	/* special = 1 */
	"IBM Expansion Unit SCSI Controller",	/* special = 2 */
	"IBM SCSI Adapter w/Cache",	/* special = 3 */
	"IBM SCSI Adapter",	/* special = 4 */
	"IBM Integrated SCSI Controller",	/* special = 5 */
};
/* Maximum logical device number (ldns range from 0 to 14). 15 is the address
   of the adapter itself. */
#define MAX_LOG_DEV 15
/* local data for a logical device (one instance per ldn, 0..MAX_LOG_DEV) */
struct logical_device {
	struct im_scb scb;	/* SCSI-subsystem-control-block structure */
	struct im_tsb tsb;	/* SCSI command complete status block structure */
	struct im_sge sge[16];	/* scatter gather list structure */
	unsigned char buf[256];	/* SCSI command return data buffer */
	Scsi_Cmnd *cmd;		/* SCSI-command that is currently in progress */
	int device_type;	/* type of the SCSI-device. See include/scsi/scsi.h
				   for interpretation of the possible values */
	int block_length;	/* blocksize of a particular logical SCSI-device */
	int cache_flag;		/* 1 if this is uncached, 0 if cache is present for ldn */
	int retry_flag;		/* 1 if adapter retry is disabled, 0 if enabled */
};
/* statistics of the driver during operations (reported via proc_info) */
struct Driver_Statistics {
	/* SCSI statistics on the adapter */
	int ldn_access[MAX_LOG_DEV + 1];	/* total accesses on a ldn */
	int ldn_read_access[MAX_LOG_DEV + 1];	/* total read-accesses on a ldn */
	int ldn_write_access[MAX_LOG_DEV + 1];	/* total write-accesses on a ldn */
	int ldn_inquiry_access[MAX_LOG_DEV + 1];	/* total inquiries on a ldn */
	int ldn_modeselect_access[MAX_LOG_DEV + 1];	/* total mode selects on ldn */
	int scbs;		/* short SCBs queued */
	int long_scbs;		/* long SCBs queued */
	int total_accesses;	/* total accesses on all ldns */
	int total_interrupts;	/* total interrupts (should be
				   same as total_accesses) */
	int total_errors;	/* commands completed with error */
	/* dynamical assignment statistics */
	int total_scsi_devices;	/* number of physical pun,lun */
	int dyn_flag;		/* flag showing dynamical mode */
	int dynamical_assignments;	/* number of remappings of ldns */
	int ldn_assignments[MAX_LOG_DEV + 1];	/* number of remappings of each
						   ldn */
};
/* data structure for each host adapter (hung off Scsi_Host->hostdata;
   always accessed through the accessor macros that follow) */
struct ibmmca_hostdata {
	/* array of logical devices: */
	struct logical_device _ld[MAX_LOG_DEV + 1];
	/* array to convert (pun, lun) into logical device number: */
	unsigned char _get_ldn[16][8];
	/* array that contains the information about the physical SCSI-devices
	   attached to this host adapter: */
	unsigned char _get_scsi[16][8];
	/* used only when checking logical devices: */
	int _local_checking_phase_flag;
	/* report received interrupt: */
	int _got_interrupt;
	/* report termination-status of SCSI-command: */
	int _stat_result;
	/* reset status (used only when doing reset): */
	int _reset_status;
	/* code of the last SCSI command (needed for panic info): */
	int _last_scsi_command[MAX_LOG_DEV + 1];
	/* identifier of the last SCSI-command type */
	int _last_scsi_type[MAX_LOG_DEV + 1];
	/* last blockcount */
	int _last_scsi_blockcount[MAX_LOG_DEV + 1];
	/* last logical block address */
	unsigned long _last_scsi_logical_block[MAX_LOG_DEV + 1];
	/* Counter that points on the next reassignable ldn for dynamical
	   remapping. The default value is 7, that is the first reassignable
	   number in the list at boottime: */
	int _next_ldn;
	/* Statistics-structure for this IBM-SCSI-host: */
	struct Driver_Statistics _IBM_DS;
	/* This host adapter's pos-registers pos2 until pos6 */
	unsigned int _pos[8];
	/* assign a special variable, that contains dedicated info about the
	   adaptertype */
	int _special;
	/* connector size on the MCA bus */
	int _connector_size;
	/* synchronous SCSI transfer rate bitpattern */
	int _adapter_speed;
};
/* macros to access the per-host data structure (Scsi_Host->hostdata) */
#define subsystem_pun(h) ((h)->this_id)
#define subsystem_maxid(h) ((h)->max_id)
#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
/* (a second, byte-identical definition of last_scsi_type() used to follow
   last_scsi_logical_block(); it was redundant and has been removed) */
#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
/* Define a arbitrary number as subsystem-marker-type. This number is, as
described in the ANSI-SCSI-standard, not occupied by other device-types. */
#define TYPE_IBM_SCSI_ADAPTER 0x2F
/* Define 0xFF for no device type, because this type is not defined within
the ANSI-SCSI-standard, therefore, it can be used and should not cause any
harm. */
#define TYPE_NO_DEVICE 0xFF
/* define medium-changer. If this is not defined previously, e.g. Linux
2.0.x, define this type here. */
#ifndef TYPE_MEDIUM_CHANGER
#define TYPE_MEDIUM_CHANGER 0x08
#endif
/* define possible operations for the immediate_assign command */
#define SET_LDN 0
#define REMOVE_LDN 1
/* ldn which is used to probe the SCSI devices */
#define PROBE_LDN 0
/* reset status flag contents */
#define IM_RESET_NOT_IN_PROGRESS 0
#define IM_RESET_IN_PROGRESS 1
#define IM_RESET_FINISHED_OK 2
#define IM_RESET_FINISHED_FAIL 3
#define IM_RESET_NOT_IN_PROGRESS_NO_INT 4
#define IM_RESET_FINISHED_OK_NO_INT 5
/* define undefined SCSI-command */
#define NO_SCSI 0xffff
/*-----------------------------------------------------------------------*/
/* if this is nonzero, ibmmcascsi option has been passed to the kernel */
static int io_port[IM_MAX_HOSTS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
static int scsi_id[IM_MAX_HOSTS] = { 7, 7, 7, 7, 7, 7, 7, 7 };
/* fill module-parameters only, when this define is present.
(that is kernel version 2.1.x) */
#if defined(MODULE)
static char *boot_options = NULL;
module_param(boot_options, charp, 0);
module_param_array(io_port, int, NULL, 0);
module_param_array(scsi_id, int, NULL, 0);
MODULE_LICENSE("GPL");
#endif
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
static unsigned int pos[8]; /* whole pos register-line for diagnosis */
/* Taking into account the additions, made by ZP Gu.
* This selects now the preset value from the configfile and
* offers the 'normal' commandline option to be accepted */
#ifdef CONFIG_IBMMCA_SCSI_ORDER_STANDARD
static char ibm_ansi_order = 1;
#else
static char ibm_ansi_order = 0;
#endif
static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
static void internal_done(Scsi_Cmnd * cmd);
static void check_devices(struct Scsi_Host *, int);
static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
static int immediate_reset(struct Scsi_Host *, unsigned int);
#endif
static int device_inquiry(struct Scsi_Host *, int);
static int read_capacity(struct Scsi_Host *, int);
static int get_pos_info(struct Scsi_Host *);
static char *ti_p(int);
static char *ti_l(int);
static char *ibmrate(unsigned int, int);
static int probe_display(int);
static int probe_bus_mode(struct Scsi_Host *);
static int device_exists(struct Scsi_Host *, int, int *, int *);
static int option_setup(char *);
/* local functions needed for proc_info */
static int ldn_access_load(struct Scsi_Host *, int);
static int ldn_access_total_read_write(struct Scsi_Host *);
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
unsigned int intr_reg;
unsigned int cmd_result;
unsigned int ldn;
unsigned long flags;
Scsi_Cmnd *cmd;
int lastSCSI;
struct device *dev = dev_id;
struct Scsi_Host *shpnt = dev_get_drvdata(dev);
spin_lock_irqsave(shpnt->host_lock, flags);
if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_NONE;
}
/* the reset-function already did all the job, even ints got
renabled on the subsystem, so just return */
if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
cpu_relax();
}
/*get command result and logical device */
intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
cmd_result = intr_reg & 0xf0;
ldn = intr_reg & 0x0f;
/* get the last_scsi_command here */
lastSCSI = last_scsi_command(shpnt)[ldn];
outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
switch (cmd_result) {
/* Prevent from Ooopsing on error to show the real reason */
case IM_ADAPTER_HW_FAILURE:
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
if (ld(shpnt)[ldn].cmd)
printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
else
printk("none,");
if (ld(shpnt)[ldn].cmd)
printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
else
printk("Blocksize=none");
printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
if (ld(shpnt)[ldn].cmd) {
printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
}
printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
/* if errors appear, enter this section to give detailed info */
printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
}
printk(KERN_ERR " Send this report to the maintainer.\n");
panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
break;
}
} else {
/* The command error handling is made silent, but we tell the
* calling function, that there is a reported error from the
* adapter. */
switch (cmd_result) {
case IM_ADAPTER_HW_FAILURE:
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
global_command_error_excuse = CMD_FAIL;
break;
default:
global_command_error_excuse = 0;
break;
}
}
/* if no panic appeared, increase the interrupt-counter */
IBM_DS(shpnt).total_interrupts++;
/*only for local checking phase */
if (local_checking_phase_flag(shpnt)) {
stat_result(shpnt) = cmd_result;
got_interrupt(shpnt) = 1;
reset_status(shpnt) = IM_RESET_FINISHED_OK;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/* handling of commands coming from upper level of scsi driver */
if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
/* verify ldn, and may handle rare reset immediate command */
if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
} else {
/*reset disk led counter, turn off disk led */
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_OK;
}
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
/* react on SCSI abort command */
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
#endif
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
cmd = ld(shpnt)[ldn].cmd;
ld(shpnt)[ldn].cmd = NULL;
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_ABORT << 16;
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd); /* should be the internal_done */
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_OK;
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
}
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
cmd = ld(shpnt)[ldn].cmd;
ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_TIMEOUT
if (cmd) {
if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
spin_unlock_irqsave(shpnt->host_lock, flags);
printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
return IRQ_HANDLED;
}
}
#endif
/*if no command structure, just return, else clear cmd */
if (!cmd)
{
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
#ifdef IM_DEBUG_INT
printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
#endif
/*if this is end of media read/write, may turn off PS/2 disk led */
if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
/* only access this, if there was a valid device addressed */
if (--disk_rw_in_progress == 0)
PS2_DISK_LED_OFF();
}
/* IBM describes the status-mask to be 0x1e, but this is not conform
* with SCSI-definition, I suppose, the reason for it is that IBM
* adapters do not support CMD_TERMINATED, TASK_SET_FULL and
* ACA_ACTIVE as returning statusbyte information. (ML) */
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
IBM_DS(shpnt).total_errors++;
} else
cmd->result = 0;
/* write device status into cmd->result, and call done function */
if (lastSCSI == NO_SCSI) { /* unexpected interrupt :-( */
cmd->result |= DID_BAD_INTR << 16;
printk("IBM MCA SCSI: WARNING - Interrupt from non-pending SCSI-command!\n");
} else /* things went right :-) */
cmd->result |= DID_OK << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/* Write a command word and attention byte to the subsystem registers.
 * Spins until the subsystem's busy bit clears; note that the loop's
 * `break` deliberately exits with the host lock still held, so the
 * command/attention register pair is written atomically with respect
 * to the interrupt handler, and the lock is released afterwards. */
static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
		      unsigned char attn_reg)
{
	unsigned long flags;
	/* must wait for attention reg not busy */
	while (1) {
		spin_lock_irqsave(shpnt->host_lock, flags);
		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
			break;	/* lock intentionally kept held here */
		spin_unlock_irqrestore(shpnt->host_lock, flags);
	}
	/* write registers and enable system interrupts */
	outl(cmd_reg, IM_CMD_REG(shpnt));
	outb(attn_reg, IM_ATTN_REG(shpnt));
	spin_unlock_irqrestore(shpnt->host_lock, flags);
}
/* Completion callback for internally issued commands: bump the status
 * field so the busy-waiting issuer can detect completion. */
static void internal_done(Scsi_Cmnd * cmd)
{
	cmd->SCp.Status += 1;
}
/* SCSI-SCB-command for device_inquiry. Issues an INQUIRY SCB to the given
 * ldn (answer lands in ld(shpnt)[ldn].buf) and busy-waits for completion.
 * Returns 1 on success, 0 if all three attempts failed ("no device"). */
static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;

	scb = &(ld(shpnt)[ldn].scb);
	tsb = &(ld(shpnt)[ldn].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	ld(shpnt)[ldn].tsb.dev_status = 0;	/* prepare statusblock */
	for (retr = 0; retr < 3; retr++) {
		/* fill scb with inquiry command */
		scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
		last_scsi_type(shpnt)[ldn] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		scb->sys_buf_length = 255;	/* maximum bufferlength gives max info */
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/* issue scb to passed ldn, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		while (!got_interrupt(shpnt))
			barrier();
		/* if command completed (possibly with retries), success */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* all three retries failed: report "no device at this ldn".
	 * (The former `if (retr >= 3) ... else return 1;` tail was dead
	 * code -- retr is always 3 when the loop falls through.) */
	return 0;
}
/* SCSI-SCB-command for READ CAPACITY on the given ldn. The 8-byte answer
 * lands in ld(shpnt)[ldn].buf. Busy-waits for the interrupt. Returns 1 on
 * success, 0 if all three attempts failed ("no device at this ldn"). */
static int read_capacity(struct Scsi_Host *shpnt, int ldn)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;

	scb = &(ld(shpnt)[ldn].scb);
	tsb = &(ld(shpnt)[ldn].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	ld(shpnt)[ldn].tsb.dev_status = 0;
	for (retr = 0; retr < 3; retr++) {
		/* fill scb with read capacity command */
		scb->command = IM_READ_CAPACITY_CMD;
		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
		last_scsi_type(shpnt)[ldn] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		scb->sys_buf_length = 8;
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/* issue scb to passed ldn, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		while (!got_interrupt(shpnt))
			barrier();
		/* if got capacity, return one device found */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* all three retries failed: report "no device at this ldn"
	 * (dead `else return 1;` tail removed -- retr is always 3 here) */
	return 0;
}
/* SCSI-SCB-command to fetch the adapter's POS information into
 * ld(shpnt)[MAX_LOG_DEV].buf (ldn 15 addresses the subsystem itself).
 * The F/W adapter delivers 256 bytes, all others exactly 18. Busy-waits
 * for the interrupt. Returns 1 on success, 0 after three failures. */
static int get_pos_info(struct Scsi_Host *shpnt)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;

	scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
	tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
	ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
	for (retr = 0; retr < 3; retr++) {
		/* fill scb with get_pos_info command */
		scb->command = IM_GET_POS_INFO_CMD;
		scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		if (special(shpnt) == IBM_SCSI2_FW)
			scb->sys_buf_length = 256;	/* get all info from F/W adapter */
		else
			scb->sys_buf_length = 18;	/* get exactly 18 bytes for other SCSI */
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/* issue scb to ldn=15, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
		/* FIXME: timeout */
		while (!got_interrupt(shpnt))
			barrier();
		/* if got POS-stuff, report success */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* all three retries failed
	 * (dead `else return 1;` tail removed -- retr is always 3 here) */
	return 0;
}
/* SCSI-immediate-command for assign. This function maps/unmaps specific
   ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
   subsystem and for dynamical remapping of ldns. `operation` is SET_LDN
   or REMOVE_LDN. Returns 1 on success, 0 after three failed attempts. */
static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
			    unsigned int lun, unsigned int ldn,
			    unsigned int operation)
{
	int retr;
	unsigned long imm_cmd;

	for (retr = 0; retr < 3; retr++) {
		/* select mutation level of the SCSI-adapter */
		switch (special(shpnt)) {
		case IBM_SCSI2_FW:
			/* F/W adapter: extra pun bit allows the wider PUN space */
			imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
			imm_cmd |= (unsigned long) ((lun & 7) << 24);
			imm_cmd |= (unsigned long) ((operation & 1) << 23);
			imm_cmd |= (unsigned long) ((pun & 7) << 20) | ((pun & 8) << 24);
			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
			break;
		default:
			imm_cmd = inl(IM_CMD_REG(shpnt));
			imm_cmd &= (unsigned long) (0xF8000000);	/* keep reserved bits */
			imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
			imm_cmd |= (unsigned long) ((lun & 7) << 24);
			imm_cmd |= (unsigned long) ((operation & 1) << 23);
			imm_cmd |= (unsigned long) ((pun & 7) << 20);
			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
			break;
		}
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
		while (!got_interrupt(shpnt))
			barrier();
		/* if command completed, report success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* all three retries failed
	 * (dead `else return 1;` tail removed -- retr is always 3 here) */
	return 0;
}
/* SCSI-immediate-command for feature control: programs the synchronous
 * transfer speed bitpattern and the adapter timeout. Returns 1 on success,
 * 2 if the adapter reported a command error (callers use this to probe for
 * an acceptable speed), 0 after three failed attempts. */
static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
{
	int retr;
	unsigned long imm_cmd;

	for (retr = 0; retr < 3; retr++) {
		/* select mutation level of the SCSI-adapter */
		imm_cmd = IM_FEATURE_CTR_IMM_CMD;
		imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
		imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		/* we need to run into command errors in order to probe for the
		 * right speed! */
		global_command_error_excuse = 1;
		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
		/* FIXME: timeout */
		while (!got_interrupt(shpnt))
			barrier();
		if (global_command_error_excuse == CMD_FAIL) {
			global_command_error_excuse = 0;
			return 2;	/* adapter rejected this setting */
		} else
			global_command_error_excuse = 0;
		/* if command completed, report success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* all three retries failed
	 * (dead `else return 1;` tail removed -- retr is always 3 here) */
	return 0;
}
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
/* SCSI-immediate-command for device reset on the given ldn. Busy-waits up
 * to IM_RESET_DELAY seconds for the interrupt handler to flag the reset
 * as finished. Returns 1 when the reset completed or timed out (timeout is
 * only complained about), 0 after three failed attempts. */
static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
{
	int retries;
	int ticks;
	unsigned long imm_command;

	for (retries = 0; retries < 3; retries++) {
		imm_command = inl(IM_CMD_REG(shpnt));
		imm_command &= (unsigned long) (0xFFFF0000);	/* keep reserved bits */
		imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
		last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
		last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		reset_status(shpnt) = IM_RESET_IN_PROGRESS;
		issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
		ticks = IM_RESET_DELAY * HZ;
		while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
			udelay((1 + 999 / HZ) * 1000);
			barrier();
		}
		/* if reset did not complete, just complain */
		if (!ticks) {
			printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
			reset_status(shpnt) = IM_RESET_FINISHED_OK;
			/* did not work, finish */
			return 1;
		}
		/* if command completed, report success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* all three retries failed
	 * (dead `else return 1;` tail removed -- retries is always 3 here) */
	return 0;
}
#endif
/* type-interpreter for physical device numbers: map a SCSI device type
 * to its one-letter display tag; "-" for TYPE_NO_DEVICE and anything
 * else unknown. */
static char *ti_p(int dev)
{
	static const struct {
		int type;
		char *tag;
	} tag_map[] = {
		{ TYPE_IBM_SCSI_ADAPTER, "A" },
		{ TYPE_DISK, "D" },
		{ TYPE_TAPE, "T" },
		{ TYPE_PROCESSOR, "P" },
		{ TYPE_WORM, "W" },
		{ TYPE_ROM, "R" },
		{ TYPE_SCANNER, "S" },
		{ TYPE_MOD, "M" },
		{ TYPE_MEDIUM_CHANGER, "C" },
		{ TYPE_NO_LUN, "+" },	/* show NO_LUN */
	};
	unsigned int i;

	for (i = 0; i < sizeof(tag_map) / sizeof(tag_map[0]); i++)
		if (tag_map[i].type == dev)
			return tag_map[i].tag;
	return "-";	/* TYPE_NO_DEVICE and others */
}
/* interpreter for logical device numbers (ldn): returns a one-character
 * static string holding the ldn as a hex digit, or "-" when out of range.
 * Note: returns a pointer to static storage (not reentrant). */
static char *ti_l(int val)
{
	const char hex[16] = "0123456789abcdef";
	static char answer[2];

	answer[1] = (char) (0x0);
	/* also reject negative values: hex[val] with val < 0 would read
	 * out of bounds (undefined behavior) */
	if (val >= 0 && val <= MAX_LOG_DEV)
		answer[0] = hex[val];
	else
		answer[0] = '-';
	return (char *) &answer;
}
/* transfers bitpattern of the feature command to values in MHz:
 * speed 0..7 selects the rate; i != 0 selects the slower column
 * (narrow), i == 0 the faster one (F/W). "---" for invalid speeds. */
static char *ibmrate(unsigned int speed, int i)
{
	static char *const fast_rate[8] = {
		"10.00", "8.00", "6.66", "5.00", "4.00", "3.10", "2.50", "2.00"
	};
	static char *const slow_rate[8] = {
		"5.00", "4.00", "3.33", "2.86", "2.50", "2.22", "2.00", "1.82"
	};

	if (speed > 7)
		return "---";
	return i ? slow_rate[speed] : fast_rate[speed];
}
/* Drive the model 95 alphanumeric display while probing: clear it when
 * `what` is 0, otherwise show "SCSIini" followed by a spinning rotor
 * character in the last position. No-op unless LED_DISP mode is set.
 * Always returns 0. */
static int probe_display(int what)
{
	static int rotator = 0;
	const char rotor[] = "|/-\\";

	if (!(display_mode & LED_DISP))
		return 0;
	if (!what) {
		/* blank all eight display positions */
		outl(0x20202020, MOD95_LED_PORT);
		outl(0x20202020, MOD95_LED_PORT + 4);
		return 0;
	}
	{
		const char msg[] = "SCSIini";
		int k;

		/* write "SCSIini" from the highest port downwards */
		for (k = 0; k < 7; k++)
			outb(msg[k], MOD95_LED_PORT + 7 - k);
		outb((char) (rotor[rotator]), MOD95_LED_PORT);
		rotator = (rotator + 1) & 3;
	}
	return 0;
}
/* Evaluate the adapter's POS information (fetched via get_pos_info into
 * ldn 15's buffer) to set the connector size and per-ldn cache/retry
 * flags. The returned bus-count bit is only meaningful for the SCSI-2
 * F/W adapter (see the caller's note). Returns 0 if no POS info could
 * be obtained. */
static int probe_bus_mode(struct Scsi_Host *shpnt)
{
	struct im_pos_info *info;
	int num_bus = 0;
	int ldn;

	/* reinterpret the raw ldn-15 return buffer as POS info */
	info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
	if (get_pos_info(shpnt)) {
		if (info->connector_size & 0xf000)
			subsystem_connector_size(shpnt) = 16;
		else
			subsystem_connector_size(shpnt) = 32;
		/* bit 3 of pos 4b carries the bus-mode information */
		num_bus |= (info->pos_4b & 8) >> 3;
		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
			/* cache status only applies to the caching adapters */
			if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
				if (!((info->cache_stat >> ldn) & 1))
					ld(shpnt)[ldn].cache_flag = 0;
			}
			if (!((info->retry_stat >> ldn) & 1))
				ld(shpnt)[ldn].retry_flag = 0;
		}
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: SCSI-Cache bits: ");
		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
			printk("%d", ld(shpnt)[ldn].cache_flag);
		}
		printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
			printk("%d", ld(shpnt)[ldn].retry_flag);
		}
		printk("\n");
#endif
	}
	return num_bus;
}
/* probing scsi devices */
/*
 * Full device scan for one adapter, run once at probe time with
 * local_checking_phase_flag set.  Four steps:
 *   1. negotiate the synchronous transfer rate,
 *   2. probe every (pun,lun) via a scratch ldn (PROBE_LDN),
 *   3. assign real ldns to the devices that answered,
 *   4. park any leftover ldns on non-existing (pun,lun) pairs so the
 *      adapter cannot remap them autonomously.
 * NOTE(review): 'ticks' is set but never used here — looks like a
 * leftover; confirm before removing.
 */
static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
{
	int id, lun, ldn, ticks;
	int count_devices;	/* local counter for connected device */
	int max_pun;
	int num_bus;
	int speedrun;		/* local adapter_speed check variable */

	/* assign default values to certain variables */
	ticks = 0;
	count_devices = 0;
	IBM_DS(shpnt).dyn_flag = 0;	/* normally no need for dynamical ldn management */
	IBM_DS(shpnt).total_errors = 0;	/* set errorcounter to 0 */
	next_ldn(shpnt) = 7;	/* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
	/* initialize the very important driver-informational arrays/structs */
	memset(ld(shpnt), 0, sizeof(ld(shpnt)));
	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
		last_scsi_command(shpnt)[ldn] = NO_SCSI;	/* emptify last SCSI-command storage */
		last_scsi_type(shpnt)[ldn] = 0;
		ld(shpnt)[ldn].cache_flag = 1;
		ld(shpnt)[ldn].retry_flag = 1;
	}
	memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt)));	/* this is essential ! */
	memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt)));	/* this is essential ! */
	for (lun = 0; lun < 8; lun++) {
		/* mark the adapter at its pun on all luns */
		get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
		get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV;	/* make sure, the subsystem
										   ldn is active for all
										   luns. */
	}
	probe_display(0);	/* Supercool display usage during SCSI-probing. */
	/* This makes sense, when booting without any */
	/* monitor connected on model XX95. */

	/* STEP 1: negotiate the fastest rate the subsystem accepts.
	 * immediate_feature() == 2 means the rate was rejected; retry with
	 * the next slower setting, panic if even the slowest (7) fails. */
	adapter_speed(shpnt) = global_adapter_speed;
	speedrun = adapter_speed(shpnt);
	while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
		probe_display(1);
		if (speedrun == 7)
			panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
		speedrun++;
		if (speedrun > 7)
			speedrun = 7;
	}
	adapter_speed(shpnt) = speedrun;
	/* Get detailed information about the current adapter, necessary for
	 * device operations: */
	num_bus = probe_bus_mode(shpnt);
	/* num_bus contains only valid data for the F/W adapter! */
	if (adaptertype == IBM_SCSI2_FW) {	/* F/W SCSI adapter: */
		/* F/W adapter PUN-space extension evaluation: */
		if (num_bus) {
			printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
			subsystem_maxid(shpnt) = 16;
		} else {
			printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
			subsystem_maxid(shpnt) = 8;
		}
		printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
	} else			/* all other IBM SCSI adapters: */
		printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
	/* assign correct PUN device space */
	max_pun = subsystem_maxid(shpnt);
#ifdef IM_DEBUG_PROBE
	/* NOTE(review): %d with a pointer argument (shpnt) — debug-only,
	 * but the format specifier is wrong; confirm before enabling. */
	printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
	printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
#else
	printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
#endif
	/* drop whatever ldn mapping the adapter powered up with */
	for (ldn = 0; ldn < MAX_LOG_DEV; ldn++) {
		probe_display(1);
#ifdef IM_DEBUG_PROBE
		printk(".");
#endif
		immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);	/* remove ldn (wherever) */
	}
	lun = 0;		/* default lun is 0 */
#ifndef IM_DEBUG_PROBE
	printk("cleared,");
#endif
	/* STEP 2: probe every (pun,lun) through the scratch PROBE_LDN.
	 * Without CONFIG_SCSI_MULTI_LUN only lun 0 is scanned — note the
	 * #ifdef wraps the inner loop header, the braces below belong to
	 * whichever loop is innermost. */
#ifdef IM_DEBUG_PROBE
	printk("\nIBM MCA SCSI: Scanning SCSI-devices.");
#endif
	for (id = 0; id < max_pun; id++)
#ifdef CONFIG_SCSI_MULTI_LUN
		for (lun = 0; lun < 8; lun++)
#endif
		{
			probe_display(1);
#ifdef IM_DEBUG_PROBE
			printk(".");
#endif
			if (id != subsystem_pun(shpnt)) {
				/* if pun is not the adapter: */
				/* set ldn=0 to pun,lun */
				immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
				if (device_inquiry(shpnt, PROBE_LDN)) {	/* probe device */
					get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
					/* entry, even for NO_LUN */
					if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
						count_devices++;	/* a existing device is found */
				}
				/* remove ldn */
				immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
			}
		}
#ifndef IM_DEBUG_PROBE
	printk("scanned,");
#endif
	/* STEP 3: hand out real ldns to the devices found in step 2. */
#ifdef IM_DEBUG_PROBE
	printk("\nIBM MCA SCSI: Mapping SCSI-devices.");
#endif
	ldn = 0;
	lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
	for (lun = 0; lun < 8 && ldn < MAX_LOG_DEV; lun++)
#endif
		for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
			probe_display(1);
#ifdef IM_DEBUG_PROBE
			printk(".");
#endif
			if (id != subsystem_pun(shpnt)) {
				if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
					/* Only map if accepted type. Always enter for
					   lun == 0 to get no gaps into ldn-mapping for ldn<7. */
					immediate_assign(shpnt, id, lun, ldn, SET_LDN);
					get_ldn(shpnt)[id][lun] = ldn;	/* map ldn */
					if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
						printk("resetting device at ldn=%x ... ", ldn);
						immediate_reset(shpnt, ldn);
#endif
						ldn++;
					} else {
						/* device vanished, probably because we don't know how to
						 * handle it or because it has problems */
						if (lun > 0) {
							/* remove mapping */
							get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
							immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
						} else
							ldn++;
					}
				} else if (lun == 0) {
					/* map lun == 0, even if no device exists */
					immediate_assign(shpnt, id, lun, ldn, SET_LDN);
					get_ldn(shpnt)[id][lun] = ldn;	/* map ldn */
					ldn++;
				}
			}
		}
	/* STEP 4: */
	/* map remaining ldns to non-existing devices */
	for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
		for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
			if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
				probe_display(1);
				/* Map remaining ldns only to NON-existing pun,lun
				   combinations to make sure an inquiry will fail.
				   For MULTI_LUN, it is needed to avoid adapter autonome
				   SCSI-remapping. */
				immediate_assign(shpnt, id, lun, ldn, SET_LDN);
				get_ldn(shpnt)[id][lun] = ldn;
				ldn++;
			}
		}
#ifndef IM_DEBUG_PROBE
	printk("mapped.");
#endif
	printk("\n");
#ifdef IM_DEBUG_PROBE
	if (ibm_ansi_order)
		printk("IBM MCA SCSI: Device order: IBM/ANSI (pun=7 is first).\n");
	else
		printk("IBM MCA SCSI: Device order: New Industry Standard (pun=0 is first).\n");
#endif
#ifdef IM_DEBUG_PROBE
	/* Show the physical and logical mapping during boot. */
	printk("IBM MCA SCSI: Determined SCSI-device-mapping:\n");
	printk("    Physical SCSI-Device Map               Logical SCSI-Device Map\n");
	printk("ID\\LUN  0  1  2  3  4  5  6  7       ID\\LUN  0  1  2  3  4  5  6  7\n");
	for (id = 0; id < max_pun; id++) {
		printk("%2d     ", id);
		for (lun = 0; lun < 8; lun++)
			printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
		printk("      %2d     ", id);
		for (lun = 0; lun < 8; lun++)
			printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
		printk("\n");
	}
#endif
	/* assign total number of found SCSI-devices to the statistics struct */
	IBM_DS(shpnt).total_scsi_devices = count_devices;
	/* decide for output in /proc-filesystem, if the configuration of
	   SCSI-devices makes dynamical reassignment of devices necessary */
	if (count_devices >= MAX_LOG_DEV)
		IBM_DS(shpnt).dyn_flag = 1;	/* dynamical assignment is necessary */
	else
		IBM_DS(shpnt).dyn_flag = 0;	/* dynamical assignment is not necessary */
	/* If no SCSI-devices are assigned, return 1 in order to cause message. */
	if (ldn == 0)
		printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
	/* reset the counters for statistics on the current adapter */
	IBM_DS(shpnt).scbs = 0;
	IBM_DS(shpnt).long_scbs = 0;
	IBM_DS(shpnt).total_accesses = 0;
	IBM_DS(shpnt).total_interrupts = 0;
	IBM_DS(shpnt).dynamical_assignments = 0;
	memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
	memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
	memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
	memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
	memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
	memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
	probe_display(0);
	return;
}
/*
 * Probe the device mapped on 'ldn' with an INQUIRY and classify it.
 * On success the SCSI type byte is stored in *device_type, a suitable
 * blocksize in *block_length, and 1 is returned; 0 means no usable
 * device answered (or a disk/MO failed READ CAPACITY).
 */
static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
{
	unsigned char *buf;

	/* if no valid device found, return immediately with 0 */
	if (!(device_inquiry(shpnt, ldn)))
		return 0;
	/* INQUIRY data landed in the ldn's buffer; byte 0 is the type */
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	switch (*buf) {
	case TYPE_ROM:
	case TYPE_WORM:
		/* standard blocksize for yellow-/red-book media */
		*device_type = *buf;
		*block_length = 2048;
		return 1;
	case TYPE_DISK:
	case TYPE_MOD:
		/* disks and MO drives: ask the device for its blocksize */
		*device_type = *buf;
		if (read_capacity(shpnt, ldn)) {
			/* bytes 4..7 of the capacity data, big-endian */
			*block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
			return 1;
		}
		return 0;
	case TYPE_TAPE:			/* blocksize set by mt/mtst in operation */
	case TYPE_PROCESSOR:		/* these set their stuff on drivers */
	case TYPE_SCANNER:
	case TYPE_MEDIUM_CHANGER:	/* one never knows what to expect here */
		*device_type = *buf;
		*block_length = 0;
		return 1;
	default:
		break;
	}
	/* unknown type byte: treat as non-existing */
	return 0;
}
/*
 * Parse driver parameters, either from an option string (str) or from
 * an old-style integer list (ints; ints[0] is the number of values,
 * the values themselves start at ints[1]).
 *
 * Keyword tokens tune the global display/ordering/speed settings;
 * numeric tokens (a leading digit or '-') are consumed pairwise as
 * I/O base and SCSI id for up to IM_MAX_HOSTS adapters, in the order
 * io0,id0,io1,id1,...
 */
static void internal_ibmmca_scsi_setup(char *str, int *ints)
{
	int i, j, io_base, id_base;
	char *token;

	io_base = 0;
	id_base = 0;
	if (str) {
		j = 0;
		while ((token = strsep(&str, ",")) != NULL) {
			if (!strcmp(token, "activity"))
				display_mode |= LED_ACTIVITY;
			if (!strcmp(token, "display"))
				display_mode |= LED_DISP;
			if (!strcmp(token, "adisplay"))
				display_mode |= LED_ADISP;
			if (!strcmp(token, "normal"))
				ibm_ansi_order = 0;
			if (!strcmp(token, "ansi"))
				ibm_ansi_order = 1;
			if (!strcmp(token, "fast"))
				global_adapter_speed = 0;
			if (!strcmp(token, "medium"))
				global_adapter_speed = 4;
			if (!strcmp(token, "slow"))
				global_adapter_speed = 7;
			if ((*token == '-') || (isdigit(*token))) {
				/* numeric tokens alternate: io, id, io, id, ... */
				if (!(j % 2) && (io_base < IM_MAX_HOSTS))
					io_port[io_base++] = simple_strtoul(token, NULL, 0);
				if ((j % 2) && (id_base < IM_MAX_HOSTS))
					scsi_id[id_base++] = simple_strtoul(token, NULL, 0);
				j++;
			}
		}
	} else if (ints) {
		/* Values come in (io, id) pairs starting at ints[1], the
		 * same order as the string form above.  The previous code
		 * read BOTH io_port[i] and scsi_id[i] from ints[2*i+2] and
		 * used the bound 2*i+2 < ints[0], so a plain "io,id" pair
		 * (ints[0] == 2) was silently ignored; fixed here. */
		for (i = 0; i < IM_MAX_HOSTS && 2 * i + 2 <= ints[0]; i++) {
			io_port[i] = ints[2 * i + 1];
			scsi_id[i] = ints[2 * i + 2];
		}
	}
	return;
}
/* Dead code: old MCA /proc slot-info hook, disabled pending a sysfs
 * port (see the FIXME below and the commented-out
 * mca_set_adapter_procfn() call in ibmmca_probe). Kept for reference. */
#if 0
FIXME NEED TO MOVE TO SYSFS
static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
{
	struct Scsi_Host *shpnt;
	int len, speciale, connectore, k;
	unsigned int pos[8];
	unsigned long flags;
	struct Scsi_Host *dev = dev_id;

	spin_lock_irqsave(dev->host_lock, flags);
	shpnt = dev;		/* assign host-structure to local pointer */
	len = 0;		/* set filled text-buffer index to 0 */
	/* get the _special contents of the hostdata structure */
	speciale = ((struct ibmmca_hostdata *) shpnt->hostdata)->_special;
	connectore = ((struct ibmmca_hostdata *) shpnt->hostdata)->_connector_size;
	for (k = 2; k < 4; k++)
		pos[k] = ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k];
	if (speciale == FORCED_DETECTION) {	/* forced detection */
		len += sprintf(buf + len,
			       "Adapter category: forced detected\n" "***************************************\n" "***  Forced detected SCSI Adapter   ***\n" "***  No chip-information available  ***\n" "***************************************\n");
	} else if (speciale == INTEGRATED_SCSI) {
		/* if the integrated subsystem has been found automatically: */
		len += sprintf(buf + len,
			       "Adapter category: integrated\n" "Chip revision level: %d\n" "Chip status: %s\n" "8 kByte NVRAM status: %s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 1) ? "enabled" : "disabled", (pos[2] & 2) ? "locked" : "accessible");
	} else if ((speciale >= 0) && (speciale < ARRAY_SIZE(subsys_list))) {
		/* if the subsystem is a slot adapter */
		len += sprintf(buf + len, "Adapter category: slot-card\n" "ROM Segment Address: ");
		if ((pos[2] & 0xf0) == 0xf0)
			len += sprintf(buf + len, "off\n");
		else
			len += sprintf(buf + len, "0x%x\n", ((pos[2] & 0xf0) << 13) + 0xc0000);
		len += sprintf(buf + len, "Chip status: %s\n", (pos[2] & 1) ? "enabled" : "disabled");
		len += sprintf(buf + len, "Adapter I/O Offset: 0x%x\n", ((pos[2] & 0x0e) << 2));
	} else {
		len += sprintf(buf + len, "Adapter category: unknown\n");
	}
	/* common subsystem information to write to the slotn file */
	len += sprintf(buf + len, "Subsystem PUN: %d\n", shpnt->this_id);
	len += sprintf(buf + len, "I/O base address range: 0x%x-0x%x\n", (unsigned int) (shpnt->io_port), (unsigned int) (shpnt->io_port + 7));
	len += sprintf(buf + len, "MCA-slot size: %d bits", connectore);
	/* Now make sure, the bufferlength is devidable by 4 to avoid
	 * paging problems of the buffer. */
	while (len % sizeof(int) != (sizeof(int) - 1))
		len += sprintf(buf + len, " ");
	len += sprintf(buf + len, "\n");
	spin_unlock_irqrestore(shpnt->host_lock, flags);
	return len;
}
#endif
/* SCSI midlayer host template: entry points and queueing limits for
 * this driver.  can_queue/sg_tablesize of 16 match the adapter's SCB
 * and scatter/gather capabilities used in ibmmca_queuecommand_lck(). */
static struct scsi_host_template ibmmca_driver_template = {
	.proc_name = "ibmmca",
	.proc_info = ibmmca_proc_info,
	.name = "IBM SCSI-Subsystem",
	.queuecommand = ibmmca_queuecommand,
	.eh_abort_handler = ibmmca_abort,
	.eh_host_reset_handler = ibmmca_host_reset,
	.bios_param = ibmmca_biosparam,
	.can_queue = 16,
	.this_id = 7,			/* default adapter PUN */
	.sg_tablesize = 16,		/* matches BUG_ON(scsi_sg_count > 16) in queuecommand */
	.cmd_per_lun = 1,
	.use_clustering = ENABLE_CLUSTERING,
};
/*
 * MCA bus probe: detect an IBM SCSI subsystem (integrated or slot
 * adapter), claim its I/O region and IRQ, allocate and register the
 * SCSI host, and scan the attached devices.
 *
 * Returns 0 on success, -EINVAL on any failure (ret is never changed
 * from its initial value; all error paths report the same code).
 *
 * Fix vs. previous version: a scsi_add_host() failure used to jump to
 * out_free_host without releasing the IRQ taken by request_irq(),
 * leaking it; there is now a dedicated out_free_irq label.
 */
static int ibmmca_probe(struct device *dev)
{
	struct Scsi_Host *shpnt;
	int port, id, i, j, k, irq, enabled, ret = -EINVAL;
	struct mca_device *mca_dev = to_mca_device(dev);
	const char *description = ibmmca_description[mca_dev->index];

	/* First of all, print the version number of the driver. This is
	 * important to allow better user bugreports in case of already
	 * having problems with the MCA_bus probing. */
	printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
	/* The POS2-register of all PS/2 model SCSI-subsystems has the following
	 * interpretation of bits:
	 *                             Bit 7 - 4 : Chip Revision ID (Release)
	 *                             Bit 3 - 2 : Reserved
	 *                             Bit 1     : 8k NVRAM Disabled
	 *                             Bit 0     : Chip Enable (EN-Signal)
	 * The POS3-register is interpreted as follows:
	 *                             Bit 7 - 5 : SCSI ID
	 *                             Bit 4     : Reserved = 0
	 *                             Bit 3 - 0 : Reserved = 0
	 * (taken from "IBM, PS/2 Hardware Interface Technical Reference, Common
	 * Interfaces (1991)").
	 * In short words, this means, that IBM PS/2 machines only support
	 * 1 single subsystem by default. The slot-adapters must have another
	 * configuration on pos2. Here, one has to assume the following
	 * things for POS2-register:
	 *                             Bit 7 - 4 : Chip Revision ID (Release)
	 *                             Bit 3 - 1 : port offset factor
	 *                             Bit 0     : Chip Enable (EN-Signal)
	 * As I found a patch here, setting the IO-registers to 0x3540 forced,
	 * as there was a 0x05 in POS2 on a model 56, I assume, that the
	 * port 0x3540 must be fix for integrated SCSI-controllers.
	 * Ok, this discovery leads to the following implementation: (M.Lang) */

	/* first look for the IBM SCSI integrated subsystem on the motherboard */
	for (j = 0; j < 8; j++)	/* read the pos-information */
		pos[j] = mca_device_read_pos(mca_dev, j);
	id = (pos[3] & 0xe0) >> 5;	/* this is correct and represents the PUN */
	enabled = (pos[2] &0x01);
	if (!enabled) {
		printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
		printk(KERN_WARNING "              SCSI-operations may not work.\n");
	}

	/* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
	 * if we ignore the settings of all surrounding pos registers, it is not
	 * completely sufficient to only check pos2 and pos3. */
	/* Therefore, now the following if statement is used to
	 * make sure, we see a real integrated onboard SCSI-interface and no
	 * internal system information, which gets mapped to some pos registers
	 * on models 95xx. */
	if (mca_dev->slot == MCA_INTEGSCSI &&
	    ((!pos[0] && !pos[1] && pos[2] > 0 &&
	      pos[3] > 0 && !pos[4] && !pos[5] &&
	      !pos[6] && !pos[7]) ||
	     (pos[0] == 0xff && pos[1] == 0xff &&
	      pos[2] < 0xff && pos[3] < 0xff &&
	      pos[4] == 0xff && pos[5] == 0xff &&
	      pos[6] == 0xff && pos[7] == 0xff))) {
		irq = IM_IRQ;
		port = IM_IO_PORT;
	} else {
		irq = IM_IRQ;
		/* POS2 bits 3-1 give the slot adapter's port offset */
		port = IM_IO_PORT + ((pos[2] &0x0e) << 2);
		if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
			printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
			printk(KERN_ERR "              Impossible to determine adapter PUN!\n");
			printk(KERN_ERR "              Guessing adapter PUN = 7.\n");
			id = 7;
		} else {
			id = (pos[3] & 0xe0) >> 5;	/* get subsystem PUN */
			if (mca_dev->index == IBM_SCSI2_FW) {
				id |= (pos[3] & 0x10) >> 1;	/* get subsystem PUN high-bit
								 * for F/W adapters */
			}
		}
		if ((mca_dev->index == IBM_SCSI2_FW) &&
		    (pos[4] & 0x01) && (pos[6] == 0)) {
			/* IRQ11 is used by SCSI-2 F/W Adapter/A */
			printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
			irq = IM_IRQ_FW;
		}
	}

	/* give detailed information on the subsystem. This helps me
	 * additionally during debugging and analyzing bug-reports. */
	printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
	       description, port, id);
	if (mca_dev->slot == MCA_INTEGSCSI)
		printk(KERN_INFO "              chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
	else {
		if ((pos[2] & 0xf0) == 0xf0)
			printk(KERN_DEBUG "              ROM Addr.=off,");
		else
			printk(KERN_DEBUG "              ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
		printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
	}

	/* check I/O region */
	if (!request_region(port, IM_N_IO_PORT, description)) {
		printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
		goto out_fail;
	}

	/* register host */
	shpnt = scsi_host_alloc(&ibmmca_driver_template,
				sizeof(struct ibmmca_hostdata));
	if (!shpnt) {
		printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
		goto out_release;
	}

	dev_set_drvdata(dev, shpnt);
	if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
		printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
		goto out_free_host;
	}

	/* request I/O region */
	special(shpnt) = mca_dev->index;	/* important assignment or else crash! */
	subsystem_connector_size(shpnt) = 0;	/* preset slot-size */
	shpnt->irq = irq;	/* assign necessary stuff for the adapter */
	shpnt->io_port = port;
	shpnt->n_io_port = IM_N_IO_PORT;
	shpnt->this_id = id;
	shpnt->max_id = 8;	/* 8 PUNs are default */
	/* now, the SCSI-subsystem is connected to Linux */

#ifdef IM_DEBUG_PROBE
	/* NOTE(review): 'ctrl' and 'found' are not declared in this
	 * function; this block cannot compile if IM_DEBUG_PROBE is
	 * defined — confirm intent before enabling. */
	ctrl = (unsigned int) (inb(IM_CTR_REG(found)));	/* get control-register status */
	printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
	printk("IBM MCA SCSI: This adapters' POS-registers: ");
	for (i = 0; i < 8; i++)
		printk("%x ", pos[i]);
	printk("\n");
#endif

	reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;

	for (i = 0; i < 16; i++)	/* reset the tables */
		for (j = 0; j < 8; j++)
			get_ldn(shpnt)[i][j] = MAX_LOG_DEV;

	/* check which logical devices exist */
	/* after this line, local interrupting is possible: */
	local_checking_phase_flag(shpnt) = 1;
	check_devices(shpnt, mca_dev->index);	/* call by value, using the global variable hosts */
	local_checking_phase_flag(shpnt) = 0;

	/* an ibm mca subsystem has been detected */
	for (k = 2; k < 7; k++)
		((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
	/* NOTE(review): _special is set to INTEGRATED_SCSI even for slot
	 * adapters, although special(shpnt) above was set from
	 * mca_dev->index — confirm which one the /proc code expects. */
	((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
	mca_device_set_name(mca_dev, description);
	/* FIXME: NEED TO REPLUMB TO SYSFS
	   mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
	 */
	mca_device_set_claim(mca_dev, 1);
	if (scsi_add_host(shpnt, dev)) {
		dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
		goto out_free_irq;
	}
	scsi_scan_host(shpnt);

	return 0;
 out_free_irq:
	/* fix: release the IRQ taken above; the old code leaked it on
	 * scsi_add_host() failure */
	free_irq(irq, dev);
 out_free_host:
	scsi_host_put(shpnt);
 out_release:
	release_region(port, IM_N_IO_PORT);
 out_fail:
	return ret;
}
/* Device-model detach: undo ibmmca_probe() in reverse order —
 * unregister from the SCSI midlayer, release the I/O region and IRQ,
 * then drop the host reference.  Always returns 0. */
static int __devexit ibmmca_remove(struct device *dev)
{
	struct Scsi_Host *shpnt = dev_get_drvdata(dev);

	scsi_remove_host(shpnt);
	release_region(shpnt->io_port, shpnt->n_io_port);
	free_irq(shpnt->irq, dev);	/* dev matches the cookie passed to request_irq() */
	scsi_host_put(shpnt);
	return 0;
}
/* The following routine is the SCSI command queue for the midlevel driver */
/*
 * Build a subsystem control block (SCB) for the midlayer command and
 * hand it to the adapter.  Runs under the host lock (wrapped by
 * DEF_SCSI_QCMD below).  If the (pun,lun) target has no ldn assigned,
 * an unused ldn from the dynamic range (7..MAX_LOG_DEV-1) is remapped
 * to it on the fly, which requires a full unassign/reassign cycle on
 * the adapter.  Completion is reported via interrupt; this function
 * only queues.
 */
static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
	unsigned int ldn;
	unsigned int scsi_cmd;
	struct im_scb *scb;
	struct Scsi_Host *shpnt;
	int current_ldn;
	int id, lun;
	int target;
	int max_pun;
	int i;
	struct scatterlist *sg;

	shpnt = cmd->device->host;

	max_pun = subsystem_maxid(shpnt);
	/* in ANSI order the pun axis is mirrored, skipping the adapter's own pun */
	if (ibm_ansi_order) {
		target = max_pun - 1 - cmd->device->id;
		if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
			target--;
		else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
			target++;
	} else
		target = cmd->device->id;

	/* if (target,lun) is NO LUN or not existing at all, return error */
	if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
		cmd->result = DID_NO_CONNECT << 16;
		if (done)
			done(cmd);
		return 0;
	}

	/*if (target,lun) unassigned, do further checks... */
	ldn = get_ldn(shpnt)[target][cmd->device->lun];
	if (ldn >= MAX_LOG_DEV) {	/* on invalid ldn do special stuff */
		if (ldn > MAX_LOG_DEV) {	/* dynamical remapping if ldn unassigned */
			current_ldn = next_ldn(shpnt);	/* stop-value for one circle */
			while (ld(shpnt)[next_ldn(shpnt)].cmd) {	/* search for a occupied, but not in */
				/* command-processing ldn. */
				next_ldn(shpnt)++;
				if (next_ldn(shpnt) >= MAX_LOG_DEV)
					next_ldn(shpnt) = 7;
				if (current_ldn == next_ldn(shpnt)) {	/* One circle done ? */
					/* no non-processing ldn found */
					scmd_printk(KERN_WARNING, cmd,
						"IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
						"              On ldn 7-14 SCSI-commands everywhere in progress.\n"
						"              Reporting DID_NO_CONNECT for device.\n");
					cmd->result = DID_NO_CONNECT << 16;	/* return no connect */
					if (done)
						done(cmd);
					return 0;
				}
			}

			/* unmap non-processing ldn */
			for (id = 0; id < max_pun; id++)
				for (lun = 0; lun < 8; lun++) {
					if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
						get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
						get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
						/* unmap entry */
					}
				}
			/* set reduced interrupt_handler-mode for checking */
			local_checking_phase_flag(shpnt) = 1;
			/* map found ldn to pun,lun */
			get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
			/* change ldn to the right value, that is now next_ldn */
			ldn = next_ldn(shpnt);
			/* unassign all ldns (pun,lun,ldn does not matter for remove) */
			immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
			/* set only LDN for remapped device */
			immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
			/* get device information for ld[ldn] */
			if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
				ld(shpnt)[ldn].cmd = NULL;	/* To prevent panic set 0, because
								   devices that were not assigned,
								   should have nothing in progress. */
				get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
				/* increase assignment counters for statistics in /proc */
				IBM_DS(shpnt).dynamical_assignments++;
				IBM_DS(shpnt).ldn_assignments[ldn]++;
			} else
				/* panic here, because a device, found at boottime has
				   vanished */
				panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
			/* unassign again all ldns (pun,lun,ldn does not matter for remove) */
			immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
			/* remap all ldns, as written in the pun/lun table */
			lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
			for (lun = 0; lun < 8; lun++)
#endif
				for (id = 0; id < max_pun; id++) {
					if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
						immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
				}
			/* set back to normal interrupt_handling */
			local_checking_phase_flag(shpnt) = 0;
#ifdef IM_DEBUG_PROBE
			/* Information on syslog terminal */
			printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
#endif
			/* increase next_ldn for next dynamical assignment */
			next_ldn(shpnt)++;
			if (next_ldn(shpnt) >= MAX_LOG_DEV)
				next_ldn(shpnt) = 7;
		} else {	/* wall against Linux accesses to the subsystem adapter */
			cmd->result = DID_BAD_TARGET << 16;
			if (done)
				done(cmd);
			return 0;
		}
	}

	/*verify there is no command already in progress for this log dev */
	if (ld(shpnt)[ldn].cmd)
		panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");

	/*save done in cmd, and save cmd for the interrupt handler */
	cmd->scsi_done = done;
	ld(shpnt)[ldn].cmd = cmd;

	/*fill scb information independent of the scsi command */
	scb = &(ld(shpnt)[ldn].scb);
	ld(shpnt)[ldn].tsb.dev_status = 0;
	scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
	scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
	scsi_cmd = cmd->cmnd[0];

	if (scsi_sg_count(cmd)) {
		/* scatter/gather: sg_tablesize is 16 in the host template */
		BUG_ON(scsi_sg_count(cmd) > 16);
		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
			ld(shpnt)[ldn].sge[i].byte_length = sg->length;
		}
		scb->enable |= IM_POINTER_TO_LIST;
		scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
		scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
	} else {
		scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
		/* recent Linux midlevel SCSI places 1024 byte for inquiry
		 * command. Far too much for old PS/2 hardware. */
		switch (scsi_cmd) {
			/* avoid command errors by setting bufferlengths to
			 * ANSI-standard. Beware of forcing it to 255,
			 * this could SEGV the kernel!!! */
		case INQUIRY:
		case REQUEST_SENSE:
		case MODE_SENSE:
		case MODE_SELECT:
			if (scsi_bufflen(cmd) > 255)
				scb->sys_buf_length = 255;
			else
				scb->sys_buf_length = scsi_bufflen(cmd);
			break;
		case TEST_UNIT_READY:
			scb->sys_buf_length = 0;
			break;
		default:
			scb->sys_buf_length = scsi_bufflen(cmd);
			break;
		}
	}
	/*fill scb information dependent on scsi command */

#ifdef IM_DEBUG_CMD
	printk("issue scsi cmd=%02x to ldn=%d\n", scsi_cmd, ldn);
#endif

	/* for specific device-type debugging: */
#ifdef IM_DEBUG_CMD_SPEC_DEV
	if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
		printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
#endif

	/* for possible panics store current command */
	last_scsi_command(shpnt)[ldn] = scsi_cmd;
	last_scsi_type(shpnt)[ldn] = IM_SCB;
	/* update statistical info */
	IBM_DS(shpnt).total_accesses++;
	IBM_DS(shpnt).ldn_access[ldn]++;

	switch (scsi_cmd) {
	case READ_6:
	case WRITE_6:
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
		/* Distinguish between disk and other devices. Only disks (that are the
		   most frequently accessed devices) should be supported by the
		   IBM-SCSI-Subsystem commands. */
		switch (ld(shpnt)[ldn].device_type) {
		case TYPE_DISK:	/* for harddisks enter here ... */
		case TYPE_MOD:	/* ... try it also for MO-drives (send flames as */
			/*     you like, if this won't work.) */
			if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
				/* read command preparations */
				scb->enable |= IM_READ_CONTROL;
				IBM_DS(shpnt).ldn_read_access[ldn]++;	/* increase READ-access on ldn stat. */
				scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
			} else {	/* write command preparations */
				IBM_DS(shpnt).ldn_write_access[ldn]++;	/* increase write-count on ldn stat. */
				scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
			}
			/* 6-byte CDBs carry a 21-bit LBA and 8-bit count;
			 * 10/12-byte carry 32-bit LBA and 16-bit count */
			if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
				scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[3]) << 0) | (((unsigned) cmd->cmnd[2]) << 8) | ((((unsigned) cmd->cmnd[1]) & 0x1f) << 16);
				scb->u2.blk.count = (unsigned) cmd->cmnd[4];
			} else {
				scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
				scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
			}
			last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
			last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
			scb->u2.blk.length = ld(shpnt)[ldn].block_length;
			break;
			/* for other devices, enter here. Other types are not known by
			   Linux! TYPE_NO_LUN is forbidden as valid device. */
		case TYPE_ROM:
		case TYPE_TAPE:
		case TYPE_PROCESSOR:
		case TYPE_WORM:
		case TYPE_SCANNER:
		case TYPE_MEDIUM_CHANGER:
			/* If there is a sequential-device, IBM recommends to use
			   IM_OTHER_SCSI_CMD_CMD instead of subsystem READ/WRITE.
			   This includes CD-ROM devices, too, due to the partial sequential
			   read capabilities. */
			scb->command = IM_OTHER_SCSI_CMD_CMD;
			if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12)
				/* enable READ */
				scb->enable |= IM_READ_CONTROL;
			scb->enable |= IM_BYPASS_BUFFER;
			scb->u1.scsi_cmd_length = cmd->cmd_len;
			memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
			last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
			/* Read/write on this non-disk devices is also displayworthy,
			   so flash-up the LED/display. */
			break;
		}
		break;
	case INQUIRY:
		IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
		scb->command = IM_DEVICE_INQUIRY_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.log_blk_adr = 0;
		break;
	case TEST_UNIT_READY:
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.log_blk_adr = 0;
		scb->u1.scsi_cmd_length = 6;
		memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
	case READ_CAPACITY:
		/* the length of system memory buffer must be exactly 8 bytes */
		scb->command = IM_READ_CAPACITY_CMD;
		scb->enable |= IM_READ_CONTROL | IM_BYPASS_BUFFER;
		if (scb->sys_buf_length > 8)
			scb->sys_buf_length = 8;
		break;
		/* Commands that need read-only-mode (system <- device): */
	case REQUEST_SENSE:
		scb->command = IM_REQUEST_SENSE_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		break;
		/* Commands that need write-only-mode (system -> device): */
	case MODE_SELECT:
	case MODE_SELECT_10:
		IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;	/*Select needs WRITE-enabled */
		scb->u1.scsi_cmd_length = cmd->cmd_len;
		memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
		/* For other commands, read-only is useful. Most other commands are
		   running without an input-data-block. */
	default:
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.scsi_cmd_length = cmd->cmd_len;
		memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
	}
	/*issue scb command, and return */
	if (++disk_rw_in_progress == 1)
		PS2_DISK_LED_ON(shpnt->host_no, target);

	if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
		IBM_DS(shpnt).long_scbs++;
	} else {
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		IBM_DS(shpnt).scbs++;
	}
	return 0;
}
/* Generates ibmmca_queuecommand(): the midlayer entry point that wraps
 * ibmmca_queuecommand_lck() with the host-lock boilerplate. */
static DEF_SCSI_QCMD(ibmmca_queuecommand)
/*
 * Core of the abort handler; caller (ibmmca_abort) holds the host lock.
 * Issues an ABORT immediate command to the adapter and spins/yields
 * until the interrupt handler fills in cmd->SCp.Status via the
 * temporarily-installed internal_done callback.  Returns SUCCESS or
 * FAILED for the SCSI error-handling midlayer.
 */
static int __ibmmca_abort(Scsi_Cmnd * cmd)
{
	/* Abort does not work, as the adapter never generates an interrupt on
	 * whatever situation is simulated, even when really pending commands
	 * are running on the adapters' hardware ! */

	struct Scsi_Host *shpnt;
	unsigned int ldn;
	void (*saved_done) (Scsi_Cmnd *);
	int target;
	int max_pun;
	unsigned long imm_command;

#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort subroutine called...\n");
#endif

	shpnt = cmd->device->host;

	max_pun = subsystem_maxid(shpnt);
	/* same ANSI-order pun mirroring as in ibmmca_queuecommand_lck() */
	if (ibm_ansi_order) {
		target = max_pun - 1 - cmd->device->id;
		if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
			target--;
		else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
			target++;
	} else
		target = cmd->device->id;

	/* get logical device number, and disable system interrupts */
	printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
	ldn = get_ldn(shpnt)[target][cmd->device->lun];

	/*if cmd for this ldn has already finished, no need to abort */
	if (!ld(shpnt)[ldn].cmd) {
		return SUCCESS;
	}

	/* Clear ld.cmd, save done function, install internal done,
	 * send abort immediate command (this enables sys. interrupts),
	 * and wait until the interrupt arrives.
	 */
	saved_done = cmd->scsi_done;
	cmd->scsi_done = internal_done;
	cmd->SCp.Status = 0;
	last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
	last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
	imm_command = inl(IM_CMD_REG(shpnt));
	imm_command &= (unsigned long) (0xffff0000);	/* mask reserved stuff */
	imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
	/* must wait for attention reg not busy */
	/* FIXME - timeout, politeness */
	while (1) {
		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
			break;
	}
	/* write registers and enable system interrupts */
	outl(imm_command, IM_CMD_REG(shpnt));
	outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
	/* drop the lock while waiting: internal_done runs from the
	 * interrupt handler and sets SCp.Status non-zero */
	spin_unlock_irq(shpnt->host_lock);
	while (!cmd->SCp.Status)
		yield();
	spin_lock_irq(shpnt->host_lock);
	cmd->scsi_done = saved_done;
#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort returned with adapter response...\n");
#endif

	/*if abort went well, call saved done, then return success or error */
	if (cmd->result == (DID_ABORT << 16))
	{
		/* NOTE(review): this |= is a no-op — the equality test above
		 * already guarantees the DID_ABORT bits are set */
		cmd->result |= DID_ABORT << 16;
		if (cmd->scsi_done)
			(cmd->scsi_done) (cmd);
		ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
		return SUCCESS;
	} else {
		/* NOTE(review): |= merges DID_NO_CONNECT into whatever status
		 * the adapter returned instead of replacing it — confirm this
		 * is intended */
		cmd->result |= DID_NO_CONNECT << 16;
		if (cmd->scsi_done)
			(cmd->scsi_done) (cmd);
		ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: Abort failed.\n");
#endif
		return FAILED;
	}
}
/*
 * ibmmca_abort - SCSI EH abort entry point.
 * Takes the host lock and delegates to __ibmmca_abort().
 */
static int ibmmca_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int result;

	spin_lock_irq(host->host_lock);
	result = __ibmmca_abort(cmd);
	spin_unlock_irq(host->host_lock);

	return result;
}
/*
 * __ibmmca_host_reset - issue a "reset" immediate command to the whole
 * subsystem (ldn 0xf) and poll for completion for up to IM_RESET_DELAY
 * seconds.  Must be entered with the host lock held.
 * Returns SUCCESS or FAILED (SCSI EH convention).
 */
static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
{
struct Scsi_Host *shpnt;
Scsi_Cmnd *cmd_aid;
int ticks, i;
unsigned long imm_command;
BUG_ON(cmd == NULL);
ticks = IM_RESET_DELAY * HZ;
shpnt = cmd->device->host;
/* Device probing owns the adapter; resetting now would corrupt it. */
if (local_checking_phase_flag(shpnt)) {
printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
return FAILED;
}
/* issue reset immediate command to subsystem, and wait for interrupt */
printk("IBM MCA SCSI: resetting all devices.\n");
reset_status(shpnt) = IM_RESET_IN_PROGRESS;
last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
/* must wait for attention reg not busy */
while (1) {
if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
/* Be polite while spinning: let other code run with the lock free. */
spin_unlock_irq(shpnt->host_lock);
yield();
spin_lock_irq(shpnt->host_lock);
}
/*write registers and enable system interrupts */
outl(imm_command, IM_CMD_REG(shpnt));
outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
/* wait for interrupt finished or intr_stat register to be set, as the
* interrupt will not be executed, while we are in here! */
/* FIXME: This is really really icky we so want a sleeping version of this ! */
while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
udelay((1 + 999 / HZ) * 1000); /* roughly one jiffy per loop */
barrier();
}
/* if reset did not complete, just return an error */
if (!ticks) {
printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
return FAILED;
}
if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
/* analysis done by this routine and not by the intr-routine */
if (inb(IM_INTR_REG(shpnt)) == 0xaf)
reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
else /* failed, 4get it */
reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
}
/* if reset failed, just return an error */
if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
return FAILED;
}
/* so reset finished ok - call outstanding done's, and return success */
printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
/* Mark every outstanding command as killed by the reset.  NOTE(review):
 * scsi_done is checked but never invoked here — verify against the
 * mid-layer EH contract of this kernel version. */
for (i = 0; i < MAX_LOG_DEV; i++) {
cmd_aid = ld(shpnt)[i].cmd;
if (cmd_aid && cmd_aid->scsi_done) {
ld(shpnt)[i].cmd = NULL;
cmd_aid->result = DID_RESET << 16;
}
}
return SUCCESS;
}
/*
 * ibmmca_host_reset - SCSI EH host-reset entry point.
 * Takes the host lock and delegates to __ibmmca_host_reset().
 */
static int ibmmca_host_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int result;

	spin_lock_irq(host->host_lock);
	result = __ibmmca_host_reset(cmd);
	spin_unlock_irq(host->host_lock);

	return result;
}
/*
 * ibmmca_biosparam - report a fake BIOS disk geometry for fdisk & co.
 * Tries 64/32, then 128/63, then 255/63 heads/sectors until the cylinder
 * count fits below 1024; clamps cylinders to 1023 as a last resort.
 * Always returns 0.
 */
static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info)
{
	static const int geometry[3][2] = {
		{ 64, 32 },
		{ 128, 63 },
		{ 255, 63 },
	};
	int size = capacity;
	int g;

	for (g = 0; g < 3; g++) {
		info[0] = geometry[g][0];	/* heads */
		info[1] = geometry[g][1];	/* sectors per track */
		info[2] = size / (info[0] * info[1]);	/* cylinders */
		if (info[2] < 1024)
			return 0;
	}
	/* Even 255/63 overflowed: clamp to the BIOS maximum. */
	info[2] = 1023;
	return 0;
}
/* Percentage share of all subsystem accesses that went to logical
 * device @ldn; 0 when there is no traffic at all. */
static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
{
	int total = IBM_DS(shpnt).total_accesses;
	int hits = IBM_DS(shpnt).ldn_access[ldn];

	if (total == 0 || hits == 0)
		return 0;

	return (hits * 100) / total;
}
/* Sum of READ and WRITE accesses over every logical device number. */
static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
{
	int ldn;
	int sum = 0;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		sum += IBM_DS(shpnt).ldn_read_access[ldn] + IBM_DS(shpnt).ldn_write_access[ldn];

	return sum;
}
/* Sum of INQUIRY accesses over every logical device number. */
static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
{
	int ldn;
	int sum = 0;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		sum += IBM_DS(shpnt).ldn_inquiry_access[ldn];

	return sum;
}
/* Sum of MODE SELECT accesses over every logical device number. */
static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
{
	int ldn;
	int sum = 0;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		sum += IBM_DS(shpnt).ldn_modeselect_access[ldn];

	return sum;
}
/* routine to display info in the proc-fs-structure (a deluxe feature) */
/*
 * ibmmca_proc_info - render the driver's statistics and device map into
 * @buffer for the /proc SCSI interface.
 *
 * Fix: the original subtracted @offset from the rendered length without a
 * lower bound, so an @offset past the end of the text returned a negative
 * byte count to the proc layer; clamp it to 0.
 */
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
{
	int len = 0;
	int i, id, lun;
	unsigned long flags;
	int max_pun;

	/* Keep the statistics stable while we format them. */
	spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
	max_pun = subsystem_maxid(shpnt);
	len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
	len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
	len += sprintf(buffer + len, " Device Scanning Order....: %s\n", (ibm_ansi_order) ? "IBM/ANSI" : "New Industry Standard");
#ifdef CONFIG_SCSI_MULTI_LUN
	len += sprintf(buffer + len, " Multiple LUN probing.....: Yes\n");
#else
	len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
#endif
	len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
	len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
	len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
	len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
	len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
	len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
	len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
	len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
	len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
	len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
	/* "other" = everything not counted in the three categories above */
	len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
		       - ldn_access_total_modeselect(shpnt)
		       - ldn_access_total_inquiry(shpnt));
	len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
	len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
	len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
	len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
	for (i = 0; i <= MAX_LOG_DEV; i++)
		len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
	len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
	len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
	len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
	len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
	len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
	len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
	len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
	len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
	len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
	/* Side-by-side tables: physical device type and assigned LDN per id/lun */
	for (id = 0; id < max_pun; id++) {
		len += sprintf(buffer + len, " %2d ", id);
		for (lun = 0; lun < 8; lun++)
			len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
		len += sprintf(buffer + len, " %2d ", id);
		for (lun = 0; lun < 8; lun++)
			len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
		len += sprintf(buffer + len, "\n");
	}
	len += sprintf(buffer + len, "(A = IBM-Subsystem, D = Harddisk, T = Tapedrive, P = Processor, W = WORM,\n");
	len += sprintf(buffer + len, " R = CD-ROM, S = Scanner, M = MO-Drive, C = Medium-Changer, + = unprovided LUN,\n");
	len += sprintf(buffer + len, " - = nothing found, nothing assigned or unprobed LUN)\n\n");
	/* proc read protocol: report data starting at the requested offset */
	*start = buffer + offset;
	len -= offset;
	if (len < 0)
		len = 0;	/* offset beyond the rendered text */
	if (len > length)
		len = length;
	spin_unlock_irqrestore(shpnt->host_lock, flags);
	return len;
}
/*
 * Parse the "ibmmcascsi=" kernel parameter: a comma-separated list of
 * integers, collected into ints[1..] with the count stored in ints[0],
 * then handed to internal_ibmmca_scsi_setup().
 */
static int option_setup(char *str)
{
	int ints[IM_MAX_HOSTS];
	char *p = str;
	int n = 1;

	while (p && isdigit(*p) && n < IM_MAX_HOSTS) {
		ints[n++] = simple_strtoul(p, NULL, 0);
		p = strchr(p, ',');
		if (p)
			p++;	/* step past the separator */
	}
	ints[0] = n - 1;	/* number of values parsed */
	internal_ibmmca_scsi_setup(p, ints);
	return 1;
}
/* Accept "ibmmcascsi=..." on the kernel command line. */
__setup("ibmmcascsi=", option_setup);
/* MCA bus glue: id match table plus probe/remove hooks. */
static struct mca_driver ibmmca_driver = {
.id_table = ibmmca_id_table,
.driver = {
.name = "ibmmca",
.bus = &mca_bus_type,
.probe = ibmmca_probe,
.remove = __devexit_p(ibmmca_remove),
},
};
/* Module/boot entry point: apply any saved module boot options, then
 * register with the MCA bus as an integrated SCSI subsystem driver. */
static int __init ibmmca_init(void)
{
#ifdef MODULE
/* If the driver is run as module, read from conf.modules or cmd-line */
if (boot_options)
option_setup(boot_options);
#endif
return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
}
/* Module unload: detach the driver from the MCA bus. */
static void __exit ibmmca_exit(void)
{
mca_unregister_driver(&ibmmca_driver);
}
module_init(ibmmca_init);
module_exit(ibmmca_exit);
| gpl-2.0 |
AscendG630-DEV/android_kernel_g630u20 | arch/x86/kernel/mpparse.c | 4788 | 22031 | /*
* Intel Multiprocessor Specification 1.1 and 1.4
* compliant MP-table parsing routines.
*
* (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
* (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
* (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820.h>
#include <asm/trampoline.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/apic.h>
/*
* Checksum an MP configuration block.
*/
/*
 * Checksum an MP configuration block: add all bytes modulo 256.
 * A valid table yields 0.
 */
static int __init mpf_checksum(unsigned char *mp, int len)
{
	int i;
	int sum = 0;

	for (i = 0; i < len; i++)
		sum += mp[i];

	return sum & 0xFF;
}
/* Default x86_init.mpparse.mpc_apic_id hook: use the APIC id exactly as
 * stored in the MP-table CPU entry. */
int __init default_mpc_apic_id(struct mpc_cpu *m)
{
return m->apicid;
}
/* Register one MP-table CPU entry: skip disabled CPUs (counting them),
 * remember the boot processor's APIC id, and hand the CPU to the
 * generic topology code. */
static void __init MP_processor_info(struct mpc_cpu *m)
{
int apicid;
char *bootup_cpu = "";
if (!(m->cpuflag & CPU_ENABLED)) {
disabled_cpus++;
return;
}
/* platform hook may translate the raw table id */
apicid = x86_init.mpparse.mpc_apic_id(m);
if (m->cpuflag & CPU_BOOTPROCESSOR) {
bootup_cpu = " (Bootup-CPU)";
boot_cpu_physical_apicid = m->apicid;
}
printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu);
generic_processor_info(apicid, m->apicver);
}
#ifdef CONFIG_X86_IO_APIC
/* Default bus-info hook: copy the 6-character bus type string out of the
 * MP-table entry into @str and NUL-terminate it. */
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
memcpy(str, m->bustype, 6);
str[6] = 0;
apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}
/* Register one MP-table bus entry: classify it as ISA/PCI (and, when
 * EISA/MCA support is built in, those too) and record whether it is a
 * PCI bus in the mp_bus_not_pci bitmap. */
static void __init MP_bus_info(struct mpc_bus *m)
{
char str[7];
x86_init.mpparse.mpc_oem_bus_info(m, str);
#if MAX_MP_BUSSES < 256
if (m->busid >= MAX_MP_BUSSES) {
printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
" is too large, max. supported is %d\n",
m->busid, str, MAX_MP_BUSSES - 1);
return;
}
#endif
/* assume "not PCI" until proven otherwise below */
set_bit(m->busid, mp_bus_not_pci);
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
if (x86_init.mpparse.mpc_oem_pci_bus)
x86_init.mpparse.mpc_oem_pci_bus(m);
clear_bit(m->busid, mp_bus_not_pci);
/* The EISA/MCA arms of this chain only exist when the bus-type map
 * is compiled in; without it the chain ends after PCI. */
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
mp_bus_id_to_type[m->busid] = MP_BUS_MCA;
#endif
} else
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
/* Register one MP-table IO-APIC entry, skipping ones the BIOS marked
 * unusable. */
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
if (m->flags & MPC_APIC_USABLE)
mp_register_ioapic(m->apicid, m->apicaddr, gsi_top);
}
/* Verbose-mode dump of one interrupt-source entry (polarity and trigger
 * are packed two bits each into irqflag). */
static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
mp_irq->irqtype, mp_irq->irqflag & 3,
(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}
#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */
/* Verbose-mode dump of one local-interrupt (LINT) source entry. */
static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
m->srcbusirq, m->destapic, m->destapiclint);
}
/*
* Read/parse the MPC
*/
/* Validate an MPC header: signature, checksum, spec revision (1.1 or
 * 1.4) and a non-null local APIC address.  On success copies the OEM id
 * into @oem and product id into @str (both NUL-terminated) and returns
 * 1; returns 0 on any failure. */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{
if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
mpc->signature[0], mpc->signature[1],
mpc->signature[2], mpc->signature[3]);
return 0;
}
if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
printk(KERN_ERR "MPTABLE: checksum error!\n");
return 0;
}
/* 0x01 = MP spec 1.1, 0x04 = MP spec 1.4 */
if (mpc->spec != 0x01 && mpc->spec != 0x04) {
printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
mpc->spec);
return 0;
}
if (!mpc->lapic) {
printk(KERN_ERR "MPTABLE: null local APIC address!\n");
return 0;
}
memcpy(oem, mpc->oem, 8);
oem[8] = 0;
printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
memcpy(str, mpc->productid, 12);
str[12] = 0;
printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic);
return 1;
}
/* Advance the MP-table walk cursor *ptr past one entry of @size bytes
 * and grow the running byte count *count by the same amount. */
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*count += size;
	*ptr += size;
}
/* Complain about a malformed MP table and hex-dump its whole contents
 * so the broken entry type can be diagnosed from the log. */
static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"
"type %x\n", *mpt);
print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
1, mpc, mpc->length, 1);
}
/* Default no-op hook for x86_init.mpparse.smp_read_mpc_oem. */
void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
/*
 * Walk a validated MPC and register every entry (CPUs, buses, IO-APICs,
 * interrupt sources).  With @early set, only the local APIC address is
 * registered.  Returns the number of processors found, or 0 when the
 * header check fails.
 */
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
char str[16];
char oem[10];
int count = sizeof(*mpc);
unsigned char *mpt = ((unsigned char *)mpc) + count;
if (!smp_check_mpc(mpc, oem, str))
return 0;
#ifdef CONFIG_X86_32
generic_mps_oem_check(mpc, oem, str);
#endif
/* Initialize the lapic mapping */
if (!acpi_lapic)
register_lapic_address(mpc->lapic);
if (early)
return 1;
if (mpc->oemptr)
x86_init.mpparse.smp_read_mpc_oem(mpc);
/*
* Now process the configuration blocks.
*/
x86_init.mpparse.mpc_record(0);
/* entries are variable-size; skip_entry advances both cursor and count */
while (count < mpc->length) {
switch (*mpt) {
case MP_PROCESSOR:
/* ACPI may have already provided this data */
if (!acpi_lapic)
MP_processor_info((struct mpc_cpu *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
break;
case MP_BUS:
MP_bus_info((struct mpc_bus *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_bus));
break;
case MP_IOAPIC:
MP_ioapic_info((struct mpc_ioapic *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
break;
case MP_INTSRC:
mp_save_irq((struct mpc_intsrc *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
break;
case MP_LINTSRC:
MP_lintsrc_info((struct mpc_lintsrc *)mpt);
skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
break;
default:
/* wrong mptable */
smp_dump_mptable(mpc, mpt);
count = mpc->length;
break;
}
x86_init.mpparse.mpc_record(1);
}
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
}
#ifdef CONFIG_X86_IO_APIC
/* Read the ELCR (edge/level control register, ports 0x4d0/0x4d1) bit for
 * @irq: 1 = level-triggered, 0 = edge-triggered. */
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port = 0x4d0 + (irq >> 3);
	unsigned int bit = irq & 7;

	return (inb(port) >> bit) & 1;
}
/*
 * Build the default ISA IRQ routing (IRQ0..15 to IO-APIC pins) for MP
 * default configurations that carry no explicit interrupt entries, plus
 * a final ExtINT entry routing the 8259A to INTIN0.
 */
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
struct mpc_intsrc intsrc;
int i;
int ELCR_fallback = 0;
intsrc.type = MP_INTSRC;
intsrc.irqflag = 0; /* conforming */
intsrc.srcbus = 0;
intsrc.dstapic = mpc_ioapic_id(0);
intsrc.irqtype = mp_INT;
/*
* If true, we have an ISA/PCI system with no IRQ entries
* in the MP table. To prevent the PCI interrupts from being set up
* incorrectly, we try to use the ELCR. The sanity check to see if
* there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
* never be level sensitive, so we simply see if the ELCR agrees.
* If it does, we assume it's valid.
*/
if (mpc_default_type == 5) {
printk(KERN_INFO "ISA/PCI bus type with no IRQ information... "
"falling back to ELCR\n");
if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
ELCR_trigger(13))
printk(KERN_ERR "ELCR contains invalid data... "
"not using ELCR\n");
else {
printk(KERN_INFO
"Using ELCR to identify PCI interrupts\n");
ELCR_fallback = 1;
}
}
for (i = 0; i < 16; i++) {
switch (mpc_default_type) {
case 2:
if (i == 0 || i == 13)
continue; /* IRQ0 & IRQ13 not connected */
/* fall through */
default:
if (i == 2)
continue; /* IRQ2 is never connected */
}
if (ELCR_fallback) {
/*
* If the ELCR indicates a level-sensitive interrupt, we
* copy that information over to the MP table in the
* irqflag field (level sensitive, active high polarity).
*/
if (ELCR_trigger(i))
intsrc.irqflag = 13;
else
intsrc.irqflag = 0;
}
intsrc.srcbusirq = i;
intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
mp_save_irq(&intsrc);
}
/* and one ExtINT entry so the legacy PIC can still deliver */
intsrc.irqtype = mp_ExtINT;
intsrc.srcbusirq = 0;
intsrc.dstirq = 0; /* 8259A to INTIN0 */
mp_save_irq(&intsrc);
}
/*
 * Synthesize the bus and IO-APIC entries implied by an MP "default
 * configuration" type (MP spec chapter 5), then fill in the default ISA
 * IRQ routing.  Types > 4 imply an extra PCI bus and a newer IO-APIC.
 */
static void __init construct_ioapic_table(int mpc_default_type)
{
struct mpc_ioapic ioapic;
struct mpc_bus bus;
bus.type = MP_BUS;
bus.busid = 0;
switch (mpc_default_type) {
default:
printk(KERN_ERR "???\nUnknown standard configuration %d\n",
mpc_default_type);
/* fall through */
case 1:
case 5:
memcpy(bus.bustype, "ISA ", 6);
break;
case 2:
case 6:
case 3:
memcpy(bus.bustype, "EISA ", 6);
break;
case 4:
case 7:
memcpy(bus.bustype, "MCA ", 6);
}
MP_bus_info(&bus);
if (mpc_default_type > 4) {
bus.busid = 1;
memcpy(bus.bustype, "PCI ", 6);
MP_bus_info(&bus);
}
ioapic.type = MP_IOAPIC;
ioapic.apicid = 2;
/* 0x10 = integrated APIC version, 0x01 = discrete 82489DX */
ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
ioapic.flags = MPC_APIC_USABLE;
ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
MP_ioapic_info(&ioapic);
/*
* We set up most of the low 16 IO-APIC pins according to MPS rules.
*/
construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif
/*
 * Build a full in-memory configuration for an MP "default configuration"
 * system: two CPUs, the implied buses/IO-APIC, and the two local
 * interrupt lines (ExtINT on LINT0, NMI on LINT1) for all APICs.
 */
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
struct mpc_cpu processor;
struct mpc_lintsrc lintsrc;
int linttypes[2] = { mp_ExtINT, mp_NMI };
int i;
/*
* local APIC has default address
*/
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/*
* 2 CPUs, numbered 0 & 1.
*/
processor.type = MP_PROCESSOR;
/* Either an integrated APIC or a discrete 82489DX. */
processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
processor.cpuflag = CPU_ENABLED;
/* synthesize family/model/stepping from the boot CPU's data */
processor.cpufeature = (boot_cpu_data.x86 << 8) |
(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
processor.featureflag = boot_cpu_data.x86_capability[0];
processor.reserved[0] = 0;
processor.reserved[1] = 0;
for (i = 0; i < 2; i++) {
processor.apicid = i;
MP_processor_info(&processor);
}
construct_ioapic_table(mpc_default_type);
lintsrc.type = MP_LINTSRC;
lintsrc.irqflag = 0; /* conforming */
lintsrc.srcbusid = 0;
lintsrc.srcbusirq = 0;
lintsrc.destapic = MP_APIC_ALL;
for (i = 0; i < 2; i++) {
lintsrc.irqtype = linttypes[i];
lintsrc.destapiclint = i;
MP_lintsrc_info(&lintsrc);
}
}
static struct mpf_intel *mpf_found;
/* Temporarily map the MPC header at physical @physptr just long enough
 * to read its length field. */
static unsigned long __init get_mpc_size(unsigned long physptr)
{
struct mpc_table *mpc;
unsigned long size;
mpc = early_ioremap(physptr, PAGE_SIZE);
size = mpc->length;
early_iounmap(mpc, PAGE_SIZE);
apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
return size;
}
/*
 * Map and parse the MPC that the floating pointer @mpf references.
 * Returns 0 on success, -1 on parse failure or when called @early (the
 * early pass only needs the local APIC address).  If the table carried
 * no interrupt entries, fall back to the default ISA IRQ routing.
 */
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
struct mpc_table *mpc;
unsigned long size;
size = get_mpc_size(mpf->physptr);
mpc = early_ioremap(mpf->physptr, size);
/*
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0;
#endif
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"
"... disabling SMP support. (tell your hw vendor)\n");
early_iounmap(mpc, size);
return -1;
}
early_iounmap(mpc, size);
if (early)
return -1;
#ifdef CONFIG_X86_IO_APIC
/*
* If there are no explicit MP IRQ entries, then we are
* broken. We set up most of the low 16 IO-APIC pins to
* ISA defaults and hope it will work.
*/
if (!mp_irq_entries) {
struct mpc_bus bus;
printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
"using default mptable. (tell your hw vendor)\n");
bus.type = MP_BUS;
bus.busid = 0;
memcpy(bus.bustype, "ISA ", 6);
MP_bus_info(&bus);
construct_default_ioirq_mptable(0);
}
#endif
return 0;
}
/*
* Scan the memory blocks for an SMP configuration block.
*/
/*
 * Parse the SMP configuration previously located by
 * default_find_smp_config().  ACPI-provided information takes
 * precedence; feature1 != 0 selects an MP "default configuration",
 * otherwise the MPC pointed to by physptr is read.
 */
void __init default_get_smp_config(unsigned int early)
{
struct mpf_intel *mpf = mpf_found;
if (!mpf)
return;
if (acpi_lapic && early)
return;
/*
* MPS doesn't support hyperthreading, aka only have
* thread 0 apic id in MPS table
*/
if (acpi_lapic && acpi_ioapic)
return;
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
/* bit 7 of feature2 selects IMCR/PIC vs. virtual-wire mode */
if (mpf->feature2 & (1 << 7)) {
printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
pic_mode = 1;
} else {
printk(KERN_INFO " Virtual Wire compatibility mode.\n");
pic_mode = 0;
}
#endif
/*
* Now see if we need to read further.
*/
if (mpf->feature1 != 0) {
if (early) {
/*
* local APIC has default address
*/
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
return;
}
printk(KERN_INFO "Default MP configuration #%d\n",
mpf->feature1);
construct_default_ISA_mptable(mpf->feature1);
} else if (mpf->physptr) {
if (check_physptr(mpf, early))
return;
} else
BUG();
if (!early)
printk(KERN_INFO "Processors: %d\n", num_processors);
/*
* Only use the first configuration found.
*/
}
/* Keep the memblock allocator away from the MPC the floating pointer
 * references. */
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}
/*
 * Scan @length bytes of physical memory at @base for the 16-byte,
 * paragraph-aligned MP floating pointer structure ("_MP_" signature,
 * valid checksum, spec revision 1.1 or 1.4).  On a hit, record it in
 * mpf_found, reserve it (and its MPC) in memblock, and return 1.
 */
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
unsigned int *bp = phys_to_virt(base);
struct mpf_intel *mpf;
unsigned long mem;
apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
bp, length);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) &&
(mpf->length == 1) &&
!mpf_checksum((unsigned char *)bp, 16) &&
((mpf->specification == 1)
|| (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
#endif
mpf_found = mpf;
printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
mpf, (u64)virt_to_phys(mpf));
mem = virt_to_phys(mpf);
memblock_reserve(mem, sizeof(*mpf));
if (mpf->physptr)
smp_reserve_memory(mpf);
return 1;
}
/* structure is 16-byte aligned: step 4 unsigned ints at a time */
bp += 4;
length -= 16;
}
return 0;
}
/* Search the three architecturally-defined areas (plus the EBDA) for the
 * MP floating pointer structure; the first hit wins. */
void __init default_find_smp_config(void)
{
unsigned int address;
/*
* FIXME: Linux assumes you have 640K of base ram..
* this continues the error...
*
* 1) Scan the bottom 1K for a signature
* 2) Scan the top 1K of base RAM
* 3) Scan the 64K of bios
*/
if (smp_scan_config(0x0, 0x400) ||
smp_scan_config(639 * 0x400, 0x400) ||
smp_scan_config(0xF0000, 0x10000))
return;
/*
* If it is an SMP machine we should know now, unless the
* configuration is in an EISA/MCA bus machine with an
* extended bios data area.
*
* there is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E, calculate and scan it here.
*
* NOTE! There are Linux loaders that will corrupt the EBDA
* area, and as such this kind of SMP config may be less
* trustworthy, simply because the SMP table may have been
* stomped on during early boot. These loaders are buggy and
* should be fixed.
*
* MP1.4 SPEC states to only scan first 1K of 4K EBDA.
*/
address = get_bios_ebda();
if (address)
smp_scan_config(address, 0x400);
}
#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];
/*
 * Look up @m in the saved mp_irqs[] table.
 * Returns the matching index (marking it claimed in irq_used[]),
 * 0 when @m is not a replaceable entry (wrong type or flags),
 * -1 when no match exists, or -2 when the match was already claimed.
 */
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int idx;

	/* only level-triggered/active-low mp_INT entries are candidates */
	if (m->irqtype != mp_INT || m->irqflag != 0x0f)
		return 0;

	for (idx = 0; idx < mp_irq_entries; idx++) {
		if (mp_irqs[idx].irqtype != mp_INT)
			continue;
		if (mp_irqs[idx].irqflag != 0x0f)
			continue;
		if (mp_irqs[idx].srcbus != m->srcbus ||
		    mp_irqs[idx].srcbusirq != m->srcbusirq)
			continue;

		if (irq_used[idx])
			return -2;	/* already claimed */

		irq_used[idx] = 1;
		return idx;
	}

	return -1;	/* not found */
}
#define SPARE_SLOT_NUM 20
static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
/* Replace a table interrupt-source entry @m with the up-to-date data
 * from mp_irqs[] when a match exists; otherwise remember the slot in
 * m_spare[] so replace_intsrc_all() can reuse it later. */
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
int i;
apic_printk(APIC_VERBOSE, "OLD ");
print_mp_irq_info(m);
i = get_MP_intsrc_index(m);
if (i > 0) {
/* found: overwrite the table entry with the current routing */
memcpy(m, &mp_irqs[i], sizeof(*m));
apic_printk(APIC_VERBOSE, "NEW ");
print_mp_irq_info(&mp_irqs[i]);
return;
}
if (!i) {
/* legacy, do nothing */
return;
}
if (*nr_m_spare < SPARE_SLOT_NUM) {
/*
* not found (-1), or duplicated (-2) are invalid entries,
* we need to use the slot later
*/
m_spare[*nr_m_spare] = m;
*nr_m_spare += 1;
}
}
/* Verify the relocated MP table has room for @count bytes: 0 when it
 * fits, -1 (with a warning) when there is no spare space or no
 * relocation buffer was allocated. */
static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (mpc_new_phys && count > mpc_new_length)
		return 0;

	WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
	return -1;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */
/*
 * Rewrite every interrupt-source entry of @mpc with current mp_irqs[]
 * data: first walk the table replacing matching entries (collecting
 * unusable slots in m_spare[]), then write the remaining mp_irqs[]
 * entries into spare slots or append them (bounded by @mpc_new_length).
 * Finally recompute the table checksum.  Always returns 0.
 */
static int __init replace_intsrc_all(struct mpc_table *mpc,
unsigned long mpc_new_phys,
unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
int i;
#endif
int count = sizeof(*mpc);
int nr_m_spare = 0;
unsigned char *mpt = ((unsigned char *)mpc) + count;
printk(KERN_INFO "mpc_length %x\n", mpc->length);
while (count < mpc->length) {
switch (*mpt) {
case MP_PROCESSOR:
skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
break;
case MP_BUS:
skip_entry(&mpt, &count, sizeof(struct mpc_bus));
break;
case MP_IOAPIC:
skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
break;
case MP_INTSRC:
check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
break;
case MP_LINTSRC:
skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
break;
default:
/* wrong mptable */
smp_dump_mptable(mpc, mpt);
goto out;
}
}
#ifdef CONFIG_X86_IO_APIC
/* second pass: place mp_irqs[] entries not yet present in the table */
for (i = 0; i < mp_irq_entries; i++) {
if (irq_used[i])
continue;
if (mp_irqs[i].irqtype != mp_INT)
continue;
if (mp_irqs[i].irqflag != 0x0f)
continue;
if (nr_m_spare > 0) {
apic_printk(APIC_VERBOSE, "*NEW* found\n");
nr_m_spare--;
memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
m_spare[nr_m_spare] = NULL;
} else {
/* append at the current end of the table (mpt) */
struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
count += sizeof(struct mpc_intsrc);
if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
goto out;
memcpy(m, &mp_irqs[i], sizeof(*m));
mpc->length = count;
mpt += sizeof(struct mpc_intsrc);
}
print_mp_irq_info(&mp_irqs[i]);
}
#endif
out:
/* update checksum */
mpc->checksum = 0;
mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
return 0;
}
int enable_update_mptable;
/* "update_mptable" boot parameter: enable rewriting of the firmware MP
 * table (and route all PCI IRQs so mp_irqs[] is fully populated). */
static int __init update_mptable_setup(char *str)
{
enable_update_mptable = 1;
#ifdef CONFIG_PCI
pci_routeirq = 1;
#endif
return 0;
}
early_param("update_mptable", update_mptable_setup);
static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;
/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
/* "alloc_mptable[=size]" boot parameter: like update_mptable, but also
 * reserve a buffer (default 4k) so the rewritten table can be relocated
 * and grown. */
static int __init parse_alloc_mptable_opt(char *p)
{
enable_update_mptable = 1;
#ifdef CONFIG_PCI
pci_routeirq = 1;
#endif
alloc_mptable = 1;
if (!p)
return 0;
mpc_new_length = memparse(p, &p);
return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);
/* Early e820 reservation of the relocation buffer requested via
 * "alloc_mptable"; 4-byte aligned. */
void __init early_reserve_e820_mpc_new(void)
{
if (enable_update_mptable && alloc_mptable)
mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
}
/*
 * Late initcall (gated by "update_mptable"): rewrite the firmware MP
 * table's interrupt-source entries with the routing the kernel actually
 * uses, either in place (if the table is writable) or in the buffer
 * reserved by "alloc_mptable".  Always returns 0.
 */
static int __init update_mp_table(void)
{
char str[16];
char oem[10];
struct mpf_intel *mpf;
struct mpc_table *mpc, *mpc_new;
if (!enable_update_mptable)
return 0;
mpf = mpf_found;
if (!mpf)
return 0;
/*
* Now see if we need to go further.
*/
if (mpf->feature1 != 0)
return 0;
if (!mpf->physptr)
return 0;
mpc = phys_to_virt(mpf->physptr);
if (!smp_check_mpc(mpc, oem, str))
return 0;
printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
printk(KERN_INFO "physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) {
mpc_new_phys = 0;
printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n",
mpc_new_length);
}
if (!mpc_new_phys) {
unsigned char old, new;
/* check if we can change the position */
/* probe writability: flip the checksum byte and see if it sticks */
mpc->checksum = 0;
old = mpf_checksum((unsigned char *)mpc, mpc->length);
mpc->checksum = 0xff;
new = mpf_checksum((unsigned char *)mpc, mpc->length);
if (old == new) {
printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
return 0;
}
printk(KERN_INFO "use in-position replacing\n");
} else {
mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys);
memcpy(mpc_new, mpc, mpc->length);
mpc = mpc_new;
/* check if we can modify that */
/* NOTE(review): physptr was just assigned mpc_new_phys above, so
 * this difference is always 0 and the branch below is dead code —
 * verify the intended comparison against the pre-assignment value. */
if (mpc_new_phys - mpf->physptr) {
struct mpf_intel *mpf_new;
/* steal 16 bytes from [0, 1k) */
printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
mpf_new = phys_to_virt(0x400 - 16);
memcpy(mpf_new, mpf, 16);
mpf = mpf_new;
mpf->physptr = mpc_new_phys;
}
mpf->checksum = 0;
mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
}
/*
* only replace the one with mp_INT and
* MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
* already in mp_irqs , stored by ... and mp_config_acpi_gsi,
* may need pci=routeirq for all coverage
*/
replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
return 0;
}
late_initcall(update_mp_table);
| gpl-2.0 |
MattCrystal/freezing-octo-ironman | drivers/mfd/ab8500-sysctrl.c | 5044 | 1806 | /*
* Copyright (C) ST-Ericsson SA 2010
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
static struct device *sysctrl_dev;
/* Only the two AB8500 system-control register banks are reachable
 * through this API. */
static inline bool valid_bank(u8 bank)
{
	switch (bank) {
	case AB8500_SYS_CTRL1_BLOCK:
	case AB8500_SYS_CTRL2_BLOCK:
		return true;
	default:
		return false;
	}
}
/*
 * Read one AB8500 system-control register into *value.
 * @reg encodes the bank in its high byte and the register offset in its
 * low byte.  Returns -EAGAIN before the driver has probed, -EINVAL for
 * an invalid bank, otherwise the abx500 access result.
 */
int ab8500_sysctrl_read(u16 reg, u8 *value)
{
	u8 bank = reg >> 8;
	u8 offset = reg & 0xFF;

	if (sysctrl_dev == NULL)
		return -EAGAIN;
	if (!valid_bank(bank))
		return -EINVAL;

	return abx500_get_register_interruptible(sysctrl_dev, bank, offset,
						 value);
}
/*
 * Read-modify-write one AB8500 system-control register: the bits set in
 * @mask are replaced by the corresponding bits of @value.
 * @reg encodes the bank in its high byte and the register offset in its
 * low byte.  Returns -EAGAIN before the driver has probed, -EINVAL for
 * an invalid bank, otherwise the abx500 access result.
 */
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
{
	u8 bank = reg >> 8;
	u8 offset = reg & 0xFF;

	if (sysctrl_dev == NULL)
		return -EAGAIN;
	if (!valid_bank(bank))
		return -EINVAL;

	return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
							  offset, mask, value);
}
/* Record the device so the read/write helpers above can reach the bus. */
static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
{
	sysctrl_dev = &pdev->dev;
	return 0;
}
/* Clear the device pointer; subsequent read/write calls get -EAGAIN. */
static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
{
	sysctrl_dev = NULL;
	return 0;
}
/* Bound to the "ab8500-sysctrl" platform device (created by the AB8500
 * core MFD driver — TODO confirm against the registering code). */
static struct platform_driver ab8500_sysctrl_driver = {
	.driver = {
		.name = "ab8500-sysctrl",
		.owner = THIS_MODULE,
	},
	.probe = ab8500_sysctrl_probe,
	.remove = __devexit_p(ab8500_sysctrl_remove),
};
/* Registered via subsys_initcall (below) rather than module_init, so
 * the sysctrl helpers are available early in boot. */
static int __init ab8500_sysctrl_init(void)
{
	return platform_driver_register(&ab8500_sysctrl_driver);
}
subsys_initcall(ab8500_sysctrl_init);
/* Fix: the author email address was missing its closing '>'. */
MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("AB8500 system control driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
TeamLGOG/android_kernel_lge_gee_3.4 | drivers/scsi/mac_esp.c | 5044 | 15967 | /* mac_esp.c: ESP front-end for Macintosh Quadra systems.
*
* Adapted from jazz_esp.c and the old mac_esp.c.
*
* The pseudo DMA algorithm is based on the one used in NetBSD.
* See sys/arch/mac68k/obio/esp.c for some background information.
*
* Copyright (C) 2007-2008 Finn Thain
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/nubus.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
#include <asm/mac_via.h>
#include <scsi/scsi_host.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "mac_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "Sept 15, 2007"
#define MAC_ESP_IO_BASE 0x50F00000
#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
#define MAC_ESP_REGS_SPACING 0x402
#define MAC_ESP_PDMA_REG 0xF9800024
#define MAC_ESP_PDMA_REG_SPACING 0x4
#define MAC_ESP_PDMA_IO_OFFSET 0x100
#define esp_read8(REG) mac_esp_read8(esp, REG)
#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
struct mac_esp_priv {
struct esp *esp;
void __iomem *pdma_regs;
void __iomem *pdma_io;
int error;
};
static struct esp *esp_chips[2];
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
(esp->dev)))
/* Write one ESP register; registers are spaced 16 bytes apart here. */
static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	nubus_writeb(val, esp->regs + reg * 16);
}
/* Read one ESP register; same 16-byte register spacing as the write. */
static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
{
	return nubus_readb(esp->regs + reg * 16);
}
/* For pseudo DMA and PIO we need the virtual address
 * so this address mapping is the identity mapping.
 * (sz and dir are unused; no real mapping is set up.)
 */
static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
				     size_t sz, int dir)
{
	return (dma_addr_t)buf;
}
/* Identity "mapping" for scatterlists: store each segment's virtual
 * address as its DMA address, since the CPU moves the data itself. */
static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			  int num_sg, int dir)
{
	int i;

	for (i = 0; i < num_sg; i++)
		sg[i].dma_address = (u32)sg_virt(&sg[i]);
	return num_sg;
}
/*
 * The identity mapping above needs no teardown, and there is no real
 * DMA engine to reset, drain or invalidate: all of these are no-ops
 * required only to satisfy the esp_driver_ops interface.
 */
static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				 size_t sz, int dir)
{
	/* Nothing to do. */
}

static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			     int num_sg, int dir)
{
	/* Nothing to do. */
}

static void mac_esp_reset_dma(struct esp *esp)
{
	/* Nothing to do. */
}

static void mac_esp_dma_drain(struct esp *esp)
{
	/* Nothing to do. */
}

static void mac_esp_dma_invalidate(struct esp *esp)
{
	/* Nothing to do. */
}
/* Report whether the last PDMA/PIO transfer latched an error flag. */
static int mac_esp_dma_error(struct esp *esp)
{
	return MAC_ESP_GET_PRIV(esp)->error;
}
/*
 * Poll until the chip's FIFO drains.  Returns 0 once it is empty and
 * 1 if an interrupt is raised or the poll times out; a timeout also
 * latches the private error flag.
 */
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int loops;

	for (loops = 500000; loops > 0; loops--) {
		if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
			return 0;
		if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
			return 1;
		udelay(2);
	}

	printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
	       esp_read8(ESP_STATUS));
	mep->error = 1;
	return 1;
}
/*
 * Poll for DREQ (data request) — via the PDMA status register when one
 * exists, otherwise via the VIA2 DRQ line.  Returns 0 when DREQ is
 * asserted, 1 on chip interrupt or timeout (timeout also sets the
 * private error flag).
 */
static inline int mac_esp_wait_for_dreq(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int loops;

	for (loops = 500000; loops > 0; loops--) {
		if (mep->pdma_regs) {
			if (nubus_readl(mep->pdma_regs) & 0x200)
				return 0;
		} else {
			if (via2_scsi_drq_pending())
				return 0;
		}
		if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
			return 1;
		udelay(2);
	}

	printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
	       esp_read8(ESP_STATUS));
	mep->error = 1;
	return 1;
}
/*
 * CPU-driven pseudo-DMA copy between memory (%0, auto-incremented) and
 * the PDMA I/O port (%4): count32 (%1) unrolled runs of 16 word moves,
 * then count2 (%2) single word moves, then one trailing byte if
 * count1 (%3) is nonzero.  The __ex_table entries map a bus fault in
 * any of the moves to label 40, so a transfer the target cuts short
 * simply ends early; the caller recomputes the residual afterwards.
 */
#define MAC_ESP_PDMA_LOOP(operands) \
	asm volatile ( \
	     " tstw %1 \n" \
	     " jbeq 20f \n" \
	     "1: movew " operands " \n" \
	     "2: movew " operands " \n" \
	     "3: movew " operands " \n" \
	     "4: movew " operands " \n" \
	     "5: movew " operands " \n" \
	     "6: movew " operands " \n" \
	     "7: movew " operands " \n" \
	     "8: movew " operands " \n" \
	     "9: movew " operands " \n" \
	     "10: movew " operands " \n" \
	     "11: movew " operands " \n" \
	     "12: movew " operands " \n" \
	     "13: movew " operands " \n" \
	     "14: movew " operands " \n" \
	     "15: movew " operands " \n" \
	     "16: movew " operands " \n" \
	     " subqw #1,%1 \n" \
	     " jbne 1b \n" \
	     "20: tstw %2 \n" \
	     " jbeq 30f \n" \
	     "21: movew " operands " \n" \
	     " subqw #1,%2 \n" \
	     " jbne 21b \n" \
	     "30: tstw %3 \n" \
	     " jbeq 40f \n" \
	     "31: moveb " operands " \n" \
	     "32: nop \n" \
	     "40: \n" \
	     " \n" \
	     " .section __ex_table,\"a\" \n" \
	     " .align 4 \n" \
	     " .long 1b,40b \n" \
	     " .long 2b,40b \n" \
	     " .long 3b,40b \n" \
	     " .long 4b,40b \n" \
	     " .long 5b,40b \n" \
	     " .long 6b,40b \n" \
	     " .long 7b,40b \n" \
	     " .long 8b,40b \n" \
	     " .long 9b,40b \n" \
	     " .long 10b,40b \n" \
	     " .long 11b,40b \n" \
	     " .long 12b,40b \n" \
	     " .long 13b,40b \n" \
	     " .long 14b,40b \n" \
	     " .long 15b,40b \n" \
	     " .long 16b,40b \n" \
	     " .long 21b,40b \n" \
	     " .long 31b,40b \n" \
	     " .long 32b,40b \n" \
	     " .previous \n" \
	     : "+a" (addr), "+r" (count32), "+r" (count2) \
	     : "g" (count1), "a" (mep->pdma_io))
/*
 * Transfer esp_count bytes between memory at addr and the chip using
 * pseudo DMA: the CPU itself moves the data, gated by DREQ.  write
 * means device-to-memory.  (dma_count is unused on this path.)
 */
static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);

	mep->error = 0;

	if (!write)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

	/* Program the chip's 16-bit transfer counter, then start cmd. */
	esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
	esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);

	do {
		/* Split the residual into 32-byte runs, words, odd byte. */
		unsigned int count32 = esp_count >> 5;
		unsigned int count2 = (esp_count & 0x1F) >> 1;
		unsigned int count1 = esp_count & 1;
		unsigned int start_addr = addr;

		if (mac_esp_wait_for_dreq(esp))
			break;

		if (write) {
			/* Device to memory; the asm loop advances addr. */
			MAC_ESP_PDMA_LOOP("%4@,%0@+");

			esp_count -= addr - start_addr;
		} else {
			unsigned int n;

			/* Memory to device. */
			MAC_ESP_PDMA_LOOP("%0@+,%4@");

			if (mac_esp_wait_for_empty_fifo(esp))
				break;

			/* Re-read the chip's counter for the residual. */
			n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
			addr = start_addr + esp_count - n;
			esp_count = n;
		}
	} while (esp_count);
}
/*
* Programmed IO routines follow.
*/
/*
 * Wait for the target to deposit data in the FIFO.  Returns the number
 * of bytes available, or 0 after a timeout.
 */
static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
{
	int loops;

	for (loops = 500000; loops > 0; loops--) {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(2);
	}

	printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
	       esp_read8(ESP_STATUS));

	return 0;
}
/*
 * Poll the status register into esp->sreg until the interrupt bit is
 * set.  Returns 0 on interrupt, 1 on timeout (which also latches the
 * private error flag).
 */
static inline int mac_esp_wait_for_intr(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int loops;

	for (loops = 500000; loops > 0; loops--) {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;
		udelay(2);
	}

	printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
	mep->error = 1;
	return 1;
}
/*
 * Move reg1 bytes one at a time between memory (%0, auto-incremented)
 * and the FIFO data register (%2), decrementing reg1 as it goes.
 */
#define MAC_ESP_PIO_LOOP(operands, reg1) \
	asm volatile ( \
	     "1: moveb " operands " \n" \
	     " subqw #1,%1 \n" \
	     " jbne 1b \n" \
	     : "+a" (addr), "+r" (reg1) \
	     : "a" (fifo))
/*
 * Move exactly 16 bytes (one full FIFO) with unrolled byte moves and
 * deduct them from reg1 (two subqw #8, since subqw is limited to 8).
 */
#define MAC_ESP_PIO_FILL(operands, reg1) \
	asm volatile ( \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " moveb " operands " \n" \
	     " subqw #8,%1 \n" \
	     " subqw #8,%1 \n" \
	     : "+a" (addr), "+r" (reg1) \
	     : "a" (fifo))

/* Depth of the ESP chip's data FIFO in bytes. */
#define MAC_ESP_FIFO_SIZE 16
/*
 * PIO fallback used when no PDMA port exists: the CPU moves every byte
 * through the chip's FIFO data register.  write means device-to-memory.
 * (dma_count is unused; the DMA bit is stripped from cmd since this
 * path is not DMA.)
 */
static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	u8 *fifo = esp->regs + ESP_FDATA * 16;

	cmd &= ~ESP_CMD_DMA;
	mep->error = 0;

	if (write) {
		/* Device to memory: drain the FIFO as the target fills it. */
		scsi_esp_cmd(esp, cmd);

		while (1) {
			unsigned int n;

			n = mac_esp_wait_for_fifo(esp);
			if (!n)
				break;

			if (n > esp_count)
				n = esp_count;
			esp_count -= n;

			MAC_ESP_PIO_LOOP("%2@,%0@+", n);

			if (!esp_count)
				break;

			if (mac_esp_wait_for_intr(esp))
				break;

			/* Stop unless still in a data-in/message-in phase. */
			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
			    ESP_INTR_BSERV)
				break;

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	} else {
		/* Memory to device: pre-load the FIFO, then keep topping
		 * it up after each transfer-information command. */
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_count >= MAC_ESP_FIFO_SIZE)
			MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
		else
			MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			unsigned int n;

			if (mac_esp_wait_for_intr(esp))
				break;

			/* Stop unless still in a data-out/message-out phase. */
			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
			    ESP_INTR_BSERV)
				break;

			/* Free space left in the FIFO. */
			n = MAC_ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
			if (n > esp_count)
				n = esp_count;

			if (n == MAC_ESP_FIFO_SIZE) {
				MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
			} else {
				esp_count -= n;
				MAC_ESP_PIO_LOOP("%0@+,%2@", n);
			}

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	}
}
/* Nonzero when the chip's interrupt status bit is currently set. */
static int mac_esp_irq_pending(struct esp *esp)
{
	return (esp_read8(ESP_STATUS) & ESP_STAT_INTR) ? 1 : 0;
}
/* The chip's 16-bit transfer counter caps one transfer at 64K-1 bytes. */
static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (dma_len > 0xFFFF)
		dma_len = 0xFFFF;
	return dma_len;
}
/*
 * Shared handler for the (edge triggered) SCSI interrupt.  Both chips
 * are re-polled until neither has its interrupt bit set, so that no
 * edge is lost while one of them is being serviced.
 */
static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
{
	int handled_one;

	do {
		int i;

		handled_one = 0;
		for (i = 0; i < 2; i++) {
			struct esp *esp = esp_chips[i];

			if (esp && (mac_esp_read8(esp, ESP_STATUS) &
				    ESP_STAT_INTR)) {
				(void)scsi_esp_intr(irq, esp);
				handled_one = 1;
			}
		}
	} while (handled_one);

	return IRQ_HANDLED;
}
/*
 * esp_scsi callback table.  send_dma_cmd defaults to the PDMA routine
 * and is switched to the PIO routine at probe time on machines without
 * a PDMA port (see esp_mac_probe below).
 */
static struct esp_driver_ops mac_esp_ops = {
	.esp_write8 = mac_esp_write8,
	.esp_read8 = mac_esp_read8,
	.map_single = mac_esp_map_single,
	.map_sg = mac_esp_map_sg,
	.unmap_single = mac_esp_unmap_single,
	.unmap_sg = mac_esp_unmap_sg,
	.irq_pending = mac_esp_irq_pending,
	.dma_length_limit = mac_esp_dma_length_limit,
	.reset_dma = mac_esp_reset_dma,
	.dma_drain = mac_esp_dma_drain,
	.dma_invalidate = mac_esp_dma_invalidate,
	.send_dma_cmd = mac_esp_send_pdma_cmd,
	.dma_error = mac_esp_dma_error,
};
/*
 * Probe one of the (up to two) ESP controllers.  Selects PDMA or PIO
 * transfers based on the machine's SCSI hardware, and has the first
 * chip to probe install the single shared IRQ handler for both.
 */
static int __devinit esp_mac_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;
	struct mac_esp_priv *mep;

	if (!MACH_IS_MAC)
		return -ENODEV;

	/* Only ids 0 and 1 exist (see esp_chips[2]). */
	if (dev->id > 1)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = 8;
	host->use_clustering = DISABLE_CLUSTERING;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;

	esp->command_block = kzalloc(16, GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unlink;
	/* PDMA/PIO work on virtual addresses, so the "DMA" address is
	 * simply the virtual one (identity mapping). */
	esp->command_block_dma = (dma_addr_t)esp->command_block;

	esp->scsi_id = 7;
	host->this_id = esp->scsi_id;
	esp->scsi_id_mask = 1 << esp->scsi_id;

	mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
	if (!mep)
		goto fail_free_command_block;
	mep->esp = esp;
	platform_set_drvdata(dev, mep);

	/* Per-model register addresses, clock rate and PDMA capability. */
	switch (macintosh_config->scsi_type) {
	case MAC_SCSI_QUADRA:
		esp->cfreq = 16500000;
		esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
		mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = NULL;
		break;
	case MAC_SCSI_QUADRA2:
		esp->cfreq = 25000000;
		esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
			    dev->id * MAC_ESP_REGS_SPACING);
		mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
				 dev->id * MAC_ESP_PDMA_REG_SPACING);
		nubus_writel(0x1d1, mep->pdma_regs);
		break;
	case MAC_SCSI_QUADRA3:
		/* These quadras have a real DMA controller (the PSC) but we
		 * don't know how to drive it so we must use PIO instead.
		 */
		esp->cfreq = 25000000;
		esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
		mep->pdma_io = NULL;
		mep->pdma_regs = NULL;
		break;
	}

	esp->ops = &mac_esp_ops;
	if (mep->pdma_io == NULL) {
		printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
		esp_write8(0, ESP_TCLOW);
		esp_write8(0, ESP_TCMED);
		esp->flags = ESP_FLAG_DISABLE_SYNC;
		mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
	} else {
		printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
	}

	host->irq = IRQ_MAC_SCSI;
	esp_chips[dev->id] = esp;
	mb();
	/* Only the first chip to probe requests the shared interrupt. */
	if (esp_chips[!dev->id] == NULL) {
		err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
		if (err < 0) {
			esp_chips[dev->id] = NULL;
			goto fail_free_priv;
		}
	}

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	if (esp_chips[!dev->id] == NULL)
		free_irq(host->irq, esp);
fail_free_priv:
	kfree(mep);
fail_free_command_block:
	kfree(esp->command_block);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
/*
 * Tear down one controller: unregister from the SCSI layer, release
 * the shared IRQ once the last chip is gone, then free what probe
 * allocated.
 */
static int __devexit esp_mac_remove(struct platform_device *dev)
{
	struct mac_esp_priv *mep = platform_get_drvdata(dev);
	struct esp *esp = mep->esp;
	unsigned int irq = esp->host->irq;

	scsi_esp_unregister(esp);

	esp_chips[dev->id] = NULL;
	if (!(esp_chips[0] || esp_chips[1]))
		free_irq(irq, NULL);

	kfree(mep);

	kfree(esp->command_block);

	scsi_host_put(esp->host);

	return 0;
}
/* Platform driver glue; dev->id selects which of the two chips. */
static struct platform_driver esp_mac_driver = {
	.probe    = esp_mac_probe,
	.remove   = __devexit_p(esp_mac_remove),
	.driver   = {
		.name	= DRV_MODULE_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Standard platform-driver module registration plumbing. */
static int __init mac_esp_init(void)
{
	return platform_driver_register(&esp_mac_driver);
}

static void __exit mac_esp_exit(void)
{
	platform_driver_unregister(&esp_mac_driver);
}
MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
module_init(mac_esp_init);
module_exit(mac_esp_exit);
| gpl-2.0 |
schqiushui/kernel_kk443_sense_mec | fs/hfs/sysdep.c | 8116 | 1041 | /*
* linux/fs/hfs/sysdep.c
*
* Copyright (C) 1996 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains the code to do various system dependent things.
*/
#include <linux/namei.h>
#include "hfs_fs.h"
/* dentry case-handling: just lowercase everything */
static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode;
	int diff;

	/* d_revalidate can run in RCU-walk mode; punt to ref-walk. */
	if (nd->flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	if(!inode)
		return 1;

	/* fix up inode on a timezone change */
	diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
	if (diff) {
		inode->i_ctime.tv_sec += diff;
		inode->i_atime.tv_sec += diff;
		inode->i_mtime.tv_sec += diff;
		HFS_I(inode)->tz_secondswest += diff;
	}
	/* The dentry itself is always considered valid. */
	return 1;
}
/* dentry ops: revalidation above plus HFS's case-folding hash/compare
 * helpers (defined elsewhere; declared in hfs_fs.h). */
const struct dentry_operations hfs_dentry_operations =
{
	.d_revalidate	= hfs_revalidate_dentry,
	.d_hash		= hfs_hash_dentry,
	.d_compare	= hfs_compare_dentry,
};
| gpl-2.0 |
LiquidSmooth-Devices/Deathly_Kernel_D2 | drivers/staging/vt6656/power.c | 8116 | 9176 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: power.c
*
* Purpose: Handles 802.11 power management functions
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
*
* Functions:
* PSvEnablePowerSaving - Enable Power Saving Mode
* PSvDisablePowerSaving - Disable Power Saving Mode
* PSbConsiderPowerDown - Decide if we can Power Down
* PSvSendPSPOLL - Send PS-POLL packet
* PSbSendNullPacket - Send Null packet
* PSbIsNextTBTTWakeUp - Decide if we need to wake up at next Beacon
*
* Revision History:
*
*/
#include "ttype.h"
#include "mac.h"
#include "device.h"
#include "wmgr.h"
#include "power.h"
#include "wcmd.h"
#include "rxtx.h"
#include "card.h"
#include "control.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
*
* Routine Description:
* Enable hw power saving functions
*
* Return Value:
* None.
*
*/
/*
 * @hDeviceContext: the device (a PSDevice pointer).
 * @wListenInterval: beacons between wake-ups; >= 2 enables skipping
 *                   beacons, otherwise every beacon is listened to.
 */
void PSvEnablePowerSaving(void *hDeviceContext,
			  WORD wListenInterval)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	WORD wAID = pMgmt->wCurrAID | BIT14 | BIT15;

	/* set period of power up before TBTT */
	MACvWriteWord(pDevice, MAC_REG_PWBT, C_PWBT);
	if (pDevice->eOPMode != OP_MODE_ADHOC) {
		/* set AID */
		MACvWriteWord(pDevice, MAC_REG_AIDATIM, wAID);
	} else {
		/* set ATIM Window */
		/* MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow); */
	}
	/* Warren:06-18-2004,the sequence must follow PSEN->AUTOSLEEP->GO2DOZE */
	/* enable power saving hw function */
	MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_PSEN);
	/* Set AutoSleep */
	MACvRegBitsOn(pDevice, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
	/* Warren:MUST turn on this once before turn on AUTOSLEEP ,or the AUTOSLEEP doesn't work */
	MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_GO2DOZE);
	if (wListenInterval >= 2) {
		/* clear always listen beacon */
		MACvRegBitsOff(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
		/* first time set listen next beacon */
		MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_LNBCN);
		pMgmt->wCountToWakeUp = wListenInterval;
	} else {
		/* always listen beacon */
		MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
		pMgmt->wCountToWakeUp = 0;
	}
	pDevice->bEnablePSMode = TRUE;

	/* We don't send null pkt in ad hoc mode since beacon will handle this. */
	if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
		PSbSendNullPacket(pDevice);

	pDevice->bPWBitOn = TRUE;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS:Power Saving Mode Enable...\n");
}
/*
*
* Routine Description:
* Disable hw power saving functions
*
* Return Value:
* None.
*
*/
void PSvDisablePowerSaving(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	/* PSMgmtObject pMgmt = &(pDevice->sMgmtObj); */

	/* disable power saving hw function */
	CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_DISABLE_PS, 0,
			    0, 0, NULL);
	/* clear AutoSleep */
	MACvRegBitsOff(pDevice, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
	/* set always listen beacon */
	MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
	pDevice->bEnablePSMode = FALSE;

	/* Tell the AP we are awake again (infrastructure mode only). */
	if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
		PSbSendNullPacket(pDevice);

	pDevice->bPWBitOn = FALSE;
}
/*
*
* Routine Description:
* Consider to power down when no more packets to tx or rx.
*
* Return Value:
* TRUE, if power down success
* FALSE, if fail
*/
/* NOTE(review): bCheckRxDMA is accepted but never used in this body. */
BOOL PSbConsiderPowerDown(void *hDeviceContext,
			  BOOL bCheckRxDMA,
			  BOOL bCheckCountToWakeUp)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	BYTE byData;

	/* check if already in Doze mode */
	ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG,
			 MAC_REG_PSCTL, &byData);

	if ((byData & PSCTL_PS) != 0)
		return TRUE;

	if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
		/* check if in TIM wake period */
		if (pMgmt->bInTIMWake)
			return FALSE;
	}

	/* check scan state */
	if (pDevice->bCmdRunning)
		return FALSE;

	/* Tx Burst */
	if (pDevice->bPSModeTxBurst)
		return FALSE;

	/* Force PSEN on */
	MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_PSEN);

	/* Don't doze if we are about to wake for a beacon anyway. */
	if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
		if (bCheckCountToWakeUp && (pMgmt->wCountToWakeUp == 0
			|| pMgmt->wCountToWakeUp == 1)) {
			return FALSE;
		}
	}

	pDevice->bPSRxBeacon = TRUE;

	/* no Tx, no Rx isr, now go to Doze */
	MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_GO2DOZE);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Go to Doze ZZZZZZZZZZZZZZZ\n");
	return TRUE;
}
/*
*
* Routine Description:
* Send PS-POLL packet
*
* Return Value:
* None.
*
*/
void PSvSendPSPOLL(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	PSTxMgmtPacket pTxPacket = NULL;

	/* Build the PS-Poll control frame in the preallocated pool. */
	memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
	pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
	pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
	pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
		(
		 WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
		 WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PSPOLL) |
		 WLAN_SET_FC_PWRMGT(0)
		));
	/* In a PS-Poll the duration/ID field carries the AID with the two
	 * top bits set. */
	pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
	memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
	memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
	pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
	pTxPacket->cbPayloadLen = 0;

	/* log failure if sending failed */
	if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
	}
}
/*
*
* Routine Description:
* Send NULL packet to AP for notification power state of STA
*
* Return Value:
* None.
*
*/
BOOL PSbSendNullPacket(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSTxMgmtPacket pTxPacket = NULL;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	u16 flags = 0;

	if (pDevice->bLinkPass == FALSE)
		return FALSE;

	/* Only useful while power saving or while txing during sleep. */
	if ((pDevice->bEnablePSMode == FALSE) &&
		(pDevice->fTxDataInSleep == FALSE)) {
		return FALSE;
	}

	/* Build the null-data frame in the preallocated pool. */
	memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
	pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
	pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));

	flags = WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
		WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL);

	/* The PWRMGT bit tells the AP whether we are dozing. */
	if (pDevice->bEnablePSMode)
		flags |= WLAN_SET_FC_PWRMGT(1);
	else
		flags |= WLAN_SET_FC_PWRMGT(0);

	pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(flags);

	/* ToDS is set except in IBSS (ad hoc) mode. */
	if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA)
		pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_TODS(1));

	memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
	memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
	memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
	pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
	pTxPacket->cbPayloadLen = 0;

	/* log error if sending failed */
	if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
		return FALSE;
	}
	return TRUE;
}
/*
*
* Routine Description:
* Check if Next TBTT must wake up
*
* Return Value:
* None.
*
*/
/*
 * Count down the listen interval and decide whether the station must
 * wake up for the next beacon.  Returns TRUE when the next TBTT needs
 * a wake-up.
 */
BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	BOOL bWakeUp = FALSE;

	/* Listen interval below 2 means every beacon is heard anyway. */
	if (pMgmt->wListenInterval < 2)
		return FALSE;

	if (pMgmt->wCountToWakeUp == 0)
		pMgmt->wCountToWakeUp = pMgmt->wListenInterval;

	pMgmt->wCountToWakeUp--;

	if (pMgmt->wCountToWakeUp == 1) {
		/* Turn on wake up to listen next beacon */
		MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_LNBCN);
		pDevice->bPSRxBeacon = FALSE;
		bWakeUp = TRUE;
	} else if (!pDevice->bPSRxBeacon) {
		/* Listen until RxBeacon */
		MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_LNBCN);
	}

	return bWakeUp;
}
| gpl-2.0 |
ubuntustudio-kernel/ubuntu-precise-lowlatency | tools/power/cpupower/utils/cpupower-info.c | 8372 | 3014 | /*
* (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*/
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <getopt.h>
#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
/* Long options mirroring the short flags parsed in cmd_info():
 * -b/--perf-bias, -m/--sched-mc, -s/--sched-smt. */
static struct option set_opts[] = {
	{ .name = "perf-bias",	.has_arg = optional_argument,	.flag = NULL,	.val = 'b'},
	{ .name = "sched-mc",	.has_arg = optional_argument,	.flag = NULL,	.val = 'm'},
	{ .name = "sched-smt",	.has_arg = optional_argument,	.flag = NULL,	.val = 's'},
	{ },
};
/* Complain about bad command-line input and terminate the tool. */
static void print_wrong_arg_exit(void)
{
	printf(_("invalid or unknown argument\n"));
	exit(EXIT_FAILURE);
}
/*
 * "cpupower info" entry point: report the sched-mc, sched-smt and
 * (Intel) perf-bias settings for the chosen CPUs.  With no flags all
 * three are shown; with no CPU selection only CPU 0 is reported.
 * Returns 0 or the last helper's result/error code.
 */
int cmd_info(int argc, char **argv)
{
	extern char *optarg;
	extern int optind, opterr, optopt;

	unsigned int cpu;

	/* Bit-field/int union so "no flag given at all" is easy to test. */
	union {
		struct {
			int sched_mc:1;
			int sched_smt:1;
			int perf_bias:1;
		};
		int params;
	} params = {};
	int ret = 0;

	setlocale(LC_ALL, "");
	textdomain(PACKAGE);

	/* parameter parsing; a repeated flag is rejected */
	while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) {
		switch (ret) {
		case 'b':
			if (params.perf_bias)
				print_wrong_arg_exit();
			params.perf_bias = 1;
			break;
		case 'm':
			if (params.sched_mc)
				print_wrong_arg_exit();
			params.sched_mc = 1;
			break;
		case 's':
			if (params.sched_smt)
				print_wrong_arg_exit();
			params.sched_smt = 1;
			break;
		default:
			print_wrong_arg_exit();
		}
	};

	/* No flags at all: show everything (0x7 sets all three bits). */
	if (!params.params)
		params.params = 0x7;

	/* Default is: show output of CPU 0 only */
	if (bitmask_isallclear(cpus_chosen))
		bitmask_setbit(cpus_chosen, 0);

	if (params.sched_mc) {
		ret = sysfs_get_sched("mc");
		printf(_("System's multi core scheduler setting: "));
		if (ret < 0)
			/* if sysfs file is missing it's: errno == ENOENT */
			printf(_("not supported\n"));
		else
			printf("%d\n", ret);
	}
	if (params.sched_smt) {
		ret = sysfs_get_sched("smt");
		printf(_("System's thread sibling scheduler setting: "));
		if (ret < 0)
			/* if sysfs file is missing it's: errno == ENOENT */
			printf(_("not supported\n"));
		else
			printf("%d\n", ret);
	}

	/* Add more per cpu options here */
	if (!params.perf_bias)
		return ret;

	/* perf-bias needs root and hardware support. */
	if (params.perf_bias) {
		if (!run_as_root) {
			params.perf_bias = 0;
			printf(_("Intel's performance bias setting needs root privileges\n"));
		} else if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) {
			printf(_("System does not support Intel's performance"
				 " bias setting\n"));
			params.perf_bias = 0;
		}
	}

	/* loop over CPUs */
	for (cpu = bitmask_first(cpus_chosen);
	     cpu <= bitmask_last(cpus_chosen); cpu++) {

		if (!bitmask_isbitset(cpus_chosen, cpu) ||
		    cpufreq_cpu_exists(cpu))
			continue;

		printf(_("analyzing CPU %d:\n"), cpu);

		if (params.perf_bias) {
			ret = msr_intel_get_perf_bias(cpu);
			if (ret < 0) {
				printf(_("Could not read perf-bias value\n"));
				break;
			} else
				printf(_("perf-bias: %d\n"), ret);
		}
	}
	return ret;
}
| gpl-2.0 |
davidmueller13/custom_kernel_lt03lte_aosp_6.0 | security/keys/encrypted-keys/masterkey_trusted.c | 9140 | 1363 | /*
* Copyright (C) 2010 IBM Corporation
* Copyright (C) 2010 Politecnico di Torino, Italy
* TORSEC group -- http://security.polito.it
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
* Roberto Sassu <roberto.sassu@polito.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* See Documentation/security/keys-trusted-encrypted.txt
*/
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/err.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
#include "encrypted.h"
/*
 * request_trusted_key - look up the trusted key sealing a master key
 * @trusted_desc: description of the trusted key to request
 * @master_key: set to the trusted key's raw key material
 * @master_keylen: set to the length of that material
 *
 * Trusted keys are sealed to PCRs and other metadata. Although userspace
 * manages both trusted/encrypted key-types, like the encrypted key type
 * data, trusted key type data is not visible decrypted from userspace.
 *
 * On success the key is returned with its semaphore held for reading;
 * on failure an ERR_PTR is returned and the output pointers are left
 * untouched.
 */
struct key *request_trusted_key(const char *trusted_desc,
				u8 **master_key, size_t *master_keylen)
{
	struct key *tkey;
	struct trusted_key_payload *tpayload;

	tkey = request_key(&key_type_trusted, trusted_desc, NULL);
	if (IS_ERR(tkey))
		return tkey;

	down_read(&tkey->sem);
	tpayload = tkey->payload.data;
	*master_key = tpayload->key;
	*master_keylen = tpayload->key_len;
	return tkey;
}
| gpl-2.0 |
krizky82/semc-kernel-msm7x30-ics | drivers/video/nvidia/nv_hw.c | 12468 | 51720 | /***************************************************************************\
|* *|
|* Copyright 1993-2003 NVIDIA, Corporation. All rights reserved. *|
|* *|
|* NOTICE TO USER: The source code is copyrighted under U.S. and *|
|* international laws. Users and possessors of this source code are *|
|* hereby granted a nonexclusive, royalty-free copyright license to *|
|* use this code in individual and commercial software. *|
|* *|
|* Any use of this source code must include, in the user documenta- *|
|* tion and internal comments to the code, notices to the end user *|
|* as follows: *|
|* *|
|* Copyright 1993-2003 NVIDIA, Corporation. All rights reserved. *|
|* *|
|* NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY *|
|* OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" *|
|* WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR- *|
|* ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, *|
|* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE- *|
|* MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL *|
|* NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI- *|
|* DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE- *|
|* SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION *|
|* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF *|
|* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. *|
|* *|
|* U.S. Government End Users. This source code is a "commercial *|
|* item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995), *|
|* consisting of "commercial computer software" and "commercial *|
|* computer software documentation," as such terms are used in *|
|* 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern- *|
|* ment only as a commercial end item. Consistent with 48 C.F.R. *|
|* 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), *|
|* all U.S. Government End Users acquire the source code with only *|
|* those rights set forth herein. *|
|* *|
\***************************************************************************/
/*
* GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
* XFree86 'nv' driver, this source code is provided under MIT-style licensing
* where the source code is provided "as is" without warranty of any kind.
* The only usage restriction is for the copyright notices to be retained
* whenever code is used.
*
* Antonino Daplas <adaplas@pol.net> 2005-03-11
*/
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_hw.c,v 1.4 2003/11/03 05:11:25 tsi Exp $ */
#include <linux/pci.h>
#include "nv_type.h"
#include "nv_local.h"
#include "nv_proto.h"
/* Lock or unlock the extended CRTC registers. */
void NVLockUnlock(struct nvidia_par *par, int Lock)
{
	u8 cr11;

	/* Write the lock/unlock key into CR1F. */
	VGA_WR08(par->PCIO, 0x3D4, 0x1F);
	VGA_WR08(par->PCIO, 0x3D5, Lock ? 0x99 : 0x57);

	/* Mirror the state in the top bit of CR11. */
	VGA_WR08(par->PCIO, 0x3D4, 0x11);
	cr11 = VGA_RD08(par->PCIO, 0x3D5);
	cr11 = Lock ? (cr11 | 0x80) : (cr11 & ~0x80);
	VGA_WR08(par->PCIO, 0x3D5, cr11);
}
/*
 * Set bit 0 of CR31 (the cursor show/hide flag) from ShowHide and
 * return the flag's previous value.
 */
int NVShowHideCursor(struct nvidia_par *par, int ShowHide)
{
	int previous = par->CurrentState->cursor1;

	par->CurrentState->cursor1 = (previous & 0xFE) | (ShowHide & 0x01);
	VGA_WR08(par->PCIO, 0x3D4, 0x31);
	VGA_WR08(par->PCIO, 0x3D5, par->CurrentState->cursor1);

	/* NV40: rewrite the RAMDAC register to make the change take. */
	if (par->Architecture == NV_ARCH_40)
		NV_WR32(par->PRAMDAC, 0x0300, NV_RD32(par->PRAMDAC, 0x0300));

	return previous & 0x01;
}
/****************************************************************************\
* *
* The video arbitration routines calculate some "magic" numbers. Fixes *
* the snow seen when accessing the framebuffer without it. *
* It just works (I hope). *
* *
\****************************************************************************/
/*
 * Parameter blocks for the NV4 and NV10 FIFO watermark ("arbitration")
 * calculations.  The *_fifo_info structs appear to hold the computed
 * results and the *_sim_state structs the clock/memory inputs — the
 * computing routines are defined further down this file.
 */
typedef struct {
	int graphics_lwm;	/* graphics low-water mark */
	int video_lwm;		/* video low-water mark */
	int graphics_burst_size;
	int video_burst_size;
	int valid;		/* nonzero when the result is usable */
} nv4_fifo_info;

typedef struct {
	int pclk_khz;		/* pixel clock */
	int mclk_khz;		/* memory clock */
	int nvclk_khz;		/* core clock */
	char mem_page_miss;
	char mem_latency;
	int memory_width;
	char enable_video;
	char gr_during_vid;
	char pix_bpp;		/* bytes (or bits?) per pixel — see users */
	char mem_aligned;
	char enable_mp;
} nv4_sim_state;

typedef struct {
	int graphics_lwm;
	int video_lwm;
	int graphics_burst_size;
	int video_burst_size;
	int valid;
} nv10_fifo_info;

typedef struct {
	int pclk_khz;
	int mclk_khz;
	int nvclk_khz;
	char mem_page_miss;
	char mem_latency;
	u32 memory_type;	/* NV10 additionally distinguishes RAM type */
	int memory_width;
	char enable_video;
	char gr_during_vid;
	char pix_bpp;
	char mem_aligned;
	char enable_mp;
} nv10_sim_state;
/*
 * nvGetClocks - read the memory (MClk) and core (NVClk) PLL settings
 * and convert them to frequencies in kHz.
 *
 * Each PLL is decoded as: freq = crystal * (N * NB) / (M * MB) >> P,
 * where the second divider pair (MB/NB) exists only on some
 * architectures and otherwise defaults to 1/1.  The register layout
 * differs per family:
 *   - NV40+:            PLLs in PMC space (0x4020/0x4024 memory,
 *                       0x4000/0x4004 core)
 *   - two-stage PLL:    PRAMDAC0 0x0504/0x0574 (memory) and
 *                       0x0500/0x0570 (core); bit 31 enables stage 2
 *   - chipset 0x03x0:   single register per PLL with packed second
 *                       stage, enabled by bit 7
 *   - everything else:  plain single-stage N/M/P PLL
 */
static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
			unsigned int *NVClk)
{
	unsigned int pll, N, M, MB, NB, P;

	if (par->Architecture >= NV_ARCH_40) {
		/* Memory PLL: post divider in 0x4020, N/M (+NB/MB) in 0x4024 */
		pll = NV_RD32(par->PMC, 0x4020);
		P = (pll >> 16) & 0x07;
		pll = NV_RD32(par->PMC, 0x4024);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		if (((par->Chipset & 0xfff0) == 0x0290) ||
		    ((par->Chipset & 0xfff0) == 0x0390)) {
			/* these chipsets have no second divider stage */
			MB = 1;
			NB = 1;
		} else {
			MB = (pll >> 16) & 0xFF;
			NB = (pll >> 24) & 0xFF;
		}
		*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
		/* Core PLL: same layout at 0x4000/0x4004 */
		pll = NV_RD32(par->PMC, 0x4000);
		P = (pll >> 16) & 0x07;
		pll = NV_RD32(par->PMC, 0x4004);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		MB = (pll >> 16) & 0xFF;
		NB = (pll >> 24) & 0xFF;
		*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
	} else if (par->twoStagePLL) {
		/* Memory PLL: stage 1 at 0x0504, stage 2 at 0x0574 */
		pll = NV_RD32(par->PRAMDAC0, 0x0504);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x0F;
		pll = NV_RD32(par->PRAMDAC0, 0x0574);
		if (pll & 0x80000000) {	/* second stage enabled */
			MB = pll & 0xFF;
			NB = (pll >> 8) & 0xFF;
		} else {
			MB = 1;
			NB = 1;
		}
		*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
		/* Core PLL: stage 1 at 0x0500, stage 2 at 0x0570 */
		pll = NV_RD32(par->PRAMDAC0, 0x0500);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x0F;
		pll = NV_RD32(par->PRAMDAC0, 0x0570);
		if (pll & 0x80000000) {	/* second stage enabled */
			MB = pll & 0xFF;
			NB = (pll >> 8) & 0xFF;
		} else {
			MB = 1;
			NB = 1;
		}
		*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
	} else if (((par->Chipset & 0x0ff0) == 0x0300) ||
		   ((par->Chipset & 0x0ff0) == 0x0330)) {
		/* 0x03x0: both stages packed into one register, bit 7 enables */
		pll = NV_RD32(par->PRAMDAC0, 0x0504);
		M = pll & 0x0F;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x07;
		if (pll & 0x00000080) {
			MB = (pll >> 4) & 0x07;
			NB = (pll >> 19) & 0x1f;
		} else {
			MB = 1;
			NB = 1;
		}
		*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
		pll = NV_RD32(par->PRAMDAC0, 0x0500);
		M = pll & 0x0F;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x07;
		if (pll & 0x00000080) {
			MB = (pll >> 4) & 0x07;
			NB = (pll >> 19) & 0x1f;
		} else {
			MB = 1;
			NB = 1;
		}
		*NVClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
	} else {
		/* Legacy single-stage PLLs */
		pll = NV_RD32(par->PRAMDAC0, 0x0504);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x0F;
		*MClk = (N * par->CrystalFreqKHz / M) >> P;
		pll = NV_RD32(par->PRAMDAC0, 0x0500);
		M = pll & 0xFF;
		N = (pll >> 8) & 0xFF;
		P = (pll >> 16) & 0x0F;
		*NVClk = (N * par->CrystalFreqKHz / M) >> P;
	}
}
/*
 * nv4CalcArbitration - compute graphics/video FIFO low-water marks and
 * burst sizes for NV4-class memory arbitration.
 *
 * Models the worst-case refill latency (in microseconds) of a FIFO
 * request through the memory subsystem using fixed per-stage clock
 * counts plus the CAS latency and page-miss penalty from @arb, then
 * sizes the low-water marks so the FIFOs do not underrun at the given
 * drain rates.  Iterates, shrinking the extra-latency margin
 * (mclk_extra) until the watermarks fit; fifo->valid is cleared on
 * iterations that do not fit and set on the one that does.
 *
 * The individual "+= n" clock counts are hardware-tuned magic numbers
 * inherited from the XFree86 'nv' driver; do not "simplify" them.
 */
static void nv4CalcArbitration(nv4_fifo_info * fifo, nv4_sim_state * arb)
{
	int data, pagemiss, cas, width, video_enable, bpp;
	int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
	int found, mclk_extra, mclk_loop, cbs, m1, p1;
	int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
	int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate;
	int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt, clwm;

	fifo->valid = 1;
	pclk_freq = arb->pclk_khz;	/* all clocks in kHz */
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	cas = arb->mem_latency;
	width = arb->memory_width >> 6;	/* bus width in 64-bit units */
	video_enable = arb->enable_video;
	bpp = arb->pix_bpp;
	mp_enable = arb->enable_mp;
	clwm = 0;
	vlwm = 0;
	cbs = 128;		/* CRT burst size (bytes) */
	/* fixed pipeline clock counts (tuned constants) */
	pclks = 2;
	nvclks = 2;
	nvclks += 2;
	nvclks += 1;
	mclks = 5;
	mclks += 3;
	mclks += 1;
	mclks += cas;
	mclks += 1;
	mclks += 1;
	mclks += 1;
	mclks += 1;
	mclk_extra = 3;		/* latency margin, reduced each failed pass */
	nvclks += 2;
	nvclks += 1;
	nvclks += 1;
	nvclks += 1;
	if (mp_enable)
		mclks += 4;
	nvclks += 0;
	pclks += 0;
	found = 0;
	vbs = 0;
	while (found != 1) {
		fifo->valid = 1;
		found = 1;
		mclk_loop = mclks + mclk_extra;
		/* per-domain latencies in us */
		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
		us_n = nvclks * 1000 * 1000 / nvclk_freq;
		/*
		 * NOTE(review): multiplies the *nvclks* count by the pixel
		 * clock period (the NV10 version uses pclks here).  Kept
		 * as-is: these are tuned values — confirm before changing.
		 */
		us_p = nvclks * 1000 * 1000 / pclk_freq;
		if (video_enable) {
			video_drain_rate = pclk_freq * 2;	/* bytes/ms */
			crtc_drain_rate = pclk_freq * bpp / 8;	/* bytes/ms */
			vpagemiss = 2;
			vpagemiss += 1;
			crtpagemiss = 2;
			vpm_us =
			    (vpagemiss * pagemiss) * 1000 * 1000 / mclk_freq;
			/* fill rate limited by core or memory, whichever is slower */
			if (nvclk_freq * 2 > mclk_freq * width)
				video_fill_us =
				    cbs * 1000 * 1000 / 16 / nvclk_freq;
			else
				video_fill_us =
				    cbs * 1000 * 1000 / (8 * width) /
				    mclk_freq;
			us_video = vpm_us + us_m + us_n + us_p + video_fill_us;
			vlwm = us_video * video_drain_rate / (1000 * 1000);
			vlwm++;
			/* shrink the video burst as the watermark rises */
			vbs = 128;
			if (vlwm > 128)
				vbs = 64;
			if (vlwm > (256 - 64))
				vbs = 32;
			if (nvclk_freq * 2 > mclk_freq * width)
				video_fill_us =
				    vbs * 1000 * 1000 / 16 / nvclk_freq;
			else
				video_fill_us =
				    vbs * 1000 * 1000 / (8 * width) /
				    mclk_freq;
			cpm_us =
			    crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
			/* CRT has to wait behind the video refill */
			us_crt =
			    us_video + video_fill_us + cpm_us + us_m + us_n +
			    us_p;
			clwm = us_crt * crtc_drain_rate / (1000 * 1000);
			clwm++;
		} else {
			crtc_drain_rate = pclk_freq * bpp / 8;
			crtpagemiss = 2;
			crtpagemiss += 1;
			cpm_us =
			    crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
			us_crt = cpm_us + us_m + us_n + us_p;
			clwm = us_crt * crtc_drain_rate / (1000 * 1000);
			clwm++;
		}
		/* overfill check: m1 = amount the burst overshoots the FIFO */
		m1 = clwm + cbs - 512;
		p1 = m1 * pclk_freq / mclk_freq;
		p1 = p1 * bpp / 8;	/* bytes drained while overfilled */
		if ((p1 < m1) && (m1 > 0)) {
			fifo->valid = 0;
			found = 0;
			if (mclk_extra == 0)
				found = 1;	/* margin exhausted; give up */
			mclk_extra--;
		} else if (video_enable) {
			if ((clwm > 511) || (vlwm > 255)) {
				fifo->valid = 0;
				found = 0;
				if (mclk_extra == 0)
					found = 1;
				mclk_extra--;
			}
		} else {
			if (clwm > 519) {
				fifo->valid = 0;
				found = 0;
				if (mclk_extra == 0)
					found = 1;
				mclk_extra--;
			}
		}
		/* clamp to minimum watermarks */
		if (clwm < 384)
			clwm = 384;
		if (vlwm < 128)
			vlwm = 128;
		data = (int)(clwm);
		fifo->graphics_lwm = data;
		fifo->graphics_burst_size = 128;
		data = (int)((vlwm + 15));
		fifo->video_lwm = data;
		fifo->video_burst_size = vbs;
	}
}
/*
 * Gather the NV4 arbitration model inputs from the current clocks and
 * framebuffer config, run nv4CalcArbitration(), and encode the result:
 * *burst = log2(graphics_burst_size / 32), *lwm = graphics_lwm / 8.
 * Outputs are untouched when the calculation does not converge.
 */
static void nv4UpdateArbitrationSettings(unsigned VClk,
					 unsigned pixelDepth,
					 unsigned *burst,
					 unsigned *lwm, struct nvidia_par *par)
{
	nv4_fifo_info fifo;
	nv4_sim_state sim;
	unsigned int mclk, nvclk, fb_cfg1;

	nvGetClocks(par, &mclk, &nvclk);
	fb_cfg1 = NV_RD32(par->PFB, 0x00000204);

	sim.pix_bpp = (char)pixelDepth;
	sim.enable_video = 0;
	sim.enable_mp = 0;
	/* strap bit 4 selects a 128-bit vs 64-bit memory bus */
	sim.memory_width =
	    (NV_RD32(par->PEXTDEV, 0x0000) & 0x10) ? 128 : 64;
	sim.mem_latency = (char)fb_cfg1 & 0x0F;
	sim.mem_aligned = 1;
	sim.mem_page_miss =
	    (char)(((fb_cfg1 >> 4) & 0x0F) + ((fb_cfg1 >> 31) & 0x01));
	sim.gr_during_vid = 0;
	sim.pclk_khz = VClk;
	sim.mclk_khz = mclk;
	sim.nvclk_khz = nvclk;

	nv4CalcArbitration(&fifo, &sim);
	if (fifo.valid) {
		int b = fifo.graphics_burst_size >> 4;

		*burst = 0;
		while (b >>= 1)
			(*burst)++;
		*lwm = fifo.graphics_lwm >> 3;
	}
}
/*
 * nv10CalcArbitration - compute the CRT FIFO low-water mark and burst
 * size for NV10-class memory arbitration.
 *
 * Models the worst-case refill latency (in microseconds) of a CRT FIFO
 * request through the memory subsystem, then sizes the low-water mark
 * so the FIFO cannot underrun at the pixel drain rate.  Iterates,
 * reducing either the minimum latency margin (min_mclk_extra) or the
 * burst size (cbs) until the watermark fits the 1024-byte FIFO;
 * fifo->valid is cleared on iterations that do not fit.
 *
 * The per-stage clock counts are hardware-tuned magic numbers
 * inherited from the XFree86 'nv' driver; do not "simplify" them.
 */
static void nv10CalcArbitration(nv10_fifo_info * fifo, nv10_sim_state * arb)
{
	int data, pagemiss, width, video_enable, bpp;
	int nvclks, mclks, pclks, vpagemiss, crtpagemiss;
	int nvclk_fill;
	int found, mclk_extra, mclk_loop, cbs, m1;
	int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
	int us_m, us_m_min, us_n, us_p, crtc_drain_rate;
	int vus_m;
	int vpm_us, us_video, cpm_us, us_crt, clwm;
	int clwm_rnd_down;
	int m2us, us_pipe_min, p1clk, p2;
	int min_mclk_extra;
	int us_min_mclk_extra;

	fifo->valid = 1;
	pclk_freq = arb->pclk_khz;	/* freq in KHz */
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	width = arb->memory_width / 64;	/* bus width in 64-bit units */
	video_enable = arb->enable_video;
	bpp = arb->pix_bpp;
	mp_enable = arb->enable_mp;
	clwm = 0;
	cbs = 512;		/* CRT burst size (bytes) */
	pclks = 4;		/* lwm detect. */
	nvclks = 3;		/* lwm -> sync. */
	nvclks += 2;		/* fbi bus cycles (1 req + 1 busy) */
	/* 2 edge sync.  may be very close to edge so just put one. */
	mclks = 1;
	mclks += 1;		/* arb_hp_req */
	mclks += 5;		/* ap_hp_req tiling pipeline */
	mclks += 2;		/* tc_req latency fifo */
	mclks += 2;		/* fb_cas_n_ memory request to fbio block */
	mclks += 7;		/* sm_d_rdv data returned from fbio block */
	/* fb.rd.d.Put_gc   need to accumulate 256 bits for read */
	if (arb->memory_type == 0)
		if (arb->memory_width == 64)	/* 64 bit bus */
			mclks += 4;
		else
			mclks += 2;
	else if (arb->memory_width == 64)	/* 64 bit bus */
		mclks += 2;
	else
		mclks += 1;
	if ((!video_enable) && (arb->memory_width == 128)) {
		mclk_extra = (bpp == 32) ? 31 : 42;	/* Margin of error */
		min_mclk_extra = 17;
	} else {
		mclk_extra = (bpp == 32) ? 8 : 4;	/* Margin of error */
		/* mclk_extra = 4; *//* Margin of error */
		min_mclk_extra = 18;
	}
	/* 2 edge sync.  may be very close to edge so just put one. */
	nvclks += 1;
	nvclks += 1;		/* fbi_d_rdv_n */
	nvclks += 1;		/* Fbi_d_rdata */
	nvclks += 1;		/* crtfifo load */
	if (mp_enable)
		mclks += 4;	/* Mp can get in with a burst of 8. */
	/* Extra clocks determined by heuristics */
	nvclks += 0;
	pclks += 0;
	found = 0;
	while (found != 1) {
		fifo->valid = 1;
		found = 1;
		mclk_loop = mclks + mclk_extra;
		/* Mclk latency in us */
		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
		/* Minimum Mclk latency in us */
		us_m_min = mclks * 1000 * 1000 / mclk_freq;
		us_min_mclk_extra = min_mclk_extra * 1000 * 1000 / mclk_freq;
		/* nvclk latency in us */
		us_n = nvclks * 1000 * 1000 / nvclk_freq;
		/* pclk latency in us */
		us_p = pclks * 1000 * 1000 / pclk_freq;
		us_pipe_min = us_m_min + us_n + us_p;
		/* Mclk latency in us */
		vus_m = mclk_loop * 1000 * 1000 / mclk_freq;
		if (video_enable) {
			crtc_drain_rate = pclk_freq * bpp / 8;	/* MB/s */
			vpagemiss = 1;	/* self generating page miss */
			vpagemiss += 1;	/* One higher priority before */
			crtpagemiss = 2;	/* self generating page miss */
			if (mp_enable)
				crtpagemiss += 1;	/* if MA0 conflict */
			vpm_us =
			    (vpagemiss * pagemiss) * 1000 * 1000 / mclk_freq;
			/* Video has separate read return path */
			us_video = vpm_us + vus_m;
			cpm_us =
			    crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
			/* Wait for video */
			us_crt = us_video
			    + cpm_us	/* CRT Page miss */
			    + us_m + us_n + us_p	/* other latency */
			    ;
			clwm = us_crt * crtc_drain_rate / (1000 * 1000);
			/* fixed point <= float_point - 1.  Fixes that */
			clwm++;
		} else {
			/* bpp * pclk/8 */
			crtc_drain_rate = pclk_freq * bpp / 8;
			crtpagemiss = 1;	/* self generating page miss */
			crtpagemiss += 1;	/* MA0 page miss */
			if (mp_enable)
				crtpagemiss += 1;	/* if MA0 conflict */
			cpm_us =
			    crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
			us_crt = cpm_us + us_m + us_n + us_p;
			clwm = us_crt * crtc_drain_rate / (1000 * 1000);
			/* fixed point <= float_point - 1.  Fixes that */
			clwm++;
			/* Finally, a heuristic check when width == 64 bits */
			if (width == 1) {
				nvclk_fill = nvclk_freq * 8;
				if (crtc_drain_rate * 100 >= nvclk_fill * 102)
					/*Large number to fail */
					clwm = 0xfff;
				else if (crtc_drain_rate * 100 >=
					 nvclk_fill * 98) {
					clwm = 1024;
					cbs = 512;
				}
			}
		}
		/*
		   Overfill check:
		 */
		clwm_rnd_down = ((int)clwm / 8) * 8;	/* round up to 8-byte multiple */
		if (clwm_rnd_down < clwm)
			clwm += 8;
		m1 = clwm + cbs - 1024;	/* Amount of overfill */
		m2us = us_pipe_min + us_min_mclk_extra;
		/* pclk cycles to drain */
		p1clk = m2us * pclk_freq / (1000 * 1000);
		p2 = p1clk * bpp / 8;	/* bytes drained. */
		if ((p2 < m1) && (m1 > 0)) {
			fifo->valid = 0;
			found = 0;
			if (min_mclk_extra == 0) {
				if (cbs <= 32) {
					/* Can't adjust anymore! */
					found = 1;
				} else {
					/* reduce the burst size */
					cbs = cbs / 2;
				}
			} else {
				min_mclk_extra--;
			}
		} else {
			if (clwm > 1023) {	/* Have some margin */
				fifo->valid = 0;
				found = 0;
				if (min_mclk_extra == 0)
					/* Can't adjust anymore! */
					found = 1;
				else
					min_mclk_extra--;
			}
		}
		if (clwm < (1024 - cbs + 8))
			clwm = 1024 - cbs + 8;
		data = (int)(clwm);
		/* printf("CRT LWM: %f bytes, prog: 0x%x, bs: 256\n",
		   clwm, data ); */
		fifo->graphics_lwm = data;
		fifo->graphics_burst_size = cbs;
		fifo->video_lwm = 1024;
		fifo->video_burst_size = 512;
	}
}
/*
 * Gather the NV10 arbitration model inputs from the current clocks and
 * framebuffer config (video overlay accounted for), run
 * nv10CalcArbitration(), and encode the result:
 * *burst = log2(graphics_burst_size / 32), *lwm = graphics_lwm / 8.
 * Outputs are untouched when the calculation does not converge.
 */
static void nv10UpdateArbitrationSettings(unsigned VClk,
					  unsigned pixelDepth,
					  unsigned *burst,
					  unsigned *lwm,
					  struct nvidia_par *par)
{
	nv10_fifo_info fifo;
	nv10_sim_state sim;
	unsigned int mclk, nvclk, fb_cfg1;

	nvGetClocks(par, &mclk, &nvclk);
	fb_cfg1 = NV_RD32(par->PFB, 0x0204);

	sim.pix_bpp = (char)pixelDepth;
	sim.enable_video = 1;
	sim.enable_mp = 0;
	sim.memory_type = (NV_RD32(par->PFB, 0x0200) & 0x01) ? 1 : 0;
	/* strap bit 4 selects a 128-bit vs 64-bit memory bus */
	sim.memory_width =
	    (NV_RD32(par->PEXTDEV, 0x0000) & 0x10) ? 128 : 64;
	sim.mem_latency = (char)fb_cfg1 & 0x0F;
	sim.mem_aligned = 1;
	sim.mem_page_miss =
	    (char)(((fb_cfg1 >> 4) & 0x0F) + ((fb_cfg1 >> 31) & 0x01));
	sim.gr_during_vid = 0;
	sim.pclk_khz = VClk;
	sim.mclk_khz = mclk;
	sim.nvclk_khz = nvclk;

	nv10CalcArbitration(&fifo, &sim);
	if (fifo.valid) {
		int b = fifo.graphics_burst_size >> 4;

		*burst = 0;
		while (b >>= 1)
			(*burst)++;
		*lwm = fifo.graphics_lwm >> 3;
	}
}
/*
 * NV30 arbitration uses fixed values: a 2048-byte FIFO with a 512-byte
 * burst, giving a 1536-byte low-water mark.  Encoded the same way as
 * the other Update routines: *burst = log2(512 / 32) = 4,
 * *lwm = 1536 / 8 = 192.
 */
static void nv30UpdateArbitrationSettings (
	struct nvidia_par *par,
	unsigned int *burst,
	unsigned int *lwm
)
{
	unsigned int mclk, nvclk;
	unsigned int fifo_size = 2048;
	unsigned int burst_size = 512;
	unsigned int graphics_lwm = fifo_size - burst_size;

	/* clocks are read but do not enter the fixed NV30 computation */
	nvGetClocks(par, &mclk, &nvclk);

	*burst = 0;
	burst_size >>= 5;
	while (burst_size >>= 1)
		(*burst)++;

	*lwm = graphics_lwm >> 3;
}
/*
 * nForceUpdateArbitrationSettings - arbitration setup for nForce/
 * nForce2 integrated GPUs, which use system memory.
 *
 * Unlike the discrete-GPU paths, the memory clock and memory type are
 * read from the chipset's PCI configuration space (bus 0 devices 1, 2,
 * 3 and 5 — presumably the nForce host/memory-controller functions;
 * confirm against chipset docs).  The core clock is still decoded from
 * the GPU's own PLL register.  Results are encoded as in
 * nv10UpdateArbitrationSettings; outputs are untouched when the
 * calculation does not converge.
 */
static void nForceUpdateArbitrationSettings(unsigned VClk,
					    unsigned pixelDepth,
					    unsigned *burst,
					    unsigned *lwm,
					    struct nvidia_par *par)
{
	nv10_fifo_info fifo_data;
	nv10_sim_state sim_data;
	unsigned int M, N, P, pll, MClk, NVClk, memctrl;
	struct pci_dev *dev;

	if ((par->Chipset & 0x0FF0) == 0x01A0) {
		/* nForce1: derive MClk from the post divider at 0:3 reg 0x6C */
		unsigned int uMClkPostDiv;
		dev = pci_get_bus_and_slot(0, 3);
		pci_read_config_dword(dev, 0x6C, &uMClkPostDiv);
		uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf;
		if (!uMClkPostDiv)
			uMClkPostDiv = 4;	/* guard against divide-by-zero */
		MClk = 400000 / uMClkPostDiv;	/* kHz */
	} else {
		/* nForce2: memory clock reported directly at 0:5 reg 0x4c (Hz) */
		dev = pci_get_bus_and_slot(0, 5);
		pci_read_config_dword(dev, 0x4c, &MClk);
		MClk /= 1000;	/* -> kHz */
	}
	pci_dev_put(dev);
	/* core clock from the GPU's own single-stage PLL */
	pll = NV_RD32(par->PRAMDAC0, 0x0500);
	M = (pll >> 0) & 0xFF;
	N = (pll >> 8) & 0xFF;
	P = (pll >> 16) & 0x0F;
	NVClk = (N * par->CrystalFreqKHz / M) >> P;
	sim_data.pix_bpp = (char)pixelDepth;
	sim_data.enable_video = 0;
	sim_data.enable_mp = 0;
	/* memory type bit from 0:1 reg 0x7C */
	dev = pci_get_bus_and_slot(0, 1);
	pci_read_config_dword(dev, 0x7C, &sim_data.memory_type);
	pci_dev_put(dev);
	sim_data.memory_type = (sim_data.memory_type >> 12) & 1;
	sim_data.memory_width = 64;
	/* memory controller device ID (upper 16 bits of reg 0) */
	dev = pci_get_bus_and_slot(0, 3);
	pci_read_config_dword(dev, 0, &memctrl);
	pci_dev_put(dev);
	memctrl >>= 16;
	if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) {
		/* warn if the DIMM rows are not split evenly across banks */
		u32 dimm[3];
		dev = pci_get_bus_and_slot(0, 2);
		pci_read_config_dword(dev, 0x40, &dimm[0]);
		dimm[0] = (dimm[0] >> 8) & 0x4f;
		pci_read_config_dword(dev, 0x44, &dimm[1]);
		dimm[1] = (dimm[1] >> 8) & 0x4f;
		pci_read_config_dword(dev, 0x48, &dimm[2]);
		dimm[2] = (dimm[2] >> 8) & 0x4f;
		if ((dimm[0] + dimm[1]) != dimm[2]) {
			printk("nvidiafb: your nForce DIMMs are not arranged "
			       "in optimal banks!\n");
		}
		pci_dev_put(dev);
	}
	/* fixed timing assumptions for the integrated controller */
	sim_data.mem_latency = 3;
	sim_data.mem_aligned = 1;
	sim_data.mem_page_miss = 10;
	sim_data.gr_during_vid = 0;
	sim_data.pclk_khz = VClk;
	sim_data.mclk_khz = MClk;
	sim_data.nvclk_khz = NVClk;
	nv10CalcArbitration(&fifo_data, &sim_data);
	if (fifo_data.valid) {
		int b = fifo_data.graphics_burst_size >> 4;
		*burst = 0;
		while (b >>= 1)
			(*burst)++;
		*lwm = fifo_data.graphics_lwm >> 3;
	}
}
/****************************************************************************\
* *
* RIVA Mode State Routines *
* *
\****************************************************************************/
/*
* Calculate the Video Clock parameters for the PLL.
*/
/*
 * Find the single-stage PLL coefficients (M, N, P) whose output
 * frequency, crystal * N / M >> P, is closest to clockIn (kHz).
 *
 * The VCO (clockIn << P) must land in 128-350 MHz, N must fit in
 * 8 bits, and M is restricted to a crystal-dependent window.  The
 * best match is written to *pllOut as (P << 16) | (N << 8) | M and
 * its actual frequency to *clockOut.
 */
static void CalcVClock(int clockIn,
		       int *clockOut, u32 * pllOut, struct nvidia_par *par)
{
	unsigned mLow, mHigh;
	unsigned bestDelta = 0xFFFFFFFF;
	unsigned target = (unsigned)clockIn;
	unsigned m, n, p;

	if (par->CrystalFreqKHz == 13500) {
		mLow = 7;
		mHigh = 13;
	} else {
		mLow = 8;
		mHigh = 14;
	}

	for (p = 0; p <= 4; p++) {
		unsigned vco = target << p;

		if (vco < 128000 || vco > 350000)
			continue;

		for (m = mLow; m <= mHigh; m++) {
			unsigned freq, delta;

			n = (vco * m) / par->CrystalFreqKHz;
			if (n > 255)
				continue;

			freq = ((par->CrystalFreqKHz * n) / m) >> p;
			delta = (freq > target) ? freq - target
						: target - freq;
			if (delta < bestDelta) {
				*pllOut = (p << 16) | (n << 8) | m;
				*clockOut = freq;
				bestDelta = delta;
			}
		}
	}
}
/*
 * Two-stage variant of CalcVClock: the second stage is fixed at x4
 * (*pllBOut = 0x80000401), so the first stage targets
 * (crystal * 4) * N / M >> P closest to clockIn (kHz).
 *
 * The VCO (clockIn << P) must land in 400-1000 MHz and N in [5, 255].
 * The best match is written to *pllOut as (P << 16) | (N << 8) | M and
 * its actual frequency to *clockOut.
 */
static void CalcVClock2Stage(int clockIn,
			     int *clockOut,
			     u32 * pllOut,
			     u32 * pllBOut, struct nvidia_par *par)
{
	unsigned bestDelta = 0xFFFFFFFF;
	unsigned target = (unsigned)clockIn;
	unsigned m, n, p;

	*pllBOut = 0x80000401;	/* fixed at x4 for now */

	for (p = 0; p <= 6; p++) {
		unsigned vco = target << p;

		if (vco < 400000 || vco > 1000000)
			continue;

		for (m = 1; m <= 13; m++) {
			unsigned freq, delta;

			n = (vco * m) / (par->CrystalFreqKHz << 2);
			if (n < 5 || n > 255)
				continue;

			freq = (((par->CrystalFreqKHz << 2) * n) / m) >> p;
			delta = (freq > target) ? freq - target
						: target - freq;
			if (delta < bestDelta) {
				*pllOut = (p << 16) | (n << 8) | m;
				*clockOut = freq;
				bestDelta = delta;
			}
		}
	}
}
/*
* Calculate extended mode parameters (SVGA) and save in a
* mode state structure.
*/
/*
 * NVCalcStateExt - compute the extended (SVGA) register values for a
 * video mode and store them in @state.
 *
 * @par:          driver private state (architecture, chipset, cursor
 *                offset, PLL style)
 * @state:        output register-value set
 * @bpp:          pixel format selector — 8, 15, 16 or 32, NOT bits per
 *                pixel (see comment below)
 * @width:        virtual width in pixels (sets the pitch via repaint0)
 * @hDisplaySize: visible horizontal resolution
 * @height:       mode height (stored only)
 * @dotClock:     requested pixel clock, passed to the PLL calculators
 * @flags:        fb_var flags; FB_VMODE_DOUBLE sets cursor doubling
 *
 * Picks the PLL coefficients, then fills arbitration, cursor, pllsel,
 * config, general and repaint values per architecture.
 */
void NVCalcStateExt(struct nvidia_par *par,
		    RIVA_HW_STATE * state,
		    int bpp,
		    int width,
		    int hDisplaySize, int height, int dotClock, int flags)
{
	int pixelDepth, VClk = 0;
	/*
	 * Save mode parameters.
	 */
	state->bpp = bpp;	/* this is not bitsPerPixel, it's 8,15,16,32 */
	state->width = width;
	state->height = height;
	/*
	 * Extended RIVA registers.
	 */
	pixelDepth = (bpp + 1) / 8;	/* bytes per pixel (15bpp -> 2) */
	if (par->twoStagePLL)
		CalcVClock2Stage(dotClock, &VClk, &state->pll, &state->pllB,
				 par);
	else
		CalcVClock(dotClock, &VClk, &state->pll, par);
	switch (par->Architecture) {
	case NV_ARCH_04:
		nv4UpdateArbitrationSettings(VClk,
					     pixelDepth * 8,
					     &(state->arbitration0),
					     &(state->arbitration1), par);
		state->cursor0 = 0x00;
		state->cursor1 = 0xbC;
		if (flags & FB_VMODE_DOUBLE)
			state->cursor1 |= 2;
		state->cursor2 = 0x00000000;
		state->pllsel = 0x10000700;
		state->config = 0x00001114;
		state->general = bpp == 16 ? 0x00101100 : 0x00100100;
		state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
		break;
	case NV_ARCH_40:
		if (!par->FlatPanel)
			state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
			    0xeffffeff;
		/* fallthrough */
	case NV_ARCH_10:
	case NV_ARCH_20:
	case NV_ARCH_30:
	default:
		if ((par->Chipset & 0xfff0) == 0x0240 ||
		    (par->Chipset & 0xfff0) == 0x03d0) {
			/* these chipsets use fixed arbitration values */
			state->arbitration0 = 256;
			state->arbitration1 = 0x0480;
		} else if (((par->Chipset & 0xffff) == 0x01A0) ||
			   ((par->Chipset & 0xffff) == 0x01f0)) {
			/* nForce IGPs: arbitration depends on the system
			 * memory controller, probed via PCI config space */
			nForceUpdateArbitrationSettings(VClk,
							pixelDepth * 8,
							&(state->arbitration0),
							&(state->arbitration1),
							par);
		} else if (par->Architecture < NV_ARCH_30) {
			nv10UpdateArbitrationSettings(VClk,
						      pixelDepth * 8,
						      &(state->arbitration0),
						      &(state->arbitration1),
						      par);
		} else {
			nv30UpdateArbitrationSettings(par,
						      &(state->arbitration0),
						      &(state->arbitration1));
		}
		/* cursor surface address split across three registers */
		state->cursor0 = 0x80 | (par->CursorStart >> 17);
		state->cursor1 = (par->CursorStart >> 11) << 2;
		state->cursor2 = par->CursorStart >> 24;
		if (flags & FB_VMODE_DOUBLE)
			state->cursor1 |= 2;
		state->pllsel = 0x10000700;
		state->config = NV_RD32(par->PFB, 0x00000200);
		state->general = bpp == 16 ? 0x00101100 : 0x00100100;
		state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
		break;
	}
	if (bpp != 8)		/* DirectColor */
		state->general |= 0x00000030;
	state->repaint0 = (((width / 8) * pixelDepth) & 0x700) >> 3;
	state->pixel = (pixelDepth > 2) ? 3 : pixelDepth;
}
void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
{
int i, j;
NV_WR32(par->PMC, 0x0140, 0x00000000);
NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
NV_WR32(par->PMC, 0x0200, 0xFFFFFFFF);
NV_WR32(par->PTIMER, 0x0200 * 4, 0x00000008);
NV_WR32(par->PTIMER, 0x0210 * 4, 0x00000003);
NV_WR32(par->PTIMER, 0x0140 * 4, 0x00000000);
NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
if (par->Architecture == NV_ARCH_04) {
if (state)
NV_WR32(par->PFB, 0x0200, state->config);
} else if ((par->Architecture < NV_ARCH_40) ||
(par->Chipset & 0xfff0) == 0x0040) {
for (i = 0; i < 8; i++) {
NV_WR32(par->PFB, 0x0240 + (i * 0x10), 0);
NV_WR32(par->PFB, 0x0244 + (i * 0x10),
par->FbMapSize - 1);
}
} else {
int regions = 12;
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
((par->Chipset & 0xfff0) == 0x0290) ||
((par->Chipset & 0xfff0) == 0x0390) ||
((par->Chipset & 0xfff0) == 0x03D0))
regions = 15;
for(i = 0; i < regions; i++) {
NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
NV_WR32(par->PFB, 0x0604 + (i * 0x10),
par->FbMapSize - 1);
}
}
if (par->Architecture >= NV_ARCH_40) {
NV_WR32(par->PRAMIN, 0x0000 * 4, 0x80000010);
NV_WR32(par->PRAMIN, 0x0001 * 4, 0x00101202);
NV_WR32(par->PRAMIN, 0x0002 * 4, 0x80000011);
NV_WR32(par->PRAMIN, 0x0003 * 4, 0x00101204);
NV_WR32(par->PRAMIN, 0x0004 * 4, 0x80000012);
NV_WR32(par->PRAMIN, 0x0005 * 4, 0x00101206);
NV_WR32(par->PRAMIN, 0x0006 * 4, 0x80000013);
NV_WR32(par->PRAMIN, 0x0007 * 4, 0x00101208);
NV_WR32(par->PRAMIN, 0x0008 * 4, 0x80000014);
NV_WR32(par->PRAMIN, 0x0009 * 4, 0x0010120A);
NV_WR32(par->PRAMIN, 0x000A * 4, 0x80000015);
NV_WR32(par->PRAMIN, 0x000B * 4, 0x0010120C);
NV_WR32(par->PRAMIN, 0x000C * 4, 0x80000016);
NV_WR32(par->PRAMIN, 0x000D * 4, 0x0010120E);
NV_WR32(par->PRAMIN, 0x000E * 4, 0x80000017);
NV_WR32(par->PRAMIN, 0x000F * 4, 0x00101210);
NV_WR32(par->PRAMIN, 0x0800 * 4, 0x00003000);
NV_WR32(par->PRAMIN, 0x0801 * 4, par->FbMapSize - 1);
NV_WR32(par->PRAMIN, 0x0802 * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x0808 * 4, 0x02080062);
NV_WR32(par->PRAMIN, 0x0809 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080A * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x080B * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x080C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0810 * 4, 0x02080043);
NV_WR32(par->PRAMIN, 0x0811 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0812 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0813 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0814 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0815 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x02080044);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x02000000);
NV_WR32(par->PRAMIN, 0x081A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0820 * 4, 0x02080019);
NV_WR32(par->PRAMIN, 0x0821 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0822 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0823 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0824 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0825 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0828 * 4, 0x020A005C);
NV_WR32(par->PRAMIN, 0x0829 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x082D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0830 * 4, 0x0208009F);
NV_WR32(par->PRAMIN, 0x0831 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0832 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0833 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0834 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0835 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0838 * 4, 0x0208004A);
NV_WR32(par->PRAMIN, 0x0839 * 4, 0x02000000);
NV_WR32(par->PRAMIN, 0x083A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083C * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x083D * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0840 * 4, 0x02080077);
NV_WR32(par->PRAMIN, 0x0841 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0842 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0843 * 4, 0x00001200);
NV_WR32(par->PRAMIN, 0x0844 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0845 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x084C * 4, 0x00003002);
NV_WR32(par->PRAMIN, 0x084D * 4, 0x00007FFF);
NV_WR32(par->PRAMIN, 0x084E * 4,
par->FbUsableSize | 0x00000002);
#ifdef __BIG_ENDIAN
NV_WR32(par->PRAMIN, 0x080A * 4,
NV_RD32(par->PRAMIN, 0x080A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0812 * 4,
NV_RD32(par->PRAMIN, 0x0812 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x081A * 4,
NV_RD32(par->PRAMIN, 0x081A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0822 * 4,
NV_RD32(par->PRAMIN, 0x0822 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x082A * 4,
NV_RD32(par->PRAMIN, 0x082A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0832 * 4,
NV_RD32(par->PRAMIN, 0x0832 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x083A * 4,
NV_RD32(par->PRAMIN, 0x083A * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0842 * 4,
NV_RD32(par->PRAMIN, 0x0842 * 4) | 0x01000000);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x01000000);
NV_WR32(par->PRAMIN, 0x0839 * 4, 0x01000000);
#endif
} else {
NV_WR32(par->PRAMIN, 0x0000 * 4, 0x80000010);
NV_WR32(par->PRAMIN, 0x0001 * 4, 0x80011201);
NV_WR32(par->PRAMIN, 0x0002 * 4, 0x80000011);
NV_WR32(par->PRAMIN, 0x0003 * 4, 0x80011202);
NV_WR32(par->PRAMIN, 0x0004 * 4, 0x80000012);
NV_WR32(par->PRAMIN, 0x0005 * 4, 0x80011203);
NV_WR32(par->PRAMIN, 0x0006 * 4, 0x80000013);
NV_WR32(par->PRAMIN, 0x0007 * 4, 0x80011204);
NV_WR32(par->PRAMIN, 0x0008 * 4, 0x80000014);
NV_WR32(par->PRAMIN, 0x0009 * 4, 0x80011205);
NV_WR32(par->PRAMIN, 0x000A * 4, 0x80000015);
NV_WR32(par->PRAMIN, 0x000B * 4, 0x80011206);
NV_WR32(par->PRAMIN, 0x000C * 4, 0x80000016);
NV_WR32(par->PRAMIN, 0x000D * 4, 0x80011207);
NV_WR32(par->PRAMIN, 0x000E * 4, 0x80000017);
NV_WR32(par->PRAMIN, 0x000F * 4, 0x80011208);
NV_WR32(par->PRAMIN, 0x0800 * 4, 0x00003000);
NV_WR32(par->PRAMIN, 0x0801 * 4, par->FbMapSize - 1);
NV_WR32(par->PRAMIN, 0x0802 * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x0803 * 4, 0x00000002);
if (par->Architecture >= NV_ARCH_10)
NV_WR32(par->PRAMIN, 0x0804 * 4, 0x01008062);
else
NV_WR32(par->PRAMIN, 0x0804 * 4, 0x01008042);
NV_WR32(par->PRAMIN, 0x0805 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0806 * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x0807 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0808 * 4, 0x01008043);
NV_WR32(par->PRAMIN, 0x0809 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080A * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080C * 4, 0x01008044);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x080E * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x080F * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0810 * 4, 0x01008019);
NV_WR32(par->PRAMIN, 0x0811 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0812 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0813 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0814 * 4, 0x0100A05C);
NV_WR32(par->PRAMIN, 0x0815 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0816 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0817 * 4, 0x00000000);
if (par->WaitVSyncPossible)
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x0100809F);
else
NV_WR32(par->PRAMIN, 0x0818 * 4, 0x0100805F);
NV_WR32(par->PRAMIN, 0x0819 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081A * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x081B * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081C * 4, 0x0100804A);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000002);
NV_WR32(par->PRAMIN, 0x081E * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x081F * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0820 * 4, 0x01018077);
NV_WR32(par->PRAMIN, 0x0821 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0822 * 4, 0x12001200);
NV_WR32(par->PRAMIN, 0x0823 * 4, 0x00000000);
NV_WR32(par->PRAMIN, 0x0824 * 4, 0x00003002);
NV_WR32(par->PRAMIN, 0x0825 * 4, 0x00007FFF);
NV_WR32(par->PRAMIN, 0x0826 * 4,
par->FbUsableSize | 0x00000002);
NV_WR32(par->PRAMIN, 0x0827 * 4, 0x00000002);
#ifdef __BIG_ENDIAN
NV_WR32(par->PRAMIN, 0x0804 * 4,
NV_RD32(par->PRAMIN, 0x0804 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0808 * 4,
NV_RD32(par->PRAMIN, 0x0808 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x080C * 4,
NV_RD32(par->PRAMIN, 0x080C * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0810 * 4,
NV_RD32(par->PRAMIN, 0x0810 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0814 * 4,
NV_RD32(par->PRAMIN, 0x0814 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0818 * 4,
NV_RD32(par->PRAMIN, 0x0818 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x081C * 4,
NV_RD32(par->PRAMIN, 0x081C * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x0820 * 4,
NV_RD32(par->PRAMIN, 0x0820 * 4) | 0x00080000);
NV_WR32(par->PRAMIN, 0x080D * 4, 0x00000001);
NV_WR32(par->PRAMIN, 0x081D * 4, 0x00000001);
#endif
}
if (par->Architecture < NV_ARCH_10) {
if ((par->Chipset & 0x0fff) == 0x0020) {
NV_WR32(par->PRAMIN, 0x0824 * 4,
NV_RD32(par->PRAMIN, 0x0824 * 4) | 0x00020000);
NV_WR32(par->PRAMIN, 0x0826 * 4,
NV_RD32(par->PRAMIN,
0x0826 * 4) + par->FbAddress);
}
NV_WR32(par->PGRAPH, 0x0080, 0x000001FF);
NV_WR32(par->PGRAPH, 0x0080, 0x1230C000);
NV_WR32(par->PGRAPH, 0x0084, 0x72111101);
NV_WR32(par->PGRAPH, 0x0088, 0x11D5F071);
NV_WR32(par->PGRAPH, 0x008C, 0x0004FF31);
NV_WR32(par->PGRAPH, 0x008C, 0x4004FF31);
NV_WR32(par->PGRAPH, 0x0140, 0x00000000);
NV_WR32(par->PGRAPH, 0x0100, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0170, 0x10010100);
NV_WR32(par->PGRAPH, 0x0710, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0720, 0x00000001);
NV_WR32(par->PGRAPH, 0x0810, 0x00000000);
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
NV_WR32(par->PGRAPH, 0x0080, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0080, 0x00000000);
NV_WR32(par->PGRAPH, 0x0140, 0x00000000);
NV_WR32(par->PGRAPH, 0x0100, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0144, 0x10010100);
NV_WR32(par->PGRAPH, 0x0714, 0xFFFFFFFF);
NV_WR32(par->PGRAPH, 0x0720, 0x00000001);
NV_WR32(par->PGRAPH, 0x0710,
NV_RD32(par->PGRAPH, 0x0710) & 0x0007ff00);
NV_WR32(par->PGRAPH, 0x0710,
NV_RD32(par->PGRAPH, 0x0710) | 0x00020100);
if (par->Architecture == NV_ARCH_10) {
NV_WR32(par->PGRAPH, 0x0084, 0x00118700);
NV_WR32(par->PGRAPH, 0x0088, 0x24E00810);
NV_WR32(par->PGRAPH, 0x008C, 0x55DE0030);
for (i = 0; i < 32; i++)
NV_WR32(&par->PGRAPH[(0x0B00 / 4) + i], 0,
NV_RD32(&par->PFB[(0x0240 / 4) + i],
0));
NV_WR32(par->PGRAPH, 0x640, 0);
NV_WR32(par->PGRAPH, 0x644, 0);
NV_WR32(par->PGRAPH, 0x684, par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x688, par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0810, 0x00000000);
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
if (par->Architecture >= NV_ARCH_40) {
NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
NV_WR32(par->PGRAPH, 0x0bc4,
NV_RD32(par->PGRAPH, 0x0bc4) |
0x00008000);
j = NV_RD32(par->REGS, 0x1540) & 0xff;
if (j) {
for (i = 0; !(j & 1); j >>= 1, i++);
NV_WR32(par->PGRAPH, 0x5000, i);
}
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09b0,
0x83280fff);
NV_WR32(par->PGRAPH, 0x09b4,
0x000000a0);
} else {
NV_WR32(par->PGRAPH, 0x0820,
0x83280eff);
NV_WR32(par->PGRAPH, 0x0824,
0x000000a0);
}
switch (par->Chipset & 0xfff0) {
case 0x0040:
case 0x0210:
NV_WR32(par->PGRAPH, 0x09b8,
0x0078e366);
NV_WR32(par->PGRAPH, 0x09bc,
0x0000014c);
NV_WR32(par->PFB, 0x033C,
NV_RD32(par->PFB, 0x33C) &
0xffff7fff);
break;
case 0x00C0:
case 0x0120:
NV_WR32(par->PGRAPH, 0x0828,
0x007596ff);
NV_WR32(par->PGRAPH, 0x082C,
0x00000108);
break;
case 0x0160:
case 0x01D0:
case 0x0240:
case 0x03D0:
NV_WR32(par->PMC, 0x1700,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PMC, 0x1704, 0);
NV_WR32(par->PMC, 0x1708, 0);
NV_WR32(par->PMC, 0x170C,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC,
0x0608) | 0x00100000);
break;
case 0x0140:
NV_WR32(par->PGRAPH, 0x0828,
0x0072cb77);
NV_WR32(par->PGRAPH, 0x082C,
0x00000108);
break;
case 0x0220:
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
break;
case 0x0090:
case 0x0290:
case 0x0390:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
NV_WR32(par->PGRAPH, 0x0828,
0x07830610);
NV_WR32(par->PGRAPH, 0x082C,
0x0000016A);
break;
default:
break;
};
NV_WR32(par->PGRAPH, 0x0b38, 0x2ffff800);
NV_WR32(par->PGRAPH, 0x0b3c, 0x00006000);
NV_WR32(par->PGRAPH, 0x032C, 0x01000000);
NV_WR32(par->PGRAPH, 0x0220, 0x00001200);
} else if (par->Architecture == NV_ARCH_30) {
NV_WR32(par->PGRAPH, 0x0084, 0x40108700);
NV_WR32(par->PGRAPH, 0x0890, 0x00140000);
NV_WR32(par->PGRAPH, 0x008C, 0xf00e0431);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0xf04b1f36);
NV_WR32(par->PGRAPH, 0x0B80, 0x1002d888);
NV_WR32(par->PGRAPH, 0x0B88, 0x62ff007f);
} else {
NV_WR32(par->PGRAPH, 0x0084, 0x00118700);
NV_WR32(par->PGRAPH, 0x008C, 0xF20E0431);
NV_WR32(par->PGRAPH, 0x0090, 0x00000000);
NV_WR32(par->PGRAPH, 0x009C, 0x00000040);
if ((par->Chipset & 0x0ff0) >= 0x0250) {
NV_WR32(par->PGRAPH, 0x0890,
0x00080000);
NV_WR32(par->PGRAPH, 0x0610,
0x304B1FB6);
NV_WR32(par->PGRAPH, 0x0B80,
0x18B82880);
NV_WR32(par->PGRAPH, 0x0B84,
0x44000000);
NV_WR32(par->PGRAPH, 0x0098,
0x40000080);
NV_WR32(par->PGRAPH, 0x0B88,
0x000000ff);
} else {
NV_WR32(par->PGRAPH, 0x0880,
0x00080000);
NV_WR32(par->PGRAPH, 0x0094,
0x00000005);
NV_WR32(par->PGRAPH, 0x0B80,
0x45CAA208);
NV_WR32(par->PGRAPH, 0x0B84,
0x24000000);
NV_WR32(par->PGRAPH, 0x0098,
0x00000040);
NV_WR32(par->PGRAPH, 0x0750,
0x00E00038);
NV_WR32(par->PGRAPH, 0x0754,
0x00000030);
NV_WR32(par->PGRAPH, 0x0750,
0x00E10038);
NV_WR32(par->PGRAPH, 0x0754,
0x00000030);
}
}
if ((par->Architecture < NV_ARCH_40) ||
((par->Chipset & 0xfff0) == 0x0040)) {
for (i = 0; i < 32; i++) {
NV_WR32(par->PGRAPH, 0x0900 + i*4,
NV_RD32(par->PFB, 0x0240 +i*4));
NV_WR32(par->PGRAPH, 0x6900 + i*4,
NV_RD32(par->PFB, 0x0240 +i*4));
}
} else {
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
((par->Chipset & 0xfff0) == 0x0290) ||
((par->Chipset & 0xfff0) == 0x0390) ||
((par->Chipset & 0xfff0) == 0x03D0)) {
for (i = 0; i < 60; i++) {
NV_WR32(par->PGRAPH,
0x0D00 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
NV_WR32(par->PGRAPH,
0x6900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
}
} else {
for (i = 0; i < 48; i++) {
NV_WR32(par->PGRAPH,
0x0900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
if(((par->Chipset & 0xfff0)
!= 0x0160) &&
((par->Chipset & 0xfff0)
!= 0x0220) &&
((par->Chipset & 0xfff0)
!= 0x240))
NV_WR32(par->PGRAPH,
0x6900 + i*4,
NV_RD32(par->PFB,
0x0600 + i*4));
}
}
}
if (par->Architecture >= NV_ARCH_40) {
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x69A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x69A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0820, 0);
NV_WR32(par->PGRAPH, 0x0824, 0);
NV_WR32(par->PGRAPH, 0x0864,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0868,
par->FbMapSize - 1);
} else {
if ((par->Chipset & 0xfff0) == 0x0090 ||
(par->Chipset & 0xfff0) == 0x01D0 ||
(par->Chipset & 0xfff0) == 0x0290 ||
(par->Chipset & 0xfff0) == 0x0390) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0DF4,
NV_RD32(par->PFB, 0x0204));
} else {
NV_WR32(par->PGRAPH, 0x09F0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09F4,
NV_RD32(par->PFB, 0x0204));
}
NV_WR32(par->PGRAPH, 0x69F0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x69F4,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0840, 0);
NV_WR32(par->PGRAPH, 0x0844, 0);
NV_WR32(par->PGRAPH, 0x08a0,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x08a4,
par->FbMapSize - 1);
}
} else {
NV_WR32(par->PGRAPH, 0x09A4,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x09A8,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0750, 0x00EA0000);
NV_WR32(par->PGRAPH, 0x0754,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0750, 0x00EA0004);
NV_WR32(par->PGRAPH, 0x0754,
NV_RD32(par->PFB, 0x0204));
NV_WR32(par->PGRAPH, 0x0820, 0);
NV_WR32(par->PGRAPH, 0x0824, 0);
NV_WR32(par->PGRAPH, 0x0864,
par->FbMapSize - 1);
NV_WR32(par->PGRAPH, 0x0868,
par->FbMapSize - 1);
}
NV_WR32(par->PGRAPH, 0x0B20, 0x00000000);
NV_WR32(par->PGRAPH, 0x0B04, 0xFFFFFFFF);
}
}
NV_WR32(par->PGRAPH, 0x053C, 0);
NV_WR32(par->PGRAPH, 0x0540, 0);
NV_WR32(par->PGRAPH, 0x0544, 0x00007FFF);
NV_WR32(par->PGRAPH, 0x0548, 0x00007FFF);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0141 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0480 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000000);
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PFIFO, 0x0481 * 4, 0x00010000);
else
NV_WR32(par->PFIFO, 0x0481 * 4, 0x00000100);
NV_WR32(par->PFIFO, 0x0490 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0491 * 4, 0x00000000);
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PFIFO, 0x048B * 4, 0x00001213);
else
NV_WR32(par->PFIFO, 0x048B * 4, 0x00001209);
NV_WR32(par->PFIFO, 0x0400 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0414 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0084 * 4, 0x03000100);
NV_WR32(par->PFIFO, 0x0085 * 4, 0x00000110);
NV_WR32(par->PFIFO, 0x0086 * 4, 0x00000112);
NV_WR32(par->PFIFO, 0x0143 * 4, 0x0000FFFF);
NV_WR32(par->PFIFO, 0x0496 * 4, 0x0000FFFF);
NV_WR32(par->PFIFO, 0x0050 * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x0040 * 4, 0xFFFFFFFF);
NV_WR32(par->PFIFO, 0x0415 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x048C * 4, 0x00000000);
NV_WR32(par->PFIFO, 0x04A0 * 4, 0x00000000);
#ifdef __BIG_ENDIAN
NV_WR32(par->PFIFO, 0x0489 * 4, 0x800F0078);
#else
NV_WR32(par->PFIFO, 0x0489 * 4, 0x000F0078);
#endif
NV_WR32(par->PFIFO, 0x0488 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0480 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
if (!state) {
par->CurrentState = NULL;
return;
}
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
NV_WR32(par->PCRTC0, 0x0860, state->head);
NV_WR32(par->PCRTC0, 0x2860, state->head2);
}
NV_WR32(par->PRAMDAC, 0x0404, NV_RD32(par->PRAMDAC, 0x0404) |
(1 << 25));
NV_WR32(par->PMC, 0x8704, 1);
NV_WR32(par->PMC, 0x8140, 0);
NV_WR32(par->PMC, 0x8920, 0);
NV_WR32(par->PMC, 0x8924, 0);
NV_WR32(par->PMC, 0x8908, par->FbMapSize - 1);
NV_WR32(par->PMC, 0x890C, par->FbMapSize - 1);
NV_WR32(par->PMC, 0x1588, 0);
NV_WR32(par->PCRTC, 0x0810, state->cursorConfig);
NV_WR32(par->PCRTC, 0x0830, state->displayV - 3);
NV_WR32(par->PCRTC, 0x0834, state->displayV - 1);
if (par->FlatPanel) {
if ((par->Chipset & 0x0ff0) == 0x0110) {
NV_WR32(par->PRAMDAC, 0x0528, state->dither);
} else if (par->twoHeads) {
NV_WR32(par->PRAMDAC, 0x083C, state->dither);
}
VGA_WR08(par->PCIO, 0x03D4, 0x53);
VGA_WR08(par->PCIO, 0x03D5, state->timingH);
VGA_WR08(par->PCIO, 0x03D4, 0x54);
VGA_WR08(par->PCIO, 0x03D5, state->timingV);
VGA_WR08(par->PCIO, 0x03D4, 0x21);
VGA_WR08(par->PCIO, 0x03D5, 0xfa);
}
VGA_WR08(par->PCIO, 0x03D4, 0x41);
VGA_WR08(par->PCIO, 0x03D5, state->extra);
}
VGA_WR08(par->PCIO, 0x03D4, 0x19);
VGA_WR08(par->PCIO, 0x03D5, state->repaint0);
VGA_WR08(par->PCIO, 0x03D4, 0x1A);
VGA_WR08(par->PCIO, 0x03D5, state->repaint1);
VGA_WR08(par->PCIO, 0x03D4, 0x25);
VGA_WR08(par->PCIO, 0x03D5, state->screen);
VGA_WR08(par->PCIO, 0x03D4, 0x28);
VGA_WR08(par->PCIO, 0x03D5, state->pixel);
VGA_WR08(par->PCIO, 0x03D4, 0x2D);
VGA_WR08(par->PCIO, 0x03D5, state->horiz);
VGA_WR08(par->PCIO, 0x03D4, 0x1C);
VGA_WR08(par->PCIO, 0x03D5, state->fifo);
VGA_WR08(par->PCIO, 0x03D4, 0x1B);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration0);
VGA_WR08(par->PCIO, 0x03D4, 0x20);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration1);
if(par->Architecture >= NV_ARCH_30) {
VGA_WR08(par->PCIO, 0x03D4, 0x47);
VGA_WR08(par->PCIO, 0x03D5, state->arbitration1 >> 8);
}
VGA_WR08(par->PCIO, 0x03D4, 0x30);
VGA_WR08(par->PCIO, 0x03D5, state->cursor0);
VGA_WR08(par->PCIO, 0x03D4, 0x31);
VGA_WR08(par->PCIO, 0x03D5, state->cursor1);
VGA_WR08(par->PCIO, 0x03D4, 0x2F);
VGA_WR08(par->PCIO, 0x03D5, state->cursor2);
VGA_WR08(par->PCIO, 0x03D4, 0x39);
VGA_WR08(par->PCIO, 0x03D5, state->interlace);
if (!par->FlatPanel) {
if (par->Architecture >= NV_ARCH_40)
NV_WR32(par->PRAMDAC0, 0x0580, state->control);
NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
if (par->twoHeads)
NV_WR32(par->PRAMDAC0, 0x0520, state->vpll2);
if (par->twoStagePLL) {
NV_WR32(par->PRAMDAC0, 0x0578, state->vpllB);
NV_WR32(par->PRAMDAC0, 0x057C, state->vpll2B);
}
} else {
NV_WR32(par->PRAMDAC, 0x0848, state->scale);
NV_WR32(par->PRAMDAC, 0x0828, state->crtcSync +
par->PanelTweak);
}
NV_WR32(par->PRAMDAC, 0x0600, state->general);
NV_WR32(par->PCRTC, 0x0140, 0);
NV_WR32(par->PCRTC, 0x0100, 1);
par->CurrentState = state;
}
/*
 * NVUnloadStateExt - capture the chip's extended state into *state.
 *
 * Counterpart of the state-load path above: every register written there is
 * read back here so the mode can be saved and restored later.  Extended CRTC
 * registers are reached by writing the index to the CRTC index port (0x3D4)
 * and then reading the data port (0x3D5).
 */
void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) {
/* Extended CRTC registers: repaint, screen, pixel, fifo, arbitration. */
VGA_WR08(par->PCIO, 0x03D4, 0x19);
state->repaint0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1A);
state->repaint1 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x25);
state->screen = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x28);
state->pixel = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x2D);
state->horiz = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1C);
state->fifo = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x1B);
state->arbitration0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x20);
state->arbitration1 = VGA_RD08(par->PCIO, 0x03D5);
if(par->Architecture >= NV_ARCH_30) {
/* NV30+: bit 8 of arbitration1 lives in CRTC reg 0x47 (see load path). */
VGA_WR08(par->PCIO, 0x03D4, 0x47);
state->arbitration1 |= (VGA_RD08(par->PCIO, 0x03D5) & 1) << 8;
}
/* Hardware cursor position/config registers. */
VGA_WR08(par->PCIO, 0x03D4, 0x30);
state->cursor0 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x31);
state->cursor1 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x2F);
state->cursor2 = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x39);
state->interlace = VGA_RD08(par->PCIO, 0x03D5);
/* PLL and RAMDAC state; second-head/second-stage values only where the
 * hardware has them. */
state->vpll = NV_RD32(par->PRAMDAC0, 0x0508);
if (par->twoHeads)
state->vpll2 = NV_RD32(par->PRAMDAC0, 0x0520);
if (par->twoStagePLL) {
state->vpllB = NV_RD32(par->PRAMDAC0, 0x0578);
state->vpll2B = NV_RD32(par->PRAMDAC0, 0x057C);
}
state->pllsel = NV_RD32(par->PRAMDAC0, 0x050C);
state->general = NV_RD32(par->PRAMDAC, 0x0600);
state->scale = NV_RD32(par->PRAMDAC, 0x0848);
state->config = NV_RD32(par->PFB, 0x0200);
if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
state->control = NV_RD32(par->PRAMDAC0, 0x0580);
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
/* Per-head ownership/configuration. */
state->head = NV_RD32(par->PCRTC0, 0x0860);
state->head2 = NV_RD32(par->PCRTC0, 0x2860);
VGA_WR08(par->PCIO, 0x03D4, 0x44);
state->crtcOwner = VGA_RD08(par->PCIO, 0x03D5);
}
VGA_WR08(par->PCIO, 0x03D4, 0x41);
state->extra = VGA_RD08(par->PCIO, 0x03D5);
state->cursorConfig = NV_RD32(par->PCRTC, 0x0810);
/* Dither register location differs on the 0x0110 chipset. */
if ((par->Chipset & 0x0ff0) == 0x0110) {
state->dither = NV_RD32(par->PRAMDAC, 0x0528);
} else if (par->twoHeads) {
state->dither = NV_RD32(par->PRAMDAC, 0x083C);
}
if (par->FlatPanel) {
/* Flat-panel timing registers (CRTC 0x53/0x54). */
VGA_WR08(par->PCIO, 0x03D4, 0x53);
state->timingH = VGA_RD08(par->PCIO, 0x03D5);
VGA_WR08(par->PCIO, 0x03D4, 0x54);
state->timingV = VGA_RD08(par->PCIO, 0x03D5);
}
}
}
/*
 * NVSetStartAddress - program the display start offset.
 * Writes the framebuffer scanout start address into PCRTC register 0x800.
 */
void NVSetStartAddress(struct nvidia_par *par, u32 start)
{
NV_WR32(par->PCRTC, 0x800, start);
}
| gpl-2.0 |
mageec/mageec-gcc | gcc/testsuite/gfortran.dg/execute_command_line_1.f90 | 181 | 1531 | ! { dg-do compile }
!
! Check that we accept all variants of the EXECUTE_COMMAND_LINE intrinsic.
!
! Arguments are exercised cumulatively: WAIT, then EXITSTAT (i),
! then CMDSTAT (j), then CMDMSG (s).
integer :: i, j
character(len=100) :: s
s = ""
call execute_command_line ("ls *.f90")
print *, "-----------------------------"
call execute_command_line ("sleep 1 ; ls *.f90", .false.)
print *, "I'm not waiting"
call sleep(2)
print *, "-----------------------------"
call execute_command_line ("sleep 1 ; ls *.f90", .true.)
print *, "I did wait"
call sleep(2)
print *, "-----------------------------"
call execute_command_line ("ls *.f90", .true., i)
print *, "Exist status was: ", i
print *, "-----------------------------"
call execute_command_line ("ls *.doesnotexist", .true., i)
print *, "Exist status was: ", i
print *, "-----------------------------"
call execute_command_line ("echo foo", .true., i, j)
print *, "Exist status was: ", i
print *, "Command status was: ", j
print *, "-----------------------------"
call execute_command_line ("echo foo", .true., i, j, s)
print *, "Exist status was: ", i
print *, "Command status was: ", j
print *, "Error message is: ", trim(s)
print *, "-----------------------------"
call execute_command_line ("ls *.doesnotexist", .true., i, j, s)
print *, "Exist status was: ", i
print *, "Command status was: ", j
print *, "Error message is: ", trim(s)
print *, "-----------------------------"
call execute_command_line ("sleep 20", .false.)
print *, "Please kill me with ^C"
call sleep (10)
end
| gpl-2.0 |
kito-cheng/gcc | gcc/testsuite/gfortran.dg/volatile3.f90 | 181 | 1123 | ! { dg-do compile }
! { dg-shouldfail "Invalid use of VOLATILE" }
! Test whether volatile statements and attributes are
! properly error checked.
! PR fortran/29601
program volatile_test
implicit none
! Each declaration below must produce exactly the diagnostic named in its
! dejagnu annotation; this file is a compile-only error-checking fixture.
real, external, volatile :: foo ! { dg-error "VOLATILE attribute conflicts with EXTERNAL attribute" }
real, intrinsic, volatile :: sin ! { dg-error "VOLATILE attribute conflicts with INTRINSIC attribute" }
real, parameter, volatile :: r = 5.5 ! { dg-error "PARAMETER attribute conflicts with VOLATILE attribute" }
real :: l,m
real,volatile :: n
real, volatile,volatile :: r = 3. ! { dg-error "Duplicate VOLATILE attribute" }
volatile :: l,n ! { dg-warning "Duplicate VOLATILE attribute" }
volatile ! { dg-error "Syntax error in VOLATILE statement" }
volatile :: volatile_test ! { dg-error "PROGRAM attribute conflicts with VOLATILE attribute" }
l = 4.0
m = 3.0
contains
subroutine foo(a) ! { dg-error "has no IMPLICIT type" } ! due to error below
integer, intent(in), volatile :: a ! { dg-error "VOLATILE attribute conflicts with INTENT\\(IN\\)" }
end subroutine
end program volatile_test
| gpl-2.0 |
davidmueller13/android_kernel_samsung_lt03lte | drivers/input/touchscreen/imagis/ist30xx_misc.c | 181 | 27851 | /*
* Copyright (C) 2010,Imagis Technology Co. Ltd. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "ist30xx.h"
#include "ist30xx_update.h"
#include "ist30xx_misc.h"
#define TSP_CH_SCREEN (1)
#define TSP_CH_KEY (2)
#define TOUCH_NODE_PARSING_DEBUG (0)
extern struct ist30xx_data *ts_data;
TSP_INFO ist30xx_tsp_info;
TKEY_INFO ist30xx_tkey_info;
static u32 *ist30xx_frame_buf;
static u32 *ist30xx_frame_rawbuf;
static u32 *ist30xx_frame_fltbuf;
/*
 * ist30xx_tkey_update_info - read the touch-key configuration from the chip.
 *
 * Reads the three key-info registers over i2c and caches the enable flag,
 * the key count and the per-key channel numbers in ist30xx_tkey_info.
 * Returns 0 on success or the error code from ist30xx_read_cmd().
 */
int ist30xx_tkey_update_info(void)
{
int ret = 0;
u32 tkey_info1, tkey_info2, tkey_info3;
TKEY_INFO *tkey = &ist30xx_tkey_info;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_KEY_INFO1, &tkey_info1);
if (ret) return ret;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_KEY_INFO2, &tkey_info2);
if (ret) return ret;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_KEY_INFO3, &tkey_info3);
if (ret) return ret;
/* info1: bits[31:24] = enable flag, bits[23:16] = number of keys. */
tkey->enable = ((tkey_info1 & (0xFF << 24)) ? true : false);
tkey->key_num = (tkey_info1 >> 16) & 0xFF;
/* info2 holds the channel numbers of keys 0-3 (one byte each, MSB
 * first); the fifth key's channel is in the top byte of info3. */
tkey->ch_num[0] = (tkey_info2 >> 24) & 0xFF;
tkey->ch_num[1] = (tkey_info2 >> 16) & 0xFF;
tkey->ch_num[2] = (tkey_info2 >> 8) & 0xFF;
tkey->ch_num[3] = tkey_info2 & 0xFF;
tkey->ch_num[4] = (tkey_info3 >> 24) & 0xFF;
return ret;
}
#define TSP_INFO_SWAP_XY (1 << 0)
#define TSP_INFO_FLIP_X (1 << 1)
#define TSP_INFO_FLIP_Y (1 << 2)
/*
 * ist30xx_tsp_update_info - read panel geometry/orientation from the chip.
 *
 * Fills ist30xx_tsp_info with the tx/rx channel counts, the swap/flip
 * orientation flags and the derived node count and width/height.
 * Returns 0 on success (also when the chip reports 0 channels, in which
 * case the cached info is left untouched) or an i2c error code.
 */
int ist30xx_tsp_update_info(void)
{
int ret = 0;
u32 tsp_ch_num, tsp_swap, tsp_dir;
TSP_INFO *tsp = &ist30xx_tsp_info;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_TSP_SWAP_INFO, &tsp_swap);
if (ret) return ret;
/* NOTE(review): tsp_dir is read but never used below — confirm whether
 * CMD_GET_TSP_DIRECTION has a required side effect or is dead code. */
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_TSP_DIRECTION, &tsp_dir);
if (ret) return ret;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_TSP_CHNUM1, &tsp_ch_num);
if (ret || !tsp_ch_num) return ret;
tsp->finger_num = IST30XX_MAX_MT_FINGERS;
/* CHNUM1: rx count in the high 16 bits, tx count in the low 16 bits. */
tsp->ch_num.rx = tsp_ch_num >> 16;
tsp->ch_num.tx = tsp_ch_num & 0xFFFF;
tsp->dir.swap_xy = (tsp_swap & TSP_INFO_SWAP_XY ? true : false);
tsp->dir.flip_x = (tsp_swap & TSP_INFO_FLIP_X ? true : false);
tsp->dir.flip_y = (tsp_swap & TSP_INFO_FLIP_Y ? true : false);
tsp->node.len = tsp->ch_num.tx * tsp->ch_num.rx;
/* Logical width/height follow the swap_xy orientation. */
tsp->height = (tsp->dir.swap_xy ? tsp->ch_num.rx : tsp->ch_num.tx);
tsp->width = (tsp->dir.swap_xy ? tsp->ch_num.tx : tsp->ch_num.rx);
return ret;
}
/*
 * ist30xx_check_valid_ch - classify a (tx, rx) channel pair.
 *
 * Returns TSP_CH_SCREEN for an ordinary screen node, TSP_CH_KEY when the
 * pair maps onto one of the configured touch-key channels, and 0 for an
 * out-of-range or unused node.  Key channels occupy the last rx line when
 * the keys sit on the rx axis, otherwise the last tx line.
 */
int ist30xx_check_valid_ch(int ch_tx, int ch_rx)
{
	TSP_INFO *tsp = &ist30xx_tsp_info;
	TKEY_INFO *tkey = &ist30xx_tkey_info;
	int key_ch;
	int k;

	if ((ch_tx > tsp->ch_num.tx) || (ch_rx > tsp->ch_num.rx))
		return 0;

	if (tkey->enable) {
		if (tkey->axis_rx) {
			tsp_verb("tx: %d, rx: %d\n", ch_tx, ch_rx);
			if (ch_rx != tsp->ch_num.rx - 1)
				return TSP_CH_SCREEN;
			tsp_verb("ch_tx: %d\n", ch_tx);
			key_ch = ch_tx;
		} else {
			if (ch_tx != tsp->ch_num.tx - 1)
				return TSP_CH_SCREEN;
			key_ch = ch_rx;
		}

		/* On the key line: a node is a key only if its channel is
		 * one of the five configured key channels. */
		for (k = 0; k < 5; k++) {
			if (key_ch == tkey->ch_num[k])
				return TSP_CH_KEY;
		}
		return 0;
	}

	return TSP_CH_SCREEN;
}
/*
 * ist30xx_parse_touch_node - unpack the raw frame buffers into *node.
 *
 * Each 32-bit word of the raw frame carries a 12-bit raw value in the low
 * half and a 12-bit baseline in the high half; filter words carry a 12-bit
 * value in the low half.  @flag selects which buffers to unpack.
 *
 * Fix: walk the frames with local cursors.  The original incremented the
 * global ist30xx_frame_rawbuf/ist30xx_frame_fltbuf pointers themselves, so
 * after one parse the globals pointed past the end of their allocations and
 * the next ist30xx_read_touch_node() would write out of bounds.
 *
 * Always returns 0.
 */
int ist30xx_parse_touch_node(u8 flag, struct TSP_NODE_BUF *node)
{
#if TOUCH_NODE_PARSING_DEBUG
	int j;
	TSP_INFO *tsp = &ist30xx_tsp_info;
#endif
	int i;
	u16 *raw = (u16 *)&node->raw;
	u16 *base = (u16 *)&node->base;
	u16 *filter = (u16 *)&node->filter;
	u32 *rawbuf = ist30xx_frame_rawbuf;	/* local cursors, globals untouched */
	u32 *fltbuf = ist30xx_frame_fltbuf;

	for (i = 0; i < node->len; i++) {
		if (flag & (NODE_FLAG_RAW | NODE_FLAG_BASE)) {
			*raw++ = *rawbuf & 0xFFF;
			*base++ = (*rawbuf >> 16) & 0xFFF;
			rawbuf++;
		}

		if (flag & NODE_FLAG_FILTER)
			*filter++ = *fltbuf++ & 0xFFF;
	}

#if TOUCH_NODE_PARSING_DEBUG
	/* Dump the unpacked matrices for bring-up debugging. */
	tsp_info("RAW - %d * %d\n", tsp->ch_num.tx, tsp->ch_num.rx);
	for (i = 0; i < tsp->ch_num.tx; i++) {
		printk("\n[ TSP ] ");
		for (j = 0; j < tsp->ch_num.rx; j++)
			printk("%4d ", node->raw[i][j]);
	}

	tsp_info("BASE - %d * %d\n", tsp->ch_num.tx, tsp->ch_num.rx);
	for (i = 0; i < tsp->ch_num.tx; i++) {
		printk("\n[ TSP ] ");
		for (j = 0; j < tsp->ch_num.rx; j++)
			printk("%4d ", node->base[i][j]);
	}

	tsp_info("FILTER - %d * %d\n", tsp->ch_num.tx, tsp->ch_num.rx);
	for (i = 0; i < tsp->ch_num.tx; i++) {
		printk("\n[ TSP ] ");
		for (j = 0; j < tsp->ch_num.rx; j++)
			printk("%4d ", node->filter[i][j]);
	}
#endif

	return 0;
}
/*
 * print_touch_node - append one node matrix to a sysfs buffer as text.
 *
 * Emits the matrix selected by @flag (raw, base, filter, or raw-base diff)
 * row by row as "%4d " fields; negative values are clamped to 0.  When
 * @ch_tsp is set, key channels are skipped and only screen nodes print.
 * Returns the number of characters appended (0 for an unknown @flag).
 *
 * NOTE(review): the swap_xy branch is empty, so nothing is printed for
 * swapped panels — confirm whether that orientation is ever configured.
 * NOTE(review): strncat() is called with the source size (msg_len) rather
 * than the space remaining in buf; this relies on the caller's buffer
 * (sysfs PAGE_SIZE) being large enough — verify against callers.
 */
int print_touch_node(u8 flag, struct TSP_NODE_BUF *node, char *buf, bool ch_tsp)
{
int i, j;
int count = 0;
int val = 0;
const int msg_len = 128;
char msg[msg_len];
TSP_INFO *tsp = &ist30xx_tsp_info;
if (tsp->dir.swap_xy) {
} else {
for (i = 0; i < tsp->ch_num.tx; i++) {
for (j = 0; j < tsp->ch_num.rx; j++) {
/* Optionally restrict the dump to screen nodes. */
if (ch_tsp && (ist30xx_check_valid_ch(i, j) != TSP_CH_SCREEN))
continue;
if (flag == NODE_FLAG_RAW)
val = (int)node->raw[i][j];
else if (flag == NODE_FLAG_BASE)
val = (int)node->base[i][j];
else if (flag == NODE_FLAG_FILTER)
val = (int)node->filter[i][j];
else if (flag == NODE_FLAG_DIFF)
val = (int)(node->raw[i][j] - node->base[i][j]);
else
return 0;
if (val < 0) val = 0;
count += snprintf(msg, msg_len, "%4d ", val);
strncat(buf, msg, msg_len);
}
/* Newline after each tx row. */
count += snprintf(msg, msg_len, "\n");
strncat(buf, msg, msg_len);
}
}
return count;
}
/*
 * parse_tsp_node - copy one node matrix into a flat s16 array.
 *
 * Writes the values selected by @flag (raw, base, filter, or raw-base
 * diff) for every screen node into @buf16 in row-major order; key channels
 * are skipped and negative values are clamped to 0.  The caller must size
 * @buf16 for the number of screen nodes.
 * Returns 0 on success, -EPERM for an unknown @flag.
 *
 * NOTE(review): as in print_touch_node(), the swap_xy branch is empty, so
 * @buf16 is left untouched for swapped panels — confirm intent.
 */
int parse_tsp_node(u8 flag, struct TSP_NODE_BUF *node, s16 *buf16)
{
int i, j;
s16 val = 0;
TSP_INFO *tsp = &ist30xx_tsp_info;
if ((flag != NODE_FLAG_RAW) && (flag != NODE_FLAG_BASE) &&
(flag != NODE_FLAG_FILTER) && (flag != NODE_FLAG_DIFF))
return -EPERM;
if (tsp->dir.swap_xy) {
} else {
for (i = 0; i < tsp->ch_num.tx; i++) {
for (j = 0; j < tsp->ch_num.rx; j++) {
/* Only screen nodes are exported. */
if (ist30xx_check_valid_ch(i, j) != TSP_CH_SCREEN)
continue;
switch ((int)flag) {
case NODE_FLAG_RAW:
val = (s16)node->raw[i][j];
break;
case NODE_FLAG_BASE:
val = (s16)node->base[i][j];
break;
case NODE_FLAG_FILTER:
val = (s16)node->filter[i][j];
break;
case NODE_FLAG_DIFF:
val = (s16)(node->raw[i][j] - node->base[i][j]);
break;
}
if (val < 0) val = 0;
*buf16++ = val;
}
}
}
return 0;
}
/*
 * ist30xx_read_touch_node - fetch one frame of node data from the chip.
 *
 * With the touch interrupt masked, enters register-access mode, programs
 * the transfer length, reads the raw/base frame and/or the filter frame
 * (per @flag) into the global frame buffers, then leaves register-access
 * mode and restarts scanning.  Returns 0 on success or an i2c error code.
 *
 * NOTE(review): on an error after CMD_ENTER_REG_ACCESS the exit command and
 * scan restart are skipped, leaving the chip in register-access mode —
 * confirm whether callers recover (e.g. via a later reset).
 */
int ist30xx_read_touch_node(u8 flag, struct TSP_NODE_BUF *node)
{
int ret = 0;
ist30xx_disable_irq(ts_data);
ret = ist30xx_cmd_reg(ts_data->client, CMD_ENTER_REG_ACCESS);
if (ret) goto read_tsp_node_end;
/* Tell the chip how many 32-bit words each frame read will transfer. */
ret = ist30xx_write_cmd(ts_data->client, IST30XX_RX_CNT_ADDR, node->len);
if (ret) goto read_tsp_node_end;
if (flag & (NODE_FLAG_RAW | NODE_FLAG_BASE)) {
tsp_debug("Reg addr: %x, size: %d\n", IST30XXB_RAW_ADDR, node->len);
ret = ist30xx_read_buf(ts_data->client, IST30XXB_RAW_ADDR,
ist30xx_frame_rawbuf, node->len);
if (ret) goto read_tsp_node_end;
}
if (flag & NODE_FLAG_FILTER) {
tsp_debug("Reg addr: %x, size: %d\n", IST30XXB_FILTER_ADDR, node->len);
ret = ist30xx_read_buf(ts_data->client, IST30XXB_FILTER_ADDR,
ist30xx_frame_fltbuf, node->len);
if (ret) goto read_tsp_node_end;
}
ret = ist30xx_cmd_reg(ts_data->client, CMD_EXIT_REG_ACCESS);
if (ret) goto read_tsp_node_end;
ret = ist30xx_cmd_start_scan(ts_data->client);
if (ret) goto read_tsp_node_end;
read_tsp_node_end:
ist30xx_enable_irq(ts_data);
return ret;
}
/* sysfs: /sys/class/touch/node/refresh */
/*
 * Read a fresh frame (raw + base + filter) from the chip and unpack it
 * into the cached node buffers.  Returns 0 on success, or the length of
 * an error message written into @buf when the i2c read fails.
 *
 * Fix: return immediately on a failed read.  The original wrote the error
 * message but then overwrote the byte count with the return value of
 * ist30xx_parse_touch_node(), parsing stale frame data and reporting
 * success.
 */
ssize_t ist30xx_frame_refresh(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	int ret = 0;
	TSP_INFO *tsp = &ist30xx_tsp_info;
	u8 flag = NODE_FLAG_RAW | NODE_FLAG_BASE | NODE_FLAG_FILTER;

	ret = ist30xx_read_touch_node(flag, &tsp->node);
	if (ret)
		return sprintf(buf, "cmd 1frame raw update fail\n");

	ret = ist30xx_parse_touch_node(flag, &tsp->node);

	return ret;
}
/* sysfs: /sys/class/touch/node/base */
/* Dump the cached baseline matrix as text into the sysfs buffer. */
ssize_t ist30xx_base_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	TSP_INFO *tsp = &ist30xx_tsp_info;
	int written;

	buf[0] = '\0';
	written = sprintf(buf, "dump ist30xxb baseline(%d)\n", tsp->node.len);
	written += print_touch_node(NODE_FLAG_BASE, &tsp->node, buf, false);

	return written;
}
/* sysfs: /sys/class/touch/node/raw */
/* Dump the cached raw matrix as text into the sysfs buffer. */
ssize_t ist30xx_raw_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	TSP_INFO *tsp = &ist30xx_tsp_info;
	int written;

	buf[0] = '\0';
	written = sprintf(buf, "dump ist30xxb raw(%d)\n", tsp->node.len);
	written += print_touch_node(NODE_FLAG_RAW, &tsp->node, buf, false);

	return written;
}
/* sysfs: /sys/class/touch/node/diff */
/* Dump the raw-minus-baseline difference matrix into the sysfs buffer. */
ssize_t ist30xx_diff_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	TSP_INFO *tsp = &ist30xx_tsp_info;
	int written;

	buf[0] = '\0';
	written = sprintf(buf, "dump ist30xxb difference (%d)\n", tsp->node.len);
	written += print_touch_node(NODE_FLAG_DIFF, &tsp->node, buf, false);

	return written;
}
/* sysfs: /sys/class/touch/node/filter */
/* Dump the cached filter matrix as text into the sysfs buffer. */
ssize_t ist30xx_filter_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	TSP_INFO *tsp = &ist30xx_tsp_info;
	int written;

	buf[0] = '\0';
	written = sprintf(buf, "dump ist30xxb filter (%d)\n", tsp->node.len);
	written += print_touch_node(NODE_FLAG_FILTER, &tsp->node, buf, false);

	return written;
}
extern int calib_ms_delay;
/* sysfs: /sys/class/touch/sys/clb */
/*
 * Trigger a calibration run.  An optional decimal value in @buf sets the
 * calibration wait time in 100 ms units (accepted range 11..999, i.e.
 * roughly 1 s to 100 s).
 *
 * Fix: ms_delay is zero-initialized — the original compared it while
 * uninitialized (undefined behavior) whenever sscanf() matched nothing.
 * A non-numeric write still triggers calibration with the current delay,
 * preserving the original's intent.
 */
ssize_t ist30xx_calib_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t size)
{
	int ret = -1;
	int ms_delay = 0;

	if (sscanf(buf, "%d", &ms_delay) == 1 &&
	    ms_delay > 10 && ms_delay < 1000)	// 1sec ~ 100sec
		calib_ms_delay = ms_delay;
	tsp_info("Calibration wait time %dsec\n", calib_ms_delay / 10);

	ist30xx_disable_irq(ts_data);
	ret = ist30xx_cmd_run_device(ts_data->client, true);
	if (ret) {
		/* Chip would not enter run mode; re-arm the IRQ and bail. */
		ist30xx_enable_irq(ts_data);
		return size;
	}

	ist30xx_calibrate(1);

	ist30xx_start(ts_data);

	return size;
}
/*
 * sysfs read handler: report the last calibration result.
 *
 * Puts the chip into run mode, reads the calibration-result register and
 * formats the decoded status and maximum raw gap into @buf.  Scanning is
 * restarted and the interrupt re-armed on every exit path.
 * Returns the number of bytes written to @buf (0 if run mode failed).
 */
ssize_t ist30xx_calib_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret;
int count = 0;
u32 value;
mutex_lock(&ist30xx_mutex);
ist30xx_disable_irq(ts_data);
ret = ist30xx_cmd_run_device(ts_data->client, true);
if (ret)
goto calib_show_end;
ret = ist30xx_read_cmd(ts_data->client, CMD_GET_CALIB_RESULT, &value);
if (ret) {
count = sprintf(buf, "Error Read Calibration Result\n");
goto calib_show_end;
}
count = sprintf(buf,
"Calibration Status : %d, Max raw gap : %d - (raw: %08x)\n",
CALIB_TO_STATUS(value), CALIB_TO_GAP(value), value);
calib_show_end:
/* Common exit: restart scanning, unmask the IRQ, drop the lock. */
ist30xx_start(ts_data);
ist30xx_enable_irq(ts_data);
mutex_unlock(&ist30xx_mutex);
return count;
}
/* sysfs: /sys/class/touch/sys/power */
/*
 * Power the controller up ("1") or down ("0") from user space.
 *
 * Fixes: the original scanned a u32 with "%d" (format/argument mismatch,
 * undefined behavior) and used power_en uninitialized when sscanf()
 * matched nothing.  Non-numeric input is now ignored.
 */
ssize_t ist30xx_power_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t size)
{
	u32 power_en;

	if (sscanf(buf, "%u", &power_en) != 1)
		return size;

	tsp_info("Power enable: %d\n", power_en);

	if (power_en > 1) {
		tsp_err("Unknown argument value, %d\n", power_en);
		return size;
	}

	if (power_en) {
		/* Resume the controller, unmask the IRQ, restart scanning. */
		mutex_lock(&ist30xx_mutex);
		ist30xx_internal_resume(ts_data);
		ist30xx_enable_irq(ts_data);
		mutex_unlock(&ist30xx_mutex);
		ist30xx_start(ts_data);
	} else {
		/* Mask the IRQ before suspending the controller. */
		mutex_lock(&ist30xx_mutex);
		ist30xx_disable_irq(ts_data);
		ist30xx_internal_suspend(ts_data);
		mutex_unlock(&ist30xx_mutex);
	}

	return size;
}
extern int ist30xx_max_error_cnt;
/* sysfs: /sys/class/touch/sys/errcnt */
/*
 * Set the maximum error count before the driver resets the chip.
 * Negative or non-numeric input is ignored.
 *
 * Fix: check the sscanf() return — the original compared err_cnt while
 * uninitialized (undefined behavior) when the input did not parse.
 */
ssize_t ist30xx_errcnt_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int err_cnt;

	if (sscanf(buf, "%d", &err_cnt) != 1)
		return size;
	if (err_cnt < 0)
		return size;

	tsp_info("Request reset error count: %d\n", err_cnt);

	ist30xx_max_error_cnt = err_cnt;

	return size;
}
#if IST30XX_EVENT_MODE
extern int ist30xx_max_scan_retry;
/* sysfs: /sys/class/touch/sys/scancnt */
/*
 * Set the scan-counter retry limit used by the event-mode watchdog timer.
 * Negative or non-numeric input is ignored.
 *
 * Fix: check the sscanf() return — the original compared retry while
 * uninitialized (undefined behavior) when the input did not parse.
 */
ssize_t ist30xx_scancnt_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int retry;

	if (sscanf(buf, "%d", &retry) != 1)
		return size;
	if (retry < 0)
		return size;

	tsp_info("Timer scan count retry: %d\n", retry);

	ist30xx_max_scan_retry = retry;

	return size;
}
extern int timer_period_ms;
/* sysfs: /sys/class/touch/sys/timerms */
/*
 * Set the watchdog timer period in milliseconds (accepted range 0..10000).
 * Out-of-range or non-numeric input is ignored.
 *
 * Fix: check the sscanf() return — the original compared ms while
 * uninitialized (undefined behavior) when the input did not parse.
 */
ssize_t ist30xx_timerms_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ms;

	if (sscanf(buf, "%d", &ms) != 1)
		return size;
	if ((ms < 0) || (ms > 10000))
		return size;

	tsp_info("Timer period ms: %dms\n", ms);

	timer_period_ms = ms;

	return size;
}
#endif
extern int ist30xx_dbg_level;
/* sysfs: /sys/class/touch/sys/printk */
/*
 * Set the driver's log verbosity (must lie in [DEV_ERR, DEV_VERB]).
 * Out-of-range or non-numeric input is ignored.
 *
 * Fix: check the sscanf() return — the original compared level while
 * uninitialized (undefined behavior) when the input did not parse.
 */
ssize_t ist30xx_printk_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int level;

	if (sscanf(buf, "%d", &level) != 1)
		return size;
	if ((level < DEV_ERR) || (level > DEV_VERB))
		return size;

	tsp_info("prink log level: %d\n", level);

	ist30xx_dbg_level = level;

	return size;
}
/* sysfs read handler: report the current driver log verbosity. */
ssize_t ist30xx_printk_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int written;

	written = sprintf(buf, "prink log level: %d\n", ist30xx_dbg_level);

	return written;
}
/* sysfs: /sys/class/touch/sys/dummy */
/*
 * Debug read: fetch the coordinate register and format it with the
 * calibration-result decoding macros.
 *
 * Fixes: value is zero-initialized and the function returns the error
 * message on a failed read.  The original formatted an uninitialized
 * value (undefined behavior) and overwrote the error message with the
 * result string on failure.
 */
ssize_t ist30xx_dummy_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	int ret;
	u32 value = 0;

	ist30xx_disable_irq(ts_data);
	ret = ist30xx_read_cmd(ts_data->client, CMD_GET_COORD, &value);
	ist30xx_enable_irq(ts_data);

	if (ret)
		return sprintf(buf, "Error Read Calibration Result\n");

	return sprintf(buf,
		       "Calibration Status : %d, Max raw gap : %d - (raw: %08x)\n",
		       CALIB_TO_STATUS(value), CALIB_TO_GAP(value), value);
}
#define TUNES_CMD_WRITE (1)
#define TUNES_CMD_READ (2)
#define TUNES_CMD_REG_ENTER (3)
#define TUNES_CMD_REG_EXIT (4)
#define TUNES_CMD_UPDATE_PARAM (5)
#define TUNES_CMD_UPDATE_FW (6)
#define DIRECT_ADDR(k) (IST30XXB_DA_ADDR(k))
#define DIRECT_CMD_WRITE ('w')
#define DIRECT_CMD_READ ('r')
#pragma pack(1)
typedef struct {
u8 cmd;
u32 addr;
u16 len;
} TUNES_INFO;
#pragma pack()
#pragma pack(1)
typedef struct {
char cmd;
u32 addr;
u32 val;
} DIRECT_INFO;
#pragma pack()
static TUNES_INFO ist30xx_tunes;
static DIRECT_INFO ist30xx_direct;
static bool tunes_cmd_done = false;
static bool ist30xx_reg_mode = false;
/* sysfs: /sys/class/touch/sys/direct */
/*
 * Direct register access from user space.
 * Input format: "<w|r> <hex addr> <hex val-or-len>".  A 'w' command writes
 * the value immediately (and reads it back for the debug trace); an 'r'
 * command only records the request, which ist30xxb_direct_show() services.
 *
 * Fix: require all three fields to parse — the original ignored the
 * sscanf() return, acting on stale/uninitialized cmd/addr/val fields when
 * the input was malformed.
 */
ssize_t ist30xxb_direct_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret = -EPERM;
	DIRECT_INFO *direct = (DIRECT_INFO *)&ist30xx_direct;

	if (sscanf(buf, "%c %x %x", &direct->cmd, &direct->addr,
		   &direct->val) != 3) {
		tsp_warn("Direct cmd is not correct!\n");
		return size;
	}

	tsp_debug("Direct cmd: %c, addr: %x, val: %x\n",
		  direct->cmd, direct->addr, direct->val);

	if ((direct->cmd != DIRECT_CMD_WRITE) && (direct->cmd != DIRECT_CMD_READ)) {
		tsp_warn("Direct cmd is not correct!\n");
		return size;
	}

	if (direct->cmd == DIRECT_CMD_WRITE) {
		/* Write, then read back so the trace shows the live value. */
		ret = ist30xx_write_cmd(ts_data->client, DIRECT_ADDR(direct->addr),
					direct->val);
		ret = ist30xx_read_cmd(ts_data->client, DIRECT_ADDR(direct->addr),
				       &direct->val);
		tsp_debug("Direct write addr: %x, val: %x\n",
			  direct->addr, direct->val);
	}

	return size;
}
#define DIRECT_BUF_COUNT (4)	/* words read per I2C burst */

/* sysfs: /sys/class/touch/sys/direct (read path)
 * Dump the register range recorded by a previous "r addr len" store,
 * DIRECT_BUF_COUNT words per burst, formatted as hex into @buf.
 */
ssize_t ist30xxb_direct_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int i, ret, count = 0;
	int len;
	u32 addr;
	u32 buf32[DIRECT_BUF_COUNT];
	int max_len = DIRECT_BUF_COUNT;
	const int msg_len = 256;
	char msg[msg_len];
	DIRECT_INFO *direct = (DIRECT_INFO *)&ist30xx_direct;

	if (direct->cmd != DIRECT_CMD_READ)
		return sprintf(buf, "ex) echo r addr len > direct\n");

	len = direct->val;	/* for a read request, val carries the word count */
	addr = DIRECT_ADDR(direct->addr);

	/* quiesce the touch IRQ while bursting over I2C */
	ts_data->status.event_mode = false;
	ist30xx_disable_irq(ts_data);

	while (len > 0) {
		if (len < max_len) max_len = len;	/* last, partial burst */

		memset(buf32, 0, sizeof(buf32));
		ret = ist30xxb_burst_read(ts_data->client, addr, buf32, max_len);
		if (ret) {
			count = sprintf(buf, "I2C Burst read fail, addr: %x\n", addr);
			break;
		}

		/* NOTE(review): strncat's bound should be the space left in
		 * buf (PAGE_SIZE - count), not msg_len — overflow risk for
		 * very large len; confirm against sysfs buffer size. */
		for (i = 0; i < max_len; i++) {
			count += snprintf(msg, msg_len, "0x%08x ", buf32[i]);
			strncat(buf, msg, msg_len);
		}

		count += snprintf(msg, msg_len, "\n");
		strncat(buf, msg, msg_len);

		addr += max_len * IST30XX_DATA_LEN;
		len -= max_len;
	}

	ist30xx_enable_irq(ts_data);
	ts_data->status.event_mode = true;

	tsp_debug("%s", buf);

	return count;
}
/* sysfs: /sys/class/touch/tunes/regcmd
 * Accepts a binary TUNES_INFO header (packed) selecting the next tunes
 * operation; REG_ENTER/REG_EXIT toggle the chip's register-access mode.
 */
ssize_t tunes_regcmd_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int ret = -1;
	u32 *buf32;	/* NOTE(review): set but never used in this handler */

	memcpy(&ist30xx_tunes, buf, sizeof(ist30xx_tunes));
	buf += sizeof(ist30xx_tunes);
	buf32 = (u32 *)buf;

	tunes_cmd_done = false;	/* invalidate any previous pending command */

	switch (ist30xx_tunes.cmd) {
	case TUNES_CMD_WRITE:
		break;
	case TUNES_CMD_READ:
		break;
	case TUNES_CMD_REG_ENTER:
		ist30xx_disable_irq(ts_data);

		ret = ist30xx_cmd_run_device(ts_data->client, true);
		if (ret)
			goto regcmd_fail;

		/* enter reg access mode */
		ret = ist30xx_cmd_reg(ts_data->client, CMD_ENTER_REG_ACCESS);
		if (ret)
			goto regcmd_fail;

		ist30xx_reg_mode = true;

		break;
	case TUNES_CMD_REG_EXIT:
		/* exit reg access mode */
		ret = ist30xx_cmd_reg(ts_data->client, CMD_EXIT_REG_ACCESS);
		if (ret)
			goto regcmd_fail;

		ret = ist30xx_cmd_start_scan(ts_data->client);
		if (ret)
			goto regcmd_fail;

		ist30xx_reg_mode = false;

		ist30xx_enable_irq(ts_data);
		break;
	default:
		ist30xx_enable_irq(ts_data);
		return size;
	}

	tunes_cmd_done = true;

	return size;

regcmd_fail:
	/* NOTE(review): a failure after REG_ENTER's disable_irq leaves the
	 * touch IRQ disabled until a later enable — verify intentional. */
	tsp_err("Tunes regcmd i2c_fail, ret=%d\n", ret);
	return size;
}
/* sysfs: /sys/class/touch/tunes/regcmd (read path)
 * Report the most recently stored tunes command header.
 */
ssize_t tunes_regcmd_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "cmd: 0x%02x, addr: 0x%08x, len: 0x%04x\n",
		       ist30xx_tunes.cmd, ist30xx_tunes.addr,
		       ist30xx_tunes.len);
}
#define MAX_WRITE_LEN (1)	/* words written per I2C transaction */

/* sysfs: /sys/class/touch/tunes/reg
 * Write the payload of a previously accepted TUNES_CMD_WRITE regcmd to
 * the chip, MAX_WRITE_LEN words at a time. Requires register-access mode.
 */
ssize_t tunes_reg_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t size)
{
	int ret;
	u32 *buf32 = (u32 *)buf;
	int waddr, wcnt = 0, len = 0;

	if (ist30xx_tunes.cmd != TUNES_CMD_WRITE) {
		tsp_err("error, IST30XX_REG_CMD is not correct!\n");
		return size;
	}

	if (!ist30xx_reg_mode) {
		tsp_err("error, IST30XX_REG_CMD is not ready!\n");
		return size;
	}

	if (!tunes_cmd_done) {
		tsp_err("error, IST30XX_REG_CMD is not ready!\n");
		return size;
	}

	waddr = ist30xx_tunes.addr;
	/* clamp the first chunk to MAX_WRITE_LEN */
	if (ist30xx_tunes.len >= MAX_WRITE_LEN)
		len = MAX_WRITE_LEN;
	else
		len = ist30xx_tunes.len;

	while (wcnt < ist30xx_tunes.len) {
		ret = ist30xx_write_buf(ts_data->client, waddr, buf32, len);
		if (ret) {
			tsp_err("Tunes regstore i2c_fail, ret=%d\n", ret);
			return size;
		}

		wcnt += len;

		/* shrink the final chunk to the remaining word count */
		if ((ist30xx_tunes.len - wcnt) < MAX_WRITE_LEN)
			len = ist30xx_tunes.len - wcnt;

		buf32 += MAX_WRITE_LEN;
		waddr += MAX_WRITE_LEN * IST30XX_DATA_LEN;	/* byte-addressed */
	}

	tunes_cmd_done = false;	/* command consumed */

	return size;
}
/* sysfs: /sys/class/touch/tunes/reg (read path)
 * Return the raw register words selected by a previously accepted
 * TUNES_CMD_READ regcmd, or 0 on any error.
 */
ssize_t tunes_reg_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	int ret;
	int size;
	u32 *buf32 = (u32 *)buf;
#if I2C_MONOPOLY_MODE
	unsigned long flags;
#endif

	if (ist30xx_tunes.cmd != TUNES_CMD_READ) {
		tsp_err("error, IST30XX_REG_CMD is not correct!\n");
		return 0;
	}

	if (!tunes_cmd_done) {
		tsp_err("error, IST30XX_REG_CMD is not ready!\n");
		return 0;
	}

	size = ist30xx_tunes.len;	/* word count requested by regcmd */

	ret = ist30xx_write_cmd(ts_data->client, IST30XX_RX_CNT_ADDR, size);
	if (ret) {
		tsp_err("Tunes regshow i2c_fail, ret=%d\n", ret);
		return 0;
	}

#if I2C_MONOPOLY_MODE
	local_irq_save(flags); // Activated only when the GPIO I2C is used
#endif
	ret = ist30xx_read_buf(ts_data->client, ist30xx_tunes.addr, buf32, size);
#if I2C_MONOPOLY_MODE
	local_irq_restore(flags); // Activated only when the GPIO I2C is used
#endif
	if (ret) {
		tsp_err("Tunes regshow i2c_fail, ret=%d\n", ret);
		/* BUG FIX: @buf holds no valid data here; the original
		 * returned the stale word count, exposing uninitialized
		 * bytes to user space. Fail like the other error paths. */
		return 0;
	}

	size = ist30xx_tunes.len * IST30XX_DATA_LEN;	/* words -> bytes */

	tunes_cmd_done = false;	/* command consumed */

	return size;
}
/* sysfs: /sys/class/touch/tunes/firmware
 * Treat the written buffer as a firmware image: flash it (under the
 * driver mutex), then recalibrate and reinitialize the touch driver.
 */
ssize_t tunes_fw_store(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t size)
{
	ist30xx_get_update_info(ts_data, buf, size);

	mutex_lock(&ist30xx_mutex);
	ist30xx_fw_update(ts_data->client, buf, size, true);
	mutex_unlock(&ist30xx_mutex);

	ist30xx_calibrate(1);

	ist30xx_init_touch_driver(ts_data);

	return size;
}
/* sysfs: /sys/class/touch/tunes/adb
 * Text variant of the tunes protocol for use over adb:
 * "<cmd:hex> <addr:hex> <len:hex>" followed (for writes) by the payload
 * as fixed-width 8-hex-digit words starting at buf+15.
 */
ssize_t tunes_adb_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t size)
{
	int ret;
	char *tmp, *ptr;
	char token[9];	/* one 8-hex-digit word + NUL */
	u32 cmd, addr, len, val;
	int write_len;

	sscanf(buf, "%x %x %x", &cmd, &addr, &len);

	switch (cmd) {
	case TUNES_CMD_WRITE: /* write cmd */
		write_len = 0;
		/* payload starts after the fixed-width "cmd addr len " header */
		ptr = (char *)(buf + 15);

		while (write_len < len) {
			memcpy(token, ptr, 8);
			token[8] = 0;
			val = simple_strtoul(token, &tmp, 16);

			ret = ist30xx_write_buf(ts_data->client, addr, &val, 1);
			if (ret) {
				tsp_err("Tunes regstore i2c_fail, ret=%d\n", ret);
				return size;
			}

			ptr += 8;
			write_len++;
			addr += 4;	/* next 32-bit register */
		}
		break;

	case TUNES_CMD_READ: /* read cmd */
		/* record the request; tunes_adb_show performs the read */
		ist30xx_tunes.cmd = cmd;
		ist30xx_tunes.addr = addr;
		ist30xx_tunes.len = len;
		break;

	case TUNES_CMD_REG_ENTER: /* enter */
		ist30xx_disable_irq(ts_data);

		ret = ist30xx_cmd_run_device(ts_data->client, true);
		if (ret < 0)
			goto cmd_fail;

		ret = ist30xx_cmd_reg(ts_data->client, CMD_ENTER_REG_ACCESS);
		if (ret < 0)
			goto cmd_fail;
		ist30xx_reg_mode = true;
		break;

	case TUNES_CMD_REG_EXIT: /* exit */
		if (ist30xx_reg_mode == true) {
			ret = ist30xx_cmd_reg(ts_data->client, CMD_EXIT_REG_ACCESS);
			if (ret < 0)
				goto cmd_fail;

			ret = ist30xx_cmd_start_scan(ts_data->client);
			if (ret < 0)
				goto cmd_fail;

			ist30xx_reg_mode = false;
			ist30xx_enable_irq(ts_data);
		}
		break;

	default:
		break;
	}

	return size;

cmd_fail:
	/* NOTE(review): failure after REG_ENTER's disable_irq leaves the
	 * touch IRQ disabled — verify intentional. */
	tsp_err("Tunes adb i2c_fail\n");
	return size;
}
/* sysfs: /sys/class/touch/tunes/adb (read path)
 * Perform the read recorded by a TUNES_CMD_READ adb store and return
 * "<addr><word0><word1>..." as concatenated 8-hex-digit fields.
 */
ssize_t tunes_adb_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	int ret;
	int i, len, size = 0;
	char reg_val[10];	/* one 8-hex-digit field + NUL */
#if I2C_MONOPOLY_MODE
	unsigned long flags;
#endif

	/* tell the chip how many words the upcoming read will fetch */
	ret = ist30xx_write_cmd(ts_data->client, IST30XX_RX_CNT_ADDR, ist30xx_tunes.len);
	if (ret) {
		tsp_err("Tunes regshow i2c_fail, ret=%d\n", ret);
		return size;
	}

#if I2C_MONOPOLY_MODE
	local_irq_save(flags);	/* Activated only when the GPIO I2C is used */
#endif
	ret = ist30xx_read_buf(ts_data->client, ist30xx_tunes.addr,
			       ist30xx_frame_buf, ist30xx_tunes.len);
#if I2C_MONOPOLY_MODE
	local_irq_restore(flags);	/* Activated only when the GPIO I2C is used */
#endif
	if (ret) {
		tsp_err("Tunes regshow i2c_fail, ret=%d\n", ret);
		return size;
	}

	size = 0;
	buf[0] = 0;
	/* leading field: the base address of the dump */
	len = sprintf(reg_val, "%08x", ist30xx_tunes.addr);
	strcat(buf, reg_val);
	size += len;

	for (i = 0; i < ist30xx_tunes.len; i++) {
		len = sprintf(reg_val, "%08x", ist30xx_frame_buf[i]);
		strcat(buf, reg_val);	/* NOTE(review): unbounded — assumes len fits PAGE_SIZE */
		size += len;
	}

	return size;
}
#if IST30XX_ALGORITHM_MODE
/* sysfs: /sys/class/touch/tunes/algorithm */
extern u32 ist30xx_algr_addr, ist30xx_algr_size;

/* Store "<addr:hex> <count:dec>" selecting the algorithm memory window. */
ssize_t ist30xx_algr_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	sscanf(buf, "%x %d", &ist30xx_algr_addr, &ist30xx_algr_size);

	tsp_info("Algorithm addr: 0x%x, count: %d\n",
		 ist30xx_algr_addr, ist30xx_algr_size);

	/* tag the address so later accesses go through the access window */
	ist30xx_algr_addr |= IST30XXB_ACCESS_ADDR;

	return size;
}
/* sysfs read: report the chip's current algorithm memory address. */
ssize_t ist30xx_algr_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 mem_addr = 0;
	int ret;

	ret = ist30xx_read_cmd(ts_data->client, IST30XXB_MEM_ALGORITHM,
			       &mem_addr);
	if (ret) {
		tsp_warn("Algorithm mem addr read fail!\n");
		return 0;
	}

	tsp_info("algr_addr(0x%x): 0x%x\n", IST30XXB_MEM_ALGORITHM, mem_addr);

	return sprintf(buf, "Algorithm addr : 0x%x\n", mem_addr);
}
#endif // IST30XX_ALGORITHM_MODE
#define MISC_DEFAULT_ATTR (0644)	/* rw-r--r-- for all misc attributes */

/* sysfs */
/* /sys/class/touch/node/... — frame/raw-data access */
static DEVICE_ATTR(refresh, MISC_DEFAULT_ATTR, ist30xx_frame_refresh, NULL);
static DEVICE_ATTR(filter, MISC_DEFAULT_ATTR, ist30xx_filter_show, NULL);
static DEVICE_ATTR(raw, MISC_DEFAULT_ATTR, ist30xx_raw_show, NULL);
static DEVICE_ATTR(base, MISC_DEFAULT_ATTR, ist30xx_base_show, NULL);
static DEVICE_ATTR(diff, MISC_DEFAULT_ATTR, ist30xx_diff_show, NULL);
/* /sys/class/touch/sys/... — driver control / debug */
static DEVICE_ATTR(printk, MISC_DEFAULT_ATTR, ist30xx_printk_show, ist30xx_printk_store);
static DEVICE_ATTR(direct, MISC_DEFAULT_ATTR, ist30xxb_direct_show, ist30xxb_direct_store);
static DEVICE_ATTR(clb, MISC_DEFAULT_ATTR, ist30xx_calib_show, ist30xx_calib_store);
static DEVICE_ATTR(tsp_power, MISC_DEFAULT_ATTR, NULL, ist30xx_power_store);
static DEVICE_ATTR(errcnt, MISC_DEFAULT_ATTR, NULL, ist30xx_errcnt_store);
static DEVICE_ATTR(dummy, MISC_DEFAULT_ATTR, ist30xx_dummy_show, NULL);
#if IST30XX_EVENT_MODE
static DEVICE_ATTR(scancnt, MISC_DEFAULT_ATTR, NULL, ist30xx_scancnt_store);
static DEVICE_ATTR(timerms, MISC_DEFAULT_ATTR, NULL, ist30xx_timerms_store);
#endif
/* /sys/class/touch/tunes/... — register/firmware tuning interface */
static DEVICE_ATTR(regcmd, MISC_DEFAULT_ATTR, tunes_regcmd_show, tunes_regcmd_store);
static DEVICE_ATTR(reg, MISC_DEFAULT_ATTR, tunes_reg_show, tunes_reg_store);
static DEVICE_ATTR(tunes_fw, MISC_DEFAULT_ATTR, NULL, tunes_fw_store);
static DEVICE_ATTR(adb, MISC_DEFAULT_ATTR, tunes_adb_show, tunes_adb_store);
#if IST30XX_ALGORITHM_MODE
static DEVICE_ATTR(algorithm, MISC_DEFAULT_ATTR, ist30xx_algr_show, ist30xx_algr_store);
#endif

/* Attribute group for the "node" device */
static struct attribute *node_attributes[] = {
	&dev_attr_refresh.attr,
	&dev_attr_filter.attr,
	&dev_attr_raw.attr,
	&dev_attr_base.attr,
	&dev_attr_diff.attr,
	NULL,
};

/* Attribute group for the "sys" device */
static struct attribute *sys_attributes[] = {
	&dev_attr_printk.attr,
	&dev_attr_direct.attr,
	&dev_attr_clb.attr,
	&dev_attr_tsp_power.attr,
	&dev_attr_errcnt.attr,
	&dev_attr_dummy.attr,
#if IST30XX_EVENT_MODE
	&dev_attr_scancnt.attr,
	&dev_attr_timerms.attr,
#endif
	NULL,
};

/* Attribute group for the "tunes" device */
static struct attribute *tunes_attributes[] = {
	&dev_attr_regcmd.attr,
	&dev_attr_reg.attr,
	&dev_attr_tunes_fw.attr,
	&dev_attr_adb.attr,
#if IST30XX_ALGORITHM_MODE
	&dev_attr_algorithm.attr,
#endif
	NULL,
};

static struct attribute_group node_attr_group = {
	.attrs = node_attributes,
};

static struct attribute_group sys_attr_group = {
	.attrs = sys_attributes,
};

static struct attribute_group tunes_attr_group = {
	.attrs = tunes_attributes,
};

extern struct class *ist30xx_class;	/* /sys/class/touch, created elsewhere */
struct device *ist30xx_sys_dev;
struct device *ist30xx_tunes_dev;
struct device *ist30xx_node_dev;
/* Create the /sys/class/touch/{sys,tunes,node} devices, attach their
 * attribute groups, and allocate the shared frame buffers.
 * Returns 0 on success or a negative errno.
 */
int ist30xx_init_misc_sysfs(void)
{
	/* /sys/class/touch/sys */
	ist30xx_sys_dev = device_create(ist30xx_class, NULL, 0, NULL, "sys");
	/* BUG FIX: device_create() returns ERR_PTR on failure; the
	 * original dereferenced it unchecked (kernel oops). */
	if (IS_ERR(ist30xx_sys_dev))
		return PTR_ERR(ist30xx_sys_dev);

	/* /sys/class/touch/sys/... */
	if (sysfs_create_group(&ist30xx_sys_dev->kobj, &sys_attr_group))
		tsp_err("Failed to create sysfs group(%s)!\n", "sys");

	/* /sys/class/touch/tunes */
	ist30xx_tunes_dev = device_create(ist30xx_class, NULL, 0, NULL, "tunes");
	if (IS_ERR(ist30xx_tunes_dev))
		return PTR_ERR(ist30xx_tunes_dev);

	/* /sys/class/touch/tunes/... */
	if (sysfs_create_group(&ist30xx_tunes_dev->kobj, &tunes_attr_group))
		tsp_err("Failed to create sysfs group(%s)!\n", "tunes");

	/* /sys/class/touch/node */
	ist30xx_node_dev = device_create(ist30xx_class, NULL, 0, NULL, "node");
	if (IS_ERR(ist30xx_node_dev))
		return PTR_ERR(ist30xx_node_dev);

	/* /sys/class/touch/node/... */
	if (sysfs_create_group(&ist30xx_node_dev->kobj, &node_attr_group))
		tsp_err("Failed to create sysfs group(%s)!\n", "node");

	ist30xx_frame_buf = kmalloc(4096, GFP_KERNEL);
	ist30xx_frame_rawbuf = kmalloc(4096, GFP_KERNEL);
	ist30xx_frame_fltbuf = kmalloc(4096, GFP_KERNEL);
	if (!ist30xx_frame_buf || !ist30xx_frame_rawbuf || !ist30xx_frame_fltbuf) {
		/* BUG FIX: free whichever buffers did allocate instead of
		 * leaking them (kfree(NULL) is a no-op). */
		kfree(ist30xx_frame_buf);
		kfree(ist30xx_frame_rawbuf);
		kfree(ist30xx_frame_fltbuf);
		ist30xx_frame_buf = NULL;
		ist30xx_frame_rawbuf = NULL;
		ist30xx_frame_fltbuf = NULL;
		return -ENOMEM;
	}

	return 0;
}
| gpl-2.0 |
TheTypoMaster/linux-kernel | drivers/net/ethernet/ethoc.c | 437 | 32715 | /*
* linux/drivers/net/ethernet/ethoc.c
*
* Copyright (C) 2007-2008 Avionic Design Development GmbH
* Copyright (C) 2008-2009 Avionic Design GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Written by Thierry Reding <thierry.reding@avionic-design.de>
*/
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/module.h>
#include <net/ethoc.h>
/* Module parameter: size in bytes of the DMA buffer the driver allocates
 * when no dedicated packet memory region is provided (default 32 KiB). */
static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
/* register offsets */
#define MODER 0x00
#define INT_SOURCE 0x04
#define INT_MASK 0x08
#define IPGT 0x0c
#define IPGR1 0x10
#define IPGR2 0x14
#define PACKETLEN 0x18
#define COLLCONF 0x1c
#define TX_BD_NUM 0x20
#define CTRLMODER 0x24
#define MIIMODER 0x28
#define MIICOMMAND 0x2c
#define MIIADDRESS 0x30
#define MIITX_DATA 0x34
#define MIIRX_DATA 0x38
#define MIISTATUS 0x3c
#define MAC_ADDR0 0x40
#define MAC_ADDR1 0x44
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
#define MODER_TXEN (1 << 1) /* transmit enable */
#define MODER_NOPRE (1 << 2) /* no preamble */
#define MODER_BRO (1 << 3) /* broadcast address */
#define MODER_IAM (1 << 4) /* individual address mode */
#define MODER_PRO (1 << 5) /* promiscuous mode */
#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
#define MODER_LOOP (1 << 7) /* loopback */
#define MODER_NBO (1 << 8) /* no back-off */
#define MODER_EDE (1 << 9) /* excess defer enable */
#define MODER_FULLD (1 << 10) /* full duplex */
#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
#define MODER_DCRC (1 << 12) /* delayed CRC enable */
#define MODER_CRC (1 << 13) /* CRC enable */
#define MODER_HUGE (1 << 14) /* huge packets enable */
#define MODER_PAD (1 << 15) /* padding enabled */
#define MODER_RSM (1 << 16) /* receive small packets */
/* interrupt source and mask registers */
#define INT_MASK_TXF (1 << 0) /* transmit frame */
#define INT_MASK_TXE (1 << 1) /* transmit error */
#define INT_MASK_RXF (1 << 2) /* receive frame */
#define INT_MASK_RXE (1 << 3) /* receive error */
#define INT_MASK_BUSY (1 << 4)
#define INT_MASK_TXC (1 << 5) /* transmit control frame */
#define INT_MASK_RXC (1 << 6) /* receive control frame */
#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
#define INT_MASK_ALL ( \
INT_MASK_TXF | INT_MASK_TXE | \
INT_MASK_RXF | INT_MASK_RXE | \
INT_MASK_TXC | INT_MASK_RXC | \
INT_MASK_BUSY \
)
/* packet length register */
#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
PACKETLEN_MAX(max))
/* transmit buffer number register */
#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
/* control module mode register */
#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
/* MII mode register */
#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
#define MIIMODER_NOPRE (1 << 8) /* no preamble */
/* MII command register */
#define MIICOMMAND_SCAN (1 << 0) /* scan status */
#define MIICOMMAND_READ (1 << 1) /* read status */
#define MIICOMMAND_WRITE (1 << 2) /* write control data */
/* MII address register */
#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
MIIADDRESS_RGAD(reg))
/* MII transmit data register */
#define MIITX_DATA_VAL(x) ((x) & 0xffff)
/* MII receive data register */
#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
/* MII status register */
#define MIISTATUS_LINKFAIL (1 << 0)
#define MIISTATUS_BUSY (1 << 1)
#define MIISTATUS_INVALID (1 << 2)
/* TX buffer descriptor */
#define TX_BD_CS (1 << 0) /* carrier sense lost */
#define TX_BD_DF (1 << 1) /* defer indication */
#define TX_BD_LC (1 << 2) /* late collision */
#define TX_BD_RL (1 << 3) /* retransmission limit */
#define TX_BD_RETRY_MASK (0x00f0)
#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
#define TX_BD_UR (1 << 8) /* transmitter underrun */
#define TX_BD_CRC (1 << 11) /* TX CRC enable */
#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
#define TX_BD_WRAP (1 << 13)
#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
#define TX_BD_READY (1 << 15) /* TX buffer ready */
#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK (0xffff << 16)
#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
/* RX buffer descriptor */
#define RX_BD_LC (1 << 0) /* late collision */
#define RX_BD_CRC (1 << 1) /* RX CRC error */
#define RX_BD_SF (1 << 2) /* short frame */
#define RX_BD_TL (1 << 3) /* too long */
#define RX_BD_DN (1 << 4) /* dribble nibble */
#define RX_BD_IS (1 << 5) /* invalid symbol */
#define RX_BD_OR (1 << 6) /* receiver overrun */
#define RX_BD_MISS (1 << 7)
#define RX_BD_CF (1 << 8) /* control frame */
#define RX_BD_WRAP (1 << 13)
#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
#define RX_BD_EMPTY (1 << 15)
#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
#define ETHOC_BUFSIZ 1536
#define ETHOC_ZLEN 64
#define ETHOC_BD_BASE 0x400
#define ETHOC_TIMEOUT (HZ / 2)
#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
/**
 * struct ethoc - driver-private device structure
 * @iobase:	pointer to I/O memory region
 * @membase:	pointer to buffer memory region
 * @dma_alloc:	dma allocated buffer size
 * @io_region_size:	I/O memory region size
 * @num_bd:	number of buffer descriptors
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 * @vma:	pointer to array of virtual memory addresses for buffers
 * @netdev:	pointer to network device structure
 * @napi:	NAPI structure
 * @msg_enable:	device state flags
 * @lock:	device lock
 * @phy:	attached PHY
 * @mdio:	MDIO bus for PHY access
 * @clk:	device clock (member was previously undocumented; presumed
 *		the controller's input clock — confirm against probe code)
 * @phy_id:	address of attached PHY
 */
struct ethoc {
	void __iomem *iobase;
	void __iomem *membase;
	int dma_alloc;
	resource_size_t io_region_size;

	unsigned int num_bd;
	unsigned int num_tx;
	unsigned int cur_tx;
	unsigned int dty_tx;

	unsigned int num_rx;
	unsigned int cur_rx;

	void **vma;

	struct net_device *netdev;
	struct napi_struct napi;
	u32 msg_enable;

	spinlock_t lock;

	struct phy_device *phy;
	struct mii_bus *mdio;
	struct clk *clk;
	s8 phy_id;
};
/**
 * struct ethoc_bd - buffer descriptor
 * @stat: buffer statistics
 * @addr: physical memory address
 *
 * Mirrors the two-word hardware descriptor layout stored at
 * ETHOC_BD_BASE inside the controller's register window.
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};
/* Read a 32-bit CSR at @offset within the controller's I/O window. */
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
	void __iomem *reg = dev->iobase + offset;

	return ioread32(reg);
}
/* Write a 32-bit CSR at @offset within the controller's I/O window. */
static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	void __iomem *reg = dev->iobase + offset;

	iowrite32(data, reg);
}
/* Fetch buffer descriptor @index from the hardware BD table into @bd. */
static inline void ethoc_read_bd(struct ethoc *dev, int index,
				 struct ethoc_bd *bd)
{
	loff_t base = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));

	bd->stat = ethoc_read(dev, base);
	bd->addr = ethoc_read(dev, base + 4);
}
/* Store @bd into slot @index of the hardware BD table. */
static inline void ethoc_write_bd(struct ethoc *dev, int index,
				  const struct ethoc_bd *bd)
{
	loff_t base = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));

	ethoc_write(dev, base, bd->stat);
	ethoc_write(dev, base + 4, bd->addr);
}
/* Unmask the interrupt sources selected by @mask (read-modify-write). */
static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_MASK, ethoc_read(dev, INT_MASK) | mask);
}
/* Mask the interrupt sources selected by @mask (read-modify-write). */
static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_MASK, ethoc_read(dev, INT_MASK) & ~mask);
}
/* Acknowledge (clear) the pending interrupt sources in @mask. */
static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	/* INT_SOURCE is write-one-to-clear */
	ethoc_write(dev, INT_SOURCE, mask);
}
/* Turn on both the receiver and the transmitter in MODER. */
static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
	u32 moder = ethoc_read(dev, MODER);

	ethoc_write(dev, MODER, moder | MODER_RXEN | MODER_TXEN);
}
/* Turn off both the receiver and the transmitter in MODER. */
static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
	u32 moder = ethoc_read(dev, MODER);

	ethoc_write(dev, MODER, moder & ~(MODER_RXEN | MODER_TXEN));
}
/* Program the TX and RX buffer-descriptor rings and record the CPU-side
 * virtual address of each packet buffer in dev->vma[].
 * @mem_start: bus address of the first packet buffer.
 * Always returns 0.
 */
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void *vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;	/* last descriptor wraps the ring */

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	/* RX descriptors follow the TX ones; hand them to hardware empty */
	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}
/* Bring the MAC into a known operating state: CRC generation, padding,
 * full duplex, default interframe gap, all interrupts acked and enabled,
 * then RX/TX switched on. Always returns 0.
 */
static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);

	/* default back-to-back interframe gap */
	ethoc_write(dev, IPGT, 0x15);

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}
/* Fold the error bits of an RX descriptor into netdev statistics.
 * Returns the number of error conditions that should cause the frame to
 * be dropped (0 means the frame is deliverable). Note that a dribble
 * nibble (RX_BD_DN) is counted as a frame error but does NOT bump the
 * return value, i.e. the frame is still delivered.
 */
static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
					  struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		netdev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		netdev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		netdev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		netdev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		netdev->stats.collisions++;
		ret++;
	}

	return ret;
}
/* NAPI receive path: drain up to @limit completed RX descriptors,
 * copying each frame out of device memory into a fresh skb and handing
 * it to the stack. Returns the number of descriptors processed.
 */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		/* RX descriptors sit after the TX ones in the BD table */
		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);

			/* If packet (interrupt) came in between checking
			 * BD_EMTPY and clearing the interrupt source, then we
			 * risk missing the packet as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * BD_EMTPY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;	/* frame length in the upper half */
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev,
						 "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
/* Fold the status bits of a completed TX descriptor into netdev stats. */
static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;

	if (bd->stat & TX_BD_LC) {
		dev_err(&netdev->dev, "TX: late collision\n");
		netdev->stats.tx_window_errors++;
	}

	if (bd->stat & TX_BD_RL) {
		dev_err(&netdev->dev, "TX: retransmit limit\n");
		netdev->stats.tx_aborted_errors++;
	}

	if (bd->stat & TX_BD_UR) {
		dev_err(&netdev->dev, "TX: underrun\n");
		netdev->stats.tx_fifo_errors++;
	}

	if (bd->stat & TX_BD_CS) {
		dev_err(&netdev->dev, "TX: carrier sense lost\n");
		netdev->stats.tx_carrier_errors++;
	}

	if (bd->stat & TX_BD_STATS)
		netdev->stats.tx_errors++;

	/* consistency: use the named retry-count accessor the driver
	 * already defines instead of open-coding (stat >> 4) & 0xf */
	netdev->stats.collisions += TX_BD_RETRY(bd->stat);
	netdev->stats.tx_bytes += bd->stat >> 16;	/* TX_BD_LEN field */
	netdev->stats.tx_packets++;
}
/* NAPI transmit-completion path: reclaim up to @limit finished TX
 * descriptors, update stats, and wake the queue once at least half the
 * ring is free. Returns the number of descriptors reclaimed.
 */
static int ethoc_tx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;
	struct ethoc_bd bd;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;

		/* num_tx is a power of two, so masking wraps the index */
		entry = priv->dty_tx & (priv->num_tx-1);

		ethoc_read_bd(priv, entry, &bd);

		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
			ethoc_ack_irq(priv, INT_MASK_TX);
			/* If interrupt came in between reading in the BD
			 * and clearing the interrupt source, then we risk
			 * missing the event as the TX interrupt won't trigger
			 * right away when we reenable it; hence, check
			 * BD_EMPTY here again to make sure there isn't such an
			 * event pending...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & TX_BD_READY ||
			    (priv->dty_tx == priv->cur_tx))
				break;
		}

		ethoc_update_tx_stats(priv, &bd);
		priv->dty_tx++;
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	return count;
}
/* Interrupt handler: ack what fired, count dropped packets, and defer
 * RX/TX work to NAPI by masking those sources and scheduling the poll.
 */
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;
	u32 mask;

	/* Figure out what triggered the interrupt...
	 * The tricky bit here is that the interrupt source bits get
	 * set in INT_SOURCE for an event regardless of whether that
	 * event is masked or not. Thus, in order to figure out what
	 * triggered the interrupt, we need to remove the sources
	 * for all events that are currently masked. This behaviour
	 * is not particularly well documented but reasonable...
	 */
	mask = ethoc_read(priv, INT_MASK);
	pending = ethoc_read(priv, INT_SOURCE);
	pending &= mask;

	if (unlikely(pending == 0))
		return IRQ_NONE;	/* shared line: not ours */

	ethoc_ack_irq(priv, pending);

	/* We always handle the dropped packet interrupt */
	if (pending & INT_MASK_BUSY) {
		dev_err(&dev->dev, "packet dropped\n");
		dev->stats.rx_dropped++;
	}

	/* Handle receive/transmit event by switching to polling */
	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
/* Read the station MAC address out of the MAC_ADDR0/1 registers into
 * the 6-byte buffer at @addr. Always returns 0.
 */
static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
	struct ethoc *priv = netdev_priv(dev);
	u8 *mac = (u8 *)addr;
	u32 lo, hi;

	/* MAC_ADDR0 holds bytes 2..5, MAC_ADDR1 holds bytes 0..1 */
	lo = ethoc_read(priv, MAC_ADDR0);
	hi = ethoc_read(priv, MAC_ADDR1);

	mac[0] = (hi >> 8) & 0xff;
	mac[1] = (hi >> 0) & 0xff;
	mac[2] = (lo >> 24) & 0xff;
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = (lo >> 0) & 0xff;

	return 0;
}
/* NAPI poll callback: run RX and TX reclaim; when both finish under
 * budget, complete NAPI and re-enable the RX/TX interrupt sources.
 */
static int ethoc_poll(struct napi_struct *napi, int budget)
{
	struct ethoc *priv = container_of(napi, struct ethoc, napi);
	int rx_done = ethoc_rx(priv->netdev, budget);
	int tx_done = ethoc_tx(priv->netdev, budget);

	if (rx_done < budget && tx_done < budget) {
		napi_complete(napi);
		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
	}

	return rx_done;
}
/* MDIO bus read: request register @reg of PHY @phy, then poll MIISTATUS
 * up to 5 times (100-200us apart) for completion.
 * Returns the 16-bit register value or -EBUSY on timeout.
 */
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	for (i = 0; i < 5; i++) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}
/* MDIO bus write: write @val to register @reg of PHY @phy, then poll
 * MIISTATUS up to 5 times for completion.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	for (i = 0; i < 5; i++) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY)) {
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return 0;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}
/* Link-change callback required by phy_connect_direct(); intentionally
 * a no-op for this driver. */
static void ethoc_mdio_poll(struct net_device *dev)
{
}
/* Locate and attach the PHY: use the platform-supplied address if one
 * was given (phy_id != -1), otherwise the first PHY found on the bus.
 * Gigabit modes are stripped since the MAC only supports 10/100.
 * Returns 0 on success or a negative errno.
 */
static int ethoc_mdio_probe(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct phy_device *phy;
	int err;

	if (priv->phy_id != -1)
		phy = priv->mdio->phy_map[priv->phy_id];
	else
		phy = phy_find_first(priv->mdio);

	if (!phy) {
		dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
				 PHY_INTERFACE_MODE_GMII);
	if (err) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return err;
	}

	priv->phy = phy;
	/* no gigabit: advertise/support 10/100 only */
	phy->advertising &= ~(ADVERTISED_1000baseT_Full |
			      ADVERTISED_1000baseT_Half);
	phy->supported &= ~(SUPPORTED_1000baseT_Full |
			    SUPPORTED_1000baseT_Half);

	return 0;
}
/* ndo_open: claim the IRQ, program the descriptor rings, reset the MAC,
 * start the queue, the PHY and NAPI.
 * Returns 0 on success or the request_irq() error.
 */
static int ethoc_open(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	ethoc_init_ring(priv, dev->mem_start);
	ethoc_reset(priv);

	if (netif_queue_stopped(dev)) {
		dev_dbg(&dev->dev, " resuming queue\n");
		netif_wake_queue(dev);
	} else {
		dev_dbg(&dev->dev, " starting queue\n");
		netif_start_queue(dev);
	}

	phy_start(priv->phy);
	napi_enable(&priv->napi);

	if (netif_msg_ifup(priv)) {
		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
			 dev->base_addr, dev->mem_start, dev->mem_end);
	}

	return 0;
}
/* ndo_stop: quiesce NAPI, the PHY and the MAC, release the IRQ and
 * stop the transmit queue. Always returns 0.
 */
static int ethoc_stop(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	if (priv->phy)
		phy_stop(priv->phy);

	ethoc_disable_rx_and_tx(priv);
	free_irq(dev->irq, dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	return 0;
}
/* ndo_do_ioctl: forward MII ioctls to the PHY layer. SIOCGMIIPHY uses
 * the attached PHY; other commands address an explicit PHY on the bus.
 */
static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ethoc *priv = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);
	struct phy_device *phy = NULL;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd != SIOCGMIIPHY) {
		if (mdio->phy_id >= PHY_MAX_ADDR)
			return -ERANGE;

		phy = priv->mdio->phy_map[mdio->phy_id];
		if (!phy)
			return -ENODEV;
	} else {
		phy = priv->phy;
	}

	return phy_mii_ioctl(phy, ifr, cmd);
}
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
}
/* ndo_set_mac_address: validate, store and program a new MAC address.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *sa = p;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	ethoc_do_set_mac_address(dev);

	return 0;
}
/* ndo_set_rx_mode: translate dev->flags into MODER bits (loopback,
 * broadcast, promiscuous) and build the 64-bit multicast hash filter.
 */
static void ethoc_set_multicast_list(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 mode = ethoc_read(priv, MODER);
	struct netdev_hw_addr *ha;
	u32 hash[2] = { 0, 0 };

	/* set loopback mode if requested */
	if (dev->flags & IFF_LOOPBACK)
		mode |= MODER_LOOP;
	else
		mode &= ~MODER_LOOP;

	/* receive broadcast frames if requested */
	if (dev->flags & IFF_BROADCAST)
		mode &= ~MODER_BRO;	/* MODER_BRO set means "reject broadcast" */
	else
		mode |= MODER_BRO;

	/* enable promiscuous mode if requested */
	if (dev->flags & IFF_PROMISC)
		mode |= MODER_PRO;
	else
		mode &= ~MODER_PRO;

	ethoc_write(priv, MODER, mode);

	/* receive multicast frames */
	if (dev->flags & IFF_ALLMULTI) {
		hash[0] = 0xffffffff;
		hash[1] = 0xffffffff;
	} else {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc(ETH_ALEN, ha->addr);
			/* top 6 CRC bits index one of 64 hash-filter bits */
			int bit = (crc >> 26) & 0x3f;
			hash[bit >> 5] |= 1 << (bit & 0x1f);
		}
	}

	ethoc_write(priv, ETH_HASH0, hash[0]);
	ethoc_write(priv, ETH_HASH1, hash[1]);
}
/* ndo_change_mtu: MTU changes are not supported by this hardware. */
static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
	(void)dev;
	(void)new_mtu;

	return -ENOSYS;
}
/*
 * ndo_tx_timeout: the TX queue stalled.  If the controller has pending
 * interrupt causes, re-run the interrupt handler by hand — presumably to
 * recover from a missed/lost interrupt (TODO confirm against hardware
 * errata; no reset is performed here).
 */
static void ethoc_tx_timeout(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 pending = ethoc_read(priv, INT_SOURCE);

	if (likely(pending))
		ethoc_interrupt(dev->irq, dev);
}
/*
 * ndo_start_xmit: copy one skb into the next TX buffer descriptor's
 * packet buffer and hand the descriptor to the MAC.
 *
 * Oversized frames are counted as tx_errors and dropped.  The skb is
 * always consumed and NETDEV_TX_OK is returned unconditionally.
 */
static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct ethoc_bd bd;
	unsigned int entry;
	void *dest;

	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
		dev->stats.tx_errors++;
		goto out;
	}

	entry = priv->cur_tx % priv->num_tx;
	spin_lock_irq(&priv->lock);
	priv->cur_tx++;

	ethoc_read_bd(priv, entry, &bd);
	/* ask the MAC to pad short frames up to the minimum length */
	if (unlikely(skb->len < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	dest = priv->vma[entry];
	memcpy_toio(dest, skb->data, skb->len);

	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(skb->len);
	ethoc_write_bd(priv, entry, &bd);

	/* READY is set in a second BD write so length/flags are already in
	 * place when the MAC may start transmitting */
	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
		/* ring full until the dirty pointer catches up */
		dev_dbg(&dev->dev, "stopping queue\n");
		netif_stop_queue(dev);
	}

	spin_unlock_irq(&priv->lock);
	skb_tx_timestamp(skb);
out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* ethtool get_settings: delegate to the attached PHY, if any. */
static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ethoc *priv = netdev_priv(dev);

	if (!priv->phy)
		return -EOPNOTSUPP;

	return phy_ethtool_gset(priv->phy, cmd);
}
/* ethtool set_settings: delegate to the attached PHY, if any. */
static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ethoc *priv = netdev_priv(dev);

	if (!priv->phy)
		return -EOPNOTSUPP;

	return phy_ethtool_sset(priv->phy, cmd);
}
/* ethtool get_regs_len: the register dump covers the whole window. */
static int ethoc_get_regs_len(struct net_device *netdev)
{
	return ETH_END;
}
/* ethtool get_regs: dump the full MAC register window as 32-bit words. */
static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			   void *p)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 *regs_buff = p;
	unsigned i;

	regs->version = 0;
	for (i = 0; i < ETH_END / sizeof(u32); ++i)
		regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
}
/* ethtool get_ringparam: report the current RX/TX descriptor split. */
static void ethoc_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct ethoc *priv = netdev_priv(dev);

	/* one descriptor pool is shared between RX and TX, hence num_bd - 1 */
	ring->rx_max_pending = priv->num_bd - 1;
	ring->tx_max_pending = priv->num_bd - 1;
	ring->rx_pending = priv->num_rx;
	ring->tx_pending = priv->num_tx;

	/* mini/jumbo rings do not exist on this hardware */
	ring->rx_mini_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_jumbo_pending = 0;
}
/*
 * ethtool set_ringparam: repartition the shared descriptor pool between
 * RX and TX.  Each count must be >= 1 and together they must fit in
 * num_bd; mini/jumbo rings are rejected.  A running interface is
 * quiesced around the ring re-initialisation.
 *
 * NOTE(review): num_tx is rounded down to a power of two while num_rx is
 * taken verbatim, so the final split can differ from the request —
 * looks intentional (num_tx must be a power of two); confirm.
 */
static int ethoc_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring)
{
	struct ethoc *priv = netdev_priv(dev);

	if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
	    ring->tx_pending + ring->rx_pending > priv->num_bd)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(dev)) {
		/* stop traffic and make sure no IRQ handler is still running */
		netif_tx_disable(dev);
		ethoc_disable_rx_and_tx(priv);
		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		synchronize_irq(dev->irq);
	}

	priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
	priv->num_rx = ring->rx_pending;
	ethoc_init_ring(priv, dev->mem_start);

	if (netif_running(dev)) {
		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		ethoc_enable_rx_and_tx(priv);
		netif_wake_queue(dev);
	}
	return 0;
}
const struct ethtool_ops ethoc_ethtool_ops = {
.get_settings = ethoc_get_settings,
.set_settings = ethoc_set_settings,
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
.get_link = ethtool_op_get_link,
.get_ringparam = ethoc_get_ringparam,
.set_ringparam = ethoc_set_ringparam,
.get_ts_info = ethtool_op_get_ts_info,
};
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
.ndo_tx_timeout = ethoc_tx_timeout,
.ndo_start_xmit = ethoc_start_xmit,
};
/**
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev: platform device
 */
static int ethoc_probe(struct platform_device *pdev)
{
struct net_device *netdev = NULL;
struct resource *res = NULL;
struct resource *mmio = NULL;
struct resource *mem = NULL;
struct ethoc *priv = NULL;
unsigned int phy;
int num_bd;
int ret = 0;
bool random_mac = false;
struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
if (!netdev) {
ret = -ENOMEM;
goto out;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
platform_set_drvdata(pdev, netdev);
/* obtain I/O memory space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
ret = -ENXIO;
goto free;
}
mmio = devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), res->name);
if (!mmio) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
goto free;
}
netdev->base_addr = mmio->start;
/* obtain buffer memory space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
mem = devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), res->name);
if (!mem) {
dev_err(&pdev->dev, "cannot request memory space\n");
ret = -ENXIO;
goto free;
}
netdev->mem_start = mem->start;
netdev->mem_end = mem->end;
}
/* obtain device IRQ number */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "cannot obtain IRQ\n");
ret = -ENXIO;
goto free;
}
netdev->irq = res->start;
/* setup driver-private data */
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dma_alloc = 0;
priv->io_region_size = resource_size(mmio);
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
goto error;
}
if (netdev->mem_end) {
priv->membase = devm_ioremap_nocache(&pdev->dev,
netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
goto error;
}
} else {
/* Allocate buffer memory */
priv->membase = dmam_alloc_coherent(&pdev->dev,
buffer_size, (void *)&netdev->mem_start,
GFP_KERNEL);
if (!priv->membase) {
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
goto error;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
}
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
goto error;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
priv->num_tx, priv->num_rx);
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
goto error;
}
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
priv->phy_id = -1;
#ifdef CONFIG_OF
{
const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
"local-mac-address",
NULL);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
}
#endif
}
/* Check that the given MAC address is valid. If it isn't, read the
* current MAC from the controller.
*/
if (!is_valid_ether_addr(netdev->dev_addr))
ethoc_get_mac_address(netdev, netdev->dev_addr);
/* Check the MAC again for validity, if it still isn't choose and
* program a random one.
*/
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_random_addr(netdev->dev_addr);
random_mac = true;
}
ethoc_do_set_mac_address(netdev);
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
/* Allow the platform setup code to adjust MII management bus clock. */
if (!eth_clkfreq) {
struct clk *clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
priv->clk = clk;
clk_prepare_enable(clk);
eth_clkfreq = clk_get_rate(clk);
}
}
if (eth_clkfreq) {
u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
if (!clkdiv)
clkdiv = 2;
dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
ethoc_write(priv, MIIMODER,
(ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
clkdiv);
}
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
goto free;
}
priv->mdio->name = "ethoc-mdio";
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
priv->mdio->name, pdev->id);
priv->mdio->read = ethoc_mdio_read;
priv->mdio->write = ethoc_mdio_write;
priv->mdio->priv = priv;
priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!priv->mdio->irq) {
ret = -ENOMEM;
goto free_mdio;
}
for (phy = 0; phy < PHY_MAX_ADDR; phy++)
priv->mdio->irq[phy] = PHY_POLL;
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
goto free_mdio;
}
ret = ethoc_mdio_probe(netdev);
if (ret) {
dev_err(&netdev->dev, "failed to probe MDIO bus\n");
goto error;
}
/* setup the net_device structure */
netdev->netdev_ops = ðoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
netdev->ethtool_ops = ðoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
spin_lock_init(&priv->lock);
ret = register_netdev(netdev);
if (ret < 0) {
dev_err(&netdev->dev, "failed to register interface\n");
goto error2;
}
goto out;
error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
if (priv->clk)
clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
}
/**
* ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
/*
 * Tear down a probed device: NAPI, PHY, MDIO bus, clock and finally the
 * net_device itself.  All steps are guarded so a partially-probed or
 * already-removed device does not oops (netdev_priv() was previously
 * evaluated before the NULL check, and phy_disconnect() was called even
 * when no PHY had been attached).
 */
static int ethoc_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	if (netdev) {
		struct ethoc *priv = netdev_priv(netdev);

		netif_napi_del(&priv->napi);

		/* the PHY is absent if ethoc_mdio_probe() never succeeded */
		if (priv->phy)
			phy_disconnect(priv->phy);
		priv->phy = NULL;

		if (priv->mdio) {
			mdiobus_unregister(priv->mdio);
			kfree(priv->mdio->irq);
			mdiobus_free(priv->mdio);
		}
		if (priv->clk)
			clk_disable_unprepare(priv->clk);
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	return 0;
}
#ifdef CONFIG_PM
/* Suspend/resume are not implemented; the stubs make that explicit. */
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif
static const struct of_device_id ethoc_match[] = {
{ .compatible = "opencores,ethoc", },
{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
.remove = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
.name = "ethoc",
.of_match_table = ethoc_match,
},
};
module_platform_driver(ethoc_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
cocacake/linux | drivers/mfd/wm8400-core.c | 437 | 5260 | /*
* Core driver for WM8400.
*
* Copyright 2008 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wm8400-private.h>
#include <linux/mfd/wm8400-audio.h>
#include <linux/regmap.h>
#include <linux/slab.h>
/* regmap volatile_reg callback: registers that must always bypass the cache. */
static bool wm8400_volatile(struct device *dev, unsigned int reg)
{
	/* interrupt and shutdown-reason registers change under hardware control */
	return reg == WM8400_INTERRUPT_STATUS_1 ||
	       reg == WM8400_INTERRUPT_LEVELS ||
	       reg == WM8400_SHUTDOWN_REASON;
}
/**
 * wm8400_reg_read - Single register read
 *
 * @wm8400: Pointer to wm8400 control structure
 * @reg: Register to read
 *
 * Return: the register value.  NOTE(review): on regmap failure the
 * negative errno is returned through a u16, so callers cannot reliably
 * distinguish an error from a register value — long-standing wart kept
 * for interface compatibility.
 */
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
{
	unsigned int val;
	int ret;

	ret = regmap_read(wm8400->regmap, reg, &val);
	if (ret < 0)
		return ret;

	return val;
}
EXPORT_SYMBOL_GPL(wm8400_reg_read);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
{
return regmap_bulk_read(wm8400->regmap, reg, data, count);
}
EXPORT_SYMBOL_GPL(wm8400_block_read);
/*
 * Register the "wm8400-codec" MFD child, passing the whole wm8400
 * structure as its platform data.
 */
static int wm8400_register_codec(struct wm8400 *wm8400)
{
	const struct mfd_cell cell = {
		.name = "wm8400-codec",
		.platform_data = wm8400,
		.pdata_size = sizeof(*wm8400),
	};

	return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0, NULL);
}
/*
 * wm8400_init - Generic initialisation
 *
 * The WM8400 can be configured as either an I2C or SPI device.  Probe
 * functions for each bus set up the accessors then call into this to
 * set up the device itself: verify the chip ID, report the silicon
 * revision, register the codec MFD cell and run any board-specific
 * platform_init hook.
 *
 * (The two regmap_read() calls below had their '&' operators mangled
 * into an HTML entity in this copy of the file; restored.)
 */
static int wm8400_init(struct wm8400 *wm8400,
		       struct wm8400_platform_data *pdata)
{
	unsigned int reg;
	int ret;

	dev_set_drvdata(wm8400->dev, wm8400);

	/* Check that this is actually a WM8400 */
	ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
	if (ret != 0) {
		dev_err(wm8400->dev, "Chip ID register read failed\n");
		return -EIO;
	}
	if (reg != 0x6172) {
		dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
			reg);
		return -ENODEV;
	}

	ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
	if (ret != 0) {
		dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
		return ret;
	}
	reg = (reg & WM8400_CHIP_REV_MASK) >> WM8400_CHIP_REV_SHIFT;
	dev_info(wm8400->dev, "WM8400 revision %x\n", reg);

	ret = wm8400_register_codec(wm8400);
	if (ret != 0) {
		dev_err(wm8400->dev, "Failed to register codec\n");
		goto err_children;
	}

	if (pdata && pdata->platform_init) {
		ret = pdata->platform_init(wm8400->dev);
		if (ret != 0) {
			dev_err(wm8400->dev, "Platform init failed: %d\n",
				ret);
			goto err_children;
		}
	} else
		dev_warn(wm8400->dev, "No platform initialisation supplied\n");

	return 0;

err_children:
	mfd_remove_devices(wm8400->dev);
	return ret;
}
static void wm8400_release(struct wm8400 *wm8400)
{
mfd_remove_devices(wm8400->dev);
}
static const struct regmap_config wm8400_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = WM8400_REGISTER_COUNT - 1,
.volatile_reg = wm8400_volatile,
.cache_type = REGCACHE_RBTREE,
};
/**
* wm8400_reset_codec_reg_cache - Reset cached codec registers to
* their default values.
*/
void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
{
regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
}
EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* I2C probe: allocate state, set up the regmap and hand off to wm8400_init(). */
static int wm8400_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8400 *wm8400;

	wm8400 = devm_kzalloc(&i2c->dev, sizeof(struct wm8400), GFP_KERNEL);
	if (!wm8400)
		return -ENOMEM;

	wm8400->regmap = devm_regmap_init_i2c(i2c, &wm8400_regmap_config);
	if (IS_ERR(wm8400->regmap))
		return PTR_ERR(wm8400->regmap);

	wm8400->dev = &i2c->dev;
	i2c_set_clientdata(i2c, wm8400);

	return wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
}
/* I2C remove: tear down the MFD children; memory/regmap are devm-managed. */
static int wm8400_i2c_remove(struct i2c_client *i2c)
{
	struct wm8400 *wm8400 = i2c_get_clientdata(i2c);

	wm8400_release(wm8400);

	return 0;
}
static const struct i2c_device_id wm8400_i2c_id[] = {
{ "wm8400", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8400_i2c_id);
static struct i2c_driver wm8400_i2c_driver = {
.driver = {
.name = "WM8400",
},
.probe = wm8400_i2c_probe,
.remove = wm8400_i2c_remove,
.id_table = wm8400_i2c_id,
};
#endif
/*
 * Register the bus driver(s).  Currently only I2C is wired up; without
 * I2C support the init fails with -ENODEV.
 */
static int __init wm8400_module_init(void)
{
	int ret = -ENODEV;

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8400_i2c_driver);
	if (ret != 0)
		pr_err("Failed to register I2C driver: %d\n", ret);
#endif

	return ret;
}
/* subsys_initcall: regulator/codec consumers probe later in boot */
subsys_initcall(wm8400_module_init);
static void __exit wm8400_module_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
i2c_del_driver(&wm8400_i2c_driver);
#endif
}
module_exit(wm8400_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
| gpl-2.0 |
yubo/linux-2-6-32-220-23-1-el6 | net/sunrpc/svcauth.c | 693 | 3862 | /*
* linux/net/sunrpc/svcauth.c
*
* The generic interface for RPC authentication on the server side.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*
* CHANGES
* 19-Apr-2000 Chris Evans - Security fix
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/err.h>
#include <linux/hash.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
/*
* Table of authenticators
*/
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
static DEFINE_SPINLOCK(authtab_lock);
static struct auth_ops *authtab[RPC_AUTH_MAXFLAVOR] = {
[0] = &svcauth_null,
[1] = &svcauth_unix,
};
/*
 * svc_authenticate - dispatch an incoming RPC request to its auth flavor
 *
 * Reads the flavor number from the request, looks up the registered
 * auth_ops under authtab_lock and pins its module before calling its
 * accept() method.  Unknown flavors are denied with AUTH_BADCRED.
 */
int
svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
{
	rpc_authflavor_t flavor;
	struct auth_ops *aops;

	*authp = rpc_auth_ok;

	flavor = svc_getnl(&rqstp->rq_arg.head[0]);

	dprintk("svc: svc_authenticate (%d)\n", flavor);

	spin_lock(&authtab_lock);
	if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor])
			|| !try_module_get(aops->owner)) {
		spin_unlock(&authtab_lock);
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	spin_unlock(&authtab_lock);

	/* the module reference taken above is dropped in svc_authorise() */
	rqstp->rq_authop = aops;
	return aops->accept(rqstp, authp);
}
EXPORT_SYMBOL_GPL(svc_authenticate);
/* Delegate client identification to the flavor chosen in svc_authenticate(). */
int svc_set_client(struct svc_rqst *rqstp)
{
	return rqstp->rq_authop->set_client(rqstp);
}
EXPORT_SYMBOL_GPL(svc_set_client);
/* A request, which was authenticated, has now executed.
 * Time to finalise the credentials and verifier
 * and release any resources.
 *
 * Drops the module reference taken in svc_authenticate() and clears
 * rq_authop; returns the flavor's release() result, or 0 when no auth
 * operation was in progress.
 */
int svc_authorise(struct svc_rqst *rqstp)
{
	struct auth_ops *aops = rqstp->rq_authop;
	int rv = 0;

	rqstp->rq_authop = NULL;

	if (aops) {
		rv = aops->release(rqstp);
		module_put(aops->owner);
	}
	return rv;
}
/*
 * Register @aops for @flavor.  Fails with -EINVAL when the flavor is out
 * of range or a handler is already installed.
 */
int
svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops)
{
	bool installed;

	spin_lock(&authtab_lock);
	installed = flavor < RPC_AUTH_MAXFLAVOR && authtab[flavor] == NULL;
	if (installed)
		authtab[flavor] = aops;
	spin_unlock(&authtab_lock);

	return installed ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(svc_auth_register);
/* Remove the handler for @flavor; silently ignores out-of-range flavors. */
void
svc_auth_unregister(rpc_authflavor_t flavor)
{
	spin_lock(&authtab_lock);
	if (flavor < RPC_AUTH_MAXFLAVOR)
		authtab[flavor] = NULL;
	spin_unlock(&authtab_lock);
}
EXPORT_SYMBOL_GPL(svc_auth_unregister);
/**************************************************
 * 'auth_domains' are stored in a hash table indexed by name.
 * When the last reference to an 'auth_domain' is dropped,
 * the object is unhashed and freed.
 * If auth_domain_lookup fails to find an entry, it will return
 * its second argument 'new'. If this is non-null, it will
 * have been atomically linked into the table.
 */
#define DN_HASHBITS 6
#define DN_HASHMAX (1<<DN_HASHBITS)
#define DN_HASHMASK (DN_HASHMAX-1)
static struct hlist_head auth_domain_table[DN_HASHMAX];
static spinlock_t auth_domain_lock =
__SPIN_LOCK_UNLOCKED(auth_domain_lock);
/*
 * Drop a reference; when it is the last one, unhash the domain and let
 * the flavor free it.  atomic_dec_and_lock() takes auth_domain_lock only
 * on the final put, so unhash and refcount-zero are atomic together.
 */
void auth_domain_put(struct auth_domain *dom)
{
	if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
		hlist_del(&dom->hash);
		dom->flavour->domain_release(dom);
		spin_unlock(&auth_domain_lock);
	}
}
EXPORT_SYMBOL_GPL(auth_domain_put);
/*
 * auth_domain_lookup - find a domain by name, optionally inserting @new
 *
 * Returns the existing entry (with an extra kref taken) when @name is
 * already hashed.  Otherwise returns @new which, if non-NULL, has been
 * linked into the table under auth_domain_lock.  Note no extra
 * reference is taken on @new here — presumably the caller's reference
 * becomes the table's; confirm against callers.
 */
struct auth_domain *
auth_domain_lookup(char *name, struct auth_domain *new)
{
	struct auth_domain *hp;
	struct hlist_head *head;
	struct hlist_node *np;

	head = &auth_domain_table[hash_str(name, DN_HASHBITS)];

	spin_lock(&auth_domain_lock);

	hlist_for_each_entry(hp, np, head, hash) {
		if (strcmp(hp->name, name)==0) {
			kref_get(&hp->ref);
			spin_unlock(&auth_domain_lock);
			return hp;
		}
	}
	if (new)
		hlist_add_head(&new->hash, head);
	spin_unlock(&auth_domain_lock);
	return new;
}
EXPORT_SYMBOL_GPL(auth_domain_lookup);
/* Pure lookup: like auth_domain_lookup() but never inserts anything. */
struct auth_domain *auth_domain_find(char *name)
{
	return auth_domain_lookup(name, NULL);
}
EXPORT_SYMBOL_GPL(auth_domain_find);
| gpl-2.0 |
actzendaria/xgt | drivers/firmware/dmi-sysfs.c | 1205 | 17337 | /*
* dmi-sysfs.c
*
* This module exports the DMI tables read-only to userspace through the
* sysfs file system.
*
* Data is currently found below
* /sys/firmware/dmi/...
*
* DMI attributes are presented in attribute files with names
* formatted using %d-%d, so that the first integer indicates the
* structure type (0-255), and the second field is the instance of that
* entry.
*
* Copyright 2011 Google, Inc.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/dmi.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider
the top entry type is only 8 bits */
struct dmi_sysfs_entry {
struct dmi_header dh;
struct kobject kobj;
int instance;
int position;
struct list_head list;
struct kobject *child;
};
/*
* Global list of dmi_sysfs_entry. Even though this should only be
* manipulated at setup and teardown, the lazy nature of the kobject
* system means we get lazy removes.
*/
static LIST_HEAD(entry_list);
static DEFINE_SPINLOCK(entry_list_lock);
/* dmi_sysfs_attribute - Top level attribute. used by all entries. */
struct dmi_sysfs_attribute {
struct attribute attr;
ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
};
#define DMI_SYSFS_ATTR(_entry, _name) \
struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0400}, \
.show = dmi_sysfs_##_entry##_##_name, \
}
/*
* dmi_sysfs_mapped_attribute - Attribute where we require the entry be
* mapped in. Use in conjunction with dmi_sysfs_specialize_attr_ops.
*/
struct dmi_sysfs_mapped_attribute {
struct attribute attr;
ssize_t (*show)(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
char *buf);
};
#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0400}, \
.show = dmi_sysfs_##_entry##_##_name, \
}
/*************************************************
* Generic DMI entry support.
*************************************************/
static void dmi_entry_free(struct kobject *kobj)
{
kfree(kobj);
}
static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
{
return container_of(kobj, struct dmi_sysfs_entry, kobj);
}
static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
{
return container_of(attr, struct dmi_sysfs_attribute, attr);
}
static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
struct attribute *_attr, char *buf)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_sysfs_attribute *attr = to_attr(_attr);
/* DMI stuff is only ever admin visible */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return attr->show(entry, buf);
}
static const struct sysfs_ops dmi_sysfs_attr_ops = {
.show = dmi_sysfs_attr_show,
};
typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
const struct dmi_header *dh, void *);
struct find_dmi_data {
struct dmi_sysfs_entry *entry;
dmi_callback callback;
void *private;
int instance_countdown;
ssize_t ret;
};
/*
 * dmi_walk() callback: skip entries until the Nth instance of the wanted
 * type is reached, then invoke the caller's callback on it exactly once.
 */
static void find_dmi_entry_helper(const struct dmi_header *dh,
				  void *_data)
{
	struct find_dmi_data *data = _data;
	struct dmi_sysfs_entry *entry = data->entry;

	/* Is this the entry we want? */
	if (dh->type != entry->dh.type)
		return;

	if (data->instance_countdown != 0) {
		/* try the next instance? */
		data->instance_countdown--;
		return;
	}

	/*
	 * Don't ever revisit the instance.  Short circuit later
	 * instances by letting the instance_countdown run negative
	 */
	data->instance_countdown--;

	/* Found the entry */
	data->ret = data->callback(entry, dh, data->private);
}
/* State for passing the read parameters through dmi_find_entry() */
struct dmi_read_state {
char *buf;
loff_t pos;
size_t count;
};
/*
 * Re-walk the DMI table to find the live copy of @entry and run
 * @callback on it.  Returns the callback's result, or -EIO if the entry
 * vanished from the table since it was enumerated.
 */
static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
			      dmi_callback callback, void *private)
{
	struct find_dmi_data data = {
		.entry = entry,
		.callback = callback,
		.private = private,
		.instance_countdown = entry->instance,
		.ret = -EIO,  /* To signal the entry disappeared */
	};
	int ret;

	ret = dmi_walk(find_dmi_entry_helper, &data);
	/* This shouldn't happen, but just in case. */
	if (ret)
		return -EINVAL;
	return data.ret;
}
/*
* Calculate and return the byte length of the dmi entry identified by
* dh. This includes both the formatted portion as well as the
* unformatted string space, including the two trailing nul characters.
*/
static size_t dmi_entry_length(const struct dmi_header *dh)
{
const char *p = (const char *)dh;
p += dh->length;
while (p[0] || p[1])
p++;
return 2 + p - (const char *)dh;
}
/*************************************************
* Support bits for specialized DMI entry support
*************************************************/
struct dmi_entry_attr_show_data {
struct attribute *attr;
char *buf;
};
static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
void *_data)
{
struct dmi_entry_attr_show_data *data = _data;
struct dmi_sysfs_mapped_attribute *attr;
attr = container_of(data->attr,
struct dmi_sysfs_mapped_attribute, attr);
return attr->show(entry, dh, data->buf);
}
static ssize_t dmi_entry_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct dmi_entry_attr_show_data data = {
.attr = attr,
.buf = buf,
};
/* Find the entry according to our parent and call the
* normalized show method hanging off of the attribute */
return find_dmi_entry(to_entry(kobj->parent),
dmi_entry_attr_show_helper, &data);
}
static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
.show = dmi_entry_attr_show,
};
/*************************************************
* Specialized DMI entry support.
*************************************************/
/*** Type 15 - System Event Table ***/
#define DMI_SEL_ACCESS_METHOD_IO8 0x00
#define DMI_SEL_ACCESS_METHOD_IO2x8 0x01
#define DMI_SEL_ACCESS_METHOD_IO16 0x02
#define DMI_SEL_ACCESS_METHOD_PHYS32 0x03
#define DMI_SEL_ACCESS_METHOD_GPNV 0x04
struct dmi_system_event_log {
struct dmi_header header;
u16 area_length;
u16 header_start_offset;
u16 data_start_offset;
u8 access_method;
u8 status;
u32 change_token;
union {
struct {
u16 index_addr;
u16 data_addr;
} io;
u32 phys_addr32;
u16 gpnv_handle;
u32 access_method_address;
};
u8 header_format;
u8 type_descriptors_supported_count;
u8 per_log_type_descriptor_length;
u8 supported_log_type_descriptos[0];
} __packed;
#define DMI_SYSFS_SEL_FIELD(_field) \
static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
const struct dmi_header *dh, \
char *buf) \
{ \
struct dmi_system_event_log sel; \
if (sizeof(sel) > dmi_entry_length(dh)) \
return -EIO; \
memcpy(&sel, dh, sizeof(sel)); \
return sprintf(buf, "%u\n", sel._field); \
} \
static DMI_SYSFS_MAPPED_ATTR(sel, _field)
DMI_SYSFS_SEL_FIELD(area_length);
DMI_SYSFS_SEL_FIELD(header_start_offset);
DMI_SYSFS_SEL_FIELD(data_start_offset);
DMI_SYSFS_SEL_FIELD(access_method);
DMI_SYSFS_SEL_FIELD(status);
DMI_SYSFS_SEL_FIELD(change_token);
DMI_SYSFS_SEL_FIELD(access_method_address);
DMI_SYSFS_SEL_FIELD(header_format);
DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
static struct attribute *dmi_sysfs_sel_attrs[] = {
&dmi_sysfs_attr_sel_area_length.attr,
&dmi_sysfs_attr_sel_header_start_offset.attr,
&dmi_sysfs_attr_sel_data_start_offset.attr,
&dmi_sysfs_attr_sel_access_method.attr,
&dmi_sysfs_attr_sel_status.attr,
&dmi_sysfs_attr_sel_change_token.attr,
&dmi_sysfs_attr_sel_access_method_address.attr,
&dmi_sysfs_attr_sel_header_format.attr,
&dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
NULL,
};
static struct kobj_type dmi_system_event_log_ktype = {
.release = dmi_entry_free,
.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
.default_attrs = dmi_sysfs_sel_attrs,
};
typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
loff_t offset);
static DEFINE_MUTEX(io_port_lock);
static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outb((u8)offset, sel->io.index_addr);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outb((u8)offset, sel->io.index_addr);
outb((u8)(offset >> 8), sel->io.index_addr + 1);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outw((u16)offset, sel->io.index_addr);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static sel_io_reader sel_io_readers[] = {
[DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io,
[DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io,
[DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io,
};
/*
 * Read the event log byte-by-byte through an indexed I/O-port access
 * method, bounded by the log's area_length.  Returns the number of
 * bytes copied into @buf.
 */
static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
				   const struct dmi_system_event_log *sel,
				   char *buf, loff_t pos, size_t count)
{
	ssize_t wrote = 0;
	/* caller (dmi_sel_raw_read_helper) guarantees access_method is
	 * one of the three indexed-I/O values this table covers */
	sel_io_reader io_reader = sel_io_readers[sel->access_method];

	while (count && pos < sel->area_length) {
		count--;
		*(buf++) = io_reader(sel, pos++);
		wrote++;
	}

	return wrote;
}
/*
 * Read the event log from a 32-bit physical memory window: map it for
 * the duration of the read, copy byte-wise with readb(), unmap.
 */
static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
				       const struct dmi_system_event_log *sel,
				       char *buf, loff_t pos, size_t count)
{
	u8 __iomem *mapped;
	ssize_t wrote = 0;

	mapped = ioremap(sel->access_method_address, sel->area_length);
	if (!mapped)
		return -EIO;

	while (count && pos < sel->area_length) {
		count--;
		*(buf++) = readb(mapped + pos++);
		wrote++;
	}

	iounmap(mapped);
	return wrote;
}
/* Dispatch a raw SEL read according to the entry's access_method. */
static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
				       const struct dmi_header *dh,
				       void *_state)
{
	struct dmi_read_state *state = _state;
	struct dmi_system_event_log sel;

	/* guard against a truncated/corrupt table entry */
	if (sizeof(sel) > dmi_entry_length(dh))
		return -EIO;

	memcpy(&sel, dh, sizeof(sel));

	switch (sel.access_method) {
	case DMI_SEL_ACCESS_METHOD_IO8:
	case DMI_SEL_ACCESS_METHOD_IO2x8:
	case DMI_SEL_ACCESS_METHOD_IO16:
		return dmi_sel_raw_read_io(entry, &sel, state->buf,
					   state->pos, state->count);
	case DMI_SEL_ACCESS_METHOD_PHYS32:
		return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
					       state->pos, state->count);
	case DMI_SEL_ACCESS_METHOD_GPNV:
		pr_info("dmi-sysfs: GPNV support missing.\n");
		return -EIO;
	default:
		pr_info("dmi-sysfs: Unknown access method %02x\n",
			sel.access_method);
		return -EIO;
	}
}
static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
struct dmi_read_state state = {
.buf = buf,
.pos = pos,
.count = count,
};
return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}
static struct bin_attribute dmi_sel_raw_attr = {
.attr = {.name = "raw_event_log", .mode = 0400},
.read = dmi_sel_raw_read,
};
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
int ret;
entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
if (!entry->child)
return -ENOMEM;
ret = kobject_init_and_add(entry->child,
&dmi_system_event_log_ktype,
&entry->kobj,
"system_event_log");
if (ret)
goto out_free;
ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
if (ret)
goto out_del;
return 0;
out_del:
kobject_del(entry->child);
out_free:
kfree(entry->child);
return ret;
}
/*************************************************
* Generic DMI entry support.
*************************************************/
static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.length);
}
static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.handle);
}
static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.type);
}
static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
char *buf)
{
return sprintf(buf, "%d\n", entry->instance);
}
static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
char *buf)
{
return sprintf(buf, "%d\n", entry->position);
}
static DMI_SYSFS_ATTR(entry, length);
static DMI_SYSFS_ATTR(entry, handle);
static DMI_SYSFS_ATTR(entry, type);
static DMI_SYSFS_ATTR(entry, instance);
static DMI_SYSFS_ATTR(entry, position);
static struct attribute *dmi_sysfs_entry_attrs[] = {
&dmi_sysfs_attr_entry_length.attr,
&dmi_sysfs_attr_entry_handle.attr,
&dmi_sysfs_attr_entry_type.attr,
&dmi_sysfs_attr_entry_instance.attr,
&dmi_sysfs_attr_entry_position.attr,
NULL,
};
/* find_dmi_entry() callback: copy the located entry into the read state. */
static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
					 const struct dmi_header *dh,
					 void *_state)
{
	struct dmi_read_state *rs = _state;
	size_t len = dmi_entry_length(dh);

	return memory_read_from_buffer(rs->buf, rs->count, &rs->pos, dh, len);
}
/* sysfs read of an entry's "raw" file: stream the full DMI entry bytes. */
static ssize_t dmi_entry_raw_read(struct file *filp,
				  struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buf, loff_t pos, size_t count)
{
	struct dmi_read_state rs = {
		.buf	= buf,
		.pos	= pos,
		.count	= count,
	};
	struct dmi_sysfs_entry *target = to_entry(kobj);

	return find_dmi_entry(target, dmi_entry_raw_read_helper, &rs);
}
static const struct bin_attribute dmi_entry_raw_attr = {
.attr = {.name = "raw", .mode = 0400},
.read = dmi_entry_raw_read,
};
/*
 * kobject release callback: runs when the last reference to an entry's
 * kobject is dropped.  Unlinks the entry from the global list (under
 * the list lock) and frees it.
 */
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
	struct dmi_sysfs_entry *entry = to_entry(kobj);
	spin_lock(&entry_list_lock);
	list_del(&entry->list);
	spin_unlock(&entry_list_lock);
	kfree(entry);
}
static struct kobj_type dmi_sysfs_entry_ktype = {
.release = dmi_sysfs_entry_release,
.sysfs_ops = &dmi_sysfs_attr_ops,
.default_attrs = dmi_sysfs_entry_attrs,
};
static struct kobject *dmi_kobj;
static struct kset *dmi_kset;
/* Global count of all instances seen. Only for setup */
static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
/* Global positional count of all entries seen. Only for setup */
static int __initdata position_count;
/*
 * dmi_walk() callback: allocate a struct dmi_sysfs_entry for one DMI
 * table entry, register its kobject in the "entries" kset, thread it on
 * entry_list for cleanup, create its "raw" file, and apply any
 * type-specific specialization (currently only the system event log).
 *
 * dmi_walk() cannot stop early, so errors are latched into *_ret and
 * subsequent invocations short-circuit.
 */
static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
					     void *_ret)
{
	struct dmi_sysfs_entry *entry;
	int *ret = _ret;

	/* If a previous entry saw an error, short circuit */
	if (*ret)
		return;

	/* Allocate and register a new entry into the entries set */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		*ret = -ENOMEM;
		return;
	}

	/* Set the key */
	memcpy(&entry->dh, dh, sizeof(*dh));
	entry->instance = instance_counts[dh->type]++;
	entry->position = position_count++;

	/*
	 * Self-link the list head so the list_del() in
	 * dmi_sysfs_entry_release() is safe even when the entry never
	 * makes it onto entry_list below.
	 */
	INIT_LIST_HEAD(&entry->list);

	entry->kobj.kset = dmi_kset;
	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
				    "%d-%d", dh->type, entry->instance);
	if (*ret) {
		/*
		 * Once kobject_init() has run, cleanup must go through
		 * kobject_put() so that the name allocated by
		 * kobject_add() is freed and the release callback frees
		 * the entry.  A bare kfree() here leaked the name.
		 */
		kobject_put(&entry->kobj);
		return;
	}

	/* Thread on the global list for cleanup */
	spin_lock(&entry_list_lock);
	list_add_tail(&entry->list, &entry_list);
	spin_unlock(&entry_list_lock);

	/* Handle specializations by type */
	switch (dh->type) {
	case DMI_ENTRY_SYSTEM_EVENT_LOG:
		*ret = dmi_system_event_log(entry);
		break;
	default:
		/* No specialization */
		break;
	}
	if (*ret)
		goto out_err;

	/* Create the raw binary file to access the entry */
	*ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
	if (*ret)
		goto out_err;

	return;

out_err:
	/* kobject_put(NULL) is a no-op, so a missing child is harmless. */
	kobject_put(entry->child);
	kobject_put(&entry->kobj);
	return;
}
/*
 * Drop the references held on every registered entry (and its optional
 * child kobject).  Only called from init failure or module exit, so no
 * locking is required.
 */
static void cleanup_entry_list(void)
{
	struct dmi_sysfs_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &entry_list, list) {
		kobject_put(pos->child);
		kobject_put(&pos->kobj);
	}
}
/*
 * Module init: create /sys/firmware/dmi and its "entries" kset, then
 * walk the DMI table registering one kobject per entry.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * registered so far is torn down again.
 */
static int __init dmi_sysfs_init(void)
{
	int error = -ENOMEM;
	int val;
	/* Set up our directory */
	dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
	if (!dmi_kobj)
		goto err;
	dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
	if (!dmi_kset)
		goto err;
	/* Errors hit inside the walk callback are latched into val. */
	val = 0;
	error = dmi_walk(dmi_sysfs_register_handle, &val);
	if (error)
		goto err;
	if (val) {
		error = val;
		goto err;
	}
	pr_debug("dmi-sysfs: loaded.\n");
	return 0;
err:
	cleanup_entry_list();
	kset_unregister(dmi_kset);
	kobject_put(dmi_kobj);
	return error;
}
/* Module exit: drop every entry, then tear down the kset and directory. */
static void __exit dmi_sysfs_exit(void)
{
	pr_debug("dmi-sysfs: unloading.\n");
	cleanup_entry_list();
	kset_unregister(dmi_kset);
	/* kobject_del() unlinks it from sysfs now; the final put frees it. */
	kobject_del(dmi_kobj);
	kobject_put(dmi_kobj);
}
module_init(dmi_sysfs_init);
module_exit(dmi_sysfs_exit);
MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
MODULE_DESCRIPTION("DMI sysfs support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DarkenedSky94/android_kernel_samsung_smdk4412 | kernel/srcu.c | 2997 | 10675 | /*
* Sleepable Read-Copy Update mechanism for mutual exclusion.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
*
* Author: Paul McKenney <paulmck@us.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU/ *.txt
*
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
mutex_init(&sp->mutex);
sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
return sp->per_cpu_ref ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * __init_srcu_struct - lockdep-aware initialization of an srcu_struct
 * @sp: structure to initialize
 * @name: lock-class name reported by lockdep
 * @key: static lock class key for this init call site
 *
 * Returns 0 on success or -ENOMEM if the per-CPU counter array could
 * not be allocated.
 */
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
* init_srcu_struct - initialize a sleep-RCU structure
* @sp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
int init_srcu_struct(struct srcu_struct *sp)
{
	/* No lockdep class to set up without CONFIG_DEBUG_LOCK_ALLOC. */
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
* srcu_readers_active_idx -- returns approximate number of readers
* active on the specified rank of per-CPU counters.
*/
static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
int cpu;
int sum;
sum = 0;
for_each_possible_cpu(cpu)
sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
return sum;
}
/**
* srcu_readers_active - returns approximate number of readers.
* @sp: which srcu_struct to count active readers (holding srcu_read_lock).
*
* Note that this is not an atomic primitive, and can therefore suffer
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
static int srcu_readers_active(struct srcu_struct *sp)
{
	/* Both counter ranks contribute; see the kerneldoc above. */
	int nr_readers = srcu_readers_active_idx(sp, 0);

	return nr_readers + srcu_readers_active_idx(sp, 1);
}
/**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int in_flight = srcu_readers_active(sp);

	/* Leakage unless caller handles error. */
	WARN_ON(in_flight);
	if (in_flight)
		return;

	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
* Counts the new reader in the appropriate per-CPU element of the
* srcu_struct. Must be called from process context.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;
	/* Preemption off so the increment lands in this CPU's counter. */
	preempt_disable();
	idx = sp->completed & 0x1;	/* rank currently accepting readers */
	barrier();  /* ensure compiler looks -once- at sp->completed. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
/*
* Removes the count for the old reader from the appropriate per-CPU
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
* Must be called from process context.
*/
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	/* @idx is the rank returned by the matching __srcu_read_lock(). */
	preempt_disable();
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
* We use an adaptive strategy for synchronize_srcu() and especially for
* synchronize_srcu_expedited(). We spin for a fixed time period
* (defined below) to allow SRCU readers to exit their read-side critical
* sections. If there are still some readers after 10 microseconds,
* we repeatedly block for 1-millisecond time periods. This approach
* has done well in testing, so there is no need for a config parameter.
*/
#define SYNCHRONIZE_SRCU_READER_DELAY 10
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
	int idx;
	/* Sample the grace-period counter -before- taking the mutex so the
	 * early-exit test below can observe grace periods driven by other
	 * tasks while we were waiting for the mutex.  Grace periods are
	 * serialized on sp->mutex. */
	idx = sp->completed;
	mutex_lock(&sp->mutex);
	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock.  We need -two- advances of
	 * the counter, not just one.  If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below).  So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */
	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}
	sync_func();  /* Force memory barrier on all CPUs. */
	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU.  This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */
	idx = sp->completed & 0x1;	/* rank holding the pre-flip readers */
	sp->completed++;
	sync_func();  /* Force memory barrier on all CPUs. */
	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing.  We initially give readers
	 * an arbitrarily chosen 10 microseconds to get out of their
	 * SRCU read-side critical sections, then loop waiting 1/HZ
	 * seconds per iteration.  The 10-microsecond value has done
	 * very well in testing.
	 */
	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);
	sync_func();  /* Force memory barrier on all CPUs. */
	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 *	Despite what you might think at first glance, the
	 *	preceding synchronize_sched() -must- be within the
	 *	critical section ended by the following mutex_unlock().
	 *	Otherwise, a task taking the early exit can race
	 *	with a srcu_read_unlock(), which might have executed
	 *	just before the preceding srcu_readers_active() check,
	 *	and whose CPU might have reordered the srcu_read_unlock()
	 *	with the preceding critical section.  In this case, there
	 *	is nothing preventing the synchronize_sched() task that is
	 *	taking the early exit from freeing a data structure that
	 *	is still being referenced (out of order) by the task
	 *	doing the srcu_read_unlock().
	 *
	 *	Alternatively, the comparison with "2" on the early exit
	 *	could be changed to "3", but this increases synchronize_srcu()
	 *	latency for bulk loads.  So the current code is preferred.
	 */
	mutex_unlock(&sp->mutex);
}
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
* @sp: srcu_struct with which to synchronize.
*
* Flip the completed counter, and wait for the old count to drain to zero.
* As with classic RCU, the updater must use some separate means of
* synchronizing concurrent updates. Can block; must be called from
* process context.
*
* Note that it is illegal to call synchronize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock.
* However, it is perfectly legal to call synchronize_srcu() on one
* srcu_struct from some other srcu_struct's read-side critical section.
*/
void synchronize_srcu(struct srcu_struct *sp)
{
	/* Unexpedited variant: drive the barriers with synchronize_sched(). */
	__synchronize_srcu(sp, synchronize_sched);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
/**
* synchronize_srcu_expedited - like synchronize_srcu, but less patient
* @sp: srcu_struct with which to synchronize.
*
* Flip the completed counter, and wait for the old count to drain to zero.
* As with classic RCU, the updater must use some separate means of
* synchronizing concurrent updates. Can block; must be called from
* process context.
*
* Note that it is illegal to call synchronize_srcu_expedited()
* from the corresponding SRCU read-side critical section; doing so
* will result in deadlock. However, it is perfectly legal to call
* synchronize_srcu_expedited() on one srcu_struct from some other
* srcu_struct's read-side critical section.
*/
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	/* Expedited variant: synchronize_sched_expedited() for the barriers. */
	__synchronize_srcu(sp, synchronize_sched_expedited);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
* srcu_batches_completed - return batches completed.
* @sp: srcu_struct on which to report batch completion.
*
* Report the number of batches, correlated with, but not necessarily
* precisely the same as, the number of grace periods that have elapsed.
*/
long srcu_batches_completed(struct srcu_struct *sp)
{
	/* Plain read; the counter is only advanced under sp->mutex. */
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
| gpl-2.0 |
jfdsmabalot/kernel_samsung_msm8974pro | fs/nfs/unlink.c | 3253 | 14621 | /*
* linux/fs/nfs/unlink.c
*
* nfs sillydelete handling
*
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dcache.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/namei.h>
#include "internal.h"
#include "nfs4_fs.h"
#include "iostat.h"
#include "delegation.h"
/**
 * nfs_free_unlinkdata - release data from a sillydelete operation.
 * @data: pointer to unlink structure.
 *
 * Drops the directory inode reference and the RPC credential taken when
 * the sillydelete was queued, then frees the copied name and @data
 * itself.  (iput(NULL) is a no-op, so an unset ->dir is harmless.)
 */
static void
nfs_free_unlinkdata(struct nfs_unlinkdata *data)
{
	iput(data->dir);
	put_rpccred(data->cred);
	kfree(data->args.name.name);
	kfree(data);
}
#define NAME_ALLOC_LEN(len) ((len+16) & ~15)
/**
* nfs_copy_dname - copy dentry name to data structure
* @dentry: pointer to dentry
* @data: nfs_unlinkdata
*/
static int nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
{
	char *str;
	int len = dentry->d_name.len;

	/*
	 * Allocate the rounded-up size but copy only the name itself:
	 * kmemdup() of NAME_ALLOC_LEN(len) bytes could read past the end
	 * of the source string, which is only len+1 bytes long (dentry
	 * names longer than DNAME_INLINE_LEN are allocated exactly).
	 */
	str = kmalloc(NAME_ALLOC_LEN(len), GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	memcpy(str, dentry->d_name.name, len);
	str[len] = '\0';

	data->args.name.len = len;
	data->args.name.name = str;
	return 0;
}
/* Release the copied name and reset the qstr to an empty state. */
static void nfs_free_dname(struct nfs_unlinkdata *data)
{
	kfree(data->args.name.name);
	data->args.name.len = 0;
	data->args.name.name = NULL;
}
/*
 * Drop one reference on the directory's silly-rename counter; once only
 * the base reference remains (count back at 1), wake any task sleeping
 * in nfs_block_sillyrename().
 */
static void nfs_dec_sillycount(struct inode *dir)
{
	struct nfs_inode *nfsi = NFS_I(dir);
	if (atomic_dec_return(&nfsi->silly_count) == 1)
		wake_up(&nfsi->waitqueue);
}
/**
* nfs_async_unlink_done - Sillydelete post-processing
* @task: rpc_task of the sillydelete
*
* Do the directory attribute update.
*/
static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
{
	struct nfs_unlinkdata *data = calldata;

	/* A false return from unlink_done() means the call must be resent. */
	if (!NFS_PROTO(data->dir)->unlink_done(task, data->dir))
		rpc_restart_call_prepare(task);
}
/**
* nfs_async_unlink_release - Release the sillydelete data.
* @task: rpc_task of the sillydelete
*
* We need to call nfs_put_unlinkdata as a 'tk_release' task since the
* rpc_task would be freed too.
*/
static void nfs_async_unlink_release(void *calldata)
{
	struct nfs_unlinkdata *data = calldata;
	/* Grab the superblock before @data (and its ->dir ref) is freed. */
	struct super_block *sb = data->dir->i_sb;
	nfs_dec_sillycount(data->dir);
	nfs_free_unlinkdata(data);
	/* Pairs with the nfs_sb_active() taken in nfs_do_call_unlink(). */
	nfs_sb_deactive(sb);
}
/* rpc_call_prepare: defer to the NFS version-specific unlink setup. */
static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_unlinkdata *data = calldata;
	NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data);
}
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
.rpc_call_prepare = nfs_unlink_prepare,
};
/*
 * Issue the asynchronous unlink RPC for a sillydeleted file, unless a
 * racing lookup() has re-instantiated a dentry for the name, in which
 * case the sillyrename state is handed over to that alias instead.
 *
 * Returns 1 if @data is now owned elsewhere (RPC submitted, or the
 * alias took it over); returns 0 if the caller must free @data.
 */
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
{
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_message = &msg,
		.callback_ops = &nfs_unlink_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	struct dentry *alias;
	alias = d_lookup(parent, &data->args.name);
	if (alias != NULL) {
		int ret;
		void *devname_garbage = NULL;
		/*
		 * Hey, we raced with lookup... See if we need to transfer
		 * the sillyrename information to the aliased dentry.
		 */
		nfs_free_dname(data);
		ret = nfs_copy_dname(alias, data);
		spin_lock(&alias->d_lock);
		if (ret == 0 && alias->d_inode != NULL &&
		    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
			devname_garbage = alias->d_fsdata;
			alias->d_fsdata = data;
			alias->d_flags |= DCACHE_NFSFS_RENAMED;
			ret = 1;
		} else
			ret = 0;
		spin_unlock(&alias->d_lock);
		nfs_dec_sillycount(dir);
		dput(alias);
		/*
		 * If we'd displaced old cached devname, free it.  At that
		 * point dentry is definitely not a root, so we won't need
		 * that anymore.
		 */
		kfree(devname_garbage);
		return ret;
	}
	/* Pin the directory inode; dropped again in nfs_free_unlinkdata(). */
	data->dir = igrab(dir);
	if (!data->dir) {
		nfs_dec_sillycount(dir);
		return 0;
	}
	/* Paired with nfs_sb_deactive() in nfs_async_unlink_release(). */
	nfs_sb_active(dir->i_sb);
	data->args.fh = NFS_FH(dir);
	nfs_fattr_init(data->res.dir_attr);
	NFS_PROTO(dir)->unlink_setup(&msg, dir);
	task_setup_data.rpc_client = NFS_CLIENT(dir);
	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
	return 1;
}
/*
 * Look up the parent directory and either start the unlink RPC or, if
 * sillyrenames are currently blocked (silly_count dropped to zero),
 * defer the request onto the directory's silly_list for
 * nfs_unblock_sillyrename() to process.
 *
 * Returns 1 if @data is now owned elsewhere (RPC running or deferred),
 * 0 if the caller must free it.
 */
static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
	struct dentry *parent;
	struct inode *dir;
	int ret = 0;
	parent = dget_parent(dentry);
	if (parent == NULL)
		goto out_free;
	dir = parent->d_inode;
	/* Non-exclusive lock protects against concurrent lookup() calls */
	spin_lock(&dir->i_lock);
	if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
		/* Deferred delete */
		hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
		spin_unlock(&dir->i_lock);
		ret = 1;
		goto out_dput;
	}
	spin_unlock(&dir->i_lock);
	ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
	dput(parent);
out_free:
	return ret;
}
/*
 * Wait until the directory has no sillydelete RPCs in flight and take
 * exclusive ownership of the counter (1 -> 0); new requests are then
 * deferred until nfs_unblock_sillyrename() is called.
 */
void nfs_block_sillyrename(struct dentry *dentry)
{
	struct nfs_inode *nfsi = NFS_I(dentry->d_inode);
	wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}
/*
 * Re-enable sillydelete processing for @dentry's directory and drain
 * any requests that were queued on silly_list while it was blocked.
 */
void nfs_unblock_sillyrename(struct dentry *dentry)
{
	struct inode *dir = dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(dir);
	struct nfs_unlinkdata *data;
	/* Undo the 1 -> 0 transition made by nfs_block_sillyrename(). */
	atomic_inc(&nfsi->silly_count);
	spin_lock(&dir->i_lock);
	while (!hlist_empty(&nfsi->silly_list)) {
		if (!atomic_inc_not_zero(&nfsi->silly_count))
			break;
		data = hlist_entry(nfsi->silly_list.first, struct nfs_unlinkdata, list);
		hlist_del(&data->list);
		/* i_lock is dropped across the RPC submission. */
		spin_unlock(&dir->i_lock);
		if (nfs_do_call_unlink(dentry, dir, data) == 0)
			nfs_free_unlinkdata(data);
		spin_lock(&dir->i_lock);
	}
	spin_unlock(&dir->i_lock);
}
/**
* nfs_async_unlink - asynchronous unlinking of a file
* @dir: parent directory of dentry
* @dentry: dentry to unlink
*/
static int
nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
	struct nfs_unlinkdata *data;
	int status = -ENOMEM;
	void *devname_garbage = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		goto out;

	data->cred = rpc_lookup_cred();
	if (IS_ERR(data->cred)) {
		status = PTR_ERR(data->cred);
		goto out_free;
	}
	data->res.dir_attr = &data->dir_attr;

	/* Refuse to sillyrename the same dentry twice. */
	status = -EBUSY;
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
		goto out_unlock;
	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
	devname_garbage = dentry->d_fsdata;
	dentry->d_fsdata = data;
	spin_unlock(&dentry->d_lock);
	/*
	 * If we'd displaced old cached devname, free it.  At that
	 * point dentry is definitely not a root, so we won't need
	 * that anymore.  kfree(NULL) is a no-op, so no need to test
	 * first (this matches the identical path in nfs_do_call_unlink()).
	 */
	kfree(devname_garbage);
	return 0;
out_unlock:
	spin_unlock(&dentry->d_lock);
	put_rpccred(data->cred);
out_free:
	kfree(data);
out:
	return status;
}
/**
* nfs_complete_unlink - Initialize completion of the sillydelete
* @dentry: dentry to delete
* @inode: inode
*
* Since we're most likely to be called by dentry_iput(), we
* only use the dentry to find the sillydelete. We then copy the name
* into the qstr.
*/
void
nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
	struct nfs_unlinkdata *data = NULL;
	/* Atomically detach the sillydelete data from the dentry. */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
		data = dentry->d_fsdata;
		dentry->d_fsdata = NULL;
	}
	spin_unlock(&dentry->d_lock);
	/* Skip the RPC (just free) when the inode is stale, and free when
	 * nfs_call_unlink() declined ownership of @data. */
	if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
		nfs_free_unlinkdata(data);
}
/* Cancel a queued async unlink.  Called when a sillyrename run fails. */
static void
nfs_cancel_async_unlink(struct dentry *dentry)
{
	struct nfs_unlinkdata *data = NULL;

	/* Detach the sillydelete state under the dentry lock... */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		data = dentry->d_fsdata;
		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
		dentry->d_fsdata = NULL;
	}
	spin_unlock(&dentry->d_lock);

	/* ...and free it outside the lock. */
	if (data)
		nfs_free_unlinkdata(data);
}
/**
* nfs_async_rename_done - Sillyrename post-processing
* @task: rpc_task of the sillyrename
* @calldata: nfs_renamedata for the sillyrename
*
* Do the directory attribute updates and the d_move
*/
static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
{
	struct nfs_renamedata *data = calldata;
	struct inode *old_dir = data->old_dir;
	struct inode *new_dir = data->new_dir;
	struct dentry *old_dentry = data->old_dentry;
	struct dentry *new_dentry = data->new_dentry;
	/* A false return from rename_done() means the call must be resent. */
	if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
		rpc_restart_call_prepare(task);
		return;
	}
	/* The rename failed on the server: undo the queued sillydelete. */
	if (task->tk_status != 0) {
		nfs_cancel_async_unlink(old_dentry);
		return;
	}
	/* Success: drop the now-stale dcache aliases for both names. */
	d_drop(old_dentry);
	d_drop(new_dentry);
}
/**
* nfs_async_rename_release - Release the sillyrename data.
* @calldata: the struct nfs_renamedata to be released
*/
static void nfs_async_rename_release(void *calldata)
{
	struct nfs_renamedata *data = calldata;
	/* Save the superblock pointer before the references are dropped. */
	struct super_block *sb = data->old_dir->i_sb;
	if (data->old_dentry->d_inode)
		nfs_mark_for_revalidate(data->old_dentry->d_inode);
	/* Drop the references taken in nfs_async_rename(). */
	dput(data->old_dentry);
	dput(data->new_dentry);
	iput(data->old_dir);
	iput(data->new_dir);
	nfs_sb_deactive(sb);
	put_rpccred(data->cred);
	kfree(data);
}
/* rpc_call_prepare: defer to the NFS version-specific rename setup. */
static void nfs_rename_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_renamedata *data = calldata;
	NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data);
}
static const struct rpc_call_ops nfs_rename_ops = {
.rpc_call_done = nfs_async_rename_done,
.rpc_release = nfs_async_rename_release,
.rpc_call_prepare = nfs_rename_prepare,
};
/**
* nfs_async_rename - perform an asynchronous rename operation
* @old_dir: directory that currently holds the dentry to be renamed
* @new_dir: target directory for the rename
* @old_dentry: original dentry to be renamed
* @new_dentry: dentry to which the old_dentry should be renamed
*
* It's expected that valid references to the dentries and inodes are held
*/
static struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
		 struct dentry *old_dentry, struct dentry *new_dentry)
{
	struct nfs_renamedata *data;
	struct rpc_message msg = { };
	struct rpc_task_setup task_setup_data = {
		.rpc_message = &msg,
		.callback_ops = &nfs_rename_ops,
		.workqueue = nfsiod_workqueue,
		.rpc_client = NFS_CLIENT(old_dir),
		.flags = RPC_TASK_ASYNC,
	};
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return ERR_PTR(-ENOMEM);
	task_setup_data.callback_data = data;
	data->cred = rpc_lookup_cred();
	if (IS_ERR(data->cred)) {
		/* Propagate the credential error as an ERR_PTR task. */
		struct rpc_task *task = ERR_CAST(data->cred);
		kfree(data);
		return task;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	msg.rpc_cred = data->cred;
	/* set up nfs_renamedata -- every reference taken here is dropped
	 * again in nfs_async_rename_release() */
	data->old_dir = old_dir;
	ihold(old_dir);
	data->new_dir = new_dir;
	ihold(new_dir);
	data->old_dentry = dget(old_dentry);
	data->new_dentry = dget(new_dentry);
	nfs_fattr_init(&data->old_fattr);
	nfs_fattr_init(&data->new_fattr);
	/* set up nfs_renameargs */
	data->args.old_dir = NFS_FH(old_dir);
	data->args.old_name = &old_dentry->d_name;
	data->args.new_dir = NFS_FH(new_dir);
	data->args.new_name = &new_dentry->d_name;
	/* set up nfs_renameres */
	data->res.old_fattr = &data->old_fattr;
	data->res.new_fattr = &data->new_fattr;
	/* Paired with nfs_sb_deactive() in nfs_async_rename_release(). */
	nfs_sb_active(old_dir->i_sb);
	NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dir);
	return rpc_run_task(&task_setup_data);
}
/**
* nfs_sillyrename - Perform a silly-rename of a dentry
* @dir: inode of directory that contains dentry
* @dentry: dentry to be sillyrenamed
*
* NFSv2/3 is stateless and the server doesn't know when the client is
* holding a file open. To prevent application problems when a file is
* unlinked while it's still open, the client performs a "silly-rename".
* That is, it renames the file to a hidden file in the same directory,
* and only performs the unlink once the last reference to it is put.
*
* The final cleanup is done during dentry_iput.
*
* (Note: NFSv4 is stateful, and has opens, so in theory an NFSv4 server
* could take responsibility for keeping open files referenced. The server
* would also need to ensure that opened-but-deleted files were kept over
* reboots. However, we may not assume a server does so. (RFC 5661
* does provide an OPEN4_RESULT_PRESERVE_UNLINKED flag that a server can
* use to advertise that it does this; some day we may take advantage of
* it.))
*/
int
nfs_sillyrename(struct inode *dir, struct dentry *dentry)
{
	static unsigned int sillycounter;
	/* Name layout: ".nfs" + fileid (hex) + counter (hex) + NUL. */
	const int fileidsize = sizeof(NFS_FILEID(dentry->d_inode))*2;
	const int countersize = sizeof(sillycounter)*2;
	const int slen = sizeof(".nfs")+fileidsize+countersize-1;
	char silly[slen+1];
	struct dentry *sdentry;
	struct rpc_task *task;
	int error = -EIO;
	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		dentry->d_count);
	nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
	/*
	 * We don't allow a dentry to be silly-renamed twice.
	 */
	error = -EBUSY;
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
		goto out;
	sprintf(silly, ".nfs%*.*Lx",
		fileidsize, fileidsize,
		(unsigned long long)NFS_FILEID(dentry->d_inode));
	/* Return delegation in anticipation of the rename */
	nfs_inode_return_delegation(dentry->d_inode);
	sdentry = NULL;
	do {
		/* The counter suffix overwrites the tail of the buffer, so
		 * each retry produces a new candidate name. */
		char *suffix = silly + slen - countersize;
		dput(sdentry);
		sillycounter++;
		sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);
		dfprintk(VFS, "NFS: trying to rename %s to %s\n",
			dentry->d_name.name, silly);
		sdentry = lookup_one_len(silly, dentry->d_parent, slen);
		/*
		 * N.B. Better to return EBUSY here ... it could be
		 * dangerous to delete the file while it's in use.
		 */
		if (IS_ERR(sdentry))
			goto out;
	} while (sdentry->d_inode != NULL); /* need negative lookup */
	/* queue unlink first. Can't do this from rpc_release as it
	 * has to allocate memory
	 */
	error = nfs_async_unlink(dir, dentry);
	if (error)
		goto out_dput;
	/* populate unlinkdata with the right dname */
	error = nfs_copy_dname(sdentry,
				(struct nfs_unlinkdata *)dentry->d_fsdata);
	if (error) {
		nfs_cancel_async_unlink(dentry);
		goto out_dput;
	}
	/* run the rename task, undo unlink if it fails */
	task = nfs_async_rename(dir, dir, dentry, sdentry);
	if (IS_ERR(task)) {
		error = -EBUSY;
		nfs_cancel_async_unlink(dentry);
		goto out_dput;
	}
	/* wait for the RPC task to complete, unless a SIGKILL intervenes */
	error = rpc_wait_for_completion_task(task);
	if (error == 0)
		error = task->tk_status;
	rpc_put_task(task);
out_dput:
	dput(sdentry);
out:
	return error;
}
| gpl-2.0 |
cometzero/e210s_jb | drivers/staging/octeon/cvmx-helper-spi.c | 4789 | 6088 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_spxx_int_msk_enable(int index);
void __cvmx_interrupt_stxx_int_msk_enable(int index);
/*
* Functions for SPI initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include "cvmx-config.h"
#include "cvmx-spi.h"
#include "cvmx-helper.h"
#include "cvmx-pip-defs.h"
#include "cvmx-pko-defs.h"
/*
* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
* initialization routines wait for SPI training. You can override the
* value using executive-config.h if necessary.
*/
#ifndef CVMX_HELPER_SPI_TIMEOUT
#define CVMX_HELPER_SPI_TIMEOUT 10
#endif
/**
* Probe a SPI interface and determine the number of ports
* connected to it. The SPI interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_spi_probe(int interface)
{
	int num_ports = 0;
	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    cvmx_spi4000_is_present(interface)) {
		/* SPI4000 daughter card: fixed 10-port configuration. */
		num_ports = 10;
	} else {
		union cvmx_pko_reg_crc_enable enable;
		num_ports = 16;
		/*
		 * Unlike the SPI4000, most SPI devices don't
		 * automatically put on the L2 CRC. For everything
		 * except for the SPI4000 have PKO append the L2 CRC
		 * to the packet.
		 */
		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
		enable.s.enable |= 0xffff << (interface * 16);
		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
	}
	__cvmx_helper_setup_gmx(interface, num_ports);
	return num_ports;
}
/**
* Bringup and enable a SPI interface. After this call packet I/O
* should be fully functional. This is called with IPD enabled but
* PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block. When you are using SPI, this isn' the case and
	 * IPD needs to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;
	/* Enable CRC checking in IPD for every port on this interface. */
	for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
	     ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}
	/* SPI training is skipped entirely on the simulator. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}
	/* Unmask the SPI/STX/GMX interrupt sources for this interface. */
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
{
	cvmx_helper_link_info_t link;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);

	link.u64 = 0;

	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		/* The simulator gives you a simulated full duplex link */
		link.s.link_up = 1;
		link.s.full_duplex = 1;
		link.s.speed = 10000;
		return link;
	}

	if (cvmx_spi4000_is_present(interface)) {
		/* SPI4000 reports link status in-band per index. */
		union cvmx_gmxx_rxx_rx_inbnd inband =
		    cvmx_spi4000_check_speed(interface, index);

		link.s.link_up = inband.s.status;
		link.s.full_duplex = inband.s.duplex;
		switch (inband.s.speed) {
		case 0: /* 10 Mbps */
			link.s.speed = 10;
			break;
		case 1: /* 100 Mbps */
			link.s.speed = 100;
			break;
		case 2: /* 1 Gbps */
			link.s.speed = 1000;
			break;
		case 3: /* Illegal */
			link.s.speed = 0;
			link.s.link_up = 0;
			break;
		}
		return link;
	}

	/* For generic SPI we can't determine the link, just return some
	   sane results */
	link.s.link_up = 1;
	link.s.full_duplex = 1;
	link.s.speed = 10000;
	return link;
}
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
/* Intentional no-op: SPI link configuration needs no action here. */
int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	/* Nothing to do. If we have a SPI4000 then the setup was already performed
	   by cvmx_spi4000_check_speed(). If not then there isn't any link
	   info */
	return 0;
}
| gpl-2.0 |
SandPox/kernel_collection | drivers/media/v4l2-core/videobuf-dvb.c | 4789 | 10220 | /*
*
* some helper function for simple DVB cards which simply DMA the
* complete transport stream and let the computer sort everything else
* (i.e. we are using the software demux, ...). Also uses the
* video-buf to manage DMA buffers.
*
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dvb.h>
/* ------------------------------------------------------------------ */
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages");
#define dprintk(fmt, arg...) if (debug) \
printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name , ## arg)
/* ------------------------------------------------------------------ */
/*
 * Capture thread: repeatedly takes the next filled buffer off the
 * videobuf stream queue, feeds its contents to the software demux,
 * and requeues the buffer for more DMA.  Runs until the last feed is
 * stopped (dvb->nfeeds == 0) or kthread_stop() is called.
 */
static int videobuf_dvb_thread(void *data)
{
	struct videobuf_dvb *dvb = data;
	struct videobuf_buffer *buf;
	unsigned long flags;
	void *outp;

	dprintk("dvb thread started\n");
	set_freezable();
	videobuf_read_start(&dvb->dvbq);

	for (;;) {
		/* fetch next buffer */
		buf = list_entry(dvb->dvbq.stream.next,
				 struct videobuf_buffer, stream);
		list_del(&buf->stream);
		/* block until the driver marks this buffer complete */
		videobuf_waiton(&dvb->dvbq, buf, 0, 1);

		/* no more feeds left or stop_feed() asked us to quit */
		if (0 == dvb->nfeeds)
			break;
		if (kthread_should_stop())
			break;
		try_to_freeze();

		/* feed buffer data to demux, but only if the DMA completed
		 * successfully (VIDEOBUF_DONE) */
		outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
		if (buf->state == VIDEOBUF_DONE)
			dvb_dmx_swfilter(&dvb->demux, outp,
					 buf->size);

		/* requeue buffer; buf_queue must run under the driver's
		 * irqlock because the interrupt handler walks the same list */
		list_add_tail(&buf->stream, &dvb->dvbq.stream);
		spin_lock_irqsave(dvb->dvbq.irqlock, flags);
		dvb->dvbq.ops->buf_queue(&dvb->dvbq, buf);
		spin_unlock_irqrestore(dvb->dvbq.irqlock, flags);
	}
	videobuf_read_stop(&dvb->dvbq);
	dprintk("dvb thread stopped\n");

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
/*
 * demux start_feed callback: bumps the feed count and, on the first
 * feed, spawns the capture thread.  Returns the feed count, or a
 * negative errno if the thread could not be started.
 */
static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct videobuf_dvb *dvb = demux->priv;
	int rc;

	if (!demux->dmx.frontend)
		return -EINVAL;

	mutex_lock(&dvb->lock);
	dvb->nfeeds++;
	rc = dvb->nfeeds;

	if (dvb->thread == NULL) {
		/* first feed: bring up the capture thread */
		dvb->thread = kthread_run(videobuf_dvb_thread,
					  dvb, "%s dvb", dvb->name);
		if (IS_ERR(dvb->thread)) {
			rc = PTR_ERR(dvb->thread);
			dvb->thread = NULL;
		}
	}

	mutex_unlock(&dvb->lock);
	return rc;
}
/*
 * demux stop_feed callback: drops the feed count and stops the capture
 * thread once the last feed goes away.
 */
static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct videobuf_dvb *dvb = demux->priv;
	int ret = 0;

	mutex_lock(&dvb->lock);
	if (--dvb->nfeeds == 0 && dvb->thread != NULL) {
		ret = kthread_stop(dvb->thread);
		dvb->thread = NULL;
	}
	mutex_unlock(&dvb->lock);

	return ret;
}
/*
 * Register the single DVB adapter for this bus and stash the bridge
 * private data in it.  Returns the dvb_register_adapter() result
 * (adapter number on success, negative errno on failure).
 */
static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
			  struct module *module,
			  void *adapter_priv,
			  struct device *device,
			  char *adapter_name,
			  short *adapter_nr,
			  int mfe_shared)
{
	int result;

	mutex_init(&fe->lock);

	/* register adapter */
	result = dvb_register_adapter(&fe->adapter, adapter_name, module,
		device, adapter_nr);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
		       adapter_name, result);
		/* Don't configure an adapter that failed to register;
		 * the original code fell through and wrote priv/mfe_shared
		 * into it anyway. */
		return result;
	}
	fe->adapter.priv = adapter_priv;
	fe->adapter.mfe_shared = mfe_shared;

	return result;
}
/*
 * Register one frontend and its full demux stack (frontend, swdemux,
 * dmxdev, hardware + memory demux frontends, dvb_net) on @adapter.
 * On any failure the error path unwinds everything registered so far
 * in strict reverse order via the goto chain below.
 */
static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
	struct videobuf_dvb *dvb)
{
	int result;

	/* register frontend */
	result = dvb_register_frontend(adapter, dvb->frontend);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_frontend;
	}

	/* register demux stuff */
	dvb->demux.dmx.capabilities =
		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
		DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv       = dvb;
	dvb->demux.filternum  = 256;
	dvb->demux.feednum    = 256;
	/* feed callbacks drive the capture thread lifecycle */
	dvb->demux.start_feed = videobuf_dvb_start_feed;
	dvb->demux.stop_feed  = videobuf_dvb_stop_feed;
	result = dvb_dmx_init(&dvb->demux);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_dmx;
	}

	dvb->dmxdev.filternum    = 256;
	dvb->dmxdev.demux        = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;
	result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_dmxdev;
	}

	/* hardware TS input */
	dvb->fe_hw.source = DMX_FRONTEND_0;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_hw;
	}

	/* memory-based input (userspace injection) */
	dvb->fe_mem.source = DMX_MEMORY_FE;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	if (result < 0) {
		printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_mem;
	}

	/* default to the hardware frontend */
	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_conn;
	}

	/* register network adapter */
	result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
	if (result < 0) {
		printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
		       dvb->name, result);
		goto fail_fe_conn;
	}
	return 0;

	/* unwind in reverse registration order */
fail_fe_conn:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
	dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
	dvb_dmx_release(&dvb->demux);
fail_dmx:
	dvb_unregister_frontend(dvb->frontend);
fail_frontend:
	dvb_frontend_detach(dvb->frontend);
	dvb->frontend = NULL;

	return result;
}
/* ------------------------------------------------------------------ */
/* Register a single adapter and one or more frontends */
/*
 * Register the adapter and then every frontend on f->felist.
 * The adapter name is taken from frontend id 1, which must exist.
 * On a frontend failure the whole bus is torn down again; note the
 * mutex must be dropped first because videobuf_dvb_unregister_bus()
 * -> videobuf_dvb_dealloc_frontends() takes f->lock itself.
 */
int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
			      struct module *module,
			      void *adapter_priv,
			      struct device *device,
			      short *adapter_nr,
			      int mfe_shared)
{
	struct list_head *list, *q;
	struct videobuf_dvb_frontend *fe;
	int res;

	/* frontend id 1 supplies the adapter name */
	fe = videobuf_dvb_get_frontend(f, 1);
	if (!fe) {
		printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
		return -EINVAL;
	}

	/* Bring up the adapter */
	res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
		fe->dvb.name, adapter_nr, mfe_shared);
	if (res < 0) {
		printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
		return res;
	}

	/* Attach all of the frontends to the adapter */
	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
		res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
		if (res < 0) {
			printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
			       fe->dvb.name, res);
			goto err;
		}
	}
	mutex_unlock(&f->lock);
	return 0;

err:
	/* unlock before unregistering: the teardown path re-takes f->lock */
	mutex_unlock(&f->lock);
	videobuf_dvb_unregister_bus(f);
	return res;
}
EXPORT_SYMBOL(videobuf_dvb_register_bus);
/*
 * Tear down every frontend on the bus, then drop the adapter itself.
 */
void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
{
	videobuf_dvb_dealloc_frontends(f);

	dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
/*
 * Look up a frontend on f->felist by its numeric id.
 * Returns the matching entry, or NULL if no frontend has that id.
 */
struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
	struct videobuf_dvb_frontends *f, int id)
{
	struct list_head *pos, *tmp;
	struct videobuf_dvb_frontend *found = NULL;

	mutex_lock(&f->lock);
	list_for_each_safe(pos, tmp, &f->felist) {
		struct videobuf_dvb_frontend *fe =
			list_entry(pos, struct videobuf_dvb_frontend, felist);

		if (fe->id == id) {
			found = fe;
			break;
		}
	}
	mutex_unlock(&f->lock);

	return found;
}
EXPORT_SYMBOL(videobuf_dvb_get_frontend);
/*
 * Reverse lookup: find the id of the entry whose dvb.frontend pointer
 * equals @p.  Returns 0 when no entry matches.
 */
int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
			       struct dvb_frontend *p)
{
	struct list_head *pos, *tmp;
	int id = 0;

	mutex_lock(&f->lock);
	list_for_each_safe(pos, tmp, &f->felist) {
		struct videobuf_dvb_frontend *fe =
			list_entry(pos, struct videobuf_dvb_frontend, felist);

		if (fe->dvb.frontend == p) {
			id = fe->id;
			break;
		}
	}
	mutex_unlock(&f->lock);

	return id;
}
EXPORT_SYMBOL(videobuf_dvb_find_frontend);
/*
 * Allocate a new frontend entry with the given id and append it to
 * f->felist.  Returns NULL on allocation failure.
 */
struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
	struct videobuf_dvb_frontends *f, int id)
{
	struct videobuf_dvb_frontend *fe;

	fe = kzalloc(sizeof(*fe), GFP_KERNEL);
	if (fe == NULL)
		return NULL;

	fe->id = id;
	mutex_init(&fe->dvb.lock);

	mutex_lock(&f->lock);
	list_add_tail(&fe->felist, &f->felist);
	mutex_unlock(&f->lock);

	return fe;
}
EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
/*
 * Unregister and free every frontend on f->felist.  A non-NULL
 * net.dvbdev marks entries whose demux stack was fully registered by
 * videobuf_dvb_register_frontend(); those are unwound in reverse
 * registration order before the entry is removed and freed.
 */
void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
{
	struct list_head *list, *q;
	struct videobuf_dvb_frontend *fe;

	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct videobuf_dvb_frontend, felist);
		if (fe->dvb.net.dvbdev) {
			/* full teardown: net, demux frontends, dmxdev,
			 * swdemux, then the registered frontend */
			dvb_net_release(&fe->dvb.net);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_mem);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_hw);
			dvb_dmxdev_release(&fe->dvb.dmxdev);
			dvb_dmx_release(&fe->dvb.demux);
			dvb_unregister_frontend(fe->dvb.frontend);
		}
		if (fe->dvb.frontend)
			/* always allocated, may have been reset */
			dvb_frontend_detach(fe->dvb.frontend);
		list_del(list); /* remove list entry */
		kfree(fe);	/* free frontend allocation */
	}
	mutex_unlock(&f->lock);
}
EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
| gpl-2.0 |
Jazz-823/kernel_sony_togari | net/caif/cfsrvl.c | 4789 | 5478 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
#define SRVL_CTRL_PKT_SIZE 1
#define SRVL_FLOW_OFF 0x81
#define SRVL_FLOW_ON 0x80
#define SRVL_SET_PIN 0x82
#define SRVL_CTRL_PKT_SIZE 1
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
/*
 * Control-command handler shared by all CAIF service layers.
 * Tracks channel open state plus two independent flow gates
 * (modem_flow_on, phy_flow_on) and forwards the relevant indications
 * to the layer above.  Does nothing if no upper layer is attached.
 */
static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			    int phyid)
{
	struct cfsrvl *service = container_obj(layr);

	if (layr->up == NULL || layr->up->ctrlcmd == NULL)
		return;

	switch (ctrl) {
	case CAIF_CTRLCMD_INIT_RSP:
		/* channel established */
		service->open = true;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:
	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* channel closed or setup failed */
		service->open = false;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
		/* only react to our own physical interface */
		if (phyid != service->dev_info.id)
			break;
		/* forward upstream only if modem flow was still on */
		if (service->modem_flow_on)
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		service->phy_flow_on = false;
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
		if (phyid != service->dev_info.id)
			return;
		if (service->modem_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_ON_IND,
					  phyid);
		}
		service->phy_flow_on = true;
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* modem flow off: forward only while phy flow is on */
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		}
		service->modem_flow_on = false;
		break;
	case CAIF_CTRLCMD_FLOW_ON_IND:
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_ON_IND, phyid);
		}
		service->modem_flow_on = true;
		break;
	case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
		/* In case interface is down, let's fake a remove shutdown */
		layr->up->ctrlcmd(layr->up,
				  CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	default:
		pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
		/* We have both modem and phy flow on, send flow on */
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		service->phy_flow_on = true;
		break;
	}
}
/*
 * Build and transmit a one-byte service-layer flow-control packet
 * (SRVL_FLOW_ON or SRVL_FLOW_OFF) on the channel below @layr.
 * Returns the transmit result, -ENOMEM on packet allocation failure,
 * or -EPROTO if the header byte could not be prepended.
 */
static int cfservl_send_flow_ctrl(struct cflayer *layr,
				  struct cfsrvl *service, u8 cmd)
{
	struct cfpkt *pkt;
	struct caif_payload_info *info;

	pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
	if (!pkt)
		return -ENOMEM;

	if (cfpkt_add_head(pkt, &cmd, 1) < 0) {
		pr_err("Packet is erroneous!\n");
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	info = cfpkt_info(pkt);
	info->channel_id = service->layer.id;
	info->hdr_len = 1;
	info->dev_info = &service->dev_info;
	return layr->dn->transmit(layr->dn, pkt);
}

/*
 * Modem-command handler: translates flow on/off requests into
 * flow-control packets.  The FLOW_ON and FLOW_OFF paths were
 * byte-for-byte duplicates in the original; they now share
 * cfservl_send_flow_ctrl().  No-op (returns 0) when the service
 * does not support flow control; -EINVAL for unknown commands.
 */
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	struct cfsrvl *service = container_obj(layr);

	caif_assert(layr != NULL);
	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);

	if (!service->supports_flowctrl)
		return 0;

	switch (ctrl) {
	case CAIF_MODEMCMD_FLOW_ON_REQ:
		return cfservl_send_flow_ctrl(layr, service, SRVL_FLOW_ON);
	case CAIF_MODEMCMD_FLOW_OFF_REQ:
		return cfservl_send_flow_ctrl(layr, service, SRVL_FLOW_OFF);
	default:
		break;
	}
	return -EINVAL;
}
/* Default ->release callback installed by cfsrvl_init(): frees the
 * cfsrvl object that embeds @layer. */
static void cfsrvl_release(struct cflayer *layer)
{
	struct cfsrvl *service = container_of(layer, struct cfsrvl, layer);
	kfree(service);
}
/*
 * Initialize the fields common to every CAIF service layer.
 * The channel starts closed (opened on CAIF_CTRLCMD_INIT_RSP in
 * cfservl_ctrlcmd) with both flow gates on.
 */
void cfsrvl_init(struct cfsrvl *service,
		 u8 channel_id,
		 struct dev_info *dev_info,
		 bool supports_flowctrl
		 )
{
	/* code elsewhere casts between cfsrvl and its embedded cflayer,
	 * so the layer member must sit at offset 0 */
	caif_assert(offsetof(struct cfsrvl, layer) == 0);
	service->open = false;
	service->modem_flow_on = true;
	service->phy_flow_on = true;
	service->layer.id = channel_id;
	service->layer.ctrlcmd = cfservl_ctrlcmd;
	service->layer.modemcmd = cfservl_modemcmd;
	service->dev_info = *dev_info;	/* copied by value */
	service->supports_flowctrl = supports_flowctrl;
	service->release = cfsrvl_release;
}
/*
 * Report whether the service channel is open.  When it is not,
 * *err is set to -ENOTCONN.
 */
bool cfsrvl_ready(struct cfsrvl *service, int *err)
{
	if (service->open)
		return true;

	*err = -ENOTCONN;
	return false;
}
u8 cfsrvl_getphyid(struct cflayer *layer)
{
struct cfsrvl *servl = container_obj(layer);
return servl->dev_info.id;
}
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
{
struct cfsrvl *servl = container_obj(layer);
return servl->dev_info.id == phyid;
}
/*
 * Release the service layer sitting below @adap_layer via its
 * ->release callback.  Safe to call with a NULL layer or one that
 * has no lower layer attached.
 */
void caif_free_client(struct cflayer *adap_layer)
{
	struct cfsrvl *servl;

	if (adap_layer == NULL)
		return;
	if (adap_layer->dn == NULL)
		return;

	servl = container_obj(adap_layer->dn);
	servl->release(&servl->layer);
}
EXPORT_SYMBOL(caif_free_client);
/*
 * Install the client's hold/put refcount callbacks on the service
 * layer below @adapt_layer.
 */
void caif_client_register_refcnt(struct cflayer *adapt_layer,
				 void (*hold)(struct cflayer *lyr),
				 void (*put)(struct cflayer *lyr))
{
	struct cfsrvl *service;

	/*
	 * Validate before dereferencing: the original computed
	 * container_of(adapt_layer->dn, ...) first and only ran the
	 * WARN_ON afterwards, so a NULL adapt_layer crashed before
	 * the warning could fire.
	 */
	if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL))
		return;
	service = container_of(adapt_layer->dn, struct cfsrvl, layer);

	service->hold = hold;
	service->put = put;
}
EXPORT_SYMBOL(caif_client_register_refcnt);
| gpl-2.0 |
MoKee/android_kernel_xiaomi_cancro | drivers/video/fbmon.c | 4789 | 37832 | /*
* linux/drivers/video/fbmon.c
*
* Copyright (C) 2002 James Simmons <jsimmons@users.sf.net>
*
* Credits:
*
* The EDID Parser is a conglomeration from the following sources:
*
* 1. SciTech SNAP Graphics Architecture
* Copyright (C) 1991-2002 SciTech Software, Inc. All rights reserved.
*
* 2. XFree86 4.3.0, interpret_edid.c
* Copyright 1998 by Egbert Eich <Egbert.Eich@Physik.TU-Darmstadt.DE>
*
* 3. John Fremlin <vii@users.sourceforge.net> and
* Ani Joshi <ajoshi@unixbox.com>
*
* Generalized Timing Formula is derived from:
*
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <video/edid.h>
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#include "edid.h"
/*
* EDID parser
*/
#undef DEBUG /* define this for verbose EDID parsing output */
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(fmt,## args)
#else
#define DPRINTK(fmt, args...)
#endif
#define FBMON_FIX_HEADER 1
#define FBMON_FIX_INPUT 2
#define FBMON_FIX_TIMINGS 3
#ifdef CONFIG_FB_MODE_HELPERS
/* Quirk entry: EDID blocks from these monitors are known broken and
 * get patched by fix_edid() before parsing. */
struct broken_edid {
	u8 manufacturer[4];	/* 3-letter PnP vendor code + NUL */
	u32 model;		/* EDID product code */
	u32 fix;		/* one of the FBMON_FIX_* codes above */
};

static const struct broken_edid brokendb[] = {
	/* DEC FR-PCXAV-YZ */
	{
		.manufacturer = "DEC",
		.model        = 0x073a,
		.fix          = FBMON_FIX_HEADER,
	},
	/* ViewSonic PF775a */
	{
		.manufacturer = "VSC",
		.model        = 0x5a44,
		.fix          = FBMON_FIX_INPUT,
	},
	/* Sharp UXGA? */
	{
		.manufacturer = "SHP",
		.model        = 0x138e,
		.fix          = FBMON_FIX_TIMINGS,
	},
};

/* Fixed 8-byte signature that begins every EDID 1.x block. */
static const unsigned char edid_v1_header[] = { 0x00, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0x00
};
/*
 * Copy the text payload of an 18-byte EDID monitor descriptor into @s:
 * skip the 5-byte descriptor header, copy at most 13 characters
 * stopping at a 0x0A (LF) terminator, NUL-terminate, then strip any
 * trailing 0x20 (space) padding in place.
 */
static void copy_string(unsigned char *c, unsigned char *s)
{
	int i;
	c = c + 5;	/* text starts after the 5-byte descriptor header */
	for (i = 0; (i < 13 && *c != 0x0A); i++)
		*(s++) = *(c++);
	*s = 0;
	/* walk backwards over trailing spaces, zeroing them; i guards
	 * against running off the front when everything was spaces */
	while (i-- && (*--s == 0x20)) *s = 0;
}
/* Descriptor tag 00 00 00 FF 00 = monitor serial number block. */
static int edid_is_serial_block(unsigned char *block)
{
	return (block[0] == 0x00) && (block[1] == 0x00) &&
	       (block[2] == 0x00) && (block[3] == 0xff) &&
	       (block[4] == 0x00);
}
/* Descriptor tag 00 00 00 FE 00 = ASCII data string block. */
static int edid_is_ascii_block(unsigned char *block)
{
	return (block[0] == 0x00) && (block[1] == 0x00) &&
	       (block[2] == 0x00) && (block[3] == 0xfe) &&
	       (block[4] == 0x00);
}
/* Descriptor tag 00 00 00 FD 00 = monitor range-limits block. */
static int edid_is_limits_block(unsigned char *block)
{
	return (block[0] == 0x00) && (block[1] == 0x00) &&
	       (block[2] == 0x00) && (block[3] == 0xfd) &&
	       (block[4] == 0x00);
}
/* Descriptor tag 00 00 00 FC 00 = monitor name block. */
static int edid_is_monitor_block(unsigned char *block)
{
	return (block[0] == 0x00) && (block[1] == 0x00) &&
	       (block[2] == 0x00) && (block[3] == 0xfc) &&
	       (block[4] == 0x00);
}
/*
 * A detailed timing descriptor is anything that is NOT a display
 * descriptor: display descriptors start 00 00 00 <tag> 00, so any
 * non-zero byte among offsets 0, 1, 2, 4 marks a timing block.
 */
static int edid_is_timing_block(unsigned char *block)
{
	return !((block[0] == 0x00) && (block[1] == 0x00) &&
		 (block[2] == 0x00) && (block[4] == 0x00));
}
/*
 * Check the EDID block against the broken-monitor quirk table.
 * Decodes the 3-letter PnP manufacturer id (three 5-bit codes packed
 * into bytes 0-1, offset from '@') and the little-endian model code,
 * then verifies whether the quirk actually applies to this block.
 * Returns the applicable FBMON_FIX_* code, or 0 if nothing to fix.
 */
static int check_edid(unsigned char *edid)
{
	unsigned char *block = edid + ID_MANUFACTURER_NAME, manufacturer[4];
	unsigned char *b;
	u32 model;
	int i, fix = 0, ret = 0;

	/* unpack the compressed PnP vendor id */
	manufacturer[0] = ((block[0] & 0x7c) >> 2) + '@';
	manufacturer[1] = ((block[0] & 0x03) << 3) +
		((block[1] & 0xe0) >> 5) + '@';
	manufacturer[2] = (block[1] & 0x1f) + '@';
	manufacturer[3] = 0;
	model = block[2] + (block[3] << 8);

	for (i = 0; i < ARRAY_SIZE(brokendb); i++) {
		if (!strncmp(manufacturer, brokendb[i].manufacturer, 4) &&
		    brokendb[i].model == model) {
			fix = brokendb[i].fix;
			break;
		}
	}

	/* confirm the quirk is actually needed for this block */
	switch (fix) {
	case FBMON_FIX_HEADER:
		/* only if the 8-byte signature really is damaged */
		for (i = 0; i < 8; i++) {
			if (edid[i] != edid_v1_header[i]) {
				ret = fix;
				break;
			}
		}
		break;
	case FBMON_FIX_INPUT:
		b = edid + EDID_STRUCT_DISPLAY;
		/* Only if display is GTF capable will
		   the input type be reset to analog */
		if (b[4] & 0x01 && b[0] & 0x80)
			ret = fix;
		break;
	case FBMON_FIX_TIMINGS:
		/* needed only when no range-limits descriptor exists */
		b = edid + DETAILED_TIMING_DESCRIPTIONS_START;
		ret = fix;
		for (i = 0; i < 4; i++) {
			if (edid_is_limits_block(b)) {
				ret = 0;
				break;
			}
			b += DETAILED_TIMING_DESCRIPTION_SIZE;
		}
		break;
	}

	if (ret)
		printk("fbmon: The EDID Block of "
		       "Manufacturer: %s Model: 0x%x is known to "
		       "be broken,\n", manufacturer, model);
	return ret;
}
/*
 * Apply one of the FBMON_FIX_* repairs reported by check_edid(),
 * modifying the EDID block in place and keeping its checksum valid.
 */
static void fix_edid(unsigned char *edid, int fix)
{
	int i;
	unsigned char *b, csum = 0;

	switch (fix) {
	case FBMON_FIX_HEADER:
		/* rewrite the fixed 8-byte signature */
		printk("fbmon: trying a header reconstruct\n");
		memcpy(edid, edid_v1_header, 8);
		break;
	case FBMON_FIX_INPUT:
		printk("fbmon: trying to fix input type\n");
		b = edid + EDID_STRUCT_DISPLAY;
		/* clear the "digital input" bit; adding 0x80 to the
		 * checksum byte compensates for the 0x80 removed here,
		 * keeping the total sum 0 mod 256 */
		b[0] &= ~0x80;
		edid[127] += 0x80;
		break;
	case FBMON_FIX_TIMINGS:
		printk("fbmon: trying to fix monitor timings\n");
		b = edid + DETAILED_TIMING_DESCRIPTIONS_START;
		for (i = 0; i < 4; i++) {
			/* overwrite the first descriptor that is not a
			 * recognized type with a synthetic range-limits
			 * (0xfd) block holding conservative defaults */
			if (!(edid_is_serial_block(b) ||
			      edid_is_ascii_block(b) ||
			      edid_is_monitor_block(b) ||
			      edid_is_timing_block(b))) {
				b[0] = 0x00;
				b[1] = 0x00;
				b[2] = 0x00;
				b[3] = 0xfd;
				b[4] = 0x00;
				b[5] = 60;   /* vfmin */
				b[6] = 60;   /* vfmax */
				b[7] = 30;   /* hfmin */
				b[8] = 75;   /* hfmax */
				b[9] = 17;   /* pixclock - 170 MHz*/
				b[10] = 0;   /* GTF */
				break;
			}
			b += DETAILED_TIMING_DESCRIPTION_SIZE;
		}
		/* recompute the checksum byte over bytes 0..126 */
		for (i = 0; i < EDID_LENGTH - 1; i++)
			csum += edid[i];
		edid[127] = 256 - csum;
		break;
	}
}
/*
 * Validate the EDID checksum after applying any known quirk fix.
 * Returns 1 when all 128 bytes sum to 0 mod 256 and the block is
 * not entirely zeroes, 0 otherwise.
 */
static int edid_checksum(unsigned char *edid)
{
	unsigned char csum = 0, all_null = 0;
	int i, fix = check_edid(edid);

	if (fix)
		fix_edid(edid, fix);

	for (i = 0; i < EDID_LENGTH; i++) {
		csum += edid[i];
		all_null |= edid[i];
	}

	/* checksum passed and block is not all zeroes */
	return (csum == 0x00 && all_null) ? 1 : 0;
}
/*
 * Validate the 8-byte EDID signature after applying any known quirk
 * fix.  Returns 1 when the header matches edid_v1_header, else 0.
 */
static int edid_check_header(unsigned char *edid)
{
	int i, fix = check_edid(edid);

	if (fix)
		fix_edid(edid, fix);

	for (i = 0; i < 8; i++) {
		if (edid[i] != edid_v1_header[i])
			return 0;
	}
	return 1;
}
/*
 * Decode the EDID vendor/product identification block: packed 3-letter
 * PnP manufacturer id, little-endian model and 32-bit serial number,
 * and week/year of manufacture (year is offset from 1990).
 */
static void parse_vendor_block(unsigned char *block, struct fb_monspecs *specs)
{
	specs->manufacturer[0] = ((block[0] & 0x7c) >> 2) + '@';
	specs->manufacturer[1] = ((block[0] & 0x03) << 3) +
		((block[1] & 0xe0) >> 5) + '@';
	specs->manufacturer[2] = (block[1] & 0x1f) + '@';
	specs->manufacturer[3] = 0;
	specs->model = block[2] + (block[3] << 8);
	/*
	 * Promote the top byte to unsigned before shifting: the original
	 * "block[7] << 24" left-shifts a (promoted) signed int into the
	 * sign bit whenever block[7] >= 0x80, which is undefined behavior.
	 */
	specs->serial = block[4] + (block[5] << 8) +
	       (block[6] << 16) + ((unsigned)block[7] << 24);
	specs->year = block[9] + 1990;
	specs->week = block[8];
	DPRINTK(" Manufacturer: %s\n", specs->manufacturer);
	DPRINTK(" Model: %x\n", specs->model);
	DPRINTK(" Serial#: %u\n", specs->serial);
	DPRINTK(" Year: %u Week %u\n", specs->year, specs->week);
}
/*
 * Translate the EDID power-management feature flags into the
 * FB_DPMS_* capability bits in specs->dpms.
 */
static void get_dpms_capabilities(unsigned char flags,
				  struct fb_monspecs *specs)
{
	specs->dpms = 0;
	specs->dpms |= (flags & DPMS_ACTIVE_OFF) ? FB_DPMS_ACTIVE_OFF : 0;
	specs->dpms |= (flags & DPMS_SUSPEND)    ? FB_DPMS_SUSPEND    : 0;
	specs->dpms |= (flags & DPMS_STANDBY)    ? FB_DPMS_STANDBY    : 0;
	DPRINTK(" DPMS: Active %s, Suspend %s, Standby %s\n",
	       (flags & DPMS_ACTIVE_OFF) ? "yes" : "no",
	       (flags & DPMS_SUSPEND)    ? "yes" : "no",
	       (flags & DPMS_STANDBY)    ? "yes" : "no");
}
/*
 * Decode one 10-bit CIE chromaticity coordinate.  @low2 carries the two
 * least significant bits, @high8 the upper eight.  The raw 0..1023 value
 * is scaled to 0..1000 with rounding (the +512 before dividing by 1024).
 */
static int edid_decode_chroma(int low2, int high8)
{
	int tmp = low2 | (high8 << 2);

	tmp *= 1000;
	tmp += 512;
	return tmp / 1024;
}

/*
 * Parse the EDID chromaticity block.  Each coordinate's low 2 bits
 * live packed in bytes 5-6, its high 8 bits in bytes 7-14; the 8
 * decodes below were duplicated inline in the original and now share
 * edid_decode_chroma().
 */
static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
{
	DPRINTK(" Chroma\n");
	/* Chromaticity data */
	specs->chroma.redx =
		edid_decode_chroma((block[5] & (3 << 6)) >> 6, block[0x7]);
	DPRINTK(" RedX: 0.%03d ", specs->chroma.redx);
	specs->chroma.redy =
		edid_decode_chroma((block[5] & (3 << 4)) >> 4, block[0x8]);
	DPRINTK("RedY: 0.%03d\n", specs->chroma.redy);
	specs->chroma.greenx =
		edid_decode_chroma((block[5] & (3 << 2)) >> 2, block[0x9]);
	DPRINTK(" GreenX: 0.%03d ", specs->chroma.greenx);
	specs->chroma.greeny =
		edid_decode_chroma(block[5] & 3, block[0xa]);
	DPRINTK("GreenY: 0.%03d\n", specs->chroma.greeny);
	specs->chroma.bluex =
		edid_decode_chroma((block[6] & (3 << 6)) >> 6, block[0xb]);
	DPRINTK(" BlueX: 0.%03d ", specs->chroma.bluex);
	specs->chroma.bluey =
		edid_decode_chroma((block[6] & (3 << 4)) >> 4, block[0xc]);
	DPRINTK("BlueY: 0.%03d\n", specs->chroma.bluey);
	specs->chroma.whitex =
		edid_decode_chroma((block[6] & (3 << 2)) >> 2, block[0xd]);
	DPRINTK(" WhiteX: 0.%03d ", specs->chroma.whitex);
	specs->chroma.whitey =
		edid_decode_chroma(block[6] & 3, block[0xe]);
	DPRINTK("WhiteY: 0.%03d\n", specs->chroma.whitey);
}
/*
 * Synthesize full timings for an xres x yres @ refresh mode via
 * fb_get_mode() (GTF-style calculation) and copy them into @mode.
 * A temporary fb_var_screeninfo is allocated just to hold the result.
 *
 * NOTE(review): if the kzalloc fails, *mode is left completely
 * untouched and callers get garbage — confirm whether a zeroed
 * fallback should be written here instead.
 */
static void calc_mode_timings(int xres, int yres, int refresh,
			      struct fb_videomode *mode)
{
	struct fb_var_screeninfo *var;

	var = kzalloc(sizeof(struct fb_var_screeninfo), GFP_KERNEL);

	if (var) {
		var->xres = xres;
		var->yres = yres;
		fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON,
			    refresh, var, NULL);
		mode->xres = xres;
		mode->yres = yres;
		mode->pixclock = var->pixclock;
		mode->refresh = refresh;
		mode->left_margin = var->left_margin;
		mode->right_margin = var->right_margin;
		mode->upper_margin = var->upper_margin;
		mode->lower_margin = var->lower_margin;
		mode->hsync_len = var->hsync_len;
		mode->vsync_len = var->vsync_len;
		mode->vmode = 0;
		mode->sync = 0;
		kfree(var);
	}
}
/*
 * Decode the three "established timings" bytes of the EDID block.
 * Each set bit selects one legacy mode: modes with a vesa_modes[]
 * entry are copied verbatim, the rest are synthesized with
 * calc_mode_timings() and tagged FB_MODE_IS_CALCULATED.
 * Returns the number of modes appended to @mode.
 */
static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
{
	int num = 0;
	unsigned char c;

	/* established timings byte 1 */
	c = block[0];
	if (c&0x80) {
		calc_mode_timings(720, 400, 70, &mode[num]);
		mode[num++].flag = FB_MODE_IS_CALCULATED;
		DPRINTK(" 720x400@70Hz\n");
	}
	if (c&0x40) {
		calc_mode_timings(720, 400, 88, &mode[num]);
		mode[num++].flag = FB_MODE_IS_CALCULATED;
		DPRINTK(" 720x400@88Hz\n");
	}
	if (c&0x20) {
		mode[num++] = vesa_modes[3];
		DPRINTK(" 640x480@60Hz\n");
	}
	if (c&0x10) {
		calc_mode_timings(640, 480, 67, &mode[num]);
		mode[num++].flag = FB_MODE_IS_CALCULATED;
		DPRINTK(" 640x480@67Hz\n");
	}
	if (c&0x08) {
		mode[num++] = vesa_modes[4];
		DPRINTK(" 640x480@72Hz\n");
	}
	if (c&0x04) {
		mode[num++] = vesa_modes[5];
		DPRINTK(" 640x480@75Hz\n");
	}
	if (c&0x02) {
		mode[num++] = vesa_modes[7];
		DPRINTK(" 800x600@56Hz\n");
	}
	if (c&0x01) {
		mode[num++] = vesa_modes[8];
		DPRINTK(" 800x600@60Hz\n");
	}

	/* established timings byte 2 */
	c = block[1];
	if (c&0x80) {
		mode[num++] = vesa_modes[9];
		DPRINTK(" 800x600@72Hz\n");
	}
	if (c&0x40) {
		mode[num++] = vesa_modes[10];
		DPRINTK(" 800x600@75Hz\n");
	}
	if (c&0x20) {
		calc_mode_timings(832, 624, 75, &mode[num]);
		mode[num++].flag = FB_MODE_IS_CALCULATED;
		DPRINTK(" 832x624@75Hz\n");
	}
	if (c&0x10) {
		mode[num++] = vesa_modes[12];
		DPRINTK(" 1024x768@87Hz Interlaced\n");
	}
	if (c&0x08) {
		mode[num++] = vesa_modes[13];
		DPRINTK(" 1024x768@60Hz\n");
	}
	if (c&0x04) {
		mode[num++] = vesa_modes[14];
		DPRINTK(" 1024x768@70Hz\n");
	}
	if (c&0x02) {
		mode[num++] = vesa_modes[15];
		DPRINTK(" 1024x768@75Hz\n");
	}
	if (c&0x01) {
		mode[num++] = vesa_modes[21];
		DPRINTK(" 1280x1024@75Hz\n");
	}

	/* byte 3: only bit 7 defined, rest is the manufacturer's mask */
	c = block[2];
	if (c&0x80) {
		mode[num++] = vesa_modes[17];
		DPRINTK(" 1152x870@75Hz\n");
	}
	DPRINTK(" Manufacturer's mask: %x\n",c&0x7F);
	return num;
}
static int get_std_timing(unsigned char *block, struct fb_videomode *mode,
int ver, int rev)
{
int xres, yres = 0, refresh, ratio, i;
xres = (block[0] + 31) * 8;
if (xres <= 256)
return 0;
ratio = (block[1] & 0xc0) >> 6;
switch (ratio) {
case 0:
/* in EDID 1.3 the meaning of 0 changed to 16:10 (prior 1:1) */
if (ver < 1 || (ver == 1 && rev < 3))
yres = xres;
else
yres = (xres * 10)/16;
break;
case 1:
yres = (xres * 3)/4;
break;
case 2:
yres = (xres * 4)/5;
break;
case 3:
yres = (xres * 9)/16;
break;
}
refresh = (block[1] & 0x3f) + 60;
DPRINTK(" %dx%d@%dHz\n", xres, yres, refresh);
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
if (vesa_modes[i].xres == xres &&
vesa_modes[i].yres == yres &&
vesa_modes[i].refresh == refresh) {
*mode = vesa_modes[i];
mode->flag |= FB_MODE_IS_STANDARD;
return 1;
}
}
calc_mode_timings(xres, yres, refresh, mode);
return 1;
}
/*
 * Decode the six standard-timing descriptors embedded in a 0xfa
 * display descriptor.  Returns the number of modes produced.
 */
static int get_dst_timing(unsigned char *block,
			  struct fb_videomode *mode, int ver, int rev)
{
	int j, num = 0;

	for (j = 0; j < 6; j++) {
		num += get_std_timing(block, &mode[num], ver, rev);
		block += STD_TIMING_DESCRIPTION_SIZE;
	}

	return num;
}
/*
 * Decode an 18-byte EDID detailed timing descriptor into @mode using
 * the field-extraction macros from edid.h (H_ACTIVE, V_BLANKING, ...).
 * The pixel clock is converted to the framebuffer's picosecond unit
 * and margins are derived from the sync offset/width and blanking
 * totals.  Interlaced modes have their vertical parameters doubled.
 */
static void get_detailed_timing(unsigned char *block,
				struct fb_videomode *mode)
{
	mode->xres = H_ACTIVE;
	mode->yres = V_ACTIVE;
	/* EDID stores the clock in 10 kHz units; convert Hz -> kHz -> ps */
	mode->pixclock = PIXEL_CLOCK;
	mode->pixclock /= 1000;
	mode->pixclock = KHZ2PICOS(mode->pixclock);
	mode->right_margin = H_SYNC_OFFSET;
	/* back porch = total blanking minus front porch and sync width */
	mode->left_margin = (H_ACTIVE + H_BLANKING) -
		(H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
	mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
		V_SYNC_WIDTH;
	mode->lower_margin = V_SYNC_OFFSET;
	mode->hsync_len = H_SYNC_WIDTH;
	mode->vsync_len = V_SYNC_WIDTH;
	if (HSYNC_POSITIVE)
		mode->sync |= FB_SYNC_HOR_HIGH_ACT;
	if (VSYNC_POSITIVE)
		mode->sync |= FB_SYNC_VERT_HIGH_ACT;
	mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) *
				     (V_ACTIVE + V_BLANKING));
	if (INTERLACED) {
		/* EDID stores per-field values for interlaced modes */
		mode->yres *= 2;
		mode->upper_margin *= 2;
		mode->lower_margin *= 2;
		mode->vsync_len *= 2;
		mode->vmode |= FB_VMODE_INTERLACED;
	}
	mode->flag = FB_MODE_IS_DETAILED;

	DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
	DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
	       H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
	DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
	       V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
	DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
	       (VSYNC_POSITIVE) ? "+" : "-");
}
/**
* fb_create_modedb - create video mode database
* @edid: EDID data
* @dbsize: database size
*
* RETURNS: struct fb_videomode, @dbsize contains length of database
*
* DESCRIPTION:
* This function builds a mode database using the contents of the EDID
* data
*/
static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
{
	struct fb_videomode *mode, *m;
	unsigned char *block;
	int num = 0, i, first = 1;
	int ver, rev;

	mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
	if (mode == NULL)
		return NULL;

	/*
	 * Validate the EDID before touching it: the original read
	 * edid[EDID_STRUCT_VERSION] above this check, dereferencing a
	 * NULL pointer whenever no EDID block was supplied.
	 */
	if (edid == NULL || !edid_checksum(edid) ||
	    !edid_check_header(edid)) {
		kfree(mode);
		return NULL;
	}

	ver = edid[EDID_STRUCT_VERSION];
	rev = edid[EDID_STRUCT_REVISION];

	*dbsize = 0;

	/* detailed timing descriptors come first; the first one found
	 * is the monitor's preferred mode */
	DPRINTK(" Detailed Timings\n");
	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
	for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
		if (!(block[0] == 0x00 && block[1] == 0x00)) {
			get_detailed_timing(block, &mode[num]);
			if (first) {
				mode[num].flag |= FB_MODE_IS_FIRST;
				first = 0;
			}
			num++;
		}
	}

	DPRINTK(" Supported VESA Modes\n");
	block = edid + ESTABLISHED_TIMING_1;
	num += get_est_timing(block, &mode[num]);

	DPRINTK(" Standard Timings\n");
	block = edid + STD_TIMING_DESCRIPTIONS_START;
	for (i = 0; i < STD_TIMING; i++, block += STD_TIMING_DESCRIPTION_SIZE)
		num += get_std_timing(block, &mode[num], ver, rev);

	/* 0xfa display descriptors hold six more standard timings each */
	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
	for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
		if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
			num += get_dst_timing(block + 5, &mode[num], ver, rev);
	}

	/* Yikes, EDID data is totally useless */
	if (!num) {
		kfree(mode);
		return NULL;
	}

	*dbsize = num;
	/* shrink the oversized scratch array to the exact count */
	m = kmalloc(num * sizeof(struct fb_videomode), GFP_KERNEL);
	if (!m)
		return mode;
	memmove(m, mode, num * sizeof(struct fb_videomode));
	kfree(mode);
	return m;
}
/**
 * fb_destroy_modedb - destroys mode database
 * @modedb: mode database to destroy (may be NULL)
 *
 * DESCRIPTION:
 * Destroy mode database created by fb_create_modedb
 */
void fb_destroy_modedb(struct fb_videomode *modedb)
{
	/* kfree(NULL) is a no-op, so a NULL @modedb is safe here. */
	kfree(modedb);
}
/*
 * fb_get_monitor_limits - fill monitor operating limits into @specs.
 *
 * Prefers an explicit monitor-range-limits descriptor in the EDID; if
 * none is present, extrapolates the limits from the supported modes.
 * Returns 0 on success, 1 if nothing usable was found.
 *
 * NOTE(review): H_MIN_RATE, H_MAX_RATE, V_MIN_RATE, V_MAX_RATE,
 * MAX_PIXEL_CLOCK and GTF_SUPPORT are macros that presumably expand to
 * bytes of the local variable 'block' -- do not rename 'block'; confirm
 * against the macro definitions in the EDID header.
 */
static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
{
	int i, retval = 1;
	unsigned char *block;

	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;

	DPRINTK(" Monitor Operating Limits: ");

	/* Scan the four descriptor slots for a range-limits descriptor */
	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
		if (edid_is_limits_block(block)) {
			specs->hfmin = H_MIN_RATE * 1000;
			specs->hfmax = H_MAX_RATE * 1000;
			specs->vfmin = V_MIN_RATE;
			specs->vfmax = V_MAX_RATE;
			specs->dclkmax = MAX_PIXEL_CLOCK * 1000000;
			specs->gtf = (GTF_SUPPORT) ? 1 : 0;
			retval = 0;
			DPRINTK("From EDID\n");
			break;
		}
	}

	/* estimate monitor limits based on modes supported */
	if (retval) {
		struct fb_videomode *modes, *mode;
		int num_modes, hz, hscan, pixclock;
		int vtotal, htotal;

		modes = fb_create_modedb(edid, &num_modes);
		if (!modes) {
			DPRINTK("None Available\n");
			return 1;
		}

		retval = 0;
		/* Widen min/max envelopes over every mode in the database */
		for (i = 0; i < num_modes; i++) {
			mode = &modes[i];
			pixclock = PICOS2KHZ(modes[i].pixclock) * 1000;
			htotal = mode->xres + mode->right_margin + mode->hsync_len
				+ mode->left_margin;
			vtotal = mode->yres + mode->lower_margin + mode->vsync_len
				+ mode->upper_margin;

			/* Interlaced scans half the lines per field;
			 * doublescan draws each line twice */
			if (mode->vmode & FB_VMODE_INTERLACED)
				vtotal /= 2;
			if (mode->vmode & FB_VMODE_DOUBLE)
				vtotal *= 2;

			/* Rounded hscan (Hz, then rounded to kHz steps) */
			hscan = (pixclock + htotal / 2) / htotal;
			hscan = (hscan + 500) / 1000 * 1000;
			hz = (hscan + vtotal / 2) / vtotal;

			if (specs->dclkmax == 0 || specs->dclkmax < pixclock)
				specs->dclkmax = pixclock;
			if (specs->dclkmin == 0 || specs->dclkmin > pixclock)
				specs->dclkmin = pixclock;
			if (specs->hfmax == 0 || specs->hfmax < hscan)
				specs->hfmax = hscan;
			if (specs->hfmin == 0 || specs->hfmin > hscan)
				specs->hfmin = hscan;
			if (specs->vfmax == 0 || specs->vfmax < hz)
				specs->vfmax = hz;
			if (specs->vfmin == 0 || specs->vfmin > hz)
				specs->vfmin = hz;
		}
		DPRINTK("Extrapolated\n");
		fb_destroy_modedb(modes);
	}
	DPRINTK(" H: %d-%dKHz V: %d-%dHz DCLK: %dMHz\n",
		specs->hfmin/1000, specs->hfmax/1000, specs->vfmin,
		specs->vfmax, specs->dclkmax/1000000);
	return retval;
}
/*
 * get_monspecs - decode the EDID display-parameters block into @specs.
 *
 * Fills input type, sync capabilities, physical size, gamma, DPMS,
 * display color type, chromaticity and misc feature bits.
 */
static void get_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
	unsigned char c, *block;

	block = edid + EDID_STRUCT_DISPLAY;

	fb_get_monitor_limits(edid, specs);

	/* Bit 7 of the video-input byte: digital vs analog input */
	c = block[0] & 0x80;
	specs->input = 0;
	if (c) {
		specs->input |= FB_DISP_DDI;
		DPRINTK(" Digital Display Input");
	} else {
		DPRINTK(" Analog Display Input: Input Voltage - ");
		/* Bits 6:5 encode the analog signal level */
		switch ((block[0] & 0x60) >> 5) {
		case 0:
			DPRINTK("0.700V/0.300V");
			specs->input |= FB_DISP_ANA_700_300;
			break;
		case 1:
			DPRINTK("0.714V/0.286V");
			specs->input |= FB_DISP_ANA_714_286;
			break;
		case 2:
			DPRINTK("1.000V/0.400V");
			specs->input |= FB_DISP_ANA_1000_400;
			break;
		case 3:
			DPRINTK("0.700V/0.000V");
			specs->input |= FB_DISP_ANA_700_000;
			break;
		}
	}
	DPRINTK("\n Sync: ");
	c = block[0] & 0x10;
	if (c)
		DPRINTK(" Configurable signal level\n");
	c = block[0] & 0x0f;
	specs->signal = 0;
	/*
	 * NOTE(review): c was just masked with 0x0f, so the 0x10 test
	 * below can never be true and FB_SIGNAL_BLANK_BLANK is never set.
	 * Looks like the mask should include bit 4 -- confirm against the
	 * EDID video-input-definition bit layout before changing.
	 */
	if (c & 0x10) {
		DPRINTK("Blank to Blank ");
		specs->signal |= FB_SIGNAL_BLANK_BLANK;
	}
	if (c & 0x08) {
		DPRINTK("Separate ");
		specs->signal |= FB_SIGNAL_SEPARATE;
	}
	if (c & 0x04) {
		DPRINTK("Composite ");
		specs->signal |= FB_SIGNAL_COMPOSITE;
	}
	if (c & 0x02) {
		DPRINTK("Sync on Green ");
		specs->signal |= FB_SIGNAL_SYNC_ON_GREEN;
	}
	if (c & 0x01) {
		DPRINTK("Serration on ");
		specs->signal |= FB_SIGNAL_SERRATION_ON;
	}
	DPRINTK("\n");

	/* Maximum image size in cm; 0 means "variable" (e.g. projector) */
	specs->max_x = block[1];
	specs->max_y = block[2];
	DPRINTK(" Max H-size in cm: ");
	if (specs->max_x)
		DPRINTK("%d\n", specs->max_x);
	else
		DPRINTK("variable\n");
	DPRINTK(" Max V-size in cm: ");
	if (specs->max_y)
		DPRINTK("%d\n", specs->max_y);
	else
		DPRINTK("variable\n");

	/* Gamma is stored as (gamma * 100) - 100 */
	c = block[3];
	specs->gamma = c+100;
	DPRINTK(" Gamma: ");
	DPRINTK("%d.%d\n", specs->gamma/100, specs->gamma % 100);

	get_dpms_capabilities(block[4], specs);

	/* Bits 4:3 of the feature-support byte: display color type */
	switch ((block[4] & 0x18) >> 3) {
	case 0:
		DPRINTK(" Monochrome/Grayscale\n");
		specs->input |= FB_DISP_MONO;
		break;
	case 1:
		DPRINTK(" RGB Color Display\n");
		specs->input |= FB_DISP_RGB;
		break;
	case 2:
		DPRINTK(" Non-RGB Multicolor Display\n");
		specs->input |= FB_DISP_MULTI;
		break;
	default:
		DPRINTK(" Unknown\n");
		specs->input |= FB_DISP_UNKNOWN;
		break;
	}

	get_chroma(block, specs);

	specs->misc = 0;
	/* Feature-support bits 2:0 */
	c = block[4] & 0x7;
	if (c & 0x04) {
		DPRINTK(" Default color format is primary\n");
		specs->misc |= FB_MISC_PRIM_COLOR;
	}
	if (c & 0x02) {
		DPRINTK(" First DETAILED Timing is preferred\n");
		specs->misc |= FB_MISC_1ST_DETAIL;
	}
	if (c & 0x01) {
		/* NOTE(review): plain printk is inconsistent with the
		 * DPRINTK used for every other message in this function */
		printk(" Display is GTF capable\n");
		specs->gtf = 1;
	}
}
/*
 * fb_parse_edid - fill @var from the first detailed timing in @edid.
 *
 * Returns 0 if a detailed timing descriptor was found and @var was
 * populated, 1 on invalid arguments, bad EDID, or no timing block.
 *
 * NOTE(review): H_ACTIVE, V_ACTIVE, H_SYNC_OFFSET, H_SYNC_WIDTH,
 * H_BLANKING, V_BLANKING, V_SYNC_OFFSET, V_SYNC_WIDTH, PIXEL_CLOCK,
 * HSYNC_POSITIVE and VSYNC_POSITIVE are macros that presumably decode
 * bytes of the local variable 'block' -- do not rename 'block'.
 */
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
{
	int i;
	unsigned char *block;

	if (edid == NULL || var == NULL)
		return 1;

	if (!(edid_checksum(edid)))
		return 1;

	if (!(edid_check_header(edid)))
		return 1;

	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;

	/* Use the first of the four descriptor slots that holds a timing */
	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
		if (edid_is_timing_block(block)) {
			var->xres = var->xres_virtual = H_ACTIVE;
			var->yres = var->yres_virtual = V_ACTIVE;
			var->height = var->width = 0;
			var->right_margin = H_SYNC_OFFSET;
			/* back porch = blanking - (front porch + sync) */
			var->left_margin = (H_ACTIVE + H_BLANKING) -
				(H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
			var->upper_margin = V_BLANKING - V_SYNC_OFFSET -
				V_SYNC_WIDTH;
			var->lower_margin = V_SYNC_OFFSET;
			var->hsync_len = H_SYNC_WIDTH;
			var->vsync_len = V_SYNC_WIDTH;
			/* fbdev stores the pixel clock as a picosecond period */
			var->pixclock = PIXEL_CLOCK;
			var->pixclock /= 1000;
			var->pixclock = KHZ2PICOS(var->pixclock);

			if (HSYNC_POSITIVE)
				var->sync |= FB_SYNC_HOR_HIGH_ACT;
			if (VSYNC_POSITIVE)
				var->sync |= FB_SYNC_VERT_HIGH_ACT;
			return 0;
		}
	}
	return 1;
}
/*
 * fb_edid_to_monspecs - parse a base EDID block into @specs.
 *
 * Zeroes @specs, then fills version/revision, vendor data, identity
 * strings, display characteristics and the mode database.  Returns
 * silently (leaving @specs untouched) on NULL or invalid EDID.
 */
void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
	unsigned char *block;
	int i, found = 0;

	if (edid == NULL)
		return;

	if (!(edid_checksum(edid)))
		return;

	if (!(edid_check_header(edid)))
		return;

	memset(specs, 0, sizeof(struct fb_monspecs));

	specs->version = edid[EDID_STRUCT_VERSION];
	specs->revision = edid[EDID_STRUCT_REVISION];

	DPRINTK("========================================\n");
	DPRINTK("Display Information (EDID)\n");
	DPRINTK("========================================\n");
	DPRINTK(" EDID Version %d.%d\n", (int) specs->version,
	       (int) specs->revision);

	parse_vendor_block(edid + ID_MANUFACTURER_NAME, specs);

	/* Collect identity strings from the four descriptor slots */
	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
		if (edid_is_serial_block(block)) {
			copy_string(block, specs->serial_no);
			DPRINTK(" Serial Number: %s\n", specs->serial_no);
		} else if (edid_is_ascii_block(block)) {
			copy_string(block, specs->ascii);
			DPRINTK(" ASCII Block: %s\n", specs->ascii);
		} else if (edid_is_monitor_block(block)) {
			copy_string(block, specs->monitor);
			DPRINTK(" Monitor Name: %s\n", specs->monitor);
		}
	}

	DPRINTK(" Display Characteristics:\n");
	get_monspecs(edid, specs);

	/* modedb may come back NULL, but then modedb_len stays 0 (memset
	 * above) so the loop below is safe */
	specs->modedb = fb_create_modedb(edid, &specs->modedb_len);

	/*
	 * Workaround for buggy EDIDs that sets that the first
	 * detailed timing is preferred but has not detailed
	 * timing specified
	 */
	for (i = 0; i < specs->modedb_len; i++) {
		if (specs->modedb[i].flag & FB_MODE_IS_DETAILED) {
			found = 1;
			break;
		}
	}

	if (!found)
		specs->misc &= ~FB_MISC_1ST_DETAIL;

	DPRINTK("========================================\n");
}
/**
 * fb_edid_add_monspecs() - add monitor video modes from E-EDID data
 * @edid: 128 byte array with an E-EDID block
 * @specs: monitor specs to be extended
 *
 * Parses a CEA-861 extension block: collects Short Video Descriptors
 * (SVDs) and extended detailed timings, and appends the resulting
 * modes to @specs->modedb.
 */
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
	unsigned char *block;
	struct fb_videomode *m;
	int num = 0, i;
	u8 svd[64], edt[(128 - 4) / DETAILED_TIMING_DESCRIPTION_SIZE];
	u8 pos = 4, svd_n = 0;

	if (!edid)
		return;

	if (!edid_checksum(edid))
		return;

	/* Only CEA (tag 0x02) extensions; edid[2] is the DTD offset */
	if (edid[0] != 0x2 ||
	    edid[2] < 4 || edid[2] > 128 - DETAILED_TIMING_DESCRIPTION_SIZE)
		return;

	DPRINTK(" Short Video Descriptors\n");

	while (pos < edid[2]) {
		u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;

		pr_debug("Data block %u of %u bytes\n", type, len);

		if (type == 2) {
			/* BUGFIX: a malformed length could point past the
			 * 128-byte block; stop rather than read OOB. */
			if (pos + len > 127)
				break;
			for (i = pos; i < pos + len; i++) {
				/*
				 * BUGFIX: the SVD payload is the bytes after
				 * the data-block header, edid[pos+1..pos+len].
				 * The old edid[pos + i] indexing (with i
				 * starting at pos) double-counted pos and
				 * read the wrong bytes.
				 */
				u8 idx = edid[i + 1] & 0x7f;

				/* BUGFIX: bound svd[] -- a data block
				 * collection can carry more than 64 SVDs,
				 * which previously overflowed the array. */
				if (svd_n >= ARRAY_SIZE(svd))
					break;
				svd[svd_n++] = idx;
				pr_debug("N%sative mode #%d\n",
					 edid[i + 1] & 0x80 ? "" : "on-n", idx);
			}
		}
		pos += len + 1;
	}

	block = edid + edid[2];

	DPRINTK(" Extended Detailed Timings\n");

	/* Record the offset of each valid DTD (PIXEL_CLOCK reads 'block') */
	for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
	     i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
		if (PIXEL_CLOCK)
			edt[num++] = block - edid;

	/* Yikes, EDID data is totally useless */
	if (!(num + svd_n))
		return;

	m = kzalloc((specs->modedb_len + num + svd_n) *
		    sizeof(struct fb_videomode), GFP_KERNEL);
	if (!m)
		return;

	memcpy(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));

	for (i = specs->modedb_len; i < specs->modedb_len + num; i++) {
		get_detailed_timing(edid + edt[i - specs->modedb_len], &m[i]);
		if (i == specs->modedb_len)
			m[i].flag |= FB_MODE_IS_FIRST;
		pr_debug("Adding %ux%u@%u\n", m[i].xres, m[i].yres, m[i].refresh);
	}

	/* Translate SVD codes into modes via the CEA mode table */
	for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
		int idx = svd[i - specs->modedb_len - num];

		if (!idx || idx > 63) {
			pr_warning("Reserved SVD code %d\n", idx);
		} else if (idx > ARRAY_SIZE(cea_modes) || !cea_modes[idx].xres) {
			pr_warning("Unimplemented SVD code %d\n", idx);
		} else {
			memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
			pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
				 m[i].xres, m[i].yres, m[i].refresh);
		}
	}

	kfree(specs->modedb);
	specs->modedb = m;
	specs->modedb_len = specs->modedb_len + num + svd_n;
}
/*
 * VESA Generalized Timing Formula (GTF)
 */
#define FLYBACK 550		/* vertical flyback interval (used as us, see fb_get_vblank) */
#define V_FRONTPORCH 1		/* fixed vertical front porch, in lines */
#define H_OFFSET 40		/* GTF C' blanking offset, % */
#define H_SCALEFACTOR 20	/* GTF scale factor K */
#define H_BLANKSCALE 128	/* GTF blank-time scale J (out of 256) */
#define H_GRADIENT 600		/* GTF gradient M, %/kHz */
#define C_VAL 30		/* effective C used by the dclk path */
#define M_VAL 300		/* effective M used by the dclk path */

/* Intermediate timing values shared by the GTF calculators below. */
struct __fb_timings {
	u32 dclk;	/* pixel clock, Hz (htotal * hfreq) */
	u32 hfreq;	/* horizontal scan rate, Hz */
	u32 vfreq;	/* vertical refresh rate, Hz */
	u32 hactive;	/* active pixels per line */
	u32 vactive;	/* active lines per frame */
	u32 hblank;	/* blanked pixels per line */
	u32 vblank;	/* blanked lines per frame */
	u32 htotal;	/* hactive + hblank */
	u32 vtotal;	/* vactive + vblank */
};
/**
 * fb_get_vblank - get vertical blank time
 * @hfreq: horizontal freq
 *
 * DESCRIPTION:
 * Returns the number of blanked lines per frame: the FLYBACK interval
 * expressed in scanlines at @hfreq (rounded to the nearest line), plus
 * the fixed V_FRONTPORCH:
 *
 *                flyback * hfreq
 *   vblank = --------------------- + V_FRONTPORCH
 *                   1000000
 */
static u32 fb_get_vblank(u32 hfreq)
{
	u32 lines;

	/* flyback lines = hfreq * FLYBACK / 1e6, rounded */
	lines = (hfreq * FLYBACK) / 1000;
	lines = (lines + 500) / 1000;
	return lines + V_FRONTPORCH;
}
/**
 * fb_get_hblank_by_hfreq - get horizontal blank time given hfreq
 * @hfreq: horizontal freq
 * @xres: horizontal resolution in pixels
 *
 * DESCRIPTION:
 * Computes the GTF duty cycle (percentage of htotal spent blanked) as
 * C - M/hfreq and converts it to blank pixels:
 *
 *              xres * duty_cycle
 *   hblank = --------------------
 *              100 - duty_cycle
 *
 * where C = ((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE)/256
 *           + H_SCALEFACTOR
 * and   M = H_BLANKSCALE * H_GRADIENT / 256.
 */
static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
{
	u32 c_milli, m_milli, duty, blank;

	/* C and M are carried in thousandths for integer precision */
	c_milli = (((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE) / 256 +
		   H_SCALEFACTOR) * 1000;
	m_milli = (H_BLANKSCALE * H_GRADIENT) / 256;
	m_milli = (m_milli * 1000000) / hfreq;

	duty = c_milli - m_milli;
	blank = (xres * duty) / (100000 - duty);
	return blank;
}
/**
 * fb_get_hblank_by_dclk - get horizontal blank time given pixelclock
 * @dclk: pixelclock in Hz
 * @xres: horizontal resolution in pixels
 *
 * DESCRIPTION:
 * Solves the GTF quadratic for the horizontal period, derives the
 * duty cycle as C - M * h_period, then:
 *
 *              xres * duty_cycle
 *   hblank = --------------------   (rounded down to a multiple of 16)
 *              100 - duty_cycle
 *
 * with M = 300 and C = 30 (M_VAL/C_VAL).
 */
static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
{
	u32 duty, period, blank;

	dclk /= 1000;

	/* h_period = (sqrt((100-C)^2 + 0.4*xres*M/dclk) - (100-C)) / (2M),
	 * carried through scaled integer arithmetic */
	period = 100 - C_VAL;
	period *= period;
	period += (M_VAL * xres * 2 * 1000) / (5 * dclk);
	period *= 10000;
	period = int_sqrt(period);
	period -= (100 - C_VAL) * 100;
	period *= 1000;
	period /= 2 * M_VAL;

	duty = C_VAL * 1000 - (M_VAL * period) / 100;
	blank = (xres * duty) / (100000 - duty) + 8;
	blank &= ~15;		/* GTF: hblank is a multiple of 16 pixels */
	return blank;
}
/**
 * fb_get_hfreq - estimate hsync
 * @vfreq: vertical refresh rate
 * @yres: vertical resolution
 *
 * DESCRIPTION:
 *
 *          (yres + V_FRONTPORCH) * vfreq * 1000000
 * hfreq = -----------------------------------------
 *              1000000 - (vfreq * FLYBACK)
 */
static u32 fb_get_hfreq(u32 vfreq, u32 yres)
{
	u32 denom, numer;

	/* both terms pre-scaled by 1000 so the ratio is unchanged */
	denom = (1000000 - (vfreq * FLYBACK)) / 1000;
	numer = (yres + V_FRONTPORCH) * vfreq * 1000;
	return numer / denom;
}
/* Derive all timings from a target vertical refresh (vfreq, hactive and
 * vactive must be set on entry). */
static void fb_timings_vfreq(struct __fb_timings *timings)
{
	timings->hfreq = fb_get_hfreq(timings->vfreq, timings->vactive);
	timings->vblank = fb_get_vblank(timings->hfreq);
	timings->vtotal = timings->vactive + timings->vblank;
	timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
						 timings->hactive);
	timings->htotal = timings->hactive + timings->hblank;
	timings->dclk = timings->htotal * timings->hfreq;
}
/* Derive all timings from a target horizontal scan rate (hfreq, hactive
 * and vactive must be set on entry). */
static void fb_timings_hfreq(struct __fb_timings *timings)
{
	timings->vblank = fb_get_vblank(timings->hfreq);
	timings->vtotal = timings->vactive + timings->vblank;
	timings->vfreq = timings->hfreq/timings->vtotal;
	timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
						 timings->hactive);
	timings->htotal = timings->hactive + timings->hblank;
	timings->dclk = timings->htotal * timings->hfreq;
}
/* Derive all timings from a target pixel clock (dclk, hactive and
 * vactive must be set on entry). */
static void fb_timings_dclk(struct __fb_timings *timings)
{
	timings->hblank = fb_get_hblank_by_dclk(timings->dclk,
						timings->hactive);
	timings->htotal = timings->hactive + timings->hblank;
	timings->hfreq = timings->dclk/timings->htotal;
	timings->vblank = fb_get_vblank(timings->hfreq);
	timings->vtotal = timings->vactive + timings->vblank;
	timings->vfreq = timings->hfreq/timings->vtotal;
}
/*
 * fb_get_mode - calculates video mode using VESA GTF
 * @flags: if: 0 - maximize vertical refresh rate
 *             1 - vrefresh-driven calculation;
 *             2 - hscan-driven calculation;
 *             3 - pixelclock-driven calculation;
 * @val: depending on @flags, ignored, vrefresh, hsync or pixelclock
 * @var: pointer to fb_var_screeninfo
 * @info: pointer to fb_info
 *
 * DESCRIPTION:
 * Calculates video mode based on monitor specs using VESA GTF.
 * The GTF is best for VESA GTF compliant monitors but is
 * specifically formulated to work for older monitors as well.
 *
 * If @flag==0, the function will attempt to maximize the
 * refresh rate. Otherwise, it will calculate timings based on
 * the flag and accompanying value.
 *
 * If FB_IGNOREMON bit is set in @flags, monitor specs will be
 * ignored and @var will be filled with the calculated timings.
 *
 * All calculations are based on the VESA GTF Spreadsheet
 * available at VESA's public ftp (http://www.vesa.org).
 *
 * NOTES:
 * The timings generated by the GTF will be different from VESA
 * DMT. It might be a good idea to keep a table of standard
 * VESA modes as well. The GTF may also not work for some displays,
 * such as, and especially, analog TV.
 *
 * REQUIRES:
 * A valid info->monspecs, otherwise 'safe numbers' will be used.
 */
int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct __fb_timings *timings;
	u32 interlace = 1, dscan = 1;
	/* NOTE(review): err is u32 yet holds -EINVAL; the value converts
	 * back correctly on return, but the type is odd. */
	u32 hfmin, hfmax, vfmin, vfmax, dclkmin, dclkmax, err = 0;

	timings = kzalloc(sizeof(struct __fb_timings), GFP_KERNEL);

	if (!timings)
		return -ENOMEM;

	/*
	 * If monspecs are invalid, use values that are enough
	 * for 640x480@60
	 */
	if (!info || !info->monspecs.hfmax || !info->monspecs.vfmax ||
	    !info->monspecs.dclkmax ||
	    info->monspecs.hfmax < info->monspecs.hfmin ||
	    info->monspecs.vfmax < info->monspecs.vfmin ||
	    info->monspecs.dclkmax < info->monspecs.dclkmin) {
		hfmin = 29000; hfmax = 30000;
		vfmin = 60; vfmax = 60;
		dclkmin = 0; dclkmax = 25000000;
	} else {
		hfmin = info->monspecs.hfmin;
		hfmax = info->monspecs.hfmax;
		vfmin = info->monspecs.vfmin;
		vfmax = info->monspecs.vfmax;
		dclkmin = info->monspecs.dclkmin;
		dclkmax = info->monspecs.dclkmax;
	}

	timings->hactive = var->xres;
	timings->vactive = var->yres;
	/* Interlaced fields carry half the lines; doublescan draws each
	 * line twice -- interlace/dscan undo this for var below */
	if (var->vmode & FB_VMODE_INTERLACED) {
		timings->vactive /= 2;
		interlace = 2;
	}
	if (var->vmode & FB_VMODE_DOUBLE) {
		timings->vactive *= 2;
		dscan = 2;
	}

	switch (flags & ~FB_IGNOREMON) {
	case FB_MAXTIMINGS:	/* maximize refresh rate */
		timings->hfreq = hfmax;
		fb_timings_hfreq(timings);
		/* Clamp back into the monitor envelope if we overshot */
		if (timings->vfreq > vfmax) {
			timings->vfreq = vfmax;
			fb_timings_vfreq(timings);
		}
		if (timings->dclk > dclkmax) {
			timings->dclk = dclkmax;
			fb_timings_dclk(timings);
		}
		break;
	case FB_VSYNCTIMINGS:	/* vrefresh driven */
		timings->vfreq = val;
		fb_timings_vfreq(timings);
		break;
	case FB_HSYNCTIMINGS:	/* hsync driven */
		timings->hfreq = val;
		fb_timings_hfreq(timings);
		break;
	case FB_DCLKTIMINGS:	/* pixelclock driven */
		timings->dclk = PICOS2KHZ(val) * 1000;
		fb_timings_dclk(timings);
		break;
	default:
		err = -EINVAL;
	}

	/* Reject the result unless FB_IGNOREMON or it fits the monitor */
	if (err || (!(flags & FB_IGNOREMON) &&
	    (timings->vfreq < vfmin || timings->vfreq > vfmax ||
	     timings->hfreq < hfmin || timings->hfreq > hfmax ||
	     timings->dclk < dclkmin || timings->dclk > dclkmax))) {
		err = -EINVAL;
	} else {
		/* GTF porch/sync split: hsync is 8% of htotal, vsync is
		 * 3 lines, vertical front porch is 1 line */
		var->pixclock = KHZ2PICOS(timings->dclk/1000);
		var->hsync_len = (timings->htotal * 8)/100;
		var->right_margin = (timings->hblank/2) - var->hsync_len;
		var->left_margin = timings->hblank - var->right_margin -
			var->hsync_len;
		var->vsync_len = (3 * interlace)/dscan;
		var->lower_margin = (1 * interlace)/dscan;
		var->upper_margin = (timings->vblank * interlace)/dscan -
			(var->vsync_len + var->lower_margin);
	}

	kfree(timings);
	return err;
}
#else
/*
 * CONFIG_FB_MODE_HELPERS is disabled: provide stubs so callers still
 * link.  The parsers report failure and the destroy/add helpers do
 * nothing.
 */
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
{
	return 1;
}
void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
	/* assigns only the local parameter -- intentionally a no-op */
	specs = NULL;
}
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
}
void fb_destroy_modedb(struct fb_videomode *modedb)
{
}
int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
		struct fb_info *info)
{
	return -EINVAL;
}
#endif /* CONFIG_FB_MODE_HELPERS */
/*
 * fb_validate_mode - validates var against monitor capabilities
 * @var: pointer to fb_var_screeninfo
 * @info: pointer to fb_info
 *
 * DESCRIPTION:
 * Checks that the refresh rate, scan rate and pixel clock implied by
 * @var all fall inside the limits in info->monspecs.  Returns 0 when
 * the mode is acceptable, -EINVAL otherwise.
 *
 * REQUIRES:
 * A valid info->monspecs; if the specs look bogus, conservative
 * 640x480@60 limits are substituted.
 */
int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
{
	u32 hscan, vrate, line_total, frame_total, dclk;
	/* fallback limits: enough for 640x480@60 */
	u32 hmin = 29000, hmax = 30000;
	u32 vmin = 60, vmax = 60;
	u32 cmin = 0, cmax = 25000000;

	/* Trust the monspecs only when every limit is present and sane */
	if (info->monspecs.hfmax && info->monspecs.vfmax &&
	    info->monspecs.dclkmax &&
	    info->monspecs.hfmax >= info->monspecs.hfmin &&
	    info->monspecs.vfmax >= info->monspecs.vfmin &&
	    info->monspecs.dclkmax >= info->monspecs.dclkmin) {
		hmin = info->monspecs.hfmin;
		hmax = info->monspecs.hfmax;
		vmin = info->monspecs.vfmin;
		vmax = info->monspecs.vfmax;
		cmin = info->monspecs.dclkmin;
		cmax = info->monspecs.dclkmax;
	}

	if (!var->pixclock)
		return -EINVAL;
	dclk = PICOS2KHZ(var->pixclock) * 1000;

	line_total = var->xres + var->right_margin + var->hsync_len +
		     var->left_margin;
	frame_total = var->yres + var->lower_margin + var->vsync_len +
		      var->upper_margin;

	/* interlace halves, doublescan doubles, the lines per refresh */
	if (var->vmode & FB_VMODE_INTERLACED)
		frame_total /= 2;
	if (var->vmode & FB_VMODE_DOUBLE)
		frame_total *= 2;

	hscan = dclk / line_total;
	hscan = (hscan + 500) / 1000 * 1000;	/* round to kHz steps */
	vrate = hscan / frame_total;

	if (vrate < vmin || vrate > vmax ||
	    hscan < hmin || hscan > hmax ||
	    dclk < cmin || dclk > cmax)
		return -EINVAL;
	return 0;
}
#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
/*
 * We need to ensure that the EDID block is only returned for
 * the primary graphics adapter.
 */
const unsigned char *fb_firmware_edid(struct device *device)
{
	struct pci_dev *dev = NULL;
	struct resource *res = NULL;
	unsigned char *edid = NULL;

	if (device)
		dev = to_pci_dev(device);

	if (dev)
		res = &dev->resource[PCI_ROM_RESOURCE];

	/* IORESOURCE_ROM_SHADOW is presumably set only on the primary
	 * (boot) adapter's ROM resource -- confirm; only then hand out
	 * the firmware-provided EDID block. */
	if (res && res->flags & IORESOURCE_ROM_SHADOW)
		edid = edid_info.dummy;

	return edid;
}
#else
/* No firmware-provided EDID on this configuration. */
const unsigned char *fb_firmware_edid(struct device *device)
{
	return NULL;
}
#endif
/* Public fbmon entry points, used by framebuffer drivers */
EXPORT_SYMBOL(fb_firmware_edid);
EXPORT_SYMBOL(fb_parse_edid);
EXPORT_SYMBOL(fb_edid_to_monspecs);
EXPORT_SYMBOL(fb_edid_add_monspecs);
EXPORT_SYMBOL(fb_get_mode);
EXPORT_SYMBOL(fb_validate_mode);
EXPORT_SYMBOL(fb_destroy_modedb);
| gpl-2.0 |
micchie/mptcp | drivers/acpi/acpica/exfield.c | 5045 | 12226 | /******************************************************************************
*
* Module Name: exfield - ACPI AML (p-code) execution - field manipulation
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")
/*******************************************************************************
*
* FUNCTION: acpi_ex_read_data_from_field
*
* PARAMETERS: walk_state - Current execution state
* obj_desc - The named field
* ret_buffer_desc - Where the return data object is stored
*
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
* Buffer, depending on the size of the field.
*
******************************************************************************/
acpi_status
acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
			     union acpi_operand_object *obj_desc,
			     union acpi_operand_object **ret_buffer_desc)
{
	acpi_status status;
	union acpi_operand_object *buffer_desc;
	acpi_size length;
	void *buffer;
	u32 function;

	ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);

	/* Parameter validation */

	if (!obj_desc) {
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}
	if (!ret_buffer_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
		/*
		 * If the buffer_field arguments have not been previously evaluated,
		 * evaluate them now and save the results.
		 */
		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
			status = acpi_ds_get_buffer_field_arguments(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}
	} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
		   (obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_SMBUS
		    || obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_GSBUS
		    || obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_IPMI)) {
		/*
		 * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
		 * the data and then directly access the region handler.
		 *
		 * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
		 */
		if (obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_SMBUS) {
			length = ACPI_SMBUS_BUFFER_SIZE;
			function =
			    ACPI_READ | (obj_desc->field.attribute << 16);
		} else if (obj_desc->field.region_obj->region.space_id ==
			   ACPI_ADR_SPACE_GSBUS) {
			length = ACPI_GSBUS_BUFFER_SIZE;
			function =
			    ACPI_READ | (obj_desc->field.attribute << 16);
		} else {	/* IPMI */

			length = ACPI_IPMI_BUFFER_SIZE;
			function = ACPI_READ;
		}

		/* Protocol-sized buffer; the handler fills it in place */
		buffer_desc = acpi_ut_create_buffer_object(length);
		if (!buffer_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Lock entire transaction if requested */

		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);

		/* Call the region handler for the read */

		status = acpi_ex_access_region(obj_desc, 0,
					       ACPI_CAST_PTR(u64,
							     buffer_desc->
							     buffer.pointer),
					       function);
		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
		goto exit;
	}

	/*
	 * Allocate a buffer for the contents of the field.
	 *
	 * If the field is larger than the current integer width, create
	 * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
	 * the use of arithmetic operators on the returned value if the
	 * field size is equal or smaller than an Integer.
	 *
	 * Note: Field.length is in bits.
	 */
	length =
	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
	if (length > acpi_gbl_integer_byte_width) {

		/* Field is too large for an Integer, create a Buffer instead */

		buffer_desc = acpi_ut_create_buffer_object(length);
		if (!buffer_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}
		buffer = buffer_desc->buffer.pointer;
	} else {
		/* Field will fit within an Integer (normal case) */

		buffer_desc = acpi_ut_create_integer_object((u64) 0);
		if (!buffer_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Read a full integer's worth; extract writes into it */
		length = acpi_gbl_integer_byte_width;
		buffer = &buffer_desc->integer.value;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
			  "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
			  obj_desc, obj_desc->common.type, buffer,
			  (u32) length));
	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
			  "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
			  obj_desc->common_field.bit_length,
			  obj_desc->common_field.start_field_bit_offset,
			  obj_desc->common_field.base_byte_offset));

	/* Lock entire transaction if requested */

	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);

	/* Read from the field */

	status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);

      exit:
	/* On failure drop the reference we created; on success hand the
	 * new object (and its reference) to the caller. */
	if (ACPI_FAILURE(status)) {
		acpi_ut_remove_reference(buffer_desc);
	} else {
		*ret_buffer_desc = buffer_desc;
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_write_data_to_field
*
* PARAMETERS: source_desc - Contains data to write
* obj_desc - The named field
* result_desc - Where the return value is returned, if any
*
* RETURN: Status
*
* DESCRIPTION: Write to a named field
*
******************************************************************************/
acpi_status
acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
			    union acpi_operand_object *obj_desc,
			    union acpi_operand_object **result_desc)
{
	acpi_status status;
	u32 length;
	void *buffer;
	union acpi_operand_object *buffer_desc;
	u32 function;

	ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);

	/* Parameter validation */

	if (!source_desc || !obj_desc) {
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}

	if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
		/*
		 * If the buffer_field arguments have not been previously evaluated,
		 * evaluate them now and save the results.
		 */
		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
			status = acpi_ds_get_buffer_field_arguments(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}
	} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
		   (obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_SMBUS
		    || obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_GSBUS
		    || obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_IPMI)) {
		/*
		 * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
		 * mechanism and handoff the buffer directly to the handler. For
		 * these address spaces, the buffer is bi-directional; on a write,
		 * return data is returned in the same buffer.
		 *
		 * Source must be a buffer of sufficient size:
		 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
		 *
		 * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
		 */
		if (source_desc->common.type != ACPI_TYPE_BUFFER) {
			ACPI_ERROR((AE_INFO,
				    "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
				    acpi_ut_get_object_type_name(source_desc)));

			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		if (obj_desc->field.region_obj->region.space_id ==
		    ACPI_ADR_SPACE_SMBUS) {
			length = ACPI_SMBUS_BUFFER_SIZE;
			function =
			    ACPI_WRITE | (obj_desc->field.attribute << 16);
		} else if (obj_desc->field.region_obj->region.space_id ==
			   ACPI_ADR_SPACE_GSBUS) {
			length = ACPI_GSBUS_BUFFER_SIZE;
			function =
			    ACPI_WRITE | (obj_desc->field.attribute << 16);
		} else {	/* IPMI */

			length = ACPI_IPMI_BUFFER_SIZE;
			function = ACPI_WRITE;
		}

		if (source_desc->buffer.length < length) {
			ACPI_ERROR((AE_INFO,
				    "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
				    length, source_desc->buffer.length));

			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
		}

		/* Create the bi-directional buffer */

		buffer_desc = acpi_ut_create_buffer_object(length);
		if (!buffer_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Copy the caller's data in; the handler may overwrite it
		 * with returned data */
		buffer = buffer_desc->buffer.pointer;
		ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length);

		/* Lock entire transaction if requested */

		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);

		/*
		 * Perform the write (returns status and perhaps data in the
		 * same buffer)
		 */
		status = acpi_ex_access_region(obj_desc, 0,
					       (u64 *) buffer, function);
		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);

		/* Ownership of buffer_desc passes to the caller */
		*result_desc = buffer_desc;
		return_ACPI_STATUS(status);
	}

	/* Get a pointer to the data to be written */

	switch (source_desc->common.type) {
	case ACPI_TYPE_INTEGER:
		buffer = &source_desc->integer.value;
		length = sizeof(source_desc->integer.value);
		break;

	case ACPI_TYPE_BUFFER:
		buffer = source_desc->buffer.pointer;
		length = source_desc->buffer.length;
		break;

	case ACPI_TYPE_STRING:
		buffer = source_desc->string.pointer;
		length = source_desc->string.length;
		break;

	default:
		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
			  "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
			  source_desc,
			  acpi_ut_get_type_name(source_desc->common.type),
			  source_desc->common.type, buffer, length));
	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
			  "FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
			  obj_desc,
			  acpi_ut_get_type_name(obj_desc->common.type),
			  obj_desc->common.type,
			  obj_desc->common_field.bit_length,
			  obj_desc->common_field.start_field_bit_offset,
			  obj_desc->common_field.base_byte_offset));

	/* Lock entire transaction if requested */

	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);

	/* Write to the field */

	status = acpi_ex_insert_into_field(obj_desc, buffer, length);
	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);

	return_ACPI_STATUS(status);
}
| gpl-2.0 |
zarboz/dlxpul_43 | drivers/acpi/acpica/exstoren.c | 5045 | 9591 |
/******************************************************************************
*
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exstoren")
/*******************************************************************************
*
* FUNCTION: acpi_ex_resolve_object
*
* PARAMETERS: source_desc_ptr - Pointer to the source object
* target_type - Current type of the target
* walk_state - Current walk state
*
* RETURN: Status, resolved object in source_desc_ptr.
*
* DESCRIPTION: Resolve an object. If the object is a reference, dereference
* it and return the actual object in the source_desc_ptr.
*
******************************************************************************/
acpi_status
acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
		       acpi_object_type target_type,
		       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *source_desc = *source_desc_ptr;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_resolve_object);

	/* Ensure we have a Target that can be stored to */

	switch (target_type) {
	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:
		/*
		 * These cases all require only Integers or values that
		 * can be converted to Integers (Strings or Buffers)
		 */
	case ACPI_TYPE_INTEGER:
	case ACPI_TYPE_STRING:
	case ACPI_TYPE_BUFFER:
		/*
		 * Stores into a Field/Region or into an Integer/Buffer/String
		 * are all essentially the same. This case handles the
		 * "interchangeable" types Integer, String, and Buffer.
		 */
		if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {

			/* Resolve a reference object first */

			status =
			    acpi_ex_resolve_to_value(source_desc_ptr,
						     walk_state);
			if (ACPI_FAILURE(status)) {
				break;
			}
			/*
			 * NOTE(review): source_desc is not refreshed from
			 * *source_desc_ptr after the resolution above, so the
			 * type checks below inspect the pre-resolution object.
			 * Confirm this matches upstream ACPICA behavior.
			 */
		}

		/* For copy_object, no further validation necessary */

		if (walk_state->opcode == AML_COPY_OP) {
			break;
		}

		/* Must have an Integer, Buffer, or String */

		if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
		    (source_desc->common.type != ACPI_TYPE_BUFFER) &&
		    (source_desc->common.type != ACPI_TYPE_STRING) &&
		    !((source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) &&
		      (source_desc->reference.class == ACPI_REFCLASS_TABLE))) {

			/* Conversion successful but still not a valid type */

			ACPI_ERROR((AE_INFO,
				    "Cannot assign type %s to %s (must be type Int/Str/Buf)",
				    acpi_ut_get_object_type_name(source_desc),
				    acpi_ut_get_type_name(target_type)));

			status = AE_AML_OPERAND_TYPE;
		}
		break;

	case ACPI_TYPE_LOCAL_ALIAS:
	case ACPI_TYPE_LOCAL_METHOD_ALIAS:
		/*
		 * All aliases should have been resolved earlier, during the
		 * operand resolution phase.
		 */
		ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
		status = AE_AML_INTERNAL;
		break;

	case ACPI_TYPE_PACKAGE:
	default:
		/*
		 * All other types than Alias and the various Fields come here,
		 * including the untyped case - ACPI_TYPE_ANY.
		 */
		break;
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_store_object_to_object
*
* PARAMETERS: source_desc - Object to store
* dest_desc - Object to receive a copy of the source
* new_desc - New object if dest_desc is obsoleted
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: "Store" an object to another object. This may include
* converting the source type to the target type (implicit
* conversion), and a copy of the value of the source to
* the target.
*
* The Assignment of an object to another (not named) object
* is handled here.
* The Source passed in will replace the current value (if any)
* with the input value.
*
* When storing into an object the data is converted to the
* target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
* This module allows destination types of Number, String,
* Buffer, and Package.
*
* Assumes parameters are already validated. NOTE: source_desc
* resolution (from a reference object) must be performed by
* the caller if necessary.
*
******************************************************************************/
acpi_status
acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
			       union acpi_operand_object *dest_desc,
			       union acpi_operand_object **new_desc,
			       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *actual_src_desc;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);

	actual_src_desc = source_desc;
	if (!dest_desc) {
		/*
		 * There is no destination object (An uninitialized node or
		 * package element), so we can simply copy the source object
		 * creating a new destination object
		 */
		status =
		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
						    walk_state);
		return_ACPI_STATUS(status);
	}

	if (source_desc->common.type != dest_desc->common.type) {
		/*
		 * The source type does not match the type of the destination.
		 * Perform the "implicit conversion" of the source to the current type
		 * of the target as per the ACPI specification.
		 *
		 * If no conversion performed, actual_src_desc = source_desc.
		 * Otherwise, actual_src_desc is a temporary object to hold the
		 * converted object; it is deleted at the bottom of this function.
		 */
		status = acpi_ex_convert_to_target_type(dest_desc->common.type,
							source_desc,
							&actual_src_desc,
							walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		if (source_desc == actual_src_desc) {
			/*
			 * No conversion was performed. Return the source_desc as the
			 * new object.
			 */
			*new_desc = source_desc;
			return_ACPI_STATUS(AE_OK);
		}
	}

	/*
	 * We now have two objects of identical types, and we can perform a
	 * copy of the *value* of the source object.
	 */
	switch (dest_desc->common.type) {
	case ACPI_TYPE_INTEGER:

		dest_desc->integer.value = actual_src_desc->integer.value;

		/* Truncate value if we are executing from a 32-bit ACPI table */

		acpi_ex_truncate_for32bit_table(dest_desc);
		break;

	case ACPI_TYPE_STRING:

		status =
		    acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
		break;

	case ACPI_TYPE_BUFFER:

		status =
		    acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		/*
		 * Deep copy of the package. Note that dest_desc is passed by
		 * reference and may be replaced; the (possibly new) object is
		 * what gets returned through *new_desc below.
		 */
		status =
		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
						    walk_state);
		break;

	default:
		/*
		 * All other types come here.
		 */
		ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
			      acpi_ut_get_object_type_name(dest_desc)));

		status = AE_NOT_IMPLEMENTED;
		break;
	}

	if (actual_src_desc != source_desc) {

		/* Delete the intermediate (temporary) source object */

		acpi_ut_remove_reference(actual_src_desc);
	}

	*new_desc = dest_desc;
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
art1p/android_kernel_lge_omap4-common | arch/arm/plat-s3c24xx/setup-ts.c | 8885 | 1085 | /* linux/arch/arm/plat-s3c24xx/setup-ts.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Based on S3C24XX setup for i2c device
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/gpio.h>
struct platform_device; /* don't need the contents */
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
/**
* s3c24xx_ts_cfg_gpio - configure gpio for s3c2410 systems
*
* Configure the GPIO for the S3C2410 system, where we have external FETs
* connected to the device (later systems such as the S3C2440 integrate
* these into the device).
*/
void s3c24xx_ts_cfg_gpio(struct platform_device *dev)
{
s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
}
| gpl-2.0 |
Feche/android_kernel_motorola_olympus_oc | arch/xtensa/platforms/s6105/setup.c | 12469 | 1583 | /*
* s6105 control routines
*
* Copyright (c) 2009 emlix GmbH
*/
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <asm/bootparam.h>
#include <variant/hardware.h>
#include <variant/gpio.h>
#include <platform/gpio.h>
/* Mask interrupts and spin forever; the board has no halt mechanism here. */
void platform_halt(void)
{
	local_irq_disable();
	for (;;)
		;
}
/* Power-off hook: no power-control hardware is driven here, just halt. */
void platform_power_off(void)
{
	platform_halt();
}
/* Restart hook: no reset hardware is driven here, fall back to halting. */
void platform_restart(void)
{
	platform_halt();
}
/*
 * Early board setup: select 125 MHz PLL outputs for GMAC/GMII, then
 * ungate and enable the SB and GMAC blocks.  @cmdline is not consumed.
 */
void __init platform_setup(char **cmdline)
{
	unsigned long reg;

	/* Route 125 MHz to both the GMAC and GMII clock selectors. */
	reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
	reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
		S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
	reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
		S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
	writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);

	/* Ungate the SB and GMAC block clocks first... */
	reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
	reg &= ~(1 << S6_GREG1_BLOCK_SB);
	reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
	writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);

	/* ...then enable the blocks themselves. */
	reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
	reg |= 1 << S6_GREG1_BLOCK_SB;
	reg |= 1 << S6_GREG1_BLOCK_GMAC;
	writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);

	printk(KERN_NOTICE "S6105 on Stretch S6000 - "
		"Copyright (C) 2009 emlix GmbH <info@emlix.com>\n");
}
/*
 * Late platform init: bring up GPIO bank 0 and claim the two LED1 lines.
 * @first (boot-parameter tag list) is not consumed here.
 */
void __init platform_init(bp_tag_t *first)
{
	s6_gpio_init(0);
	/* NOTE(review): gpio_request() return values are ignored — confirm
	 * these lines cannot already be claimed at this point in boot. */
	gpio_request(GPIO_LED1_NGREEN, "led1_green");
	gpio_request(GPIO_LED1_RED, "led1_red");
	/* "NGREEN" suggests the green LED is active-low, so driving 1
	 * would turn it off — TODO confirm polarity.  The red LED's
	 * direction is first configured in platform_heartbeat(). */
	gpio_direction_output(GPIO_LED1_NGREEN, 1);
}
/*
 * Periodic heartbeat callback.  c counts invocations; the red LED is
 * only updated when bits 0-3 and 6 of the incremented counter are all
 * zero (mask 0x4F), and is then driven with the inverse of bit 4,
 * producing a slow blink pattern.
 */
void platform_heartbeat(void)
{
	static unsigned int c;

	if (!(++c & 0x4F))
		gpio_direction_output(GPIO_LED1_RED, !(c & 0x10));
}
| gpl-2.0 |
andr7e/rk3188_tablet_jb | kernel/arch/sh/boards/mach-se/7780/irq.c | 13237 | 1962 | /*
* linux/arch/sh/boards/se/7780/irq.c
*
* Copyright (C) 2006,2007 Nobuhiro Iwamatsu
*
* Hitachi UL SolutionEngine 7780 Support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <mach-se/mach/se7780.h>
#define INTC_BASE 0xffd00000
#define INTC_ICR1 (INTC_BASE+0x1c)
/*
* Initialize IRQ setting
*/
void __init init_se7780_IRQ(void)
{
	/* enable all interrupt at FPGA */
	__raw_writew(0, FPGA_INTMSK1);
	/* mask SM501 interrupt (read-modify-write sets only mask bit 1) */
	__raw_writew((__raw_readw(FPGA_INTMSK1) | 0x0002), FPGA_INTMSK1);
	/* enable all interrupt at FPGA */
	__raw_writew(0, FPGA_INTMSK2);

	/* set FPGA INTSEL register: route each on-board interrupt source
	 * to its CPU IRQ pin */
	/* FPGA + 0x06 */
	__raw_writew( ((IRQPIN_SM501 << IRQPOS_SM501) |
		(IRQPIN_SMC91CX << IRQPOS_SMC91CX)), FPGA_INTSEL1);
	/* FPGA + 0x08 */
	__raw_writew(((IRQPIN_EXTINT4 << IRQPOS_EXTINT4) |
		(IRQPIN_EXTINT3 << IRQPOS_EXTINT3) |
		(IRQPIN_EXTINT2 << IRQPOS_EXTINT2) |
		(IRQPIN_EXTINT1 << IRQPOS_EXTINT1)), FPGA_INTSEL2);
	/* FPGA + 0x0A */
	__raw_writew((IRQPIN_PCCPW << IRQPOS_PCCPW), FPGA_INTSEL3);

	plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-7 */

	/* ICR1: detect low level(for 2ndcut) */
	__raw_writel(0xAAAA0000, INTC_ICR1);

	/*
	 * FPGA PCISEL register initialize
	 *
	 * CPU  || SLOT1 | SLOT2 | S-ATA | USB
	 * -------------------------------------
	 * INTA || INTA  | INTD  |  --   | INTB
	 * -------------------------------------
	 * INTB || INTB  | INTA  |  --   | INTC
	 * -------------------------------------
	 * INTC || INTC  | INTB  | INTA  |  --
	 * -------------------------------------
	 * INTD || INTD  | INTC  |  --   | INTA
	 * -------------------------------------
	 */
	__raw_writew(0x0013, FPGA_PCI_INTSEL1);
	__raw_writew(0xE402, FPGA_PCI_INTSEL2);
}
| gpl-2.0 |
zaidshb/semc-kernel-qsd8k | arch/mips/mm/highmem.c | 182 | 2659 | #include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
/*
 * Map a page for long-lived kernel access.  May sleep, so it must not
 * be used in atomic context.  Lowmem pages already have a permanent
 * linear mapping and are returned directly.
 */
void *__kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	if (PageHighMem(page)) {
		vaddr = kmap_high(page);
		flush_tlb_one((unsigned long)vaddr);
	} else {
		vaddr = page_address(page);
	}
	return vaddr;
}
/*
 * Release a mapping obtained with __kmap().  Must not be called from
 * interrupt context (enforced below).  Lowmem pages were never mapped
 * through the kmap pool, so there is nothing to undo for them.
 */
void __kunmap(struct page *page)
{
	/* Idiomatic form of "if (in_interrupt()) BUG();" */
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
* However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	/* Lowmem pages are permanently mapped; return the linear address. */
	if (!PageHighMem(page))
		return page_address(page);

	/* Each (type, CPU) pair owns one dedicated fixmap slot. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must have been torn down by the previous kunmap_atomic. */
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	/* Install the mapping; the slot's vaddr is per-CPU, so a local
	 * TLB flush is sufficient. */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/* Lowmem addresses came straight from page_address(): nothing
	 * was mapped, only the pagefault nesting must be undone. */
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	/* The address must be exactly the fixmap slot owned by this
	 * (type, CPU) pair. */
	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif
	/* Without CONFIG_DEBUG_HIGHMEM the stale mapping is simply left
	 * in place to be overwritten by the next kmap_atomic of the same
	 * slot; kvaddr and type are then unused. */
	pagefault_enable();
}
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	/* Pick the fixmap slot owned by this (type, CPU) pair. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	/*
	 * The slot's virtual address is per-CPU, so only this CPU's TLB
	 * can hold a stale translation: a local flush is sufficient and
	 * matches __kmap_atomic() above (the previous global
	 * flush_tlb_one() was unnecessary cross-CPU traffic on SMP).
	 */
	local_flush_tlb_one(vaddr);

	return (void *)vaddr;
}
/*
 * Translate an address returned by kmap_atomic() (or page_address())
 * back to its struct page.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *slot_pte;

	/* Below the fixmap area this is a plain lowmem linear address. */
	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	/* Otherwise resolve the atomic-kmap slot through its pte. */
	slot_pte = kmap_pte - (virt_to_fix(vaddr) - FIX_KMAP_BEGIN);
	return pte_page(*slot_pte);
}
EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
| gpl-2.0 |
paulashford1975/clearfog-388-kernel-4.5.4 | sound/pci/hda/patch_ca0132.c | 182 | 127378 | /*
* HD audio interface patch for Creative CA0132 chip
*
* Copyright (c) 2011, Creative Technology Ltd.
*
* Based on patch_ca0110.c
* Copyright (c) 2008 Takashi Iwai <tiwai@suse.de>
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
#include "ca0132_regs.h"
/* Enable this to see controls for tuning purpose. */
/*#define ENABLE_TUNING_CONTROLS*/
#define FLOAT_ZERO 0x00000000
#define FLOAT_ONE 0x3f800000
#define FLOAT_TWO 0x40000000
#define FLOAT_MINUS_5 0xc0a00000
#define UNSOL_TAG_DSP 0x16
#define DSP_DMA_WRITE_BUFLEN_INIT (1UL<<18)
#define DSP_DMA_WRITE_BUFLEN_OVLY (1UL<<15)
#define DMA_TRANSFER_FRAME_SIZE_NWORDS 8
#define DMA_TRANSFER_MAX_FRAME_SIZE_NWORDS 32
#define DMA_OVERLAY_FRAME_SIZE_NWORDS 2
#define MASTERCONTROL 0x80
#define MASTERCONTROL_ALLOC_DMA_CHAN 10
#define MASTERCONTROL_QUERY_SPEAKER_EQ_ADDRESS 60
#define WIDGET_CHIP_CTRL 0x15
#define WIDGET_DSP_CTRL 0x16
#define MEM_CONNID_MICIN1 3
#define MEM_CONNID_MICIN2 5
#define MEM_CONNID_MICOUT1 12
#define MEM_CONNID_MICOUT2 14
#define MEM_CONNID_WUH 10
#define MEM_CONNID_DSP 16
#define MEM_CONNID_DMIC 100
#define SCP_SET 0
#define SCP_GET 1
#define EFX_FILE "ctefx.bin"
#ifdef CONFIG_SND_HDA_CODEC_CA0132_DSP
MODULE_FIRMWARE(EFX_FILE);
#endif
static char *dirstr[2] = { "Playback", "Capture" };
enum {
SPEAKER_OUT,
HEADPHONE_OUT
};
enum {
DIGITAL_MIC,
LINE_MIC_IN
};
enum {
#define VNODE_START_NID 0x80
VNID_SPK = VNODE_START_NID, /* Speaker vnid */
VNID_MIC,
VNID_HP_SEL,
VNID_AMIC1_SEL,
VNID_HP_ASEL,
VNID_AMIC1_ASEL,
VNODE_END_NID,
#define VNODES_COUNT (VNODE_END_NID - VNODE_START_NID)
#define EFFECT_START_NID 0x90
#define OUT_EFFECT_START_NID EFFECT_START_NID
SURROUND = OUT_EFFECT_START_NID,
CRYSTALIZER,
DIALOG_PLUS,
SMART_VOLUME,
X_BASS,
EQUALIZER,
OUT_EFFECT_END_NID,
#define OUT_EFFECTS_COUNT (OUT_EFFECT_END_NID - OUT_EFFECT_START_NID)
#define IN_EFFECT_START_NID OUT_EFFECT_END_NID
ECHO_CANCELLATION = IN_EFFECT_START_NID,
VOICE_FOCUS,
MIC_SVM,
NOISE_REDUCTION,
IN_EFFECT_END_NID,
#define IN_EFFECTS_COUNT (IN_EFFECT_END_NID - IN_EFFECT_START_NID)
VOICEFX = IN_EFFECT_END_NID,
PLAY_ENHANCEMENT,
CRYSTAL_VOICE,
EFFECT_END_NID
#define EFFECTS_COUNT (EFFECT_END_NID - EFFECT_START_NID)
};
/* Effects values size*/
#define EFFECT_VALS_MAX_COUNT 12
/* Latency introduced by DSP blocks in milliseconds. */
#define DSP_CAPTURE_INIT_LATENCY 0
#define DSP_CRYSTAL_VOICE_LATENCY 124
#define DSP_PLAYBACK_INIT_LATENCY 13
#define DSP_PLAY_ENHANCEMENT_LATENCY 30
#define DSP_SPEAKER_OUT_LATENCY 7
/* Descriptor for one CA0132 DSP effect exposed as a mixer control. */
struct ct_effect {
	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];	/* mixer control name */
	hda_nid_t nid;	/* virtual widget NID from the EFFECT_START_NID enum */
	int mid; /*effect module ID*/
	int reqs[EFFECT_VALS_MAX_COUNT]; /*effect module request*/
	int direct; /* 0:output; 1:input*/
	int params; /* number of default non-on/off params */
	/*effect default values, 1st is on/off. */
	unsigned int def_vals[EFFECT_VALS_MAX_COUNT];
};
#define EFX_DIR_OUT 0
#define EFX_DIR_IN 1
static struct ct_effect ca0132_effects[EFFECTS_COUNT] = {
{ .name = "Surround",
.nid = SURROUND,
.mid = 0x96,
.reqs = {0, 1},
.direct = EFX_DIR_OUT,
.params = 1,
.def_vals = {0x3F800000, 0x3F2B851F}
},
{ .name = "Crystalizer",
.nid = CRYSTALIZER,
.mid = 0x96,
.reqs = {7, 8},
.direct = EFX_DIR_OUT,
.params = 1,
.def_vals = {0x3F800000, 0x3F266666}
},
{ .name = "Dialog Plus",
.nid = DIALOG_PLUS,
.mid = 0x96,
.reqs = {2, 3},
.direct = EFX_DIR_OUT,
.params = 1,
.def_vals = {0x00000000, 0x3F000000}
},
{ .name = "Smart Volume",
.nid = SMART_VOLUME,
.mid = 0x96,
.reqs = {4, 5, 6},
.direct = EFX_DIR_OUT,
.params = 2,
.def_vals = {0x3F800000, 0x3F3D70A4, 0x00000000}
},
{ .name = "X-Bass",
.nid = X_BASS,
.mid = 0x96,
.reqs = {24, 23, 25},
.direct = EFX_DIR_OUT,
.params = 2,
.def_vals = {0x3F800000, 0x42A00000, 0x3F000000}
},
{ .name = "Equalizer",
.nid = EQUALIZER,
.mid = 0x96,
.reqs = {9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20},
.direct = EFX_DIR_OUT,
.params = 11,
.def_vals = {0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000}
},
{ .name = "Echo Cancellation",
.nid = ECHO_CANCELLATION,
.mid = 0x95,
.reqs = {0, 1, 2, 3},
.direct = EFX_DIR_IN,
.params = 3,
.def_vals = {0x00000000, 0x3F3A9692, 0x00000000, 0x00000000}
},
{ .name = "Voice Focus",
.nid = VOICE_FOCUS,
.mid = 0x95,
.reqs = {6, 7, 8, 9},
.direct = EFX_DIR_IN,
.params = 3,
.def_vals = {0x3F800000, 0x3D7DF3B6, 0x41F00000, 0x41F00000}
},
{ .name = "Mic SVM",
.nid = MIC_SVM,
.mid = 0x95,
.reqs = {44, 45},
.direct = EFX_DIR_IN,
.params = 1,
.def_vals = {0x00000000, 0x3F3D70A4}
},
{ .name = "Noise Reduction",
.nid = NOISE_REDUCTION,
.mid = 0x95,
.reqs = {4, 5},
.direct = EFX_DIR_IN,
.params = 1,
.def_vals = {0x3F800000, 0x3F000000}
},
{ .name = "VoiceFX",
.nid = VOICEFX,
.mid = 0x95,
.reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18},
.direct = EFX_DIR_IN,
.params = 8,
.def_vals = {0x00000000, 0x43C80000, 0x44AF0000, 0x44FA0000,
0x3F800000, 0x3F800000, 0x3F800000, 0x00000000,
0x00000000}
}
};
/* Tuning controls */
#ifdef ENABLE_TUNING_CONTROLS
enum {
#define TUNING_CTL_START_NID 0xC0
WEDGE_ANGLE = TUNING_CTL_START_NID,
SVM_LEVEL,
EQUALIZER_BAND_0,
EQUALIZER_BAND_1,
EQUALIZER_BAND_2,
EQUALIZER_BAND_3,
EQUALIZER_BAND_4,
EQUALIZER_BAND_5,
EQUALIZER_BAND_6,
EQUALIZER_BAND_7,
EQUALIZER_BAND_8,
EQUALIZER_BAND_9,
TUNING_CTL_END_NID
#define TUNING_CTLS_COUNT (TUNING_CTL_END_NID - TUNING_CTL_START_NID)
};
struct ct_tuning_ctl {
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
hda_nid_t parent_nid;
hda_nid_t nid;
int mid; /*effect module ID*/
int req; /*effect module request*/
int direct; /* 0:output; 1:input*/
unsigned int def_val;/*effect default values*/
};
static struct ct_tuning_ctl ca0132_tuning_ctls[] = {
{ .name = "Wedge Angle",
.parent_nid = VOICE_FOCUS,
.nid = WEDGE_ANGLE,
.mid = 0x95,
.req = 8,
.direct = EFX_DIR_IN,
.def_val = 0x41F00000
},
{ .name = "SVM Level",
.parent_nid = MIC_SVM,
.nid = SVM_LEVEL,
.mid = 0x95,
.req = 45,
.direct = EFX_DIR_IN,
.def_val = 0x3F3D70A4
},
{ .name = "EQ Band0",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_0,
.mid = 0x96,
.req = 11,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band1",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_1,
.mid = 0x96,
.req = 12,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band2",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_2,
.mid = 0x96,
.req = 13,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band3",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_3,
.mid = 0x96,
.req = 14,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band4",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_4,
.mid = 0x96,
.req = 15,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band5",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_5,
.mid = 0x96,
.req = 16,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band6",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_6,
.mid = 0x96,
.req = 17,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band7",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_7,
.mid = 0x96,
.req = 18,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band8",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_8,
.mid = 0x96,
.req = 19,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
},
{ .name = "EQ Band9",
.parent_nid = EQUALIZER,
.nid = EQUALIZER_BAND_9,
.mid = 0x96,
.req = 20,
.direct = EFX_DIR_OUT,
.def_val = 0x00000000
}
};
#endif
/* Voice FX Presets */
#define VOICEFX_MAX_PARAM_COUNT 9
struct ct_voicefx {
char *name;
hda_nid_t nid;
int mid;
int reqs[VOICEFX_MAX_PARAM_COUNT]; /*effect module request*/
};
struct ct_voicefx_preset {
char *name; /*preset name*/
unsigned int vals[VOICEFX_MAX_PARAM_COUNT];
};
static struct ct_voicefx ca0132_voicefx = {
.name = "VoiceFX Capture Switch",
.nid = VOICEFX,
.mid = 0x95,
.reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18}
};
static struct ct_voicefx_preset ca0132_voicefx_presets[] = {
{ .name = "Neutral",
.vals = { 0x00000000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x3F800000, 0x3F800000,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Female2Male",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x3F19999A, 0x3F866666,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Male2Female",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x450AC000, 0x4017AE14, 0x3F6B851F,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "ScrappyKid",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x40400000, 0x3F28F5C3,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Elderly",
.vals = { 0x3F800000, 0x44324000, 0x44BB8000,
0x44E10000, 0x3FB33333, 0x3FB9999A,
0x3F800000, 0x3E3A2E43, 0x00000000 }
},
{ .name = "Orc",
.vals = { 0x3F800000, 0x43EA0000, 0x44A52000,
0x45098000, 0x3F266666, 0x3FC00000,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Elf",
.vals = { 0x3F800000, 0x43C70000, 0x44AE6000,
0x45193000, 0x3F8E147B, 0x3F75C28F,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Dwarf",
.vals = { 0x3F800000, 0x43930000, 0x44BEE000,
0x45007000, 0x3F451EB8, 0x3F7851EC,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "AlienBrute",
.vals = { 0x3F800000, 0x43BFC5AC, 0x44B28FDF,
0x451F6000, 0x3F266666, 0x3FA7D945,
0x3F800000, 0x3CF5C28F, 0x00000000 }
},
{ .name = "Robot",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x3FB2718B, 0x3F800000,
0xBC07010E, 0x00000000, 0x00000000 }
},
{ .name = "Marine",
.vals = { 0x3F800000, 0x43C20000, 0x44906000,
0x44E70000, 0x3F4CCCCD, 0x3F8A3D71,
0x3F0A3D71, 0x00000000, 0x00000000 }
},
{ .name = "Emo",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x3F800000, 0x3F800000,
0x3E4CCCCD, 0x00000000, 0x00000000 }
},
{ .name = "DeepVoice",
.vals = { 0x3F800000, 0x43A9C5AC, 0x44AA4FDF,
0x44FFC000, 0x3EDBB56F, 0x3F99C4CA,
0x3F800000, 0x00000000, 0x00000000 }
},
{ .name = "Munchkin",
.vals = { 0x3F800000, 0x43C80000, 0x44AF0000,
0x44FA0000, 0x3F800000, 0x3F1A043C,
0x3F800000, 0x00000000, 0x00000000 }
}
};
enum hda_cmd_vendor_io {
/* for DspIO node */
VENDOR_DSPIO_SCP_WRITE_DATA_LOW = 0x000,
VENDOR_DSPIO_SCP_WRITE_DATA_HIGH = 0x100,
VENDOR_DSPIO_STATUS = 0xF01,
VENDOR_DSPIO_SCP_POST_READ_DATA = 0x702,
VENDOR_DSPIO_SCP_READ_DATA = 0xF02,
VENDOR_DSPIO_DSP_INIT = 0x703,
VENDOR_DSPIO_SCP_POST_COUNT_QUERY = 0x704,
VENDOR_DSPIO_SCP_READ_COUNT = 0xF04,
/* for ChipIO node */
VENDOR_CHIPIO_ADDRESS_LOW = 0x000,
VENDOR_CHIPIO_ADDRESS_HIGH = 0x100,
VENDOR_CHIPIO_STREAM_FORMAT = 0x200,
VENDOR_CHIPIO_DATA_LOW = 0x300,
VENDOR_CHIPIO_DATA_HIGH = 0x400,
VENDOR_CHIPIO_GET_PARAMETER = 0xF00,
VENDOR_CHIPIO_STATUS = 0xF01,
VENDOR_CHIPIO_HIC_POST_READ = 0x702,
VENDOR_CHIPIO_HIC_READ_DATA = 0xF03,
VENDOR_CHIPIO_8051_DATA_WRITE = 0x707,
VENDOR_CHIPIO_8051_DATA_READ = 0xF07,
VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE = 0x70A,
VENDOR_CHIPIO_CT_EXTENSIONS_GET = 0xF0A,
VENDOR_CHIPIO_PLL_PMU_WRITE = 0x70C,
VENDOR_CHIPIO_PLL_PMU_READ = 0xF0C,
VENDOR_CHIPIO_8051_ADDRESS_LOW = 0x70D,
VENDOR_CHIPIO_8051_ADDRESS_HIGH = 0x70E,
VENDOR_CHIPIO_FLAG_SET = 0x70F,
VENDOR_CHIPIO_FLAGS_GET = 0xF0F,
VENDOR_CHIPIO_PARAM_SET = 0x710,
VENDOR_CHIPIO_PARAM_GET = 0xF10,
VENDOR_CHIPIO_PORT_ALLOC_CONFIG_SET = 0x711,
VENDOR_CHIPIO_PORT_ALLOC_SET = 0x712,
VENDOR_CHIPIO_PORT_ALLOC_GET = 0xF12,
VENDOR_CHIPIO_PORT_FREE_SET = 0x713,
VENDOR_CHIPIO_PARAM_EX_ID_GET = 0xF17,
VENDOR_CHIPIO_PARAM_EX_ID_SET = 0x717,
VENDOR_CHIPIO_PARAM_EX_VALUE_GET = 0xF18,
VENDOR_CHIPIO_PARAM_EX_VALUE_SET = 0x718,
VENDOR_CHIPIO_DMIC_CTL_SET = 0x788,
VENDOR_CHIPIO_DMIC_CTL_GET = 0xF88,
VENDOR_CHIPIO_DMIC_PIN_SET = 0x789,
VENDOR_CHIPIO_DMIC_PIN_GET = 0xF89,
VENDOR_CHIPIO_DMIC_MCLK_SET = 0x78A,
VENDOR_CHIPIO_DMIC_MCLK_GET = 0xF8A,
VENDOR_CHIPIO_EAPD_SEL_SET = 0x78D
};
/*
* Control flag IDs
*/
enum control_flag_id {
/* Connection manager stream setup is bypassed/enabled */
CONTROL_FLAG_C_MGR = 0,
/* DSP DMA is bypassed/enabled */
CONTROL_FLAG_DMA = 1,
/* 8051 'idle' mode is disabled/enabled */
CONTROL_FLAG_IDLE_ENABLE = 2,
/* Tracker for the SPDIF-in path is bypassed/enabled */
CONTROL_FLAG_TRACKER = 3,
/* DigitalOut to Spdif2Out connection is disabled/enabled */
CONTROL_FLAG_SPDIF2OUT = 4,
/* Digital Microphone is disabled/enabled */
CONTROL_FLAG_DMIC = 5,
/* ADC_B rate is 48 kHz/96 kHz */
CONTROL_FLAG_ADC_B_96KHZ = 6,
/* ADC_C rate is 48 kHz/96 kHz */
CONTROL_FLAG_ADC_C_96KHZ = 7,
/* DAC rate is 48 kHz/96 kHz (affects all DACs) */
CONTROL_FLAG_DAC_96KHZ = 8,
/* DSP rate is 48 kHz/96 kHz */
CONTROL_FLAG_DSP_96KHZ = 9,
/* SRC clock is 98 MHz/196 MHz (196 MHz forces rate to 96 KHz) */
CONTROL_FLAG_SRC_CLOCK_196MHZ = 10,
/* SRC rate is 48 kHz/96 kHz (48 kHz disabled when clock is 196 MHz) */
CONTROL_FLAG_SRC_RATE_96KHZ = 11,
/* Decode Loop (DSP->SRC->DSP) is disabled/enabled */
CONTROL_FLAG_DECODE_LOOP = 12,
/* De-emphasis filter on DAC-1 disabled/enabled */
CONTROL_FLAG_DAC1_DEEMPHASIS = 13,
/* De-emphasis filter on DAC-2 disabled/enabled */
CONTROL_FLAG_DAC2_DEEMPHASIS = 14,
/* De-emphasis filter on DAC-3 disabled/enabled */
CONTROL_FLAG_DAC3_DEEMPHASIS = 15,
/* High-pass filter on ADC_B disabled/enabled */
CONTROL_FLAG_ADC_B_HIGH_PASS = 16,
/* High-pass filter on ADC_C disabled/enabled */
CONTROL_FLAG_ADC_C_HIGH_PASS = 17,
/* Common mode on Port_A disabled/enabled */
CONTROL_FLAG_PORT_A_COMMON_MODE = 18,
/* Common mode on Port_D disabled/enabled */
CONTROL_FLAG_PORT_D_COMMON_MODE = 19,
/* Impedance for ramp generator on Port_A 16 Ohm/10K Ohm */
CONTROL_FLAG_PORT_A_10KOHM_LOAD = 20,
/* Impedance for ramp generator on Port_D, 16 Ohm/10K Ohm */
CONTROL_FLAG_PORT_D_10KOHM_LOAD = 21,
/* ASI rate is 48kHz/96kHz */
CONTROL_FLAG_ASI_96KHZ = 22,
/* DAC power settings able to control attached ports no/yes */
CONTROL_FLAG_DACS_CONTROL_PORTS = 23,
/* Clock Stop OK reporting is disabled/enabled */
CONTROL_FLAG_CONTROL_STOP_OK_ENABLE = 24,
/* Number of control flags */
CONTROL_FLAGS_MAX = (CONTROL_FLAG_CONTROL_STOP_OK_ENABLE+1)
};
/*
* Control parameter IDs
*/
enum control_param_id {
/* 0: None, 1: Mic1In*/
CONTROL_PARAM_VIP_SOURCE = 1,
/* 0: force HDA, 1: allow DSP if HDA Spdif1Out stream is idle */
CONTROL_PARAM_SPDIF1_SOURCE = 2,
/* Port A output stage gain setting to use when 16 Ohm output
* impedance is selected*/
CONTROL_PARAM_PORTA_160OHM_GAIN = 8,
/* Port D output stage gain setting to use when 16 Ohm output
* impedance is selected*/
CONTROL_PARAM_PORTD_160OHM_GAIN = 10,
/* Stream Control */
/* Select stream with the given ID */
CONTROL_PARAM_STREAM_ID = 24,
/* Source connection point for the selected stream */
CONTROL_PARAM_STREAM_SOURCE_CONN_POINT = 25,
/* Destination connection point for the selected stream */
CONTROL_PARAM_STREAM_DEST_CONN_POINT = 26,
/* Number of audio channels in the selected stream */
CONTROL_PARAM_STREAMS_CHANNELS = 27,
/*Enable control for the selected stream */
CONTROL_PARAM_STREAM_CONTROL = 28,
/* Connection Point Control */
/* Select connection point with the given ID */
CONTROL_PARAM_CONN_POINT_ID = 29,
/* Connection point sample rate */
CONTROL_PARAM_CONN_POINT_SAMPLE_RATE = 30,
/* Node Control */
/* Select HDA node with the given ID */
CONTROL_PARAM_NODE_ID = 31
};
/*
* Dsp Io Status codes
*/
enum hda_vendor_status_dspio {
/* Success */
VENDOR_STATUS_DSPIO_OK = 0x00,
/* Busy, unable to accept new command, the host must retry */
VENDOR_STATUS_DSPIO_BUSY = 0x01,
/* SCP command queue is full */
VENDOR_STATUS_DSPIO_SCP_COMMAND_QUEUE_FULL = 0x02,
/* SCP response queue is empty */
VENDOR_STATUS_DSPIO_SCP_RESPONSE_QUEUE_EMPTY = 0x03
};
/*
* Chip Io Status codes
*/
enum hda_vendor_status_chipio {
/* Success */
VENDOR_STATUS_CHIPIO_OK = 0x00,
/* Busy, unable to accept new command, the host must retry */
VENDOR_STATUS_CHIPIO_BUSY = 0x01
};
/*
* CA0132 sample rate
*/
enum ca0132_sample_rate {
SR_6_000 = 0x00,
SR_8_000 = 0x01,
SR_9_600 = 0x02,
SR_11_025 = 0x03,
SR_16_000 = 0x04,
SR_22_050 = 0x05,
SR_24_000 = 0x06,
SR_32_000 = 0x07,
SR_44_100 = 0x08,
SR_48_000 = 0x09,
SR_88_200 = 0x0A,
SR_96_000 = 0x0B,
SR_144_000 = 0x0C,
SR_176_400 = 0x0D,
SR_192_000 = 0x0E,
SR_384_000 = 0x0F,
SR_COUNT = 0x10,
SR_RATE_UNKNOWN = 0x1F
};
enum dsp_download_state {
DSP_DOWNLOAD_FAILED = -1,
DSP_DOWNLOAD_INIT = 0,
DSP_DOWNLOADING = 1,
DSP_DOWNLOADED = 2
};
/* retrieve parameters from hda format */
#define get_hdafmt_chs(fmt) (fmt & 0xf)
#define get_hdafmt_bits(fmt) ((fmt >> 4) & 0x7)
#define get_hdafmt_rate(fmt) ((fmt >> 8) & 0x7f)
#define get_hdafmt_type(fmt) ((fmt >> 15) & 0x1)
/*
 * CA0132 specific
 */
/* Per-codec driver state for the CA0132. */
struct ca0132_spec {
	struct snd_kcontrol_new *mixers[5];
	unsigned int num_mixers;
	const struct hda_verb *base_init_verbs;
	const struct hda_verb *base_exit_verbs;
	const struct hda_verb *chip_init_verbs;
	struct hda_verb *spec_init_verbs;
	struct auto_pin_cfg autocfg;

	/* Nodes configurations */
	struct hda_multi_out multiout;
	hda_nid_t out_pins[AUTO_CFG_MAX_OUTS];
	hda_nid_t dacs[AUTO_CFG_MAX_OUTS];
	unsigned int num_outputs;
	hda_nid_t input_pins[AUTO_PIN_LAST];
	hda_nid_t adcs[AUTO_PIN_LAST];
	hda_nid_t dig_out;
	hda_nid_t dig_in;
	unsigned int num_inputs;
	hda_nid_t shared_mic_nid;
	hda_nid_t shared_out_nid;
	hda_nid_t unsol_tag_hp;
	hda_nid_t unsol_tag_amic1;

	/* chip access */
	struct mutex chipio_mutex; /* chip access mutex */
	u32 curr_chip_addx;	/* last address written to the ChipIO widget;
				 * ~0 = unknown (forces a re-write) */

	/* DSP download related */
	enum dsp_download_state dsp_state;
	unsigned int dsp_stream_id;
	unsigned int wait_scp;		/* nonzero while an SCP reply is awaited */
	unsigned int wait_scp_header;	/* header the reply must match */
	unsigned int wait_num_data;	/* expected number of reply words */
	unsigned int scp_resp_header;	/* header of the received reply */
	unsigned int scp_resp_data[4];	/* received reply payload */
	unsigned int scp_resp_count;	/* number of reply words received */

	/* mixer and effects related */
	unsigned char dmic_ctl;
	int cur_out_type;
	int cur_mic_type;
	long vnode_lvol[VNODES_COUNT];
	long vnode_rvol[VNODES_COUNT];
	long vnode_lswitch[VNODES_COUNT];
	long vnode_rswitch[VNODES_COUNT];
	long effects_switch[EFFECTS_COUNT];
	long voicefx_val;
	long cur_mic_boost;

	struct hda_codec *codec;
	struct delayed_work unsol_hp_work;
	int quirk;	/* matched entry from ca0132_quirks[] */

#ifdef ENABLE_TUNING_CONTROLS
	long cur_ctl_vals[TUNING_CTLS_COUNT];
#endif
};
/*
 * CA0132 quirks table
 * Identifiers stored in spec->quirk, selected via ca0132_quirks[].
 */
enum {
	QUIRK_NONE,
	QUIRK_ALIENWARE,
};
/*
 * Pin default-config overrides for Alienware machines (QUIRK_ALIENWARE).
 * Presumably applied by fixup code elsewhere in this file — confirm there.
 */
static const struct hda_pintbl alienware_pincfgs[] = {
	{ 0x0b, 0x90170110 }, /* Builtin Speaker */
	{ 0x0c, 0x411111f0 }, /* N/A */
	{ 0x0d, 0x411111f0 }, /* N/A */
	{ 0x0e, 0x411111f0 }, /* N/A */
	{ 0x0f, 0x0321101f }, /* HP */
	{ 0x10, 0x411111f0 }, /* Headset?  disabled for now */
	{ 0x11, 0x03a11021 }, /* Mic */
	{ 0x12, 0xd5a30140 }, /* Builtin Mic */
	{ 0x13, 0x411111f0 }, /* N/A */
	{ 0x18, 0x411111f0 }, /* N/A */
	{}
};
/* PCI subsystem-ID to quirk mapping. */
static const struct snd_pci_quirk ca0132_quirks[] = {
	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
	SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
	{}
};
/*
 * CA0132 codec access
 */
/*
 * Send a verb to the codec and store the response in *res.
 * Returns 0 on success, -1 when the read returned the error marker.
 * Declared int (was unsigned int): the value is a signed status and every
 * caller treats it as such.
 */
static int codec_send_command(struct hda_codec *codec, hda_nid_t nid,
			      unsigned int verb, unsigned int parm,
			      unsigned int *res)
{
	unsigned int response;

	response = snd_hda_codec_read(codec, nid, 0, verb, parm);
	*res = response;

	/* snd_hda_codec_read() reports failure as all-ones */
	return (response == -1) ? -1 : 0;
}
/* Program the stream format of the given converter node. */
static int codec_set_converter_format(struct hda_codec *codec, hda_nid_t nid,
				      unsigned short converter_format,
				      unsigned int *res)
{
	unsigned int fmt = converter_format & 0xffff;

	return codec_send_command(codec, nid, VENDOR_CHIPIO_STREAM_FORMAT,
				  fmt, res);
}
/* Bind a converter node to a stream id and channel. */
static int codec_set_converter_stream_channel(struct hda_codec *codec,
		hda_nid_t nid, unsigned char stream,
		unsigned char channel, unsigned int *res)
{
	unsigned char stream_channel;

	/* stream id in the high nibble, channel in the low nibble */
	stream_channel = (stream << 4) | (channel & 0x0f);
	return codec_send_command(codec, nid, AC_VERB_SET_CHANNEL_STREAMID,
				  stream_channel, res);
}
/* Chip access helper function */
static int chipio_send(struct hda_codec *codec,
		       unsigned int reg,
		       unsigned int data)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);
	unsigned int result;

	/* retry the vendor verb until the widget reports OK or ~1s passes */
	do {
		result = snd_hda_codec_read(codec, WIDGET_CHIP_CTRL, 0,
					    reg, data);
		if (result == VENDOR_STATUS_CHIPIO_OK)
			return 0;
		msleep(20);
	} while (time_before(jiffies, deadline));

	return -EIO;
}
/*
 * Write chip address through the vendor widget -- NOT protected by the Mutex!
 */
static int chipio_write_address(struct hda_codec *codec,
				unsigned int chip_addx)
{
	struct ca0132_spec *spec = codec->spec;
	int res;

	/* the widget latches the last address; skip the verbs if unchanged */
	if (spec->curr_chip_addx == chip_addx)
		return 0;

	/* send low 16 bits of the address */
	res = chipio_send(codec, VENDOR_CHIPIO_ADDRESS_LOW,
			  chip_addx & 0xffff);

	if (res != -EIO) {
		/* send high 16 bits of the address */
		res = chipio_send(codec, VENDOR_CHIPIO_ADDRESS_HIGH,
				  chip_addx >> 16);
	}

	/* cache the new address; ~0 marks it unknown after a failure */
	spec->curr_chip_addx = (res < 0) ? ~0UL : chip_addx;

	return res;
}
/*
 * Write data through the vendor widget -- NOT protected by the Mutex!
 */
static int chipio_write_data(struct hda_codec *codec, unsigned int data)
{
	struct ca0132_spec *spec = codec->spec;
	int res;

	/* send low 16 bits of the data */
	res = chipio_send(codec, VENDOR_CHIPIO_DATA_LOW, data & 0xffff);

	if (res != -EIO) {
		/* send high 16 bits of the data */
		res = chipio_send(codec, VENDOR_CHIPIO_DATA_HIGH,
				  data >> 16);
	}

	/*
	 * The chip auto-increments its address after each 32-bit access;
	 * mirror that in the cached address, or invalidate it (~0) on error.
	 */
	spec->curr_chip_addx = (res != -EIO) ?
					(spec->curr_chip_addx + 4) : ~0UL;

	return res;
}
/*
 * Write multiple data through the vendor widget -- NOT protected by the Mutex!
 */
static int chipio_write_data_multiple(struct hda_codec *codec,
				      const u32 *data,
				      unsigned int count)
{
	int status = 0;

	if (!data) {
		codec_dbg(codec, "chipio_write_data null ptr\n");
		return -EINVAL;
	}

	/* stop at the first word that fails to write */
	for (; count != 0 && status == 0; count--)
		status = chipio_write_data(codec, *data++);

	return status;
}
/*
 * Read data through the vendor widget -- NOT protected by the Mutex!
 */
static int chipio_read_data(struct hda_codec *codec, unsigned int *data)
{
	struct ca0132_spec *spec = codec->spec;
	int res;

	/* post read */
	res = chipio_send(codec, VENDOR_CHIPIO_HIC_POST_READ, 0);

	if (res != -EIO) {
		/* read status */
		res = chipio_send(codec, VENDOR_CHIPIO_STATUS, 0);
	}

	if (res != -EIO) {
		/* read data */
		*data = snd_hda_codec_read(codec, WIDGET_CHIP_CTRL, 0,
					   VENDOR_CHIPIO_HIC_READ_DATA,
					   0);
	}

	/*
	 * The chip auto-increments its address after each 32-bit access;
	 * mirror that in the cached address, or invalidate it (~0) on error.
	 */
	spec->curr_chip_addx = (res != -EIO) ?
					(spec->curr_chip_addx + 4) : ~0UL;

	return res;
}
/*
 * Write given value to the given address through the chip I/O widget.
 * protected by the Mutex
 */
static int chipio_write(struct hda_codec *codec,
			unsigned int chip_addx, const unsigned int data)
{
	struct ca0132_spec *spec = codec->spec;
	int err;

	mutex_lock(&spec->chipio_mutex);

	/* write the address; only on success write the data word */
	err = chipio_write_address(codec, chip_addx);
	if (err >= 0)
		err = chipio_write_data(codec, data);

	mutex_unlock(&spec->chipio_mutex);
	return err;
}
/*
 * Write multiple values to the given address through the chip I/O widget.
 * protected by the Mutex
 */
static int chipio_write_multiple(struct hda_codec *codec,
				 u32 chip_addx,
				 const u32 *data,
				 unsigned int count)
{
	struct ca0132_spec *spec = codec->spec;
	int status;

	mutex_lock(&spec->chipio_mutex);

	/* set the start address, then stream the words */
	status = chipio_write_address(codec, chip_addx);
	if (status >= 0)
		status = chipio_write_data_multiple(codec, data, count);

	mutex_unlock(&spec->chipio_mutex);
	return status;
}
/*
 * Read the given address through the chip I/O widget
 * protected by the Mutex
 */
static int chipio_read(struct hda_codec *codec,
		       unsigned int chip_addx, unsigned int *data)
{
	struct ca0132_spec *spec = codec->spec;
	int err;

	mutex_lock(&spec->chipio_mutex);

	/* write the address; only on success read the data word back */
	err = chipio_write_address(codec, chip_addx);
	if (err >= 0)
		err = chipio_read_data(codec, data);

	mutex_unlock(&spec->chipio_mutex);
	return err;
}
/*
 * Set chip control flags through the chip I/O widget.
 */
static void chipio_set_control_flag(struct hda_codec *codec,
				    enum control_flag_id flag_id,
				    bool flag_state)
{
	unsigned int val = flag_id;

	/* bit 7 carries the new flag state, bits 6:0 the flag id */
	if (flag_state)
		val |= 1 << 7;

	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_FLAG_SET, val);
}
/*
 * Set chip parameters through the chip I/O widget.
 */
static void chipio_set_control_param(struct hda_codec *codec,
		enum control_param_id param_id, int param_val)
{
	struct ca0132_spec *spec = codec->spec;
	int val;

	/* small id/value pairs fit a single verb: value[7:5] | id[4:0] */
	if ((param_id < 32) && (param_val < 8)) {
		val = (param_val << 5) | (param_id);
		snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
				    VENDOR_CHIPIO_PARAM_SET, val);
	} else {
		/* otherwise use the two-verb extended sequence, serialized */
		mutex_lock(&spec->chipio_mutex);
		if (chipio_send(codec, VENDOR_CHIPIO_STATUS, 0) == 0) {
			snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
					    VENDOR_CHIPIO_PARAM_EX_ID_SET,
					    param_id);
			snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
					    VENDOR_CHIPIO_PARAM_EX_VALUE_SET,
					    param_val);
		}
		mutex_unlock(&spec->chipio_mutex);
	}
}
/*
 * Set sampling rate of the connection point.
 */
static void chipio_set_conn_rate(struct hda_codec *codec,
				 int connid, enum ca0132_sample_rate rate)
{
	/* select the connection point, then program its sample rate */
	chipio_set_control_param(codec, CONTROL_PARAM_CONN_POINT_ID, connid);
	chipio_set_control_param(codec, CONTROL_PARAM_CONN_POINT_SAMPLE_RATE,
				 rate);
}
/*
 * Enable clocks.
 */
static void chipio_enable_clocks(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;

	mutex_lock(&spec->chipio_mutex);
	/*
	 * Write PLL/PMU registers 0, 5 and 6 via the 8051 address/data
	 * verb pair.  NOTE(review): the values 0xff/0x0b/0xff are vendor
	 * init magic; their exact meaning is not documented here.
	 */
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_LOW, 0);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PLL_PMU_WRITE, 0xff);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_LOW, 5);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PLL_PMU_WRITE, 0x0b);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_LOW, 6);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PLL_PMU_WRITE, 0xff);
	mutex_unlock(&spec->chipio_mutex);
}
/*
 * CA0132 DSP IO stuffs
 */
/*
 * Send one DSP IO verb, retrying while the DSP reports busy.
 * Returns the (non-busy) response on success, or -EIO after ~1s of retries.
 */
static int dspio_send(struct hda_codec *codec, unsigned int reg,
		      unsigned int data)
{
	int res;
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* send bits of data specified by reg to dsp */
	do {
		res = snd_hda_codec_read(codec, WIDGET_DSP_CTRL, 0, reg, data);
		if ((res >= 0) && (res != VENDOR_STATUS_DSPIO_BUSY))
			return res;
		msleep(20);
	} while (time_before(jiffies, timeout));

	return -EIO;
}
/*
 * Wait for DSP to be ready for commands
 */
static void dspio_write_wait(struct hda_codec *codec)
{
	int status;
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* poll until the DSP is idle (or only its response queue is empty);
	 * gives up silently after ~1s */
	do {
		status = snd_hda_codec_read(codec, WIDGET_DSP_CTRL, 0,
						VENDOR_DSPIO_STATUS, 0);
		if ((status == VENDOR_STATUS_DSPIO_OK) ||
		    (status == VENDOR_STATUS_DSPIO_SCP_RESPONSE_QUEUE_EMPTY))
			break;
		msleep(1);
	} while (time_before(jiffies, timeout));
}
/*
 * Write SCP data to DSP
 */
/*
 * Queue one 32-bit SCP word to the DSP.
 * Returns 0 on success, -EIO when the command queue is full, or the
 * negative error from a failed send.  (Previously a failed dspio_send()
 * fell through to the queue-full check and was reported as success.)
 */
static int dspio_write(struct hda_codec *codec, unsigned int scp_data)
{
	struct ca0132_spec *spec = codec->spec;
	int status;

	dspio_write_wait(codec);

	mutex_lock(&spec->chipio_mutex);
	status = dspio_send(codec, VENDOR_DSPIO_SCP_WRITE_DATA_LOW,
			    scp_data & 0xffff);
	if (status < 0)
		goto error;

	status = dspio_send(codec, VENDOR_DSPIO_SCP_WRITE_DATA_HIGH,
			    scp_data >> 16);
	if (status < 0)
		goto error;

	/* OK, now check if the write itself has executed*/
	status = snd_hda_codec_read(codec, WIDGET_DSP_CTRL, 0,
				    VENDOR_DSPIO_STATUS, 0);
	mutex_unlock(&spec->chipio_mutex);

	return (status == VENDOR_STATUS_DSPIO_SCP_COMMAND_QUEUE_FULL) ?
			-EIO : 0;

error:
	/* propagate the send failure instead of masking it as success */
	mutex_unlock(&spec->chipio_mutex);
	return status;
}
/*
 * Write multiple SCP data to DSP
 */
static int dspio_write_multiple(struct hda_codec *codec,
				unsigned int *buffer, unsigned int size)
{
	int status = 0;
	unsigned int i;

	if (!buffer)
		return -EINVAL;

	/* stop at the first word that fails to queue */
	for (i = 0; i < size; i++) {
		status = dspio_write(codec, buffer[i]);
		if (status != 0)
			break;
	}

	return status;
}
/* Read one 32-bit word from the DSP's SCP response queue. */
static int dspio_read(struct hda_codec *codec, unsigned int *data)
{
	int res;

	/* ask the DSP to post one response word */
	res = dspio_send(codec, VENDOR_DSPIO_SCP_POST_READ_DATA, 0);
	if (res == -EIO)
		return res;

	/* an empty response queue is reported as an I/O error */
	res = dspio_send(codec, VENDOR_DSPIO_STATUS, 0);
	if (res == -EIO || res == VENDOR_STATUS_DSPIO_SCP_RESPONSE_QUEUE_EMPTY)
		return -EIO;

	*data = snd_hda_codec_read(codec, WIDGET_DSP_CTRL, 0,
				   VENDOR_DSPIO_SCP_READ_DATA, 0);

	return 0;
}
static int dspio_read_multiple(struct hda_codec *codec, unsigned int *buffer,
unsigned int *buf_size, unsigned int size_count)
{
int status = 0;
unsigned int size = *buf_size;
unsigned int count;
unsigned int skip_count;
unsigned int dummy;
if ((buffer == NULL))
return -1;
count = 0;
while (count < size && count < size_count) {
status = dspio_read(codec, buffer++);
if (status != 0)
break;
count++;
}
skip_count = count;
if (status == 0) {
while (skip_count < size) {
status = dspio_read(codec, &dummy);
if (status != 0)
break;
skip_count++;
}
}
*buf_size = count;
return status;
}
/*
 * Pack the SCP message header fields into one 32-bit word.
 * Layout (MSB to LSB): data_size[31:27] error_flag[26] resp_flag[25]
 * device_flag[24] req[23:17] get_flag[16] source_id[15:8] target_id[7:0].
 */
static inline unsigned int
make_scp_header(unsigned int target_id, unsigned int source_id,
		unsigned int get_flag, unsigned int req,
		unsigned int device_flag, unsigned int resp_flag,
		unsigned int error_flag, unsigned int data_size)
{
	return ((data_size & 0x1f) << 27) |
	       ((error_flag & 0x01) << 26) |
	       ((resp_flag & 0x01) << 25) |
	       ((device_flag & 0x01) << 24) |
	       ((req & 0x7f) << 17) |
	       ((get_flag & 0x01) << 16) |
	       ((source_id & 0xff) << 8) |
	       (target_id & 0xff);
}
/*
 * Unpack the fields of an SCP header word; any output pointer may be NULL
 * when the caller is not interested in that field.
 */
static inline void
extract_scp_header(unsigned int header,
		   unsigned int *target_id, unsigned int *source_id,
		   unsigned int *get_flag, unsigned int *req,
		   unsigned int *device_flag, unsigned int *resp_flag,
		   unsigned int *error_flag, unsigned int *data_size)
{
	if (target_id)
		*target_id = header & 0xff;
	if (source_id)
		*source_id = (header >> 8) & 0xff;
	if (get_flag)
		*get_flag = (header >> 16) & 0x01;
	if (req)
		*req = (header >> 17) & 0x7f;
	if (device_flag)
		*device_flag = (header >> 24) & 0x01;
	if (resp_flag)
		*resp_flag = (header >> 25) & 0x01;
	if (error_flag)
		*error_flag = (header >> 26) & 0x01;
	if (data_size)
		*data_size = (header >> 27) & 0x1f;
}
/* Maximum number of 32-bit payload words in one SCP message. */
#define SCP_MAX_DATA_WORDS (16)

/* Structure to contain any SCP message */
struct scp_msg {
	unsigned int hdr;	/* packed header, see make_scp_header() */
	unsigned int data[SCP_MAX_DATA_WORDS];	/* payload words */
};
/* Discard every pending word in the DSP's SCP response queue. */
static void dspio_clear_response_queue(struct hda_codec *codec)
{
	unsigned int discard = 0;

	/* keep reading until the queue reports empty (or errors out) */
	while (dspio_read(codec, &discard) == 0)
		;
}
/*
 * Collect an awaited SCP response: the first queued word must match
 * spec->wait_scp_header (low 24 bits); its top 5 bits give the payload
 * word count, and the payload is read into spec->scp_resp_data.
 */
static int dspio_get_response_data(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int data = 0;
	unsigned int count;

	if (dspio_read(codec, &data) < 0)
		return -EIO;

	if ((data & 0x00ffffff) == spec->wait_scp_header) {
		spec->scp_resp_header = data;
		/* header bits 31:27 = number of payload words */
		spec->scp_resp_count = data >> 27;
		count = spec->wait_num_data;
		dspio_read_multiple(codec, spec->scp_resp_data,
				    &spec->scp_resp_count, count);
		return 0;
	}

	return -EIO;
}
/*
 * Send SCP message to DSP
 */
/*
 * Send one SCP message (header word + payload) to the DSP.  When the
 * header's get or device flag is set, also wait up to 1s for the matching
 * response and copy it into return_buf.  spec->wait_scp is cleared by the
 * response path (dspio_get_response_data(), invoked from code not visible
 * here) once the reply has been captured.
 */
static int dspio_send_scp_message(struct hda_codec *codec,
				  unsigned char *send_buf,
				  unsigned int send_buf_size,
				  unsigned char *return_buf,
				  unsigned int return_buf_size,
				  unsigned int *bytes_returned)
{
	struct ca0132_spec *spec = codec->spec;
	int status = -1;
	unsigned int scp_send_size = 0;
	unsigned int total_size;
	bool waiting_for_resp = false;
	unsigned int header;
	struct scp_msg *ret_msg;
	unsigned int resp_src_id, resp_target_id;
	unsigned int data_size, src_id, target_id, get_flag, device_flag;

	if (bytes_returned)
		*bytes_returned = 0;

	/* get scp header from buffer */
	header = *((unsigned int *)send_buf);
	extract_scp_header(header, &target_id, &src_id, &get_flag, NULL,
			   &device_flag, NULL, NULL, &data_size);
	scp_send_size = data_size + 1;	/* payload words + header word */
	total_size = (scp_send_size * 4);

	if (send_buf_size < total_size)
		return -EINVAL;

	if (get_flag || device_flag) {
		if (!return_buf || return_buf_size < 4 || !bytes_returned)
			return -EINVAL;

		spec->wait_scp_header = *((unsigned int *)send_buf);

		/* swap source id with target id to match the reply header */
		resp_target_id = src_id;
		resp_src_id = target_id;
		spec->wait_scp_header &= 0xffff0000;
		spec->wait_scp_header |= (resp_src_id << 8) | (resp_target_id);
		spec->wait_num_data = return_buf_size/sizeof(unsigned int) - 1;
		spec->wait_scp = 1;
		waiting_for_resp = true;
	}

	status = dspio_write_multiple(codec, (unsigned int *)send_buf,
				      scp_send_size);
	if (status < 0) {
		spec->wait_scp = 0;
		return status;
	}

	if (waiting_for_resp) {
		unsigned long timeout = jiffies + msecs_to_jiffies(1000);
		memset(return_buf, 0, return_buf_size);
		/* poll until the response path clears wait_scp or we time out */
		do {
			msleep(20);
		} while (spec->wait_scp && time_before(jiffies, timeout));
		waiting_for_resp = false;
		if (!spec->wait_scp) {
			ret_msg = (struct scp_msg *)return_buf;
			memcpy(&ret_msg->hdr, &spec->scp_resp_header, 4);
			/*
			 * NOTE(review): wait_num_data is a word count but is
			 * used here as a byte length, so only a quarter of
			 * the expected payload is copied — confirm intent.
			 */
			memcpy(&ret_msg->data, spec->scp_resp_data,
			       spec->wait_num_data);
			*bytes_returned = (spec->scp_resp_count + 1) * 4;
			status = 0;
		} else {
			status = -EIO;
		}
		spec->wait_scp = 0;
	}

	return status;
}
/**
 * dspio_scp - prepare and send an SCP message to the DSP
 * @codec: the HDA codec
 * @mod_id: ID of the DSP module to send the command
 * @req: ID of request to send to the DSP module
 * @dir: SCP_SET or SCP_GET
 * @data: pointer to the data to send with the request, request specific
 * @len: length of the data, in bytes
 * @reply: point to the buffer to hold data returned for a reply
 * @reply_len: length of the reply buffer returned from GET
 *
 * Returns zero or a negative error code.
 */
static int dspio_scp(struct hda_codec *codec,
		int mod_id, int req, int dir, void *data, unsigned int len,
		void *reply, unsigned int *reply_len)
{
	int status = 0;
	struct scp_msg scp_send, scp_reply;
	unsigned int ret_bytes, send_size, ret_size;
	unsigned int send_get_flag, reply_resp_flag, reply_error_flag;
	unsigned int reply_data_size;

	memset(&scp_send, 0, sizeof(scp_send));
	memset(&scp_reply, 0, sizeof(scp_reply));

	/*
	 * NOTE(review): 'len' is in bytes but SCP_MAX_DATA_WORDS is a word
	 * count; this caps the payload at 16 bytes, not 16 words — confirm.
	 */
	if ((len != 0 && data == NULL) || (len > SCP_MAX_DATA_WORDS))
		return -EINVAL;

	if (dir == SCP_GET && reply == NULL) {
		codec_dbg(codec, "dspio_scp get but has no buffer\n");
		return -EINVAL;
	}

	if (reply != NULL && (reply_len == NULL || (*reply_len == 0))) {
		codec_dbg(codec, "dspio_scp bad resp buf len parms\n");
		return -EINVAL;
	}

	/* source id 0x20 identifies the host side of the exchange */
	scp_send.hdr = make_scp_header(mod_id, 0x20, (dir == SCP_GET), req,
				       0, 0, 0, len/sizeof(unsigned int));
	if (data != NULL && len > 0) {
		len = min((unsigned int)(sizeof(scp_send.data)), len);
		memcpy(scp_send.data, data, len);
	}

	ret_bytes = 0;
	send_size = sizeof(unsigned int) + len;	/* header word + payload */
	status = dspio_send_scp_message(codec, (unsigned char *)&scp_send,
					send_size, (unsigned char *)&scp_reply,
					sizeof(scp_reply), &ret_bytes);
	if (status < 0) {
		codec_dbg(codec, "dspio_scp: send scp msg failed\n");
		return status;
	}

	/* extract send and reply headers members */
	extract_scp_header(scp_send.hdr, NULL, NULL, &send_get_flag,
			   NULL, NULL, NULL, NULL, NULL);
	extract_scp_header(scp_reply.hdr, NULL, NULL, NULL, NULL, NULL,
			   &reply_resp_flag, &reply_error_flag,
			   &reply_data_size);

	/* a SET carries no reply payload to validate */
	if (!send_get_flag)
		return 0;

	if (reply_resp_flag && !reply_error_flag) {
		ret_size = (ret_bytes - sizeof(scp_reply.hdr))
				/ sizeof(unsigned int);

		/* reply must fit the caller's buffer and match its header */
		if (*reply_len < ret_size*sizeof(unsigned int)) {
			codec_dbg(codec, "reply too long for buf\n");
			return -EINVAL;
		} else if (ret_size != reply_data_size) {
			codec_dbg(codec, "RetLen and HdrLen .NE.\n");
			return -EINVAL;
		} else {
			*reply_len = ret_size*sizeof(unsigned int);
			memcpy(reply, scp_reply.data, *reply_len);
		}
	} else {
		codec_dbg(codec, "reply ill-formed or errflag set\n");
		return -EIO;
	}

	return status;
}
/*
 * Set DSP parameters
 */
static int dspio_set_param(struct hda_codec *codec, int mod_id,
			   int req, void *data, unsigned int len)
{
	/* a SET request carries no reply buffer */
	return dspio_scp(codec, mod_id, req, SCP_SET, data, len, NULL, NULL);
}
/* Convenience wrapper: set a single 32-bit DSP parameter. */
static int dspio_set_uint_param(struct hda_codec *codec, int mod_id,
				int req, unsigned int data)
{
	unsigned int val = data;

	return dspio_set_param(codec, mod_id, req, &val, sizeof(val));
}
/*
 * Allocate a DSP DMA channel via an SCP message
 */
static int dspio_alloc_dma_chan(struct hda_codec *codec, unsigned int *dma_chan)
{
	int status = 0;
	/*
	 * Reply is a single 32-bit word; sizeof(*dma_chan), not
	 * sizeof(dma_chan), which is the size of the pointer itself.
	 */
	unsigned int size = sizeof(*dma_chan);

	codec_dbg(codec, " dspio_alloc_dma_chan() -- begin\n");
	status = dspio_scp(codec, MASTERCONTROL, MASTERCONTROL_ALLOC_DMA_CHAN,
			   SCP_GET, NULL, 0, dma_chan, &size);

	if (status < 0) {
		codec_dbg(codec, "dspio_alloc_dma_chan: SCP Failed\n");
		return status;
	}

	/* all-ones means the DSP had no channel to hand out */
	if ((*dma_chan + 1) == 0) {
		codec_dbg(codec, "no free dma channels to allocate\n");
		return -EBUSY;
	}

	codec_dbg(codec, "dspio_alloc_dma_chan: chan=%d\n", *dma_chan);
	codec_dbg(codec, " dspio_alloc_dma_chan() -- complete\n");

	return status;
}
/*
 * Free a DSP DMA via an SCP message
 */
static int dspio_free_dma_chan(struct hda_codec *codec, unsigned int dma_chan)
{
	unsigned int dummy = 0;
	int status;

	codec_dbg(codec, " dspio_free_dma_chan() -- begin\n");
	codec_dbg(codec, "dspio_free_dma_chan: chan=%d\n", dma_chan);

	/* freeing is a SET of the same master-control request */
	status = dspio_scp(codec, MASTERCONTROL, MASTERCONTROL_ALLOC_DMA_CHAN,
			   SCP_SET, &dma_chan, sizeof(dma_chan), NULL, &dummy);
	if (status < 0) {
		codec_dbg(codec, "dspio_free_dma_chan: SCP Failed\n");
		return status;
	}

	codec_dbg(codec, " dspio_free_dma_chan() -- complete\n");
	return status;
}
/*
 * (Re)start the DSP
 */
static int dsp_set_run_state(struct hda_codec *codec)
{
	unsigned int dbg_ctrl_reg;
	unsigned int halt_state;
	int err;

	err = chipio_read(codec, DSP_DBGCNTL_INST_OFFSET, &dbg_ctrl_reg);
	if (err < 0)
		return err;

	/* which halt-state bits are currently set? */
	halt_state = (dbg_ctrl_reg & DSP_DBGCNTL_STATE_MASK) >>
		      DSP_DBGCNTL_STATE_LOBIT;
	if (halt_state != 0) {
		/* clear the corresponding single-step bits first... */
		dbg_ctrl_reg &= ~((halt_state << DSP_DBGCNTL_SS_LOBIT) &
				  DSP_DBGCNTL_SS_MASK);
		err = chipio_write(codec, DSP_DBGCNTL_INST_OFFSET,
				   dbg_ctrl_reg);
		if (err < 0)
			return err;

		/* ...then set the matching exec bits to resume running */
		dbg_ctrl_reg |= (halt_state << DSP_DBGCNTL_EXEC_LOBIT) &
				DSP_DBGCNTL_EXEC_MASK;
		err = chipio_write(codec, DSP_DBGCNTL_INST_OFFSET,
				   dbg_ctrl_reg);
		if (err < 0)
			return err;
	}

	return 0;
}
/*
 * Reset the DSP
 */
static int dsp_reset(struct hda_codec *codec)
{
	/*
	 * dspio_send() returns a signed int; 'res' was unsigned, making the
	 * comparison against -EIO depend on implicit unsigned conversion.
	 */
	int res;
	int retry = 20;

	codec_dbg(codec, "dsp_reset\n");
	do {
		res = dspio_send(codec, VENDOR_DSPIO_DSP_INIT, 0);
		retry--;
	} while (res == -EIO && retry);

	if (!retry) {
		codec_dbg(codec, "dsp_reset timeout\n");
		return -EIO;
	}

	return 0;
}
/*
 * Convert chip address to DSP address
 */
/*
 * Map a chip address into the DSP's own address space.  Sets *code when
 * the address lies in the unified-code range and *yram when it lies in
 * the Y data-RAM range; returns INVALID_CHIP_ADDRESS for anything outside
 * the known ranges (range/offset macros are defined elsewhere in the file).
 */
static unsigned int dsp_chip_to_dsp_addx(unsigned int chip_addx,
					 bool *code, bool *yram)
{
	*code = *yram = false;

	if (UC_RANGE(chip_addx, 1)) {
		*code = true;
		return UC_OFF(chip_addx);
	} else if (X_RANGE_ALL(chip_addx, 1)) {
		return X_OFF(chip_addx);
	} else if (Y_RANGE_ALL(chip_addx, 1)) {
		*yram = true;
		return Y_OFF(chip_addx);
	}

	return INVALID_CHIP_ADDRESS;
}
/*
* Check if the DSP DMA is active
*/
static bool dsp_is_dma_active(struct hda_codec *codec, unsigned int dma_chan)
{
unsigned int dma_chnlstart_reg;
chipio_read(codec, DSPDMAC_CHNLSTART_INST_OFFSET, &dma_chnlstart_reg);
return ((dma_chnlstart_reg & (1 <<
(DSPDMAC_CHNLSTART_EN_LOBIT + dma_chan))) != 0);
}
/*
 * Program the channel-independent DSP DMA registers (channel properties,
 * active mask, audio-port routing and IRQ counters) for one DMA channel.
 * In overlay mode (ovly) the current CHNLPROP/ACTIVE contents are read
 * back first so other channels' bits are preserved.
 */
static int dsp_dma_setup_common(struct hda_codec *codec,
				unsigned int chip_addx,
				unsigned int dma_chan,
				unsigned int port_map_mask,
				bool ovly)
{
	int status = 0;
	unsigned int chnl_prop;
	unsigned int dsp_addx;
	unsigned int active;
	bool code, yram;

	codec_dbg(codec, "-- dsp_dma_setup_common() -- Begin ---------\n");

	if (dma_chan >= DSPDMAC_DMA_CFG_CHANNEL_COUNT) {
		codec_dbg(codec, "dma chan num invalid\n");
		return -EINVAL;
	}

	if (dsp_is_dma_active(codec, dma_chan)) {
		codec_dbg(codec, "dma already active\n");
		return -EBUSY;
	}

	dsp_addx = dsp_chip_to_dsp_addx(chip_addx, &code, &yram);

	if (dsp_addx == INVALID_CHIP_ADDRESS) {
		codec_dbg(codec, "invalid chip addr\n");
		return -ENXIO;
	}

	chnl_prop = DSPDMAC_CHNLPROP_AC_MASK;
	active = 0;

	codec_dbg(codec, " dsp_dma_setup_common() start reg pgm\n");

	if (ovly) {
		status = chipio_read(codec, DSPDMAC_CHNLPROP_INST_OFFSET,
				     &chnl_prop);
		if (status < 0) {
			codec_dbg(codec, "read CHNLPROP Reg fail\n");
			return status;
		}
		codec_dbg(codec, "dsp_dma_setup_common() Read CHNLPROP\n");
	}

	/* MSPCE bit per channel: set only for code (instruction) transfers */
	if (!code)
		chnl_prop &= ~(1 << (DSPDMAC_CHNLPROP_MSPCE_LOBIT + dma_chan));
	else
		chnl_prop |= (1 << (DSPDMAC_CHNLPROP_MSPCE_LOBIT + dma_chan));

	chnl_prop &= ~(1 << (DSPDMAC_CHNLPROP_DCON_LOBIT + dma_chan));
	status = chipio_write(codec, DSPDMAC_CHNLPROP_INST_OFFSET, chnl_prop);
	if (status < 0) {
		codec_dbg(codec, "write CHNLPROP Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup_common() Write CHNLPROP\n");

	if (ovly) {
		status = chipio_read(codec, DSPDMAC_ACTIVE_INST_OFFSET,
				     &active);
		if (status < 0) {
			codec_dbg(codec, "read ACTIVE Reg fail\n");
			return status;
		}
		codec_dbg(codec, "dsp_dma_setup_common() Read ACTIVE\n");
	}

	/* clear this channel's AAR bit */
	active &= (~(1 << (DSPDMAC_ACTIVE_AAR_LOBIT + dma_chan))) &
		DSPDMAC_ACTIVE_AAR_MASK;
	status = chipio_write(codec, DSPDMAC_ACTIVE_INST_OFFSET, active);
	if (status < 0) {
		codec_dbg(codec, "write ACTIVE Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup_common() Write ACTIVE\n");

	/* route the requested audio ports to this channel */
	status = chipio_write(codec, DSPDMAC_AUDCHSEL_INST_OFFSET(dma_chan),
			      port_map_mask);
	if (status < 0) {
		codec_dbg(codec, "write AUDCHSEL Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup_common() Write AUDCHSEL\n");

	/* saturate the IRQ counters so no interrupts fire mid-transfer */
	status = chipio_write(codec, DSPDMAC_IRQCNT_INST_OFFSET(dma_chan),
			DSPDMAC_IRQCNT_BICNT_MASK | DSPDMAC_IRQCNT_CICNT_MASK);
	if (status < 0) {
		codec_dbg(codec, "write IRQCNT Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup_common() Write IRQCNT\n");

	codec_dbg(codec,
		   "ChipA=0x%x,DspA=0x%x,dmaCh=%u, "
		   "CHSEL=0x%x,CHPROP=0x%x,Active=0x%x\n",
		   chip_addx, dsp_addx, dma_chan,
		   port_map_mask, chnl_prop, active);

	codec_dbg(codec, "-- dsp_dma_setup_common() -- Complete ------\n");

	return 0;
}
/*
 * Setup the DSP DMA per-transfer-specific registers
 */
static int dsp_dma_setup(struct hda_codec *codec,
			unsigned int chip_addx,
			unsigned int count,
			unsigned int dma_chan)
{
	int status = 0;
	bool code, yram;
	unsigned int dsp_addx;
	unsigned int addr_field;
	unsigned int incr_field;
	unsigned int base_cnt;
	unsigned int cur_cnt;
	unsigned int dma_cfg = 0;
	unsigned int adr_ofs = 0;
	unsigned int xfr_cnt = 0;
	/* largest transfer the BCNT field of XFRCNT can express */
	const unsigned int max_dma_count = 1 << (DSPDMAC_XFRCNT_BCNT_HIBIT -
						DSPDMAC_XFRCNT_BCNT_LOBIT + 1);

	codec_dbg(codec, "-- dsp_dma_setup() -- Begin ---------\n");

	if (count > max_dma_count) {
		codec_dbg(codec, "count too big\n");
		return -EINVAL;
	}

	dsp_addx = dsp_chip_to_dsp_addx(chip_addx, &code, &yram);
	if (dsp_addx == INVALID_CHIP_ADDRESS) {
		codec_dbg(codec, "invalid chip addr\n");
		return -ENXIO;
	}

	codec_dbg(codec, " dsp_dma_setup() start reg pgm\n");

	addr_field = dsp_addx << DSPDMAC_DMACFG_DBADR_LOBIT;
	incr_field = 0;

	if (!code) {
		/* data RAM: addresses are word-doubled, low bit selects Y RAM,
		 * and auto-increment is enabled */
		addr_field <<= 1;
		if (yram)
			addr_field |= (1 << DSPDMAC_DMACFG_DBADR_LOBIT);

		incr_field = (1 << DSPDMAC_DMACFG_AINCR_LOBIT);
	}

	dma_cfg = addr_field + incr_field;
	status = chipio_write(codec, DSPDMAC_DMACFG_INST_OFFSET(dma_chan),
				dma_cfg);
	if (status < 0) {
		codec_dbg(codec, "write DMACFG Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup() Write DMACFG\n");

	/* offset of the last word, doubled for data-RAM addressing */
	adr_ofs = (count - 1) << (DSPDMAC_DSPADROFS_BOFS_LOBIT +
							(code ? 0 : 1));
	status = chipio_write(codec, DSPDMAC_DSPADROFS_INST_OFFSET(dma_chan),
				adr_ofs);
	if (status < 0) {
		codec_dbg(codec, "write DSPADROFS Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup() Write DSPADROFS\n");

	/* base and current transfer counts both hold count - 1 */
	base_cnt = (count - 1) << DSPDMAC_XFRCNT_BCNT_LOBIT;
	cur_cnt  = (count - 1) << DSPDMAC_XFRCNT_CCNT_LOBIT;
	xfr_cnt = base_cnt | cur_cnt;

	status = chipio_write(codec,
				DSPDMAC_XFRCNT_INST_OFFSET(dma_chan), xfr_cnt);
	if (status < 0) {
		codec_dbg(codec, "write XFRCNT Reg fail\n");
		return status;
	}
	codec_dbg(codec, " dsp_dma_setup() Write XFRCNT\n");

	codec_dbg(codec,
		   "ChipA=0x%x, cnt=0x%x, DMACFG=0x%x, "
		   "ADROFS=0x%x, XFRCNT=0x%x\n",
		   chip_addx, count, dma_cfg, adr_ofs, xfr_cnt);

	codec_dbg(codec, "-- dsp_dma_setup() -- Complete ---------\n");

	return 0;
}
/*
 * Start the DSP DMA
 * (Fixes mojibake in the chipio_read() call: the source contained the
 * character '®' where the address-of expression '&reg' belongs.)
 */
static int dsp_dma_start(struct hda_codec *codec,
			 unsigned int dma_chan, bool ovly)
{
	unsigned int reg = 0;
	int status = 0;

	codec_dbg(codec, "-- dsp_dma_start() -- Begin ---------\n");

	if (ovly) {
		/* overlay mode: preserve the other channels' start bits */
		status = chipio_read(codec,
				     DSPDMAC_CHNLSTART_INST_OFFSET, &reg);
		if (status < 0) {
			codec_dbg(codec, "read CHNLSTART reg fail\n");
			return status;
		}
		codec_dbg(codec, "-- dsp_dma_start() Read CHNLSTART\n");

		reg &= ~(DSPDMAC_CHNLSTART_EN_MASK |
				DSPDMAC_CHNLSTART_DIS_MASK);
	}

	status = chipio_write(codec, DSPDMAC_CHNLSTART_INST_OFFSET,
			reg | (1 << (dma_chan + DSPDMAC_CHNLSTART_EN_LOBIT)));
	if (status < 0) {
		codec_dbg(codec, "write CHNLSTART reg fail\n");
		return status;
	}
	codec_dbg(codec, "-- dsp_dma_start() -- Complete ---------\n");

	return status;
}
/*
 * Stop the DSP DMA
 * (Fixes mojibake in the chipio_read() call: the source contained the
 * character '®' where the address-of expression '&reg' belongs.)
 */
static int dsp_dma_stop(struct hda_codec *codec,
			unsigned int dma_chan, bool ovly)
{
	unsigned int reg = 0;
	int status = 0;

	codec_dbg(codec, "-- dsp_dma_stop() -- Begin ---------\n");

	if (ovly) {
		/* overlay mode: preserve the other channels' start bits */
		status = chipio_read(codec,
				     DSPDMAC_CHNLSTART_INST_OFFSET, &reg);
		if (status < 0) {
			codec_dbg(codec, "read CHNLSTART reg fail\n");
			return status;
		}
		codec_dbg(codec, "-- dsp_dma_stop() Read CHNLSTART\n");

		reg &= ~(DSPDMAC_CHNLSTART_EN_MASK |
				DSPDMAC_CHNLSTART_DIS_MASK);
	}

	status = chipio_write(codec, DSPDMAC_CHNLSTART_INST_OFFSET,
			reg | (1 << (dma_chan + DSPDMAC_CHNLSTART_DIS_LOBIT)));
	if (status < 0) {
		codec_dbg(codec, "write CHNLSTART reg fail\n");
		return status;
	}
	codec_dbg(codec, "-- dsp_dma_stop() -- Complete ---------\n");

	return status;
}
/**
 * dsp_allocate_router_ports - allocate router ports
 *
 * @codec: the HDA codec
 * @num_chans: number of channels in the stream
 * @ports_per_channel: number of ports per channel
 * @start_device: start device
 * @port_map: pointer to the port list to hold the allocated ports
 *
 * Returns zero or a negative error code.
 */
static int dsp_allocate_router_ports(struct hda_codec *codec,
				     unsigned int num_chans,
				     unsigned int ports_per_channel,
				     unsigned int start_device,
				     unsigned int *port_map)
{
	int status = 0;
	int res;
	u8 val;

	/* wait for the ChipIO widget to become ready */
	status = chipio_send(codec, VENDOR_CHIPIO_STATUS, 0);
	if (status < 0)
		return status;

	/* config byte: device[7:6], (ports/chan - 1)[5:4], (chans - 1)[3:0] */
	val = start_device << 6;
	val |= (ports_per_channel - 1) << 4;
	val |= num_chans - 1;

	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PORT_ALLOC_CONFIG_SET,
			    val);

	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PORT_ALLOC_SET,
			    MEM_CONNID_DSP);

	status = chipio_send(codec, VENDOR_CHIPIO_STATUS, 0);
	if (status < 0)
		return status;

	res = snd_hda_codec_read(codec, WIDGET_CHIP_CTRL, 0,
				VENDOR_CHIPIO_PORT_ALLOC_GET, 0);

	/* NOTE(review): *port_map is stored even when res is negative */
	*port_map = res;

	return (res < 0) ? res : 0;
}
/*
 * Free router ports
 */
static int dsp_free_router_ports(struct hda_codec *codec)
{
	int status;

	/* wait until the ChipIO widget is ready */
	status = chipio_send(codec, VENDOR_CHIPIO_STATUS, 0);
	if (status < 0)
		return status;

	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_PORT_FREE_SET,
			    MEM_CONNID_DSP);

	return chipio_send(codec, VENDOR_CHIPIO_STATUS, 0);
}
/*
 * Allocate DSP ports for the download stream
 */
static int dsp_allocate_ports(struct hda_codec *codec,
			      unsigned int num_chans,
			      unsigned int rate_multi, unsigned int *port_map)
{
	int status;

	codec_dbg(codec, " dsp_allocate_ports() -- begin\n");

	/* only 1x, 2x and 4x rate multiples are supported */
	switch (rate_multi) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		codec_dbg(codec, "bad rate multiple\n");
		return -EINVAL;
	}

	status = dsp_allocate_router_ports(codec, num_chans,
					   rate_multi, 0, port_map);

	codec_dbg(codec, " dsp_allocate_ports() -- complete\n");

	return status;
}
/*
 * Allocate DSP ports from an HDA stream format word.
 * Rate field layout: bits 1:0 = divisor - 1, bits 4:3 = multiplier - 1.
 */
static int dsp_allocate_ports_format(struct hda_codec *codec,
				     const unsigned short fmt,
				     unsigned int *port_map)
{
	int status;
	unsigned int num_chans;

	unsigned int sample_rate_div = ((get_hdafmt_rate(fmt) >> 0) & 3) + 1;
	unsigned int sample_rate_mul = ((get_hdafmt_rate(fmt) >> 3) & 3) + 1;
	unsigned int rate_multi = sample_rate_mul / sample_rate_div;

	if ((rate_multi != 1) && (rate_multi != 2) && (rate_multi != 4)) {
		codec_dbg(codec, "bad rate multiple\n");
		return -EINVAL;
	}

	/* channel field stores count - 1 */
	num_chans = get_hdafmt_chs(fmt) + 1;

	status = dsp_allocate_ports(codec, num_chans, rate_multi, port_map);

	return status;
}
/*
 * free DSP ports
 */
static int dsp_free_ports(struct hda_codec *codec)
{
	int err;

	codec_dbg(codec, " dsp_free_ports() -- begin\n");

	err = dsp_free_router_ports(codec);
	if (err < 0) {
		codec_dbg(codec, "free router ports fail\n");
		return err;
	}

	codec_dbg(codec, " dsp_free_ports() -- complete\n");
	return err;
}
/*
 * HDA DMA engine stuffs for DSP code download
 */
/* State for streaming firmware to the DSP through an HDA DMA buffer. */
struct dma_engine {
	struct hda_codec *codec;
	unsigned short m_converter_format;	/* HDA stream format word */
	struct snd_dma_buffer *dmab;		/* backing DMA buffer */
	unsigned int buf_size;			/* requested size in bytes */
};

enum dma_state {
	DMA_STATE_STOP = 0,
	DMA_STATE_RUN = 1
};
/* Build an HDA stream format word for the download DMA stream. */
static int dma_convert_to_hda_format(struct hda_codec *codec,
				     unsigned int sample_rate,
				     unsigned short channels,
				     unsigned short *hda_format)
{
	unsigned int fmt;

	/* 32-bit S32_LE samples in 32-bit containers */
	fmt = snd_hdac_calc_stream_format(sample_rate, channels,
					  SNDRV_PCM_FORMAT_S32_LE, 32, 0);

	if (hda_format)
		*hda_format = (unsigned short)fmt;

	return 0;
}
/*
 * Reset DMA for DSP download
 */
static int dma_reset(struct dma_engine *dma)
{
	struct hda_codec *codec = dma->codec;
	struct ca0132_spec *spec = codec->spec;
	int status;

	/* release any previously prepared DSP buffer before re-preparing */
	if (dma->dmab->area)
		snd_hda_codec_load_dsp_cleanup(codec, dma->dmab);

	status = snd_hda_codec_load_dsp_prepare(codec,
			dma->m_converter_format,
			dma->buf_size,
			dma->dmab);
	if (status < 0)
		return status;
	/* on success the prepare call returns the assigned stream id */
	spec->dsp_stream_id = status;
	return 0;
}
/* Start or stop the download DMA stream; unknown states are ignored. */
static int dma_set_state(struct dma_engine *dma, enum dma_state state)
{
	if (state != DMA_STATE_STOP && state != DMA_STATE_RUN)
		return 0;

	/* trigger with true for RUN, false for STOP */
	snd_hda_codec_load_dsp_trigger(dma->codec, state == DMA_STATE_RUN);
	return 0;
}
/* Actual size in bytes of the allocated DMA buffer. */
static unsigned int dma_get_buffer_size(struct dma_engine *dma)
{
	return dma->dmab->bytes;
}
/* CPU-visible address of the DMA buffer. */
static unsigned char *dma_get_buffer_addr(struct dma_engine *dma)
{
	return dma->dmab->area;
}
/*
 * Copy @count bytes of image data into the DMA staging buffer.
 * NOTE(review): no bounds check against dma->buf_size here — callers
 * (dspxfr_one_seg) are responsible for limiting @count to the buffer size.
 */
static int dma_xfer(struct dma_engine *dma,
		const unsigned int *data,
		unsigned int count)
{
	memcpy(dma->dmab->area, data, count);
	return 0;
}
/* Report the HDA format word the engine was configured with. */
static void dma_get_converter_format(
		struct dma_engine *dma,
		unsigned short *format)
{
	if (!format)
		return;

	*format = dma->m_converter_format;
}
/* Stream id assigned by snd_hda_codec_load_dsp_prepare(), cached in spec. */
static unsigned int dma_get_stream_id(struct dma_engine *dma)
{
	struct ca0132_spec *spec = dma->codec->spec;

	return spec->dsp_stream_id;
}
/*
 * One segment of a DSP "fast load" image: a fixed header followed by
 * @count 32-bit words destined for @chip_addr.
 */
struct dsp_image_seg {
	u32 magic;	/* must equal g_magic_value for a valid segment */
	u32 chip_addr;	/* DSP target address, or the HCI-list marker */
	u32 count;	/* number of words in data[]; 0 terminates the image */
	u32 data[];	/* payload; flexible array member (was GNU data[0]) */
};
/* Segment header magic: ASCII "MXFL" when stored little-endian. */
static const u32 g_magic_value = 0x4c46584d;
/* chip_addr value marking an HCI program-list segment. */
static const u32 g_chip_addr_magic_value = 0xFFFFFF01;
/* A segment is valid iff its header carries the expected magic. */
static bool is_valid(const struct dsp_image_seg *p)
{
	return g_magic_value == p->magic;
}
/* True when the segment holds ChipIO address/data pairs, not DMA data. */
static bool is_hci_prog_list_seg(const struct dsp_image_seg *p)
{
	return p->chip_addr == g_chip_addr_magic_value;
}
/* A zero word count marks the image terminator segment. */
static bool is_last(const struct dsp_image_seg *p)
{
	return !p->count;
}
/* Total byte size of a segment: header plus its payload words. */
static size_t dsp_sizeof(const struct dsp_image_seg *p)
{
	return sizeof(*p) + p->count * sizeof(p->data[0]);
}
/* Advance to the segment immediately following @p in the image. */
static const struct dsp_image_seg *get_next_seg_ptr(
		const struct dsp_image_seg *p)
{
	const unsigned char *next = (const unsigned char *)p + dsp_sizeof(p);

	return (const struct dsp_image_seg *)next;
}
/*
 * CA0132 chip DSP transfer stuffs. For DSP download.
 */
/* Sentinel: no DSP DMA channel has been allocated. */
#define INVALID_DMA_CHANNEL (~0U)
/*
 * Program a list of address/data pairs through the ChipIO widget.
 * The segment payload is a sequence of (address, data) word pairs; the
 * number of words is given by the segment's count field.
 */
static int dspxfr_hci_write(struct hda_codec *codec,
			const struct dsp_image_seg *fls)
{
	const u32 *pair;
	unsigned int remain;
	int err;

	if (fls == NULL || fls->chip_addr != g_chip_addr_magic_value) {
		codec_dbg(codec, "hci_write invalid params\n");
		return -EINVAL;
	}

	for (pair = fls->data, remain = fls->count; remain >= 2;
	     pair += 2, remain -= 2) {
		err = chipio_write(codec, pair[0], pair[1]);
		if (err < 0) {
			codec_dbg(codec, "hci_write chipio failed\n");
			return err;
		}
	}

	return 0;
}
/**
 * dspxfr_one_seg - Write a block of data into DSP code or data RAM using
 * pre-allocated DMA engine.
 *
 * @codec: the HDA codec
 * @fls: pointer to a fast load image
 * @reloc: Relocation address for loading single-segment overlays, or 0 for
 *	   no relocation
 * @dma_engine: pointer to DMA engine to be used for DSP download
 * @dma_chan: The number of DMA channels used for DSP download
 * @port_map_mask: port mapping
 * @ovly: TRUE if overlay format is required
 *
 * Returns zero or a negative error code.
 */
static int dspxfr_one_seg(struct hda_codec *codec,
			const struct dsp_image_seg *fls,
			unsigned int reloc,
			struct dma_engine *dma_engine,
			unsigned int dma_chan,
			unsigned int port_map_mask,
			bool ovly)
{
	int status = 0;
	bool comm_dma_setup_done = false;
	const unsigned int *data;
	unsigned int chip_addx;
	unsigned int words_to_write;
	unsigned int buffer_size_words;
	unsigned char *buffer_addx;
	unsigned short hda_format;
	unsigned int sample_rate_div;
	unsigned int sample_rate_mul;
	unsigned int num_chans;
	unsigned int hda_frame_size_words;
	unsigned int remainder_words;
	const u32 *data_remainder;
	u32 chip_addx_remainder;
	unsigned int run_size_words;
	const struct dsp_image_seg *hci_write = NULL;
	unsigned long timeout;
	bool dma_active;

	if (fls == NULL)
		return -EINVAL;

	/* An HCI program-list segment is programmed via ChipIO, not DMA;
	 * remember it and advance to the segment that follows it. */
	if (is_hci_prog_list_seg(fls)) {
		hci_write = fls;
		fls = get_next_seg_ptr(fls);
	}

	/* Only the HCI list remained: program it and we are done. */
	if (hci_write && (!fls || is_last(fls))) {
		codec_dbg(codec, "hci_write\n");
		return dspxfr_hci_write(codec, hci_write);
	}

	if (fls == NULL || dma_engine == NULL || port_map_mask == 0) {
		codec_dbg(codec, "Invalid Params\n");
		return -EINVAL;
	}

	data = fls->data;
	chip_addx = fls->chip_addr,	/* NOTE: comma operator; acts as ';' */
	words_to_write = fls->count;

	if (!words_to_write)
		return hci_write ? dspxfr_hci_write(codec, hci_write) : 0;
	if (reloc)
		chip_addx = (chip_addx & (0xFFFF0000 << 2)) + (reloc << 2);

	/* Target must lie wholly inside one DSP memory range (UC/X/Y). */
	if (!UC_RANGE(chip_addx, words_to_write) &&
	    !X_RANGE_ALL(chip_addx, words_to_write) &&
	    !Y_RANGE_ALL(chip_addx, words_to_write)) {
		codec_dbg(codec, "Invalid chip_addx Params\n");
		return -EINVAL;
	}

	buffer_size_words = (unsigned int)dma_get_buffer_size(dma_engine) /
			sizeof(u32);

	buffer_addx = dma_get_buffer_addr(dma_engine);

	if (buffer_addx == NULL) {
		codec_dbg(codec, "dma_engine buffer NULL\n");
		return -EINVAL;
	}

	/* Decode rate divider/multiplier and channel count from the HDA
	 * format word to derive words consumed per HDA frame. */
	dma_get_converter_format(dma_engine, &hda_format);
	sample_rate_div = ((get_hdafmt_rate(hda_format) >> 0) & 3) + 1;
	sample_rate_mul = ((get_hdafmt_rate(hda_format) >> 3) & 3) + 1;
	num_chans = get_hdafmt_chs(hda_format) + 1;

	hda_frame_size_words = ((sample_rate_div == 0) ? 0 :
			(num_chans * sample_rate_mul / sample_rate_div));

	if (hda_frame_size_words == 0) {
		codec_dbg(codec, "frmsz zero\n");
		return -EINVAL;
	}

	/* Cap the run length and round it down to whole HDA frames. */
	buffer_size_words = min(buffer_size_words,
				(unsigned int)(UC_RANGE(chip_addx, 1) ?
				65536 : 32768));
	buffer_size_words -= buffer_size_words % hda_frame_size_words;
	codec_dbg(codec,
		   "chpadr=0x%08x frmsz=%u nchan=%u "
		   "rate_mul=%u div=%u bufsz=%u\n",
		   chip_addx, hda_frame_size_words, num_chans,
		   sample_rate_mul, sample_rate_div, buffer_size_words);

	if (buffer_size_words < hda_frame_size_words) {
		codec_dbg(codec, "dspxfr_one_seg:failed\n");
		return -EINVAL;
	}

	/* Words that do not fill a whole frame are written via ChipIO,
	 * interleaved with the first DMA run below. */
	remainder_words = words_to_write % hda_frame_size_words;
	data_remainder = data;
	chip_addx_remainder = chip_addx;

	data += remainder_words;
	chip_addx += remainder_words*sizeof(u32);
	words_to_write -= remainder_words;

	while (words_to_write != 0) {
		run_size_words = min(buffer_size_words, words_to_write);
		codec_dbg(codec, "dspxfr (seg loop)cnt=%u rs=%u remainder=%u\n",
			    words_to_write, run_size_words, remainder_words);
		dma_xfer(dma_engine, data, run_size_words*sizeof(u32));
		if (!comm_dma_setup_done) {
			/* One-time DMA channel/port setup for this segment. */
			status = dsp_dma_stop(codec, dma_chan, ovly);
			if (status < 0)
				return status;
			status = dsp_dma_setup_common(codec, chip_addx,
						dma_chan, port_map_mask, ovly);
			if (status < 0)
				return status;
			comm_dma_setup_done = true;
		}

		status = dsp_dma_setup(codec, chip_addx,
				run_size_words, dma_chan);
		if (status < 0)
			return status;
		status = dsp_dma_start(codec, dma_chan, ovly);
		if (status < 0)
			return status;
		if (!dsp_is_dma_active(codec, dma_chan)) {
			codec_dbg(codec, "dspxfr:DMA did not start\n");
			return -EIO;
		}
		status = dma_set_state(dma_engine, DMA_STATE_RUN);
		if (status < 0)
			return status;
		/* Flush the sub-frame remainder via ChipIO (first run only). */
		if (remainder_words != 0) {
			status = chipio_write_multiple(codec,
						chip_addx_remainder,
						data_remainder,
						remainder_words);
			if (status < 0)
				return status;
			remainder_words = 0;
		}
		/* Program the pending HCI list while the DMA runs. */
		if (hci_write) {
			status = dspxfr_hci_write(codec, hci_write);
			if (status < 0)
				return status;
			hci_write = NULL;
		}

		/* Poll (up to 2 s) for the DMA run to drain. */
		timeout = jiffies + msecs_to_jiffies(2000);
		do {
			dma_active = dsp_is_dma_active(codec, dma_chan);
			if (!dma_active)
				break;
			msleep(20);
		} while (time_before(jiffies, timeout));
		/* NOTE(review): a timeout exits the loop with status >= 0,
		 * i.e. without reporting an error — confirm intended. */
		if (dma_active)
			break;

		codec_dbg(codec, "+++++ DMA complete\n");
		dma_set_state(dma_engine, DMA_STATE_STOP);
		status = dma_reset(dma_engine);

		if (status < 0)
			return status;

		data += run_size_words;
		chip_addx += run_size_words*sizeof(u32);
		words_to_write -= run_size_words;
	}

	/* Remainder never flushed (loop body not entered): write it now. */
	if (remainder_words != 0) {
		status = chipio_write_multiple(codec, chip_addx_remainder,
					data_remainder, remainder_words);
	}

	return status;
}
/**
 * dspxfr_image - Write the entire DSP image of a DSP code/data overlay to
 * DSP memories
 *
 * @codec: the HDA codec
 * @fls_data: pointer to a fast load image
 * @reloc: Relocation address for loading single-segment overlays, or 0 for
 *	   no relocation
 * @sample_rate: sampling rate of the stream used for DSP download
 * @channels: channels of the stream used for DSP download
 * @ovly: TRUE if overlay format is required
 *
 * Returns zero or a negative error code.
 */
static int dspxfr_image(struct hda_codec *codec,
			const struct dsp_image_seg *fls_data,
			unsigned int reloc,
			unsigned int sample_rate,
			unsigned short channels,
			bool ovly)
{
	struct ca0132_spec *spec = codec->spec;
	int status;
	unsigned short hda_format = 0;
	unsigned int response;
	unsigned char stream_id = 0;
	struct dma_engine *dma_engine;
	unsigned int dma_chan;
	unsigned int port_map_mask;

	if (fls_data == NULL)
		return -EINVAL;

	dma_engine = kzalloc(sizeof(*dma_engine), GFP_KERNEL);
	if (!dma_engine)
		return -ENOMEM;

	dma_engine->dmab = kzalloc(sizeof(*dma_engine->dmab), GFP_KERNEL);
	if (!dma_engine->dmab) {
		kfree(dma_engine);
		return -ENOMEM;
	}

	dma_engine->codec = codec;
	dma_convert_to_hda_format(codec, sample_rate, channels, &hda_format);
	dma_engine->m_converter_format = hda_format;
	/* Twice the single-transfer length (double-buffered staging). */
	dma_engine->buf_size = (ovly ? DSP_DMA_WRITE_BUFLEN_OVLY :
			DSP_DMA_WRITE_BUFLEN_INIT) * 2;

	/* Overlays allocate a DSP DMA channel below; init uses channel 0. */
	dma_chan = ovly ? INVALID_DMA_CHANNEL : 0;

	status = codec_set_converter_format(codec, WIDGET_CHIP_CTRL,
					hda_format, &response);
	if (status < 0) {
		codec_dbg(codec, "set converter format fail\n");
		goto exit;
	}

	status = snd_hda_codec_load_dsp_prepare(codec,
				dma_engine->m_converter_format,
				dma_engine->buf_size,
				dma_engine->dmab);
	if (status < 0)
		goto exit;
	spec->dsp_stream_id = status;

	if (ovly) {
		status = dspio_alloc_dma_chan(codec, &dma_chan);
		if (status < 0) {
			codec_dbg(codec, "alloc dmachan fail\n");
			dma_chan = INVALID_DMA_CHANNEL;
			goto exit;
		}
	}

	port_map_mask = 0;
	status = dsp_allocate_ports_format(codec, hda_format,
					&port_map_mask);
	if (status < 0) {
		codec_dbg(codec, "alloc ports fail\n");
		goto exit;
	}

	stream_id = dma_get_stream_id(dma_engine);
	status = codec_set_converter_stream_channel(codec,
			WIDGET_CHIP_CTRL, stream_id, 0, &response);
	if (status < 0) {
		codec_dbg(codec, "set stream chan fail\n");
		goto exit;
	}

	/* Walk the image segment by segment until the terminator. */
	while ((fls_data != NULL) && !is_last(fls_data)) {
		if (!is_valid(fls_data)) {
			codec_dbg(codec, "FLS check fail\n");
			status = -EINVAL;
			goto exit;
		}
		status = dspxfr_one_seg(codec, fls_data, reloc,
					dma_engine, dma_chan,
					port_map_mask, ovly);
		if (status < 0)
			break;

		/* Skip the HCI-list header that dspxfr_one_seg consumed
		 * together with the data segment that followed it. */
		if (is_hci_prog_list_seg(fls_data))
			fls_data = get_next_seg_ptr(fls_data);

		if ((fls_data != NULL) && !is_last(fls_data))
			fls_data = get_next_seg_ptr(fls_data);
	}

	if (port_map_mask != 0)
		status = dsp_free_ports(codec);

	if (status < 0)
		goto exit;

	/* Detach the converter from the download stream. */
	status = codec_set_converter_stream_channel(codec,
				WIDGET_CHIP_CTRL, 0, 0, &response);

exit:
	if (ovly && (dma_chan != INVALID_DMA_CHANNEL))
		dspio_free_dma_chan(codec, dma_chan);

	if (dma_engine->dmab->area)
		snd_hda_codec_load_dsp_cleanup(codec, dma_engine->dmab);
	kfree(dma_engine->dmab);
	kfree(dma_engine);

	return status;
}
/*
 * CA0132 DSP download stuffs.
 */
/* Post-download fixups written straight into DSP XRAM. */
static void dspload_post_setup(struct hda_codec *codec)
{
	codec_dbg(codec, "---- dspload_post_setup ------\n");

	/*set DSP speaker to 2.0 configuration*/
	chipio_write(codec, XRAM_XRAM_INST_OFFSET(0x18), 0x08080080);
	chipio_write(codec, XRAM_XRAM_INST_OFFSET(0x19), 0x3f800000);

	/*update write pointer*/
	chipio_write(codec, XRAM_XRAM_INST_OFFSET(0x29), 0x00000002);
}
/**
 * dspload_image - Download DSP from a DSP Image Fast Load structure.
 *
 * @codec: the HDA codec
 * @fls: pointer to a fast load image
 * @ovly: TRUE if overlay format is required
 * @reloc: Relocation address for loading single-segment overlays, or 0 for
 *	   no relocation
 * @autostart: TRUE if DSP starts after loading; ignored if ovly is TRUE
 * @router_chans: number of audio router channels to be allocated (0 means use
 *		  internal defaults; max is 32)
 *
 * The fast load image is a linear array of variable-sized segments, each
 * carrying its own word count, payload and destination chip address.
 * Returns zero or a negative error code.
 */
static int dspload_image(struct hda_codec *codec,
			const struct dsp_image_seg *fls,
			bool ovly,
			unsigned int reloc,
			bool autostart,
			int router_chans)
{
	unsigned int sample_rate = 48000;
	unsigned short channels;
	int status = 0;

	codec_dbg(codec, "---- dspload_image begin ------\n");

	/* Apply the per-mode default when the caller passed 0. */
	if (router_chans == 0)
		router_chans = ovly ? DMA_OVERLAY_FRAME_SIZE_NWORDS :
				      DMA_TRANSFER_FRAME_SIZE_NWORDS;

	/* Channel counts above 16 are folded into a higher sample rate. */
	channels = (unsigned short)router_chans;
	while (channels > 16) {
		sample_rate *= 2;
		channels /= 2;
	}

	codec_dbg(codec, "Ready to program DMA\n");

	if (!ovly) {
		status = dsp_reset(codec);
		if (status < 0)
			return status;
	}
	codec_dbg(codec, "dsp_reset() complete\n");

	status = dspxfr_image(codec, fls, reloc, sample_rate, channels, ovly);
	if (status < 0)
		return status;
	codec_dbg(codec, "dspxfr_image() complete\n");

	if (autostart && !ovly) {
		dspload_post_setup(codec);
		status = dsp_set_run_state(codec);
	}
	codec_dbg(codec, "LOAD FINISHED\n");

	return status;
}
#ifdef CONFIG_SND_HDA_CODEC_CA0132_DSP
/* Poll the DSP status word at 0x40004; true once the firmware reports 1. */
static bool dspload_is_loaded(struct hda_codec *codec)
{
	unsigned int data = 0;
	int status;

	status = chipio_read(codec, 0x40004, &data);
	return (status >= 0) && (data == 1);
}
#else
#define dspload_is_loaded(codec) false
#endif
/* Wait up to two seconds (polling every 20 ms) for the DSP to come up. */
static bool dspload_wait_loaded(struct hda_codec *codec)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(2000);

	for (;;) {
		if (dspload_is_loaded(codec)) {
			codec_info(codec, "ca0132 DSP downloaded and running\n");
			return true;
		}
		msleep(20);
		if (!time_before(jiffies, timeout))
			break;
	}

	codec_err(codec, "ca0132 failed to download DSP\n");
	return false;
}
/*
 * PCM callbacks
 */
/* Bind analog playback to the first DAC for the given stream/format. */
static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			unsigned int stream_tag,
			unsigned int format,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);

	return 0;
}
/* Tear down analog playback; no-op while a DSP download is in flight. */
static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	if (spec->dsp_state == DSP_DOWNLOADING)
		return 0;

	/*If Playback effects are on, allow stream some time to flush
	 *effects tail*/
	if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
		msleep(50);

	snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);

	return 0;
}
static unsigned int ca0132_playback_pcm_delay(struct hda_pcm_stream *info,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
unsigned int latency = DSP_PLAYBACK_INIT_LATENCY;
struct snd_pcm_runtime *runtime = substream->runtime;
if (spec->dsp_state != DSP_DOWNLOADED)
return 0;
/* Add latency if playback enhancement and either effect is enabled. */
if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]) {
if ((spec->effects_switch[SURROUND - EFFECT_START_NID]) ||
(spec->effects_switch[DIALOG_PLUS - EFFECT_START_NID]))
latency += DSP_PLAY_ENHANCEMENT_LATENCY;
}
/* Applying Speaker EQ adds latency as well. */
if (spec->cur_out_type == SPEAKER_OUT)
latency += DSP_SPEAKER_OUT_LATENCY;
return (latency * runtime->rate) / 1000;
}
/*
 * Digital out
 */
/* Open the shared SPDIF/digital output via the multiout helper. */
static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}
/* Prepare the digital output stream via the multiout helper. */
static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			unsigned int stream_tag,
			unsigned int format,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
					     stream_tag, format, substream);
}
/* Clean up the digital output stream via the multiout helper. */
static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
}
/* Close the digital output via the multiout helper. */
static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}
/*
 * Analog capture
 */
/* Bind the capture converter (hinfo->nid) to the HDA stream tag. */
static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			unsigned int stream_tag,
			unsigned int format,
			struct snd_pcm_substream *substream)
{
	snd_hda_codec_setup_stream(codec, hinfo->nid,
				   stream_tag, 0, format);

	return 0;
}
/* Tear down the capture stream; no-op while a DSP download is in flight. */
static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
			struct hda_codec *codec,
			struct snd_pcm_substream *substream)
{
	struct ca0132_spec *spec = codec->spec;

	if (spec->dsp_state == DSP_DOWNLOADING)
		return 0;

	snd_hda_codec_cleanup_stream(codec, hinfo->nid);

	return 0;
}
static unsigned int ca0132_capture_pcm_delay(struct hda_pcm_stream *info,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
unsigned int latency = DSP_CAPTURE_INIT_LATENCY;
struct snd_pcm_runtime *runtime = substream->runtime;
if (spec->dsp_state != DSP_DOWNLOADED)
return 0;
if (spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID])
latency += DSP_CRYSTAL_VOICE_LATENCY;
return (latency * runtime->rate) / 1000;
}
/*
 * Controls stuffs.
 */

/*
 * Mixer controls helpers.
 */
/* Mono volume control routed through the ca0132 DSP-aware callbacks. */
#define CA0132_CODEC_VOL_MONO(xname, nid, channel, dir) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .subdevice = HDA_SUBDEV_AMP_FLAG, \
	  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
			SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
	  .info = ca0132_volume_info, \
	  .get = ca0132_volume_get, \
	  .put = ca0132_volume_put, \
	  .tlv = { .c = ca0132_volume_tlv }, \
	  .private_value = HDA_COMPOSE_AMP_VAL(nid, channel, 0, dir) }

/* Mono mute switch using the ca0132 switch callbacks. */
#define CA0132_CODEC_MUTE_MONO(xname, nid, channel, dir) \
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
	  .name = xname, \
	  .subdevice = HDA_SUBDEV_AMP_FLAG, \
	  .info = snd_hda_mixer_amp_switch_info, \
	  .get = ca0132_switch_get, \
	  .put = ca0132_switch_put, \
	  .private_value = HDA_COMPOSE_AMP_VAL(nid, channel, 0, dir) }

/* stereo (channel mask 3 selects both channels) */
#define CA0132_CODEC_VOL(xname, nid, dir) \
	CA0132_CODEC_VOL_MONO(xname, nid, 3, dir)
#define CA0132_CODEC_MUTE(xname, nid, dir) \
	CA0132_CODEC_MUTE_MONO(xname, nid, 3, dir)
/* The followings are for tuning of products */
#ifdef ENABLE_TUNING_CONTROLS

/*
 * IEEE-754 single-precision bit patterns for 20.0 .. 180.0 in 1.0 steps
 * (Voice Focus wedge angle, degrees). Indexed by (control value - 20)
 * from voice_focus_ctl_put(); the control range is 20..180.
 */
static unsigned int voice_focus_vals_lookup[] = {
0x41A00000, 0x41A80000, 0x41B00000, 0x41B80000, 0x41C00000, 0x41C80000,
0x41D00000, 0x41D80000, 0x41E00000, 0x41E80000, 0x41F00000, 0x41F80000,
0x42000000, 0x42040000, 0x42080000, 0x420C0000, 0x42100000, 0x42140000,
0x42180000, 0x421C0000, 0x42200000, 0x42240000, 0x42280000, 0x422C0000,
0x42300000, 0x42340000, 0x42380000, 0x423C0000, 0x42400000, 0x42440000,
0x42480000, 0x424C0000, 0x42500000, 0x42540000, 0x42580000, 0x425C0000,
0x42600000, 0x42640000, 0x42680000, 0x426C0000, 0x42700000, 0x42740000,
0x42780000, 0x427C0000, 0x42800000, 0x42820000, 0x42840000, 0x42860000,
0x42880000, 0x428A0000, 0x428C0000, 0x428E0000, 0x42900000, 0x42920000,
0x42940000, 0x42960000, 0x42980000, 0x429A0000, 0x429C0000, 0x429E0000,
0x42A00000, 0x42A20000, 0x42A40000, 0x42A60000, 0x42A80000, 0x42AA0000,
0x42AC0000, 0x42AE0000, 0x42B00000, 0x42B20000, 0x42B40000, 0x42B60000,
0x42B80000, 0x42BA0000, 0x42BC0000, 0x42BE0000, 0x42C00000, 0x42C20000,
0x42C40000, 0x42C60000, 0x42C80000, 0x42CA0000, 0x42CC0000, 0x42CE0000,
0x42D00000, 0x42D20000, 0x42D40000, 0x42D60000, 0x42D80000, 0x42DA0000,
0x42DC0000, 0x42DE0000, 0x42E00000, 0x42E20000, 0x42E40000, 0x42E60000,
0x42E80000, 0x42EA0000, 0x42EC0000, 0x42EE0000, 0x42F00000, 0x42F20000,
0x42F40000, 0x42F60000, 0x42F80000, 0x42FA0000, 0x42FC0000, 0x42FE0000,
0x43000000, 0x43010000, 0x43020000, 0x43030000, 0x43040000, 0x43050000,
0x43060000, 0x43070000, 0x43080000, 0x43090000, 0x430A0000, 0x430B0000,
0x430C0000, 0x430D0000, 0x430E0000, 0x430F0000, 0x43100000, 0x43110000,
0x43120000, 0x43130000, 0x43140000, 0x43150000, 0x43160000, 0x43170000,
0x43180000, 0x43190000, 0x431A0000, 0x431B0000, 0x431C0000, 0x431D0000,
0x431E0000, 0x431F0000, 0x43200000, 0x43210000, 0x43220000, 0x43230000,
0x43240000, 0x43250000, 0x43260000, 0x43270000, 0x43280000, 0x43290000,
0x432A0000, 0x432B0000, 0x432C0000, 0x432D0000, 0x432E0000, 0x432F0000,
0x43300000, 0x43310000, 0x43320000, 0x43330000, 0x43340000
};
/*
 * IEEE-754 single-precision bit patterns for 0.00 .. 1.00 in 0.01 steps
 * (mic SVM level). Indexed directly by the 0..100 control value from
 * mic_svm_ctl_put().
 */
static unsigned int mic_svm_vals_lookup[] = {
0x00000000, 0x3C23D70A, 0x3CA3D70A, 0x3CF5C28F, 0x3D23D70A, 0x3D4CCCCD,
0x3D75C28F, 0x3D8F5C29, 0x3DA3D70A, 0x3DB851EC, 0x3DCCCCCD, 0x3DE147AE,
0x3DF5C28F, 0x3E051EB8, 0x3E0F5C29, 0x3E19999A, 0x3E23D70A, 0x3E2E147B,
0x3E3851EC, 0x3E428F5C, 0x3E4CCCCD, 0x3E570A3D, 0x3E6147AE, 0x3E6B851F,
0x3E75C28F, 0x3E800000, 0x3E851EB8, 0x3E8A3D71, 0x3E8F5C29, 0x3E947AE1,
0x3E99999A, 0x3E9EB852, 0x3EA3D70A, 0x3EA8F5C3, 0x3EAE147B, 0x3EB33333,
0x3EB851EC, 0x3EBD70A4, 0x3EC28F5C, 0x3EC7AE14, 0x3ECCCCCD, 0x3ED1EB85,
0x3ED70A3D, 0x3EDC28F6, 0x3EE147AE, 0x3EE66666, 0x3EEB851F, 0x3EF0A3D7,
0x3EF5C28F, 0x3EFAE148, 0x3F000000, 0x3F028F5C, 0x3F051EB8, 0x3F07AE14,
0x3F0A3D71, 0x3F0CCCCD, 0x3F0F5C29, 0x3F11EB85, 0x3F147AE1, 0x3F170A3D,
0x3F19999A, 0x3F1C28F6, 0x3F1EB852, 0x3F2147AE, 0x3F23D70A, 0x3F266666,
0x3F28F5C3, 0x3F2B851F, 0x3F2E147B, 0x3F30A3D7, 0x3F333333, 0x3F35C28F,
0x3F3851EC, 0x3F3AE148, 0x3F3D70A4, 0x3F400000, 0x3F428F5C, 0x3F451EB8,
0x3F47AE14, 0x3F4A3D71, 0x3F4CCCCD, 0x3F4F5C29, 0x3F51EB85, 0x3F547AE1,
0x3F570A3D, 0x3F59999A, 0x3F5C28F6, 0x3F5EB852, 0x3F6147AE, 0x3F63D70A,
0x3F666666, 0x3F68F5C3, 0x3F6B851F, 0x3F6E147B, 0x3F70A3D7, 0x3F733333,
0x3F75C28F, 0x3F7851EC, 0x3F7AE148, 0x3F7D70A4, 0x3F800000
};
/*
 * IEEE-754 single-precision bit patterns for -24.0 .. +24.0 dB in 1 dB
 * steps (equalizer band gain). Indexed directly by the 0..48 control
 * value from equalizer_ctl_put(); index 24 is 0 dB.
 */
static unsigned int equalizer_vals_lookup[] = {
0xC1C00000, 0xC1B80000, 0xC1B00000, 0xC1A80000, 0xC1A00000, 0xC1980000,
0xC1900000, 0xC1880000, 0xC1800000, 0xC1700000, 0xC1600000, 0xC1500000,
0xC1400000, 0xC1300000, 0xC1200000, 0xC1100000, 0xC1000000, 0xC0E00000,
0xC0C00000, 0xC0A00000, 0xC0800000, 0xC0400000, 0xC0000000, 0xBF800000,
0x00000000, 0x3F800000, 0x40000000, 0x40400000, 0x40800000, 0x40A00000,
0x40C00000, 0x40E00000, 0x41000000, 0x41100000, 0x41200000, 0x41300000,
0x41400000, 0x41500000, 0x41600000, 0x41700000, 0x41800000, 0x41880000,
0x41900000, 0x41980000, 0x41A00000, 0x41A80000, 0x41B00000, 0x41B80000,
0x41C00000
};
/*
 * Send one tuning parameter value to the DSP: look up the (mid, req)
 * pair registered for @nid in ca0132_tuning_ctls and write lookup[idx]
 * as the parameter payload. Returns 1 (value sent) or 0 (unknown nid).
 */
static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
			unsigned int *lookup, int idx)
{
	int i;

	for (i = 0; i < TUNING_CTLS_COUNT; i++)
		if (nid == ca0132_tuning_ctls[i].nid)
			break;

	/* Fix: guard against an unknown nid — the original fell through
	 * with i == TUNING_CTLS_COUNT and indexed past the table end. */
	if (i == TUNING_CTLS_COUNT)
		return 0;

	snd_hda_power_up(codec);
	dspio_set_param(codec, ca0132_tuning_ctls[i].mid,
			ca0132_tuning_ctls[i].req,
			&(lookup[idx]), sizeof(unsigned int));
	snd_hda_power_down(codec);

	return 1;
}
/* Report the cached value of a tuning control to userspace. */
static int tuning_ctl_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);

	ucontrol->value.integer.value[0] =
		spec->cur_ctl_vals[nid - TUNING_CTL_START_NID];
	return 0;
}
/* Describe the Voice Focus control: integer, 20..180 degrees, step 1. */
static int voice_focus_ctl_info(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	/* Channel mask 3 means a stereo control, otherwise mono. */
	uinfo->count = (get_amp_channels(kcontrol) == 3) ? 2 : 1;
	uinfo->value.integer.min = 20;
	uinfo->value.integer.max = 180;
	uinfo->value.integer.step = 1;

	return 0;
}
/* Apply a Voice Focus change; returns 1 when the value changed. */
static int voice_focus_ctl_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	long val = ucontrol->value.integer.value[0];
	int idx = nid - TUNING_CTL_START_NID;

	if (spec->cur_ctl_vals[idx] == val)
		return 0;	/* unchanged */

	spec->cur_ctl_vals[idx] = val;
	/* The lookup table is indexed from the 20-degree minimum. */
	tuning_ctl_set(codec, nid, voice_focus_vals_lookup, val - 20);

	return 1;
}
/* Describe the mic SVM control: integer, 0..100, step 1. */
static int mic_svm_ctl_info(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	/* Channel mask 3 means a stereo control, otherwise mono. */
	uinfo->count = (get_amp_channels(kcontrol) == 3) ? 2 : 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 100;
	uinfo->value.integer.step = 1;

	return 0;
}
/* Apply a mic SVM level change; returns 1 when the value changed. */
static int mic_svm_ctl_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	long *valp = ucontrol->value.integer.value;
	int idx;

	idx = nid - TUNING_CTL_START_NID;
	/* any change? */
	if (spec->cur_ctl_vals[idx] == *valp)
		return 0;

	spec->cur_ctl_vals[idx] = *valp;
	idx = *valp;
	tuning_ctl_set(codec, nid, mic_svm_vals_lookup, idx);

	/* Fix: report the change (1) like the sibling _put handlers so
	 * ALSA emits a control-change notification (was: return 0). */
	return 1;
}
/* Describe an equalizer band control: integer, 0..48 (== -24..+24 dB). */
static int equalizer_ctl_info(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	/* Channel mask 3 means a stereo control, otherwise mono. */
	uinfo->count = (get_amp_channels(kcontrol) == 3) ? 2 : 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 48;
	uinfo->value.integer.step = 1;

	return 0;
}
/* Apply an equalizer band change; returns 1 when the value changed. */
static int equalizer_ctl_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	long val = ucontrol->value.integer.value[0];
	int idx = nid - TUNING_CTL_START_NID;

	if (spec->cur_ctl_vals[idx] == val)
		return 0;	/* unchanged */

	spec->cur_ctl_vals[idx] = val;
	tuning_ctl_set(codec, nid, equalizer_vals_lookup, val);

	return 1;
}
/* TLV scale for Voice Focus: 20.00 min, 1.00 steps (values in 0.01 units). */
static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
/* TLV scale for equalizer bands: -24.00 dB min, 1.00 dB steps. */
static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0);
/*
 * Create and register one tuning mixer control.
 *
 * @pnid: parent feature nid (VOICE_FOCUS/MIC_SVM/EQUALIZER) selecting the
 *	  callback set; unknown parents are silently skipped.
 * @nid:  the control's own nid.
 * @name: base control name; "<name> <dir> Volume" is generated.
 * @dir:  1 = capture (HDA_INPUT), 0 = playback (HDA_OUTPUT).
 *
 * Returns zero or a negative error code from snd_hda_ctl_add().
 */
static int add_tuning_control(struct hda_codec *codec,
			hda_nid_t pnid, hda_nid_t nid,
			const char *name, int dir)
{
	char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
	int type = dir ? HDA_INPUT : HDA_OUTPUT;
	struct snd_kcontrol_new knew =
		HDA_CODEC_VOLUME_MONO(namestr, nid, 1, 0, type);

	knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ;
	knew.tlv.c = 0;
	knew.tlv.p = 0;
	switch (pnid) {
	case VOICE_FOCUS:
		knew.info = voice_focus_ctl_info;
		knew.get = tuning_ctl_get;
		knew.put = voice_focus_ctl_put;
		knew.tlv.p = voice_focus_db_scale;
		break;
	case MIC_SVM:
		knew.info = mic_svm_ctl_info;
		knew.get = tuning_ctl_get;
		knew.put = mic_svm_ctl_put;
		break;
	case EQUALIZER:
		knew.info = equalizer_ctl_info;
		knew.get = tuning_ctl_get;
		knew.put = equalizer_ctl_put;
		knew.tlv.p = eq_db_scale;
		break;
	default:
		return 0;
	}
	knew.private_value =
		HDA_COMPOSE_AMP_VAL(nid, 1, 0, type);
	/* Fix: bound the generated name to the control-name buffer;
	 * sprintf could overflow namestr for long names. */
	snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]);
	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
}
/* Register every entry of the tuning-control table; stop on first error. */
static int add_tuning_ctls(struct hda_codec *codec)
{
	int i, ret;

	for (i = 0; i < TUNING_CTLS_COUNT; i++) {
		ret = add_tuning_control(codec,
					 ca0132_tuning_ctls[i].parent_nid,
					 ca0132_tuning_ctls[i].nid,
					 ca0132_tuning_ctls[i].name,
					 ca0132_tuning_ctls[i].direct);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Seed the tuning-control value cache with the power-on defaults. */
static void ca0132_init_tuning_defaults(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	int i;

	/* Wedge Angle defaults to 30. 10 below is 30 - 20. 20 is min. */
	spec->cur_ctl_vals[WEDGE_ANGLE - TUNING_CTL_START_NID] = 10;
	/* SVM level defaults to 0.74. */
	spec->cur_ctl_vals[SVM_LEVEL - TUNING_CTL_START_NID] = 74;

	/* EQ defaults to 0dB (index 24 of equalizer_vals_lookup). */
	for (i = 2; i < TUNING_CTLS_COUNT; i++)
		spec->cur_ctl_vals[i] = 24;
}
#endif /*ENABLE_TUNING_CONTROLS*/
/*
 * Select the active output.
 * If autodetect is enabled, output will be selected based on jack detection.
 * If jack inserted, headphone will be selected, else built-in speakers
 * If autodetect is disabled, output will be selected based on selection.
 */
static int ca0132_select_out(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int pin_ctl;
	int jack_present;
	int auto_jack;
	unsigned int tmp;
	int err;

	codec_dbg(codec, "ca0132_select_out\n");

	snd_hda_power_up_pm(codec);

	auto_jack = spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];

	if (auto_jack)
		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_hp);
	else
		jack_present =
			spec->vnode_lswitch[VNID_HP_SEL - VNODE_START_NID];

	if (jack_present)
		spec->cur_out_type = HEADPHONE_OUT;
	else
		spec->cur_out_type = SPEAKER_OUT;

	if (spec->cur_out_type == SPEAKER_OUT) {
		codec_dbg(codec, "ca0132_select_out speaker\n");
		/*speaker out config*/
		tmp = FLOAT_ONE;
		err = dspio_set_uint_param(codec, 0x80, 0x04, tmp);
		if (err < 0)
			goto exit;
		/*enable speaker EQ*/
		tmp = FLOAT_ONE;
		err = dspio_set_uint_param(codec, 0x8f, 0x00, tmp);
		if (err < 0)
			goto exit;

		/* Setup EAPD */
		snd_hda_codec_write(codec, spec->out_pins[1], 0,
				    VENDOR_CHIPIO_EAPD_SEL_SET, 0x02);
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    AC_VERB_SET_EAPD_BTLENABLE, 0x00);
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    VENDOR_CHIPIO_EAPD_SEL_SET, 0x00);
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    AC_VERB_SET_EAPD_BTLENABLE, 0x02);

		/* disable headphone node */
		pin_ctl = snd_hda_codec_read(codec, spec->out_pins[1], 0,
					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
		snd_hda_set_pin_ctl(codec, spec->out_pins[1],
				    pin_ctl & ~PIN_HP);
		/* enable speaker node */
		pin_ctl = snd_hda_codec_read(codec, spec->out_pins[0], 0,
					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
		snd_hda_set_pin_ctl(codec, spec->out_pins[0],
				    pin_ctl | PIN_OUT);
	} else {
		codec_dbg(codec, "ca0132_select_out hp\n");
		/*headphone out config*/
		tmp = FLOAT_ZERO;
		err = dspio_set_uint_param(codec, 0x80, 0x04, tmp);
		if (err < 0)
			goto exit;
		/*disable speaker EQ*/
		tmp = FLOAT_ZERO;
		err = dspio_set_uint_param(codec, 0x8f, 0x00, tmp);
		if (err < 0)
			goto exit;

		/* Setup EAPD */
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    VENDOR_CHIPIO_EAPD_SEL_SET, 0x00);
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    AC_VERB_SET_EAPD_BTLENABLE, 0x00);
		snd_hda_codec_write(codec, spec->out_pins[1], 0,
				    VENDOR_CHIPIO_EAPD_SEL_SET, 0x02);
		/* NOTE(review): BTLENABLE is re-armed on out_pins[0] (the
		 * speaker pin) rather than out_pins[1] — asymmetric with the
		 * speaker branch; confirm this is intended. */
		snd_hda_codec_write(codec, spec->out_pins[0], 0,
				    AC_VERB_SET_EAPD_BTLENABLE, 0x02);

		/* disable speaker*/
		pin_ctl = snd_hda_codec_read(codec, spec->out_pins[0], 0,
					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
		snd_hda_set_pin_ctl(codec, spec->out_pins[0],
				    pin_ctl & ~PIN_HP);
		/* enable headphone*/
		pin_ctl = snd_hda_codec_read(codec, spec->out_pins[1], 0,
					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
		snd_hda_set_pin_ctl(codec, spec->out_pins[1],
				    pin_ctl | PIN_HP);
	}

exit:
	snd_hda_power_down_pm(codec);

	return err < 0 ? err : 0;
}
/*
 * Delayed-work handler for the headphone unsolicited event: re-run output
 * selection, then unblock and resync jack reporting.
 */
static void ca0132_unsol_hp_delayed(struct work_struct *work)
{
	struct ca0132_spec *spec = container_of(
		to_delayed_work(work), struct ca0132_spec, unsol_hp_work);
	struct hda_jack_tbl *jack;

	ca0132_select_out(spec->codec);

	jack = snd_hda_jack_tbl_get(spec->codec, spec->unsol_tag_hp);
	if (jack) {
		jack->block_report = 0;
		snd_hda_jack_report_sync(spec->codec);
	}
}
/* Forward declarations for helpers used by the selection logic below. */
static void ca0132_set_dmic(struct hda_codec *codec, int enable);
static int ca0132_mic_boost_set(struct hda_codec *codec, long val);
static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val);
/*
 * Select the active VIP source
 */
static int ca0132_set_vipsource(struct hda_codec *codec, int val)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int tmp;

	if (spec->dsp_state != DSP_DOWNLOADED)
		return 0;

	/* if CrystalVoice is off, vipsource should be 0 */
	if (!spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID] ||
	    (val == 0)) {
		chipio_set_control_param(codec, CONTROL_PARAM_VIP_SOURCE, 0);
		chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
		chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
		/* FLOAT_TWO for the digital-mic path, FLOAT_ONE otherwise —
		 * presumably a mic path scale; confirm against DSP docs. */
		if (spec->cur_mic_type == DIGITAL_MIC)
			tmp = FLOAT_TWO;
		else
			tmp = FLOAT_ONE;
		dspio_set_uint_param(codec, 0x80, 0x00, tmp);
		tmp = FLOAT_ZERO;
		dspio_set_uint_param(codec, 0x80, 0x05, tmp);
	} else {
		/* CrystalVoice active: run the mic connections at 16 kHz. */
		chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_16_000);
		chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_16_000);
		if (spec->cur_mic_type == DIGITAL_MIC)
			tmp = FLOAT_TWO;
		else
			tmp = FLOAT_ONE;
		dspio_set_uint_param(codec, 0x80, 0x00, tmp);
		tmp = FLOAT_ONE;
		dspio_set_uint_param(codec, 0x80, 0x05, tmp);
		/* Let the DSP settle before switching the VIP source. */
		msleep(20);
		chipio_set_control_param(codec, CONTROL_PARAM_VIP_SOURCE, val);
	}

	return 1;
}
/*
 * Select the active microphone.
 * If autodetect is enabled, mic will be selected based on jack detection.
 * If jack inserted, ext.mic will be selected, else built-in mic
 * If autodetect is disabled, mic will be selected based on selection.
 */
static int ca0132_select_mic(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	int jack_present;
	int auto_jack;

	codec_dbg(codec, "ca0132_select_mic\n");

	snd_hda_power_up_pm(codec);

	auto_jack = spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];

	if (auto_jack)
		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_amic1);
	else
		jack_present =
			spec->vnode_lswitch[VNID_AMIC1_SEL - VNODE_START_NID];

	if (jack_present)
		spec->cur_mic_type = LINE_MIC_IN;
	else
		spec->cur_mic_type = DIGITAL_MIC;

	if (spec->cur_mic_type == DIGITAL_MIC) {
		/* enable digital Mic */
		chipio_set_conn_rate(codec, MEM_CONNID_DMIC, SR_32_000);
		ca0132_set_dmic(codec, 1);
		ca0132_mic_boost_set(codec, 0);	/* no analog boost for dmic */
		/* set voice focus */
		ca0132_effects_set(codec, VOICE_FOCUS,
				   spec->effects_switch
				   [VOICE_FOCUS - EFFECT_START_NID]);
	} else {
		/* disable digital Mic */
		chipio_set_conn_rate(codec, MEM_CONNID_DMIC, SR_96_000);
		ca0132_set_dmic(codec, 0);
		ca0132_mic_boost_set(codec, spec->cur_mic_boost);
		/* disable voice focus */
		ca0132_effects_set(codec, VOICE_FOCUS, 0);
	}

	snd_hda_power_down_pm(codec);

	return 0;
}
/*
 * Check whether a virtual-node (VNID) control change takes effect on the
 * hardware immediately.  Only the speaker and mic vnodes map onto a real
 * shared widget; its nid is returned through @shared_nid when non-NULL.
 */
static bool ca0132_is_vnode_effective(struct hda_codec *codec,
				      hda_nid_t vnid,
				      hda_nid_t *shared_nid)
{
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t real_nid;

	if (vnid == VNID_SPK)
		real_nid = spec->shared_out_nid;
	else if (vnid == VNID_MIC)
		real_nid = spec->shared_mic_nid;
	else
		return false;

	if (shared_nid)
		*shared_nid = real_nid;
	return true;
}
/*
 * The following functions are control change helpers.
 * They return 0 if no changed.  Return 1 if changed.
 */
static int ca0132_voicefx_set(struct hda_codec *codec, int enable)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int state = FLOAT_ZERO;

	/* VoiceFX may only be active while CrystalVoice itself is on */
	if (enable && spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID])
		state = FLOAT_ONE;

	dspio_set_uint_param(codec, ca0132_voicefx.mid,
			     ca0132_voicefx.reqs[0], state);

	return 1;
}
/*
 * Set the effects parameters.
 *
 * @nid: effect widget id (EFFECT_START_NID .. EFFECT_END_NID)
 * @val: non-zero to enable the effect, 0 to disable it
 *
 * Returns 1 if the DSP parameter was updated, 0 if nothing changed.
 */
static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int on;
	int num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
	int err = 0;
	int idx = nid - EFFECT_START_NID;

	if ((idx < 0) || (idx >= num_fx))
		return 0; /* no changed */

	/* for out effect, qualify with PE */
	if ((nid >= OUT_EFFECT_START_NID) && (nid < OUT_EFFECT_END_NID)) {
		/* if PE is off, turn off out effects. */
		if (!spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
			val = 0;
	}

	/* for in effect, qualify with CrystalVoice */
	if ((nid >= IN_EFFECT_START_NID) && (nid < IN_EFFECT_END_NID)) {
		/* if CrystalVoice is off, turn off in effects. */
		if (!spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID])
			val = 0;

		/* Voice Focus applies to 2-ch Mic, Digital Mic */
		if ((nid == VOICE_FOCUS) && (spec->cur_mic_type != DIGITAL_MIC))
			val = 0;
	}

	codec_dbg(codec, "ca0132_effect_set: nid=0x%x, val=%ld\n",
		  nid, val);

	on = (val == 0) ? FLOAT_ZERO : FLOAT_ONE;
	err = dspio_set_uint_param(codec, ca0132_effects[idx].mid,
				   ca0132_effects[idx].reqs[0], on);

	if (err < 0)
		return 0; /* no changed */

	return 1;
}
/*
 * Turn on/off Playback Enhancements.  PE gates every output effect, so
 * re-apply each one's cached switch state through ca0132_effects_set().
 */
static int ca0132_pe_switch_set(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid;
	int idx, ret = 0;

	codec_dbg(codec, "ca0132_pe_switch_set: val=%ld\n",
		  spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]);

	for (nid = OUT_EFFECT_START_NID; nid < OUT_EFFECT_END_NID; nid++) {
		idx = nid - EFFECT_START_NID;
		ret |= ca0132_effects_set(codec, nid,
					  spec->effects_switch[idx]);
	}
	return ret;
}
/* Check if Mic1 (adcs[0]) is tied to a stream; if so, detach it.
 * Returns the previous stream/channel value so it can be restored
 * later via resume_mic1().
 */
static int stop_mic1(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int prev;

	prev = snd_hda_codec_read(codec, spec->adcs[0], 0,
				  AC_VERB_GET_CONV, 0);
	if (prev)
		snd_hda_codec_write(codec, spec->adcs[0], 0,
				    AC_VERB_SET_CHANNEL_STREAMID, 0);
	return prev;
}
/* Resume Mic1 streaming if it was stopped by stop_mic1().
 * @oldval: the stream/channel value stop_mic1() returned.
 */
static void resume_mic1(struct hda_codec *codec, unsigned int oldval)
{
	struct ca0132_spec *spec = codec->spec;

	if (!oldval)
		return;	/* nothing was streaming before */

	snd_hda_codec_write(codec, spec->adcs[0], 0,
			    AC_VERB_SET_CHANNEL_STREAMID, oldval);
}
/*
 * Turn on/off CrystalVoice
 */
static int ca0132_cvoice_switch_set(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid;
	int i, ret = 0;
	unsigned int oldval;

	codec_dbg(codec, "ca0132_cvoice_switch_set: val=%ld\n",
		  spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID]);

	/* CrystalVoice affects all in effects; re-apply their states */
	i = IN_EFFECT_START_NID - EFFECT_START_NID;
	nid = IN_EFFECT_START_NID;
	for (; nid < IN_EFFECT_END_NID; nid++, i++)
		ret |= ca0132_effects_set(codec, nid, spec->effects_switch[i]);

	/* including VoiceFX */
	ret |= ca0132_voicefx_set(codec, (spec->voicefx_val ? 1 : 0));

	/* set correct vipsource; pause Mic1 streaming while rerouting */
	oldval = stop_mic1(codec);
	ret |= ca0132_set_vipsource(codec, 1);
	resume_mic1(codec, oldval);
	return ret;
}
/* Enable/disable the analog Mic1 boost amp.
 * @val: non-zero turns the boost on (gain step 3), zero turns it off.
 * Returns the snd_hda_codec_amp_update() result.
 */
static int ca0132_mic_boost_set(struct hda_codec *codec, long val)
{
	struct ca0132_spec *spec = codec->spec;
	int gain = val ? 3 : 0;

	return snd_hda_codec_amp_update(codec, spec->input_pins[0], 0,
					HDA_INPUT, 0, HDA_AMP_VOLMASK, gain);
}
/* Apply a virtual-node switch change: the selector vnodes trigger output
 * or mic re-selection, while the speaker/mic vnodes are forwarded to the
 * shared real widget when effective.
 */
static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = get_amp_nid(kcontrol);
	hda_nid_t shared_nid = 0;
	bool effective;
	int ret = 0;
	struct ca0132_spec *spec = codec->spec;
	int auto_jack;

	/* manual HP/speaker selection only applies while autodetect is off */
	if (nid == VNID_HP_SEL) {
		auto_jack =
			spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
		if (!auto_jack)
			ca0132_select_out(codec);
		return 1;
	}

	/* manual AMic1/DMic selection only applies while autodetect is off */
	if (nid == VNID_AMIC1_SEL) {
		auto_jack =
			spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];
		if (!auto_jack)
			ca0132_select_mic(codec);
		return 1;
	}

	/* toggling autodetect re-evaluates the routing immediately */
	if (nid == VNID_HP_ASEL) {
		ca0132_select_out(codec);
		return 1;
	}

	if (nid == VNID_AMIC1_ASEL) {
		ca0132_select_mic(codec);
		return 1;
	}

	/* if effective conditions, then update hw immediately. */
	effective = ca0132_is_vnode_effective(codec, nid, &shared_nid);
	if (effective) {
		int dir = get_amp_direction(kcontrol);
		int ch = get_amp_channels(kcontrol);
		unsigned long pval;

		/* temporarily retarget the kcontrol at the shared real
		 * widget, reuse the generic amp-switch handler, then
		 * restore the original private_value
		 */
		mutex_lock(&codec->control_mutex);
		pval = kcontrol->private_value;
		kcontrol->private_value = HDA_COMPOSE_AMP_VAL(shared_nid, ch,
							      0, dir);
		ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
		kcontrol->private_value = pval;
		mutex_unlock(&codec->control_mutex);
	}

	return ret;
}
/* End of control change helpers. */
/*
 * VoiceFX preset enum control: report the number of presets and the name
 * of the queried item.
 */
static int ca0132_voicefx_info(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	/* kernel idiom: ARRAY_SIZE instead of hand-rolled sizeof division */
	unsigned int items = ARRAY_SIZE(ca0132_voicefx_presets);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = items;
	/* clamp the queried index to the last valid preset */
	if (uinfo->value.enumerated.item >= items)
		uinfo->value.enumerated.item = items - 1;
	strcpy(uinfo->value.enumerated.name,
	       ca0132_voicefx_presets[uinfo->value.enumerated.item].name);
	return 0;
}
static int ca0132_voicefx_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct ca0132_spec *spec = codec->spec;
ucontrol->value.enumerated.item[0] = spec->voicefx_val;
return 0;
}
/*
 * Apply a VoiceFX preset selected from user space.
 *
 * Returns 1 after attempting to program the preset (the cached value is
 * only updated when all DSP writes succeeded), 0 when the selection index
 * is out of range.
 */
static int ca0132_voicefx_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	int i, err = 0;
	int sel = ucontrol->value.enumerated.item[0];
	/* kernel idiom: ARRAY_SIZE, consistent with ca0132_voicefx_info() */
	unsigned int items = ARRAY_SIZE(ca0132_voicefx_presets);

	if (sel >= items)
		return 0;

	codec_dbg(codec, "ca0132_voicefx_put: sel=%d, preset=%s\n",
		  sel, ca0132_voicefx_presets[sel].name);

	/*
	 * Idx 0 is default.
	 * Default needs to qualify with CrystalVoice state.
	 */
	for (i = 0; i < VOICEFX_MAX_PARAM_COUNT; i++) {
		err = dspio_set_uint_param(codec, ca0132_voicefx.mid,
					   ca0132_voicefx.reqs[i],
					   ca0132_voicefx_presets[sel].vals[i]);
		if (err < 0)
			break;
	}

	if (err >= 0) {
		spec->voicefx_val = sel;
		/* enable voice fx */
		ca0132_voicefx_set(codec, (sel ? 1 : 0));
	}

	return 1;
}
/* Read back a switch control from the driver's cached state: vnode
 * switches, effect switches (incl. PE/CrystalVoice) or mic boost.
 */
static int ca0132_switch_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	long *valp = ucontrol->value.integer.value;

	if (nid >= VNODE_START_NID && nid < VNODE_END_NID) {
		/* virtual nodes: cached left/right switch state */
		int idx = nid - VNODE_START_NID;

		if (ch & 1)
			*valp++ = spec->vnode_lswitch[idx];
		if (ch & 2)
			*valp++ = spec->vnode_rswitch[idx];
	} else if (nid >= EFFECT_START_NID && nid < EFFECT_END_NID) {
		/* effect nodes, including PE and CrystalVoice */
		*valp = spec->effects_switch[nid - EFFECT_START_NID];
	} else if (nid == spec->input_pins[0]) {
		/* mic boost */
		*valp = spec->cur_mic_boost;
	}
	return 0;
}
/* Apply a switch control change: cache the new state, then dispatch to
 * the appropriate helper (vnode, PE, CrystalVoice, effect or mic boost).
 * Returns 1 when the control changed, the helper's result otherwise.
 */
static int ca0132_switch_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	long *valp = ucontrol->value.integer.value;
	int changed = 1;

	codec_dbg(codec, "ca0132_switch_put: nid=0x%x, val=%ld\n",
		  nid, *valp);

	/* keep the codec powered while programming hardware */
	snd_hda_power_up(codec);

	/* vnode */
	if ((nid >= VNODE_START_NID) && (nid < VNODE_END_NID)) {
		if (ch & 1) {
			spec->vnode_lswitch[nid - VNODE_START_NID] = *valp;
			valp++;
		}
		if (ch & 2) {
			spec->vnode_rswitch[nid - VNODE_START_NID] = *valp;
			valp++;
		}
		changed = ca0132_vnode_switch_set(kcontrol, ucontrol);
		goto exit;
	}

	/* PE */
	if (nid == PLAY_ENHANCEMENT) {
		spec->effects_switch[nid - EFFECT_START_NID] = *valp;
		changed = ca0132_pe_switch_set(codec);
		goto exit;
	}

	/* CrystalVoice */
	if (nid == CRYSTAL_VOICE) {
		spec->effects_switch[nid - EFFECT_START_NID] = *valp;
		changed = ca0132_cvoice_switch_set(codec);
		goto exit;
	}

	/* out and in effects */
	if (((nid >= OUT_EFFECT_START_NID) && (nid < OUT_EFFECT_END_NID)) ||
	    ((nid >= IN_EFFECT_START_NID) && (nid < IN_EFFECT_END_NID))) {
		spec->effects_switch[nid - EFFECT_START_NID] = *valp;
		changed = ca0132_effects_set(codec, nid, *valp);
		goto exit;
	}

	/* mic boost */
	if (nid == spec->input_pins[0]) {
		spec->cur_mic_boost = *valp;

		/* Mic boost does not apply to Digital Mic */
		if (spec->cur_mic_type != DIGITAL_MIC)
			changed = ca0132_mic_boost_set(codec, *valp);
		goto exit;
	}

exit:
	snd_hda_power_down(codec);
	return changed;
}
/*
 * Volume related
 */
/* Volume info callback: the speaker/mic virtual nodes borrow the info of
 * the real shared widget behind them; everything else is handled by the
 * generic amp helper directly.
 */
static int ca0132_volume_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	int dir = get_amp_direction(kcontrol);
	unsigned long pval;
	int err;

	if (nid == VNID_SPK || nid == VNID_MIC) {
		nid = (nid == VNID_SPK) ? spec->shared_out_nid
					: spec->shared_mic_nid;
		/* retarget the control at the shared widget, query it,
		 * then restore the original private_value
		 */
		mutex_lock(&codec->control_mutex);
		pval = kcontrol->private_value;
		kcontrol->private_value = HDA_COMPOSE_AMP_VAL(nid, ch, 0, dir);
		err = snd_hda_mixer_amp_volume_info(kcontrol, uinfo);
		kcontrol->private_value = pval;
		mutex_unlock(&codec->control_mutex);
		return err;
	}

	return snd_hda_mixer_amp_volume_info(kcontrol, uinfo);
}
/* Report the cached left/right vnode volumes to user space. */
static int ca0132_volume_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	long *valp = ucontrol->value.integer.value;
	int idx = nid - VNODE_START_NID;

	/* left first, then right, matching the cached vnode state */
	if (ch & 1)
		*valp++ = spec->vnode_lvol[idx];
	if (ch & 2)
		*valp++ = spec->vnode_rvol[idx];
	return 0;
}
/* Store a vnode volume change and, when the vnode maps onto a real shared
 * widget, program the hardware through the generic amp helper.
 */
static int ca0132_volume_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	long *valp = ucontrol->value.integer.value;
	hda_nid_t shared_nid = 0;
	bool effective;
	int changed = 1;

	/* store the left and right volume */
	if (ch & 1) {
		spec->vnode_lvol[nid - VNODE_START_NID] = *valp;
		valp++;
	}
	if (ch & 2) {
		spec->vnode_rvol[nid - VNODE_START_NID] = *valp;
		valp++;
	}

	/* if effective conditions, then update hw immediately. */
	effective = ca0132_is_vnode_effective(codec, nid, &shared_nid);
	if (effective) {
		int dir = get_amp_direction(kcontrol);
		unsigned long pval;

		/* retarget the control at the shared real widget so the
		 * generic amp handler programs the hardware, then restore
		 * the original private_value
		 */
		snd_hda_power_up(codec);
		mutex_lock(&codec->control_mutex);
		pval = kcontrol->private_value;
		kcontrol->private_value = HDA_COMPOSE_AMP_VAL(shared_nid, ch,
							      0, dir);
		changed = snd_hda_mixer_amp_volume_put(kcontrol, ucontrol);
		kcontrol->private_value = pval;
		mutex_unlock(&codec->control_mutex);
		snd_hda_power_down(codec);
	}

	return changed;
}
/* TLV (dB range) callback: the speaker/mic virtual nodes borrow the TLV
 * data of the real shared widget behind them.
 */
static int ca0132_volume_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct ca0132_spec *spec = codec->spec;
	hda_nid_t nid = get_amp_nid(kcontrol);
	int ch = get_amp_channels(kcontrol);
	int dir = get_amp_direction(kcontrol);
	unsigned long pval;
	int err;

	if (nid == VNID_SPK || nid == VNID_MIC) {
		nid = (nid == VNID_SPK) ? spec->shared_out_nid
					: spec->shared_mic_nid;
		/* retarget the control at the shared widget, query it,
		 * then restore the original private_value
		 */
		mutex_lock(&codec->control_mutex);
		pval = kcontrol->private_value;
		kcontrol->private_value = HDA_COMPOSE_AMP_VAL(nid, ch, 0, dir);
		err = snd_hda_mixer_amp_tlv(kcontrol, op_flag, size, tlv);
		kcontrol->private_value = pval;
		mutex_unlock(&codec->control_mutex);
		return err;
	}

	return snd_hda_mixer_amp_tlv(kcontrol, op_flag, size, tlv);
}
/* Create a mute-style switch control for one effect widget.
 * @pfx: control name prefix; @dir: 0 = playback, non-zero = capture.
 * Returns the snd_hda_ctl_add() result.
 */
static int add_fx_switch(struct hda_codec *codec, hda_nid_t nid,
			 const char *pfx, int dir)
{
	char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
	int type = dir ? HDA_INPUT : HDA_OUTPUT;
	struct snd_kcontrol_new knew =
		CA0132_CODEC_MUTE_MONO(namestr, nid, 1, type);

	/* bounded format: the unchecked sprintf could overflow namestr
	 * for a long prefix; snprintf always NUL-terminates within it
	 */
	snprintf(namestr, sizeof(namestr), "%s %s Switch", pfx, dirstr[dir]);
	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
}
/* Create the VoiceFX preset selector control.  A mono-mute template is
 * reused and its callbacks are replaced to make an enumerated control.
 * Returns the snd_hda_ctl_add() result.
 */
static int add_voicefx(struct hda_codec *codec)
{
	struct snd_kcontrol_new knew =
		HDA_CODEC_MUTE_MONO(ca0132_voicefx.name,
				    VOICEFX, 1, 0, HDA_INPUT);
	knew.info = ca0132_voicefx_info;
	knew.get = ca0132_voicefx_get;
	knew.put = ca0132_voicefx_put;
	return snd_hda_ctl_add(codec, VOICEFX, snd_ctl_new1(&knew, codec));
}
/*
 * When changing Node IDs for Mixer Controls below, make sure to update
 * Node IDs in ca0132_config() as well.
 */
static struct snd_kcontrol_new ca0132_mixer[] = {
	/* virtual-node controls, routed to the shared real widgets */
	CA0132_CODEC_VOL("Master Playback Volume", VNID_SPK, HDA_OUTPUT),
	CA0132_CODEC_MUTE("Master Playback Switch", VNID_SPK, HDA_OUTPUT),
	CA0132_CODEC_VOL("Capture Volume", VNID_MIC, HDA_INPUT),
	CA0132_CODEC_MUTE("Capture Switch", VNID_MIC, HDA_INPUT),
	/* plain widget controls on fixed nids */
	HDA_CODEC_VOLUME("Analog-Mic2 Capture Volume", 0x08, 0, HDA_INPUT),
	HDA_CODEC_MUTE("Analog-Mic2 Capture Switch", 0x08, 0, HDA_INPUT),
	HDA_CODEC_VOLUME("What U Hear Capture Volume", 0x0a, 0, HDA_INPUT),
	HDA_CODEC_MUTE("What U Hear Capture Switch", 0x0a, 0, HDA_INPUT),
	CA0132_CODEC_MUTE_MONO("Mic1-Boost (30dB) Capture Switch",
			       0x12, 1, HDA_INPUT),
	/* selector vnodes: manual and auto-detect routing switches */
	CA0132_CODEC_MUTE_MONO("HP/Speaker Playback Switch",
			       VNID_HP_SEL, 1, HDA_OUTPUT),
	CA0132_CODEC_MUTE_MONO("AMic1/DMic Capture Switch",
			       VNID_AMIC1_SEL, 1, HDA_INPUT),
	CA0132_CODEC_MUTE_MONO("HP/Speaker Auto Detect Playback Switch",
			       VNID_HP_ASEL, 1, HDA_OUTPUT),
	CA0132_CODEC_MUTE_MONO("AMic1/DMic Auto Detect Capture Switch",
			       VNID_AMIC1_ASEL, 1, HDA_INPUT),
	{ } /* end */
};
/*
 * Build all mixer, effect, jack and SPDIF controls for the codec.
 * Returns 0 on success or a negative error code.
 */
static int ca0132_build_controls(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	int i, num_fx;
	int err = 0;

	/* Add Mixer controls */
	for (i = 0; i < spec->num_mixers; i++) {
		err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
		if (err < 0)
			return err;
	}

	/* Add in and out effects controls.
	 * VoiceFX, PE and CrystalVoice are added separately.
	 */
	num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
	for (i = 0; i < num_fx; i++) {
		err = add_fx_switch(codec, ca0132_effects[i].nid,
				    ca0132_effects[i].name,
				    ca0132_effects[i].direct);
		if (err < 0)
			return err;
	}

	err = add_fx_switch(codec, PLAY_ENHANCEMENT, "PlayEnhancement", 0);
	if (err < 0)
		return err;
	err = add_fx_switch(codec, CRYSTAL_VOICE, "CrystalVoice", 1);
	if (err < 0)
		return err;
	/* the return value was silently ignored before; propagate a
	 * control-creation failure like every other control above
	 */
	err = add_voicefx(codec);
	if (err < 0)
		return err;

#ifdef ENABLE_TUNING_CONTROLS
	add_tuning_ctls(codec);
#endif

	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
	if (err < 0)
		return err;

	if (spec->dig_out) {
		err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out,
						    spec->dig_out);
		if (err < 0)
			return err;
		err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
		if (err < 0)
			return err;
		/* spec->multiout.share_spdif = 1; */
	}

	if (spec->dig_in) {
		err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
		if (err < 0)
			return err;
	}
	return 0;
}
/*
 * PCM
 */
/* analog playback: up to 6 channels, DSP latency exposed via get_delay */
static struct hda_pcm_stream ca0132_pcm_analog_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 6,
	.ops = {
		.prepare = ca0132_playback_pcm_prepare,
		.cleanup = ca0132_playback_pcm_cleanup,
		.get_delay = ca0132_playback_pcm_delay,
	},
};

/* stereo analog capture; shared by Mic1, Mic2 and What-U-Hear devices */
static struct hda_pcm_stream ca0132_pcm_analog_capture = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.ops = {
		.prepare = ca0132_capture_pcm_prepare,
		.cleanup = ca0132_capture_pcm_cleanup,
		.get_delay = ca0132_capture_pcm_delay,
	},
};

/* stereo S/PDIF playback */
static struct hda_pcm_stream ca0132_pcm_digital_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.ops = {
		.open = ca0132_dig_playback_pcm_open,
		.close = ca0132_dig_playback_pcm_close,
		.prepare = ca0132_dig_playback_pcm_prepare,
		.cleanup = ca0132_dig_playback_pcm_cleanup
	},
};

/* stereo S/PDIF capture; uses the default stream ops */
static struct hda_pcm_stream ca0132_pcm_digital_capture = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
};
/*
 * Create the PCM devices: analog (playback + Mic1 capture), Mic2 capture,
 * What-U-Hear capture and, when a digital pin exists, an S/PDIF stream.
 * Returns 0 on success or -ENOMEM.
 */
static int ca0132_build_pcms(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	struct hda_pcm *info;

	info = snd_hda_codec_pcm_new(codec, "CA0132 Analog");
	if (!info)
		return -ENOMEM;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0132_pcm_analog_playback;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dacs[0];
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
		spec->multiout.max_channels;
	info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];

	info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
	if (!info)
		return -ENOMEM;
	info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[1];

	info = snd_hda_codec_pcm_new(codec, "CA0132 What U Hear");
	if (!info)
		return -ENOMEM;
	info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[2];

	/* S/PDIF device only when a digital out or in pin was configured */
	if (!spec->dig_out && !spec->dig_in)
		return 0;

	info = snd_hda_codec_pcm_new(codec, "CA0132 Digital");
	if (!info)
		return -ENOMEM;
	info->pcm_type = HDA_PCM_TYPE_SPDIF;
	if (spec->dig_out) {
		info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
			ca0132_pcm_digital_playback;
		info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out;
	}
	if (spec->dig_in) {
		info->stream[SNDRV_PCM_STREAM_CAPTURE] =
			ca0132_pcm_digital_capture;
		info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
	}
	return 0;
}
/* Enable and unmute one output path: the pin (as an HP-style output) and
 * its DAC.  Either nid may be 0, in which case that half is skipped.
 */
static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
{
	if (pin) {
		snd_hda_set_pin_ctl(codec, pin, PIN_HP);
		if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
			snd_hda_codec_write(codec, pin, 0,
					    AC_VERB_SET_AMP_GAIN_MUTE,
					    AMP_OUT_UNMUTE);
	}

	if (!dac)
		return;
	if (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP)
		snd_hda_codec_write(codec, dac, 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);
}
/* Enable one input path: bias the pin for an analog mic, unmute it, and
 * bring the ADC input amp up at 0 dB, unmuted.  Either nid may be 0.
 */
static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)
{
	if (pin) {
		snd_hda_set_pin_ctl(codec, pin, PIN_VREF80);
		if (get_wcaps(codec, pin) & AC_WCAP_IN_AMP)
			snd_hda_codec_write(codec, pin, 0,
					    AC_VERB_SET_AMP_GAIN_MUTE,
					    AMP_IN_UNMUTE(0));
	}

	if (!adc || !(get_wcaps(codec, adc) & AC_WCAP_IN_AMP))
		return;

	snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE,
			    AMP_IN_UNMUTE(0));
	/* init to 0 dB and unmute. */
	snd_hda_codec_amp_stereo(codec, adc, HDA_INPUT, 0,
				 HDA_AMP_VOLMASK, 0x5a);
	snd_hda_codec_amp_stereo(codec, adc, HDA_INPUT, 0,
				 HDA_AMP_MUTE, 0);
}
/* Re-read the amp capabilities of @nid for direction @dir and push them
 * into the codec's caps cache.
 */
static void refresh_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir)
{
	unsigned int param = (dir == HDA_OUTPUT) ? AC_PAR_AMP_OUT_CAP
						 : AC_PAR_AMP_IN_CAP;

	snd_hda_override_amp_caps(codec, nid, dir,
				  snd_hda_param_read(codec, nid, param));
}
/*
 * Switch between Digital built-in mic and analog mic.
 */
static void ca0132_set_dmic(struct hda_codec *codec, int enable)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int tmp;
	u8 val;
	unsigned int oldval;

	codec_dbg(codec, "ca0132_set_dmic: enable=%d\n", enable);

	/* pause Mic1 streaming and detach the VIP source while switching */
	oldval = stop_mic1(codec);
	ca0132_set_vipsource(codec, 0);
	if (enable) {
		/* set DMic input as 2-ch */
		tmp = FLOAT_TWO;
		dspio_set_uint_param(codec, 0x80, 0x00, tmp);

		/* bit7 of dmic_ctl enables the DMic (see ca0132_init_dmic) */
		val = spec->dmic_ctl;
		val |= 0x80;
		snd_hda_codec_write(codec, spec->input_pins[0], 0,
				    VENDOR_CHIPIO_DMIC_CTL_SET, val);

		if (!(spec->dmic_ctl & 0x20))
			chipio_set_control_flag(codec, CONTROL_FLAG_DMIC, 1);
	} else {
		/* set AMic input as mono */
		tmp = FLOAT_ONE;
		dspio_set_uint_param(codec, 0x80, 0x00, tmp);

		val = spec->dmic_ctl;
		/* clear bit7 and bit5 to disable dmic */
		val &= 0x5f;
		snd_hda_codec_write(codec, spec->input_pins[0], 0,
				    VENDOR_CHIPIO_DMIC_CTL_SET, val);

		if (!(spec->dmic_ctl & 0x20))
			chipio_set_control_flag(codec, CONTROL_FLAG_DMIC, 0);
	}
	/* reattach the VIP source and resume Mic1 streaming */
	ca0132_set_vipsource(codec, 1);
	resume_mic1(codec, oldval);
}
/*
 * Initialization for Digital Mic.
 */
static void ca0132_init_dmic(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	u8 val;

	/* Setup Digital Mic here, but don't enable.
	 * Enable based on jack detect.
	 */

	/* MCLK uses MPIO1, set to enable.
	 * Bit 2-0: MPIO select
	 * Bit   3: set to disable
	 * Bit 7-4: reserved
	 */
	val = 0x01;
	snd_hda_codec_write(codec, spec->input_pins[0], 0,
			    VENDOR_CHIPIO_DMIC_MCLK_SET, val);

	/* Data1 uses MPIO3. Data2 not use
	 * Bit 2-0: Data1 MPIO select
	 * Bit   3: set disable Data1
	 * Bit 6-4: Data2 MPIO select
	 * Bit   7: set disable Data2
	 */
	val = 0x83;
	snd_hda_codec_write(codec, spec->input_pins[0], 0,
			    VENDOR_CHIPIO_DMIC_PIN_SET, val);

	/* Use Ch-0 and Ch-1. Rate is 48K, mode 1. Disable DMic first.
	 * Bit 3-0: Channel mask
	 * Bit   4: set for 48KHz, clear for 32KHz
	 * Bit   5: mode
	 * Bit   6: set to select Data2, clear for Data1
	 * Bit   7: set to enable DMic, clear for AMic
	 */
	val = 0x23;
	/* keep a copy of dmic ctl val for enable/disable dmic purpose */
	spec->dmic_ctl = val;
	snd_hda_codec_write(codec, spec->input_pins[0], 0,
			    VENDOR_CHIPIO_DMIC_CTL_SET, val);
}
/*
 * Initialization for Analog Mic 2
 */
static void ca0132_init_analog_mic2(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;

	mutex_lock(&spec->chipio_mutex);
	/* write 0x00 into two on-chip 8051 memory locations; the address
	 * is programmed low byte first, then high byte (0x19 -> 0x19xx).
	 * The meaning of these locations is vendor-undocumented.
	 */
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x20);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_HIGH, 0x19);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_DATA_WRITE, 0x00);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x2D);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_ADDRESS_HIGH, 0x19);
	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
			    VENDOR_CHIPIO_8051_DATA_WRITE, 0x00);
	mutex_unlock(&spec->chipio_mutex);
}
/* Re-read widget and amp capabilities for all DACs, output pins, ADCs and
 * input pins (caps can differ once the DSP is running).
 */
static void ca0132_refresh_widget_caps(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	int i;

	codec_dbg(codec, "ca0132_refresh_widget_caps.\n");
	snd_hda_codec_update_widgets(codec);

	for (i = 0; i < spec->multiout.num_dacs; i++)
		refresh_amp_caps(codec, spec->dacs[i], HDA_OUTPUT);

	for (i = 0; i < spec->num_outputs; i++)
		refresh_amp_caps(codec, spec->out_pins[i], HDA_OUTPUT);

	for (i = 0; i < spec->num_inputs; i++) {
		refresh_amp_caps(codec, spec->adcs[i], HDA_INPUT);
		refresh_amp_caps(codec, spec->input_pins[i], HDA_INPUT);
	}
}
/*
 * Setup default parameters for DSP
 */
static void ca0132_setup_defaults(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	unsigned int tmp;
	int num_fx;
	int idx, i;

	/* without a downloaded DSP there is nothing to program */
	if (spec->dsp_state != DSP_DOWNLOADED)
		return;

	/* out, in effects + voicefx: program every parameter's default */
	num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
	for (idx = 0; idx < num_fx; idx++) {
		for (i = 0; i <= ca0132_effects[idx].params; i++) {
			dspio_set_uint_param(codec, ca0132_effects[idx].mid,
					     ca0132_effects[idx].reqs[i],
					     ca0132_effects[idx].def_vals[i]);
		}
	}

	/*remove DSP headroom*/
	tmp = FLOAT_ZERO;
	dspio_set_uint_param(codec, 0x96, 0x3C, tmp);

	/*set speaker EQ bypass attenuation*/
	dspio_set_uint_param(codec, 0x8f, 0x01, tmp);

	/* set AMic1 and AMic2 as mono mic */
	tmp = FLOAT_ONE;
	dspio_set_uint_param(codec, 0x80, 0x00, tmp);
	dspio_set_uint_param(codec, 0x80, 0x01, tmp);

	/* set AMic1 as CrystalVoice input */
	tmp = FLOAT_ONE;
	dspio_set_uint_param(codec, 0x80, 0x05, tmp);

	/* set WUH source */
	tmp = FLOAT_TWO;
	dspio_set_uint_param(codec, 0x31, 0x00, tmp);
}
/*
 * Initialization of flags in chip
 */
static void ca0132_init_flags(struct hda_codec *codec)
{
	/* clear idle/common-mode/load flags; keep ADC-C high-pass on */
	chipio_set_control_flag(codec, CONTROL_FLAG_IDLE_ENABLE, 0);
	chipio_set_control_flag(codec, CONTROL_FLAG_PORT_A_COMMON_MODE, 0);
	chipio_set_control_flag(codec, CONTROL_FLAG_PORT_D_COMMON_MODE, 0);
	chipio_set_control_flag(codec, CONTROL_FLAG_PORT_A_10KOHM_LOAD, 0);
	chipio_set_control_flag(codec, CONTROL_FLAG_PORT_D_10KOHM_LOAD, 0);
	chipio_set_control_flag(codec, CONTROL_FLAG_ADC_C_HIGH_PASS, 1);
}
/*
 * Initialization of parameters in chip
 */
static void ca0132_init_params(struct hda_codec *codec)
{
	/* gain setting 6 for the 160-ohm load on ports A and D */
	chipio_set_control_param(codec, CONTROL_PARAM_PORTA_160OHM_GAIN, 6);
	chipio_set_control_param(codec, CONTROL_PARAM_PORTD_160OHM_GAIN, 6);
}
/* Program the DSP/converter clock flags for 96 kHz operation when
 * @is96k is true, and set the default mic/WUH connection rates.
 */
static void ca0132_set_dsp_msr(struct hda_codec *codec, bool is96k)
{
	chipio_set_control_flag(codec, CONTROL_FLAG_DSP_96KHZ, is96k);
	chipio_set_control_flag(codec, CONTROL_FLAG_DAC_96KHZ, is96k);
	chipio_set_control_flag(codec, CONTROL_FLAG_SRC_RATE_96KHZ, is96k);
	chipio_set_control_flag(codec, CONTROL_FLAG_SRC_CLOCK_196MHZ, is96k);
	chipio_set_control_flag(codec, CONTROL_FLAG_ADC_B_96KHZ, is96k);
	chipio_set_control_flag(codec, CONTROL_FLAG_ADC_C_96KHZ, is96k);

	chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
	chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
	chipio_set_conn_rate(codec, MEM_CONNID_WUH, SR_48_000);
}
/* Fetch the DSP OS image (EFX_FILE) via the firmware loader and download
 * it to the chip.  Returns true when the DSP reports it finished loading.
 */
static bool ca0132_download_dsp_images(struct hda_codec *codec)
{
	bool dsp_loaded = false;
	const struct dsp_image_seg *dsp_os_image;
	const struct firmware *fw_entry;

	if (request_firmware(&fw_entry, EFX_FILE, codec->card->dev) != 0)
		return false;

	dsp_os_image = (struct dsp_image_seg *)(fw_entry->data);
	if (dspload_image(codec, dsp_os_image, 0, 0, true, 0)) {
		codec_err(codec, "ca0132 DSP load image failed\n");
		goto exit_download;
	}

	dsp_loaded = dspload_wait_loaded(codec);

exit_download:
	/* the firmware blob is no longer needed once downloaded */
	release_firmware(fw_entry);

	return dsp_loaded;
}
/* Download the DSP firmware (no-op when DSP support is not built) and
 * track the result in spec->dsp_state; on success, switch the DSP path
 * to 96 kHz operation.
 */
static void ca0132_download_dsp(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;

#ifndef CONFIG_SND_HDA_CODEC_CA0132_DSP
	return; /* NOP */
#endif

	if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
		return; /* don't retry failures */

	chipio_enable_clocks(codec);
	spec->dsp_state = DSP_DOWNLOADING;
	if (!ca0132_download_dsp_images(codec))
		spec->dsp_state = DSP_DOWNLOAD_FAILED;
	else
		spec->dsp_state = DSP_DOWNLOADED;

	if (spec->dsp_state == DSP_DOWNLOADED)
		ca0132_set_dsp_msr(codec, true);
}
/* Unsolicited-event handler for DSP responses: consume a pending SCP
 * reply if one is awaited, then drain the response queue.
 */
static void ca0132_process_dsp_response(struct hda_codec *codec,
					struct hda_jack_callback *callback)
{
	struct ca0132_spec *spec = codec->spec;

	codec_dbg(codec, "ca0132_process_dsp_response\n");

	if (spec->wait_scp && dspio_get_response_data(codec) >= 0)
		spec->wait_scp = 0;

	dspio_clear_response_queue(codec);
}
/* Headphone-jack callback: defer output re-selection via unsol_hp_work
 * and block the jack report until that delayed work runs.
 */
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
	struct ca0132_spec *spec = codec->spec;
	struct hda_jack_tbl *tbl;

	/* Delay enabling the HP amp, to let the mic-detection
	 * state machine run.
	 */
	cancel_delayed_work_sync(&spec->unsol_hp_work);
	schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
	tbl = snd_hda_jack_tbl_get(codec, cb->nid);
	if (tbl)
		tbl->block_report = 1;
}
/* Analog-mic jack callback: simply re-run microphone selection. */
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
	ca0132_select_mic(codec);
}
/* Register the unsolicited-event callbacks: headphone jack, analog mic
 * jack and the DSP response tag.
 */
static void ca0132_init_unsol(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	snd_hda_jack_detect_enable_callback(codec, spec->unsol_tag_hp, hp_callback);
	snd_hda_jack_detect_enable_callback(codec, spec->unsol_tag_amic1,
					    amic_callback);
	snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_DSP,
					    ca0132_process_dsp_response);
}
/*
 * Verbs tables.
 */

/* Sends before DSP download. */
static struct hda_verb ca0132_base_init_verbs[] = {
	/*enable ct extension*/
	{0x15, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, 0x1},
	{}
};

/* Send at exit. */
static struct hda_verb ca0132_base_exit_verbs[] = {
	/*set afg to D3*/
	{0x01, AC_VERB_SET_POWER_STATE, 0x03},
	/*disable ct extension*/
	{0x15, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, 0},
	{}
};
/* Other verbs tables. Sends after DSP download. */
static struct hda_verb ca0132_init_verbs0[] = {
	/* chip init verbs
	 * NOTE(review): vendor-provided register poke sequence on the
	 * chip-control widget (0x15); the individual register meanings
	 * are undocumented.
	 */
	{0x15, 0x70D, 0xF0},
	{0x15, 0x70E, 0xFE},
	{0x15, 0x707, 0x75},
	{0x15, 0x707, 0xD3},
	{0x15, 0x707, 0x09},
	{0x15, 0x707, 0x53},
	{0x15, 0x707, 0xD4},
	{0x15, 0x707, 0xEF},
	{0x15, 0x707, 0x75},
	{0x15, 0x707, 0xD3},
	{0x15, 0x707, 0x09},
	{0x15, 0x707, 0x02},
	{0x15, 0x707, 0x37},
	{0x15, 0x707, 0x78},
	{0x15, 0x53C, 0xCE},
	{0x15, 0x575, 0xC9},
	{0x15, 0x53D, 0xCE},
	{0x15, 0x5B7, 0xC9},
	{0x15, 0x70D, 0xE8},
	{0x15, 0x70E, 0xFE},
	{0x15, 0x707, 0x02},
	{0x15, 0x707, 0x68},
	{0x15, 0x707, 0x62},
	{0x15, 0x53A, 0xCE},
	{0x15, 0x546, 0xC9},
	{0x15, 0x53B, 0xCE},
	{0x15, 0x5E8, 0xC9},
	{0x15, 0x717, 0x0D},
	{0x15, 0x718, 0x20},
	{}
};
/* One-time software-state initialization at patch time: mutex, default
 * routing, cached vnode volumes/switches and effect switch states.
 */
static void ca0132_init_chip(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	int num_fx;
	int i;
	unsigned int on;

	mutex_init(&spec->chipio_mutex);

	spec->cur_out_type = SPEAKER_OUT;
	spec->cur_mic_type = DIGITAL_MIC;
	spec->cur_mic_boost = 0;

	for (i = 0; i < VNODES_COUNT; i++) {
		/* 0x5a is the 0 dB amp step (see init_input) */
		spec->vnode_lvol[i] = 0x5a;
		spec->vnode_rvol[i] = 0x5a;
		spec->vnode_lswitch[i] = 0;
		spec->vnode_rswitch[i] = 0;
	}

	/*
	 * Default states for effects are in ca0132_effects[].
	 * NOTE(review): this reads reqs[0] (a request id) rather than an
	 * explicit default-state field — looks suspicious, but it matches
	 * the long-standing driver code; confirm before changing.
	 */
	num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
	for (i = 0; i < num_fx; i++) {
		on = (unsigned int)ca0132_effects[i].reqs[0];
		spec->effects_switch[i] = on ? 1 : 0;
	}

	spec->voicefx_val = 0;
	spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID] = 1;
	spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID] = 0;

#ifdef ENABLE_TUNING_CONTROLS
	ca0132_init_tuning_defaults(codec);
#endif
}
/* Chip cleanup on teardown: reset the DSP if an image is running. */
static void ca0132_exit_chip(struct hda_codec *codec)
{
	if (!dspload_is_loaded(codec))
		return;
	dsp_reset(codec);
}
/* Full codec initialization: download the DSP, program its defaults, set
 * up the digital/analog mics and activate all output and input paths.
 * Always returns 0.
 */
static int ca0132_init(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	/* a previous download failure is sticky (see ca0132_download_dsp) */
	if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
		spec->dsp_state = DSP_DOWNLOAD_INIT;
	spec->curr_chip_addx = INVALID_CHIP_ADDRESS;

	snd_hda_power_up_pm(codec);

	ca0132_init_unsol(codec);

	ca0132_init_params(codec);
	ca0132_init_flags(codec);
	snd_hda_sequence_write(codec, spec->base_init_verbs);
	ca0132_download_dsp(codec);
	ca0132_refresh_widget_caps(codec);
	ca0132_setup_defaults(codec);
	ca0132_init_analog_mic2(codec);
	ca0132_init_dmic(codec);

	/* unmute and activate every output path, then every input path */
	for (i = 0; i < spec->num_outputs; i++)
		init_output(codec, spec->out_pins[i], spec->dacs[0]);

	init_output(codec, cfg->dig_out_pins[0], spec->dig_out);

	for (i = 0; i < spec->num_inputs; i++)
		init_input(codec, spec->input_pins[i], spec->adcs[i]);

	init_input(codec, cfg->dig_in_pin, spec->dig_in);

	snd_hda_sequence_write(codec, spec->chip_init_verbs);
	snd_hda_sequence_write(codec, spec->spec_init_verbs);

	/* pick the initial output and mic based on current jack state */
	ca0132_select_out(codec);
	ca0132_select_mic(codec);

	snd_hda_jack_report_sync(codec);

	snd_hda_power_down_pm(codec);

	return 0;
}
/*
 * Codec .free callback: stop deferred work, run the exit verb sequence
 * while the chip is powered, then release the spec and its buffers.
 */
static void ca0132_free(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;

	/* Stop the deferred headphone-jack work before tearing down. */
	cancel_delayed_work_sync(&spec->unsol_hp_work);
	snd_hda_power_up(codec);
	/* Exit verbs and DSP reset must run while the codec is powered. */
	snd_hda_sequence_write(codec, spec->base_exit_verbs);
	ca0132_exit_chip(codec);
	snd_hda_power_down(codec);
	kfree(spec->spec_init_verbs);	/* allocated by ca0132_prepare_verbs() */
	kfree(codec->spec);
}
/* Codec operation table handed to the HDA core at patch time. */
static struct hda_codec_ops ca0132_patch_ops = {
	.build_controls = ca0132_build_controls,
	.build_pcms = ca0132_build_pcms,
	.init = ca0132_init,
	.free = ca0132_free,
	.unsol_event = snd_hda_jack_unsol_event,
};
/*
 * Fill in the board-level pin/converter layout.  The DAC/ADC and input
 * pin assignments are identical on all known boards; only the second
 * output pin, the unsolicited-event tags and the SPDIF wiring differ
 * between the Alienware quirk and the default configuration.
 */
static void ca0132_config(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	/* Three DACs feeding a stereo multiout. */
	spec->dacs[0] = 0x2;
	spec->dacs[1] = 0x3;
	spec->dacs[2] = 0x4;
	spec->multiout.dac_nids = spec->dacs;
	spec->multiout.num_dacs = 3;
	spec->multiout.max_channels = 2;

	/* Layout common to every board variant. */
	spec->num_outputs = 2;
	spec->out_pins[0] = 0x0b;	/* speaker out */
	spec->shared_out_nid = 0x2;
	spec->adcs[0] = 0x7;		/* digital mic / analog mic1 */
	spec->adcs[1] = 0x8;		/* analog mic2 */
	spec->adcs[2] = 0xa;		/* what u hear */
	spec->num_inputs = 3;
	spec->input_pins[0] = 0x12;
	spec->input_pins[1] = 0x11;
	spec->input_pins[2] = 0x13;
	spec->shared_mic_nid = 0x7;

	if (spec->quirk == QUIRK_ALIENWARE) {
		codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
		snd_hda_apply_pincfgs(codec, alienware_pincfgs);
		spec->out_pins[1] = 0x0f;
		spec->unsol_tag_hp = 0x0f;
		spec->unsol_tag_amic1 = 0x11;
	} else {
		spec->out_pins[1] = 0x10;	/* headphone out */
		spec->unsol_tag_hp = spec->out_pins[1];
		spec->unsol_tag_amic1 = spec->input_pins[0];

		/* SPDIF I/O */
		spec->dig_out = 0x05;
		spec->multiout.dig_out_nid = spec->dig_out;
		cfg->dig_out_pins[0] = 0x0c;
		cfg->dig_outs = 1;
		cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
		spec->dig_in = 0x09;
		cfg->dig_in_pin = 0x0e;
		cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
	}
}
/*
 * Build the per-board verb table sent by ca0132_init() after the chip
 * verbs: jack-detect enables for HP and MIC1 plus an EAPD config verb.
 * Returns 0 on success or -ENOMEM.  The buffer is freed by ca0132_free().
 */
static int ca0132_prepare_verbs(struct hda_codec *codec)
{
/* Verbs + terminator (an empty element) */
#define NUM_SPEC_VERBS 4
	struct ca0132_spec *spec = codec->spec;

	spec->chip_init_verbs = ca0132_init_verbs0;
	/*
	 * kcalloc() zeroes the buffer (the all-zero last entry is the
	 * sequence terminator) and checks the count * size multiplication
	 * for overflow; preferred over open-coded kzalloc(n * size).
	 */
	spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
					sizeof(struct hda_verb), GFP_KERNEL);
	if (!spec->spec_init_verbs)
		return -ENOMEM;

	/* HP jack autodetection */
	spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
	spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
	spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;

	/* MIC1 jack autodetection */
	spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
	spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
	spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;

	/* config EAPD */
	spec->spec_init_verbs[2].nid = 0x0b;
	spec->spec_init_verbs[2].param = 0x78D;
	spec->spec_init_verbs[2].verb = 0x00;

	/*
	 * Entry [NUM_SPEC_VERBS - 1] stays all-zero and terminates the
	 * sequence.  (A block of disabled EAPD bring-up verbs that indexed
	 * past NUM_SPEC_VERBS was removed; bump NUM_SPEC_VERBS if any of
	 * them are ever reinstated.)
	 */
	return 0;
}
/*
 * Probe entry point for the CA0132 codec: allocate and seed the spec,
 * detect board quirks, configure the pin layout and build the verb
 * tables.  Returns 0 on success or a negative errno.
 *
 * Fix: the original returned straight out of the two failure paths,
 * leaking 'spec' (and the verbs buffer from ca0132_prepare_verbs())
 * and leaving the delayed work armed.  Route errors through
 * ca0132_free(), which cancels the work and releases everything
 * hanging off codec->spec.
 */
static int patch_ca0132(struct hda_codec *codec)
{
	struct ca0132_spec *spec;
	int err;
	const struct snd_pci_quirk *quirk;

	codec_dbg(codec, "patch_ca0132\n");

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;
	spec->codec = codec;

	codec->patch_ops = ca0132_patch_ops;
	codec->pcm_format_first = 1;
	codec->no_sticky_stream = 1;

	/* Detect codec quirk */
	quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
	if (quirk)
		spec->quirk = quirk->value;
	else
		spec->quirk = QUIRK_NONE;

	spec->dsp_state = DSP_DOWNLOAD_INIT;
	spec->num_mixers = 1;
	spec->mixers[0] = ca0132_mixer;

	spec->base_init_verbs = ca0132_base_init_verbs;
	spec->base_exit_verbs = ca0132_base_exit_verbs;

	INIT_DELAYED_WORK(&spec->unsol_hp_work, ca0132_unsol_hp_delayed);

	ca0132_init_chip(codec);

	ca0132_config(codec);

	err = ca0132_prepare_verbs(codec);
	if (err < 0)
		goto error;

	err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
	if (err < 0)
		goto error;

	return 0;

 error:
	ca0132_free(codec);
	return err;
}
/*
 * patch entries
 */
static struct hda_device_id snd_hda_id_ca0132[] = {
	HDA_CODEC_ENTRY(0x11020011, "CA0132", patch_ca0132),
	{} /* terminator */
};
MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_ca0132);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Creative Sound Core3D codec");

/* Driver glue: binds the device table to the patch entry point above. */
static struct hda_codec_driver ca0132_driver = {
	.id = snd_hda_id_ca0132,
};

module_hda_codec_driver(ca0132_driver);
| gpl-2.0 |
giangnguyennet/linux | drivers/acpi/acpi_processor.c | 182 | 13426 | /*
* acpi_processor.c - ACPI processor enumeration support
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* Copyright (C) 2013, Intel Corporation
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <acpi/processor.h>
#include <asm/cpu.h>
#include "internal.h"
#define _COMPONENT	ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor");

/* Per-CPU pointer to the acpi_processor object for each logical CPU. */
DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

/* Chipset errata flags, filled in by acpi_processor_errata() below. */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);
/*
 * Detect PIIX4 chipset errata affecting processor throttling and C3.
 * 'dev' references the PIIX4 ACPI controller; results are recorded in
 * the global 'errata' structure.
 */
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */
	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {
	case 0:			/* PIIX4 A-step */
	case 1:			/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* fallthrough - the errata below apply to all PIIX4 models */
	case 2:			/* PIIX4E */
	case 3:			/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}
/*
 * Probe for chipsets with known processor errata.  Currently only the
 * Intel PIIX4 family is checked; returns whatever the PIIX4 handler
 * reports (0 when no PIIX4 is present).
 */
static int acpi_processor_errata(void)
{
	struct pci_dev *piix4;
	int ret = 0;

	/* Look for the PIIX4 ACPI controller and apply its errata. */
	piix4 = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			       PCI_ANY_ID, NULL);
	if (piix4) {
		ret = acpi_processor_errata_piix4(piix4);
		pci_dev_put(piix4);
	}

	return ret;
}
/* --------------------------------------------------------------------------
Initialization
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/*
 * Map and register a hot-added processor so it becomes a logical CPU.
 * Returns 0 on success or a negative errno.
 */
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	/* The processor must report itself present/enabled via _STA. */
	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	/* Lock order: CPU maps first, then hotplug (removal path mirrors it). */
	cpu_maps_update_begin();
	cpu_hotplug_begin();

	ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
	if (ret)
		goto out;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		/* Undo the mapping when arch registration fails. */
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
	 * to delay cpu_idle/throttling initialization and do it when the CPU
	 * gets online for the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;

out:
	cpu_hotplug_done();
	cpu_maps_update_done();
	return ret;
}
#else
/* Without CPU hotplug support, extra processor objects cannot be added. */
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
/*
 * Gather the static information for one processor object: its ACPI id,
 * physical and logical CPU ids, P_BLK throttling registers and (where
 * available) the physical slot number.  Returns 0 on success or a
 * negative errno when the processor cannot be identified.
 */
static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;	/* one-shot UP/SMP fixup guard */
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	/* Resolve the ACPI id to a physical, then a logical CPU id. */
	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
		cpu0_initialized = 1;
		/*
		 * Handle UP system running SMP kernel, with no CPU
		 * entry in MADT
		 */
		if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
			pr->id = 0;
	}

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored _iff
	 * they are physically not present.
	 */
	if (invalid_logical_cpuid(pr->id)) {
		int ret = acpi_processor_hotadd_init(pr);
		if (ret)
			return ret;
	}

	/*
	 * On some boxes several processors use the same processor bus id.
	 * But they are located in different scope.  For example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id.  And the new bus id will be
	 * generated as the following format:
	 * CPU+CPU ID.
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	/* Record the P_BLK throttling registers, if the object carried any. */
	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused..
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}
/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */

/* Tracks which acpi_device claimed each CPU id (buggy-BIOS detection). */
static DEFINE_PER_CPU(void *, processor_device_array);
/*
 * Scan-handler .attach callback for processor devices.  Allocates the
 * acpi_processor object, resolves its CPU ids and binds it to the
 * corresponding cpu device.  Returns 1 when attached, 0 when the device
 * is skipped (scan continues), or a negative errno.
 *
 * Fix: the early "processor not present" and CONFIG_SMP-limit paths used
 * to return 0 while leaking 'pr', its cpumask, and leaving driver_data
 * pointing at the soon-orphaned object; they now release everything
 * first (return value semantics are unchanged).
 */
static int acpi_processor_add(struct acpi_device *device,
					const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/*
		 * Processor is not physically present or unavailable.
		 * Do not abort the namespace scan, but free the objects
		 * that would otherwise leak.
		 */
		result = 0;
		goto err_clear_driver_data;
	}

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0) {
		result = 0;
		goto err_clear_driver_data;
	}
#endif

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, device);
	if (result)
		goto err;

	pr->dev = dev;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

 err:
	per_cpu(processors, pr->id) = NULL;
 err_clear_driver_data:
	device->driver_data = NULL;
	free_cpumask_var(pr->throttling.shared_cpu_map);
 err_free_pr:
	kfree(pr);
	return result;
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* --------------------------------------------------------------------------
                                    Removal
   -------------------------------------------------------------------------- */
/*
 * Scan-handler .detach callback, reached only on CPU hot-removal.
 * Unbinds the driver, unregisters the CPU and frees the acpi_processor.
 */
static void acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	/* Lock order mirrors acpi_processor_hotadd_init(). */
	cpu_maps_update_begin();
	cpu_hotplug_begin();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_cpu(pr->id);

	cpu_hotplug_done();
	cpu_maps_update_done();

	/* The node may now be empty; try to take it offline as well. */
	try_offline_node(cpu_to_node(pr->id));

 out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {
	{ ACPI_PROCESSOR_OBJECT_HID, },	/* "Processor" statement objects */
	{ ACPI_PROCESSOR_DEVICE_HID, },	/* Device-declared processors */
	{ }	/* terminator */
};

/* Namespace scan handler wiring attach/detach to the functions above. */
static struct acpi_scan_handler __refdata processor_handler = {
	.ids = processor_device_ids,
	.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	.detach = acpi_processor_remove,
#endif
	.hotplug = {
		.enabled = true,
	},
};
/* Register the processor scan handler (hotplug profile "processor"). */
void __init acpi_processor_init(void)
{
	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
}
| gpl-2.0 |
javelinanddart/bricked-pyramid-3.0 | arch/sparc/kernel/sigutil_64.c | 2230 | 2520 | #include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include "sigutil.h"
/*
 * Save the thread's FPU state into a signal frame.  Only the register
 * halves flagged in fpsaved[0] (FPRS_DL / FPRS_DU) are copied out.
 * Returns the accumulated user-copy error status (non-zero on fault).
 */
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	/* Lower half: 32 single-precision registers. */
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	/* Upper half: the next 32, stored from fpregs+16 (64-bit words). */
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);
	return err;
}
/*
 * Reload FPU state from a signal frame.  User-copy failures are
 * accumulated in 'err'; non-zero return means at least one access
 * faulted.
 */
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);

	/* Invalidate the live FPU contents; they are replaced below. */
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;

	/* DL/DU select the lower/upper halves of the register file. */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
				      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
				      (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	/* Remember which halves are now valid again. */
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
/*
 * Copy the 'wsaved' pending register windows (and their stack pointers)
 * from thread_info into the signal frame.  Returns the accumulated
 * user-copy error status (non-zero on fault).
 *
 * Fix: the source text had an HTML-entity corruption at the reg_window
 * address-of expression ("&current..." had been mangled into a currency
 * sign), which made this function uncompilable; restored the intended
 * &current_thread_info() expression used elsewhere in this file.
 */
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
	int i, err = __put_user(wsaved, &rwin->wsaved);

	for (i = 0; i < wsaved; i++) {
		struct reg_window *rp = &current_thread_info()->reg_window[i];
		unsigned long fp = current_thread_info()->rwbuf_stkptrs[i];

		err |= copy_to_user(&rwin->reg_window[i], rp,
				    sizeof(struct reg_window));
		err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
	}
	return err;
}
/*
 * Restore pending register windows from a signal frame.  Returns 0 on
 * success or -EFAULT/-error when the frame is unreadable or malformed.
 *
 * Fixes: the return value of __get_user() was ignored, so a faulting
 * read left 'wsaved' uninitialized yet it was still used for the bounds
 * check and copy loop; additionally, a negative user-supplied count
 * passed the signed '> NSWINS' check.  Both are rejected up front now.
 */
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
	struct thread_info *t = current_thread_info();
	int i, wsaved, err;

	if (__get_user(wsaved, &rp->wsaved))
		return -EFAULT;
	/* Unsigned compare also rejects negative counts from userspace. */
	if ((unsigned int)wsaved > NSWINS)
		return -EFAULT;

	err = 0;
	for (i = 0; i < wsaved; i++) {
		err |= copy_from_user(&t->reg_window[i],
				      &rp->reg_window[i],
				      sizeof(struct reg_window));
		err |= __get_user(t->rwbuf_stkptrs[i],
				  &rp->rwbuf_stkptrs[i]);
	}
	if (err)
		return err;

	set_thread_wsaved(wsaved);
	synchronize_user_stack();
	/* All windows must have been flushed to the user stack by now. */
	if (get_thread_wsaved())
		return -EFAULT;
	return 0;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.