repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
Testing1235678/android_kernel_samsung_t1 | fs/jffs2/summary.c | 3187 | 24084 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* Zoltan Sogor <weth@inf.u-szeged.hu>,
* Patrik Kluba <pajko@halom.u-szeged.hu>,
* University of Szeged, Hungary
* 2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include "nodelist.h"
#include "debug.h"
int jffs2_sum_init(struct jffs2_sb_info *c)
{
uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
if (!c->summary) {
JFFS2_WARNING("Can't allocate memory for summary information!\n");
return -ENOMEM;
}
c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL);
if (!c->summary->sum_buf) {
JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n");
kfree(c->summary);
return -ENOMEM;
}
dbg_summary("returned successfully\n");
return 0;
}
void jffs2_sum_exit(struct jffs2_sb_info *c)
{
dbg_summary("called\n");
jffs2_sum_disable_collecting(c->summary);
kfree(c->summary->sum_buf);
c->summary->sum_buf = NULL;
kfree(c->summary);
c->summary = NULL;
}
static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
{
if (!s->sum_list_head)
s->sum_list_head = (union jffs2_sum_mem *) item;
if (s->sum_list_tail)
s->sum_list_tail->u.next = (union jffs2_sum_mem *) item;
s->sum_list_tail = (union jffs2_sum_mem *) item;
switch (je16_to_cpu(item->u.nodetype)) {
case JFFS2_NODETYPE_INODE:
s->sum_size += JFFS2_SUMMARY_INODE_SIZE;
s->sum_num++;
dbg_summary("inode (%u) added to summary\n",
je32_to_cpu(item->i.inode));
break;
case JFFS2_NODETYPE_DIRENT:
s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize);
s->sum_num++;
dbg_summary("dirent (%u) added to summary\n",
je32_to_cpu(item->d.ino));
break;
#ifdef CONFIG_JFFS2_FS_XATTR
case JFFS2_NODETYPE_XATTR:
s->sum_size += JFFS2_SUMMARY_XATTR_SIZE;
s->sum_num++;
dbg_summary("xattr (xid=%u, version=%u) added to summary\n",
je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version));
break;
case JFFS2_NODETYPE_XREF:
s->sum_size += JFFS2_SUMMARY_XREF_SIZE;
s->sum_num++;
dbg_summary("xref added to summary\n");
break;
#endif
default:
JFFS2_WARNING("UNKNOWN node type %u\n",
je16_to_cpu(item->u.nodetype));
return 1;
}
return 0;
}
/* The following 3 functions are called from scan.c to collect summary info for not closed jeb */
/*
 * Account @size bytes of padding written into the eraseblock whose
 * summary is being collected.  Always succeeds.
 */
int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size)
{
	dbg_summary("called with %u\n", size);

	s->sum_padded += size;

	return 0;
}
/*
 * Build an in-core summary record for an inode node found at
 * jeb-relative offset @ofs during scan, and queue it.
 * Returns 0 on success, -ENOMEM if the record can't be allocated.
 */
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
			    uint32_t ofs)
{
	struct jffs2_sum_inode_mem *rec;

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	rec->nodetype = ri->nodetype;
	rec->inode = ri->ino;
	rec->version = ri->version;
	rec->offset = cpu_to_je32(ofs);	/* relative to the start of the jeb */
	rec->totlen = ri->totlen;
	rec->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *) rec);
}
/*
 * Build an in-core summary record for a dirent node found at
 * jeb-relative offset @ofs during scan, and queue it.  The name is
 * stored inline after the record.
 * Returns 0 on success, -ENOMEM if the record can't be allocated.
 */
int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd,
			     uint32_t ofs)
{
	struct jffs2_sum_dirent_mem *rec;

	/* Record plus trailing space for the (unterminated) name. */
	rec = kmalloc(sizeof(*rec) + rd->nsize, GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	rec->nodetype = rd->nodetype;
	rec->totlen = rd->totlen;
	rec->offset = cpu_to_je32(ofs);	/* relative to the start of the jeb */
	rec->pino = rd->pino;
	rec->version = rd->version;
	rec->ino = rd->ino;
	rec->nsize = rd->nsize;
	rec->type = rd->type;
	rec->next = NULL;
	memcpy(rec->name, rd->name, rd->nsize);

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *) rec);
}
#ifdef CONFIG_JFFS2_FS_XATTR
/*
 * Build an in-core summary record for an xattr node found at
 * jeb-relative offset @ofs during scan, and queue it.
 * Returns 0 on success, -ENOMEM if the record can't be allocated.
 */
int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs)
{
	struct jffs2_sum_xattr_mem *rec = kmalloc(sizeof(*rec), GFP_KERNEL);

	if (!rec)
		return -ENOMEM;

	rec->nodetype = rx->nodetype;
	rec->xid = rx->xid;
	rec->version = rx->version;
	rec->offset = cpu_to_je32(ofs);
	rec->totlen = rx->totlen;
	rec->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *) rec);
}
/*
 * Build an in-core summary record for an xref node found at
 * jeb-relative offset @ofs during scan, and queue it.
 * Returns 0 on success, -ENOMEM if the record can't be allocated.
 */
int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
{
	struct jffs2_sum_xref_mem *rec = kmalloc(sizeof(*rec), GFP_KERNEL);

	if (!rec)
		return -ENOMEM;

	rec->nodetype = rr->nodetype;
	rec->offset = cpu_to_je32(ofs);
	rec->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *) rec);
}
#endif
/* Cleanup every collected summary information */
/* Free every queued summary record and zero the per-block counters
 * (sum_size itself is left to the callers). */
static void jffs2_sum_clean_collected(struct jffs2_summary *s)
{
	union jffs2_sum_mem *next;

	if (!s->sum_list_head)
		dbg_summary("already empty\n");

	while (s->sum_list_head) {
		next = s->sum_list_head->u.next;
		kfree(s->sum_list_head);
		s->sum_list_head = next;
	}

	s->sum_list_tail = NULL;
	s->sum_padded = 0;
	s->sum_num = 0;
}
/* Discard everything collected so far and re-arm collection
 * (sum_size back to zero). */
void jffs2_sum_reset_collected(struct jffs2_summary *s)
{
	dbg_summary("called\n");

	jffs2_sum_clean_collected(s);
	s->sum_size = 0;
}
/* Discard everything collected so far and mark collection as off by
 * setting the NOSUM sentinel in sum_size. */
void jffs2_sum_disable_collecting(struct jffs2_summary *s)
{
	dbg_summary("called\n");

	jffs2_sum_clean_collected(s);
	s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
}
/* Non-zero when the NOSUM sentinel marks collection as switched off. */
int jffs2_sum_is_disabled(struct jffs2_summary *s)
{
	return s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE;
}
/* Move the collected summary information into sb (called from scan.c) */
/*
 * Transfer ownership of the record list collected in @s (a scan-local
 * summary) into the filesystem-wide c->summary; @s is left empty so
 * its records can't be double-freed.  Called from scan.c.
 */
void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s)
{
	struct jffs2_summary *dst = c->summary;

	dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n",
		    dst->sum_size, dst->sum_num, s->sum_size, s->sum_num);

	dst->sum_size = s->sum_size;
	dst->sum_num = s->sum_num;
	dst->sum_padded = s->sum_padded;
	dst->sum_list_head = s->sum_list_head;
	dst->sum_list_tail = s->sum_list_tail;

	s->sum_list_head = NULL;
	s->sum_list_tail = NULL;
}
/* Called from wbuf.c to collect writed node info */
/*
 * Called from wbuf.c after node data has been written to flash: record
 * the node in the in-core summary of the eraseblock containing @ofs.
 * @invecs/@count describe the data exactly as it was written.
 *
 * Returns 0 on success (or when summary is disabled for this block),
 * -ENOMEM if a record could not be allocated.
 */
int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
		unsigned long count, uint32_t ofs)
{
	union jffs2_node_union *node;
	struct jffs2_eraseblock *jeb;

	/* Collection was switched off for this eraseblock. */
	if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) {
		dbg_summary("Summary is disabled for this jeb! Skipping summary info!\n");
		return 0;
	}

	/* The node header always starts in the first vector. */
	node = invecs[0].iov_base;
	jeb = &c->blocks[ofs / c->sector_size];
	ofs -= jeb->offset;	/* summary records store jeb-relative offsets */

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE: {
		struct jffs2_sum_inode_mem *temp =
			kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

		if (!temp)
			goto no_mem;

		temp->nodetype = node->i.nodetype;
		temp->inode = node->i.ino;
		temp->version = node->i.version;
		temp->offset = cpu_to_je32(ofs);
		temp->totlen = node->i.totlen;
		temp->next = NULL;

		return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
	}

	case JFFS2_NODETYPE_DIRENT: {
		struct jffs2_sum_dirent_mem *temp =
			kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);

		if (!temp)
			goto no_mem;

		temp->nodetype = node->d.nodetype;
		temp->totlen = node->d.totlen;
		temp->offset = cpu_to_je32(ofs);
		temp->pino = node->d.pino;
		temp->version = node->d.version;
		temp->ino = node->d.ino;
		temp->nsize = node->d.nsize;
		temp->type = node->d.type;
		temp->next = NULL;

		/* The name either follows the header in vector 0, or was
		 * written from a separate second vector. */
		switch (count) {
		case 1:
			memcpy(temp->name, node->d.name, node->d.nsize);
			break;
		case 2:
			memcpy(temp->name, invecs[1].iov_base, node->d.nsize);
			break;
		default:
			BUG(); /* impossible count value */
			break;
		}

		return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
	}
#ifdef CONFIG_JFFS2_FS_XATTR
	case JFFS2_NODETYPE_XATTR: {
		struct jffs2_sum_xattr_mem *temp;

		temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
		if (!temp)
			goto no_mem;

		temp->nodetype = node->x.nodetype;
		temp->xid = node->x.xid;
		temp->version = node->x.version;
		temp->totlen = node->x.totlen;
		temp->offset = cpu_to_je32(ofs);
		temp->next = NULL;

		return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
	}
	case JFFS2_NODETYPE_XREF: {
		struct jffs2_sum_xref_mem *temp;

		temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
		if (!temp)
			goto no_mem;

		temp->nodetype = node->r.nodetype;
		temp->offset = cpu_to_je32(ofs);
		temp->next = NULL;

		return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
	}
#endif
	case JFFS2_NODETYPE_PADDING:
		/* Padding isn't listed as a record; just account its size. */
		dbg_summary("node PADDING\n");
		c->summary->sum_padded += je32_to_cpu(node->u.totlen);
		break;

	case JFFS2_NODETYPE_CLEANMARKER:
		dbg_summary("node CLEANMARKER\n");
		break;

	case JFFS2_NODETYPE_SUMMARY:
		dbg_summary("node SUMMARY\n");
		break;

	default:
		/* If you implement a new node type you should also implement
		   summary support for it or disable summary.
		*/
		BUG();
		break;
	}

	return 0;

no_mem:
	JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
	return -ENOMEM;
}
/*
 * Link a node ref at jeb-relative offset @ofs (low two bits carry the
 * REF_* state, hence the & ~3 masking), first marking any gap between
 * the previously-linked node and this one as dirty space.
 */
static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c,
						    struct jffs2_eraseblock *jeb,
						    uint32_t ofs, uint32_t len,
						    struct jffs2_inode_cache *ic)
{
	uint32_t used = c->sector_size - jeb->free_size;

	/* Summary doesn't record dirty space explicitly; infer it. */
	if ((ofs & ~3) > used)
		jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - used);

	return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic);
}
/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */
/*
 * Replay the packed records of an on-flash summary node, rebuilding the
 * same in-core structures (inode caches, dirents, xattr data) that a
 * full scan of the eraseblock would have produced.
 * Helper for jffs2_sum_scan_sumnode().
 *
 * Returns 0 on success, a negative error on fatal failure, or
 * -ENOTRECOVERABLE when the caller should fall back to a full scan of
 * this eraseblock.
 */
static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
{
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	void *sp;	/* cursor over the packed records */
	int i, ino;
	int err;

	sp = summary->sum;

	for (i = 0; i < je32_to_cpu(summary->sum_num); i++) {
		dbg_summary("processing summary index %d\n", i);

		cond_resched();

		/* Make sure there's a spare ref for dirty space */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		/* Each record starts with a common nodetype field. */
		switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
		case JFFS2_NODETYPE_INODE: {
			struct jffs2_sum_inode_flash *spi;
			spi = sp;

			ino = je32_to_cpu(spi->inode);

			dbg_summary("Inode at 0x%08x-0x%08x\n",
				    jeb->offset + je32_to_cpu(spi->offset),
				    jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen));

			ic = jffs2_scan_make_ino_cache(c, ino);
			if (!ic) {
				JFFS2_NOTICE("scan_make_ino_cache failed\n");
				return -ENOMEM;
			}

			/* Node body hasn't been read: mark it UNCHECKED. */
			sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED,
					  PAD(je32_to_cpu(spi->totlen)), ic);

			*pseudo_random += je32_to_cpu(spi->version);

			sp += JFFS2_SUMMARY_INODE_SIZE;

			break;
		}

		case JFFS2_NODETYPE_DIRENT: {
			struct jffs2_sum_dirent_flash *spd;
			int checkedlen;
			spd = sp;

			dbg_summary("Dirent at 0x%08x-0x%08x\n",
				    jeb->offset + je32_to_cpu(spd->offset),
				    jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen));

			/* This should never happen, but https://dev.laptop.org/ticket/4184 */
			checkedlen = strnlen(spd->name, spd->nsize);
			if (!checkedlen) {
				printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n",
				       jeb->offset + je32_to_cpu(spd->offset));
				return -EIO;
			}
			if (checkedlen < spd->nsize) {
				printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
				       jeb->offset + je32_to_cpu(spd->offset), checkedlen);
			}

			fd = jffs2_alloc_full_dirent(checkedlen+1);
			if (!fd)
				return -ENOMEM;

			memcpy(&fd->name, spd->name, checkedlen);
			fd->name[checkedlen] = 0;

			ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
			if (!ic) {
				jffs2_free_full_dirent(fd);
				return -ENOMEM;
			}

			fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED,
						    PAD(je32_to_cpu(spd->totlen)), ic);

			fd->next = NULL;
			fd->version = je32_to_cpu(spd->version);
			fd->ino = je32_to_cpu(spd->ino);
			fd->nhash = full_name_hash(fd->name, checkedlen);
			fd->type = spd->type;

			jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

			*pseudo_random += je32_to_cpu(spd->version);

			/* Advance by the full on-flash record, including the
			 * original (untruncated) name length. */
			sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);

			break;
		}
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR: {
			struct jffs2_xattr_datum *xd;
			struct jffs2_sum_xattr_flash *spx;

			spx = (struct jffs2_sum_xattr_flash *)sp;
			dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
				    jeb->offset + je32_to_cpu(spx->offset),
				    jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
				    je32_to_cpu(spx->xid), je32_to_cpu(spx->version));

			xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
						     je32_to_cpu(spx->version));
			if (IS_ERR(xd))
				return PTR_ERR(xd);
			if (xd->version > je32_to_cpu(spx->version)) {
				/* node is not the newest one */
				struct jffs2_raw_node_ref *raw
					= sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
							    PAD(je32_to_cpu(spx->totlen)), NULL);
				raw->next_in_ino = xd->node->next_in_ino;
				xd->node->next_in_ino = raw;
			} else {
				xd->version = je32_to_cpu(spx->version);
				sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
						  PAD(je32_to_cpu(spx->totlen)), (void *)xd);
			}
			*pseudo_random += je32_to_cpu(spx->xid);
			sp += JFFS2_SUMMARY_XATTR_SIZE;

			break;
		}
		case JFFS2_NODETYPE_XREF: {
			struct jffs2_xattr_ref *ref;
			struct jffs2_sum_xref_flash *spr;

			spr = (struct jffs2_sum_xref_flash *)sp;
			dbg_summary("xref at %#08x-%#08x\n",
				    jeb->offset + je32_to_cpu(spr->offset),
				    jeb->offset + je32_to_cpu(spr->offset) +
				    (uint32_t)PAD(sizeof(struct jffs2_raw_xref)));

			ref = jffs2_alloc_xattr_ref();
			if (!ref) {
				JFFS2_NOTICE("allocation of xattr_datum failed\n");
				return -ENOMEM;
			}
			/* Park on the temp list; resolved after the scan. */
			ref->next = c->xref_temp;
			c->xref_temp = ref;

			sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
					  PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);

			*pseudo_random += ref->node->flash_offset;
			sp += JFFS2_SUMMARY_XREF_SIZE;

			break;
		}
#endif
		default : {
			uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype);
			JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype);
			if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT)
				return -EIO;

			/* For compatible node types, just fall back to the full scan:
			 * undo everything this eraseblock contributed to the
			 * accounting, free its refs, and report ENOTRECOVERABLE
			 * so the caller rescans from scratch. */
			c->wasted_size -= jeb->wasted_size;
			c->free_size += c->sector_size - jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;

			jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0;
			jeb->free_size = c->sector_size;

			jffs2_free_jeb_node_refs(c, jeb);
			return -ENOTRECOVERABLE;
		}
		}
	}
	return 0;
}
/* Process the summary node - called from jffs2_scan_eraseblock() */
/*
 * Process a candidate summary node found at the end of an eraseblock.
 * Called from jffs2_scan_eraseblock().  Validates the three CRCs
 * (header, node, payload); on any mismatch the summary is ignored and
 * 0 is returned so the caller does a normal full scan.
 *
 * Returns the jffs2_scan_classify_jeb() result on success, 0 when the
 * summary is unusable, or a negative error.
 */
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
			   struct jffs2_raw_summary *summary, uint32_t sumsize,
			   uint32_t *pseudo_random)
{
	struct jffs2_unknown_node crcnode;
	int ret, ofs;
	uint32_t crc;

	/* Summary always sits at the very end of the eraseblock. */
	ofs = c->sector_size - sumsize;

	dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
		    jeb->offset, jeb->offset + ofs, sumsize);

	/* OK, now check for node validity and CRC */
	/* Rebuild the expected header and CRC it, excluding the
	 * 4-byte hdr_crc field itself. */
	crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	crcnode.totlen = summary->totlen;
	crc = crc32(0, &crcnode, sizeof(crcnode)-4);

	if (je32_to_cpu(summary->hdr_crc) != crc) {
		dbg_summary("Summary node header is corrupt (bad CRC or "
			    "no summary at all)\n");
		goto crc_err;
	}

	if (je32_to_cpu(summary->totlen) != sumsize) {
		dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
		goto crc_err;
	}

	/* Node CRC covers the fixed header minus the two trailing CRCs. */
	crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8);

	if (je32_to_cpu(summary->node_crc) != crc) {
		dbg_summary("Summary node is corrupt (bad CRC)\n");
		goto crc_err;
	}

	/* Payload CRC covers the packed records after the header. */
	crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary));

	if (je32_to_cpu(summary->sum_crc) != crc) {
		dbg_summary("Summary node data is corrupt (bad CRC)\n");
		goto crc_err;
	}

	if ( je32_to_cpu(summary->cln_mkr) ) {
		/* A non-zero cln_mkr records the cleanmarker's totlen. */
		dbg_summary("Summary : CLEANMARKER node \n");

		ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
		if (ret)
			return ret;

		if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
			dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
				    je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else if (jeb->first_node) {
			dbg_summary("CLEANMARKER node not first node in block "
				    "(0x%08x)\n", jeb->offset);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else {
			jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL,
					    je32_to_cpu(summary->cln_mkr), NULL);
		}
	}

	ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
	/* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full
	   scan of this eraseblock. So return zero */
	if (ret == -ENOTRECOVERABLE)
		return 0;
	if (ret)
		return ret;		/* real error */

	/* for PARANOIA_CHECK */
	ret = jffs2_prealloc_raw_node_refs(c, jeb, 2);
	if (ret)
		return ret;

	/* Finally link the summary node itself. */
	sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL);

	if (unlikely(jeb->free_size)) {
		/* A summarised block should be completely full; anything
		 * still "free" is really wasted. */
		JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n",
			      jeb->free_size, jeb->offset);
		jeb->wasted_size += jeb->free_size;
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->free_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);

crc_err:
	JFFS2_WARNING("Summary node crc error, skipping summary information.\n");

	return 0;
}
/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */
/*
 * Flatten the collected records into sum_buf and write the summary
 * node (header + records + padding + trailing marker) to flash at the
 * end of @jeb.  Helper for jffs2_sum_write_sumnode().
 *
 * @infosize: total on-flash size including the raw_summary header
 * @datasize: payload size (records + padding + jffs2_sum_marker)
 * @padsize:  bytes of 0xff padding inserted before the marker
 *
 * Returns 0 in all handled cases; summary collection is disabled for
 * this block instead of failing when it can't be written.
 */
static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				uint32_t infosize, uint32_t datasize, int padsize)
{
	struct jffs2_raw_summary isum;
	union jffs2_sum_mem *temp;
	struct jffs2_sum_marker *sm;
	struct kvec vecs[2];
	uint32_t sum_ofs;
	void *wpage;
	int ret;
	size_t retlen;

	if (padsize + datasize > MAX_SUMMARY_SIZE) {
		/* It won't fit in the buffer. Abort summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Summary too big (%d data, %d pad) in eraseblock at %08x\n",
			      datasize, padsize, jeb->offset);
		/* Non-fatal */
		return 0;
	}
	/* Is there enough space for summary? */
	if (padsize < 0) {
		/* don't try to write out summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Not enough space for summary, padsize = %d\n",
			      padsize);
		/* Non-fatal */
		return 0;
	}

	/* 0xff is flash erased state; padding stays "clean". */
	memset(c->summary->sum_buf, 0xff, datasize);
	memset(&isum, 0, sizeof(isum));

	isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	isum.totlen = cpu_to_je32(infosize);
	/* hdr_crc covers the common header minus the crc field itself. */
	isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
	isum.padded = cpu_to_je32(c->summary->sum_padded);
	isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
	isum.sum_num = cpu_to_je32(c->summary->sum_num);
	wpage = c->summary->sum_buf;

	/* Consume the record list head-first, flattening each record
	 * into the buffer and freeing it. */
	while (c->summary->sum_num) {
		temp = c->summary->sum_list_head;

		switch (je16_to_cpu(temp->u.nodetype)) {
		case JFFS2_NODETYPE_INODE: {
			struct jffs2_sum_inode_flash *sino_ptr = wpage;

			sino_ptr->nodetype = temp->i.nodetype;
			sino_ptr->inode = temp->i.inode;
			sino_ptr->version = temp->i.version;
			sino_ptr->offset = temp->i.offset;
			sino_ptr->totlen = temp->i.totlen;

			wpage += JFFS2_SUMMARY_INODE_SIZE;

			break;
		}

		case JFFS2_NODETYPE_DIRENT: {
			struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;

			sdrnt_ptr->nodetype = temp->d.nodetype;
			sdrnt_ptr->totlen = temp->d.totlen;
			sdrnt_ptr->offset = temp->d.offset;
			sdrnt_ptr->pino = temp->d.pino;
			sdrnt_ptr->version = temp->d.version;
			sdrnt_ptr->ino = temp->d.ino;
			sdrnt_ptr->nsize = temp->d.nsize;
			sdrnt_ptr->type = temp->d.type;

			memcpy(sdrnt_ptr->name, temp->d.name,
			       temp->d.nsize);

			wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);

			break;
		}
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR: {
			struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;

			temp = c->summary->sum_list_head;	/* redundant; temp already holds the head */
			sxattr_ptr->nodetype = temp->x.nodetype;
			sxattr_ptr->xid = temp->x.xid;
			sxattr_ptr->version = temp->x.version;
			sxattr_ptr->offset = temp->x.offset;
			sxattr_ptr->totlen = temp->x.totlen;

			wpage += JFFS2_SUMMARY_XATTR_SIZE;
			break;
		}
		case JFFS2_NODETYPE_XREF: {
			struct jffs2_sum_xref_flash *sxref_ptr = wpage;

			temp = c->summary->sum_list_head;	/* redundant; temp already holds the head */
			sxref_ptr->nodetype = temp->r.nodetype;
			sxref_ptr->offset = temp->r.offset;

			wpage += JFFS2_SUMMARY_XREF_SIZE;
			break;
		}
#endif
		default : {
			if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
			    == JFFS2_FEATURE_RWCOMPAT_COPY) {
				dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
					    je16_to_cpu(temp->u.nodetype));
				jffs2_sum_disable_collecting(c->summary);
			} else {
				BUG();	/* unknown node in summary information */
			}
		}
		}

		c->summary->sum_list_head = temp->u.next;
		kfree(temp);

		c->summary->sum_num--;
	}

	jffs2_sum_reset_collected(c->summary);

	/* Skip the padding and place the marker at the buffer's end. */
	wpage += padsize;

	sm = wpage;
	sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
	sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);

	/* sum_crc covers records + padding + marker; node_crc covers
	 * the header minus its two trailing CRC fields. */
	isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
	isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));

	vecs[0].iov_base = &isum;
	vecs[0].iov_len = sizeof(isum);
	vecs[1].iov_base = c->summary->sum_buf;
	vecs[1].iov_len = datasize;

	sum_ofs = jeb->offset + c->sector_size - jeb->free_size;

	dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
		    sum_ofs);

	ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);

	if (ret || (retlen != infosize)) {
		JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
			      infosize, sum_ofs, ret, retlen);

		if (retlen) {
			/* Waste remaining space */
			spin_lock(&c->erase_completion_lock);
			jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
			spin_unlock(&c->erase_completion_lock);
		}

		c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;

		return 0;
	}

	spin_lock(&c->erase_completion_lock);
	jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL);
	spin_unlock(&c->erase_completion_lock);

	return 0;
}
/* Write out summary information - called from jffs2_do_reserve_space */
/*
 * Write out the collected summary for c->nextblock.  Called from
 * jffs2_do_reserve_space(); the unlock/lock pairing below shows the
 * caller holds c->erase_completion_lock, which is dropped for the
 * duration of the flash write and re-taken before returning.
 */
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
{
	int datasize, infosize, padsize;
	struct jffs2_eraseblock *jeb;
	int ret = 0;

	dbg_summary("called\n");

	spin_unlock(&c->erase_completion_lock);

	jeb = c->nextblock;
	jffs2_prealloc_raw_node_refs(c, jeb, 1);

	if (!c->summary->sum_num || !c->summary->sum_list_head) {
		JFFS2_WARNING("Empty summary info!!!\n");
		BUG();
	}

	/* Payload = records + trailing marker; pad so the node exactly
	 * fills the remaining free space of the block. */
	datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker);
	infosize = sizeof(struct jffs2_raw_summary) + datasize;
	padsize = jeb->free_size - infosize;
	infosize += padsize;
	datasize += padsize;

	ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);

	spin_lock(&c->erase_completion_lock);
	return ret;
}
| gpl-2.0 |
juanxincai521/us780 | lib/atomic64.c | 3443 | 4352 | /*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
/*
* We use a hashed array of spinlocks to provide exclusive access
* to each atomic64_t variable. Since this is expected to used on
* systems with small numbers of CPUs (<= 4 or so), we use a
* relatively small array of 16 spinlocks to avoid wasting too much
* memory on the spinlock array.
*/
#define NR_LOCKS 16
/*
* Ensure each lock is in a separate cacheline.
*/
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];	/* pad each entry to a full cacheline */
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
/*
 * Hash an atomic64_t's address into one of the NR_LOCKS spinlocks.
 * Mixing in higher address bits spreads variables that share low bits
 * across different locks.
 */
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long a = (unsigned long) v;

	a >>= L1_CACHE_SHIFT;
	a ^= (a >> 8) ^ (a >> 16);
	return &atomic64_lock[a & (NR_LOCKS - 1)].lock;
}
/* Read the counter under its hashed lock so a torn read on 32-bit
 * hardware is impossible. */
long long atomic64_read(const atomic64_t *v)
{
	long long ret;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	ret = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_read);
/* Store @i into the counter under its hashed lock. */
void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
/* Atomically add @a to the counter; no value is returned. */
void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
/* Atomically add @a to the counter and return the new value. */
long long atomic64_add_return(long long a, atomic64_t *v)
{
	long long ret;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	ret = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_return);
/* Atomically subtract @a from the counter; no value is returned. */
void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
/* Atomically subtract @a from the counter and return the new value. */
long long atomic64_sub_return(long long a, atomic64_t *v)
{
	long long ret;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	ret = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_sub_return);
/*
 * Decrement the counter only if the result stays non-negative.
 * Returns the decremented value either way; a negative return means
 * the store was skipped.
 */
long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long ret;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	ret = v->counter - 1;
	if (ret >= 0)
		v->counter = ret;
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
/*
 * Compare-and-exchange: store @n only if the counter equals @o.
 * Returns the value observed before any store (== @o on success).
 */
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	long long seen;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	seen = v->counter;
	if (seen == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return seen;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
/* Unconditionally store @new and return the previous value. */
long long atomic64_xchg(atomic64_t *v, long long new)
{
	long long old;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	old = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return old;
}
EXPORT_SYMBOL(atomic64_xchg);
/*
 * Add @a to the counter unless it currently equals @u.
 * Returns 1 if the addition was performed, 0 otherwise.
 */
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	int added = 0;
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		added = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return added;
}
EXPORT_SYMBOL(atomic64_add_unless);
/* Run once at boot: spinlocks need explicit runtime initialisation. */
static int init_atomic64_lock(void)
{
	int n;

	for (n = 0; n < NR_LOCKS; ++n)
		raw_spin_lock_init(&atomic64_lock[n].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);
| gpl-2.0 |
tsfs/Vybrid-Linux | drivers/regulator/virtual.c | 3955 | 9080 | /*
* reg-virtual-consumer.c
*
* Copyright 2008 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Per-device state for one virtual consumer instance. */
struct virtual_consumer_data {
	struct mutex lock;		/* serialises all constraint updates */
	struct regulator *regulator;	/* the supply this consumer exercises */
	bool enabled;			/* true after a successful regulator_enable() */
	int min_uV;			/* requested voltage range; 0 = unset */
	int max_uV;
	int min_uA;			/* requested current range; 0 = unset */
	int max_uA;
	unsigned int mode;		/* last successfully configured operating mode */
};
static void update_voltage_constraints(struct device *dev,
struct virtual_consumer_data *data)
{
int ret;
if (data->min_uV && data->max_uV
&& data->min_uV <= data->max_uV) {
dev_dbg(dev, "Requesting %d-%duV\n",
data->min_uV, data->max_uV);
ret = regulator_set_voltage(data->regulator,
data->min_uV, data->max_uV);
if (ret != 0) {
dev_err(dev,
"regulator_set_voltage() failed: %d\n", ret);
return;
}
}
if (data->min_uV && data->max_uV && !data->enabled) {
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uV && data->max_uV) && data->enabled) {
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
/*
 * Push the cached current-limit range to the regulator and
 * enable/disable it.  Note the asymmetry with the voltage path: a set
 * max_uA alone is enough to apply the limit and enable, but disabling
 * requires both bounds cleared.  Caller holds data->lock.
 */
static void update_current_limit_constraints(struct device *dev,
					     struct virtual_consumer_data *data)
{
	int err;

	if (data->max_uA
	    && data->min_uA <= data->max_uA) {
		dev_dbg(dev, "Requesting %d-%duA\n",
			data->min_uA, data->max_uA);
		err = regulator_set_current_limit(data->regulator,
						  data->min_uA, data->max_uA);
		if (err != 0) {
			dev_err(dev,
				"regulator_set_current_limit() failed: %d\n",
				err);
			return;
		}
	}

	if (data->max_uA && !data->enabled) {
		dev_dbg(dev, "Enabling regulator\n");
		err = regulator_enable(data->regulator);
		if (err == 0)
			data->enabled = true;
		else
			dev_err(dev, "regulator_enable() failed: %d\n",
				err);
	}

	if (data->enabled && !(data->min_uA && data->max_uA)) {
		dev_dbg(dev, "Disabling regulator\n");
		err = regulator_disable(data->regulator);
		if (err == 0)
			data->enabled = false;
		else
			dev_err(dev, "regulator_disable() failed: %d\n",
				err);
	}
}
/* sysfs show: cached minimum-voltage constraint in microvolts. */
static ssize_t show_min_uV(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->min_uV);
}
/*
 * sysfs store: set the minimum-voltage constraint (microvolts) and
 * re-apply the voltage constraints.
 *
 * Fix: malformed input now returns -EINVAL instead of silently
 * returning @count, which told userspace the write succeeded.
 */
static ssize_t set_min_uV(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;

	if (strict_strtol(buf, 10, &val) != 0)
		return -EINVAL;

	mutex_lock(&data->lock);

	data->min_uV = val;
	update_voltage_constraints(dev, data);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs show: cached maximum-voltage constraint in microvolts. */
static ssize_t show_max_uV(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->max_uV);
}
/*
 * sysfs store: set the maximum-voltage constraint (microvolts) and
 * re-apply the voltage constraints.
 *
 * Fix: malformed input now returns -EINVAL instead of silently
 * returning @count, which told userspace the write succeeded.
 */
static ssize_t set_max_uV(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;

	if (strict_strtol(buf, 10, &val) != 0)
		return -EINVAL;

	mutex_lock(&data->lock);

	data->max_uV = val;
	update_voltage_constraints(dev, data);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs show: cached minimum-current constraint in microamps. */
static ssize_t show_min_uA(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->min_uA);
}
/*
 * sysfs store: set the minimum-current constraint (microamps) and
 * re-apply the current-limit constraints.
 *
 * Fix: malformed input now returns -EINVAL instead of silently
 * returning @count, which told userspace the write succeeded.
 */
static ssize_t set_min_uA(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;

	if (strict_strtol(buf, 10, &val) != 0)
		return -EINVAL;

	mutex_lock(&data->lock);

	data->min_uA = val;
	update_current_limit_constraints(dev, data);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs show: cached maximum-current constraint in microamps. */
static ssize_t show_max_uA(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->max_uA);
}
/*
 * sysfs store: set the maximum-current constraint (microamps) and
 * re-apply the current-limit constraints.
 *
 * Fix: malformed input now returns -EINVAL instead of silently
 * returning @count, which told userspace the write succeeded.
 */
static ssize_t set_max_uA(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;

	if (strict_strtol(buf, 10, &val) != 0)
		return -EINVAL;

	mutex_lock(&data->lock);

	data->max_uA = val;
	update_current_limit_constraints(dev, data);

	mutex_unlock(&data->lock);

	return count;
}
/* sysfs show: textual name of the last configured regulator mode. */
static ssize_t show_mode(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	const char *label;

	switch (data->mode) {
	case REGULATOR_MODE_FAST:
		label = "fast\n";
		break;
	case REGULATOR_MODE_NORMAL:
		label = "normal\n";
		break;
	case REGULATOR_MODE_IDLE:
		label = "idle\n";
		break;
	case REGULATOR_MODE_STANDBY:
		label = "standby\n";
		break;
	default:
		label = "unknown\n";
		break;
	}

	return sprintf(buf, "%s", label);
}
/*
 * sysfs store: parse a mode name and configure the regulator with it.
 * The cached data->mode is only updated when regulator_set_mode()
 * succeeds.
 *
 * Fix: an unrecognised mode string now returns -EINVAL instead of
 * silently returning @count, which told userspace the write succeeded.
 */
static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	unsigned int mode;
	int ret;

	/*
	 * sysfs_streq() doesn't need the \n's, but we add them so the strings
	 * will be shared with show_mode(), above.
	 */
	if (sysfs_streq(buf, "fast\n"))
		mode = REGULATOR_MODE_FAST;
	else if (sysfs_streq(buf, "normal\n"))
		mode = REGULATOR_MODE_NORMAL;
	else if (sysfs_streq(buf, "idle\n"))
		mode = REGULATOR_MODE_IDLE;
	else if (sysfs_streq(buf, "standby\n"))
		mode = REGULATOR_MODE_STANDBY;
	else {
		dev_err(dev, "Configuring invalid mode\n");
		return -EINVAL;
	}

	mutex_lock(&data->lock);
	ret = regulator_set_mode(data->regulator, mode);
	if (ret == 0)
		data->mode = mode;
	else
		dev_err(dev, "Failed to configure mode: %d\n", ret);
	mutex_unlock(&data->lock);

	return count;
}
/*
 * sysfs attributes exposing the virtual consumer's requested constraints
 * and operating mode.
 *
 * NOTE(review): 0666 makes these world-writable; presumably intentional
 * for a test/debug driver, but checkpatch flags world-writable sysfs
 * files -- confirm before reusing this pattern elsewhere.
 */
static DEVICE_ATTR(min_microvolts, 0666, show_min_uV, set_min_uV);
static DEVICE_ATTR(max_microvolts, 0666, show_max_uV, set_max_uV);
static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
static DEVICE_ATTR(mode, 0666, show_mode, set_mode);

/* Attribute group registered on the platform device in probe(). */
static struct attribute *regulator_virtual_attributes[] = {
	&dev_attr_min_microvolts.attr,
	&dev_attr_max_microvolts.attr,
	&dev_attr_min_microamps.attr,
	&dev_attr_max_microamps.attr,
	&dev_attr_mode.attr,
	NULL
};

static const struct attribute_group regulator_virtual_attr_group = {
	.attrs = regulator_virtual_attributes,
};
/*
 * Bind a virtual consumer: obtain the supply named by platform_data,
 * cache its current mode and expose the control attributes via sysfs.
 *
 * Fix: drvdata must be published with platform_set_drvdata() BEFORE
 * sysfs_create_group() makes the attributes visible -- the show/store
 * callbacks fetch it with dev_get_drvdata() and would dereference NULL
 * if userspace read an attribute while probe was still running.
 */
static int __devinit regulator_virtual_probe(struct platform_device *pdev)
{
	char *reg_id = pdev->dev.platform_data;
	struct virtual_consumer_data *drvdata;
	int ret;

	drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
	if (drvdata == NULL)
		return -ENOMEM;

	mutex_init(&drvdata->lock);

	drvdata->regulator = regulator_get(&pdev->dev, reg_id);
	if (IS_ERR(drvdata->regulator)) {
		ret = PTR_ERR(drvdata->regulator);
		dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
			reg_id, ret);
		goto err;
	}

	/* Cache the supply's current mode for show_mode(). */
	drvdata->mode = regulator_get_mode(drvdata->regulator);

	/* Publish drvdata before the attributes become visible. */
	platform_set_drvdata(pdev, drvdata);

	ret = sysfs_create_group(&pdev->dev.kobj,
				 &regulator_virtual_attr_group);
	if (ret != 0) {
		dev_err(&pdev->dev,
			"Failed to create attribute group: %d\n", ret);
		goto err_regulator;
	}

	return 0;

err_regulator:
	platform_set_drvdata(pdev, NULL);
	regulator_put(drvdata->regulator);
err:
	kfree(drvdata);
	return ret;
}
/*
 * Unbind: tear everything down in reverse order of probe -- remove the
 * sysfs attributes, drop any enable reference still held, release the
 * supply and free the per-device state.
 */
static int __devexit regulator_virtual_remove(struct platform_device *pdev)
{
	struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &regulator_virtual_attr_group);

	if (drvdata->enabled)
		regulator_disable(drvdata->regulator);
	regulator_put(drvdata->regulator);

	kfree(drvdata);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver matching the "reg-virt-consumer" platform device. */
static struct platform_driver regulator_virtual_consumer_driver = {
	.probe		= regulator_virtual_probe,
	.remove		= __devexit_p(regulator_virtual_remove),
	.driver		= {
		.name		= "reg-virt-consumer",
		.owner		= THIS_MODULE,
	},
};
/* Module load: register the virtual consumer platform driver. */
static int __init regulator_virtual_consumer_init(void)
{
	return platform_driver_register(&regulator_virtual_consumer_driver);
}
module_init(regulator_virtual_consumer_init);
/* Module unload: unregister the platform driver. */
static void __exit regulator_virtual_consumer_exit(void)
{
	platform_driver_unregister(&regulator_virtual_consumer_driver);
}
module_exit(regulator_virtual_consumer_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Virtual regulator consumer");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:reg-virt-consumer");
| gpl-2.0 |
fards/DellStreak5-GingerBread | drivers/eisa/pci_eisa.c | 4211 | 1702 | /*
* Minimalist driver for a generic PCI-to-EISA bridge.
*
* (C) 2003 Marc Zyngier <maz@wild-wind.fr.eu.org>
*
* This code is released under the GPL version 2.
*
* Ivan Kokshaysky <ink@jurassic.park.msu.ru> :
* Generalisation from i82375 to PCI_CLASS_BRIDGE_EISA.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
/* There is only *one* pci_eisa device per machine, right ? */
static struct eisa_root_device pci_eisa_root;
/*
 * Probe callback for a PCI-to-EISA bridge: enable the bridge device and
 * register the (single) EISA root below it.
 *
 * Fixes:
 *  - was marked __init although it is referenced from pci_eisa_driver's
 *    .probe; init text is discarded after boot, so a hotplug-time probe
 *    would jump into freed memory.  Use __devinit like the other probe
 *    routines in this tree.
 *  - return a proper errno (-ENODEV) instead of a bare -1 when the EISA
 *    root cannot be registered.
 */
static int __devinit pci_eisa_init(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR "pci_eisa : Could not enable device %s\n",
		       pci_name(pdev));
		return rc;
	}

	pci_eisa_root.dev           = &pdev->dev;
	pci_eisa_root.res           = pdev->bus->resource[0];
	pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start;
	pci_eisa_root.slots         = EISA_MAX_SLOTS;
	pci_eisa_root.dma_mask      = pdev->dma_mask;
	dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);

	if (eisa_root_register(&pci_eisa_root)) {
		printk(KERN_ERR "pci_eisa : Could not register EISA root\n");
		return -ENODEV;
	}

	return 0;
}
/* Match any vendor/device whose class code says "PCI-to-EISA bridge". */
static struct pci_device_id pci_eisa_pci_tbl[] = {
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
	{ 0, }
};
/* PCI driver for the bridge; no remove -- the EISA root lives forever. */
static struct pci_driver pci_eisa_driver = {
	.name		= "pci_eisa",
	.id_table	= pci_eisa_pci_tbl,
	.probe		= pci_eisa_init,
};
/* Register the bridge driver at device initcall time. */
static int __init pci_eisa_init_module (void)
{
	return pci_register_driver (&pci_eisa_driver);
}

device_initcall(pci_eisa_init_module);
MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
| gpl-2.0 |
schqiushui/kernel_sense_kk_m8ace | arch/arm/mach-ixp2000/ixdp2x01.c | 4723 | 12747 | /*
* arch/arm/mach-ixp2000/ixdp2x01.c
*
* Code common to Intel IXDP2401 and IXDP2801 platforms
*
* Original Author: Andrzej Mialkowski <andrzej.mialkowski@intel.com>
* Maintainer: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright (C) 2002-2003 Intel Corp.
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/pci.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
/*************************************************************************
* IXDP2x01 IRQ Handling
*************************************************************************/
/* Mask a board IRQ: set its bit in the CPLD mask-set register. */
static void ixdp2x01_irq_mask(struct irq_data *d)
{
	unsigned long bit = IXP2000_BOARD_IRQ_MASK(d->irq);

	ixp2000_reg_wrb(IXDP2X01_INT_MASK_SET_REG, bit);
}
/* Unmask a board IRQ: set its bit in the CPLD mask-clear register. */
static void ixdp2x01_irq_unmask(struct irq_data *d)
{
	unsigned long bit = IXP2000_BOARD_IRQ_MASK(d->irq);

	ixp2000_reg_write(IXDP2X01_INT_MASK_CLR_REG, bit);
}
static u32 valid_irq_mask;
/*
 * Chained handler for the CPLD interrupt: with the parent line masked,
 * demultiplex every pending (and valid) CPLD source to its own board
 * IRQ, then unmask the parent again.
 */
static void ixdp2x01_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	u32 ex_interrupt;
	int i;

	/* Hold off the parent line while we demultiplex. */
	desc->irq_data.chip->irq_mask(&desc->irq_data);

	ex_interrupt = *IXDP2X01_INT_STAT_REG & valid_irq_mask;

	if (!ex_interrupt) {
		printk(KERN_ERR "Spurious IXDP2X01 CPLD interrupt!\n");
		return;
	}

	for (i = 0; i < IXP2000_BOARD_IRQS; i++) {
		if (ex_interrupt & (1 << i)) {
			int cpld_irq = IXP2000_BOARD_IRQ(0) + i;
			generic_handle_irq(cpld_irq);
		}
	}

	/* NOTE: the spurious-interrupt path above returns without this. */
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/* irq_chip for the CPLD IRQs; ack is implemented as mask. */
static struct irq_chip ixdp2x01_irq_chip = {
	.irq_mask	= ixdp2x01_irq_mask,
	.irq_ack	= ixdp2x01_irq_mask,
	.irq_unmask	= ixdp2x01_irq_unmask
};
/*
* We only do anything if we are the master NPU on the board.
* The slave NPU only has the ethernet chip going directly to
* the PCIB interrupt input.
*/
void __init ixdp2x01_init_irq(void)
{
	int irq = 0;

	/* initialize chip specific interrupts */
	ixp2000_init_irq();

	/* Pick the per-board set of valid CPLD sources. */
	if (machine_is_ixdp2401())
		valid_irq_mask = IXDP2401_VALID_IRQ_MASK;
	else
		valid_irq_mask = IXDP2801_VALID_IRQ_MASK;

	/* Mask all interrupts from CPLD, disable simulation */
	ixp2000_reg_write(IXDP2X01_INT_MASK_SET_REG, 0xffffffff);
	ixp2000_reg_wrb(IXDP2X01_INT_SIM_REG, 0);

	for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) {
		/*
		 * NOTE(review): this tests the raw IRQ *number* against
		 * valid_irq_mask rather than the board bit position
		 * (IXP2000_BOARD_IRQ_MASK(irq)).  This is what shipped,
		 * but it looks suspicious -- confirm before relying on it.
		 */
		if (irq & valid_irq_mask) {
			irq_set_chip_and_handler(irq, &ixdp2x01_irq_chip,
						 handle_level_irq);
			set_irq_flags(irq, IRQF_VALID);
		} else {
			set_irq_flags(irq, 0);
		}
	}

	/* Hook into PCI interrupts */
	irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler);
}
/*************************************************************************
* IXDP2x01 memory map
*************************************************************************/
/* Static mapping for the board CPLD register window. */
static struct map_desc ixdp2x01_io_desc __initdata = {
	.virtual	= IXDP2X01_VIRT_CPLD_BASE,
	.pfn		= __phys_to_pfn(IXDP2X01_PHYS_CPLD_BASE),
	.length		= IXDP2X01_CPLD_REGION_SIZE,
	.type		= MT_DEVICE
};

/* Set up the SoC mappings, then add the board CPLD window. */
static void __init ixdp2x01_map_io(void)
{
	ixp2000_map_io();

	iotable_init(&ixdp2x01_io_desc, 1);
}
/*************************************************************************
* IXDP2x01 serial ports
*************************************************************************/
/*
 * First CPLD-attached 8250-compatible UART, registers accessed as
 * 32-bit words (regshift 2).
 */
static struct plat_serial8250_port ixdp2x01_serial_port1[] = {
	{
		.mapbase	= (unsigned long)IXDP2X01_UART1_PHYS_BASE,
		.membase	= (char *)IXDP2X01_UART1_VIRT_BASE,
		.irq		= IRQ_IXDP2X01_UART1,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM32,
		.regshift	= 2,
		.uartclk	= IXDP2X01_UART_CLK,
	},
	{ }
};

static struct resource ixdp2x01_uart_resource1 = {
	.start		= IXDP2X01_UART1_PHYS_BASE,
	.end		= IXDP2X01_UART1_PHYS_BASE + 0xffff,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device ixdp2x01_serial_device1 = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM1,
	.dev		= {
		.platform_data		= ixdp2x01_serial_port1,
	},
	.num_resources	= 1,
	.resource	= &ixdp2x01_uart_resource1,
};

/* Second CPLD-attached UART, identical configuration apart from base/IRQ. */
static struct plat_serial8250_port ixdp2x01_serial_port2[] = {
	{
		.mapbase	= (unsigned long)IXDP2X01_UART2_PHYS_BASE,
		.membase	= (char *)IXDP2X01_UART2_VIRT_BASE,
		.irq		= IRQ_IXDP2X01_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM32,
		.regshift	= 2,
		.uartclk	= IXDP2X01_UART_CLK,
	},
	{ }
};

static struct resource ixdp2x01_uart_resource2 = {
	.start		= IXDP2X01_UART2_PHYS_BASE,
	.end		= IXDP2X01_UART2_PHYS_BASE + 0xffff,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device ixdp2x01_serial_device2 = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM2,
	.dev		= {
		.platform_data		= ixdp2x01_serial_port2,
	},
	.num_resources	= 1,
	.resource	= &ixdp2x01_uart_resource2,
};
/* Register both CPLD-attached 8250 UART platform devices. */
static void ixdp2x01_uart_init(void)
{
	struct platform_device *uarts[] = {
		&ixdp2x01_serial_device1,
		&ixdp2x01_serial_device2,
	};
	unsigned int i;

	for (i = 0; i < sizeof(uarts) / sizeof(uarts[0]); i++)
		platform_device_register(uarts[i]);
}
/*************************************************************************
* IXDP2x01 timer tick configuration
*************************************************************************/
/* Board clock rate in Hz; overridable via the ixdp2x01_clock= cmdline. */
static unsigned int ixdp2x01_clock;

/* Parse "ixdp2x01_clock=<hz>" from the kernel command line. */
static int __init ixdp2x01_clock_setup(char *str)
{
	ixdp2x01_clock = simple_strtoul(str, NULL, 10);

	return 1;
}

__setup("ixdp2x01_clock=", ixdp2x01_clock_setup);
/* Start the SoC timer, defaulting to 50 MHz if no override was given. */
static void __init ixdp2x01_timer_init(void)
{
	if (!ixdp2x01_clock)
		ixdp2x01_clock = 50000000;

	ixp2000_init_time(ixdp2x01_clock);
}

static struct sys_timer ixdp2x01_timer = {
	.init		= ixdp2x01_timer_init,
	.offset		= ixp2000_gettimeoffset,
};
/*************************************************************************
* IXDP2x01 PCI
*************************************************************************/
/* Clear the PCI address extension and run the common SoC PCI pre-init. */
void __init ixdp2x01_pci_preinit(void)
{
	ixp2000_reg_write(IXP2000_PCI_ADDR_EXT, 0x00000000);

	ixp2000_pci_preinit();

	/* Keep the firmware's resource assignments. */
	pcibios_setup("firmware");
}
#define DEVPIN(dev, pin) ((pin) | ((dev) << 3))
/*
 * Route a device's interrupt pin to a board IRQ number.
 *
 * The board has two known motherboard bridges (devfn 0x08 and 0x10 on
 * bus 0); the walk below climbs to the first bridge under the host and
 * then maps (slot, pin) pairs that are known to be wired up.  Anything
 * unrecognised returns -1 (no interrupt).
 */
static int __init ixdp2x01_pci_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	u8 bus = dev->bus->number;
	u32 devpin = DEVPIN(PCI_SLOT(dev->devfn), pin);
	struct pci_bus *tmp_bus = dev->bus;

	/* Primary bus, no interrupts here */
	if (bus == 0) {
		return -1;
	}

	/* Lookup first leaf in bus tree */
	while ((tmp_bus->parent != NULL) && (tmp_bus->parent->parent != NULL)) {
		tmp_bus = tmp_bus->parent;
	}

	/* Select between known bridges */
	switch (tmp_bus->self->devfn | (tmp_bus->self->bus->number << 8)) {
	/* Device is located after first MB bridge */
	case 0x0008:
		if (tmp_bus == dev->bus) {
			/* Device is located directly after first MB bridge */
			switch (devpin) {
			case DEVPIN(1, 1):	/* Onboard 82546 ch 0 */
				if (machine_is_ixdp2401())
					return IRQ_IXDP2401_INTA_82546;
				return -1;
			case DEVPIN(1, 2):	/* Onboard 82546 ch 1 */
				if (machine_is_ixdp2401())
					return IRQ_IXDP2401_INTB_82546;
				return -1;
			case DEVPIN(0, 1):	/* PMC INTA# */
				return IRQ_IXDP2X01_SPCI_PMC_INTA;
			case DEVPIN(0, 2):	/* PMC INTB# */
				return IRQ_IXDP2X01_SPCI_PMC_INTB;
			case DEVPIN(0, 3):	/* PMC INTC# */
				return IRQ_IXDP2X01_SPCI_PMC_INTC;
			case DEVPIN(0, 4):	/* PMC INTD# */
				return IRQ_IXDP2X01_SPCI_PMC_INTD;
			}
		}
		break;
	case 0x0010:
		if (tmp_bus == dev->bus) {
			/* Device is located directly after second MB bridge */
			/* Secondary bus of second bridge */
			switch (devpin) {
			case DEVPIN(0, 1):	/* DB#0 */
				return IRQ_IXDP2X01_SPCI_DB_0;
			case DEVPIN(1, 1):	/* DB#1 */
				return IRQ_IXDP2X01_SPCI_DB_1;
			}
		} else {
			/* Device is located indirectly after second MB bridge */
			/* Not supported now */
		}
		break;
	}

	return -1;
}
/*
 * Per-controller setup: fix up the PCI memory offset (adjusted by the
 * address-extension register on IXDP2801/28x5) before the common setup.
 */
static int ixdp2x01_pci_setup(int nr, struct pci_sys_data *sys)
{
	sys->mem_offset = 0xe0000000;

	if (machine_is_ixdp2801() || machine_is_ixdp28x5())
		sys->mem_offset -= ((*IXP2000_PCI_ADDR_EXT & 0xE000) << 16);

	return ixp2000_pci_setup(nr, sys);
}
/* Single-controller PCI description handed to pci_common_init(). */
struct hw_pci ixdp2x01_pci __initdata = {
	.nr_controllers	= 1,
	.setup		= ixdp2x01_pci_setup,
	.preinit	= ixdp2x01_pci_preinit,
	.scan		= ixp2000_pci_scan_bus,
	.map_irq	= ixdp2x01_pci_map_irq,
};
/* Initialise PCI only on the boards this file actually supports. */
int __init ixdp2x01_pci_init(void)
{
	if (machine_is_ixdp2401() || machine_is_ixdp2801() ||
	    machine_is_ixdp28x5())
		pci_common_init(&ixdp2x01_pci);

	return 0;
}

subsys_initcall(ixdp2x01_pci_init);
/*************************************************************************
* IXDP2x01 Machine Initialization
*************************************************************************/
/* CFI flash behind a CPLD-controlled bank window (see bank_setup). */
static struct flash_platform_data ixdp2x01_flash_platform_data = {
	.map_name	= "cfi_probe",
	.width		= 1,
};

/*
 * Select the flash bank containing @ofs via the CPLD banking register
 * and return the offset within that window.
 */
static unsigned long ixdp2x01_flash_bank_setup(unsigned long ofs)
{
	ixp2000_reg_wrb(IXDP2X01_CPLD_FLASH_REG,
		((ofs >> IXDP2X01_FLASH_WINDOW_BITS) | IXDP2X01_CPLD_FLASH_INTERN));
	return (ofs & IXDP2X01_FLASH_WINDOW_MASK);
}

static struct ixp2000_flash_data ixdp2x01_flash_data = {
	.platform_data	= &ixdp2x01_flash_platform_data,
	.bank_setup	= ixdp2x01_flash_bank_setup
};

/* 32 MiB flash window; nr_banks is filled in at machine init time. */
static struct resource ixdp2x01_flash_resource = {
	.start		= 0xc4000000,
	.end		= 0xc4000000 + 0x01ffffff,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device ixdp2x01_flash = {
	.name		= "IXP2000-Flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &ixdp2x01_flash_data,
	},
	.num_resources	= 1,
	.resource	= &ixdp2x01_flash_resource,
};
/* GPIO-bitbanged I2C controller pin assignment. */
static struct ixp2000_i2c_pins ixdp2x01_i2c_gpio_pins = {
	.sda_pin	= IXDP2X01_GPIO_SDA,
	.scl_pin	= IXDP2X01_GPIO_SCL,
};

static struct platform_device ixdp2x01_i2c_controller = {
	.name		= "IXP2000-I2C",
	.id		= 0,
	.dev		= {
		.platform_data	= &ixdp2x01_i2c_gpio_pins,
	},
	.num_resources	= 0
};

/* Devices registered in one go by ixdp2x01_init_machine(). */
static struct platform_device *ixdp2x01_devices[] __initdata = {
	&ixdp2x01_flash,
	&ixdp2x01_i2c_controller
};
/*
 * Board init: probe the number of flash banks from the CPLD, register
 * the platform devices and bring up the SoC and CPLD UARTs.
 */
static void __init ixdp2x01_init_machine(void)
{
	ixp2000_reg_wrb(IXDP2X01_CPLD_FLASH_REG,
		(IXDP2X01_CPLD_FLASH_BANK_MASK | IXDP2X01_CPLD_FLASH_INTERN));

	/* The CPLD reports the number of banks minus one. */
	ixdp2x01_flash_data.nr_banks =
		((*IXDP2X01_CPLD_FLASH_REG & IXDP2X01_CPLD_FLASH_BANK_MASK) + 1);

	platform_add_devices(ixdp2x01_devices, ARRAY_SIZE(ixdp2x01_devices));
	ixp2000_uart_init();
	ixdp2x01_uart_init();
}
static void ixdp2401_restart(char mode, const char *cmd)
{
	/*
	 * Reset flash banking register so that we are pointing at
	 * RedBoot bank.  (The "0 >>" shift is the bank-0 form of the
	 * expression used in ixdp2x01_flash_bank_setup().)
	 */
	ixp2000_reg_write(IXDP2X01_CPLD_FLASH_REG,
				((0 >> IXDP2X01_FLASH_WINDOW_BITS)
					| IXDP2X01_CPLD_FLASH_INTERN));
	ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0xffffffff);

	ixp2000_restart(mode, cmd);
}
static void ixdp280x_restart(char mode, const char *cmd)
{
	/*
	 * On IXDP2801 we need to write this magic sequence to the CPLD
	 * to cause a complete reset of the CPU and all external devices
	 * and move the flash bank register back to 0.
	 */
	unsigned long reset_reg = *IXDP2X01_CPLD_RESET_REG;

	/* Unlock key in the upper half, preserved state in the lower. */
	reset_reg = 0x55AA0000 | (reset_reg & 0x0000FFFF);
	ixp2000_reg_write(IXDP2X01_CPLD_RESET_REG, reset_reg);
	ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0x80000000);

	ixp2000_restart(mode, cmd);
}
/*
 * Machine descriptors.  All three boards share the same init paths;
 * only the restart hook differs between IXDP2401 and the 280x family.
 */
#ifdef CONFIG_ARCH_IXDP2401
MACHINE_START(IXDP2401, "Intel IXDP2401 Development Platform")
	/* Maintainer: MontaVista Software, Inc. */
	.atag_offset	= 0x100,
	.map_io		= ixdp2x01_map_io,
	.init_irq	= ixdp2x01_init_irq,
	.timer		= &ixdp2x01_timer,
	.init_machine	= ixdp2x01_init_machine,
	.restart	= ixdp2401_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_IXDP2801
MACHINE_START(IXDP2801, "Intel IXDP2801 Development Platform")
	/* Maintainer: MontaVista Software, Inc. */
	.atag_offset	= 0x100,
	.map_io		= ixdp2x01_map_io,
	.init_irq	= ixdp2x01_init_irq,
	.timer		= &ixdp2x01_timer,
	.init_machine	= ixdp2x01_init_machine,
	.restart	= ixdp280x_restart,
MACHINE_END

/*
 * IXDP28x5 is basically an IXDP2801 with a different CPU but Intel
 * changed the machine ID in the bootloader
 */
MACHINE_START(IXDP28X5, "Intel IXDP2805/2855 Development Platform")
	/* Maintainer: MontaVista Software, Inc. */
	.atag_offset	= 0x100,
	.map_io		= ixdp2x01_map_io,
	.init_irq	= ixdp2x01_init_irq,
	.timer		= &ixdp2x01_timer,
	.init_machine	= ixdp2x01_init_machine,
	.restart	= ixdp280x_restart,
MACHINE_END
#endif
| gpl-2.0 |
SerenityS/android_kernel_pantech_ef39s_jb | drivers/watchdog/at91rm9200_wdt.c | 4979 | 6677 | /*
* Watchdog driver for Atmel AT91RM9200 (Thunder)
*
* Copyright (C) 2003 SAN People (Pty) Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <mach/at91_st.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
#define WDT_MAX_TIME 256 /* seconds */
static int wdt_time = WDT_DEFAULT_TIME;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_time, int, 0);
MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
__MODULE_STRING(WDT_DEFAULT_TIME) ")");
#ifdef CONFIG_WATCHDOG_NOWAYOUT
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#endif
static unsigned long at91wdt_busy;
/* ......................................................................... */
/*
 * Disable the watchdog: program the mode register with only the
 * external-signal enable, clearing the reset enable and counter value.
 */
static inline void at91_wdt_stop(void)
{
	at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN);
}

/*
 * Enable and reset the watchdog.  The 16-bit WDV field counts at
 * SLOW_CLOCK / 128 = 256 Hz (see at91_wdt_settimeout), so wdt_time
 * seconds corresponds to (65536 * wdt_time) >> 8 ticks; the restart
 * command then loads the counter immediately.
 */
static inline void at91_wdt_start(void)
{
	at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
				(((65536 * wdt_time) >> 8) & AT91_ST_WDV));
	at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
}

/*
 * Reload the watchdog timer. (ie, pat the watchdog)
 */
static inline void at91_wdt_reload(void)
{
	at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
}
/* ......................................................................... */
/*
* Watchdog device is opened, and watchdog starts running.
*/
/*
 * Watchdog device is opened, and watchdog starts running.
 * The busy bit enforces a single opener at a time (-EBUSY otherwise).
 */
static int at91_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &at91wdt_busy))
		return -EBUSY;

	at91_wdt_start();
	return nonseekable_open(inode, file);
}
/*
* Close the watchdog device.
* If CONFIG_WATCHDOG_NOWAYOUT is NOT defined then the watchdog is also
* disabled.
*/
/*
 * Close the watchdog device.  Unless "nowayout" is set, closing also
 * disables the watchdog; the busy bit is released either way.
 */
static int at91_wdt_close(struct inode *inode, struct file *file)
{
	/* Disable the watchdog when file is closed */
	if (!nowayout)
		at91_wdt_stop();

	clear_bit(0, &at91wdt_busy);
	return 0;
}
/*
* Change the watchdog time interval.
*/
/*
 * Validate and record a new watchdog interval, in seconds.
 *
 * All counting occurs at SLOW_CLOCK / 128 = 256 Hz, and WDV is a
 * 16-bit counter, so the maximum period is 65536 / 256 = 256 seconds.
 * The new value only takes effect on the next at91_wdt_start().
 *
 * Returns 0 on success, -EINVAL for an out-of-range request.
 */
static int at91_wdt_settimeout(int new_time)
{
	if (new_time <= 0 || new_time > WDT_MAX_TIME)
		return -EINVAL;

	wdt_time = new_time;
	return 0;
}
static const struct watchdog_info at91_wdt_info = {
.identity = "at91 watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
};
/*
* Handle commands from user-space.
*/
/*
 * Handle commands from user-space: the standard watchdog ioctl set
 * (capabilities, status, enable/disable, keepalive, timeout get/set).
 */
static long at91_wdt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_value;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &at91_wdt_info,
				sizeof(at91_wdt_info)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		/* No status bits are tracked by this hardware. */
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
		if (get_user(new_value, p))
			return -EFAULT;
		if (new_value & WDIOS_DISABLECARD)
			at91_wdt_stop();
		if (new_value & WDIOS_ENABLECARD)
			at91_wdt_start();
		return 0;
	case WDIOC_KEEPALIVE:
		at91_wdt_reload();	/* pat the watchdog */
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;
		if (at91_wdt_settimeout(new_value))
			return -EINVAL;
		/* Enable new time value */
		at91_wdt_start();
		/* Return current value */
		return put_user(wdt_time, p);
	case WDIOC_GETTIMEOUT:
		return put_user(wdt_time, p);
	default:
		return -ENOTTY;
	}
}
/*
* Pat the watchdog whenever device is written to.
*/
/*
 * Pat the watchdog whenever device is written to; the data itself is
 * ignored and the full length is reported as written.
 */
static ssize_t at91_wdt_write(struct file *file, const char *data,
						size_t len, loff_t *ppos)
{
	at91_wdt_reload();		/* pat the watchdog */
	return len;
}
/* ......................................................................... */
/* Character-device interface exposed as the /dev/watchdog misc device. */
static const struct file_operations at91wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= at91_wdt_ioctl,
	.open		= at91_wdt_open,
	.release	= at91_wdt_close,
	.write		= at91_wdt_write,
};

static struct miscdevice at91wdt_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &at91wdt_fops,
};
/*
 * Bind the platform device and register the watchdog misc device.
 * Only one instance is supported; a second probe fails with -EBUSY.
 *
 * Fix: on misc_register() failure the parent pointer was left set, so
 * every subsequent probe attempt was wrongly rejected with -EBUSY even
 * after a transient registration failure; roll it back on error.
 */
static int __devinit at91wdt_probe(struct platform_device *pdev)
{
	int res;

	if (at91wdt_miscdev.parent)
		return -EBUSY;
	at91wdt_miscdev.parent = &pdev->dev;

	res = misc_register(&at91wdt_miscdev);
	if (res) {
		at91wdt_miscdev.parent = NULL;
		return res;
	}

	pr_info("AT91 Watchdog Timer enabled (%d seconds%s)\n",
		wdt_time, nowayout ? ", nowayout" : "");
	return 0;
}
/* Unbind: deregister the misc device and drop the parent reference. */
static int __devexit at91wdt_remove(struct platform_device *pdev)
{
	int res = misc_deregister(&at91wdt_miscdev);

	if (!res)
		at91wdt_miscdev.parent = NULL;

	return res;
}
/* Quiesce the watchdog on system shutdown so it cannot fire mid-halt. */
static void at91wdt_shutdown(struct platform_device *pdev)
{
	at91_wdt_stop();
}
#ifdef CONFIG_PM

/* Stop the watchdog across suspend to avoid a reset while sleeping. */
static int at91wdt_suspend(struct platform_device *pdev, pm_message_t message)
{
	at91_wdt_stop();
	return 0;
}

/* Re-arm on resume only if userspace had the device open. */
static int at91wdt_resume(struct platform_device *pdev)
{
	if (at91wdt_busy)
		at91_wdt_start();
	return 0;
}

#else
#define at91wdt_suspend NULL
#define at91wdt_resume	NULL
#endif
/* Platform driver binding against the "at91_wdt" platform device. */
static struct platform_driver at91wdt_driver = {
	.probe		= at91wdt_probe,
	.remove		= __devexit_p(at91wdt_remove),
	.shutdown	= at91wdt_shutdown,
	.suspend	= at91wdt_suspend,
	.resume		= at91wdt_resume,
	.driver		= {
		.name	= "at91_wdt",
		.owner	= THIS_MODULE,
	},
};
/*
 * Module init: sanity-check the wdt_time module parameter (falling back
 * to the default if out of range), then register the platform driver.
 */
static int __init at91_wdt_init(void)
{
	/* Check that the heartbeat value is within range;
	   if not reset to the default */
	if (at91_wdt_settimeout(wdt_time)) {
		at91_wdt_settimeout(WDT_DEFAULT_TIME);
		pr_info("wdt_time value must be 1 <= wdt_time <= 256, using %d\n",
			wdt_time);
	}

	return platform_driver_register(&at91wdt_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit at91_wdt_exit(void)
{
	platform_driver_unregister(&at91wdt_driver);
}
module_init(at91_wdt_init);
module_exit(at91_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:at91_wdt");
| gpl-2.0 |
skravik/syk-linux-msm-rhod | drivers/mtd/onenand/samsung.c | 4979 | 28994 | /*
* Samsung S3C64XX/S5PC1XX OneNAND driver
*
* Copyright © 2008-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implementation:
* S3C64XX and S5PC100: emulate the pseudo BufferRAM
* S5PC110: use DMA
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/mach/flash.h>
#include <plat/regs-onenand.h>
#include <linux/io.h>
enum soc_type {
TYPE_S3C6400,
TYPE_S3C6410,
TYPE_S5PC100,
TYPE_S5PC110,
};
#define ONENAND_ERASE_STATUS 0x00
#define ONENAND_MULTI_ERASE_SET 0x01
#define ONENAND_ERASE_START 0x03
#define ONENAND_UNLOCK_START 0x08
#define ONENAND_UNLOCK_END 0x09
#define ONENAND_LOCK_START 0x0A
#define ONENAND_LOCK_END 0x0B
#define ONENAND_LOCK_TIGHT_START 0x0C
#define ONENAND_LOCK_TIGHT_END 0x0D
#define ONENAND_UNLOCK_ALL 0x0E
#define ONENAND_OTP_ACCESS 0x12
#define ONENAND_SPARE_ACCESS_ONLY 0x13
#define ONENAND_MAIN_ACCESS_ONLY 0x14
#define ONENAND_ERASE_VERIFY 0x15
#define ONENAND_MAIN_SPARE_ACCESS 0x16
#define ONENAND_PIPELINE_READ 0x4000
#define MAP_00 (0x0)
#define MAP_01 (0x1)
#define MAP_10 (0x2)
#define MAP_11 (0x3)
#define S3C64XX_CMD_MAP_SHIFT 24
#define S5PC100_CMD_MAP_SHIFT 26
#define S3C6400_FBA_SHIFT 10
#define S3C6400_FPA_SHIFT 4
#define S3C6400_FSA_SHIFT 2
#define S3C6410_FBA_SHIFT 12
#define S3C6410_FPA_SHIFT 6
#define S3C6410_FSA_SHIFT 4
#define S5PC100_FBA_SHIFT 13
#define S5PC100_FPA_SHIFT 7
#define S5PC100_FSA_SHIFT 5
/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR 0x400
#define S5PC110_DMA_SRC_CFG 0x404
#define S5PC110_DMA_DST_ADDR 0x408
#define S5PC110_DMA_DST_CFG 0x40C
#define S5PC110_DMA_TRANS_SIZE 0x414
#define S5PC110_DMA_TRANS_CMD 0x418
#define S5PC110_DMA_TRANS_STATUS 0x41C
#define S5PC110_DMA_TRANS_DIR 0x420
#define S5PC110_INTC_DMA_CLR 0x1004
#define S5PC110_INTC_ONENAND_CLR 0x1008
#define S5PC110_INTC_DMA_MASK 0x1024
#define S5PC110_INTC_ONENAND_MASK 0x1028
#define S5PC110_INTC_DMA_PEND 0x1044
#define S5PC110_INTC_ONENAND_PEND 0x1048
#define S5PC110_INTC_DMA_STATUS 0x1064
#define S5PC110_INTC_ONENAND_STATUS 0x1068
#define S5PC110_INTC_DMA_TD (1 << 24)
#define S5PC110_INTC_DMA_TE (1 << 16)
#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
#define S5PC110_DMA_CFG_INC (0x0 << 8)
#define S5PC110_DMA_CFG_CNT (0x1 << 8)
#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
#define S5PC110_DMA_DIR_READ 0x0
#define S5PC110_DMA_DIR_WRITE 0x1
/*
 * Driver state for the Samsung OneNAND controller.  A single global
 * instance is used (see "onenand" below).  The SoC-specific address and
 * command encodings are supplied through the mem_addr/cmd_map hooks.
 */
struct s3c_onenand {
	struct mtd_info	*mtd;
	struct platform_device	*pdev;
	enum soc_type	type;
	void __iomem	*base;		/* controller SFR window */
	struct resource *base_res;
	void __iomem	*ahb_addr;	/* AHB command-mapped window */
	struct resource *ahb_res;
	int		bootram_command; /* nonzero while emulating BootRAM ID reads */
	void __iomem	*page_buf;	/* pseudo BufferRAM: main area */
	void __iomem	*oob_buf;	/* pseudo BufferRAM: spare area */
	unsigned int	(*mem_addr)(int fba, int fpa, int fsa);
	unsigned int	(*cmd_map)(unsigned int type, unsigned int val);
	/* S5PC110 DMA path only: */
	void __iomem	*dma_addr;
	struct resource *dma_res;
	unsigned long	phys_base;
	struct completion	complete;
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
static struct s3c_onenand *onenand;
/* Read a 32-bit controller register at @offset from the SFR base. */
static inline int s3c_read_reg(int offset)
{
	return readl(onenand->base + offset);
}

/* Write a 32-bit controller register at @offset from the SFR base. */
static inline void s3c_write_reg(int value, int offset)
{
	writel(value, onenand->base + offset);
}

/* Read through the AHB window at an encoded command address. */
static inline int s3c_read_cmd(unsigned int cmd)
{
	return readl(onenand->ahb_addr + cmd);
}

/* Write through the AHB window at an encoded command address. */
static inline void s3c_write_cmd(int value, unsigned int cmd)
{
	writel(value, onenand->ahb_addr + cmd);
}
#ifdef SAMSUNG_DEBUG
/* Debug helper: hex-dump the first 0x400 bytes of the SFR region. */
static void s3c_dump_reg(void)
{
	int i;

	for (i = 0; i < 0x400; i += 0x40) {
		printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			(unsigned int) onenand->base + i,
			s3c_read_reg(i), s3c_read_reg(i + 0x10),
			s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
	}
}
#endif
/* Encode an AHB command address: map type in the high bits (S3C64xx). */
static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}

/* Same encoding with the S5PC1xx shift position. */
static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S5PC100_CMD_MAP_SHIFT) | val;
}
/*
 * Pack flash block (fba), page (fpa) and sector (fsa) numbers into a
 * device address, using the per-SoC field positions.
 */
static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
		(fsa << S3C6400_FSA_SHIFT);
}

static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
		(fsa << S3C6410_FSA_SHIFT);
}

static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
		(fsa << S5PC100_FSA_SHIFT);
}
/*
 * Issue a cold reset to the controller and busy-wait (bounded) for the
 * reset-complete flag, then acknowledge and clear the interrupt and
 * ECC status registers.  A timeout simply falls through; the status
 * registers are cleared either way.
 */
static void s3c_onenand_reset(void)
{
	unsigned long spins = 0x10000;
	int stat;

	s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
	while (spins--) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & RST_CMP)
			break;
	}
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/* Clear interrupt */
	s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
	/* Clear the ECC status */
	s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
}
/*
 * Emulate a 16-bit read of a OneNAND register: well-known register
 * offsets are redirected to the controller's own SFRs; BootRAM ID reads
 * are emulated while bootram_command is active; anything else falls
 * through to a MAP_11 direct-access read (and is logged).
 */
static unsigned short s3c_onenand_readw(void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	int reg = addr - this->base;
	int word_addr = reg >> 1;
	int value;

	/* It's used for probing time */
	switch (reg) {
	case ONENAND_REG_MANUFACTURER_ID:
		return s3c_read_reg(MANUFACT_ID_OFFSET);
	case ONENAND_REG_DEVICE_ID:
		return s3c_read_reg(DEVICE_ID_OFFSET);
	case ONENAND_REG_VERSION_ID:
		return s3c_read_reg(FLASH_VER_ID_OFFSET);
	case ONENAND_REG_DATA_BUFFER_SIZE:
		return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
	case ONENAND_REG_TECHNOLOGY:
		return s3c_read_reg(TECH_OFFSET);
	case ONENAND_REG_SYS_CFG1:
		return s3c_read_reg(MEM_CFG_OFFSET);

	/* Used at unlock all status */
	case ONENAND_REG_CTRL_STATUS:
		return 0;

	case ONENAND_REG_WP_STATUS:
		return ONENAND_WP_US;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
		if (word_addr == 0)
			return s3c_read_reg(MANUFACT_ID_OFFSET);
		if (word_addr == 1)
			return s3c_read_reg(DEVICE_ID_OFFSET);
		if (word_addr == 2)
			return s3c_read_reg(FLASH_VER_ID_OFFSET);
	}

	/*
	 * Fall-through: perform the read via MAP_11 anyway, but log it --
	 * this path is not expected for normal operation.
	 */
	value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);
	return value;
}
/*
 * Emulate a 16-bit write to a OneNAND register: config writes go to the
 * controller SFRs, address/lock registers are ignored (handled by the
 * controller itself), BootRAM READID/RESET commands toggle the
 * emulation flag, and anything else is logged and forwarded via MAP_11.
 */
static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int reg = addr - this->base;
	unsigned int word_addr = reg >> 1;

	/* It's used for probing time */
	switch (reg) {
	case ONENAND_REG_SYS_CFG1:
		s3c_write_reg(value, MEM_CFG_OFFSET);
		return;

	case ONENAND_REG_START_ADDRESS1:
	case ONENAND_REG_START_ADDRESS2:
		return;

	/* Lock/lock-tight/unlock/unlock_all */
	case ONENAND_REG_START_BLOCK_ADDRESS:
		return;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int)addr < ONENAND_DATARAM) {
		if (value == ONENAND_CMD_READID) {
			/* Subsequent DataRAM reads return the ID registers. */
			onenand->bootram_command = 1;
			return;
		}
		if (value == ONENAND_CMD_RESET) {
			s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
			onenand->bootram_command = 0;
			return;
		}
	}

	/* Fall-through: log, then perform the write via MAP_11 anyway. */
	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);

	s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
}
/*
 * onenand_chip->wait hook: poll for the completion flags matching @state,
 * acknowledge the interrupt status, then check for ECC and controller
 * errors.  Returns 0, -EBADMSG (uncorrectable ECC) or -EIO.
 */
static int s3c_onenand_wait(struct mtd_info *mtd, int state)
{
	struct device *dev = &onenand->pdev->dev;
	unsigned int flags = INT_ACT;
	unsigned int stat, ecc;
	unsigned long timeout;

	/* Pick the completion bits that signal the end of this operation. */
	switch (state) {
	case FL_READING:
		flags |= BLK_RW_CMP | LOAD_CMP;
		break;
	case FL_WRITING:
		flags |= BLK_RW_CMP | PGM_CMP;
		break;
	case FL_ERASING:
		flags |= BLK_RW_CMP | ERS_CMP;
		break;
	case FL_LOCKING:
		flags |= BLK_RW_CMP;
		break;
	default:
		break;
	}

	/* The 20 msec is enough */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;

		if (state != FL_READING)
			cond_resched();
	}
	/* To get correct interrupt status in timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/*
	 * In the Spec. it checks the controller status first
	 * However if you get the correct information in case of
	 * power off recovery (POR) test, it should read ECC status first
	 */
	if (stat & LOAD_CMP) {
		ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
				 ecc);
			mtd->ecc_stats.failed++;
			return -EBADMSG;
		}
	}

	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
		dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
			 stat);
		if (stat & LOCKED_BLK)
			dev_info(dev, "%s: it's locked error = 0x%04x\n",
				 __func__, stat);

		return -EIO;
	}

	return 0;
}
/*
 * onenand_chip->command hook: dispatch a OneNAND command to the S3C
 * controller.  Data commands stream 32-bit words through the CMD_MAP_01
 * window into/out of the driver's emulated BufferRAMs; control commands
 * (unlock-all, erase) go through CMD_MAP_10.
 */
static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
			       size_t len)
{
	struct onenand_chip *this = mtd->priv;
	unsigned int *m, *s;
	int fba, fpa, fsa = 0;	/* flash block / page / sector; sector always 0 here */
	unsigned int mem_addr, cmd_map_01, cmd_map_10;
	int i, mcount, scount;
	int index;

	fba = (int) (addr >> this->erase_shift);
	fpa = (int) (addr >> this->page_shift);
	fpa &= this->page_mask;

	mem_addr = onenand->mem_addr(fba, fpa, fsa);
	cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
	cmd_map_10 = CMD_MAP_10(onenand, mem_addr);

	switch (cmd) {
	case ONENAND_CMD_READ:
	case ONENAND_CMD_READOOB:
	case ONENAND_CMD_BUFFERRAM:
		ONENAND_SET_NEXT_BUFFERRAM(this);
		/* fall through */
	default:
		break;
	}

	index = ONENAND_CURRENT_BUFFERRAM(this);

	/*
	 * Emulate Two BufferRAMs and access with 4 bytes pointer
	 */
	m = (unsigned int *) onenand->page_buf;
	s = (unsigned int *) onenand->oob_buf;

	if (index) {
		/* Second BufferRAM bank lives right after the first one. */
		m += (this->writesize >> 2);
		s += (mtd->oobsize >> 2);
	}

	mcount = mtd->writesize >> 2;	/* main-area word count */
	scount = mtd->oobsize >> 2;	/* spare-area word count */

	switch (cmd) {
	case ONENAND_CMD_READ:
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);
		return 0;

	case ONENAND_CMD_READOOB:
		/* TSRF routes spare-area data through the same window. */
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			*s++ = s3c_read_cmd(cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_PROG:
		/* Main */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(*m++, cmd_map_01);
		return 0;

	case ONENAND_CMD_PROGOOB:
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);

		/* Main - dummy write */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(0xffffffff, cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			s3c_write_cmd(*s++, cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_UNLOCK_ALL:
		s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
		return 0;

	case ONENAND_CMD_ERASE:
		s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
		return 0;

	default:
		break;
	}
	return 0;
}
/*
 * Return a pointer into the driver's emulated BufferRAM for @area
 * (ONENAND_DATARAM selects the page buffer, anything else the OOB buffer),
 * honouring the currently selected BufferRAM bank.
 */
static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;
	unsigned char *base;
	unsigned int bank_size;

	if (area == ONENAND_DATARAM) {
		base = (unsigned char *) onenand->page_buf;
		bank_size = this->writesize;
	} else {
		base = (unsigned char *) onenand->oob_buf;
		bank_size = mtd->oobsize;
	}

	/* The second emulated bank lives directly after the first. */
	if (ONENAND_CURRENT_BUFFERRAM(this) == 1)
		base += bank_size;

	return base;
}
/* Copy @count bytes out of the emulated BufferRAM.  Cannot fail. */
static int onenand_read_bufferram(struct mtd_info *mtd, int area,
				  unsigned char *buffer, int offset,
				  size_t count)
{
	memcpy(buffer, s3c_get_bufferram(mtd, area) + offset, count);
	return 0;
}
/* Copy @count bytes into the emulated BufferRAM.  Cannot fail. */
static int onenand_write_bufferram(struct mtd_info *mtd, int area,
				   const unsigned char *buffer, int offset,
				   size_t count)
{
	memcpy(s3c_get_bufferram(mtd, area) + offset, buffer, count);
	return 0;
}
static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);
/*
 * Program the S5PC110 DMA engine and busy-poll for completion.
 * @dst/@src are device DMA addresses (not CPU pointers).  Returns 0 on
 * success, -EIO if the engine reports a transfer error.
 */
static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;
	unsigned long timeout;

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	/* Kick off the transfer. */
	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	/*
	 * There's no exact timeout values at Spec.
	 * In real case it takes under 1 msec.
	 * So 20 msecs are enough.
	 */
	timeout = jiffies + msecs_to_jiffies(20);

	do {
		status = readl(base + S5PC110_DMA_TRANS_STATUS);
		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
			/* Transfer error: acknowledge and bail out. */
			writel(S5PC110_DMA_TRANS_CMD_TEC,
			       base + S5PC110_DMA_TRANS_CMD);
			return -EIO;
		}
	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
		 time_before(jiffies, timeout));

	/* Acknowledge transfer-done (also reached on timeout). */
	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);

	return 0;
}
/*
 * DMA interrupt handler: acknowledge transfer-done or transfer-error in the
 * engine, clear the interrupt controller status, and wake the waiter in
 * s5pc110_dma_irq().
 */
static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
{
	void __iomem *base = onenand->dma_addr;
	int status, cmd = 0;

	status = readl(base + S5PC110_INTC_DMA_STATUS);

	if (likely(status & S5PC110_INTC_DMA_TD))
		cmd = S5PC110_DMA_TRANS_CMD_TDC;

	if (unlikely(status & S5PC110_INTC_DMA_TE))
		cmd = S5PC110_DMA_TRANS_CMD_TEC;

	writel(cmd, base + S5PC110_DMA_TRANS_CMD);
	writel(status, base + S5PC110_INTC_DMA_CLR);

	/*
	 * NOTE(review): this peeks at the completion's internal "done"
	 * counter to avoid double-completing; relies on struct completion
	 * internals rather than the public API.
	 */
	if (!onenand->complete.done)
		complete(&onenand->complete);

	return IRQ_HANDLED;
}
/*
 * Program the S5PC110 DMA engine and sleep until s5pc110_onenand_irq()
 * signals completion.  @dst/@src are device DMA addresses.  Returns 0 on
 * success, -EIO if the interrupt never arrives within the 20 ms window.
 */
static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;

	/* Unmask the transfer-done / transfer-error DMA interrupts. */
	status = readl(base + S5PC110_INTC_DMA_MASK);
	if (status) {
		status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
		writel(status, base + S5PC110_INTC_DMA_MASK);
	}

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	/* Kick off the transfer. */
	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	/*
	 * Fix: the original ignored wait_for_completion_timeout()'s result
	 * and returned 0 even when the interrupt never arrived.  Report -EIO
	 * on timeout so s5pc110_read_bufferram() falls back to the PIO path.
	 */
	if (!wait_for_completion_timeout(&onenand->complete,
					 msecs_to_jiffies(20)))
		return -EIO;

	return 0;
}
/*
 * S5PC110 read_bufferram hook: try to DMA a whole page out of the
 * memory-mapped BufferRAM into @buffer; fall back to memcpy (PIO) when the
 * buffer is misaligned, partial, unmappable, or the DMA itself fails.
 */
static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
		unsigned char *buffer, int offset, size_t count)
{
	struct onenand_chip *this = mtd->priv;
	void __iomem *p;
	void *buf = (void *) buffer;
	dma_addr_t dma_src, dma_dst;
	int err, ofs, page_dma = 0;
	struct device *dev = &onenand->pdev->dev;

	p = this->base + area;
	/* The second BufferRAM bank lives right after the first one. */
	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			p += this->writesize;
		else
			p += mtd->oobsize;
	}

	/* DMA requires 4-byte alignment, a mapped engine and a full page. */
	if (offset & 3 || (size_t) buf & 3 ||
	    !onenand->dma_addr || count != mtd->writesize)
		goto normal;

	/* Handle vmalloc address */
	if (buf >= high_memory) {
		struct page *page;

		/* The destination must not straddle a page boundary. */
		if (((size_t) buf & PAGE_MASK) !=
		    ((size_t) (buf + count - 1) & PAGE_MASK))
			goto normal;
		page = vmalloc_to_page(buf);
		if (!page)
			goto normal;

		/* Page offset */
		ofs = ((size_t) buf & ~PAGE_MASK);
		page_dma = 1;

		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
	} else {
		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	}
	if (dma_mapping_error(dev, dma_dst)) {
		/* Fix: count is size_t, so the specifier must be %zu. */
		dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
		goto normal;
	}
	err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
			count, S5PC110_DMA_DIR_READ);

	if (page_dma)
		dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
	else
		dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!err)
		return 0;

normal:
	/* PIO fallback through the memory-mapped BufferRAM window. */
	if (count != mtd->writesize) {
		/* Copy the bufferram to memory to prevent unaligned access */
		memcpy(this->page_buf, p, mtd->writesize);
		p = this->page_buf + offset;
	}

	memcpy(buffer, p, count);

	return 0;
}
/*
 * S5PC110 chip_probe hook: nothing board-specific to check, so always
 * report success.
 */
static int s5pc110_chip_probe(struct mtd_info *mtd)
{
	/* Now just return 0 */
	return 0;
}
/*
 * Wait hook used during bad-block-table scanning: like s3c_onenand_wait()
 * but reports failures as ONENAND_BBT_READ_ERROR and resets the controller
 * so the scan can continue with the next block.
 */
static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
	unsigned int flags = INT_ACT | LOAD_CMP;
	unsigned int stat;
	unsigned long timeout;

	/* The 20 msec is enough */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;
	}
	/* To get correct interrupt status in timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	if (stat & LD_FAIL_ECC_ERR) {
		s3c_onenand_reset();
		return ONENAND_BBT_READ_ERROR;
	}

	if (stat & LOAD_CMP) {
		/* Load completed: still verify the ECC result. */
		int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			s3c_onenand_reset();
			return ONENAND_BBT_READ_ERROR;
		}
	}

	return 0;
}
/*
 * Walk every erase block, perform a dummy read, and report (then ack) any
 * block the controller flags as still write-protected.
 */
static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int block, end;
	int tmp;

	end = this->chipsize >> this->erase_shift;

	for (block = 0; block < end; block++) {
		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
		/* Dummy read: the access itself latches the lock status. */
		tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));

		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
			dev_err(dev, "block %d is write-protected!\n", block);
			s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
		}
	}
}
/*
 * Lock or unlock the block range [ofs, ofs + len) by writing start/end
 * markers through the CMD_MAP_10 window, then wait for completion.
 * @cmd is ONENAND_CMD_LOCK or any other value for unlock.
 */
static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
				    size_t len, int cmd)
{
	struct onenand_chip *this = mtd->priv;
	int start, end, start_mem_addr, end_mem_addr;

	start = ofs >> this->erase_shift;
	start_mem_addr = onenand->mem_addr(start, 0, 0);
	end = start + (len >> this->erase_shift) - 1;	/* inclusive last block */
	end_mem_addr = onenand->mem_addr(end, 0, 0);

	if (cmd == ONENAND_CMD_LOCK) {
		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
							     start_mem_addr));
		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
							   end_mem_addr));
	} else {
		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
							       start_mem_addr));
		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
							     end_mem_addr));
	}

	this->wait(mtd, FL_LOCKING);
}
/*
 * Unlock the whole chip.  Uses the single unlock-all command when the chip
 * supports it; on DDP parts that command only covers the first die, so the
 * second half is unlocked with an explicit range command.  Finishes with a
 * full lock-status scan.
 */
static void s3c_unlock_all(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	loff_t ofs = 0;
	size_t len = this->chipsize;

	if (this->options & ONENAND_HAS_UNLOCK_ALL) {
		/* Write unlock command */
		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);

		/* No need to check return value */
		this->wait(mtd, FL_LOCKING);

		/* Workaround for all block unlock in DDP */
		if (!ONENAND_IS_DDP(this)) {
			s3c_onenand_check_lock_status(mtd);
			return;
		}

		/* All blocks on another chip */
		ofs = this->chipsize >> 1;
		len = this->chipsize >> 1;
	}

	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);

	s3c_onenand_check_lock_status(mtd);
}
/*
 * Install the per-SoC address/command mapping helpers and, for the
 * S3C64xx/S5PC100 variants, replace the generic OneNAND operations with the
 * register-emulating hooks defined above.  S5PC110 keeps the generic ops
 * except for buffer reads and chip probing.
 */
static void s3c_onenand_setup(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;

	onenand->mtd = mtd;

	switch (onenand->type) {
	case TYPE_S3C6400:
		onenand->mem_addr = s3c6400_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
		break;
	case TYPE_S3C6410:
		onenand->mem_addr = s3c6410_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
		break;
	case TYPE_S5PC100:
		onenand->mem_addr = s5pc100_mem_addr;
		onenand->cmd_map = s5pc1xx_cmd_map;
		break;
	case TYPE_S5PC110:
		/* Use generic onenand functions */
		this->read_bufferram = s5pc110_read_bufferram;
		this->chip_probe = s5pc110_chip_probe;
		return;
	default:
		BUG();
	}

	this->read_word = s3c_onenand_readw;
	this->write_word = s3c_onenand_writew;

	this->wait = s3c_onenand_wait;
	this->bbt_wait = s3c_onenand_bbt_wait;
	this->unlock_all = s3c_unlock_all;
	this->command = s3c_onenand_command;

	this->read_bufferram = onenand_read_bufferram;
	this->write_bufferram = onenand_write_bufferram;
}
/*
 * Platform probe: allocate the mtd/onenand state, map the controller
 * registers, set up either the S3C64xx/S5PC100 BufferRAM emulation or the
 * S5PC110 DMA path, scan the chip and register the MTD device.
 * All error paths unwind through the goto ladder at the bottom.
 */
static int s3c_onenand_probe(struct platform_device *pdev)
{
	struct onenand_platform_data *pdata;
	struct onenand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int size, err;

	pdata = pdev->dev.platform_data;
	/* No need to check pdata. the platform data is optional */

	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
	mtd = kzalloc(size, GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
	if (!onenand) {
		err = -ENOMEM;
		goto onenand_fail;
	}
	/* The onenand_chip lives in the tail of the mtd_info allocation. */
	this = (struct onenand_chip *) &mtd[1];
	mtd->priv = this;
	mtd->dev.parent = &pdev->dev;
	mtd->owner = THIS_MODULE;
	onenand->pdev = pdev;
	onenand->type = platform_get_device_id(pdev)->driver_data;

	s3c_onenand_setup(mtd);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		/*
		 * Fix: this used to "return -ENOENT;" followed by an
		 * unreachable goto, leaking both mtd and onenand.
		 */
		err = -ENOENT;
		goto resource_failed;
	}

	onenand->base_res = request_mem_region(r->start, resource_size(r),
					       pdev->name);
	if (!onenand->base_res) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		err = -EBUSY;
		goto resource_failed;
	}

	onenand->base = ioremap(r->start, resource_size(r));
	if (!onenand->base) {
		dev_err(&pdev->dev, "failed to map memory resource\n");
		err = -EFAULT;
		goto ioremap_failed;
	}
	/* Set onenand_chip also */
	this->base = onenand->base;

	/* Use runtime badblock check */
	this->options |= ONENAND_SKIP_UNLOCK_CHECK;

	if (onenand->type != TYPE_S5PC110) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no buffer memory resource defined\n");
			err = -ENOENT;
			goto ahb_resource_failed;
		}

		onenand->ahb_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->ahb_res) {
			dev_err(&pdev->dev, "failed to request buffer memory resource\n");
			err = -EBUSY;
			goto ahb_resource_failed;
		}

		onenand->ahb_addr = ioremap(r->start, resource_size(r));
		if (!onenand->ahb_addr) {
			dev_err(&pdev->dev, "failed to map buffer memory resource\n");
			err = -EINVAL;
			goto ahb_ioremap_failed;
		}

		/* Allocate 4KiB BufferRAM */
		onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
		if (!onenand->page_buf) {
			err = -ENOMEM;
			goto page_buf_fail;
		}

		/* Allocate 128 SpareRAM */
		onenand->oob_buf = kzalloc(128, GFP_KERNEL);
		if (!onenand->oob_buf) {
			err = -ENOMEM;
			goto oob_buf_fail;
		}

		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;

	} else { /* S5PC110 */
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no dma memory resource defined\n");
			err = -ENOENT;
			goto dma_resource_failed;
		}

		onenand->dma_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->dma_res) {
			dev_err(&pdev->dev, "failed to request dma memory resource\n");
			err = -EBUSY;
			goto dma_resource_failed;
		}

		onenand->dma_addr = ioremap(r->start, resource_size(r));
		if (!onenand->dma_addr) {
			dev_err(&pdev->dev, "failed to map dma memory resource\n");
			err = -EINVAL;
			goto dma_ioremap_failed;
		}

		onenand->phys_base = onenand->base_res->start;

		/* Default to polling; upgrade to IRQ-driven DMA if we get one. */
		s5pc110_dma_ops = s5pc110_dma_poll;
		/* Interrupt support */
		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (r) {
			init_completion(&onenand->complete);
			s5pc110_dma_ops = s5pc110_dma_irq;
			err = request_irq(r->start, s5pc110_onenand_irq,
					  IRQF_SHARED, "onenand", &onenand);
			if (err) {
				dev_err(&pdev->dev, "failed to get irq\n");
				goto scan_failed;
			}
		}
	}

	if (onenand_scan(mtd, 1)) {
		err = -EFAULT;
		goto scan_failed;
	}

	if (onenand->type != TYPE_S5PC110) {
		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;
	}

	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");

	/*
	 * NOTE(review): the return value of mtd_device_parse_register() is
	 * stored but never checked, matching the original behavior; a proper
	 * failure path here would need onenand_release() — confirm before
	 * changing.
	 */
	err = mtd_device_parse_register(mtd, NULL, NULL,
					pdata ? pdata->parts : NULL,
					pdata ? pdata->nr_parts : 0);

	platform_set_drvdata(pdev, mtd);

	return 0;

scan_failed:
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
dma_ioremap_failed:
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));
	kfree(onenand->oob_buf);
oob_buf_fail:
	kfree(onenand->page_buf);
page_buf_fail:
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
ahb_ioremap_failed:
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
dma_resource_failed:
ahb_resource_failed:
	iounmap(onenand->base);
ioremap_failed:
	if (onenand->base_res)
		release_mem_region(onenand->base_res->start,
				   resource_size(onenand->base_res));
resource_failed:
	kfree(onenand);
onenand_fail:
	kfree(mtd);
	return err;
}
/*
 * Platform remove: unregister the MTD device and release every resource
 * acquired in probe (AHB/DMA windows, register window, buffers, state).
 */
static int __devexit s3c_onenand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);

	onenand_release(mtd);
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));

	iounmap(onenand->base);
	release_mem_region(onenand->base_res->start,
			   resource_size(onenand->base_res));

	platform_set_drvdata(pdev, NULL);
	kfree(onenand->oob_buf);
	kfree(onenand->page_buf);
	kfree(onenand);
	kfree(mtd);
	return 0;
}
/* PM suspend: quiesce the chip before the system sleeps. */
static int s3c_pm_ops_suspend(struct device *dev)
{
	struct mtd_info *mtd = platform_get_drvdata(to_platform_device(dev));
	struct onenand_chip *this = mtd->priv;

	this->wait(mtd, FL_PM_SUSPENDED);
	return 0;
}
/* PM resume: re-unlock all blocks (lock state is lost across suspend). */
static int s3c_pm_ops_resume(struct device *dev)
{
	struct mtd_info *mtd = platform_get_drvdata(to_platform_device(dev));
	struct onenand_chip *this = mtd->priv;

	this->unlock_all(mtd);
	return 0;
}
/* Suspend quiesces the chip; resume re-unlocks every block. */
static const struct dev_pm_ops s3c_pm_ops = {
	.suspend	= s3c_pm_ops_suspend,
	.resume		= s3c_pm_ops_resume,
};

/* Supported SoC variants; driver_data selects the TYPE_* handling. */
static struct platform_device_id s3c_onenand_driver_ids[] = {
	{
		.name		= "s3c6400-onenand",
		.driver_data	= TYPE_S3C6400,
	}, {
		.name		= "s3c6410-onenand",
		.driver_data	= TYPE_S3C6410,
	}, {
		.name		= "s5pc100-onenand",
		.driver_data	= TYPE_S5PC100,
	}, {
		.name		= "s5pc110-onenand",
		.driver_data	= TYPE_S5PC110,
	}, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);

static struct platform_driver s3c_onenand_driver = {
	.driver         = {
		.name	= "samsung-onenand",
		.pm	= &s3c_pm_ops,
	},
	.id_table	= s3c_onenand_driver_ids,
	.probe          = s3c_onenand_probe,
	.remove         = __devexit_p(s3c_onenand_remove),
};

module_platform_driver(s3c_onenand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Samsung OneNAND controller support");
| gpl-2.0 |
mixtile/loftq-linux | sound/pci/ice1712/ak4xxx.c | 8563 | 5089 | /*
* ALSA driver for ICEnsemble ICE1712 (Envy24)
*
* AK4524 / AK4528 / AK4529 / AK4355 / AK4381 interface
*
* Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include "ice1712.h"
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("ICEnsemble ICE17xx <-> AK4xxx AD/DA chip interface");
MODULE_LICENSE("GPL");
/* Lock callback: save the card's GPIO state before bit-banging the codec. */
static void snd_ice1712_akm4xxx_lock(struct snd_akm4xxx *ak, int chip)
{
	struct snd_ice1712 *ice;

	ice = ak->private_data[0];
	snd_ice1712_save_gpio_status(ice);
}
/* Unlock callback: restore the GPIO state saved by the lock callback. */
static void snd_ice1712_akm4xxx_unlock(struct snd_akm4xxx *ak, int chip)
{
	struct snd_ice1712 *ice;

	ice = ak->private_data[0];
	snd_ice1712_restore_gpio_status(ice);
}
/*
* write AK4xxx register
*/
/*
 * Bit-bang one 16-bit register write (chip address + register + data) to an
 * AK4xxx codec over the ICE1712 GPIO pins.  Chip-select handling differs
 * depending on whether a dedicated CS line (cs_mask == cs_addr) or an
 * address-multiplexed select is wired up.
 */
static void snd_ice1712_akm4xxx_write(struct snd_akm4xxx *ak, int chip,
				      unsigned char addr, unsigned char data)
{
	unsigned int tmp;
	int idx;
	unsigned int addrdata;
	struct snd_ak4xxx_private *priv = (void *)ak->private_value[0];
	struct snd_ice1712 *ice = ak->private_data[0];

	if (snd_BUG_ON(chip < 0 || chip >= 4))
		return;

	/* Start from the current GPIO state with the board's fixed bits set. */
	tmp = snd_ice1712_gpio_read(ice);
	tmp |= priv->add_flags;
	tmp &= ~priv->mask_flags;
	if (priv->cs_mask == priv->cs_addr) {
		if (priv->cif) {
			tmp |= priv->cs_mask; /* start without chip select */
		}  else {
			tmp &= ~priv->cs_mask; /* chip select low */
			snd_ice1712_gpio_write(ice, tmp);
			udelay(1);
		}
	} else {
		/* doesn't handle cf=1 yet */
		tmp &= ~priv->cs_mask;
		tmp |= priv->cs_addr;
		snd_ice1712_gpio_write(ice, tmp);
		udelay(1);
	}

	/* build I2C address + data byte */
	addrdata = (priv->caddr << 6) | 0x20 | (addr & 0x1f);
	addrdata = (addrdata << 8) | data;
	/* Clock out the 16 bits MSB first: data valid on the rising edge. */
	for (idx = 15; idx >= 0; idx--) {
		/* drop clock */
		tmp &= ~priv->clk_mask;
		snd_ice1712_gpio_write(ice, tmp);
		udelay(1);
		/* set data */
		if (addrdata & (1 << idx))
			tmp |= priv->data_mask;
		else
			tmp &= ~priv->data_mask;
		snd_ice1712_gpio_write(ice, tmp);
		udelay(1);
		/* raise clock */
		tmp |= priv->clk_mask;
		snd_ice1712_gpio_write(ice, tmp);
		udelay(1);
	}

	if (priv->cs_mask == priv->cs_addr) {
		if (priv->cif) {
			/* assert a cs pulse to trigger */
			tmp &= ~priv->cs_mask;
			snd_ice1712_gpio_write(ice, tmp);
			udelay(1);
		}
		tmp |= priv->cs_mask; /* chip select high to trigger */
	} else {
		tmp &= ~priv->cs_mask;
		tmp |= priv->cs_none; /* deselect address */
	}
	snd_ice1712_gpio_write(ice, tmp);
	udelay(1);
}
/*
* initialize the struct snd_akm4xxx record with the template
*/
/*
 * Initialize @ak from the board template @temp, duplicating the optional
 * private GPIO description @_priv (freed in snd_ice1712_akm4xxx_free()),
 * and install the default GPIO lock/unlock/write callbacks unless the
 * template already provides them.  Returns 0 or -ENOMEM.
 */
int snd_ice1712_akm4xxx_init(struct snd_akm4xxx *ak, const struct snd_akm4xxx *temp,
			     const struct snd_ak4xxx_private *_priv, struct snd_ice1712 *ice)
{
	struct snd_ak4xxx_private *priv;

	if (_priv != NULL) {
		/* Fix idiom: kmemdup() replaces open-coded kmalloc + copy. */
		priv = kmemdup(_priv, sizeof(*priv), GFP_KERNEL);
		if (priv == NULL)
			return -ENOMEM;
	} else {
		priv = NULL;
	}
	*ak = *temp;
	ak->card = ice->card;
	ak->private_value[0] = (unsigned long)priv;	/* ownership: freed in _free() */
	ak->private_data[0] = ice;
	if (ak->ops.lock == NULL)
		ak->ops.lock = snd_ice1712_akm4xxx_lock;
	if (ak->ops.unlock == NULL)
		ak->ops.unlock = snd_ice1712_akm4xxx_unlock;
	if (ak->ops.write == NULL)
		ak->ops.write = snd_ice1712_akm4xxx_write;
	snd_akm4xxx_init(ak);
	return 0;
}
/*
 * Release every codec's private GPIO description (allocated in
 * snd_ice1712_akm4xxx_init()) and then the codec array itself.
 */
void snd_ice1712_akm4xxx_free(struct snd_ice1712 *ice)
{
	unsigned int i;

	if (!ice->akm)
		return;
	for (i = 0; i < ice->akm_codecs; i++)
		kfree((void *)ice->akm[i].private_value[0]);
	kfree(ice->akm);
}
/*
* build AK4xxx controls
*/
/*
 * Build the ALSA mixer controls for every attached AK4xxx codec.
 * Stops and returns the first negative error code; 0 on success.
 */
int snd_ice1712_akm4xxx_build_controls(struct snd_ice1712 *ice)
{
	unsigned int i;

	for (i = 0; i < ice->akm_codecs; i++) {
		int err = snd_akm4xxx_build_controls(&ice->akm[i]);

		if (err < 0)
			return err;
	}
	return 0;
}
/* Nothing to do at load time; this module only exports helpers. */
static int __init alsa_ice1712_akm4xxx_module_init(void)
{
	return 0;
}

/* Nothing to tear down; per-card state is freed by the card drivers. */
static void __exit alsa_ice1712_akm4xxx_module_exit(void)
{
}

module_init(alsa_ice1712_akm4xxx_module_init)
module_exit(alsa_ice1712_akm4xxx_module_exit)
EXPORT_SYMBOL(snd_ice1712_akm4xxx_init);
EXPORT_SYMBOL(snd_ice1712_akm4xxx_free);
EXPORT_SYMBOL(snd_ice1712_akm4xxx_build_controls);
| gpl-2.0 |
multipath-tcp/mptcp_3.4.x | drivers/media/rc/keymaps/rc-lme2510.c | 8819 | 2756 | /* LME2510 remote control
*
*
* Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
 * Scancode -> keycode table covering the three remote variants shipped with
 * LME2510 devices; all three sets live in one table and are distinguished
 * only by their scancode ranges.
 */
static struct rc_map_table lme2510_rc[] = {
	/* Type 1 - 26 buttons */
	{ 0x10ed45, KEY_0 },
	{ 0x10ed5f, KEY_1 },
	{ 0x10ed50, KEY_2 },
	{ 0x10ed5d, KEY_3 },
	{ 0x10ed41, KEY_4 },
	{ 0x10ed0a, KEY_5 },
	{ 0x10ed42, KEY_6 },
	{ 0x10ed47, KEY_7 },
	{ 0x10ed49, KEY_8 },
	{ 0x10ed05, KEY_9 },
	{ 0x10ed43, KEY_POWER },
	{ 0x10ed46, KEY_SUBTITLE },
	{ 0x10ed06, KEY_PAUSE },
	{ 0x10ed03, KEY_MEDIA_REPEAT},
	{ 0x10ed02, KEY_PAUSE },
	{ 0x10ed5e, KEY_VOLUMEUP },
	{ 0x10ed5c, KEY_VOLUMEDOWN },
	{ 0x10ed09, KEY_CHANNELUP },
	{ 0x10ed1a, KEY_CHANNELDOWN },
	{ 0x10ed1e, KEY_PLAY },
	{ 0x10ed1b, KEY_ZOOM },
	{ 0x10ed59, KEY_MUTE },
	{ 0x10ed5a, KEY_TV },
	{ 0x10ed18, KEY_RECORD },
	{ 0x10ed07, KEY_EPG },
	{ 0x10ed01, KEY_STOP },
	/* Type 2 - 20 buttons */
	{ 0xbf15, KEY_0 },
	{ 0xbf08, KEY_1 },
	{ 0xbf09, KEY_2 },
	{ 0xbf0a, KEY_3 },
	{ 0xbf0c, KEY_4 },
	{ 0xbf0d, KEY_5 },
	{ 0xbf0e, KEY_6 },
	{ 0xbf10, KEY_7 },
	{ 0xbf11, KEY_8 },
	{ 0xbf12, KEY_9 },
	{ 0xbf00, KEY_POWER },
	{ 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0xbf1a, KEY_PAUSE }, /* Timeshift */
	{ 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0xbf01, KEY_CHANNELUP },
	{ 0xbf05, KEY_CHANNELDOWN },
	{ 0xbf14, KEY_ZOOM },
	{ 0xbf18, KEY_RECORD },
	{ 0xbf16, KEY_STOP },
	/* Type 3 - 20 buttons */
	{ 0x1c, KEY_0 },
	{ 0x07, KEY_1 },
	{ 0x15, KEY_2 },
	{ 0x09, KEY_3 },
	{ 0x16, KEY_4 },
	{ 0x19, KEY_5 },
	{ 0x0d, KEY_6 },
	{ 0x0c, KEY_7 },
	{ 0x18, KEY_8 },
	{ 0x5e, KEY_9 },
	{ 0x45, KEY_POWER },
	{ 0x44, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0x4a, KEY_PAUSE }, /* Timeshift */
	{ 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0x46, KEY_CHANNELUP },
	{ 0x40, KEY_CHANNELDOWN },
	{ 0x08, KEY_ZOOM },
	{ 0x42, KEY_RECORD },
	{ 0x5a, KEY_STOP },
};
/* Map descriptor registered with the rc-core keymap registry. */
static struct rc_map_list lme2510_map = {
	.map = {
		.scan    = lme2510_rc,
		.size    = ARRAY_SIZE(lme2510_rc),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_LME2510,
	}
};

/* Register the keymap at module load. */
static int __init init_rc_lme2510_map(void)
{
	return rc_map_register(&lme2510_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_lme2510_map(void)
{
	rc_map_unregister(&lme2510_map);
}

module_init(init_rc_lme2510_map)
module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
| gpl-2.0 |
lynxluna/linux-ginger | drivers/media/video/btcx-risc.c | 13427 | 6470 | /*
btcx-risc.c
bt848/bt878/cx2388x risc code generator.
(c) 2000-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/videodev2.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "btcx-risc.h"
MODULE_DESCRIPTION("some code shared by bttv and cx88xx drivers");
MODULE_AUTHOR("Gerd Knorr");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"debug messages, default is 0 (no)");
/* ---------------------------------------------------------- */
/* allocate/free risc memory */
static int memcnt;
/*
 * Free a RISC program buffer allocated by btcx_riscmem_alloc() and zero the
 * descriptor so a later free/alloc sees a clean state.  Safe to call on a
 * never-allocated (cpu == NULL) descriptor.
 */
void btcx_riscmem_free(struct pci_dev *pci,
		       struct btcx_riscmem *risc)
{
	if (NULL == risc->cpu)
		return;
	if (debug) {
		memcnt--;
		printk("btcx: riscmem free [%d] dma=%lx\n",
		       memcnt, (unsigned long)risc->dma);
	}
	pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
	memset(risc, 0, sizeof(*risc));
}
/*
 * Ensure @risc holds at least @size bytes of DMA-coherent memory for a RISC
 * program.  An existing buffer is reused if big enough, otherwise freed and
 * reallocated.  The buffer is always zeroed.  Returns 0 or -ENOMEM.
 */
int btcx_riscmem_alloc(struct pci_dev *pci,
		       struct btcx_riscmem *risc,
		       unsigned int size)
{
	__le32 *cpu;
	dma_addr_t dma = 0;

	/* Too small to reuse: release and fall through to a fresh alloc. */
	if (NULL != risc->cpu && risc->size < size)
		btcx_riscmem_free(pci, risc);
	if (NULL == risc->cpu) {
		cpu = pci_alloc_consistent(pci, size, &dma);
		if (NULL == cpu)
			return -ENOMEM;
		risc->cpu  = cpu;
		risc->dma  = dma;
		risc->size = size;
		if (debug) {
			memcnt++;
			printk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
			       memcnt, (unsigned long)dma, cpu, size);
		}
	}
	memset(risc->cpu, 0, risc->size);
	return 0;
}
/* ---------------------------------------------------------- */
/* screen overlay helpers */
/*
 * Append up to four clip rectangles (in window-relative coordinates) that
 * mask off the parts of @win lying outside the swidth x sheight screen.
 * @n is the number of clips already in @clips; the new total is returned.
 * The caller must provide room for four extra entries.
 */
int
btcx_screen_clips(int swidth, int sheight, struct v4l2_rect *win,
		  struct v4l2_clip *clips, unsigned int n)
{
	if (win->left < 0) {
		/* left */
		clips[n].c.left = 0;
		clips[n].c.top = 0;
		clips[n].c.width  = -win->left;
		clips[n].c.height = win->height;
		n++;
	}
	if (win->left + win->width > swidth) {
		/* right */
		clips[n].c.left   = swidth - win->left;
		clips[n].c.top    = 0;
		clips[n].c.width  = win->width - clips[n].c.left;
		clips[n].c.height = win->height;
		n++;
	}
	if (win->top < 0) {
		/* top */
		clips[n].c.left = 0;
		clips[n].c.top = 0;
		clips[n].c.width  = win->width;
		clips[n].c.height = -win->top;
		n++;
	}
	if (win->top + win->height > sheight) {
		/* bottom */
		clips[n].c.left = 0;
		clips[n].c.top = sheight - win->top;
		clips[n].c.width  = win->width;
		clips[n].c.height = win->height - clips[n].c.top;
		n++;
	}
	return n;
}
/*
 * Align the overlay window's horizontal edges to (@mask + 1)-byte
 * boundaries — shrinking the window, never growing it — and widen the clip
 * rectangles outward so they still fully cover what they clipped before the
 * shift.  Always returns 0.
 */
int
btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int mask)
{
	s32 nx, nw, dx;
	unsigned int i;

	/* fixup window */
	nx = (win->left + mask) & ~mask;	/* round left edge up */
	nw = (win->width) & ~mask;		/* round width down */
	if (nx + nw > win->left + win->width)
		nw -= mask + 1;			/* keep within the original span */
	dx = nx - win->left;
	win->left  = nx;
	win->width = nw;
	if (debug)
		printk(KERN_DEBUG "btcx: window align %dx%d+%d+%d [dx=%d]\n",
		       win->width, win->height, win->left, win->top, dx);

	/* fixup clips */
	for (i = 0; i < n; i++) {
		/* Clips round outward (left down, width up) to stay covering. */
		nx = (clips[i].c.left - dx) & ~mask;
		nw = (clips[i].c.width) & ~mask;
		if (nx + nw < clips[i].c.left - dx + clips[i].c.width)
			nw += mask + 1;
		clips[i].c.left  = nx;
		clips[i].c.width = nw;
		if (debug)
			printk(KERN_DEBUG "btcx: clip align %dx%d+%d+%d\n",
			       clips[i].c.width, clips[i].c.height,
			       clips[i].c.left, clips[i].c.top);
	}
	return 0;
}
void
btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips)
{
struct v4l2_clip swap;
int i,j,n;
if (nclips < 2)
return;
for (i = nclips-2; i >= 0; i--) {
for (n = 0, j = 0; j <= i; j++) {
if (clips[j].c.left > clips[j+1].c.left) {
swap = clips[j];
clips[j] = clips[j+1];
clips[j+1] = swap;
n++;
}
}
if (0 == n)
break;
}
}
/*
 * For scanline @line, merge the (left-sorted) clip rectangles into a list
 * of horizontal skip intervals, clamped to [0, width).  Also computes
 * *maxy: the last line for which this skip list stays valid, so callers can
 * reuse it for a run of lines.
 */
void
btcx_calc_skips(int line, int width, int *maxy,
		struct btcx_skiplist *skips, unsigned int *nskips,
		const struct v4l2_clip *clips, unsigned int nclips)
{
	unsigned int clip, skip;
	int end, maxline;

	skip = 0;
	maxline = 9999;
	for (clip = 0; clip < nclips; clip++) {

		/* sanity checks */
		if (clips[clip].c.left + clips[clip].c.width <= 0)
			continue;
		if (clips[clip].c.left > (signed)width)
			break;	/* clips are sorted by left edge: none beyond this one matters */

		/* vertical range */
		if (line > clips[clip].c.top + clips[clip].c.height - 1)
			continue;
		if (line < clips[clip].c.top) {
			/* Clip starts below: it bounds how far the list is valid. */
			if (maxline > clips[clip].c.top - 1)
				maxline = clips[clip].c.top - 1;
			continue;
		}
		if (maxline > clips[clip].c.top + clips[clip].c.height - 1)
			maxline = clips[clip].c.top + clips[clip].c.height - 1;

		/* horizontal range */
		if (0 == skip || clips[clip].c.left > skips[skip - 1].end) {
			/* new one */
			skips[skip].start = clips[clip].c.left;
			if (skips[skip].start < 0)
				skips[skip].start = 0;
			skips[skip].end = clips[clip].c.left + clips[clip].c.width;
			if (skips[skip].end > width)
				skips[skip].end = width;
			skip++;
		} else {
			/* overlaps -- expand last one */
			end = clips[clip].c.left + clips[clip].c.width;
			if (skips[skip - 1].end < end)
				skips[skip - 1].end = end;
			if (skips[skip - 1].end > width)
				skips[skip - 1].end = width;
		}
	}
	*nskips = skip;
	*maxy = maxline;

	if (debug) {
		printk(KERN_DEBUG "btcx: skips line %d-%d:", line, maxline);
		for (skip = 0; skip < *nskips; skip++) {
			printk(" %d-%d", skips[skip].start, skips[skip].end);
		}
		printk("\n");
	}
}
/* ---------------------------------------------------------- */
EXPORT_SYMBOL(btcx_riscmem_alloc);
EXPORT_SYMBOL(btcx_riscmem_free);
EXPORT_SYMBOL(btcx_screen_clips);
EXPORT_SYMBOL(btcx_align);
EXPORT_SYMBOL(btcx_sort_clips);
EXPORT_SYMBOL(btcx_calc_skips);
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
nightvixen/explosion_kernel | arch/m32r/platforms/m32700ut/io.c | 13939 | 10805 | /*
* linux/arch/m32r/platforms/m32700ut/io.c
*
* Typical I/O routines for M32700UT board.
*
* Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
* Hitoshi Yamamoto, Takeo Takahashi
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*/
#include <asm/m32r.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
#include <linux/types.h>
#define M32R_PCC_IOMAP_SIZE 0x1000
#define M32R_PCC_IOSTART0 0x1000
#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
#define PORT2ADDR(port) _port2addr(port)
#define PORT2ADDR_USB(port) _port2addr_usb(port)
/* Generic port mapping: ports map 1:1 into the non-cached address space. */
static inline void *_port2addr(unsigned long port)
{
	unsigned long addr = port | NONCACHE_OFFSET;

	return (void *)addr;
}
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
/*
 * Map the legacy ATA primary-channel ports (task file 0x1f0-0x1f7 and
 * the 0x3f6 device-control port) to their fixed non-cached bus
 * addresses.  Any other port returns the address of a static dummy
 * register so a stray access is harmless instead of hitting random bus
 * addresses.
 */
static inline void *__port2addr_ata(unsigned long port)
{
	static int dummy_reg;

	switch (port) {
	case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
	case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
	case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
	case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
	case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
	case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
	case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
	case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
	case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
	default:    return (void *)&dummy_reg;
	}
}
#endif
/*
* M32700UT-LAN is located in the extended bus space
* from 0x10000000 to 0x13ffffff on physical address.
* The base address of LAN controller(LAN91C111) is 0x300.
*/
#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
/* LAN controller registers sit in the extended bus space (see above). */
static inline void *_port2addr_ne(unsigned long port)
{
	return (void *)(port + 0x10000000);
}

/* USB controller: only the low 4 bits of the port select a register. */
static inline void *_port2addr_usb(unsigned long port)
{
	return (void *)((port & 0x0f) + NONCACHE_OFFSET + 0x10303000);
}

/* Short settle delay after slow I/O: a push/pop pair kept only for the
 * cycles it burns ("memory" clobber stops reordering across it). */
static inline void delay(void)
{
	__asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
}
/*
* NIC I/O function
*/
#define PORT2ADDR_NE(port) _port2addr_ne(port)
/* Read one byte from a NIC register. */
static inline unsigned char _ne_inb(void *portp)
{
	return *(volatile unsigned char *)portp;
}

/* Read one 16-bit word from a NIC register; the bus data is
 * little-endian, so convert to CPU order. */
static inline unsigned short _ne_inw(void *portp)
{
	return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
}

/* Read @count bytes from the single NIC register @portp into @addr. */
static inline void _ne_insb(void *portp, void *addr, unsigned long count)
{
	unsigned char *buf = (unsigned char *)addr;

	while (count--)
		*buf++ = _ne_inb(portp);
}

/* Write one byte to a NIC register. */
static inline void _ne_outb(unsigned char b, void *portp)
{
	*(volatile unsigned char *)portp = b;
}

/* Write one 16-bit word to a NIC register, converted to the bus's
 * little-endian order. */
static inline void _ne_outw(unsigned short w, void *portp)
{
	*(volatile unsigned short *)portp = cpu_to_le16(w);
}
/*
 * Read one byte from an I/O port, dispatching on the port range:
 * LAN controller, then (config-dependent) legacy IDE or PCMCIA,
 * falling back to the generic non-cached 1:1 mapping.
 */
unsigned char _inb(unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		return _ne_inb(PORT2ADDR_NE(port));
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		/* legacy ATA task-file / control ports */
		return *(volatile unsigned char *)__port2addr_ata(port);
	}
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned char b;
		pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
		return b;
	} else
#endif
	return *(volatile unsigned char *)PORT2ADDR(port);
}
/*
 * Read one 16-bit word from an I/O port, dispatching on the port
 * range: LAN, (optional) IDE, (optional) USB window 0x340-0x39f,
 * (optional) PCMCIA, else the generic mapping.
 */
unsigned short _inw(unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		return _ne_inw(PORT2ADDR_NE(port));
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		return *(volatile unsigned short *)__port2addr_ata(port);
	}
#endif
#if defined(CONFIG_USB)
	else if(port >= 0x340 && port < 0x3a0)
		return *(volatile unsigned short *)PORT2ADDR_USB(port);
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned short w;
		pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
		return w;
	} else
#endif
	return *(volatile unsigned short *)PORT2ADDR(port);
}
/*
 * Read one 32-bit word from an I/O port: PCMCIA range if configured,
 * otherwise the generic non-cached mapping.
 */
unsigned long _inl(unsigned long port)
{
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned long l;
		pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
		return l;
	} else
#endif
	return *(volatile unsigned long *)PORT2ADDR(port);
}
/* Pausing variant: read one byte, then a short settle delay. */
unsigned char _inb_p(unsigned long port)
{
	unsigned char val = _inb(port);

	delay();
	return val;
}

/* Pausing variant: read one 16-bit word, then a short settle delay. */
unsigned short _inw_p(unsigned long port)
{
	unsigned short val = _inw(port);

	delay();
	return val;
}

/* Pausing variant: read one 32-bit word, then a short settle delay. */
unsigned long _inl_p(unsigned long port)
{
	unsigned long val = _inl(port);

	delay();
	return val;
}
/*
 * Write one byte to an I/O port, dispatching on the port range
 * (LAN, optional IDE, optional PCMCIA, generic mapping).
 */
void _outb(unsigned char b, unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_outb(b, PORT2ADDR_NE(port));
	else
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		*(volatile unsigned char *)__port2addr_ata(port) = b;
	} else
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
	} else
#endif
	*(volatile unsigned char *)PORT2ADDR(port) = b;
}
/*
 * Write one 16-bit word to an I/O port, dispatching on the port range
 * (LAN, optional IDE, optional USB window, optional PCMCIA, generic).
 */
void _outw(unsigned short w, unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_outw(w, PORT2ADDR_NE(port));
	else
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		*(volatile unsigned short *)__port2addr_ata(port) = w;
	} else
#endif
#if defined(CONFIG_USB)
	if(port >= 0x340 && port < 0x3a0)
		*(volatile unsigned short *)PORT2ADDR_USB(port) = w;
	else
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
	} else
#endif
	*(volatile unsigned short *)PORT2ADDR(port) = w;
}
/*
 * Write one 32-bit word to an I/O port: PCMCIA range if configured,
 * otherwise the generic non-cached mapping.
 */
void _outl(unsigned long l, unsigned long port)
{
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
	} else
#endif
	*(volatile unsigned long *)PORT2ADDR(port) = l;
}
/* Pausing variant: write one byte, then a short settle delay. */
void _outb_p(unsigned char b, unsigned long port)
{
	_outb(b, port);
	delay();
}

/* Pausing variant: write one 16-bit word, then a short settle delay. */
void _outw_p(unsigned short w, unsigned long port)
{
	_outw(w, port);
	delay();
}

/* Pausing variant: write one 32-bit word, then a short settle delay. */
void _outl_p(unsigned long l, unsigned long port)
{
	_outl(l, port);
	delay();
}
/*
 * Read a string of @count bytes from port @port into @addr,
 * dispatching on the port range (LAN, optional IDE, optional PCMCIA,
 * generic mapping).  The port address is fixed; only @addr advances.
 */
void _insb(unsigned int port, void *addr, unsigned long count)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_insb(PORT2ADDR_NE(port), addr, count);
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		unsigned char *buf = addr;
		unsigned char *portp = __port2addr_ata(port);
		while (count--)
			*buf++ = *(volatile unsigned char *)portp;
	}
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char),
				count, 1);
	}
#endif
	else {
		unsigned char *buf = addr;
		unsigned char *portp = PORT2ADDR(port);
		while (count--)
			*buf++ = *(volatile unsigned char *)portp;
	}
}
/*
 * Read a string of @count 16-bit words from port @port into @addr,
 * dispatching on the port range (LAN, optional PCMCIA, optional IDE,
 * generic mapping).  The port address is fixed; only @addr advances.
 */
void _insw(unsigned int port, void *addr, unsigned long count)
{
	unsigned short *buf = addr;
	unsigned short *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		/*
		 * This portion is only used by smc91111.c to read data
		 * from the DATA_REG. Do not swap the data.
		 */
		portp = PORT2ADDR_NE(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		/* NOTE(review): first argument is 9 here, while every
		 * single-word pcc_* call in this file passes 0 -- confirm
		 * which PCC box number is actually intended. */
		pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short),
				count, 1);
#endif
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
	}
}
/*
 * Read a string of @count 32-bit words from port @port into @addr.
 * Unlike the byte/word variants there is no LAN/IDE/PCMCIA special
 * casing: everything goes through the generic port mapping.
 */
void _insl(unsigned int port, void *addr, unsigned long count)
{
	unsigned long *dst = addr;
	volatile unsigned long *src = PORT2ADDR(port);

	while (count--)
		*dst++ = *src;
}
/*
 * Write a string of @count bytes from @addr to port @port, dispatching
 * on the port range (LAN, optional IDE, optional PCMCIA, generic
 * mapping).  The port address is fixed; only @addr advances.
 */
void _outsb(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned char *buf = addr;
	unsigned char *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		portp = PORT2ADDR_NE(port);
		while (count--)
			_ne_outb(*buf++, portp);
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*(volatile unsigned char *)portp = *buf++;
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
				 count, 1);
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*(volatile unsigned char *)portp = *buf++;
	}
}
/*
 * Write a string of @count 16-bit words from @addr to port @port,
 * dispatching on the port range (LAN, optional IDE, optional PCMCIA,
 * generic mapping).  The port address is fixed; only @addr advances.
 */
void _outsw(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned short *buf = addr;
	unsigned short *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		/*
		 * This portion is only used by smc91111.c to write data
		 * into the DATA_REG. Do not swap the data.
		 */
		portp = PORT2ADDR_NE(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		/* NOTE(review): first argument is 9 here, while every
		 * single-word pcc_* call in this file passes 0 -- confirm
		 * which PCC box number is actually intended. */
		pcc_iowrite_word(9, port, (void *)addr, sizeof(unsigned short),
				 count, 1);
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
	}
}
/*
 * Write a string of @count 32-bit words from @addr to port @port.
 * Like _insl(), there is no LAN/IDE/PCMCIA special casing here:
 * everything goes through the generic port mapping.
 */
void _outsl(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned long *buf = addr;
	/* Was "unsigned char *": declare with the width actually used by
	 * the store below, matching _insl(). Behavior is unchanged (the
	 * pointer was cast on every dereference anyway). */
	unsigned long *portp;

	portp = PORT2ADDR(port);
	while (count--)
		*(volatile unsigned long *)portp = *buf++;
}
| gpl-2.0 |
crseanpaul/kernel | drivers/s390/cio/isc.c | 14707 | 1707 | /*
* Functions for registration of I/O interruption subclasses on s390.
*
* Copyright IBM Corp. 2008
* Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>
static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);
/**
* isc_register - register an I/O interruption subclass.
* @isc: I/O interruption subclass to register
*
* The number of users for @isc is increased. If this is the first user to
* register @isc, the corresponding I/O interruption subclass mask is enabled.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_register(unsigned int isc)
{
	/* Out-of-range subclass: complain loudly, register nothing. */
	if (isc > MAX_ISC) {
		WARN_ON(1);
		return;
	}

	spin_lock(&isc_ref_lock);
	if (!isc_refs[isc])
		ctl_set_bit(6, 31 - isc);	/* first user: enable the mask */
	isc_refs[isc]++;
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);
/**
* isc_unregister - unregister an I/O interruption subclass.
* @isc: I/O interruption subclass to unregister
*
* The number of users for @isc is decreased. If this is the last user to
* unregister @isc, the corresponding I/O interruption subclass mask is
* disabled.
* Note: This function must not be called if isc_register() hasn't been called
* before by the driver for @isc.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_unregister(unsigned int isc)
{
	spin_lock(&isc_ref_lock);
	/* Reject out-of-range subclasses and unbalanced unregisters. */
	if (isc > MAX_ISC || isc_refs[isc] == 0) {
		WARN_ON(1);
	} else {
		if (isc_refs[isc] == 1)
			ctl_clear_bit(6, 31 - isc);	/* last user: disable the mask */
		isc_refs[isc]--;
	}
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
| gpl-2.0 |
duanyukun/linux | drivers/hid/hid-uclogic.c | 372 | 43774 | /*
* HID driver for UC-Logic devices not fully compliant with HID standard
*
* Copyright (c) 2010-2014 Nikolai Kondrashov
* Copyright (c) 2013 Martin Rusko
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <asm/unaligned.h>
#include "usbhid/usbhid.h"
#include "hid-ids.h"
/*
* See WPXXXXU model descriptions, device and HID report descriptors at
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP4030U
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP5540U
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP8060U
*/
/* Size of the original descriptor of WPXXXXU tablets */
#define WPXXXXU_RDESC_ORIG_SIZE 212
/* Fixed WP4030U report descriptor */
static __u8 wp4030u_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0xB8, 0x0B, /* Physical Maximum (3000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Fixed WP5540U report descriptor */
static __u8 wp5540u_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0x7C, 0x15, /* Physical Maximum (5500), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0, /* End Collection, */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x08, /* Report ID (8), */
0x09, 0x01, /* Usage (Pointer), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
0x29, 0x03, /* Usage Maximum (03h), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x05, 0x01, /* Usage Page (Desktop), */
0x75, 0x08, /* Report Size (8), */
0x09, 0x30, /* Usage (X), */
0x09, 0x31, /* Usage (Y), */
0x15, 0x81, /* Logical Minimum (-127), */
0x25, 0x7F, /* Logical Maximum (127), */
0x95, 0x02, /* Report Count (2), */
0x81, 0x06, /* Input (Variable, Relative), */
0x09, 0x38, /* Usage (Wheel), */
0x15, 0xFF, /* Logical Minimum (-1), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x06, /* Input (Variable, Relative), */
0x81, 0x01, /* Input (Constant), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Fixed WP8060U report descriptor */
static __u8 wp8060u_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0x70, 0x17, /* Physical Maximum (6000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0, /* End Collection, */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x08, /* Report ID (8), */
0x09, 0x01, /* Usage (Pointer), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
0x29, 0x03, /* Usage Maximum (03h), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x05, 0x01, /* Usage Page (Desktop), */
0x75, 0x08, /* Report Size (8), */
0x09, 0x30, /* Usage (X), */
0x09, 0x31, /* Usage (Y), */
0x15, 0x81, /* Logical Minimum (-127), */
0x25, 0x7F, /* Logical Maximum (127), */
0x95, 0x02, /* Report Count (2), */
0x81, 0x06, /* Input (Variable, Relative), */
0x09, 0x38, /* Usage (Wheel), */
0x15, 0xFF, /* Logical Minimum (-1), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x06, /* Input (Variable, Relative), */
0x81, 0x01, /* Input (Constant), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/*
* See WP1062 description, device and HID report descriptors at
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP1062
*/
/* Size of the original descriptor of WP1062 tablet */
#define WP1062_RDESC_ORIG_SIZE 254
/* Fixed WP1062 report descriptor */
static __u8 wp1062_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x04, /* Report Count (4), */
0x81, 0x01, /* Input (Constant), */
0x09, 0x32, /* Usage (In Range), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x02, /* Input (Variable), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0x10, 0x27, /* Physical Maximum (10000), */
0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0xB7, 0x19, /* Physical Maximum (6583), */
0x26, 0x6E, 0x33, /* Logical Maximum (13166), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/*
* See PF1209 description, device and HID report descriptors at
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_PF1209
*/
/* Size of the original descriptor of PF1209 tablet */
#define PF1209_RDESC_ORIG_SIZE 234
/* Fixed PF1209 report descriptor */
static __u8 pf1209_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0x28, 0x23, /* Physical Maximum (9000), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0, /* End Collection, */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x08, /* Report ID (8), */
0x09, 0x01, /* Usage (Pointer), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
0x29, 0x03, /* Usage Maximum (03h), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x01, /* Input (Constant), */
0x05, 0x01, /* Usage Page (Desktop), */
0x75, 0x08, /* Report Size (8), */
0x09, 0x30, /* Usage (X), */
0x09, 0x31, /* Usage (Y), */
0x15, 0x81, /* Logical Minimum (-127), */
0x25, 0x7F, /* Logical Maximum (127), */
0x95, 0x02, /* Report Count (2), */
0x81, 0x06, /* Input (Variable, Relative), */
0x09, 0x38, /* Usage (Wheel), */
0x15, 0xFF, /* Logical Minimum (-1), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x06, /* Input (Variable, Relative), */
0x81, 0x01, /* Input (Constant), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/*
* See TWHL850 description, device and HID report descriptors at
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Wireless_Tablet_TWHL850
*/
/* Size of the original descriptors of TWHL850 tablet */
#define TWHL850_RDESC_ORIG_SIZE0 182
#define TWHL850_RDESC_ORIG_SIZE1 161
#define TWHL850_RDESC_ORIG_SIZE2 92
/* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */
static __u8 twhl850_rdesc_fixed0[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
0x95, 0x03, /* Report Count (3), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x81, 0x02, /* Input (Variable), */
0x81, 0x03, /* Input (Constant, Variable), */
0x95, 0x01, /* Report Count (1), */
0x09, 0x32, /* Usage (In Range), */
0x81, 0x02, /* Input (Variable), */
0x81, 0x03, /* Input (Constant, Variable), */
0x75, 0x10, /* Report Size (16), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x65, 0x13, /* Unit (Inch), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
0x26, 0x00, 0x7D, /* Logical Maximum (32000), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0x88, 0x13, /* Physical Maximum (5000), */
0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */
static __u8 twhl850_rdesc_fixed1[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x01, /* Report ID (1), */
0x09, 0x01, /* Usage (Pointer), */
0xA0, /* Collection (Physical), */
0x05, 0x09, /* Usage Page (Button), */
0x75, 0x01, /* Report Size (1), */
0x95, 0x03, /* Report Count (3), */
0x19, 0x01, /* Usage Minimum (01h), */
0x29, 0x03, /* Usage Maximum (03h), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x05, /* Report Count (5), */
0x81, 0x03, /* Input (Constant, Variable), */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x30, /* Usage (X), */
0x09, 0x31, /* Usage (Y), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x02, /* Report Count (2), */
0x81, 0x06, /* Input (Variable, Relative), */
0x09, 0x38, /* Usage (Wheel), */
0x15, 0xFF, /* Logical Minimum (-1), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x01, /* Report Count (1), */
0x75, 0x08, /* Report Size (8), */
0x81, 0x06, /* Input (Variable, Relative), */
0x81, 0x03, /* Input (Constant, Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */
static __u8 twhl850_rdesc_fixed2[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x06, /* Usage (Keyboard), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x03, /* Report ID (3), */
0x05, 0x07, /* Usage Page (Keyboard), */
0x14, /* Logical Minimum (0), */
0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
0x95, 0x08, /* Report Count (8), */
0x81, 0x02, /* Input (Variable), */
0x18, /* Usage Minimum (None), */
0x29, 0xFF, /* Usage Maximum (FFh), */
0x26, 0xFF, 0x00, /* Logical Maximum (255), */
0x75, 0x08, /* Report Size (8), */
0x95, 0x06, /* Report Count (6), */
0x80, /* Input, */
0xC0 /* End Collection */
};
/*
* See TWHA60 description, device and HID report descriptors at
* http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_TWHA60
*/
/* Size of the original descriptors of TWHA60 tablet */
#define TWHA60_RDESC_ORIG_SIZE0 254
#define TWHA60_RDESC_ORIG_SIZE1 139
/* Fixed TWHA60 report descriptor, interface 0 (stylus) */
static __u8 twha60_rdesc_fixed0[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x09, /* Report ID (9), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x04, /* Report Count (4), */
0x81, 0x01, /* Input (Constant), */
0x09, 0x32, /* Usage (In Range), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x02, /* Input (Variable), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0x14, /* Logical Minimum (0), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x65, 0x13, /* Unit (Inch), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x46, 0x10, 0x27, /* Physical Maximum (10000), */
0x27, 0x3F, 0x9C,
0x00, 0x00, /* Logical Maximum (39999), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x46, 0x6A, 0x18, /* Physical Maximum (6250), */
0x26, 0xA7, 0x61, /* Logical Maximum (24999), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Fixed TWHA60 report descriptor, interface 1 (frame buttons).
 * Exposes the tablet's frame buttons as keyboard keys F1-F24
 * (report ID 5), with 8 leading and 8 trailing padding bits. */
static __u8 twha60_rdesc_fixed1[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x06, /* Usage (Keyboard), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x05, /* Report ID (5), */
0x05, 0x07, /* Usage Page (Keyboard), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
0x95, 0x08, /* Report Count (8), */
0x81, 0x01, /* Input (Constant), */
0x95, 0x0C, /* Report Count (12), */
0x19, 0x3A, /* Usage Minimum (KB F1), */
0x29, 0x45, /* Usage Maximum (KB F12), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x0C, /* Report Count (12), */
0x19, 0x68, /* Usage Minimum (KB F13), */
0x29, 0x73, /* Usage Maximum (KB F24), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x08, /* Report Count (8), */
0x81, 0x01, /* Input (Constant), */
0xC0 /* End Collection */
};
/* Report descriptor template placeholder head: three magic bytes that
 * mark a 4-byte placeholder (head + one ID byte from the enum below)
 * inside uclogic_tablet_rdesc_template. uclogic_tablet_enable()
 * scans for this pattern and overwrites each placeholder with the
 * corresponding device parameter. */
#define UCLOGIC_PH_HEAD 0xFE, 0xED, 0x1D
/* Report descriptor template placeholder IDs — indices into the
 * params[] array filled in by uclogic_tablet_enable(). */
enum uclogic_ph_id {
UCLOGIC_PH_ID_X_LM,
UCLOGIC_PH_ID_X_PM,
UCLOGIC_PH_ID_Y_LM,
UCLOGIC_PH_ID_Y_PM,
UCLOGIC_PH_ID_PRESSURE_LM,
UCLOGIC_PH_ID_NUM
};
/* Report descriptor template placeholder */
#define UCLOGIC_PH(_ID) UCLOGIC_PH_HEAD, UCLOGIC_PH_ID_##_ID
/* Report ID of the pen input report patched by uclogic_raw_event() */
#define UCLOGIC_PEN_REPORT_ID 0x07
/* Fixed report descriptor template. The UCLOGIC_PH() entries are
 * 4-byte placeholders that uclogic_tablet_enable() replaces with the
 * logical/physical maxima read from the device's parameter string
 * descriptor before the descriptor is handed to HID core. */
static const __u8 uclogic_tablet_rdesc_template[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
0x85, 0x07, /* Report ID (7), */
0x09, 0x20, /* Usage (Stylus), */
0xA0, /* Collection (Physical), */
0x14, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
0x09, 0x42, /* Usage (Tip Switch), */
0x09, 0x44, /* Usage (Barrel Switch), */
0x09, 0x46, /* Usage (Tablet Pick), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x03, /* Input (Constant, Variable), */
0x09, 0x32, /* Usage (In Range), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x02, /* Input (Variable), */
0x95, 0x01, /* Report Count (1), */
0x81, 0x03, /* Input (Constant, Variable), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x01, /* Report Count (1), */
0xA4, /* Push, */
0x05, 0x01, /* Usage Page (Desktop), */
0x65, 0x13, /* Unit (Inch), */
0x55, 0xFD, /* Unit Exponent (-3), */
0x34, /* Physical Minimum (0), */
0x09, 0x30, /* Usage (X), */
0x27, UCLOGIC_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */
0x47, UCLOGIC_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */
0x81, 0x02, /* Input (Variable), */
0x09, 0x31, /* Usage (Y), */
0x27, UCLOGIC_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */
0x47, UCLOGIC_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */
0x81, 0x02, /* Input (Variable), */
0xB4, /* Pop, */
0x09, 0x30, /* Usage (Tip Pressure), */
0x27,
UCLOGIC_PH(PRESSURE_LM),/* Logical Maximum (PLACEHOLDER), */
0x81, 0x02, /* Input (Variable), */
0xC0, /* End Collection, */
0xC0 /* End Collection */
};
/* Parameter indices into the __le16 buffer read from the device's
 * string descriptor 0x64 by uclogic_tablet_enable(). Note index 0
 * and 3 are unused. */
enum uclogic_prm {
UCLOGIC_PRM_X_LM = 1,
UCLOGIC_PRM_Y_LM = 2,
UCLOGIC_PRM_PRESSURE_LM = 4,
UCLOGIC_PRM_RESOLUTION = 5,
UCLOGIC_PRM_NUM
};
/* Driver data */
struct uclogic_drvdata {
/* Fixed report descriptor built by uclogic_tablet_enable(),
 * devm-allocated; NULL unless tablet mode was enabled */
__u8 *rdesc;
/* Size of rdesc in bytes */
unsigned int rsize;
/* Invert the in-range bit in pen reports (see uclogic_raw_event) */
bool invert_pen_inrange;
/* Discard pen usages on this interface (see uclogic_input_mapping) */
bool ignore_pen_usage;
};
/*
 * Substitute a known-broken report descriptor with a fixed copy.
 *
 * Matches on product ID (and, for multi-interface tablets, on the USB
 * interface number), but only when the original descriptor size is the
 * expected one. Unknown products fall back to the descriptor built at
 * probe time (drvdata->rdesc), if any.
 */
static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				  unsigned int *rsize)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	__u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	struct uclogic_drvdata *drv = hid_get_drvdata(hdev);
	__u8 *fixed = NULL;
	unsigned int fixed_size = 0;

	switch (hdev->product) {
	case USB_DEVICE_ID_UCLOGIC_TABLET_PF1209:
		if (*rsize == PF1209_RDESC_ORIG_SIZE) {
			fixed = pf1209_rdesc_fixed;
			fixed_size = sizeof(pf1209_rdesc_fixed);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U:
		if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
			fixed = wp4030u_rdesc_fixed;
			fixed_size = sizeof(wp4030u_rdesc_fixed);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U:
		if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
			fixed = wp5540u_rdesc_fixed;
			fixed_size = sizeof(wp5540u_rdesc_fixed);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U:
		if (*rsize == WPXXXXU_RDESC_ORIG_SIZE) {
			fixed = wp8060u_rdesc_fixed;
			fixed_size = sizeof(wp8060u_rdesc_fixed);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_TABLET_WP1062:
		if (*rsize == WP1062_RDESC_ORIG_SIZE) {
			fixed = wp1062_rdesc_fixed;
			fixed_size = sizeof(wp1062_rdesc_fixed);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850:
		/* Three interfaces, each with its own fixed descriptor */
		if (ifnum == 0 && *rsize == TWHL850_RDESC_ORIG_SIZE0) {
			fixed = twhl850_rdesc_fixed0;
			fixed_size = sizeof(twhl850_rdesc_fixed0);
		} else if (ifnum == 1 && *rsize == TWHL850_RDESC_ORIG_SIZE1) {
			fixed = twhl850_rdesc_fixed1;
			fixed_size = sizeof(twhl850_rdesc_fixed1);
		} else if (ifnum == 2 && *rsize == TWHL850_RDESC_ORIG_SIZE2) {
			fixed = twhl850_rdesc_fixed2;
			fixed_size = sizeof(twhl850_rdesc_fixed2);
		}
		break;
	case USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60:
		/* Stylus on interface 0, frame buttons on interface 1 */
		if (ifnum == 0 && *rsize == TWHA60_RDESC_ORIG_SIZE0) {
			fixed = twha60_rdesc_fixed0;
			fixed_size = sizeof(twha60_rdesc_fixed0);
		} else if (ifnum == 1 && *rsize == TWHA60_RDESC_ORIG_SIZE1) {
			fixed = twha60_rdesc_fixed1;
			fixed_size = sizeof(twha60_rdesc_fixed1);
		}
		break;
	default:
		/* Descriptor generated at probe time, if tablet mode is on */
		if (drv->rdesc != NULL) {
			fixed = drv->rdesc;
			fixed_size = drv->rsize;
		}
		break;
	}

	if (fixed != NULL) {
		rdesc = fixed;
		*rsize = fixed_size;
	}
	return rdesc;
}
/*
 * Input mapping hook: drop all pen usages on interfaces flagged with
 * ignore_pen_usage (the non-pen interface of Huion tablets); otherwise
 * defer to hid-core's default mapping.
 */
static int uclogic_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct uclogic_drvdata *drv = hid_get_drvdata(hdev);

	if (drv->ignore_pen_usage && field->application == HID_DG_PEN)
		return -1;	/* discard the unused pen interface */

	return 0;		/* let hid-core decide what to do */
}
/*
 * Append a per-application suffix ("Pen", "Mouse", ...) to the input
 * device name so the several inputs created under HID_QUIRK_MULTI_INPUT
 * can be told apart in userspace.
 */
static void uclogic_input_configured(struct hid_device *hdev,
		struct hid_input *hi)
{
	static const struct {
		unsigned int application;
		const char *suffix;
	} suffix_map[] = {
		{ HID_GD_KEYBOARD,		"Keyboard" },
		{ HID_GD_MOUSE,			"Mouse" },
		{ HID_GD_KEYPAD,		"Pad" },
		{ HID_DG_PEN,			"Pen" },
		{ HID_CP_CONSUMER_CONTROL,	"Consumer Control" },
		{ HID_GD_SYSTEM_CONTROL,	"System Control" },
	};
	const char *suffix = NULL;
	size_t i, len;
	char *name;

	/* no report associated (HID_QUIRK_MULTI_INPUT not set) */
	if (!hi->report)
		return;

	for (i = 0; i < ARRAY_SIZE(suffix_map); i++) {
		if (hi->report->field[0]->application ==
		    suffix_map[i].application) {
			suffix = suffix_map[i].suffix;
			break;
		}
	}
	if (!suffix)
		return;

	/* name + ' ' + suffix + NUL */
	len = strlen(hdev->name) + 2 + strlen(suffix);
	name = devm_kzalloc(&hi->input->dev, len, GFP_KERNEL);
	if (name) {
		snprintf(name, len, "%s %s", hdev->name, suffix);
		hi->input->name = name;
	}
}
/**
 * Enable fully-functional tablet mode and determine device parameters.
 *
 * Reads the device's parameter string descriptor (which also switches
 * the tablet into full mode), then instantiates the fixed report
 * descriptor template with the retrieved coordinate/pressure limits.
 *
 * @hdev: HID device
 *
 * Returns zero on success, -ENODEV if the parameters cannot be read,
 * or -ENOMEM on allocation failure.
 */
static int uclogic_tablet_enable(struct hid_device *hdev)
{
int rc;
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
__le16 *buf = NULL;
size_t len;
s32 params[UCLOGIC_PH_ID_NUM];
s32 resolution;
__u8 *p;
s32 v;
/*
 * Read string descriptor containing tablet parameters. The specific
 * string descriptor and data were discovered by sniffing the Windows
 * driver traffic.
 * NOTE: This enables fully-functional tablet mode.
 */
len = UCLOGIC_PRM_NUM * sizeof(*buf);
buf = kmalloc(len, GFP_KERNEL);
if (buf == NULL) {
hid_err(hdev, "failed to allocate parameter buffer\n");
rc = -ENOMEM;
goto cleanup;
}
/* Descriptor index 0x64, language ID 0x0409 (US English) */
rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(USB_DT_STRING << 8) + 0x64,
0x0409, buf, len,
USB_CTRL_GET_TIMEOUT);
if (rc == -EPIPE) {
/* Request stalled: device has no parameter descriptor */
hid_err(hdev, "device parameters not found\n");
rc = -ENODEV;
goto cleanup;
} else if (rc < 0) {
hid_err(hdev, "failed to get device parameters: %d\n", rc);
rc = -ENODEV;
goto cleanup;
} else if (rc != len) {
/* Short read: treat a truncated parameter block as fatal */
hid_err(hdev, "invalid device parameters\n");
rc = -ENODEV;
goto cleanup;
}
/* Extract device parameters (little-endian 16-bit fields) */
params[UCLOGIC_PH_ID_X_LM] = le16_to_cpu(buf[UCLOGIC_PRM_X_LM]);
params[UCLOGIC_PH_ID_Y_LM] = le16_to_cpu(buf[UCLOGIC_PRM_Y_LM]);
params[UCLOGIC_PH_ID_PRESSURE_LM] =
le16_to_cpu(buf[UCLOGIC_PRM_PRESSURE_LM]);
resolution = le16_to_cpu(buf[UCLOGIC_PRM_RESOLUTION]);
if (resolution == 0) {
/* No resolution reported: leave physical maxima at zero */
params[UCLOGIC_PH_ID_X_PM] = 0;
params[UCLOGIC_PH_ID_Y_PM] = 0;
} else {
/* Physical size in thousandths of an inch (template uses
 * Unit Exponent -3, Unit Inch) */
params[UCLOGIC_PH_ID_X_PM] = params[UCLOGIC_PH_ID_X_LM] *
1000 / resolution;
params[UCLOGIC_PH_ID_Y_PM] = params[UCLOGIC_PH_ID_Y_LM] *
1000 / resolution;
}
/* Allocate fixed report descriptor */
drvdata->rdesc = devm_kzalloc(&hdev->dev,
sizeof(uclogic_tablet_rdesc_template),
GFP_KERNEL);
if (drvdata->rdesc == NULL) {
hid_err(hdev, "failed to allocate fixed rdesc\n");
rc = -ENOMEM;
goto cleanup;
}
drvdata->rsize = sizeof(uclogic_tablet_rdesc_template);
/* Format fixed report descriptor: replace each 4-byte placeholder
 * (UCLOGIC_PH_HEAD magic + ID byte) with the matching parameter,
 * stored little-endian via cpu_to_le32 */
memcpy(drvdata->rdesc, uclogic_tablet_rdesc_template,
drvdata->rsize);
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
} else {
p++;
}
}
rc = 0;
cleanup:
kfree(buf);
return rc;
}
/*
 * Probe: set quirks, allocate driver data, and for the Huion tablet's
 * pen interface switch the device into full tablet mode before handing
 * it to hid-core.
 */
static int uclogic_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct uclogic_drvdata *drv;
	int ret;

	/*
	 * libinput requires the pad interface to be on a different node
	 * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
	 */
	hdev->quirks |= HID_QUIRK_MULTI_INPUT | HID_QUIRK_NO_EMPTY_INPUT;

	/* Allocate and assign driver data */
	drv = devm_kzalloc(&hdev->dev, sizeof(*drv), GFP_KERNEL);
	if (drv == NULL)
		return -ENOMEM;
	hid_set_drvdata(hdev, drv);

	if (id->product == USB_DEVICE_ID_HUION_TABLET) {
		if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
			/* Pen interface: enable full tablet mode */
			ret = uclogic_tablet_enable(hdev);
			if (ret) {
				hid_err(hdev, "tablet enabling failed\n");
				return ret;
			}
			drv->invert_pen_inrange = true;
		} else {
			drv->ignore_pen_usage = true;
		}
	}

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		return ret;
	}

	return 0;
}
/*
 * Raw event hook: tablets flagged with invert_pen_inrange report the
 * in-range bit inverted; flip it back in pen input reports before
 * hid-core parses them.
 */
static int uclogic_raw_event(struct hid_device *hdev, struct hid_report *report,
		u8 *data, int size)
{
	struct uclogic_drvdata *drv = hid_get_drvdata(hdev);

	if (drv->invert_pen_inrange &&
	    report->type == HID_INPUT_REPORT &&
	    report->id == UCLOGIC_PEN_REPORT_ID &&
	    size >= 2)
		data[1] ^= 0x40;	/* Invert the in-range bit */

	return 0;
}
/* Devices handled by this driver. The Huion tablet is matched under
 * both its own vendor ID and the UC-Logic vendor ID, since it ships
 * under either. */
static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
/* HID driver glue: wires the fixup/probe/event hooks defined above
 * into hid-core. */
static struct hid_driver uclogic_driver = {
.name = "uclogic",
.id_table = uclogic_devices,
.probe = uclogic_probe,
.report_fixup = uclogic_report_fixup,
.raw_event = uclogic_raw_event,
.input_mapping = uclogic_input_mapping,
.input_configured = uclogic_input_configured,
};
module_hid_driver(uclogic_driver);
MODULE_AUTHOR("Martin Rusko");
MODULE_AUTHOR("Nikolai Kondrashov");
MODULE_LICENSE("GPL");
| gpl-2.0 |
LeEco-MSM8996-Development/android_kernel_leeco_msm8996 | arch/arm/mach-s3c24xx/s3c2442.c | 628 | 2104 | /* linux/arch/arm/mach-s3c2442/s3c2442.c
*
* Copyright (c) 2004-2005 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2442 core and lock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/gpio-samsung.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
#include "common.h"
/* Core device for the S3C2442 SoC, registered on the s3c2442 subsystem
 * bus by s3c2442_init(). */
static struct device s3c2442_dev = {
.bus = &s3c2442_subsys,
};
/*
 * s3c2442_init - initialise the S3C2442 architecture support.
 *
 * Registers the power-management syscore hooks (suspend/resume state
 * save and IRQ state, when CONFIG_PM is enabled) plus the S3C244x PM
 * ops, then registers the core SoC device.
 *
 * Returns 0 on success or the error from device_register().
 */
int __init s3c2442_init(void)
{
	/* pr_info() instead of a bare printk() with no log level */
	pr_info("S3C2442: Initialising architecture\n");

#ifdef CONFIG_PM
	register_syscore_ops(&s3c2410_pm_syscore_ops);
	register_syscore_ops(&s3c24xx_irq_syscore_ops);
#endif
	register_syscore_ops(&s3c244x_pm_syscore_ops);

	return device_register(&s3c2442_dev);
}
/* Map the common S3C244x IO regions and install the S3C2442's
 * one-down pull GPIO configuration helpers as the defaults. */
void __init s3c2442_map_io(void)
{
s3c244x_map_io();
s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1down;
s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1down;
}
| gpl-2.0 |
uzairabdulmajeed/uam-kernel | net/appletalk/ddp.c | 884 | 49203 | /*
* DDP: An implementation of the AppleTalk DDP protocol for
* Ethernet 'ELAP'.
*
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* With more than a little assistance from
*
* Wesley Craig <netatalk@umich.edu>
*
* Fixes:
* Neil Horman : Added missing device ioctls
* Michael Callahan : Made routing work
* Wesley Craig : Fix probing to listen to a
* passed node id.
* Alan Cox : Added send/recvmsg support
* Alan Cox : Moved at. to protinfo in
* socket.
* Alan Cox : Added firewall hooks.
* Alan Cox : Supports new ARPHRD_LOOPBACK
* Christer Weinigel : Routing and /proc fixes.
* Bradford Johnson : LocalTalk.
* Tom Dyas : Module support.
* Alan Cox : Hooks for PPP (based on the
* LocalTalk hook).
* Alan Cox : Posix bits
* Alan Cox/Mike Freeman : Possible fix to NBP problems
* Bradford Johnson : IP-over-DDP (experimental)
* Jay Schulist : Moved IP-over-DDP to its own
* driver file. (ipddp.c & ipddp.h)
* Jay Schulist : Made work as module with
* AppleTalk drivers, cleaned it.
* Rob Newberry : Added proxy AARP and AARP
* procfs, moved probing to AARP
* module.
* Adrian Sun/
* Michael Zuelsdorff : fix for net.0 packets. don't
* allow illegal ether/tokentalk
* port assignment. we lose a
* valid localtalk port as a
* result.
* Arnaldo C. de Melo : Cleanup, in preparation for
* shared skb support 8)
* Arnaldo C. de Melo : Move proc stuff to atalk_proc.c,
* use seq_file
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
#include <linux/compat.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/psnap.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/route.h>
#include <linux/atalk.h>
#include <linux/highmem.h>
struct datalink_proto *ddp_dl, *aarp_dl;
static const struct proto_ops atalk_dgram_ops;
/**************************************************************************\
* *
* Handlers for the socket list. *
* *
\**************************************************************************/
HLIST_HEAD(atalk_sockets);
DEFINE_RWLOCK(atalk_sockets_lock);
/* Add a socket to the global AppleTalk socket list. Caller must hold
 * atalk_sockets_lock for writing. */
static inline void __atalk_insert_socket(struct sock *sk)
{
sk_add_node(sk, &atalk_sockets);
}
/* Unlink a socket from the global list, taking the write lock itself. */
static inline void atalk_remove_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
sk_del_node_init(sk);
write_unlock_bh(&atalk_sockets_lock);
}
/*
 * Find the bound socket a DDP packet addressed to @to should be
 * delivered to, given the receiving interface @atif. Matches on port
 * first, then on network/node including broadcast and any-node forms.
 * Returns the socket or NULL.
 * NOTE(review): the returned sock is not refcounted here — callers
 * appear to rely on the socket staying alive; verify against callers.
 */
static struct sock *atalk_search_socket(struct sockaddr_at *to,
struct atalk_iface *atif)
{
struct sock *s;
read_lock_bh(&atalk_sockets_lock);
sk_for_each(s, &atalk_sockets) {
struct atalk_sock *at = at_sk(s);
if (to->sat_port != at->src_port)
continue;
if (to->sat_addr.s_net == ATADDR_ANYNET &&
to->sat_addr.s_node == ATADDR_BCAST)
goto found;
if (to->sat_addr.s_net == at->src_net &&
(to->sat_addr.s_node == at->src_node ||
to->sat_addr.s_node == ATADDR_BCAST ||
to->sat_addr.s_node == ATADDR_ANYNODE))
goto found;
/* XXXX.0 -- we got a request for this router. make sure
 * that the node is appropriately set. */
if (to->sat_addr.s_node == ATADDR_ANYNODE &&
to->sat_addr.s_net != ATADDR_ANYNET &&
atif->address.s_node == at->src_node) {
to->sat_addr.s_node = atif->address.s_node;
goto found;
}
}
s = NULL;
found:
read_unlock_bh(&atalk_sockets_lock);
return s;
}
/**
 * atalk_find_or_insert_socket - Try to find a socket matching ADDR
 * @sk: socket to insert in the list if it is not there already
 * @sat: address to search for
 *
 * Try to find a socket matching ADDR in the socket list, if found then return
 * it. If not, insert SK into the socket list.
 *
 * This entire operation must execute atomically.
 *
 * Returns the already-bound socket on conflict, or NULL when SK was
 * inserted (i.e. NULL means success for bind purposes).
 */
static struct sock *atalk_find_or_insert_socket(struct sock *sk,
struct sockaddr_at *sat)
{
struct sock *s;
struct atalk_sock *at;
write_lock_bh(&atalk_sockets_lock);
sk_for_each(s, &atalk_sockets) {
at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
at->src_node == sat->sat_addr.s_node &&
at->src_port == sat->sat_port)
goto found;
}
s = NULL;
__atalk_insert_socket(sk); /* Wheee, it's free, assign and insert. */
found:
write_unlock_bh(&atalk_sockets_lock);
return s;
}
/* Deferred-destroy timer: if the socket still has outstanding
 * allocations (in-flight skbs), re-arm and try again later; otherwise
 * drop the final reference taken by atalk_destroy_socket(). */
static void atalk_destroy_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
if (sk_has_allocations(sk)) {
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/* Remove a socket from the list and release it; if buffers are still
 * outstanding, defer the final sock_put() to atalk_destroy_timer(). */
static inline void atalk_destroy_socket(struct sock *sk)
{
atalk_remove_socket(sk);
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
setup_timer(&sk->sk_timer, atalk_destroy_timer,
(unsigned long)sk);
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/**************************************************************************\
* *
* Routing tables for the AppleTalk socket layer. *
* *
\**************************************************************************/
/* Anti-deadlock ordering is atalk_routes_lock --> iface_lock -DaveM */
struct atalk_route *atalk_routes;
DEFINE_RWLOCK(atalk_routes_lock);
struct atalk_iface *atalk_interfaces;
DEFINE_RWLOCK(atalk_interfaces_lock);
/* For probing devices or in a routerless network */
struct atalk_route atrtr_default;
/* AppleTalk interface control */
/*
 * Drop a device. Doesn't drop any of its routes - that is the caller's
 * problem. Called when we down the interface or delete the address.
 * Walks the interface list under the write lock, unlinking and freeing
 * every entry bound to @dev and dropping the dev reference taken in
 * atif_add_device().
 */
static void atif_drop_device(struct net_device *dev)
{
struct atalk_iface **iface = &atalk_interfaces;
struct atalk_iface *tmp;
write_lock_bh(&atalk_interfaces_lock);
while ((tmp = *iface) != NULL) {
if (tmp->dev == dev) {
*iface = tmp->next;
dev_put(dev);
kfree(tmp);
dev->atalk_ptr = NULL;
} else
iface = &tmp->next;
}
write_unlock_bh(&atalk_interfaces_lock);
}
/* Allocate a new AppleTalk interface for @dev with address @sa, take a
 * reference on the device, and link it at the head of the global
 * interface list. Returns the new iface or NULL on allocation failure. */
static struct atalk_iface *atif_add_device(struct net_device *dev,
struct atalk_addr *sa)
{
struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
goto out;
dev_hold(dev);
iface->dev = dev;
dev->atalk_ptr = iface;
iface->address = *sa;
iface->status = 0;
write_lock_bh(&atalk_interfaces_lock);
iface->next = atalk_interfaces;
atalk_interfaces = iface;
write_unlock_bh(&atalk_interfaces_lock);
out:
return iface;
}
/* Perform phase 2 AARP probing on our tentative address.
 * Sweeps the interface's net range and all valid node numbers
 * (1..253; 0, 254 and 255 are excluded) starting from a jiffies-based
 * offset, AARP-probing each candidate until one is free. On success
 * atif->address holds the claimed address and 0 is returned; if every
 * address is taken, returns -EADDRINUSE. ATIF_PROBE is set for the
 * duration so other paths can tell a probe is in progress. */
static int atif_probe_device(struct atalk_iface *atif)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
int probe_net = ntohs(atif->address.s_net);
int probe_node = atif->address.s_node;
int netct, nodect;
/* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
probe_net = ntohs(atif->nets.nr_firstnet);
if (netrange)
probe_net += jiffies % netrange;
}
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
/* Scan the networks */
atif->status |= ATIF_PROBE;
for (netct = 0; netct <= netrange; netct++) {
/* Sweep the available nodes from a given start */
atif->address.s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
atif->address.s_node = (nodect + probe_node) & 0xFF;
if (atif->address.s_node > 0 &&
atif->address.s_node < 254) {
/* Probe a proposed address */
aarp_probe_network(atif);
if (!(atif->status & ATIF_PROBE_FAIL)) {
atif->status &= ~ATIF_PROBE;
return 0;
}
}
atif->status &= ~ATIF_PROBE_FAIL;
}
probe_net++;
if (probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
atif->status &= ~ATIF_PROBE;
return -EADDRINUSE; /* Network is full... */
}
/* Perform AARP probing for a proxy address.
 * Same sweep as atif_probe_device(), but for an address we will proxy
 * for rather than claim ourselves: candidates are written to
 * @proxy_addr and verified via aarp_proxy_probe_network(). Returns 0
 * (or the probe's non-EADDRINUSE error) on success, -EADDRINUSE when
 * the whole range is taken. */
static int atif_proxy_probe_device(struct atalk_iface *atif,
struct atalk_addr* proxy_addr)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
/* we probe the interface's network */
int probe_net = ntohs(atif->address.s_net);
int probe_node = ATADDR_ANYNODE; /* we'll take anything */
int netct, nodect;
/* Offset the network we start probing with */
if (probe_net == ATADDR_ANYNET) {
probe_net = ntohs(atif->nets.nr_firstnet);
if (netrange)
probe_net += jiffies % netrange;
}
if (probe_node == ATADDR_ANYNODE)
probe_node = jiffies & 0xFF;
/* Scan the networks */
for (netct = 0; netct <= netrange; netct++) {
/* Sweep the available nodes from a given start */
proxy_addr->s_net = htons(probe_net);
for (nodect = 0; nodect < 256; nodect++) {
proxy_addr->s_node = (nodect + probe_node) & 0xFF;
if (proxy_addr->s_node > 0 &&
proxy_addr->s_node < 254) {
/* Tell AARP to probe a proposed address */
int ret = aarp_proxy_probe_network(atif,
proxy_addr);
if (ret != -EADDRINUSE)
return ret;
}
}
probe_net++;
if (probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
return -EADDRINUSE; /* Network is full... */
}
/* Return the AppleTalk address bound to @dev, or NULL if the device
 * has no AppleTalk interface attached. */
struct atalk_addr *atalk_find_dev_addr(struct net_device *dev)
{
	struct atalk_iface *iface = dev->atalk_ptr;

	if (!iface)
		return NULL;
	return &iface->address;
}
/* Pick the primary AppleTalk address: prefer the first interface that
 * is neither loopback nor point-to-point; fall back to the first
 * non-loopback one, then to the head of the list, then NULL. */
static struct atalk_addr *atalk_find_primary(void)
{
struct atalk_iface *fiface = NULL;
struct atalk_addr *retval;
struct atalk_iface *iface;
/*
 * Return a point-to-point interface only if
 * there is no non-ptp interface available.
 */
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if (!fiface && !(iface->dev->flags & IFF_LOOPBACK))
fiface = iface;
if (!(iface->dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))) {
retval = &iface->address;
goto out;
}
}
if (fiface)
retval = &fiface->address;
else if (atalk_interfaces)
retval = &atalk_interfaces->address;
else
retval = NULL;
out:
read_unlock_bh(&atalk_interfaces_lock);
return retval;
}
/*
 * Find a match for 'any network' - ie any of our interfaces with that
 * node number will do just nicely. Interfaces still running an AARP
 * probe never match.
 */
static struct atalk_iface *atalk_find_anynet(int node, struct net_device *dev)
{
	struct atalk_iface *iface = dev->atalk_ptr;

	if (!iface || (iface->status & ATIF_PROBE))
		return NULL;

	if (node == ATADDR_BCAST || node == ATADDR_ANYNODE ||
	    iface->address.s_node == node)
		return iface;

	return NULL;
}
/* Find a match for a specific network:node pair. Broadcast and
 * any-node wildcards match any node on the given net; a net.0 query
 * also matches an interface whose net range covers the net. Interfaces
 * mid-probe are skipped for exact matches. Returns NULL if nothing
 * matches. */
static struct atalk_iface *atalk_find_interface(__be16 net, int node)
{
struct atalk_iface *iface;
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if ((node == ATADDR_BCAST ||
node == ATADDR_ANYNODE ||
iface->address.s_node == node) &&
iface->address.s_net == net &&
!(iface->status & ATIF_PROBE))
break;
/* XXXX.0 -- net.0 returns the iface associated with net */
if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET &&
ntohs(iface->nets.nr_firstnet) <= ntohs(net) &&
ntohs(net) <= ntohs(iface->nets.nr_lastnet))
break;
}
read_unlock_bh(&atalk_interfaces_lock);
return iface;
}
/*
 * Find a route for an AppleTalk packet. This ought to get cached in
 * the socket (later on...). We know about host routes and the fact
 * that a route must be direct to broadcast. Host routes take priority
 * over network routes; falls back to the default route, else NULL.
 */
static struct atalk_route *atrtr_find(struct atalk_addr *target)
{
/*
 * we must search through all routes unless we find a
 * host route, because some host routes might overlap
 * network routes
 */
struct atalk_route *net_route = NULL;
struct atalk_route *r;
read_lock_bh(&atalk_routes_lock);
for (r = atalk_routes; r; r = r->next) {
if (!(r->flags & RTF_UP))
continue;
if (r->target.s_net == target->s_net) {
if (r->flags & RTF_HOST) {
/*
 * if this host route is for the target,
 * then we're done
 */
if (r->target.s_node == target->s_node)
goto out;
} else
/*
 * this route will work if there isn't a
 * direct host route, so cache it
 */
net_route = r;
}
}
/*
 * if we found a network route but not a direct host
 * route, then return it
 */
if (net_route)
r = net_route;
else if (atrtr_default.dev)
r = &atrtr_default;
else /* No route can be found */
r = NULL;
out:
read_unlock_bh(&atalk_routes_lock);
return r;
}
/*
 * Given an AppleTalk network, find the device to use. Resolves @sa
 * through the routing table; NULL when no route exists.
 */
struct net_device *atrtr_get_dev(struct atalk_addr *sa)
{
	struct atalk_route *rt = atrtr_find(sa);

	if (!rt)
		return NULL;
	return rt->dev;
}
/* Set up a default router: route everything unmatched through @dev
 * with a zero gateway. Called with NULL to clear the default. */
static void atrtr_set_default(struct net_device *dev)
{
atrtr_default.dev = dev;
atrtr_default.flags = RTF_UP;
atrtr_default.gateway.s_net = htons(0);
atrtr_default.gateway.s_node = 0;
}
/*
 * Add a router. Basically make sure it looks valid and stuff the
 * entry in the list. While it uses netranges we always set them to one
 * entry to work like netatalk.
 *
 * If @devhint is NULL the output device is resolved from the gateway
 * address against the interface list. An existing entry with the same
 * flags/target is updated in place; otherwise a new one is prepended.
 *
 * NOTE(review): when an existing route is updated, the previous
 * rt->dev reference does not appear to be dropped before it is
 * overwritten with the new dev_hold()'d device — possible device
 * refcount leak; verify against upstream history.
 */
static int atrtr_create(struct rtentry *r, struct net_device *devhint)
{
struct sockaddr_at *ta = (struct sockaddr_at *)&r->rt_dst;
struct sockaddr_at *ga = (struct sockaddr_at *)&r->rt_gateway;
struct atalk_route *rt;
struct atalk_iface *iface, *riface;
int retval = -EINVAL;
/*
 * Fixme: Raise/Lower a routing change semaphore for these
 * operations.
 */
/* Validate the request */
if (ta->sat_family != AF_APPLETALK ||
(!devhint && ga->sat_family != AF_APPLETALK))
goto out;
/* Now walk the routing table and make our decisions */
write_lock_bh(&atalk_routes_lock);
for (rt = atalk_routes; rt; rt = rt->next) {
if (r->rt_flags != rt->flags)
continue;
if (ta->sat_addr.s_net == rt->target.s_net) {
if (!(rt->flags & RTF_HOST))
break;
if (ta->sat_addr.s_node == rt->target.s_node)
break;
}
}
if (!devhint) {
riface = NULL;
read_lock_bh(&atalk_interfaces_lock);
for (iface = atalk_interfaces; iface; iface = iface->next) {
if (!riface &&
ntohs(ga->sat_addr.s_net) >=
ntohs(iface->nets.nr_firstnet) &&
ntohs(ga->sat_addr.s_net) <=
ntohs(iface->nets.nr_lastnet))
riface = iface;
if (ga->sat_addr.s_net == iface->address.s_net &&
ga->sat_addr.s_node == iface->address.s_node)
riface = iface;
}
read_unlock_bh(&atalk_interfaces_lock);
retval = -ENETUNREACH;
if (!riface)
goto out_unlock;
devhint = riface->dev;
}
if (!rt) {
rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
retval = -ENOBUFS;
if (!rt)
goto out_unlock;
rt->next = atalk_routes;
atalk_routes = rt;
}
/* Fill in the routing entry */
rt->target = ta->sat_addr;
dev_hold(devhint);
rt->dev = devhint;
rt->flags = r->rt_flags;
rt->gateway = ga->sat_addr;
retval = 0;
out_unlock:
write_unlock_bh(&atalk_routes_lock);
out:
return retval;
}
/* Delete a route. Find it and discard it: the first entry whose target
 * net matches (and, for non-gateway routes, whose node matches) is
 * unlinked, its device reference dropped, and the entry freed.
 * Returns -ENOENT if no such route exists. */
static int atrtr_delete(struct atalk_addr * addr)
{
struct atalk_route **r = &atalk_routes;
int retval = 0;
struct atalk_route *tmp;
write_lock_bh(&atalk_routes_lock);
while ((tmp = *r) != NULL) {
if (tmp->target.s_net == addr->s_net &&
(!(tmp->flags&RTF_GATEWAY) ||
tmp->target.s_node == addr->s_node)) {
*r = tmp->next;
dev_put(tmp->dev);
kfree(tmp);
goto out;
}
r = &tmp->next;
}
retval = -ENOENT;
out:
write_unlock_bh(&atalk_routes_lock);
return retval;
}
/*
 * Called when a device is downed. Just throw away any routes
 * via it: every entry using @dev is unlinked and freed with its
 * device reference dropped, and the default route is cleared if it
 * pointed at @dev.
 */
static void atrtr_device_down(struct net_device *dev)
{
struct atalk_route **r = &atalk_routes;
struct atalk_route *tmp;
write_lock_bh(&atalk_routes_lock);
while ((tmp = *r) != NULL) {
if (tmp->dev == dev) {
*r = tmp->next;
dev_put(dev);
kfree(tmp);
} else
r = &tmp->next;
}
write_unlock_bh(&atalk_routes_lock);
if (atrtr_default.dev == dev)
atrtr_set_default(NULL);
}
/* Actually down the interface: flush routes, AARP entries and finally
 * the interface record itself, in that order. */
static inline void atalk_dev_down(struct net_device *dev)
{
atrtr_device_down(dev); /* Remove all routes for the device */
aarp_device_down(dev); /* Remove AARP entries for the device */
atif_drop_device(dev); /* Remove the device */
}
/*
 * A device event has occurred. Watch for devices going down and
 * delete our use of them (iface and route). Only events for devices
 * in the initial network namespace are handled.
 */
static int ddp_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event == NETDEV_DOWN)
/* Discard any use of this */
atalk_dev_down(dev);
return NOTIFY_DONE;
}
/* ioctl calls. Shouldn't even need touching */

/*
 * Device configuration ioctl calls.
 *
 * Handles interface address management (SIOCSIFADDR/SIOCGIFADDR/
 * SIOCGIFBRDADDR/SIOCDIFADDR) and proxy-AARP (SIOCSARP/SIOCDARP) for
 * AppleTalk interfaces.  Runs under rtnl_lock (taken by the caller,
 * atalk_ioctl).  Returns 0 on success or a negative errno.
 */
static int atif_ioctl(int cmd, void __user *arg)
{
	/* AppleTalk broadcast hardware (AARP multicast) address. */
	static char aarp_mcast[6] = { 0x09, 0x00, 0x00, 0xFF, 0xFF, 0xFF };
	struct ifreq atreq;
	struct atalk_netrange *nr;
	struct sockaddr_at *sa;
	struct net_device *dev;
	struct atalk_iface *atif;
	int ct;
	int limit;
	struct rtentry rtdef;
	int add_route;

	if (copy_from_user(&atreq, arg, sizeof(atreq)))
		return -EFAULT;

	dev = __dev_get_by_name(&init_net, atreq.ifr_name);
	if (!dev)
		return -ENODEV;

	sa = (struct sockaddr_at *)&atreq.ifr_addr;
	atif = atalk_find_dev(dev);	/* may be NULL; checked per command below */

	switch (cmd) {
	case SIOCSIFADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (sa->sat_family != AF_APPLETALK)
			return -EINVAL;
		if (dev->type != ARPHRD_ETHER &&
		    dev->type != ARPHRD_LOOPBACK &&
		    dev->type != ARPHRD_LOCALTLK &&
		    dev->type != ARPHRD_PPP)
			return -EPROTONOSUPPORT;

		/* The netrange is smuggled in sat_zero by atalkd. */
		nr = (struct atalk_netrange *)&sa->sat_zero[0];
		add_route = 1;

		/*
		 * if this is a point-to-point iface, and we already
		 * have an iface for this AppleTalk address, then we
		 * should not add a route
		 */
		if ((dev->flags & IFF_POINTOPOINT) &&
		    atalk_find_interface(sa->sat_addr.s_net,
					 sa->sat_addr.s_node)) {
			printk(KERN_DEBUG "AppleTalk: point-to-point "
			       "interface added with "
			       "existing address\n");
			add_route = 0;
		}

		/*
		 * Phase 1 is fine on LocalTalk but we don't do
		 * EtherTalk phase 1. Anyone wanting to add it go ahead.
		 */
		if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
			return -EPROTONOSUPPORT;
		/* 255 (broadcast) and 254 are not assignable node numbers. */
		if (sa->sat_addr.s_node == ATADDR_BCAST ||
		    sa->sat_addr.s_node == 254)
			return -EINVAL;
		if (atif) {
			/* Already setting address */
			if (atif->status & ATIF_PROBE)
				return -EBUSY;

			atif->address.s_net  = sa->sat_addr.s_net;
			atif->address.s_node = sa->sat_addr.s_node;
			atrtr_device_down(dev);	/* Flush old routes */
		} else {
			atif = atif_add_device(dev, &sa->sat_addr);
			if (!atif)
				return -ENOMEM;
		}
		atif->nets = *nr;

		/*
		 * Check if the chosen address is used. If so we
		 * error and atalkd will try another.
		 */
		if (!(dev->flags & IFF_LOOPBACK) &&
		    !(dev->flags & IFF_POINTOPOINT) &&
		    atif_probe_device(atif) < 0) {
			atif_drop_device(dev);
			return -EADDRINUSE;
		}

		/* Hey it worked - add the direct routes */
		sa = (struct sockaddr_at *)&rtdef.rt_gateway;
		sa->sat_family = AF_APPLETALK;
		sa->sat_addr.s_net  = atif->address.s_net;
		sa->sat_addr.s_node = atif->address.s_node;
		sa = (struct sockaddr_at *)&rtdef.rt_dst;
		rtdef.rt_flags = RTF_UP;
		sa->sat_family = AF_APPLETALK;
		sa->sat_addr.s_node = ATADDR_ANYNODE;
		if (dev->flags & IFF_LOOPBACK ||
		    dev->flags & IFF_POINTOPOINT)
			rtdef.rt_flags |= RTF_HOST;

		/* Routerless initial state: net range 0..0xFFFE */
		if (nr->nr_firstnet == htons(0) &&
		    nr->nr_lastnet == htons(0xFFFE)) {
			sa->sat_addr.s_net = atif->address.s_net;
			atrtr_create(&rtdef, dev);
			atrtr_set_default(dev);
		} else {
			limit = ntohs(nr->nr_lastnet);
			if (limit - ntohs(nr->nr_firstnet) > 4096) {
				printk(KERN_WARNING "Too many routes/"
				       "iface.\n");
				return -EINVAL;
			}
			if (add_route)
				/* One direct route per network in the range. */
				for (ct = ntohs(nr->nr_firstnet);
				     ct <= limit; ct++) {
					sa->sat_addr.s_net = htons(ct);
					atrtr_create(&rtdef, dev);
				}
		}
		dev_mc_add_global(dev, aarp_mcast);
		return 0;

	case SIOCGIFADDR:
		if (!atif)
			return -EADDRNOTAVAIL;

		sa->sat_family = AF_APPLETALK;
		sa->sat_addr = atif->address;
		break;

	case SIOCGIFBRDADDR:
		if (!atif)
			return -EADDRNOTAVAIL;

		sa->sat_family = AF_APPLETALK;
		sa->sat_addr.s_net = atif->address.s_net;
		sa->sat_addr.s_node = ATADDR_BCAST;
		break;

	case SIOCATALKDIFADDR:
	case SIOCDIFADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (sa->sat_family != AF_APPLETALK)
			return -EINVAL;
		atalk_dev_down(dev);
		break;

	case SIOCSARP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (sa->sat_family != AF_APPLETALK)
			return -EINVAL;
		/*
		 * for now, we only support proxy AARP on ELAP;
		 * we should be able to do it for LocalTalk, too.
		 */
		if (dev->type != ARPHRD_ETHER)
			return -EPROTONOSUPPORT;

		/*
		 * atif points to the current interface on this network;
		 * we aren't concerned about its current status (at
		 * least for now), but it has all the settings about
		 * the network we're going to probe. Consequently, it
		 * must exist.
		 */
		if (!atif)
			return -EADDRNOTAVAIL;

		nr = (struct atalk_netrange *)&(atif->nets);
		/*
		 * Phase 1 is fine on Localtalk but we don't do
		 * Ethertalk phase 1. Anyone wanting to add it go ahead.
		 */
		if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
			return -EPROTONOSUPPORT;

		if (sa->sat_addr.s_node == ATADDR_BCAST ||
		    sa->sat_addr.s_node == 254)
			return -EINVAL;

		/*
		 * Check if the chosen address is used. If so we
		 * error and ATCP will try another.
		 */
		if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
			return -EADDRINUSE;

		/*
		 * We now have an address on the local network, and
		 * the AARP code will defend it for us until we take it
		 * down. We don't set up any routes right now, because
		 * ATCP will install them manually via SIOCADDRT.
		 */
		break;

	case SIOCDARP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (sa->sat_family != AF_APPLETALK)
			return -EINVAL;
		if (!atif)
			return -EADDRNOTAVAIL;

		/* give to aarp module to remove proxy entry */
		aarp_proxy_remove(atif->dev, &(sa->sat_addr));
		return 0;
	}

	/* GET-style commands fall through here to copy atreq back out. */
	return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
}
/*
 * Routing ioctl() calls: SIOCADDRT / SIOCDELRT.
 * Caller (atalk_ioctl) has already checked CAP_NET_ADMIN.
 */
static int atrtr_ioctl(unsigned int cmd, void __user *arg)
{
	struct rtentry rt;

	if (copy_from_user(&rt, arg, sizeof(rt)))
		return -EFAULT;

	switch (cmd) {
	case SIOCDELRT:
		if (rt.rt_dst.sa_family != AF_APPLETALK)
			return -EINVAL;
		return atrtr_delete(&((struct sockaddr_at *)
				      &rt.rt_dst)->sat_addr);

	case SIOCADDRT: {
		struct net_device *dev = NULL;

		if (rt.rt_dev) {
			char name[IFNAMSIZ];

			/* rt_dev is a user pointer: copy and NUL-terminate. */
			if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
				return -EFAULT;
			name[IFNAMSIZ-1] = '\0';
			dev = __dev_get_by_name(&init_net, name);
			if (!dev)
				return -ENODEV;
		}
		return atrtr_create(&rt, dev);
	}
	}
	return -EINVAL;
}
/**************************************************************************\
* *
* Handling for system calls applied via the various interfaces to an *
* AppleTalk socket object. *
* *
\**************************************************************************/
/*
 * Checksum: This is 'optional'. It's quite likely also a good
 * candidate for assembler hackery 8)
 *
 * DDP checksum step: add each byte into the accumulator, then
 * rotate the low 16 bits left by one after every byte.
 */
static unsigned long atalk_sum_partial(const unsigned char *data,
				       int len, unsigned long sum)
{
	int i;

	for (i = 0; i < len; i++) {
		sum += data[i];
		sum = rol16(sum, 1);
	}
	return sum;
}
/*
 * Checksum skb data -- similar to skb_checksum.
 *
 * Folds [offset, offset+len) of the skb into the running DDP sum,
 * walking the linear head, the page frags, and the frag list
 * (recursively) in order.
 */
static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
				   int len, unsigned long sum)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	/* checksum stuff in header space */
	if ( (copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		sum = atalk_sum_partial(skb->data + offset, copy, sum);
		if ( (len -= copy) == 0)
			return sum;

		offset += copy;
	}

	/* checksum stuff in frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;
			/* Map the frag page just long enough to sum it. */
			vaddr = kmap_atomic(skb_frag_page(frag));
			sum = atalk_sum_partial(vaddr + frag->page_offset +
						offset - start, copy, sum);
			kunmap_atomic(vaddr);

			if (!(len -= copy))
				return sum;
			offset += copy;
		}
		start = end;
	}

	/* checksum stuff in the frag list, recursing per fragment */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			sum = atalk_sum_skb(frag_iter, offset - start,
					    copy, sum);
			if ((len -= copy) == 0)
				return sum;
			offset += copy;
		}
		start = end;
	}

	BUG_ON(len > 0);	/* caller asked for more data than the skb holds */

	return sum;
}
/* Compute the DDP checksum over the packet, skipping the first 4 bytes
 * (which contain the checksum field itself).
 */
static __be16 atalk_checksum(const struct sk_buff *skb, int len)
{
	unsigned long sum;

	/* skip header 4 bytes */
	sum = atalk_sum_skb(skb, 4, len-4, 0);

	/* Use 0xFFFF for 0. 0 itself means none (no checksum). */
	return sum ? htons((unsigned short)sum) : htons(0xFFFF);
}
/* Protocol descriptor for DDP sockets; each sock is a struct atalk_sock. */
static struct proto ddp_proto = {
	.name	  = "DDP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct atalk_sock),
};
/*
 * Create a socket. Initialise the socket, blank the addresses
 * set the state.
 *
 * NOTE(review): the protocol argument is ignored and SOCK_RAW needs no
 * capability here — later upstream kernels reject unknown protocols and
 * require CAP_NET_RAW for raw sockets; verify against current mainline.
 */
static int atalk_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct sock *sk;
	int rc = -ESOCKTNOSUPPORT;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	/*
	 * We permit SOCK_DGRAM and RAW is an extension. It is trivial to do
	 * and gives you the full ELAP frame. Should be handy for CAP 8)
	 */
	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		goto out;
	rc = -ENOMEM;
	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
	if (!sk)
		goto out;
	rc = 0;
	sock->ops = &atalk_dgram_ops;
	sock_init_data(sock, sk);

	/* Checksums on by default; ZAPPED = not yet bound to an address. */
	sock_set_flag(sk, SOCK_ZAPPED);
out:
	return rc;
}
/* Free a socket. No work needed */
static int atalk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock_hold(sk);	/* keep sk alive across the teardown below */
		lock_sock(sk);

		sock_orphan(sk);	/* detach from the socket inode */
		sock->sk = NULL;
		atalk_destroy_socket(sk);

		release_sock(sk);
		sock_put(sk);
	}
	return 0;
}
/**
 * atalk_pick_and_bind_port - Pick a source port when one is not given
 * @sk: socket to insert into the tables
 * @sat: address to search for
 *
 * Pick a source port when one is not given. If we can find a suitable free
 * one, we insert the socket into the tables using it.
 *
 * This whole operation must be atomic.
 *
 * Returns 0 on success (with @sat->sat_port set and @sk inserted), or
 * -EBUSY if every port in [ATPORT_RESERVED, ATPORT_LAST) is taken.
 */
static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
{
	int retval;

	write_lock_bh(&atalk_sockets_lock);

	for (sat->sat_port = ATPORT_RESERVED;
	     sat->sat_port < ATPORT_LAST;
	     sat->sat_port++) {
		struct sock *s;

		/* Is <net, node, port> already in use by any socket? */
		sk_for_each(s, &atalk_sockets) {
			struct atalk_sock *at = at_sk(s);

			if (at->src_net == sat->sat_addr.s_net &&
			    at->src_node == sat->sat_addr.s_node &&
			    at->src_port == sat->sat_port)
				goto try_next_port;
		}

		/* Wheee, it's free, assign and insert. */
		__atalk_insert_socket(sk);
		at_sk(sk)->src_port = sat->sat_port;
		retval = 0;
		goto out;

try_next_port:;
	}

	retval = -EBUSY;
out:
	write_unlock_bh(&atalk_sockets_lock);
	return retval;
}
/*
 * Bind an unbound (ZAPPED) socket to the primary interface's address
 * and an automatically chosen port.  Returns 0 on success or a
 * negative errno (-EADDRNOTAVAIL when no usable interface exists).
 */
static int atalk_autobind(struct sock *sk)
{
	struct atalk_sock *at = at_sk(sk);
	struct sockaddr_at sat;
	struct atalk_addr *ap = atalk_find_primary();
	int n = -EADDRNOTAVAIL;

	if (!ap || ap->s_net == htons(ATADDR_ANYNET))
		goto out;

	at->src_net  = sat.sat_addr.s_net  = ap->s_net;
	at->src_node = sat.sat_addr.s_node = ap->s_node;

	n = atalk_pick_and_bind_port(sk, &sat);
	if (!n)
		sock_reset_flag(sk, SOCK_ZAPPED);	/* now bound */
out:
	return n;
}
/* Set the address 'our end' of the connection */
static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
	struct sock *sk = sock->sk;
	struct atalk_sock *at = at_sk(sk);
	int err;

	/* Only an unbound (ZAPPED) socket may be bound. */
	if (!sock_flag(sk, SOCK_ZAPPED) ||
	    addr_len != sizeof(struct sockaddr_at))
		return -EINVAL;

	if (addr->sat_family != AF_APPLETALK)
		return -EAFNOSUPPORT;

	lock_sock(sk);
	if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
		/* Wildcard net: substitute the primary interface address. */
		struct atalk_addr *ap = atalk_find_primary();

		err = -EADDRNOTAVAIL;
		if (!ap)
			goto out;

		at->src_net  = addr->sat_addr.s_net = ap->s_net;
		at->src_node = addr->sat_addr.s_node= ap->s_node;
	} else {
		err = -EADDRNOTAVAIL;
		if (!atalk_find_interface(addr->sat_addr.s_net,
					  addr->sat_addr.s_node))
			goto out;

		at->src_net  = addr->sat_addr.s_net;
		at->src_node = addr->sat_addr.s_node;
	}

	if (addr->sat_port == ATADDR_ANYPORT) {
		err = atalk_pick_and_bind_port(sk, addr);

		if (err < 0)
			goto out;
	} else {
		at->src_port = addr->sat_port;

		err = -EADDRINUSE;
		if (atalk_find_or_insert_socket(sk, addr))
			goto out;
	}

	sock_reset_flag(sk, SOCK_ZAPPED);	/* bound now */
	err = 0;
out:
	release_sock(sk);
	return err;
}
/* Set the address we talk to (DDP is datagram: just record the peer). */
static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
			 int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct atalk_sock *at = at_sk(sk);
	struct sockaddr_at *addr;
	int err;

	sk->sk_state   = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	if (addr_len != sizeof(*addr))
		return -EINVAL;
	addr = (struct sockaddr_at *)uaddr;

	if (addr->sat_family != AF_APPLETALK)
		return -EAFNOSUPPORT;

	if (addr->sat_addr.s_node == ATADDR_BCAST &&
	    !sock_flag(sk, SOCK_BROADCAST)) {
#if 1
		/* Historically tolerated: warn instead of failing. */
		pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
			current->comm);
#else
		return -EACCES;
#endif
	}

	lock_sock(sk);
	err = -EBUSY;
	if (sock_flag(sk, SOCK_ZAPPED))
		if (atalk_autobind(sk) < 0)
			goto out;

	err = -ENETUNREACH;
	if (!atrtr_get_dev(&addr->sat_addr))
		goto out;

	at->dest_port = addr->sat_port;
	at->dest_net  = addr->sat_addr.s_net;
	at->dest_node = addr->sat_addr.s_node;

	sock->state  = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;
	err = 0;
out:
	release_sock(sk);
	return err;
}
/*
 * Find the name of an AppleTalk socket. Just copy the right
 * fields into the sockaddr.
 */
static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
			 int *uaddr_len, int peer)
{
	struct sockaddr_at sat;
	struct sock *sk = sock->sk;
	struct atalk_sock *at = at_sk(sk);
	int err;

	lock_sock(sk);
	err = -ENOBUFS;
	if (sock_flag(sk, SOCK_ZAPPED))
		if (atalk_autobind(sk) < 0)
			goto out;

	*uaddr_len = sizeof(struct sockaddr_at);
	/* Zero the whole struct so no kernel stack bytes leak to userspace. */
	memset(&sat, 0, sizeof(sat));

	if (peer) {
		err = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		sat.sat_addr.s_net  = at->dest_net;
		sat.sat_addr.s_node = at->dest_node;
		sat.sat_port	    = at->dest_port;
	} else {
		sat.sat_addr.s_net  = at->src_net;
		sat.sat_addr.s_node = at->src_node;
		sat.sat_port	    = at->src_port;
	}

	err = 0;
	sat.sat_family = AF_APPLETALK;
	memcpy(uaddr, &sat, sizeof(sat));

out:
	release_sock(sk);
	return err;
}
#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
{
return skb->data[12] == 22;
}
static int handle_ip_over_ddp(struct sk_buff *skb)
{
struct net_device *dev = __dev_get_by_name(&init_net, "ipddp0");
struct net_device_stats *stats;
/* This needs to be able to handle ipddp"N" devices */
if (!dev) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, 13);
skb->dev = dev;
skb_reset_transport_header(skb);
stats = netdev_priv(dev);
stats->rx_packets++;
stats->rx_bytes += skb->len + 13;
return netif_rx(skb); /* Send the SKB up to a higher place. */
}
#else
/* make it easy for gcc to optimize this test out, i.e. kill the code */
#define is_ip_over_ddp(skb) 0
#define handle_ip_over_ddp(skb) 0
#endif
/*
 * Forward a DDP frame that is not addressed to us: look up the route,
 * bump the hop count, retarget at the gateway if needed, and retransmit.
 * Consumes @skb in every path.  Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
			      struct ddpehdr *ddp, __u16 len_hops, int origlen)
{
	struct atalk_route *rt;
	struct atalk_addr ta;

	/*
	 * Don't route multicast, etc., packets, or packets sent to "this
	 * network"
	 */
	if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) {
		/*
		 * FIXME:
		 *
		 * Can it ever happen that a packet is from a PPP iface and
		 * needs to be broadcast onto the default network?
		 */
		if (dev->type == ARPHRD_PPP)
			printk(KERN_DEBUG "AppleTalk: didn't forward broadcast "
			       "packet received from PPP iface\n");
		goto free_it;
	}

	ta.s_net  = ddp->deh_dnet;
	ta.s_node = ddp->deh_dnode;

	/* Route the packet */
	rt = atrtr_find(&ta);
	/* increment hops count: hop counter lives in bits 10-13 of len_hops */
	len_hops += 1 << 10;
	/* drop if unroutable or the hop counter wrapped to zero (TTL expired) */
	if (!rt || !(len_hops & (15 << 10)))
		goto free_it;

	/* FIXME: use skb->cb to be able to use shared skbs */

	/*
	 * Route goes through another gateway, so set the target to the
	 * gateway instead.
	 */

	if (rt->flags & RTF_GATEWAY) {
		ta.s_net  = rt->gateway.s_net;
		ta.s_node = rt->gateway.s_node;
	}

	/* Fix up skb->len field */
	skb_trim(skb, min_t(unsigned int, origlen,
			    (rt->dev->hard_header_len +
			     ddp_dl->header_length + (len_hops & 1023))));

	/* FIXME: use skb->cb to be able to use shared skbs */
	ddp->deh_len_hops = htons(len_hops);

	/*
	 * Send the buffer onwards
	 *
	 * Now we must always be careful. If it's come from LocalTalk to
	 * EtherTalk it might not fit
	 *
	 * Order matters here: If a packet has to be copied to make a new
	 * headroom (rare hopefully) then it won't need unsharing.
	 *
	 * Note. ddp-> becomes invalid at the realloc.
	 */
	if (skb_headroom(skb) < 22) {
		/* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */
		struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
		kfree_skb(skb);
		skb = nskb;
	} else
		skb = skb_unshare(skb, GFP_ATOMIC);

	/*
	 * If the buffer didn't vanish into the lack of space bitbucket we can
	 * send it.
	 */
	if (skb == NULL)
		goto drop;

	if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
		return NET_RX_DROP;
	return NET_RX_SUCCESS;
free_it:
	kfree_skb(skb);
drop:
	return NET_RX_DROP;
}
/**
 * atalk_rcv - Receive a packet (in skb) from device dev
 * @skb - packet received
 * @dev - network device where the packet comes from
 * @pt - packet type
 *
 * Receive a packet (in skb) from device dev. This has come from the SNAP
 * decoder, and on entry skb->transport_header is the DDP header, skb->len
 * is the DDP header, skb->len is the DDP length. The physical headers
 * have been extracted. PPP should probably pass frames marked as for this
 * layer. [ie ARPHRD_ETHERTALK]
 */
static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	struct ddpehdr *ddp;
	struct sock *sock;
	struct atalk_iface *atif;
	struct sockaddr_at tosat;
	int origlen;
	__u16 len_hops;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	/* Don't mangle buffer if shared */
	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
		goto out;

	/* Size check and make sure header is contiguous */
	if (!pskb_may_pull(skb, sizeof(*ddp)))
		goto drop;

	ddp = ddp_hdr(skb);

	/* len_hops packs the 10-bit datagram length and 4-bit hop count. */
	len_hops = ntohs(ddp->deh_len_hops);

	/* Trim buffer in case of stray trailing data */
	origlen = skb->len;
	skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023));

	/*
	 * Size check to see if ddp->deh_len was crap
	 * (Otherwise we'll detonate most spectacularly
	 * in the middle of atalk_checksum() or recvmsg()).
	 */
	if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
		pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
			 "skb->len=%u)\n", len_hops & 1023, skb->len);
		goto drop;
	}

	/*
	 * Any checksums. Note we don't do htons() on this == is assumed to be
	 * valid for net byte orders all over the networking code...
	 */
	if (ddp->deh_sum &&
	    atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
		/* Not a valid AppleTalk frame - dustbin time */
		goto drop;

	/* Check the packet is aimed at us */
	if (!ddp->deh_dnet)	/* Net 0 is 'this network' */
		atif = atalk_find_anynet(ddp->deh_dnode, dev);
	else
		atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode);

	if (!atif) {
		/* Not ours, so we route the packet via the correct
		 * AppleTalk iface
		 */
		return atalk_route_packet(skb, dev, ddp, len_hops, origlen);
	}

	/* if IP over DDP is not selected this code will be optimized out */
	if (is_ip_over_ddp(skb))
		return handle_ip_over_ddp(skb);
	/*
	 * Which socket - atalk_search_socket() looks for a *full match*
	 * of the <net, node, port> tuple.
	 */
	tosat.sat_addr.s_net  = ddp->deh_dnet;
	tosat.sat_addr.s_node = ddp->deh_dnode;
	tosat.sat_port	      = ddp->deh_dport;

	sock = atalk_search_socket(&tosat, atif);
	if (!sock) /* But not one of our sockets */
		goto drop;

	/* Queue packet (standard) */
	if (sock_queue_rcv_skb(sock, skb) < 0)
		goto drop;

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
/*
 * Receive a LocalTalk frame. We make some demands on the caller here.
 * Caller must provide enough headroom on the packet to pull the short
 * header and append a long one.
 */
static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	if (!net_eq(dev_net(dev), &init_net))
		goto freeit;

	/* Expand any short form frames (mac header byte 2 == 1 marks short DDP) */
	if (skb_mac_header(skb)[2] == 1) {
		struct ddpehdr *ddp;
		/* Find our address */
		struct atalk_addr *ap = atalk_find_dev_addr(dev);

		if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
			goto freeit;

		/* Don't mangle buffer if shared */
		if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
			return 0;

		/*
		 * The push leaves us with a ddephdr not an shdr, and
		 * handily the port bytes in the right place preset.
		 */
		ddp = (struct ddpehdr *) skb_push(skb, sizeof(*ddp) - 4);

		/* Now fill in the long header */

		/*
		 * These two first. The mac overlays the new source/dest
		 * network information so we MUST copy these before
		 * we write the network numbers !
		 */
		ddp->deh_dnode = skb_mac_header(skb)[0];     /* From physical header */
		ddp->deh_snode = skb_mac_header(skb)[1];     /* From physical header */

		ddp->deh_dnet  = ap->s_net;	/* Network number */
		ddp->deh_snet  = ap->s_net;
		ddp->deh_sum   = 0;		/* No checksum */
		/*
		 * Not sure about this bit...
		 */
		/* Non routable, so force a drop if we slip up later */
		ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
	}
	skb_reset_transport_header(skb);

	return atalk_rcv(skb, dev, pt, orig_dev);
freeit:
	kfree_skb(skb);
	return 0;
}
/*
 * Send a datagram over DDP.
 *
 * Resolves the destination (explicit msg_name or the connected peer),
 * finds a route, builds the DDP header, copies the user data in, and
 * hands the frame to AARP for transmission — looping back broadcasts
 * and loopback-device traffic internally.
 *
 * Returns the number of bytes sent, or a negative errno.
 *
 * Fix: the route-lookup failure path previously did "err = ENETUNREACH;"
 * (positive). Since this function returns "err ? : len", a positive errno
 * was returned straight to userspace as a bogus success-looking value.
 * Error codes must be negative.
 */
static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
			 size_t len)
{
	struct sock *sk = sock->sk;
	struct atalk_sock *at = at_sk(sk);
	struct sockaddr_at *usat = (struct sockaddr_at *)msg->msg_name;
	int flags = msg->msg_flags;
	int loopback = 0;
	struct sockaddr_at local_satalk, gsat;
	struct sk_buff *skb;
	struct net_device *dev;
	struct ddpehdr *ddp;
	int size;
	struct atalk_route *rt;
	int err;

	if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len > DDP_MAXSZ)
		return -EMSGSIZE;

	lock_sock(sk);
	if (usat) {
		err = -EBUSY;
		if (sock_flag(sk, SOCK_ZAPPED))
			if (atalk_autobind(sk) < 0)
				goto out;

		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usat) ||
		    usat->sat_family != AF_APPLETALK)
			goto out;

		err = -EPERM;
		/* netatalk didn't implement this check */
		if (usat->sat_addr.s_node == ATADDR_BCAST &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			goto out;
		}
	} else {
		err = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;
		usat = &local_satalk;
		usat->sat_family      = AF_APPLETALK;
		usat->sat_port	      = at->dest_port;
		usat->sat_addr.s_node = at->dest_node;
		usat->sat_addr.s_net  = at->dest_net;
	}

	/* Build a packet */
	SOCK_DEBUG(sk, "SK %p: Got address.\n", sk);

	/* For headers */
	size = sizeof(struct ddpehdr) + len + ddp_dl->header_length;

	if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) {
		rt = atrtr_find(&usat->sat_addr);
	} else {
		/* Zero net: route towards our own network. */
		struct atalk_addr at_hint;

		at_hint.s_node = 0;
		at_hint.s_net  = at->src_net;

		rt = atrtr_find(&at_hint);
	}
	/* Must be negative errno, not the positive ENETUNREACH of old. */
	err = -ENETUNREACH;
	if (!rt)
		goto out;

	dev = rt->dev;

	SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
			sk, size, dev->name);

	size += dev->hard_header_len;
	/* Drop the socket lock while the allocator may sleep. */
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
	lock_sock(sk);
	if (!skb)
		goto out;

	skb_reserve(skb, ddp_dl->header_length);
	skb_reserve(skb, dev->hard_header_len);
	skb->dev = dev;

	SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);

	ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr));
	ddp->deh_len_hops  = htons(len + sizeof(*ddp));
	ddp->deh_dnet  = usat->sat_addr.s_net;
	ddp->deh_snet  = at->src_net;
	ddp->deh_dnode = usat->sat_addr.s_node;
	ddp->deh_snode = at->src_node;
	ddp->deh_dport = usat->sat_port;
	ddp->deh_sport = at->src_port;

	SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len);

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		err = -EFAULT;
		goto out;
	}

	if (sk->sk_no_check == 1)
		ddp->deh_sum = 0;
	else
		ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));

	/*
	 * Loopback broadcast packets to non gateway targets (ie routes
	 * to group we are in)
	 */
	if (ddp->deh_dnode == ATADDR_BCAST &&
	    !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL);

		if (skb2) {
			loopback = 1;
			SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
			/*
			 * If it fails it is queued/sent above in the aarp queue
			 */
			aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL);
		}
	}

	if (dev->flags & IFF_LOOPBACK || loopback) {
		SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);
		/* loop back */
		skb_orphan(skb);
		if (ddp->deh_dnode == ATADDR_BCAST) {
			/* Broadcast loops back via the route for net/node 0. */
			struct atalk_addr at_lo;

			at_lo.s_node = 0;
			at_lo.s_net  = 0;

			rt = atrtr_find(&at_lo);
			if (!rt) {
				kfree_skb(skb);
				err = -ENETUNREACH;
				goto out;
			}
			dev = rt->dev;
			skb->dev = dev;
		}
		ddp_dl->request(ddp_dl, skb, dev->dev_addr);
	} else {
		SOCK_DEBUG(sk, "SK %p: send out.\n", sk);
		if (rt->flags & RTF_GATEWAY) {
			gsat.sat_addr = rt->gateway;
			usat = &gsat;
		}

		/*
		 * If it fails it is queued/sent above in the aarp queue
		 */
		aarp_send_ddp(dev, skb, &usat->sat_addr, NULL);
	}
	SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);

out:
	release_sock(sk);
	return err ? : len;
}
/*
 * Receive one datagram.  For SOCK_RAW the DDP header is delivered to the
 * caller; for SOCK_DGRAM it is stripped.  Fills msg_name with the sender
 * if requested.  Returns bytes copied or a negative errno.
 */
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
			 size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct ddpehdr *ddp;
	int copied = 0;
	int offset = 0;
	int err = 0;
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
						flags & MSG_DONTWAIT, &err);
	lock_sock(sk);

	if (!skb)
		goto out;

	/* FIXME: use skb->cb to be able to use shared skbs */
	ddp = ddp_hdr(skb);
	/* Low 10 bits of deh_len_hops are the datagram length. */
	copied = ntohs(ddp->deh_len_hops) & 1023;

	if (sk->sk_type != SOCK_RAW) {
		offset = sizeof(*ddp);	/* skip the header for datagram sockets */
		copied -= offset;
	}

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}
	err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);

	if (!err && msg->msg_name) {
		struct sockaddr_at *sat = msg->msg_name;
		sat->sat_family      = AF_APPLETALK;
		sat->sat_port        = ddp->deh_sport;
		sat->sat_addr.s_node = ddp->deh_snode;
		sat->sat_addr.s_net  = ddp->deh_snet;
		msg->msg_namelen     = sizeof(*sat);
	}

	skb_free_datagram(sk, skb);	/* Free the datagram. */

out:
	release_sock(sk);
	return err ? : copied;
}
/*
 * AppleTalk ioctl calls.
 */
static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int rc = -ENOIOCTLCMD;
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Protocol layer */
	case TIOCOUTQ: {
		/* Bytes of send-buffer space still available. */
		long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);

		if (amount < 0)
			amount = 0;
		rc = put_user(amount, (int __user *)argp);
		break;
	}
	case TIOCINQ: {
		/*
		 * These two are safe on a single CPU system as only
		 * user tasks fiddle here
		 */
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
		long amount = 0;

		if (skb)
			amount = skb->len - sizeof(struct ddpehdr);
		rc = put_user(amount, (int __user *)argp);
		break;
	}
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCGSTAMPNS:
		rc = sock_get_timestampns(sk, argp);
		break;
	/* Routing */
	case SIOCADDRT:
	case SIOCDELRT:
		rc = -EPERM;
		if (capable(CAP_NET_ADMIN))
			rc = atrtr_ioctl(cmd, argp);
		break;
	/* Interface */
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCATALKDIFADDR:
	case SIOCDIFADDR:
	case SIOCSARP:		/* proxy AARP */
	case SIOCDARP:		/* proxy AARP */
		/* atif_ioctl expects to run under rtnl_lock */
		rtnl_lock();
		rc = atif_ioctl(cmd, argp);
		rtnl_unlock();
		break;
	}

	return rc;
}
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point; only the private SIOCATALKDIFADDR needs help. */
static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	/*
	 * SIOCATALKDIFADDR is a SIOCPROTOPRIVATE ioctl number, so we
	 * cannot handle it in common code. The data we access if ifreq
	 * here is compatible, so we can simply call the native
	 * handler.
	 */
	if (cmd == SIOCATALKDIFADDR)
		return atalk_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));

	return -ENOIOCTLCMD;
}
#endif
/* PF_APPLETALK address-family registration (socket(2) entry point). */
static const struct net_proto_family atalk_family_ops = {
	.family		= PF_APPLETALK,
	.create		= atalk_create,
	.owner		= THIS_MODULE,
};
/* proto_ops for DDP sockets (both SOCK_DGRAM and SOCK_RAW use these). */
static const struct proto_ops atalk_dgram_ops = {
	.family		= PF_APPLETALK,
	.owner		= THIS_MODULE,
	.release	= atalk_release,
	.bind		= atalk_bind,
	.connect	= atalk_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= atalk_getname,
	.poll		= datagram_poll,
	.ioctl		= atalk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= atalk_compat_ioctl,
#endif
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= atalk_sendmsg,
	.recvmsg	= atalk_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
/* Netdevice notifier: tears down iface/route state when devices go down. */
static struct notifier_block ddp_notifier = {
	.notifier_call	= ddp_device_event,
};

/* LocalTalk frames arrive via this packet type (short-form expansion). */
static struct packet_type ltalk_packet_type __read_mostly = {
	.type		= cpu_to_be16(ETH_P_LOCALTALK),
	.func		= ltalk_rcv,
};

/* PPP-encapsulated AppleTalk goes straight to atalk_rcv. */
static struct packet_type ppptalk_packet_type __read_mostly = {
	.type		= cpu_to_be16(ETH_P_PPPTALK),
	.func		= atalk_rcv,
};

/* SNAP protocol discriminator for DDP (registered in atalk_init). */
static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };

/* Export symbols for use by drivers when AppleTalk is a module */
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);

static const char atalk_err_snap[] __initconst =
	KERN_CRIT "Unable to register DDP with SNAP.\n";
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
	int rc = proto_register(&ddp_proto, 0);

	if (rc != 0)
		goto out;

	/*
	 * NOTE(review): the registrations below ignore their return values
	 * and nothing is unwound on partial failure — later upstream kernels
	 * propagate these errors; verify before relying on failure behavior.
	 */
	(void)sock_register(&atalk_family_ops);
	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
	if (!ddp_dl)
		printk(atalk_err_snap);

	dev_add_pack(&ltalk_packet_type);
	dev_add_pack(&ppptalk_packet_type);

	register_netdevice_notifier(&ddp_notifier);
	aarp_proto_init();
	atalk_proc_init();
	atalk_register_sysctl();
out:
	return rc;
}
module_init(atalk_init);
/*
 * No explicit module reference count manipulation is needed in the
 * protocol. Socket layer sets module reference count for us
 * and interfaces reference counting is done
 * by the network device layer.
 *
 * Ergo, before the AppleTalk module can be removed, all AppleTalk
 * sockets be closed from user space.
 */
static void __exit atalk_exit(void)
{
#ifdef CONFIG_SYSCTL
	atalk_unregister_sysctl();
#endif /* CONFIG_SYSCTL */
	atalk_proc_exit();
	aarp_cleanup_module();	/* General aarp clean-up. */
	unregister_netdevice_notifier(&ddp_notifier);
	dev_remove_pack(&ltalk_packet_type);
	dev_remove_pack(&ppptalk_packet_type);
	unregister_snap_client(ddp_dl);
	sock_unregister(PF_APPLETALK);
	proto_unregister(&ddp_proto);
}
module_exit(atalk_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_DESCRIPTION("AppleTalk 0.20\n");
MODULE_ALIAS_NETPROTO(PF_APPLETALK);
| gpl-2.0 |
Motorhead1991/samsung_att_kernel_source-msm7x30 | arch/x86/kernel/i8259.c | 884 | 10770 | #include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 * plus some generic x86 specific things if generic specifics makes
 * any sense at all.
 */
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
/* Forward declarations for the irq_chip callbacks and helpers below. */
static void mask_and_ack_8259A(unsigned int);
static void mask_8259A(void);
static void unmask_8259A(void);
static void disable_8259A_irq(unsigned int irq);
static void enable_8259A_irq(unsigned int irq);
static void init_8259A(int auto_eoi);
static int i8259A_irq_pending(unsigned int irq);

struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.mask		= disable_8259A_irq,
	.disable	= disable_8259A_irq,
	.unmask		= enable_8259A_irq,
	.mask_ack	= mask_and_ack_8259A,
};

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers,
 */
unsigned int cached_irq_mask = 0xffff;

/*
 * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * this 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;
/* Mask one IRQ line: set its bit in the cached mask and write the IMR. */
static void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	/* IRQs 8-15 live on the slave PIC, 0-7 on the master. */
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/* Unmask one IRQ line: clear its bit in the cached mask and write the IMR. */
static void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	/* IRQs 8-15 live on the slave PIC, 0-7 on the master. */
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/* Non-zero if @irq is pending in the PIC's Interrupt Request Register. */
static int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}
/*
 * Hand @irq back to the 8259A: route it away from the IO-APIC (clear
 * its bit in io_apic_irqs) and attach the XT-PIC chip with level-type
 * flow handling.  The IRQ is kept disabled across the switch-over.
 */
static void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
				      "XT");
	enable_irq(irq);
}
/*
 * This function assumes to be called rarely. Switching between
 * 8259A registers is slow.
 * This has to be protected by the irq controller spinlock
 * before being called.
 *
 * Returns non-zero if @irq is actually in service according to the
 * controller's ISR (In-Service Register).
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1<<irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* OCW3: select ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* OCW3: back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* OCW3: select ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* OCW3: back to the IRR register */
	return value;
}
/*
 * Mask @irq and acknowledge (EOI) it at the PIC, with lightweight
 * spurious-IRQ detection.
 *
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(unsigned int irq)
{
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		/* 'Specific EOI' to slave */
		outb(0x60+(irq&7), PIC_SLAVE_CMD);
		/* 'Specific EOI' to master-IRQ2 */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		/* 'Specific EOI' to master */
		outb(0x60+irq, PIC_MASTER_CMD);
	}
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * lets ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}
/* saved ELCR (edge/level trigger) state, captured at suspend time */
static char irq_trigger[2];

/**
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);	/* IRQ 0-7 trigger bits */
	outb(trigger[1], 0x4d1);	/* IRQ 8-15 trigger bits */
}
/* Snapshot the ELCR registers into @trigger for later restore. */
static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved - mask their bits out */
	trigger[0] = inb(0x4d0) & 0xF8;	/* drops bits for IRQ 0-2 */
	trigger[1] = inb(0x4d1) & 0xDE;	/* drops bits for IRQ 8,13 */
}
/* sysdev resume: re-program the PIC with the remembered AEOI mode and ELCR */
static int i8259A_resume(struct sys_device *dev)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
	return 0;
}
/* sysdev suspend: only the ELCR needs saving; masks are cached in RAM */
static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
{
	save_ELCR(irq_trigger);
	return 0;
}
static int i8259A_shutdown(struct sys_device *dev)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	return 0;
}
/* sysdev hooks so PIC state survives suspend/resume and is quiesced on shutdown */
static struct sysdev_class i8259_sysdev_class = {
	.name		= "i8259",
	.suspend	= i8259A_suspend,
	.resume		= i8259A_resume,
	.shutdown	= i8259A_shutdown,
};

/* the single device instance registered under the class above */
static struct sys_device device_i8259A = {
	.id	= 0,
	.cls	= &i8259_sysdev_class,
};
/*
 * Silence both controllers by raising every IMR bit.  The cached
 * masks are deliberately left untouched so unmask_8259A() can bring
 * the previous state back.
 */
static void mask_8259A(void)
{
	unsigned long irqstate;

	raw_spin_lock_irqsave(&i8259A_lock, irqstate);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	raw_spin_unlock_irqrestore(&i8259A_lock, irqstate);
}
/*
 * Undo mask_8259A(): write the cached per-controller masks back to
 * the hardware IMRs under the PIC spinlock.
 */
static void unmask_8259A(void)
{
	unsigned long irqstate;

	raw_spin_lock_irqsave(&i8259A_lock, irqstate);

	outb(cached_master_mask, PIC_MASTER_IMR);	/* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);		/* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, irqstate);
}
/*
 * (Re)initialize both 8259As with the full ICW1-ICW4 programming
 * sequence.  @auto_eoi selects Automatic-EOI on the master, in which
 * case mask_ack degenerates to a plain mask (no explicit EOI needed).
 * The sequence and register order below are mandated by the chip -
 * do not reorder.  Previous cached masks are restored at the end.
 */
static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	/* mask everything while reprogramming */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_pic - this has to work on a wide range of PC hardware.
	 */
	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */

	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
	   to 0x20-0x27 on i386 */
	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);

	/* 8259A-1 (the master) has a slave on IR2 */
	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);

	if (auto_eoi)	/* master does Auto EOI */
		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */

	/* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
	/* 8259A-2 is a slave on master's IR2 */
	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
	/* (slave's support for AEOI in flat mode is to be investigated) */
	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);

	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.mask_ack = disable_8259A_irq;
	else
		i8259A_chip.mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR);	/* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);		/* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
* make i8259 a driver so that we can select pic functions at run time. the goal
* is to make x86 binary compatible among pc compatible and non-pc compatible
* platforms, such as x86 MID.
*/
/* no-op stubs matching the various legacy_pic callback signatures */
static void legacy_pic_noop(void) { };
static void legacy_pic_uint_noop(unsigned int unused) { };
static void legacy_pic_int_noop(int unused) { };
/* do-nothing irq_chip used by platforms without a legacy PIC */
static struct irq_chip dummy_pic_chip  = {
	.name		= "dummy pic",
	.mask		= legacy_pic_uint_noop,
	.unmask		= legacy_pic_uint_noop,
	.disable	= legacy_pic_uint_noop,
	.mask_ack	= legacy_pic_uint_noop,
};

/* no PIC present - nothing can ever be pending */
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
	return 0;
}
/*
 * legacy_pic instance for platforms that have no i8259 (e.g. x86 MID):
 * zero legacy IRQs and every operation a no-op.
 */
struct legacy_pic null_legacy_pic = {
	.nr_legacy_irqs	= 0,
	.chip		= &dummy_pic_chip,
	.mask_all	= legacy_pic_noop,
	.restore_mask	= legacy_pic_noop,
	.init		= legacy_pic_int_noop,
	.irq_pending	= legacy_pic_irq_pending_noop,
	.make_irq	= legacy_pic_uint_noop,
};
/* the real dual-8259A implementation, wired to the functions above */
struct legacy_pic default_legacy_pic = {
	.nr_legacy_irqs	= NR_IRQS_LEGACY,
	.chip		= &i8259A_chip,
	.mask_all	= mask_8259A,
	.restore_mask	= unmask_8259A,
	.init		= init_8259A,
	.irq_pending	= i8259A_irq_pending,
	.make_irq	= make_8259A_irq,
};

/* selected at run time by the platform; defaults to the real 8259A pair */
struct legacy_pic *legacy_pic = &default_legacy_pic;
/*
 * Register the i8259 sysdev class/device so suspend/resume/shutdown
 * hooks fire.  Skipped entirely when the platform replaced the
 * legacy PIC with a different implementation.
 */
static int __init i8259A_init_sysfs(void)
{
	int err;

	if (legacy_pic != &default_legacy_pic)
		return 0;

	err = sysdev_class_register(&i8259_sysdev_class);
	if (err)
		return err;

	return sysdev_register(&device_i8259A);
}

device_initcall(i8259A_init_sysfs);
| gpl-2.0 |
Snuzzo/B14CKB1RD_kernel_m8 | lib/lz4/lz4_decompress.c | 1396 | 8014 | /*
* LZ4 Decompressor for Linux kernel
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
*
* Based on LZ4 implementation by Yann Collet.
*
* LZ4 - Fast LZ compression algorithm
* Copyright (C) 2011-2012, Yann Collet.
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
* - LZ4 source repository : http://code.google.com/p/lz4/
*/
#ifndef STATIC
#include <linux/module.h>
#include <linux/kernel.h>
#endif
#include <linux/lz4.h>
#include <asm/unaligned.h>
#include "lz4defs.h"
/*
 * Decompress a raw LZ4 block whose *decompressed* size (@osize) is
 * known in advance.  Returns the number of input bytes consumed on
 * success, or -1 if the stream is malformed or would overflow the
 * output buffer.  NOTE(review): the input side is not bounds-checked
 * here - callers are assumed to provide a complete compressed block
 * (confirm against callers); only output-side overflow is policed.
 */
static int lz4_uncompress(const char *source, char *dest, int osize)
{
	const BYTE *ip = (const BYTE *) source;	/* input cursor */
	const BYTE *ref;			/* back-reference into output */
	BYTE *op = (BYTE *) dest;		/* output cursor */
	BYTE * const oend = op + osize;		/* one past end of output */
	BYTE *cpy;
	unsigned token;
	size_t length;
	/* per-offset corrections for overlapping matches (offset < 4) */
	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif

	while (1) {
		/* get runlength (literal count, high 4 bits of the token) */
		token = *ip++;
		length = (token >> ML_BITS);
		if (length == RUN_MASK) {
			/* 15 means "more": extend with 255-valued bytes */
			size_t len;

			len = *ip++;
			for (; len == 255; length += 255)
				len = *ip++;
			/* reject additions that would wrap size_t */
			if (unlikely(length > (size_t)(length + len)))
				goto _output_error;
			length += len;
		}

		/* copy literals */
		cpy = op + length;
		if (unlikely(cpy > oend - COPYLENGTH)) {
			/*
			 * Error: not enough place for another match
			 * (min 4) + 5 literals
			 */
			if (cpy != oend)
				goto _output_error;
			memcpy(op, ip, length);
			ip += length;
			break; /* EOF */
		}
		/* fast path: may write up to COPYLENGTH-1 bytes past cpy */
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset (16-bit little-endian back-reference) */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;

		/* Error: offset create reference outside destination buffer */
		if (unlikely(ref < (BYTE *const) dest))
			goto _output_error;

		/* get matchlength (low 4 bits, same 255-extension scheme) */
		length = token & ML_MASK;
		if (length == ML_MASK) {
			for (; *ip == 255; length += 255)
				ip++;
			if (unlikely(length > (size_t)(length + *ip)))
				goto _output_error;
			length += *ip++;
		}

		/* copy repeated sequence */
		if (unlikely((op - ref) < STEPSIZE)) {
			/*
			 * Match overlaps the bytes currently being produced,
			 * so copy in small groups with the dec32/dec64
			 * corrections instead of whole-word copies.
			 */
#if LZ4_ARCH64
			size_t dec64 = dec64table[op - ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op-ref];
			PUT4(ref, op);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE - 4);
		if (cpy > (oend - COPYLENGTH)) {
			/* Error: request to write beyond destination buffer */
			if (cpy > oend)
				goto _output_error;
			/* near the end of the buffer: copy carefully */
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			/*
			 * Check EOF (should never happen, since last 5 bytes
			 * are supposed to be literals)
			 */
			if (op == oend)
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy; /* correction */
	}
	/* end of decoding */
	return (int) (((char *)ip) - source);

	/* write overflow error detected */
_output_error:
	return -1;
}
/*
 * Decompress a raw LZ4 block of @isize compressed bytes into @dest
 * when only an upper bound (@maxoutputsize) on the decompressed size
 * is known.  Unlike lz4_uncompress() above, the input cursor is also
 * bounds-checked against @iend.  Returns the number of bytes
 * produced, or -1 on malformed input or output overflow.
 */
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
				int isize, size_t maxoutputsize)
{
	const BYTE *ip = (const BYTE *) source;		/* input cursor */
	const BYTE *const iend = ip + isize;		/* end of input */
	const BYTE *ref;				/* match back-reference */
	BYTE *op = (BYTE *) dest;			/* output cursor */
	BYTE * const oend = op + maxoutputsize;		/* end of output */
	BYTE *cpy;
	/* per-offset corrections for overlapping matches (offset < 4) */
	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif

	/* Main Loop */
	while (ip < iend) {
		unsigned token;
		size_t length;

		/* get runlength (literal count, high 4 bits of token) */
		token = *ip++;
		length = (token >> ML_BITS);
		if (length == RUN_MASK) {
			int s = 255;
			while ((ip < iend) && (s == 255)) {
				s = *ip++;
				/* reject additions that would wrap size_t */
				if (unlikely(length > (size_t)(length + s)))
					goto _output_error;
				length += s;
			}
		}
		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - COPYLENGTH) ||
			(ip + length > iend - COPYLENGTH)) {
			if (cpy > oend)
				goto _output_error;/* writes beyond buffer */
			if (ip + length != iend)
				goto _output_error;/*
						    * Error: LZ4 format requires
						    * to consume all input
						    * at this stage
						    */
			memcpy(op, ip, length);
			op += length;
			break;/* Necessarily EOF, due to parsing restrictions */
		}
		/* fast path: may write up to COPYLENGTH-1 bytes past cpy */
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset (16-bit little-endian back-reference) */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;
		if (ref < (BYTE * const) dest)
			goto _output_error;
		/*
		 * Error : offset creates reference
		 * outside of destination buffer
		 */

		/* get matchlength (low 4 bits, 255-extension scheme) */
		length = (token & ML_MASK);
		if (length == ML_MASK) {
			while (ip < iend) {
				int s = *ip++;
				if (unlikely(length > (size_t)(length + s)))
					goto _output_error;
				length += s;
				if (s == 255)
					continue;
				break;
			}
		}

		/* copy repeated sequence */
		if (unlikely((op - ref) < STEPSIZE)) {
			/*
			 * Overlapping match (offset < word size): copy in
			 * small groups with the dec32/dec64 corrections.
			 */
#if LZ4_ARCH64
			size_t dec64 = dec64table[op - ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op - ref];
			PUT4(ref, op);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE-4);
		if (cpy > oend - COPYLENGTH) {
			if (cpy > oend)
				goto _output_error; /* write outside of buf */
			/* near the end of the buffer: copy carefully */
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			/*
			 * Check EOF (should never happen, since last 5 bytes
			 * are supposed to be literals)
			 */
			if (op == oend)
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy; /* correction */
	}
	/* end of decoding */
	return (int) (((char *) op) - dest);

	/* write overflow error detected */
_output_error:
	return -1;
}
/*
 * Decompress into @dest whose final size (@actual_dest_len) is known.
 * On success, *src_len is set to the number of compressed bytes
 * consumed and 0 is returned.  On malformed input, -1 is returned and
 * *src_len is left untouched.
 */
int lz4_decompress(const unsigned char *src, size_t *src_len,
		unsigned char *dest, size_t actual_dest_len)
{
	int consumed = lz4_uncompress(src, dest, actual_dest_len);

	if (consumed < 0)
		return -1;

	*src_len = consumed;
	return 0;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress);
#endif
/*
 * Decompress @src_len compressed bytes into @dest, where *dest_len on
 * entry is the capacity of @dest.  On success, *dest_len is updated to
 * the number of bytes produced and 0 is returned; on malformed input,
 * -1 is returned and *dest_len is left untouched.
 */
int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
		unsigned char *dest, size_t *dest_len)
{
	int produced = lz4_uncompress_unknownoutputsize(src, dest, src_len,
			*dest_len);

	if (produced < 0)
		return -1;

	*dest_len = produced;
	return 0;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
| gpl-2.0 |
itsmerajit/kernel_otus | lib/lz4/lz4_decompress.c | 1396 | 8014 | /*
* LZ4 Decompressor for Linux kernel
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
*
* Based on LZ4 implementation by Yann Collet.
*
* LZ4 - Fast LZ compression algorithm
* Copyright (C) 2011-2012, Yann Collet.
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
* - LZ4 source repository : http://code.google.com/p/lz4/
*/
#ifndef STATIC
#include <linux/module.h>
#include <linux/kernel.h>
#endif
#include <linux/lz4.h>
#include <asm/unaligned.h>
#include "lz4defs.h"
/*
 * Decompress a raw LZ4 block whose *decompressed* size (@osize) is
 * known in advance.  Returns the number of input bytes consumed on
 * success, or -1 if the stream is malformed or would overflow the
 * output buffer.  NOTE(review): the input side is not bounds-checked
 * here - callers are assumed to provide a complete compressed block
 * (confirm against callers); only output-side overflow is policed.
 */
static int lz4_uncompress(const char *source, char *dest, int osize)
{
	const BYTE *ip = (const BYTE *) source;	/* input cursor */
	const BYTE *ref;			/* back-reference into output */
	BYTE *op = (BYTE *) dest;		/* output cursor */
	BYTE * const oend = op + osize;		/* one past end of output */
	BYTE *cpy;
	unsigned token;
	size_t length;
	/* per-offset corrections for overlapping matches (offset < 4) */
	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif

	while (1) {
		/* get runlength (literal count, high 4 bits of the token) */
		token = *ip++;
		length = (token >> ML_BITS);
		if (length == RUN_MASK) {
			/* 15 means "more": extend with 255-valued bytes */
			size_t len;

			len = *ip++;
			for (; len == 255; length += 255)
				len = *ip++;
			/* reject additions that would wrap size_t */
			if (unlikely(length > (size_t)(length + len)))
				goto _output_error;
			length += len;
		}

		/* copy literals */
		cpy = op + length;
		if (unlikely(cpy > oend - COPYLENGTH)) {
			/*
			 * Error: not enough place for another match
			 * (min 4) + 5 literals
			 */
			if (cpy != oend)
				goto _output_error;
			memcpy(op, ip, length);
			ip += length;
			break; /* EOF */
		}
		/* fast path: may write up to COPYLENGTH-1 bytes past cpy */
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset (16-bit little-endian back-reference) */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;

		/* Error: offset create reference outside destination buffer */
		if (unlikely(ref < (BYTE *const) dest))
			goto _output_error;

		/* get matchlength (low 4 bits, same 255-extension scheme) */
		length = token & ML_MASK;
		if (length == ML_MASK) {
			for (; *ip == 255; length += 255)
				ip++;
			if (unlikely(length > (size_t)(length + *ip)))
				goto _output_error;
			length += *ip++;
		}

		/* copy repeated sequence */
		if (unlikely((op - ref) < STEPSIZE)) {
			/*
			 * Match overlaps the bytes currently being produced,
			 * so copy in small groups with the dec32/dec64
			 * corrections instead of whole-word copies.
			 */
#if LZ4_ARCH64
			size_t dec64 = dec64table[op - ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op-ref];
			PUT4(ref, op);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE - 4);
		if (cpy > (oend - COPYLENGTH)) {
			/* Error: request to write beyond destination buffer */
			if (cpy > oend)
				goto _output_error;
			/* near the end of the buffer: copy carefully */
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			/*
			 * Check EOF (should never happen, since last 5 bytes
			 * are supposed to be literals)
			 */
			if (op == oend)
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy; /* correction */
	}
	/* end of decoding */
	return (int) (((char *)ip) - source);

	/* write overflow error detected */
_output_error:
	return -1;
}
/*
 * Decompress a raw LZ4 block of @isize compressed bytes into @dest
 * when only an upper bound (@maxoutputsize) on the decompressed size
 * is known.  Unlike lz4_uncompress() above, the input cursor is also
 * bounds-checked against @iend.  Returns the number of bytes
 * produced, or -1 on malformed input or output overflow.
 */
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
				int isize, size_t maxoutputsize)
{
	const BYTE *ip = (const BYTE *) source;		/* input cursor */
	const BYTE *const iend = ip + isize;		/* end of input */
	const BYTE *ref;				/* match back-reference */
	BYTE *op = (BYTE *) dest;			/* output cursor */
	BYTE * const oend = op + maxoutputsize;		/* end of output */
	BYTE *cpy;
	/* per-offset corrections for overlapping matches (offset < 4) */
	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif

	/* Main Loop */
	while (ip < iend) {
		unsigned token;
		size_t length;

		/* get runlength (literal count, high 4 bits of token) */
		token = *ip++;
		length = (token >> ML_BITS);
		if (length == RUN_MASK) {
			int s = 255;
			while ((ip < iend) && (s == 255)) {
				s = *ip++;
				/* reject additions that would wrap size_t */
				if (unlikely(length > (size_t)(length + s)))
					goto _output_error;
				length += s;
			}
		}
		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - COPYLENGTH) ||
			(ip + length > iend - COPYLENGTH)) {
			if (cpy > oend)
				goto _output_error;/* writes beyond buffer */
			if (ip + length != iend)
				goto _output_error;/*
						    * Error: LZ4 format requires
						    * to consume all input
						    * at this stage
						    */
			memcpy(op, ip, length);
			op += length;
			break;/* Necessarily EOF, due to parsing restrictions */
		}
		/* fast path: may write up to COPYLENGTH-1 bytes past cpy */
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset (16-bit little-endian back-reference) */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;
		if (ref < (BYTE * const) dest)
			goto _output_error;
		/*
		 * Error : offset creates reference
		 * outside of destination buffer
		 */

		/* get matchlength (low 4 bits, 255-extension scheme) */
		length = (token & ML_MASK);
		if (length == ML_MASK) {
			while (ip < iend) {
				int s = *ip++;
				if (unlikely(length > (size_t)(length + s)))
					goto _output_error;
				length += s;
				if (s == 255)
					continue;
				break;
			}
		}

		/* copy repeated sequence */
		if (unlikely((op - ref) < STEPSIZE)) {
			/*
			 * Overlapping match (offset < word size): copy in
			 * small groups with the dec32/dec64 corrections.
			 */
#if LZ4_ARCH64
			size_t dec64 = dec64table[op - ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op - ref];
			PUT4(ref, op);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE-4);
		if (cpy > oend - COPYLENGTH) {
			if (cpy > oend)
				goto _output_error; /* write outside of buf */
			/* near the end of the buffer: copy carefully */
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			/*
			 * Check EOF (should never happen, since last 5 bytes
			 * are supposed to be literals)
			 */
			if (op == oend)
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy; /* correction */
	}
	/* end of decoding */
	return (int) (((char *) op) - dest);

	/* write overflow error detected */
_output_error:
	return -1;
}
/*
 * Decompress into @dest whose final size (@actual_dest_len) is known.
 * On success, *src_len is set to the number of compressed bytes
 * consumed and 0 is returned.  On malformed input, -1 is returned and
 * *src_len is left untouched.
 */
int lz4_decompress(const unsigned char *src, size_t *src_len,
		unsigned char *dest, size_t actual_dest_len)
{
	int consumed = lz4_uncompress(src, dest, actual_dest_len);

	if (consumed < 0)
		return -1;

	*src_len = consumed;
	return 0;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress);
#endif
/*
 * Decompress @src_len compressed bytes into @dest, where *dest_len on
 * entry is the capacity of @dest.  On success, *dest_len is updated to
 * the number of bytes produced and 0 is returned; on malformed input,
 * -1 is returned and *dest_len is left untouched.
 */
int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
		unsigned char *dest, size_t *dest_len)
{
	int produced = lz4_uncompress_unknownoutputsize(src, dest, src_len,
			*dest_len);

	if (produced < 0)
		return -1;

	*dest_len = produced;
	return 0;
}
#ifndef STATIC
EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
| gpl-2.0 |
LeMaker/linux-actions | fs/squashfs/namei.c | 2420 | 7650 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* namei.c
*/
/*
* This file implements code to do filename lookup in directories.
*
* Like inodes, directories are packed into compressed metadata blocks, stored
* in a directory table. Directories are accessed using the start address of
* the metablock containing the directory and the offset into the
* decompressed block (<block, offset>).
*
* Directories are organised in a slightly complex way, and are not simply
* a list of file names. The organisation takes advantage of the
* fact that (in most cases) the inodes of the files will be in the same
* compressed metadata block, and therefore, can share the start block.
* Directories are therefore organised in a two level list, a directory
* header containing the shared start block value, and a sequence of directory
* entries, each of which share the shared start block. A new directory header
* is written once/if the inode start block changes. The directory
* header/directory entry list is repeated as many times as necessary.
*
* Directories are sorted, and can contain a directory index to speed up
* file lookup. Directory indexes store one entry per metablock, each entry
* storing the index/filename mapping to the first directory header
* in each metadata block. Directories are sorted in alphabetical order,
* and at lookup the index is scanned linearly looking for the first filename
* alphabetically larger than the filename being looked up. At this point the
* location of the metadata block the filename is in has been found.
* The general idea of the index is ensure only one metadata block needs to be
* decompressed to do a lookup irrespective of the length of the directory.
* This scheme has the advantage that it doesn't require extra memory overhead
* and doesn't require much extra storage on disk.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dcache.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
/*
* Lookup name in the directory index, returning the location of the metadata
* block containing it, and the directory index this represents.
*
* If we get an error reading the index then return the part of the index
* (if any) we have managed to read - the index isn't essential, just
* quicker.
*/
/*
 * Lookup name in the directory index, returning the location of the metadata
 * block containing it, and the directory index this represents.
 *
 * If we get an error reading the index then return the part of the index
 * (if any) we have managed to read - the index isn't essential, just
 * quicker.
 */
static int get_dir_index_using_name(struct super_block *sb,
			u64 *next_block, int *next_offset, u64 index_start,
			int index_offset, int i_count, const char *name,
			int len)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int i, length = 0, err;
	unsigned int size;
	struct squashfs_dir_index *index;
	char *str;

	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);

	/*
	 * One allocation holds both the on-disk index entry (with room for
	 * a maximal name) and a NUL-terminated copy of the name we are
	 * looking for.
	 */
	index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
	if (index == NULL) {
		ERROR("Failed to allocate squashfs_dir_index\n");
		goto out;
	}

	/* caller (squashfs_lookup) has already rejected len > SQUASHFS_NAME_LEN */
	str = &index->name[SQUASHFS_NAME_LEN + 1];
	strncpy(str, name, len);
	str[len] = '\0';

	/*
	 * Scan the index linearly: stop at the first indexed name that
	 * sorts after the target - the last accepted entry tells us which
	 * metadata block to start the real directory scan from.
	 */
	for (i = 0; i < i_count; i++) {
		err = squashfs_read_metadata(sb, index, &index_start,
					&index_offset, sizeof(*index));
		if (err < 0)
			break;

		size = le32_to_cpu(index->size) + 1;
		/* corrupted index entry - stop using the index */
		if (size > SQUASHFS_NAME_LEN)
			break;

		err = squashfs_read_metadata(sb, index->name, &index_start,
					&index_offset, size);
		if (err < 0)
			break;

		index->name[size] = '\0';

		if (strcmp(index->name, str) > 0)
			break;

		length = le32_to_cpu(index->index);
		*next_block = le32_to_cpu(index->start_block) +
					msblk->directory_table;
	}

	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
	kfree(index);

out:
	/*
	 * Return index (f_pos) of the looked up metadata block. Translate
	 * from internal f_pos to external f_pos which is offset by 3 because
	 * we invent "." and ".." entries which are not actually stored in the
	 * directory.
	 */
	return length + 3;
}
/*
 * Look up @dentry's name in directory @dir by walking the directory's
 * header/entry runs, using the directory index (if any) to skip ahead.
 * Returns the spliced dentry on success (a negative dentry if the name
 * does not exist) or an ERR_PTR on metadata read failure.
 */
static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned int flags)
{
	const unsigned char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	struct inode *inode = NULL;
	struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info;
	struct squashfs_dir_header dirh;
	struct squashfs_dir_entry *dire;
	u64 block = squashfs_i(dir)->start + msblk->directory_table;
	int offset = squashfs_i(dir)->offset;
	int err, length;
	unsigned int dir_count, size;

	TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);

	/* entry buffer sized for a maximal on-disk name plus terminator */
	dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
	if (dire == NULL) {
		ERROR("Failed to allocate squashfs_dir_entry\n");
		return ERR_PTR(-ENOMEM);
	}

	if (len > SQUASHFS_NAME_LEN) {
		err = -ENAMETOOLONG;
		goto failed;
	}

	/* use the directory index (if present) to find the starting block */
	length = get_dir_index_using_name(dir->i_sb, &block, &offset,
				squashfs_i(dir)->dir_idx_start,
				squashfs_i(dir)->dir_idx_offset,
				squashfs_i(dir)->dir_idx_cnt, name, len);

	while (length < i_size_read(dir)) {
		/*
		 * Read directory header.
		 */
		err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
				&offset, sizeof(dirh));
		if (err < 0)
			goto read_failure;

		length += sizeof(dirh);

		/* on-disk count is stored off-by-one */
		dir_count = le32_to_cpu(dirh.count) + 1;

		if (dir_count > SQUASHFS_DIR_COUNT)
			goto data_error;

		while (dir_count--) {
			/*
			 * Read directory entry.
			 */
			err = squashfs_read_metadata(dir->i_sb, dire, &block,
					&offset, sizeof(*dire));
			if (err < 0)
				goto read_failure;

			size = le16_to_cpu(dire->size) + 1;

			/* size should never be larger than SQUASHFS_NAME_LEN */
			if (size > SQUASHFS_NAME_LEN)
				goto data_error;

			err = squashfs_read_metadata(dir->i_sb, dire->name,
					&block, &offset, size);
			if (err < 0)
				goto read_failure;

			length += sizeof(*dire) + size;

			/* entries are sorted - past the target, so not found */
			if (name[0] < dire->name[0])
				goto exit_lookup;

			if (len == size && !strncmp(name, dire->name, len)) {
				unsigned int blk, off, ino_num;
				long long ino;

				blk = le32_to_cpu(dirh.start_block);
				off = le16_to_cpu(dire->offset);
				/* per-entry inode number is a signed 16-bit delta */
				ino_num = le32_to_cpu(dirh.inode_number) +
					(short) le16_to_cpu(dire->inode_number);
				ino = SQUASHFS_MKINODE(blk, off);

				TRACE("calling squashfs_iget for directory "
					"entry %s, inode %x:%x, %d\n", name,
					blk, off, ino_num);

				inode = squashfs_iget(dir->i_sb, ino, ino_num);
				goto exit_lookup;
			}
		}
	}

exit_lookup:
	kfree(dire);
	/* inode == NULL yields a negative dentry (name not found) */
	return d_splice_alias(inode, dentry);

data_error:
	err = -EIO;

read_failure:
	ERROR("Unable to read directory block [%llx:%x]\n",
		squashfs_i(dir)->start + msblk->directory_table,
		squashfs_i(dir)->offset);
failed:
	kfree(dire);
	return ERR_PTR(err);
}
/* directory inode operations: lookup plus read-only xattr access */
const struct inode_operations squashfs_dir_inode_ops = {
	.lookup = squashfs_lookup,
	.getxattr = generic_getxattr,
	.listxattr = squashfs_listxattr
};
| gpl-2.0 |
sandymanu/sandy_lettuce_8916 | sound/soc/blackfin/bf5xx-ssm2602.c | 2420 | 3597 | /*
* File: sound/soc/blackfin/bf5xx-ssm2602.c
* Author: Cliff Cai <Cliff.Cai@analog.com>
*
* Created: Tue June 06 2008
* Description: board driver for SSM2602 sound chip
*
* Modified:
* Copyright 2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <asm/dma.h>
#include <asm/portmux.h>
#include <linux/gpio.h>
#include "../codecs/ssm2602.h"
#include "bf5xx-sport.h"
#include "bf5xx-i2s-pcm.h"
/* forward declaration - defined after the dai_link table below */
static struct snd_soc_card bf5xx_ssm2602;

/* Per-link init callback: tell the SSM2602 codec its system clock. */
static int bf5xx_ssm2602_dai_init(struct snd_soc_pcm_runtime *rtd)
{
	/*
	 * If you are using a crystal source which frequency is not 12MHz
	 * then modify the below case statement with frequency of the crystal.
	 *
	 * If you are using the SPORT to generate clocking then this is
	 * where to do it.
	 */
	return snd_soc_dai_set_sysclk(rtd->codec_dai, SSM2602_SYSCLK, 12000000,
		SND_SOC_CLOCK_IN);
}
/* CODEC is master for BCLK and LRC in this configuration. */
#define BF5XX_SSM2602_DAIFMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
SND_SOC_DAIFMT_CBM_CFM)
/*
 * Two identical DAI links, differing only in the CPU DAI (SPORT0 vs SPORT1).
 * Exactly one entry is selected at build time via CONFIG_SND_BF5XX_SPORT_NUM
 * (see the card definition below).
 */
static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = {
	{
		/* Entry 0: codec wired to SPORT0 (bfin-i2s.0) */
		.name = "ssm2602",
		.stream_name = "SSM2602",
		.cpu_dai_name = "bfin-i2s.0",
		.codec_dai_name = "ssm2602-hifi",
		.platform_name = "bfin-i2s-pcm-audio",
		.codec_name = "ssm2602.0-001b",
		.init = bf5xx_ssm2602_dai_init,
		.dai_fmt = BF5XX_SSM2602_DAIFMT,
	},
	{
		/* Entry 1: codec wired to SPORT1 (bfin-i2s.1) */
		.name = "ssm2602",
		.stream_name = "SSM2602",
		.cpu_dai_name = "bfin-i2s.1",
		.codec_dai_name = "ssm2602-hifi",
		.platform_name = "bfin-i2s-pcm-audio",
		.codec_name = "ssm2602.0-001b",
		.init = bf5xx_ssm2602_dai_init,
		.dai_fmt = BF5XX_SSM2602_DAIFMT,
	},
};
/*
 * Sound card definition. Only one DAI link is registered; which SPORT it
 * uses is chosen at compile time by indexing the link array with
 * CONFIG_SND_BF5XX_SPORT_NUM.
 */
static struct snd_soc_card bf5xx_ssm2602 = {
	.name = "bfin-ssm2602",
	.owner = THIS_MODULE,
	.dai_link = &bf5xx_ssm2602_dai[CONFIG_SND_BF5XX_SPORT_NUM],
	.num_links = 1,
};
static struct platform_device *bf5xx_ssm2602_snd_device;
/*
 * Module init: allocate a "soc-audio" platform device carrying the card as
 * drvdata and register it. On registration failure the device reference is
 * dropped with platform_device_put() (add failed, so unregister must not
 * be used).
 */
static int __init bf5xx_ssm2602_init(void)
{
	int ret;

	pr_debug("%s enter\n", __func__);
	bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
	if (!bf5xx_ssm2602_snd_device)
		return -ENOMEM;

	platform_set_drvdata(bf5xx_ssm2602_snd_device, &bf5xx_ssm2602);
	ret = platform_device_add(bf5xx_ssm2602_snd_device);

	if (ret)
		platform_device_put(bf5xx_ssm2602_snd_device);

	return ret;
}
/* Module exit: unregister (and thereby free) the sound platform device. */
static void __exit bf5xx_ssm2602_exit(void)
{
	pr_debug("%s enter\n", __func__);
	platform_device_unregister(bf5xx_ssm2602_snd_device);
}
module_init(bf5xx_ssm2602_init);
module_exit(bf5xx_ssm2602_exit);
/* Module information */
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("ALSA SoC SSM2602 BF527-EZKIT");
MODULE_LICENSE("GPL");
| gpl-2.0 |
BIBIMAINETTIDEV/imx6m300-linux-3.10.17 | drivers/staging/media/solo6x10/solo6x10-i2c.c | 2676 | 7612 | /*
* Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com>
*
* Original author:
* Ben Collins <bcollins@ubuntu.com>
*
* Additional work by:
* John Brooks <john.brooks@bluecherry.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* XXX: The SOLO6x10 i2c does not have separate interrupts for each i2c
* channel. The bus can only handle one i2c event at a time. The below handles
* this all wrong. We should be using the status registers to see if the bus
* is in use, and have a global lock to check the status register. Also,
* the bulk of the work should be handled out-of-interrupt. The ugly loops
* that occur during interrupt scare me. The ISR should merely signal
* thread context, ACK the interrupt, and move on. -- BenC */
#include <linux/kernel.h>
#include "solo6x10.h"
/*
 * Read one byte from register @off of the device at 7-bit address @addr on
 * i2c bus @id, using a write-then-read message pair.
 *
 * NOTE(review): the i2c_transfer() return value is ignored, so on a failed
 * transfer @data is returned uninitialized — presumably callers tolerate
 * garbage on bus errors; confirm before relying on the result.
 */
u8 solo_i2c_readbyte(struct solo_dev *solo_dev, int id, u8 addr, u8 off)
{
	struct i2c_msg msgs[2];
	u8 data;

	/* First message: write the register offset. */
	msgs[0].flags = 0;
	msgs[0].addr = addr;
	msgs[0].len = 1;
	msgs[0].buf = &off;
	/* Second message: read one byte back. */
	msgs[1].flags = I2C_M_RD;
	msgs[1].addr = addr;
	msgs[1].len = 1;
	msgs[1].buf = &data;

	i2c_transfer(&solo_dev->i2c_adap[id], msgs, 2);

	return data;
}
/*
 * Write one byte @data to register @off of the device at 7-bit address
 * @addr on i2c bus @id, as a single two-byte write message.
 * The i2c_transfer() result is ignored, matching the read path.
 */
void solo_i2c_writebyte(struct solo_dev *solo_dev, int id, u8 addr,
			u8 off, u8 data)
{
	u8 payload[2] = { off, data };
	struct i2c_msg msg = {
		.flags = 0,
		.addr = addr,
		.len = 2,
		.buf = payload,
	};

	i2c_transfer(&solo_dev->i2c_adap[id], &msg, 1);
}
/*
 * Program the IIC control register to kick off the next bus phase for the
 * current channel: START on the first phase, READ or WRITE direction per
 * @wr, ACK enable for reads (unless I2C_M_NO_RD_ACK), and STOP once the
 * message pointer has reached the end of the current message.
 */
static void solo_i2c_flush(struct solo_dev *solo_dev, int wr)
{
	u32 ctrl;

	ctrl = SOLO_IIC_CH_SET(solo_dev->i2c_id);

	if (solo_dev->i2c_state == IIC_STATE_START)
		ctrl |= SOLO_IIC_START;

	if (wr) {
		ctrl |= SOLO_IIC_WRITE;
	} else {
		ctrl |= SOLO_IIC_READ;
		if (!(solo_dev->i2c_msg->flags & I2C_M_NO_RD_ACK))
			ctrl |= SOLO_IIC_ACK_EN;
	}

	/* Last byte of this message: generate a STOP condition. */
	if (solo_dev->i2c_msg_ptr == solo_dev->i2c_msg->len)
		ctrl |= SOLO_IIC_STOP;

	solo_reg_write(solo_dev, SOLO_IIC_CTRL, ctrl);
}
/*
 * Begin the current message: load the 8-bit address byte (7-bit address
 * shifted left, R/W bit set for reads) into the TX register and flush a
 * write phase carrying the START condition.
 */
static void solo_i2c_start(struct solo_dev *solo_dev)
{
	u32 addr = solo_dev->i2c_msg->addr << 1;

	if (solo_dev->i2c_msg->flags & I2C_M_RD)
		addr |= 1;

	solo_dev->i2c_state = IIC_STATE_START;
	solo_reg_write(solo_dev, SOLO_IIC_TXD, addr);
	solo_i2c_flush(solo_dev, 1);
}
/*
 * Terminate the transfer: mask the IIC interrupt, clear the control
 * register, mark the state machine stopped and wake the sleeping
 * solo_i2c_master_xfer() caller.
 */
static void solo_i2c_stop(struct solo_dev *solo_dev)
{
	solo_irq_off(solo_dev, SOLO_IRQ_IIC);
	solo_reg_write(solo_dev, SOLO_IIC_CTRL, 0);
	solo_dev->i2c_state = IIC_STATE_STOP;
	wake_up(&solo_dev->i2c_wait);
}
/*
 * Advance the read state machine from interrupt context: request the next
 * byte of the current message, or step to the next message when this one
 * is complete. A following I2C_M_NOSTART read continues without a repeated
 * START (via the goto); a NOSTART write here is treated as the end of the
 * transfer.
 */
static int solo_i2c_handle_read(struct solo_dev *solo_dev)
{
prepare_read:
	/* More bytes pending in the current message: clock in the next one. */
	if (solo_dev->i2c_msg_ptr != solo_dev->i2c_msg->len) {
		solo_i2c_flush(solo_dev, 0);
		return 0;
	}

	/* Current message done; move to the next, if any. */
	solo_dev->i2c_msg_ptr = 0;
	solo_dev->i2c_msg++;
	solo_dev->i2c_msg_num--;

	if (solo_dev->i2c_msg_num == 0) {
		solo_i2c_stop(solo_dev);
		return 0;
	}

	if (!(solo_dev->i2c_msg->flags & I2C_M_NOSTART)) {
		solo_i2c_start(solo_dev);
	} else {
		if (solo_dev->i2c_msg->flags & I2C_M_RD)
			goto prepare_read;
		else
			solo_i2c_stop(solo_dev);
	}

	return 0;
}
/*
 * Advance the write state machine from interrupt context: push the next
 * byte of the current message into the TX register, or step to the next
 * message when this one is complete. A following I2C_M_NOSTART write
 * continues without a repeated START (via the goto); a NOSTART read here
 * is treated as the end of the transfer.
 */
static int solo_i2c_handle_write(struct solo_dev *solo_dev)
{
retry_write:
	/* More bytes pending: transmit the next one. */
	if (solo_dev->i2c_msg_ptr != solo_dev->i2c_msg->len) {
		solo_reg_write(solo_dev, SOLO_IIC_TXD,
			       solo_dev->i2c_msg->buf[solo_dev->i2c_msg_ptr]);
		solo_dev->i2c_msg_ptr++;
		solo_i2c_flush(solo_dev, 1);
		return 0;
	}

	/* Current message done; move to the next, if any. */
	solo_dev->i2c_msg_ptr = 0;
	solo_dev->i2c_msg++;
	solo_dev->i2c_msg_num--;

	if (solo_dev->i2c_msg_num == 0) {
		solo_i2c_stop(solo_dev);
		return 0;
	}

	if (!(solo_dev->i2c_msg->flags & I2C_M_NOSTART)) {
		solo_i2c_start(solo_dev);
	} else {
		if (solo_dev->i2c_msg->flags & I2C_M_RD)
			solo_i2c_stop(solo_dev);
		else
			goto retry_write;
	}

	return 0;
}
/*
 * IIC interrupt handler. Aborts the transfer on a bus/signal error or if
 * no transfer is active (i2c_id < 0); otherwise dispatches on the current
 * state. A START that was an address-write phase falls through into the
 * WRITE case to transmit data bytes.
 */
int solo_i2c_isr(struct solo_dev *solo_dev)
{
	u32 status = solo_reg_read(solo_dev, SOLO_IIC_CTRL);
	int ret = -EINVAL;

	if (CHK_FLAGS(status, SOLO_IIC_STATE_TRNS | SOLO_IIC_STATE_SIG_ERR)
	    || solo_dev->i2c_id < 0) {
		solo_i2c_stop(solo_dev);
		return -ENXIO;
	}

	switch (solo_dev->i2c_state) {
	case IIC_STATE_START:
		if (solo_dev->i2c_msg->flags & I2C_M_RD) {
			solo_dev->i2c_state = IIC_STATE_READ;
			ret = solo_i2c_handle_read(solo_dev);
			break;
		}
		solo_dev->i2c_state = IIC_STATE_WRITE;
		/* fall through */
	case IIC_STATE_WRITE:
		ret = solo_i2c_handle_write(solo_dev);
		break;
	case IIC_STATE_READ:
		/* A byte has arrived: store it, then request the next. */
		solo_dev->i2c_msg->buf[solo_dev->i2c_msg_ptr] =
			solo_reg_read(solo_dev, SOLO_IIC_RXD);
		solo_dev->i2c_msg_ptr++;
		ret = solo_i2c_handle_read(solo_dev);
		break;
	default:
		solo_i2c_stop(solo_dev);
	}

	return ret;
}
/*
 * i2c_algorithm master_xfer: identify which of our adapters is being used,
 * take the single-bus mutex, start the interrupt-driven transfer and sleep
 * (up to HZ/2, interruptible) until the ISR signals IIC_STATE_STOP.
 * Returns the number of messages completed.
 *
 * NOTE(review): when the adapter is not one of ours, this returns @num —
 * which the i2c core treats as full success. An error code would normally
 * be expected here (the in-tree XXX comment flags the same doubt).
 */
static int solo_i2c_master_xfer(struct i2c_adapter *adap,
				struct i2c_msg msgs[], int num)
{
	struct solo_dev *solo_dev = adap->algo_data;
	unsigned long timeout;
	int ret;
	int i;
	DEFINE_WAIT(wait);

	/* Map the adapter back to its index within our adapter array. */
	for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
		if (&solo_dev->i2c_adap[i] == adap)
			break;
	}

	if (i == SOLO_I2C_ADAPTERS)
		return num; /* XXX Right return value for failure? */

	/* Only one transfer may be active on the shared bus at a time. */
	mutex_lock(&solo_dev->i2c_mutex);
	solo_dev->i2c_id = i;
	solo_dev->i2c_msg = msgs;
	solo_dev->i2c_msg_num = num;
	solo_dev->i2c_msg_ptr = 0;

	solo_reg_write(solo_dev, SOLO_IIC_CTRL, 0);
	solo_irq_on(solo_dev, SOLO_IRQ_IIC);
	solo_i2c_start(solo_dev);

	timeout = HZ / 2;

	/* Wait for the ISR to drive the transfer to completion. */
	for (;;) {
		prepare_to_wait(&solo_dev->i2c_wait, &wait,
				TASK_INTERRUPTIBLE);

		if (solo_dev->i2c_state == IIC_STATE_STOP)
			break;

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		if (signal_pending(current))
			break;
	}

	finish_wait(&solo_dev->i2c_wait, &wait);
	/* i2c_msg_num counts down as messages complete. */
	ret = num - solo_dev->i2c_msg_num;
	solo_dev->i2c_state = IIC_STATE_IDLE;
	solo_dev->i2c_id = -1;

	mutex_unlock(&solo_dev->i2c_mutex);

	return ret;
}
/* Advertise plain I2C capability only (no SMBus emulation extras). */
static u32 solo_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

/* Algorithm shared by all adapters on this device. */
static const struct i2c_algorithm solo_i2c_algo = {
	.master_xfer = solo_i2c_master_xfer,
	.functionality = solo_i2c_functionality,
};
/*
 * Set up the IIC hardware (prescaler + enable), initialize the transfer
 * state/waitqueue/mutex, and register one i2c adapter per channel. On a
 * registration failure all previously added adapters are deleted and the
 * error is returned.
 *
 * Fix: @ret is now initialized — previously it was read uninitialized if
 * the adapter loop executed zero times (SOLO_I2C_ADAPTERS is a
 * configuration constant).
 */
int solo_i2c_init(struct solo_dev *solo_dev)
{
	int i;
	int ret = 0;

	solo_reg_write(solo_dev, SOLO_IIC_CFG,
		       SOLO_IIC_PRESCALE(8) | SOLO_IIC_ENABLE);

	solo_dev->i2c_id = -1;
	solo_dev->i2c_state = IIC_STATE_IDLE;
	init_waitqueue_head(&solo_dev->i2c_wait);
	mutex_init(&solo_dev->i2c_mutex);

	for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
		struct i2c_adapter *adap = &solo_dev->i2c_adap[i];

		snprintf(adap->name, I2C_NAME_SIZE, "%s I2C %d",
			 SOLO6X10_NAME, i);
		adap->algo = &solo_i2c_algo;
		adap->algo_data = solo_dev;
		adap->retries = 1;
		adap->dev.parent = &solo_dev->pdev->dev;

		ret = i2c_add_adapter(adap);
		if (ret) {
			/* NULL algo_data marks "not registered" for unwind. */
			adap->algo_data = NULL;
			break;
		}
	}

	if (ret) {
		/* Unwind: delete every adapter that was registered. */
		for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
			if (!solo_dev->i2c_adap[i].algo_data)
				break;
			i2c_del_adapter(&solo_dev->i2c_adap[i]);
			solo_dev->i2c_adap[i].algo_data = NULL;
		}
		return ret;
	}

	return 0;
}
void solo_i2c_exit(struct solo_dev *solo_dev)
{
int i;
for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
if (!solo_dev->i2c_adap[i].algo_data)
continue;
i2c_del_adapter(&solo_dev->i2c_adap[i]);
solo_dev->i2c_adap[i].algo_data = NULL;
}
}
| gpl-2.0 |
tilaksidduram/android_kernel_samsung_smdk4412 | drivers/mfd/janz-cmodio.c | 2932 | 7532 | /*
* Janz CMOD-IO MODULbus Carrier Board PCI Driver
*
* Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
*
* Lots of inspiration and code was copied from drivers/mfd/sm501.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
#include <linux/mfd/janz.h>
#define DRV_NAME "janz-cmodio"
/* Size of each MODULbus module in PCI BAR4 */
#define CMODIO_MODULBUS_SIZE 0x200
/* Maximum number of MODULbus modules on a CMOD-IO carrier board */
#define CMODIO_MAX_MODULES 4
/* Module Parameters */
/* Module Parameters */
/* Names of the MODULbus modules plugged into each carrier slot; "empty"
 * (or "") marks an unpopulated slot. */
static unsigned int num_modules = CMODIO_MAX_MODULES;
static unsigned char *modules[CMODIO_MAX_MODULES] = {
	"empty", "empty", "empty", "empty",
};
module_param_array(modules, charp, &num_modules, S_IRUGO);
MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board");

/* Unique Device Id -- monotonically increasing id for each mfd subdevice */
static unsigned int cmodio_id;

/* Per-board driver state. */
struct cmodio_device {
	/* Parent PCI device */
	struct pci_dev *pdev;

	/* PLX control registers */
	struct janz_cmodio_onboard_regs __iomem *ctrl;

	/* hex switch position */
	u8 hex;

	/* mfd-core API: one cell + 3 resources + platform data per module */
	struct mfd_cell cells[CMODIO_MAX_MODULES];
	struct resource resources[3 * CMODIO_MAX_MODULES];
	struct janz_platform_data pdata[CMODIO_MAX_MODULES];
};
/*
* Subdevices using the mfd-core API
*/
/*
 * Fill in the mfd_cell, three resources (MODULbus window, PLX control
 * registers, IRQ) and platform data for one populated slot.
 *
 * @devno: index of this populated module among those being probed
 *         (selects which cell/resource/pdata triple to fill)
 * @modno: physical slot number on the carrier (selects the BAR3 window)
 */
static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
					    char *name, unsigned int devno,
					    unsigned int modno)
{
	struct janz_platform_data *pdata;
	struct mfd_cell *cell;
	struct resource *res;
	struct pci_dev *pci;

	pci = priv->pdev;
	cell = &priv->cells[devno];
	res = &priv->resources[devno * 3];
	pdata = &priv->pdata[devno];

	cell->name = name;
	cell->resources = res;
	cell->num_resources = 3;

	/* Setup the subdevice ID -- must be unique */
	cell->id = cmodio_id++;

	/* Add platform data */
	pdata->modno = modno;
	cell->platform_data = pdata;
	cell->pdata_size = sizeof(*pdata);

	/* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
	res->flags = IORESOURCE_MEM;
	res->parent = &pci->resource[3];
	res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno);
	res->end = res->start + CMODIO_MODULBUS_SIZE - 1;
	res++;

	/* PLX Control Registers -- PCI BAR4 is interrupt and other registers */
	res->flags = IORESOURCE_MEM;
	res->parent = &pci->resource[4];
	res->start = pci->resource[4].start;
	res->end = pci->resource[4].end;
	res++;

	/*
	 * IRQ
	 *
	 * The start and end fields are used as an offset to the irq_base
	 * parameter passed into the mfd_add_devices() function call. All
	 * devices share the same IRQ.
	 */
	res->flags = IORESOURCE_IRQ;
	res->parent = NULL;
	res->start = 0;
	res->end = 0;
	res++;

	return 0;
}
/* Probe each submodule using kernel parameters */
/* Probe each submodule using kernel parameters */
/*
 * Walk the "modules" parameter array, set up an mfd cell for every slot
 * that is not "empty", and hand the populated cells to mfd_add_devices().
 * Fails with -ENODEV if no slot was configured.
 */
static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
{
	struct pci_dev *pdev = priv->pdev;
	unsigned int num_probed = 0;
	char *name;
	int i;

	for (i = 0; i < num_modules; i++) {
		name = modules[i];
		if (!strcmp(name, "") || !strcmp(name, "empty"))
			continue;

		dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name);
		cmodio_setup_subdevice(priv, name, num_probed, i);
		num_probed++;
	}

	/* print an error message if no modules were probed */
	if (num_probed == 0) {
		dev_err(&priv->pdev->dev, "no MODULbus modules specified, "
					  "please set the ``modules'' kernel "
					  "parameter according to your "
					  "hardware configuration\n");
		return -ENODEV;
	}

	return mfd_add_devices(&pdev->dev, 0, priv->cells,
			       num_probed, NULL, pdev->irq);
}
/*
* SYSFS Attributes
*/
/*
 * sysfs show method: report the carrier board's hex-switch position
 * (the MODULbus number) as a hexadecimal value.
 */
static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cmodio_device *cmodio = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%x\n", cmodio->hex);
}
/* Read-only "modulbus_number" attribute, backed by mbus_show(). */
static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);

static struct attribute *cmodio_sysfs_attrs[] = {
	&dev_attr_modulbus_number.attr,
	NULL,
};

static const struct attribute_group cmodio_sysfs_attr_group = {
	.attrs = cmodio_sysfs_attrs,
};
/*
* PCI Driver
*/
/*
 * PCI probe: allocate driver state, enable the device, map the PLX control
 * registers (BAR4), read the hex switch, expose it via sysfs, disable all
 * module interrupt lines, then register the configured submodules.
 * Uses goto-based unwind — each label undoes exactly the steps completed
 * before its failure point.
 */
static int __devinit cmodio_pci_probe(struct pci_dev *dev,
				      const struct pci_device_id *id)
{
	struct cmodio_device *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&dev->dev, "unable to allocate private data\n");
		ret = -ENOMEM;
		goto out_return;
	}

	pci_set_drvdata(dev, priv);
	priv->pdev = dev;

	/* Hardware Initialization */
	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "unable to enable device\n");
		goto out_free_priv;
	}

	pci_set_master(dev);
	ret = pci_request_regions(dev, DRV_NAME);
	if (ret) {
		dev_err(&dev->dev, "unable to request regions\n");
		goto out_pci_disable_device;
	}

	/* Onboard configuration registers */
	priv->ctrl = pci_ioremap_bar(dev, 4);
	if (!priv->ctrl) {
		dev_err(&dev->dev, "unable to remap onboard regs\n");
		ret = -ENOMEM;
		goto out_pci_release_regions;
	}

	/* Read the hex switch on the carrier board */
	priv->hex = ioread8(&priv->ctrl->int_enable);

	/* Add the MODULbus number (hex switch value) to the device's sysfs */
	ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
	if (ret) {
		dev_err(&dev->dev, "unable to create sysfs attributes\n");
		goto out_unmap_ctrl;
	}

	/*
	 * Disable all interrupt lines, each submodule will enable its
	 * own interrupt line if needed
	 */
	iowrite8(0xf, &priv->ctrl->int_disable);

	/* Register drivers for all submodules */
	ret = cmodio_probe_submodules(priv);
	if (ret) {
		dev_err(&dev->dev, "unable to probe submodules\n");
		goto out_sysfs_remove_group;
	}

	return 0;

out_sysfs_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
out_unmap_ctrl:
	iounmap(priv->ctrl);
out_pci_release_regions:
	pci_release_regions(dev);
out_pci_disable_device:
	pci_disable_device(dev);
out_free_priv:
	kfree(priv);
out_return:
	return ret;
}
/*
 * PCI remove: tear down in the reverse order of probe — mfd subdevices,
 * sysfs group, register mapping, PCI regions, device, private data.
 */
static void __devexit cmodio_pci_remove(struct pci_dev *dev)
{
	struct cmodio_device *priv = pci_get_drvdata(dev);

	mfd_remove_devices(&dev->dev);
	sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
	iounmap(priv->ctrl);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(priv);
}
#define PCI_VENDOR_ID_JANZ		0x13c3

/* The list of devices that this module will support */
/* Janz carrier boards are PLX 9030/9050 bridges with Janz subsystem IDs. */
static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);

static struct pci_driver cmodio_pci_driver = {
	.name     = DRV_NAME,
	.id_table = cmodio_pci_ids,
	.probe    = cmodio_pci_probe,
	.remove   = __devexit_p(cmodio_pci_remove),
};
/*
* Module Init / Exit
*/
/* Module init/exit: plain PCI driver registration. */
static int __init cmodio_init(void)
{
	return pci_register_driver(&cmodio_pci_driver);
}

static void __exit cmodio_exit(void)
{
	pci_unregister_driver(&cmodio_pci_driver);
}
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
MODULE_LICENSE("GPL");
module_init(cmodio_init);
module_exit(cmodio_exit);
| gpl-2.0 |
manveru0/FeaCore_Phoenix_S3_JellyBean | drivers/pcmcia/bfin_cf_pcmcia.c | 3188 | 7775 | /*
* file: drivers/pcmcia/bfin_cf.c
*
* based on: drivers/pcmcia/omap_cf.c
* omap_cf.c -- OMAP 16xx CompactFlash controller driver
*
* Copyright (c) 2005 David Brownell
* Copyright (c) 2006-2008 Michael Hennerich Analog Devices Inc.
*
* bugs: enter bugs at http://blackfin.uclinux.org/
*
* this program is free software; you can redistribute it and/or modify
* it under the terms of the gnu general public license as published by
* the free software foundation; either version 2, or (at your option)
* any later version.
*
* this program is distributed in the hope that it will be useful,
* but without any warranty; without even the implied warranty of
* merchantability or fitness for a particular purpose. see the
* gnu general public license for more details.
*
* you should have received a copy of the gnu general public license
* along with this program; see the file copying.
* if not, write to the free software foundation,
* 59 temple place - suite 330, boston, ma 02111-1307, usa.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
#include <asm/gpio.h>
#define SZ_1K 0x00000400
#define SZ_8K 0x00002000
#define SZ_2K (2 * SZ_1K)
#define POLL_INTERVAL (2 * HZ)
#define CF_ATASEL_ENA 0x20311802 /* Inverts RESET */
#define CF_ATASEL_DIS 0x20311800
#define bfin_cf_present(pfx) (gpio_get_value(pfx))
/*--------------------------------------------------------------------------*/
static const char driver_name[] = "bfin_cf_pcmcia";
struct bfin_cf_socket {
struct pcmcia_socket socket;
struct timer_list timer;
unsigned present:1;
unsigned active:1;
struct platform_device *pdev;
unsigned long phys_cf_io;
unsigned long phys_cf_attr;
u_int irq;
u_short cd_pfx;
};
/*--------------------------------------------------------------------------*/
/*
 * Pulse the card's reset line via the ATASEL enable/disable addresses
 * (CF_ATASEL_ENA inverts RESET), holding it for 200 ms.
 */
static int bfin_cf_reset(void)
{
	outw(0, CF_ATASEL_ENA);
	mdelay(200);
	outw(0, CF_ATASEL_DIS);

	return 0;
}
/* Socket init hook: nothing to do for this hardware. */
static int bfin_cf_ss_init(struct pcmcia_socket *s)
{
	return 0;
}
/* the timer is primarily to kick this socket's pccardd */
/* the timer is primarily to kick this socket's pccardd */
/*
 * Poll the card-detect GPIO every POLL_INTERVAL; report insert/remove
 * transitions to the PCMCIA core and re-arm while the socket is active.
 */
static void bfin_cf_timer(unsigned long _cf)
{
	struct bfin_cf_socket *cf = (void *)_cf;
	unsigned short present = bfin_cf_present(cf->cd_pfx);

	if (present != cf->present) {
		cf->present = present;
		dev_dbg(&cf->pdev->dev, ": card %s\n",
			present ? "present" : "gone");
		pcmcia_parse_events(&cf->socket, SS_DETECT);
	}

	if (cf->active)
		mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
}
/*
 * Report socket status: when a card is present (per the CD GPIO) it is
 * always ready, powered and 3.3 V; the socket's IRQ is routed through
 * pci_irq. No card -> all status bits clear.
 */
static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
{
	struct bfin_cf_socket *cf;

	if (!sp)
		return -EINVAL;

	cf = container_of(s, struct bfin_cf_socket, socket);

	if (bfin_cf_present(cf->cd_pfx)) {
		*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
		s->pcmcia_irq = 0;
		s->pci_irq = cf->irq;
	} else
		*sp = 0;
	return 0;
}
/*
 * Apply socket state: only Vcc values 0/3.3/5.0 (in decivolts: 0, 33, 50)
 * are accepted — the hardware has no programmable supply, so they are
 * no-ops. SS_RESET pulses the card reset with the IRQ masked.
 */
static int
bfin_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
{
	struct bfin_cf_socket *cf;
	cf = container_of(sock, struct bfin_cf_socket, socket);

	switch (s->Vcc) {
	case 0:
	case 33:
		break;
	case 50:
		break;
	default:
		return -EINVAL;
	}

	if (s->flags & SS_RESET) {
		disable_irq(cf->irq);
		bfin_cf_reset();
		enable_irq(cf->irq);
	}

	dev_dbg(&cf->pdev->dev, ": Vcc %d, io_irq %d, flags %04x csc %04x\n",
		s->Vcc, s->io_irq, s->flags, s->csc_mask);

	return 0;
}
/* Suspend: drive the socket to the all-off "dead_socket" state. */
static int bfin_cf_ss_suspend(struct pcmcia_socket *s)
{
	return bfin_cf_set_socket(s, &dead_socket);
}
/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
/* Map I/O accesses onto the fixed 2K I/O window. */
static int bfin_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
{
	struct bfin_cf_socket *cf;
	cf = container_of(s, struct bfin_cf_socket, socket);

	/* Only these mapping flags are meaningful on this hardware. */
	io->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
	io->start = cf->phys_cf_io;
	io->stop = io->start + SZ_2K - 1;
	return 0;
}
/*
 * Map memory accesses: common memory uses the I/O window base, attribute
 * memory (MAP_ATTRIB) uses the attribute window. Offsets within the card
 * (card_start != 0) are not supported.
 */
static int
bfin_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
{
	struct bfin_cf_socket *cf;

	if (map->card_start)
		return -EINVAL;
	cf = container_of(s, struct bfin_cf_socket, socket);
	map->static_start = cf->phys_cf_io;
	map->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
	if (map->flags & MAP_ATTRIB)
		map->static_start = cf->phys_cf_attr;

	return 0;
}
/* pcmcia_socket operations implemented by this driver. */
static struct pccard_operations bfin_cf_ops = {
	.init = bfin_cf_ss_init,
	.suspend = bfin_cf_ss_suspend,
	.get_status = bfin_cf_get_status,
	.set_socket = bfin_cf_set_socket,
	.set_io_map = bfin_cf_set_io_map,
	.set_mem_map = bfin_cf_set_mem_map,
};
/*--------------------------------------------------------------------------*/
/*
 * Platform probe: claim the card-detect GPIO (passed as IRQ resource 1),
 * allocate the socket, map the 2K I/O window, register with the PCMCIA
 * core and start the card-detect poll timer.
 *
 * Fixes:
 *  - the two "goto fail0" paths previously returned @status == 0, so a
 *    failed probe (missing mem resources or ioremap failure) reported
 *    success while everything had been freed; they now return an error.
 *  - typo in the GPIO request error message ("Failed ro request").
 *
 * NOTE(review): the fail2 path calls release_mem_region() although this
 * function never calls request_mem_region() — looks unbalanced; confirm
 * against the platform resource setup.
 */
static int __devinit bfin_cf_probe(struct platform_device *pdev)
{
	struct bfin_cf_socket *cf;
	struct resource *io_mem, *attr_mem;
	int irq;
	unsigned short cd_pfx;
	int status = 0;

	dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n");

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	cd_pfx = platform_get_irq(pdev, 1);	/*Card Detect GPIO PIN */

	if (gpio_request(cd_pfx, "pcmcia: CD")) {
		dev_err(&pdev->dev,
			"Failed to request Card Detect GPIO_%d\n",
			cd_pfx);
		return -EBUSY;
	}
	gpio_direction_input(cd_pfx);

	cf = kzalloc(sizeof *cf, GFP_KERNEL);
	if (!cf) {
		gpio_free(cd_pfx);
		return -ENOMEM;
	}

	cf->cd_pfx = cd_pfx;

	setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf);

	cf->pdev = pdev;
	platform_set_drvdata(pdev, cf);

	cf->irq = irq;
	cf->socket.pci_irq = irq;

	irq_set_irq_type(irq, IRQF_TRIGGER_LOW);

	io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!io_mem || !attr_mem) {
		status = -ENODEV;
		goto fail0;
	}

	cf->phys_cf_io = io_mem->start;
	cf->phys_cf_attr = attr_mem->start;

	/* pcmcia layer only remaps "real" memory */
	cf->socket.io_offset = (unsigned long)
		ioremap(cf->phys_cf_io, SZ_2K);

	if (!cf->socket.io_offset) {
		status = -ENOMEM;
		goto fail0;
	}

	dev_err(&pdev->dev, ": on irq %d\n", irq);

	dev_dbg(&pdev->dev, ": %s\n",
		bfin_cf_present(cf->cd_pfx) ? "present" : "(not present)");

	cf->socket.owner = THIS_MODULE;
	cf->socket.dev.parent = &pdev->dev;
	cf->socket.ops = &bfin_cf_ops;
	cf->socket.resource_ops = &pccard_static_ops;
	cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
	    | SS_CAP_MEM_ALIGN;
	cf->socket.map_size = SZ_2K;

	status = pcmcia_register_socket(&cf->socket);
	if (status < 0)
		goto fail2;

	cf->active = 1;
	mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
	return 0;

fail2:
	iounmap((void __iomem *)cf->socket.io_offset);
	release_mem_region(cf->phys_cf_io, SZ_8K);

fail0:
	gpio_free(cf->cd_pfx);
	kfree(cf);
	platform_set_drvdata(pdev, NULL);

	return status;
}
/*
 * Platform remove: stop polling, unregister the socket and release all
 * resources taken in probe.
 *
 * NOTE(review): the CD GPIO is freed before del_timer_sync(); a last timer
 * tick could still read the freed GPIO — looks like gpio_free() belongs
 * after the timer is stopped. Confirm before reordering.
 */
static int __devexit bfin_cf_remove(struct platform_device *pdev)
{
	struct bfin_cf_socket *cf = platform_get_drvdata(pdev);

	gpio_free(cf->cd_pfx);
	cf->active = 0;
	pcmcia_unregister_socket(&cf->socket);
	del_timer_sync(&cf->timer);
	iounmap((void __iomem *)cf->socket.io_offset);
	release_mem_region(cf->phys_cf_io, SZ_8K);
	platform_set_drvdata(pdev, NULL);
	kfree(cf);
	return 0;
}
/* Platform driver glue; matched by name against the board's device. */
static struct platform_driver bfin_cf_driver = {
	.driver = {
		   .name = (char *)driver_name,
		   .owner = THIS_MODULE,
		   },
	.probe = bfin_cf_probe,
	.remove = __devexit_p(bfin_cf_remove),
};
/* Module init/exit: plain platform driver (un)registration. */
static int __init bfin_cf_init(void)
{
	return platform_driver_register(&bfin_cf_driver);
}

static void __exit bfin_cf_exit(void)
{
	platform_driver_unregister(&bfin_cf_driver);
}
module_init(bfin_cf_init);
module_exit(bfin_cf_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pgielda/linux-2.6-olinuxino | arch/mips/nxp/pnx8550/common/setup.c | 3444 | 4044 | /*
*
* 2.6 port, Embedded Alley Solutions, Inc
*
* Based on Per Hallsmark, per.hallsmark@mvista.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/serial_pnx8xxx.h>
#include <linux/pm.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/mipsregs.h>
#include <asm/reboot.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <glb.h>
#include <int.h>
#include <pci.h>
#include <uart.h>
#include <nand.h>
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
/* Legacy PC-style I/O port regions claimed as busy so nothing else
 * allocates them (DMA controllers, PIT, DMA page registers). */
struct resource standard_io_resources[] = {
	{
		.start	= 0x00,
		.end	= 0x1f,
		.name	= "dma1",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0x40,
		.end	= 0x5f,
		.name	= "timer",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0x80,
		.end	= 0x8f,
		.name	= "dma page reg",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0xc0,
		.end	= 0xdf,
		.name	= "dma2",
		.flags	= IORESOURCE_BUSY
	},
};

#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
extern struct resource pci_io_resource;
extern struct resource pci_mem_resource;
/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
unsigned long get_system_mem_size(void)
{
	/* IP2031_RANK0_ADDR_LO: lowest address of DRAM rank 0 */
	unsigned long rank0_lo = inl(PCI_BASE | 0x65010);
	/* IP2031_RANK1_ADDR_HI: highest address of DRAM rank 1 */
	unsigned long rank1_hi = inl(PCI_BASE | 0x65018);

	/* Memory is the contiguous span from rank 0's bottom to rank 1's top. */
	return rank1_hi - rank0_lo + 1;
}
int pnx8550_console_port = -1;
/*
 * Platform memory/IO setup: install reboot/halt/poweroff hooks, enable the
 * DACs and PCI INTA output, set up I/O port and memory resource roots,
 * claim the legacy port regions, route the UART1 TX pin, and — if a
 * "console=ttyS<n>" argument is present — pre-program the chosen UART so
 * early printk works.
 */
void __init plat_mem_setup(void)
{
	int i;
	char* argptr;

	board_setup();  /* board specific setup */

	_machine_restart = pnx8550_machine_restart;
	_machine_halt = pnx8550_machine_halt;
	pm_power_off = pnx8550_machine_power_off;

	/* Clear the Global 2 Register, PCI Inta Output Enable Registers
	   Bit 1:Enable DAC Powerdown
	  -> 0:DACs are enabled and are working normally
	     1:DACs are powerdown
	   Bit 0:Enable of PCI inta output
	  -> 0 = Disable PCI inta output
	     1 = Enable PCI inta output
	*/
	PNX8550_GLB2_ENAB_INTA_O = 0;

	/* IO/MEM resources. */
	set_io_port_base(PNX8550_PORT_BASE);
	ioport_resource.start = 0;
	ioport_resource.end = ~0;
	iomem_resource.start = 0;
	iomem_resource.end = ~0;

	/* Request I/O space for devices on this board */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources + i);

	/* Place the Mode Control bit for GPIO pin 16 in primary function */
	/* Pin 16 is used by UART1, UA1_TX */
	outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
		(PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
		PNX8550_GPIO_MC1);

	argptr = prom_getcmdline();
	if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
		argptr += strlen("console=ttyS");
		pnx8550_console_port = *argptr == '0' ? 0 : 1;

		/* We must initialize the UART (console) before early printk */
		/* Set LCR to 8-bit and BAUD to 38400 (no 5) */
		ip3106_lcr(UART_BASE, pnx8550_console_port) =
			PNX8XXX_UART_LCR_8BIT;
		ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
	}

	return;
}
| gpl-2.0 |
turtlepa/android_kernel_samsung_aries-galaxys4gmtd | drivers/video/neofb.c | 4724 | 56730 | /*
* linux/drivers/video/neofb.c -- NeoMagic Framebuffer Driver
*
* Copyright (c) 2001-2002 Denis Oliver Kropp <dok@directfb.org>
*
*
* Card specific code is based on XFree86's neomagic driver.
* Framebuffer framework code is based on code of cyber2000fb.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
*
* 0.4.1
* - Cosmetic changes (dok)
*
* 0.4
* - Toshiba Libretto support, allow modes larger than LCD size if
* LCD is disabled, keep BIOS settings if internal/external display
* haven't been enabled explicitly
* (Thomas J. Moore <dark@mama.indstate.edu>)
*
* 0.3.3
* - Porting over to new fbdev api. (jsimmons)
*
* 0.3.2
* - got rid of all floating point (dok)
*
* 0.3.1
* - added module license (dok)
*
* 0.3
* - hardware accelerated clear and move for 2200 and above (dok)
* - maximum allowed dotclock is handled now (dok)
*
* 0.2.1
* - correct panning after X usage (dok)
* - added module and kernel parameters (dok)
* - no stretching if external display is enabled (dok)
*
* 0.2
* - initial version (dok)
*
*
* TODO
* - ioctl for internal/external switching
* - blanking
* - 32bit depth support, maybe impossible
* - disable pan-on-sync, need specs
*
* BUGS
* - white margin on bootup like with tdfxfb (colormap problem?)
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/init.h>
#ifdef CONFIG_TOSHIBA
#include <linux/toshiba.h>
#endif
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include <video/vga.h>
#include <video/neomagic.h>
#define NEOFB_VERSION "0.4.2"
/* --------------------------------------------------------------------- */
/* Module/boot parameters; all default to off / auto (BIOS settings kept). */
static int internal;		/* force internal LCD output on */
static int external;		/* force external CRT output on */
static int libretto;		/* force Libretto 100/110 800x480 LCD */
static int nostretch;		/* don't stretch sub-LCD-size modes */
static int nopciburst;		/* disable PCI burst mode */
static char *mode_option __devinitdata = NULL;

#ifdef MODULE

MODULE_AUTHOR("(c) 2001-2002  Denis Oliver Kropp <dok@convergence.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FBDev driver for NeoMagic PCI Chips");

module_param(internal, bool, 0);
MODULE_PARM_DESC(internal, "Enable output on internal LCD Display.");
module_param(external, bool, 0);
MODULE_PARM_DESC(external, "Enable output on external CRT.");
module_param(libretto, bool, 0);
MODULE_PARM_DESC(libretto, "Force Libretto 100/110 800x480 LCD.");
module_param(nostretch, bool, 0);
MODULE_PARM_DESC(nostretch,
		 "Disable stretching of modes smaller than LCD.");
module_param(nopciburst, bool, 0);
MODULE_PARM_DESC(nopciburst, "Disable PCI burst mode.");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Preferred video mode ('640x480-8@60', etc)");

#endif
/* --------------------------------------------------------------------- */
/* BIOS mode tables: map {x_res, y_res} to the NeoMagic BIOS mode number,
 * one table per color depth, sorted by resolution (see neoFindMode). */
static biosMode bios8[] = {
	{320, 240, 0x40},
	{300, 400, 0x42},
	{640, 400, 0x20},
	{640, 480, 0x21},
	{800, 600, 0x23},
	{1024, 768, 0x25},
};

static biosMode bios16[] = {
	{320, 200, 0x2e},
	{320, 240, 0x41},
	{300, 400, 0x43},
	{640, 480, 0x31},
	{800, 600, 0x34},
	{1024, 768, 0x37},
};

static biosMode bios24[] = {
	{640, 480, 0x32},
	{800, 600, 0x35},
	{1024, 768, 0x38}
};

#ifdef NO_32BIT_SUPPORT_YET
/* FIXME: guessed values, wrong */
static biosMode bios32[] = {
	{640, 480, 0x33},
	{800, 600, 0x36},
	{1024, 768, 0x39}
};
#endif
/* Write a little-endian 32-bit value into a NM2200+ cursor register. */
static inline void write_le32(int regindex, u32 val, const struct neofb_par *par)
{
	writel(val, par->neo2200 + par->cursorOff + regindex);
}
/*
 * Look up the BIOS mode number for the smallest table mode that fits
 * (xres, yres) at the given depth. Within the first x-resolution group
 * that can hold xres, the smallest adequate y-resolution wins; if none in
 * the group fits vertically, the previous entry is returned. Falls back
 * to the largest mode in the table. Returns 0 for an unsupported depth.
 */
static int neoFindMode(int xres, int yres, int depth)
{
	int xres_s;
	int i, size;
	biosMode *mode;

	switch (depth) {
	case 8:
		size = ARRAY_SIZE(bios8);
		mode = bios8;
		break;
	case 16:
		size = ARRAY_SIZE(bios16);
		mode = bios16;
		break;
	case 24:
		size = ARRAY_SIZE(bios24);
		mode = bios24;
		break;
#ifdef NO_32BIT_SUPPORT_YET
	case 32:
		size = ARRAY_SIZE(bios32);
		mode = bios32;
		break;
#endif
	default:
		return 0;
	}

	for (i = 0; i < size; i++) {
		if (xres <= mode[i].x_res) {
			/* Scan all entries sharing this x resolution. */
			xres_s = mode[i].x_res;
			for (; i < size; i++) {
				if (mode[i].x_res != xres_s)
					return mode[i - 1].mode;
				if (yres <= mode[i].y_res)
					return mode[i].mode;
			}
		}
	}
	return mode[size - 1].mode;
}
/*
* neoCalcVCLK --
*
* Determine the closest clock frequency to the one requested.
*/
/*
 * Clock synthesizer search limits: numerator N, denominator D and
 * post-divider F, giving f_out = (14318 * (N+1) / (D+1)) >> F in kHz
 * (see neoCalcVCLK below).
 */
#define MAX_N 127
#define MAX_D 31
#define MAX_F 1
/*
 * neoCalcVCLK --
 *
 * Exhaustively search the (N, D, F) divider space for the output
 * frequency closest to the requested one, and store the winning
 * values in the VCLK3 fields of @par.  On ties (<=) a later
 * candidate replaces an earlier one.
 */
static void neoCalcVCLK(const struct fb_info *info,
			struct neofb_par *par, long freq)
{
	int num, den, post;
	int best_num = 0, best_den = 0, best_post = 0;
	long best_diff = 0x7ffff;

	for (post = 0; post <= MAX_F; post++)
		for (den = 0; den <= MAX_D; den++)
			for (num = 0; num <= MAX_N; num++) {
				/* 14318 kHz reference oscillator. */
				long f_out = ((14318 * (num + 1)) / (den + 1)) >> post;
				long f_diff = abs(f_out - freq);

				if (f_diff <= best_diff) {
					best_diff = f_diff;
					best_num = num;
					best_den = den;
					best_post = post;
				}
				/* Output only grows with N; stop once past the target. */
				if (f_out > freq)
					break;
			}

	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
		/* NOT_DONE: We are trying the full range of the 2200 clock.
		   We should be able to try n up to 2047 */
		par->VCLK3NumeratorLow = best_num;
		par->VCLK3NumeratorHigh = (best_post << 7);
	} else
		par->VCLK3NumeratorLow = best_num | (best_post << 7);
	par->VCLK3Denominator = best_den;

#ifdef NEOFB_DEBUG
	printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n",
	       freq,
	       par->VCLK3NumeratorLow,
	       par->VCLK3NumeratorHigh,
	       par->VCLK3Denominator, best_diff);
#endif
}
/*
* vgaHWInit --
* Handle the initialization, etc. of a screen.
* Return FALSE on failure.
*/
static int vgaHWInit(const struct fb_var_screeninfo *var,
		     struct neofb_par *par)
{
	/* Horizontal timings are converted to character clocks (8 pixels). */
	int hsync_end = var->xres + var->right_margin + var->hsync_len;
	int htotal = (hsync_end + var->left_margin) >> 3;
	int vsync_start = var->yres + var->lower_margin;
	int vsync_end = vsync_start + var->vsync_len;
	int vtotal = vsync_end + var->upper_margin;

	/* Base Misc Output value; sync polarity bits are ORed in below. */
	par->MiscOutReg = 0x23;

	if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
		par->MiscOutReg |= 0x40;

	if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
		par->MiscOutReg |= 0x80;

	/*
	 * Time Sequencer
	 */
	par->Sequencer[0] = 0x00;
	par->Sequencer[1] = 0x01;
	par->Sequencer[2] = 0x0F;
	par->Sequencer[3] = 0x00;	/* Font select */
	par->Sequencer[4] = 0x0E;	/* Misc */

	/*
	 * CRTC Controller
	 */
	par->CRTC[0] = htotal - 5;
	par->CRTC[1] = (var->xres >> 3) - 1;
	par->CRTC[2] = (var->xres >> 3) - 1;
	par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80;
	par->CRTC[4] = ((var->xres + var->right_margin) >> 3);
	par->CRTC[5] = (((htotal - 1) & 0x20) << 2)
	    | (((hsync_end >> 3)) & 0x1F);
	par->CRTC[6] = (vtotal - 2) & 0xFF;
	/* CRTC[7] is the VGA overflow register: high-order bits of the
	 * vertical timing values packed together. */
	par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8)
	    | (((var->yres - 1) & 0x100) >> 7)
	    | ((vsync_start & 0x100) >> 6)
	    | (((var->yres - 1) & 0x100) >> 5)
	    | 0x10 | (((vtotal - 2) & 0x200) >> 4)
	    | (((var->yres - 1) & 0x200) >> 3)
	    | ((vsync_start & 0x200) >> 2);
	par->CRTC[8] = 0x00;
	par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40;

	/* Double-scan modes set bit 7 of the maximum scan line register. */
	if (var->vmode & FB_VMODE_DOUBLE)
		par->CRTC[9] |= 0x80;
	par->CRTC[10] = 0x00;
	par->CRTC[11] = 0x00;
	par->CRTC[12] = 0x00;
	par->CRTC[13] = 0x00;
	par->CRTC[14] = 0x00;
	par->CRTC[15] = 0x00;
	par->CRTC[16] = vsync_start & 0xFF;
	par->CRTC[17] = (vsync_end & 0x0F) | 0x20;
	par->CRTC[18] = (var->yres - 1) & 0xFF;
	par->CRTC[19] = var->xres_virtual >> 4;
	par->CRTC[20] = 0x00;
	par->CRTC[21] = (var->yres - 1) & 0xFF;
	par->CRTC[22] = (vtotal - 1) & 0xFF;
	par->CRTC[23] = 0xC3;
	par->CRTC[24] = 0xFF;

	/*
	 * are these unnecessary?
	 * vgaHWHBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO);
	 * vgaHWVBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO);
	 */

	/*
	 * Graphics Display Controller
	 */
	par->Graphics[0] = 0x00;
	par->Graphics[1] = 0x00;
	par->Graphics[2] = 0x00;
	par->Graphics[3] = 0x00;
	par->Graphics[4] = 0x00;
	par->Graphics[5] = 0x40;
	par->Graphics[6] = 0x05;	/* only map 64k VGA memory !!!! */
	par->Graphics[7] = 0x0F;
	par->Graphics[8] = 0xFF;

	/* Attribute controller: identity palette mapping for entries 0-15. */
	par->Attribute[0] = 0x00;	/* standard colormap translation */
	par->Attribute[1] = 0x01;
	par->Attribute[2] = 0x02;
	par->Attribute[3] = 0x03;
	par->Attribute[4] = 0x04;
	par->Attribute[5] = 0x05;
	par->Attribute[6] = 0x06;
	par->Attribute[7] = 0x07;
	par->Attribute[8] = 0x08;
	par->Attribute[9] = 0x09;
	par->Attribute[10] = 0x0A;
	par->Attribute[11] = 0x0B;
	par->Attribute[12] = 0x0C;
	par->Attribute[13] = 0x0D;
	par->Attribute[14] = 0x0E;
	par->Attribute[15] = 0x0F;
	par->Attribute[16] = 0x41;
	par->Attribute[17] = 0xFF;
	par->Attribute[18] = 0x0F;
	par->Attribute[19] = 0x00;
	par->Attribute[20] = 0x00;
	return 0;
}
/* Write-protect CRTC registers 0-7 by setting bit 7 of CRTC index 0x11. */
static void vgaHWLock(struct vgastate *state)
{
	/* Protect CRTC[0-7] */
	vga_wcrt(state->vgabase, 0x11, vga_rcrt(state->vgabase, 0x11) | 0x80);
}
/* Clear bit 7 of CRTC index 0x11 so CRTC registers 0-7 become writable. */
static void vgaHWUnlock(void)
{
	/* Unprotect CRTC[0-7] */
	vga_wcrt(NULL, 0x11, vga_rcrt(NULL, 0x11) & ~0x80);
}
/*
 * Lock the NeoMagic extended registers (GR 0x09 = 0x00) and then the
 * standard VGA CRTC registers.
 */
static void neoLock(struct vgastate *state)
{
	vga_wgfx(state->vgabase, 0x09, 0x00);
	vgaHWLock(state);
}
/*
 * Unlock the CRTC registers, then the NeoMagic extended registers
 * (0x26 written to GR 0x09 is the unlock value; neoLock writes 0x00).
 */
static void neoUnlock(void)
{
	vgaHWUnlock();
	vga_wgfx(NULL, 0x09, 0x26);
}
/*
* VGA Palette management
*/
/* Nonzero while the attribute controller is left in palette-access mode
 * (set/cleared by VGAenablePalette/VGAdisablePalette, read by VGAwATTR). */
static int paletteEnabled = 0;
/*
 * Put the attribute controller into palette-access mode.  Reading the
 * input status register first resets the index/data flip-flop; writing
 * 0x00 to the attribute index (bit 5 clear) blanks the display for
 * palette access.
 */
static inline void VGAenablePalette(void)
{
	vga_r(NULL, VGA_IS1_RC);
	vga_w(NULL, VGA_ATT_W, 0x00);
	paletteEnabled = 1;
}
/*
 * Leave palette-access mode: writing 0x20 (bit 5 set) to the attribute
 * index re-enables normal display output.
 */
static inline void VGAdisablePalette(void)
{
	vga_r(NULL, VGA_IS1_RC);
	vga_w(NULL, VGA_ATT_W, 0x20);
	paletteEnabled = 0;
}
/*
 * Write an attribute controller register, keeping bit 5 of the index in
 * sync with the current palette-access state tracked by paletteEnabled.
 */
static inline void VGAwATTR(u8 index, u8 value)
{
	index = paletteEnabled ? (index & ~0x20) : (index | 0x20);

	/* Reset the index/data flip-flop before writing. */
	vga_r(NULL, VGA_IS1_RC);
	vga_wattr(NULL, index, value);
}
/*
 * Blank (on != 0) or unblank (on == 0) the screen around a mode change:
 * toggles the sequencer's screen-off bit and the synchronous reset, and
 * switches palette-access mode accordingly.
 */
static void vgaHWProtect(int on)
{
	unsigned char tmp;

	tmp = vga_rseq(NULL, 0x01);
	if (on) {
		/*
		 * Turn off screen and disable sequencer.
		 */
		vga_wseq(NULL, 0x00, 0x01);		/* Synchronous Reset */
		vga_wseq(NULL, 0x01, tmp | 0x20);	/* disable the display */

		VGAenablePalette();
	} else {
		/*
		 * Reenable sequencer, then turn on screen.
		 */
		vga_wseq(NULL, 0x01, tmp & ~0x20);	/* reenable display */
		vga_wseq(NULL, 0x00, 0x03);		/* clear synchronous reset */

		VGAdisablePalette();
	}
}
/*
 * Program all generic VGA registers from the values pre-computed in
 * @par: misc output, sequencer 1-4, CRTC 0-24, graphics 0-8 and
 * attribute 0-20 (the last group inside a palette-access window).
 */
static void vgaHWRestore(const struct fb_info *info,
			 const struct neofb_par *par)
{
	int i;

	vga_w(NULL, VGA_MIS_W, par->MiscOutReg);

	for (i = 1; i < 5; i++)
		vga_wseq(NULL, i, par->Sequencer[i]);

	/* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 of CRTC[17] */
	vga_wcrt(NULL, 17, par->CRTC[17] & ~0x80);

	for (i = 0; i < 25; i++)
		vga_wcrt(NULL, i, par->CRTC[i]);

	for (i = 0; i < 9; i++)
		vga_wgfx(NULL, i, par->Graphics[i]);

	VGAenablePalette();
	for (i = 0; i < 21; i++)
		VGAwATTR(i, par->Attribute[i]);
	VGAdisablePalette();
}
/* -------------------- Hardware specific routines ------------------------- */
/*
* Hardware Acceleration for Neo2200+
*/
/* Busy-wait until the 2200-family blitter is idle (bltStat bit 0 clear). */
static inline int neo2200_sync(struct fb_info *info)
{
	struct neofb_par *par = info->par;

	while (readl(&par->neo2200->bltStat) & 1)
		cpu_relax();
	return 0;
}
/*
 * Wait for @requested_fifo_space free slots in the blitter FIFO.
 * The FIFO-counting implementation below is known broken (see FIXME),
 * so this currently just waits for the engine to go fully idle.
 */
static inline void neo2200_wait_fifo(struct fb_info *info,
				     int requested_fifo_space)
{
	//  ndev->neo.waitfifo_calls++;
	//  ndev->neo.waitfifo_sum += requested_fifo_space;

	/* FIXME: does not work
	   if (neo_fifo_space < requested_fifo_space)
	   {
	   neo_fifo_waitcycles++;

	   while (1)
	   {
	   neo_fifo_space = (neo2200->bltStat >> 8);
	   if (neo_fifo_space >= requested_fifo_space)
	   break;
	   }
	   }
	   else
	   {
	   neo_fifo_cache_hits++;
	   }

	   neo_fifo_space -= requested_fifo_space;
	 */

	neo2200_sync(info);
}
static inline void neo2200_accel_init(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct neofb_par *par = info->par;
Neo2200 __iomem *neo2200 = par->neo2200;
u32 bltMod, pitch;
neo2200_sync(info);
switch (var->bits_per_pixel) {
case 8:
bltMod = NEO_MODE1_DEPTH8;
pitch = var->xres_virtual;
break;
case 15:
case 16:
bltMod = NEO_MODE1_DEPTH16;
pitch = var->xres_virtual * 2;
break;
case 24:
bltMod = NEO_MODE1_DEPTH24;
pitch = var->xres_virtual * 3;
break;
default:
printk(KERN_ERR
"neofb: neo2200_accel_init: unexpected bits per pixel!\n");
return;
}
writel(bltMod << 16, &neo2200->bltStat);
writel((pitch << 16) | pitch, &neo2200->pitch);
}
/* --------------------------------------------------------------------- */
/*
 * Open hook: on the first open, save the VGA state (mode and fonts) so
 * it can be restored when the last user closes the device.
 */
static int
neofb_open(struct fb_info *info, int user)
{
	struct neofb_par *par = info->par;

	if (par->ref_count == 0) {
		memset(&par->state, 0, sizeof(par->state));
		par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
		save_vga(&par->state);
	}
	par->ref_count++;

	return 0;
}
/*
 * Release hook: restore the saved VGA state when the last reference is
 * dropped.  Returns -EINVAL if called without a matching open.
 */
static int
neofb_release(struct fb_info *info, int user)
{
	struct neofb_par *par = info->par;

	if (par->ref_count == 0)
		return -EINVAL;

	if (par->ref_count == 1)
		restore_vga(&par->state);
	par->ref_count--;

	return 0;
}
/*
 * Validate and adjust a requested video mode: reject pixel clocks above
 * the chip limit and modes the LCD panel cannot display, fill in the
 * RGB bitfield layout for the chosen depth, and clamp the virtual
 * resolution and panning offsets to the available video memory.
 */
static int
neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct neofb_par *par = info->par;
	int memlen, vramlen;
	int mode_ok = 0;

	DBG("neofb_check_var");

	if (PICOS2KHZ(var->pixclock) > par->maxClock)
		return -EINVAL;

	/* Is the mode larger than the LCD panel? */
	if (par->internal_display &&
	    ((var->xres > par->NeoPanelWidth) ||
	     (var->yres > par->NeoPanelHeight))) {
		printk(KERN_INFO
		       "Mode (%dx%d) larger than the LCD panel (%dx%d)\n",
		       var->xres, var->yres, par->NeoPanelWidth,
		       par->NeoPanelHeight);
		return -EINVAL;
	}

	/* Is the mode one of the acceptable sizes? */
	if (!par->internal_display)
		mode_ok = 1;
	else {
		switch (var->xres) {
		case 1280:
			if (var->yres == 1024)
				mode_ok = 1;
			break;
		case 1024:
			if (var->yres == 768)
				mode_ok = 1;
			break;
		case 800:
			/* Libretto 100/110 panels are 800x480, not 800x600. */
			if (var->yres == (par->libretto ? 480 : 600))
				mode_ok = 1;
			break;
		case 640:
			if (var->yres == 480)
				mode_ok = 1;
			break;
		}
	}

	if (!mode_ok) {
		printk(KERN_INFO
		       "Mode (%dx%d) won't display properly on LCD\n",
		       var->xres, var->yres);
		return -EINVAL;
	}

	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.msb_right = 0;
	var->transp.msb_right = 0;
	var->transp.offset = 0;
	var->transp.length = 0;

	/* Per-depth RGB bitfield layout. */
	switch (var->bits_per_pixel) {
	case 8:		/* PSEUDOCOLOUR, 256 */
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		break;

	case 16:		/* DIRECTCOLOUR, 64k */
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		break;

	case 24:		/* TRUECOLOUR, 16m */
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		break;

#ifdef NO_32BIT_SUPPORT_YET
	case 32:		/* TRUECOLOUR, 16m */
		var->transp.offset = 24;
		var->transp.length = 8;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		break;
#endif
	default:
		printk(KERN_WARNING "neofb: no support for %dbpp\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	/* Clamp usable VRAM to 4 MiB for the virtual-size calculation. */
	vramlen = info->fix.smem_len;
	if (vramlen > 4 * 1024 * 1024)
		vramlen = 4 * 1024 * 1024;

	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;

	memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual >> 3;

	if (memlen > vramlen) {
		/* Shrink the virtual height until the mode fits in VRAM. */
		var->yres_virtual = vramlen * 8 / (var->xres_virtual *
					var->bits_per_pixel);
		memlen = var->xres_virtual * var->bits_per_pixel *
				var->yres_virtual / 8;
	}

	/* we must round yres/xres down, we already rounded y/xres_virtual up
	   if it was possible. We should return -EINVAL, but I disagree */
	if (var->yres_virtual < var->yres)
		var->yres = var->yres_virtual;
	if (var->xoffset + var->xres > var->xres_virtual)
		var->xoffset = var->xres_virtual - var->xres;
	if (var->yoffset + var->yres > var->yres_virtual)
		var->yoffset = var->yres_virtual - var->yres;

	var->nonstd = 0;
	var->height = -1;
	var->width = -1;

	/* The 2200 engine cannot accelerate 24bpp text; see neo2200_imageblit. */
	if (var->bits_per_pixel >= 24 || !par->neo2200)
		var->accel_flags &= ~FB_ACCELF_TEXT;
	return 0;
}
/*
 * Program the hardware for the mode in info->var: generic VGA registers
 * via vgaHWInit/vgaHWRestore, plus the NeoMagic extended registers for
 * panel control, stretching/centering, the VCLK synthesizer and the
 * 2200-family acceleration engine.
 */
static int neofb_set_par(struct fb_info *info)
{
	struct neofb_par *par = info->par;
	unsigned char temp;
	int i, clock_hi = 0;
	int lcd_stretch;
	int hoffset, voffset;
	int vsync_start, vtotal;

	DBG("neofb_set_par");

	neoUnlock();
	vgaHWProtect(1);	/* Blank the screen */

	vsync_start = info->var.yres + info->var.lower_margin;
	vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin;

	/*
	 * This will allocate the datastructure and initialize all of the
	 * generic VGA registers.
	 */
	if (vgaHWInit(&info->var, par))
		return -EINVAL;

	/*
	 * The default value assigned by vgaHW.c is 0x41, but this does
	 * not work for NeoMagic.
	 */
	par->Attribute[16] = 0x01;

	/* Per-depth CRTC pitch, extended pitch overflow and colour mode. */
	switch (info->var.bits_per_pixel) {
	case 8:
		par->CRTC[0x13] = info->var.xres_virtual >> 3;
		par->ExtCRTOffset = info->var.xres_virtual >> 11;
		par->ExtColorModeSelect = 0x11;
		break;
	case 16:
		par->CRTC[0x13] = info->var.xres_virtual >> 2;
		par->ExtCRTOffset = info->var.xres_virtual >> 10;
		par->ExtColorModeSelect = 0x13;
		break;
	case 24:
		par->CRTC[0x13] = (info->var.xres_virtual * 3) >> 3;
		par->ExtCRTOffset = (info->var.xres_virtual * 3) >> 11;
		par->ExtColorModeSelect = 0x14;
		break;
#ifdef NO_32BIT_SUPPORT_YET
	case 32:		/* FIXME: guessed values */
		par->CRTC[0x13] = info->var.xres_virtual >> 1;
		par->ExtCRTOffset = info->var.xres_virtual >> 9;
		par->ExtColorModeSelect = 0x15;
		break;
#endif
	default:
		break;
	}

	par->ExtCRTDispAddr = 0x10;

	/* Vertical Extension: bit 10 of the vertical timing values. */
	par->VerticalExt = (((vtotal - 2) & 0x400) >> 10)
	    | (((info->var.yres - 1) & 0x400) >> 9)
	    | (((vsync_start) & 0x400) >> 8)
	    | (((vsync_start) & 0x400) >> 7);

	/* Fast write bursts on unless disabled. */
	if (par->pci_burst)
		par->SysIfaceCntl1 = 0x30;
	else
		par->SysIfaceCntl1 = 0x00;

	par->SysIfaceCntl2 = 0xc0;	/* VESA Bios sets this to 0x80! */

	/* Initialize: by default, we want display config register to be read */
	par->PanelDispCntlRegRead = 1;

	/* Enable any user specified display devices. */
	par->PanelDispCntlReg1 = 0x00;
	if (par->internal_display)
		par->PanelDispCntlReg1 |= 0x02;
	if (par->external_display)
		par->PanelDispCntlReg1 |= 0x01;

	/* If the user did not specify any display devices, then... */
	if (par->PanelDispCntlReg1 == 0x00) {
		/* Default to internal (i.e., LCD) only. */
		par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
	}

	/* If we are using a fixed mode, then tell the chip we are. */
	switch (info->var.xres) {
	case 1280:
		par->PanelDispCntlReg1 |= 0x60;
		break;
	case 1024:
		par->PanelDispCntlReg1 |= 0x40;
		break;
	case 800:
		par->PanelDispCntlReg1 |= 0x20;
		break;
	case 640:
	default:
		break;
	}

	/* Setup shadow register locking. */
	switch (par->PanelDispCntlReg1 & 0x03) {
	case 0x01:		/* External CRT only mode: */
		par->GeneralLockReg = 0x00;
		/* We need to program the VCLK for external display only mode. */
		par->ProgramVCLK = 1;
		break;
	case 0x02:		/* Internal LCD only mode: */
	case 0x03:		/* Simultaneous internal/external (LCD/CRT) mode: */
		par->GeneralLockReg = 0x01;
		/* Don't program the VCLK when using the LCD. */
		par->ProgramVCLK = 0;
		break;
	}

	/*
	 * If the screen is to be stretched, turn on stretching for the
	 * various modes.
	 *
	 * OPTION_LCD_STRETCH means stretching should be turned off!
	 */
	par->PanelDispCntlReg2 = 0x00;
	par->PanelDispCntlReg3 = 0x00;

	if (par->lcd_stretch && (par->PanelDispCntlReg1 == 0x02) &&	/* LCD only */
	    (info->var.xres != par->NeoPanelWidth)) {
		switch (info->var.xres) {
		case 320:	/* Needs testing.  KEM -- 24 May 98 */
		case 400:	/* Needs testing.  KEM -- 24 May 98 */
		case 640:
		case 800:
		case 1024:
			lcd_stretch = 1;
			par->PanelDispCntlReg2 |= 0xC6;
			break;
		default:
			lcd_stretch = 0;
			/* No stretching in these modes. */
		}
	} else
		lcd_stretch = 0;

	/*
	 * If the screen is to be centered, turn on the centering for the
	 * various modes.
	 */
	par->PanelVertCenterReg1 = 0x00;
	par->PanelVertCenterReg2 = 0x00;
	par->PanelVertCenterReg3 = 0x00;
	par->PanelVertCenterReg4 = 0x00;
	par->PanelVertCenterReg5 = 0x00;
	par->PanelHorizCenterReg1 = 0x00;
	par->PanelHorizCenterReg2 = 0x00;
	par->PanelHorizCenterReg3 = 0x00;
	par->PanelHorizCenterReg4 = 0x00;
	par->PanelHorizCenterReg5 = 0x00;

	if (par->PanelDispCntlReg1 & 0x02) {
		if (info->var.xres == par->NeoPanelWidth) {
			/*
			 * No centering required when the requested display width
			 * equals the panel width.
			 */
		} else {
			par->PanelDispCntlReg2 |= 0x01;
			par->PanelDispCntlReg3 |= 0x10;

			/* Calculate the horizontal and vertical offsets. */
			if (!lcd_stretch) {
				hoffset =
				    ((par->NeoPanelWidth -
				      info->var.xres) >> 4) - 1;
				voffset =
				    ((par->NeoPanelHeight -
				      info->var.yres) >> 1) - 2;
			} else {
				/* Stretched modes cannot be centered. */
				hoffset = 0;
				voffset = 0;
			}

			/* Each mode uses a different pair of centering registers. */
			switch (info->var.xres) {
			case 320:	/* Needs testing.  KEM -- 24 May 98 */
				par->PanelHorizCenterReg3 = hoffset;
				par->PanelVertCenterReg2 = voffset;
				break;
			case 400:	/* Needs testing.  KEM -- 24 May 98 */
				par->PanelHorizCenterReg4 = hoffset;
				par->PanelVertCenterReg1 = voffset;
				break;
			case 640:
				par->PanelHorizCenterReg1 = hoffset;
				par->PanelVertCenterReg3 = voffset;
				break;
			case 800:
				par->PanelHorizCenterReg2 = hoffset;
				par->PanelVertCenterReg4 = voffset;
				break;
			case 1024:
				par->PanelHorizCenterReg5 = hoffset;
				par->PanelVertCenterReg5 = voffset;
				break;
			case 1280:
			default:
				/* No centering in these modes. */
				break;
			}
		}
	}

	par->biosMode =
	    neoFindMode(info->var.xres, info->var.yres,
			info->var.bits_per_pixel);

	/*
	 * Calculate the VCLK that most closely matches the requested dot
	 * clock.
	 */
	neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock));

	/* Since we program the clocks ourselves, always use VCLK3. */
	par->MiscOutReg |= 0x0C;

	/* already unlocked above */
	/* BOGUS vga_wgfx(NULL, 0x09, 0x26); */

	/* don't know what this is, but it's 0 from bootup anyway */
	vga_wgfx(NULL, 0x15, 0x00);

	/* was set to 0x01 by my bios in text and vesa modes */
	vga_wgfx(NULL, 0x0A, par->GeneralLockReg);

	/*
	 * The color mode needs to be set before calling vgaHWRestore
	 * to ensure the DAC is initialized properly.
	 *
	 * NOTE: Make sure we don't change bits make sure we don't change
	 * any reserved bits.
	 */
	temp = vga_rgfx(NULL, 0x90);
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		temp &= 0xF0;	/* Save bits 7:4 */
		temp |= (par->ExtColorModeSelect & ~0xF0);
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2160:
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		temp &= 0x70;	/* Save bits 6:4 */
		temp |= (par->ExtColorModeSelect & ~0x70);
		break;
	}

	vga_wgfx(NULL, 0x90, temp);

	/*
	 * In some rare cases a lockup might occur if we don't delay
	 * here. (Reported by Miles Lane)
	 */
	//mdelay(200);

	/*
	 * Disable horizontal and vertical graphics and text expansions so
	 * that vgaHWRestore works properly.
	 */
	temp = vga_rgfx(NULL, 0x25);
	temp &= 0x39;
	vga_wgfx(NULL, 0x25, temp);

	/*
	 * Sleep for 200ms to make sure that the two operations above have
	 * had time to take effect.
	 */
	mdelay(200);

	/*
	 * This function handles restoring the generic VGA registers. */
	vgaHWRestore(info, par);

	/* linear colormap for non palettized modes */
	switch (info->var.bits_per_pixel) {
	case 8:
		/* PseudoColor, 256 */
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;
	case 16:
		/* TrueColor, 64k: 5-bit red/blue components are scaled x2
		 * against the 6-bit green component. */
		info->fix.visual = FB_VISUAL_TRUECOLOR;

		for (i = 0; i < 64; i++) {
			outb(i, 0x3c8);

			outb(i << 1, 0x3c9);
			outb(i, 0x3c9);
			outb(i << 1, 0x3c9);
		}
		break;
	case 24:
#ifdef NO_32BIT_SUPPORT_YET
	case 32:
#endif
		/* TrueColor, 16m */
		info->fix.visual = FB_VISUAL_TRUECOLOR;

		for (i = 0; i < 256; i++) {
			outb(i, 0x3c8);

			outb(i, 0x3c9);
			outb(i, 0x3c9);
			outb(i, 0x3c9);
		}
		break;
	}

	vga_wgfx(NULL, 0x0E, par->ExtCRTDispAddr);
	vga_wgfx(NULL, 0x0F, par->ExtCRTOffset);
	temp = vga_rgfx(NULL, 0x10);
	temp &= 0x0F;		/* Save bits 3:0 */
	temp |= (par->SysIfaceCntl1 & ~0x0F);	/* VESA Bios sets bit 1! */
	vga_wgfx(NULL, 0x10, temp);

	vga_wgfx(NULL, 0x11, par->SysIfaceCntl2);
	vga_wgfx(NULL, 0x15, 0 /*par->SingleAddrPage */ );
	vga_wgfx(NULL, 0x16, 0 /*par->DualAddrPage */ );

	/* Panel display control: the writable bit mask differs per chip. */
	temp = vga_rgfx(NULL, 0x20);
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		temp &= 0xFC;	/* Save bits 7:2 */
		temp |= (par->PanelDispCntlReg1 & ~0xFC);
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2160:
		temp &= 0xDC;	/* Save bits 7:6,4:2 */
		temp |= (par->PanelDispCntlReg1 & ~0xDC);
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		temp &= 0x98;	/* Save bits 7,4:3 */
		temp |= (par->PanelDispCntlReg1 & ~0x98);
		break;
	}
	vga_wgfx(NULL, 0x20, temp);

	temp = vga_rgfx(NULL, 0x25);
	temp &= 0x38;		/* Save bits 5:3 */
	temp |= (par->PanelDispCntlReg2 & ~0x38);
	vga_wgfx(NULL, 0x25, temp);

	if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
		temp = vga_rgfx(NULL, 0x30);
		temp &= 0xEF;	/* Save bits 7:5 and bits 3:0 */
		temp |= (par->PanelDispCntlReg3 & ~0xEF);
		vga_wgfx(NULL, 0x30, temp);
	}

	vga_wgfx(NULL, 0x28, par->PanelVertCenterReg1);
	vga_wgfx(NULL, 0x29, par->PanelVertCenterReg2);
	vga_wgfx(NULL, 0x2a, par->PanelVertCenterReg3);

	if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
		vga_wgfx(NULL, 0x32, par->PanelVertCenterReg4);
		vga_wgfx(NULL, 0x33, par->PanelHorizCenterReg1);
		vga_wgfx(NULL, 0x34, par->PanelHorizCenterReg2);
		vga_wgfx(NULL, 0x35, par->PanelHorizCenterReg3);
	}

	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2160)
		vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);

	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
		vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);
		vga_wgfx(NULL, 0x37, par->PanelVertCenterReg5);
		vga_wgfx(NULL, 0x38, par->PanelHorizCenterReg5);
		/* These chips have the extra high numerator byte (GR 0x8F). */
		clock_hi = 1;
	}

	/* Program VCLK3 if needed. */
	if (par->ProgramVCLK && ((vga_rgfx(NULL, 0x9B) != par->VCLK3NumeratorLow)
				 || (vga_rgfx(NULL, 0x9F) != par->VCLK3Denominator)
				 || (clock_hi && ((vga_rgfx(NULL, 0x8F) & ~0x0f)
						  != (par->VCLK3NumeratorHigh &
						      ~0x0F))))) {
		vga_wgfx(NULL, 0x9B, par->VCLK3NumeratorLow);
		if (clock_hi) {
			temp = vga_rgfx(NULL, 0x8F);
			temp &= 0x0F;	/* Save bits 3:0 */
			temp |= (par->VCLK3NumeratorHigh & ~0x0F);
			vga_wgfx(NULL, 0x8F, temp);
		}
		vga_wgfx(NULL, 0x9F, par->VCLK3Denominator);
	}

	if (par->biosMode)
		vga_wcrt(NULL, 0x23, par->biosMode);

	vga_wgfx(NULL, 0x93, 0xc0);	/* Gives 5x faster framebuffer writes !!! */

	/* Program vertical extension register */
	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
		vga_wcrt(NULL, 0x70, par->VerticalExt);
	}

	vgaHWProtect(0);	/* Turn on screen */

	/* Calling this also locks offset registers required in update_start */
	neoLock(&par->state);

	info->fix.line_length =
	    info->var.xres_virtual * (info->var.bits_per_pixel >> 3);

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_accel_init(info, &info->var);
		break;
	default:
		break;
	}
	return 0;
}
/*
* Pan or Wrap the Display
*/
static int neofb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct neofb_par *par = info->par;
struct vgastate *state = &par->state;
int oldExtCRTDispAddr;
int Base;
DBG("neofb_update_start");
Base = (var->yoffset * var->xres_virtual + var->xoffset) >> 2;
Base *= (var->bits_per_pixel + 7) / 8;
neoUnlock();
/*
* These are the generic starting address registers.
*/
vga_wcrt(state->vgabase, 0x0C, (Base & 0x00FF00) >> 8);
vga_wcrt(state->vgabase, 0x0D, (Base & 0x00FF));
/*
* Make sure we don't clobber some other bits that might already
* have been set. NOTE: NM2200 has a writable bit 3, but it shouldn't
* be needed.
*/
oldExtCRTDispAddr = vga_rgfx(NULL, 0x0E);
vga_wgfx(state->vgabase, 0x0E, (((Base >> 16) & 0x0f) | (oldExtCRTDispAddr & 0xf0)));
neoLock(state);
return 0;
}
/*
 * Set one colormap entry.  Palettized (<= 8bpp) modes program the DAC
 * directly (6 bits per component); truecolor modes fill the first 16
 * pseudo-palette slots used by the drawing routines.  Returns -EINVAL
 * for an out-of-range register, 1 for an unsupported depth.
 */
static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *fb)
{
	u32 *palette = fb->pseudo_palette;

	if (regno >= fb->cmap.len || regno > 255)
		return -EINVAL;

	if (fb->var.bits_per_pixel <= 8) {
		outb(regno, 0x3c8);

		outb(red >> 10, 0x3c9);
		outb(green >> 10, 0x3c9);
		outb(blue >> 10, 0x3c9);
		return 0;
	}

	if (regno >= 16)
		return 0;

	switch (fb->var.bits_per_pixel) {
	case 16:
		/* Pack 16-bit components into 5-6-5. */
		palette[regno] = ((red & 0xf800)) |
			((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		break;
	case 24:
		palette[regno] = ((red & 0xff00) << 8) |
			((green & 0xff00)) | ((blue & 0xff00) >> 8);
		break;
#ifdef NO_32BIT_SUPPORT_YET
	case 32:
		palette[regno] = ((transp & 0xff00) << 16) |
			((red & 0xff00) << 8) | ((green & 0xff00)) |
			((blue & 0xff00) >> 8);
		break;
#endif
	default:
		return 1;
	}

	return 0;
}
/*
* (Un)Blank the display.
*/
static int neofb_blank(int blank_mode, struct fb_info *info)
{
	/*
	 * Blank the screen if blank_mode != 0, else unblank.
	 * Return 0 if blanking succeeded, != 0 if un-/blanking failed due to
	 * e.g. a video mode which doesn't support it. Implements VESA suspend
	 * and powerdown modes for monitors, and backlight control on LCDs.
	 * blank_mode == 0: unblanked (backlight on)
	 * blank_mode == 1: blank (backlight on)
	 * blank_mode == 2: suspend vsync (backlight off)
	 * blank_mode == 3: suspend hsync (backlight off)
	 * blank_mode == 4: powerdown (backlight off)
	 *
	 * wms...Enable VESA DPMS compatible powerdown mode
	 * run "setterm -powersave powerdown" to take advantage
	 */
	struct neofb_par *par = info->par;
	int seqflags, lcdflags, dpmsflags, reg, tmpdisp;

	/*
	 * Read back the register bits related to display configuration. They might
	 * have been changed underneath the driver via Fn key stroke.
	 */
	neoUnlock();
	tmpdisp = vga_rgfx(NULL, 0x20) & 0x03;
	neoLock(&par->state);

	/* In case we blank the screen, we want to store the possibly new
	 * configuration in the driver. During un-blank, we re-apply this setting,
	 * since the LCD bit will be cleared in order to switch off the backlight.
	 */
	if (par->PanelDispCntlRegRead)
		par->PanelDispCntlReg1 = tmpdisp;
	par->PanelDispCntlRegRead = !blank_mode;

	switch (blank_mode) {
	case FB_BLANK_POWERDOWN:	/* powerdown - both sync lines down */
		seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_HSYNC |
			    NEO_GR01_SUPPRESS_VSYNC;
#ifdef CONFIG_TOSHIBA
		/* Do we still need this ? */
		/* attempt to turn off backlight on toshiba; also turns off external */
		{
			SMMRegisters regs;

			regs.eax = 0xff00; /* HCI_SET */
			regs.ebx = 0x0002; /* HCI_BACKLIGHT */
			regs.ecx = 0x0000; /* HCI_DISABLE */
			/* BUGFIX: was "tosh_smm(®s);" - HTML-entity mojibake
			 * for "&regs" that would not compile. */
			tosh_smm(&regs);
		}
#endif
		break;
	case FB_BLANK_HSYNC_SUSPEND:	/* hsync off */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_HSYNC;
		break;
	case FB_BLANK_VSYNC_SUSPEND:	/* vsync off */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_VSYNC;
		break;
	case FB_BLANK_NORMAL:		/* just blank screen (backlight stays on) */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		/*
		 * During a blank operation with the LID shut, we might store "LCD off"
		 * by mistake. Due to timing issues, the BIOS may switch the lights
		 * back on, and we turn it back off once we "unblank".
		 *
		 * So here is an attempt to implement ">=" - if we are in the process
		 * of unblanking, and the LCD bit is unset in the driver but set in the
		 * register, we must keep it.
		 */
		lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
		dpmsflags = 0x00;	/* no hsync/vsync suppression */
		break;
	case FB_BLANK_UNBLANK:		/* unblank */
		seqflags = 0;		/* Enable sequencer */
		lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
		dpmsflags = 0x00;	/* no hsync/vsync suppression */
#ifdef CONFIG_TOSHIBA
		/* Do we still need this ? */
		/* attempt to re-enable backlight/external on toshiba */
		{
			SMMRegisters regs;

			regs.eax = 0xff00; /* HCI_SET */
			regs.ebx = 0x0002; /* HCI_BACKLIGHT */
			regs.ecx = 0x0001; /* HCI_ENABLE */
			/* BUGFIX: same "®s" mojibake as above. */
			tosh_smm(&regs);
		}
#endif
		break;
	default:	/* Anything else we don't understand; return 1 to tell
			 * fb_blank we didn't actually do anything */
		return 1;
	}

	neoUnlock();
	reg = (vga_rseq(NULL, 0x01) & ~0x20) | seqflags;
	vga_wseq(NULL, 0x01, reg);
	reg = (vga_rgfx(NULL, 0x20) & ~0x02) | lcdflags;
	vga_wgfx(NULL, 0x20, reg);
	reg = (vga_rgfx(NULL, 0x01) & ~0xF0) | 0x80 | dpmsflags;
	vga_wgfx(NULL, 0x01, reg);
	neoLock(&par->state);
	return 0;
}
/*
 * Accelerated solid fill on the 2200-family blitter: program the raster
 * op, fill color, destination byte offset and extent, in that order.
 */
static void
neo2200_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct neofb_par *par = info->par;
	u_long dst, rop;

	/* Destination offset in pixels; a nonzero rop selects the alternate
	 * raster operation (0x06), otherwise a plain copy (0x0c). */
	dst = rect->dx + rect->dy * info->var.xres_virtual;
	rop = rect->rop ? 0x060000 : 0x0c0000;

	neo2200_wait_fifo(info, 4);

	/* set blt control */
	writel(NEO_BC3_FIFO_EN |
	       NEO_BC0_SRC_IS_FG | NEO_BC3_SKIP_MAPPING |
	       //               NEO_BC3_DST_XY_ADDR |
	       //               NEO_BC3_SRC_XY_ADDR |
	       rop, &par->neo2200->bltCntl);

	switch (info->var.bits_per_pixel) {
	case 8:
		writel(rect->color, &par->neo2200->fgColor);
		break;
	case 16:
	case 24:
		/* Truecolor modes use the pseudo-palette entry. */
		writel(((u32 *) (info->pseudo_palette))[rect->color],
		       &par->neo2200->fgColor);
		break;
	}

	writel(dst * ((info->var.bits_per_pixel + 7) >> 3),
	       &par->neo2200->dstStart);
	writel((rect->height << 16) | (rect->width & 0xffff),
	       &par->neo2200->xyExt);
}
/*
 * Accelerated screen-to-screen copy.  When the destination lies after
 * the source in scan order, copy backwards from the lower-right corner
 * so overlapping regions are handled correctly.
 */
static void
neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy;
	struct neofb_par *par = info->par;
	u_long src, dst, bltCntl;

	bltCntl = NEO_BC3_FIFO_EN | NEO_BC3_SKIP_MAPPING | 0x0C0000;

	if ((dy > sy) || ((dy == sy) && (dx > sx))) {
		/* Start with the lower right corner */
		sy += (area->height - 1);
		dy += (area->height - 1);
		sx += (area->width - 1);
		dx += (area->width - 1);

		bltCntl |= NEO_BC0_X_DEC | NEO_BC0_DST_Y_DEC | NEO_BC0_SRC_Y_DEC;
	}

	/* Convert pixel coordinates to byte offsets. */
	src = sx * (info->var.bits_per_pixel >> 3) + sy*info->fix.line_length;
	dst = dx * (info->var.bits_per_pixel >> 3) + dy*info->fix.line_length;

	neo2200_wait_fifo(info, 4);

	/* set blt control */
	writel(bltCntl, &par->neo2200->bltCntl);

	writel(src, &par->neo2200->srcStart);
	writel(dst, &par->neo2200->dstStart);
	writel((area->height << 16) | (area->width & 0xffff),
	       &par->neo2200->xyExt);
}
/*
 * Accelerated host-to-screen image transfer.  Monochrome (depth 1)
 * images are colour-expanded by the engine; images matching the display
 * depth are copied directly; everything else falls back to cfb.  The
 * pixel data is streamed through the MMIO window at offset 0x100000.
 */
static void
neo2200_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct neofb_par *par = info->par;
	int s_pitch = (image->width * image->depth + 7) >> 3;
	int scan_align = info->pixmap.scan_align - 1;
	int buf_align = info->pixmap.buf_align - 1;
	int bltCntl_flags, d_pitch, data_len;

	// The data is padded for the hardware
	d_pitch = (s_pitch + scan_align) & ~scan_align;
	data_len = ((d_pitch * image->height) + buf_align) & ~buf_align;

	neo2200_sync(info);

	if (image->depth == 1) {
		if (info->var.bits_per_pixel == 24 && image->width < 16) {
			/* FIXME. There is a bug with accelerated color-expanded
			 * transfers in 24 bit mode if the image being transferred
			 * is less than 16 bits wide. This is due to insufficient
			 * padding when writing the image. We need to adjust
			 * struct fb_pixmap. Not yet done. */
			cfb_imageblit(info, image);
			return;
		}
		bltCntl_flags = NEO_BC0_SRC_MONO;
	} else if (image->depth == info->var.bits_per_pixel) {
		bltCntl_flags = 0;
	} else {
		/* We don't currently support hardware acceleration if image
		 * depth is different from display */
		cfb_imageblit(info, image);
		return;
	}

	/* fg/bg colors are only used by the mono colour-expansion path. */
	switch (info->var.bits_per_pixel) {
	case 8:
		writel(image->fg_color, &par->neo2200->fgColor);
		writel(image->bg_color, &par->neo2200->bgColor);
		break;
	case 16:
	case 24:
		writel(((u32 *) (info->pseudo_palette))[image->fg_color],
		       &par->neo2200->fgColor);
		writel(((u32 *) (info->pseudo_palette))[image->bg_color],
		       &par->neo2200->bgColor);
		break;
	}

	writel(NEO_BC0_SYS_TO_VID |
	       NEO_BC3_SKIP_MAPPING | bltCntl_flags |
	       // NEO_BC3_DST_XY_ADDR |
	       0x0c0000, &par->neo2200->bltCntl);

	writel(0, &par->neo2200->srcStart);
	//      par->neo2200->dstStart = (image->dy << 16) | (image->dx & 0xffff);
	writel(((image->dx & 0xffff) * (info->var.bits_per_pixel >> 3) +
		image->dy * info->fix.line_length), &par->neo2200->dstStart);
	writel((image->height << 16) | (image->width & 0xffff),
	       &par->neo2200->xyExt);

	memcpy_toio(par->mmio_vbase + 0x100000, image->data, data_len);
}
static void
neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_fillrect(info, rect);
break;
default:
cfb_fillrect(info, rect);
break;
}
}
static void
neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_copyarea(info, area);
break;
default:
cfb_copyarea(info, area);
break;
}
}
static void
neofb_imageblit(struct fb_info *info, const struct fb_image *image)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_imageblit(info, image);
break;
default:
cfb_imageblit(info, image);
break;
}
}
static int
neofb_sync(struct fb_info *info)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_sync(info);
break;
default:
break;
}
return 0;
}
/*
static void
neofb_draw_cursor(struct fb_info *info, u8 *dst, u8 *src, unsigned int width)
{
//memset_io(info->sprite.addr, 0xff, 1);
}
static int
neofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct neofb_par *par = (struct neofb_par *) info->par;
* Disable cursor *
write_le32(NEOREG_CURSCNTL, ~NEO_CURS_ENABLE, par);
if (cursor->set & FB_CUR_SETPOS) {
u32 x = cursor->image.dx;
u32 y = cursor->image.dy;
info->cursor.image.dx = x;
info->cursor.image.dy = y;
write_le32(NEOREG_CURSX, x, par);
write_le32(NEOREG_CURSY, y, par);
}
if (cursor->set & FB_CUR_SETSIZE) {
info->cursor.image.height = cursor->image.height;
info->cursor.image.width = cursor->image.width;
}
if (cursor->set & FB_CUR_SETHOT)
info->cursor.hot = cursor->hot;
if (cursor->set & FB_CUR_SETCMAP) {
if (cursor->image.depth == 1) {
u32 fg = cursor->image.fg_color;
u32 bg = cursor->image.bg_color;
info->cursor.image.fg_color = fg;
info->cursor.image.bg_color = bg;
fg = ((fg & 0xff0000) >> 16) | ((fg & 0xff) << 16) | (fg & 0xff00);
bg = ((bg & 0xff0000) >> 16) | ((bg & 0xff) << 16) | (bg & 0xff00);
write_le32(NEOREG_CURSFGCOLOR, fg, par);
write_le32(NEOREG_CURSBGCOLOR, bg, par);
}
}
if (cursor->set & FB_CUR_SETSHAPE)
fb_load_cursor_image(info);
if (info->cursor.enable)
write_le32(NEOREG_CURSCNTL, NEO_CURS_ENABLE, par);
return 0;
}
*/
/* fb_ops dispatch table wiring the frame buffer core to the neofb
 * implementations above.  The drawing hooks themselves decide between
 * the hardware engine and the cfb_* software fallbacks. */
static struct fb_ops neofb_ops = {
.owner = THIS_MODULE,
.fb_open = neofb_open,
.fb_release = neofb_release,
.fb_check_var = neofb_check_var,
.fb_set_par = neofb_set_par,
.fb_setcolreg = neofb_setcolreg,
.fb_pan_display = neofb_pan_display,
.fb_blank = neofb_blank,
.fb_sync = neofb_sync,
.fb_fillrect = neofb_fillrect,
.fb_copyarea = neofb_copyarea,
.fb_imageblit = neofb_imageblit,
};
/* --------------------------------------------------------------------- */
/* 800x480 panel timing used by Libretto laptops (selected via the
 * "libretto" option); not part of the standard VESA mode table. */
static struct fb_videomode __devinitdata mode800x480 = {
.xres = 800,
.yres = 480,
.pixclock = 25000,
.left_margin = 88,
.right_margin = 40,
.upper_margin = 23,
.lower_margin = 1,
.hsync_len = 128,
.vsync_len = 4,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.vmode = FB_VMODE_NONINTERLACED
};
/*
 * neo_map_mmio - locate, reserve and ioremap the chip's register aperture.
 *
 * The register block lives at a chip-dependent place: NM2070 at a fixed
 * offset into BAR 0, NM2090/NM2093 at a larger offset into BAR 0, the
 * later chips in BAR 1.  On success par->mmio_vbase holds the mapping.
 *
 * Returns 0 on success, -EBUSY if the region is already claimed, or
 * -ENOMEM if the ioremap fails.
 *
 * Fix: the two failure messages were logged without a printk level;
 * they now carry KERN_ERR so they are not silently demoted.
 */
static int __devinit neo_map_mmio(struct fb_info *info,
				  struct pci_dev *dev)
{
	struct neofb_par *par = info->par;

	DBG("neo_map_mmio");

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		info->fix.mmio_start = pci_resource_start(dev, 0) +
			0x100000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
		info->fix.mmio_start = pci_resource_start(dev, 0) +
			0x200000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2160:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		info->fix.mmio_start = pci_resource_start(dev, 1);
		break;
	default:
		info->fix.mmio_start = pci_resource_start(dev, 0);
	}
	info->fix.mmio_len = MMIO_SIZE;

	if (!request_mem_region
	    (info->fix.mmio_start, MMIO_SIZE, "memory mapped I/O")) {
		printk(KERN_ERR "neofb: memory mapped IO in use\n");
		return -EBUSY;
	}

	par->mmio_vbase = ioremap(info->fix.mmio_start, MMIO_SIZE);
	if (!par->mmio_vbase) {
		printk(KERN_ERR "neofb: unable to map memory mapped IO\n");
		release_mem_region(info->fix.mmio_start,
				   info->fix.mmio_len);
		return -ENOMEM;
	} else
		printk(KERN_INFO "neofb: mapped io at %p\n",
		       par->mmio_vbase);
	return 0;
}
/* Undo neo_map_mmio(): tear down the register mapping and release the
 * reserved memory region. */
static void neo_unmap_mmio(struct fb_info *info)
{
struct neofb_par *par = info->par;
DBG("neo_unmap_mmio");
iounmap(par->mmio_vbase);
par->mmio_vbase = NULL;
release_mem_region(info->fix.mmio_start,
info->fix.mmio_len);
}
/*
 * neo_map_video - reserve and ioremap the frame buffer memory (BAR 0).
 *
 * @video_len: amount of video RAM in bytes, as reported by neo_init_hw().
 *
 * Also registers a write-combining MTRR over the aperture when MTRR
 * support is built in, and clears the frame buffer (it comes up all
 * white after boot).
 *
 * Returns 0 on success, -EBUSY if the region is claimed, -ENOMEM if the
 * ioremap fails.
 *
 * Fix: the two failure messages were logged without a printk level;
 * they now carry KERN_ERR.
 */
static int __devinit neo_map_video(struct fb_info *info,
				   struct pci_dev *dev, int video_len)
{
	//unsigned long addr;

	DBG("neo_map_video");

	info->fix.smem_start = pci_resource_start(dev, 0);
	info->fix.smem_len = video_len;

	if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
				"frame buffer")) {
		printk(KERN_ERR "neofb: frame buffer in use\n");
		return -EBUSY;
	}

	info->screen_base =
	    ioremap(info->fix.smem_start, info->fix.smem_len);
	if (!info->screen_base) {
		printk(KERN_ERR "neofb: unable to map screen memory\n");
		release_mem_region(info->fix.smem_start,
				   info->fix.smem_len);
		return -ENOMEM;
	} else
		printk(KERN_INFO "neofb: mapped framebuffer at %p\n",
		       info->screen_base);

#ifdef CONFIG_MTRR
	((struct neofb_par *)(info->par))->mtrr =
		mtrr_add(info->fix.smem_start, pci_resource_len(dev, 0),
			 MTRR_TYPE_WRCOMB, 1);
#endif

	/* Clear framebuffer, it's all white in memory after boot */
	memset_io(info->screen_base, 0, info->fix.smem_len);

	/* Allocate Cursor drawing pad.
	info->fix.smem_len -= PAGE_SIZE;
	addr = info->fix.smem_start + info->fix.smem_len;
	write_le32(NEOREG_CURSMEMPOS, ((0x000f & (addr >> 10)) << 8) |
					((0x0ff0 & (addr >> 10)) >> 4), par);
	addr = (unsigned long) info->screen_base + info->fix.smem_len;
	info->sprite.addr = (u8 *) addr; */
	return 0;
}
/* Undo neo_map_video(): drop the MTRR (if any), unmap the frame buffer
 * and release the reserved memory region. */
static void neo_unmap_video(struct fb_info *info)
{
DBG("neo_unmap_video");
#ifdef CONFIG_MTRR
{
struct neofb_par *par = info->par;
mtrr_del(par->mtrr, info->fix.smem_start,
info->fix.smem_len);
}
#endif
iounmap(info->screen_base);
info->screen_base = NULL;
release_mem_region(info->fix.smem_start,
info->fix.smem_len);
}
/*
 * neo_scan_monitor - detect the attached panel and build a one-entry
 * mode database for it.
 *
 * Reads the extended graphics registers (unlocked via GR09) to determine
 * the panel type, whether the internal and/or external display is active,
 * and the panel resolution, then copies the matching fb_videomode into a
 * freshly allocated info->monspecs.modedb.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 for panels we
 * do not support yet (1280x1024).
 *
 * Fix: the unsupported-panel error path returned without freeing the
 * modedb allocation made at the top of the function; the caller's
 * err_scan_monitor unwind never frees it, so it leaked.
 */
static int __devinit neo_scan_monitor(struct fb_info *info)
{
	struct neofb_par *par = info->par;
	unsigned char type, display;
	int w;

	// Eventually we will have i2c support.
	info->monspecs.modedb = kmalloc(sizeof(struct fb_videomode), GFP_KERNEL);
	if (!info->monspecs.modedb)
		return -ENOMEM;
	info->monspecs.modedb_len = 1;

	/* Determine the panel type */
	vga_wgfx(NULL, 0x09, 0x26);
	type = vga_rgfx(NULL, 0x21);
	display = vga_rgfx(NULL, 0x20);

	if (!par->internal_display && !par->external_display) {
		par->internal_display = display & 2 || !(display & 3) ? 1 : 0;
		par->external_display = display & 1;
		printk(KERN_INFO "Autodetected %s display\n",
		       par->internal_display && par->external_display ? "simultaneous" :
		       par->internal_display ? "internal" : "external");
	}

	/* Determine panel width -- used in NeoValidMode. */
	w = vga_rgfx(NULL, 0x20);
	vga_wgfx(NULL, 0x09, 0x00);
	switch ((w & 0x18) >> 3) {
	case 0x00:
		// 640x480@60
		par->NeoPanelWidth = 640;
		par->NeoPanelHeight = 480;
		memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode));
		break;
	case 0x01:
		par->NeoPanelWidth = 800;
		if (par->libretto) {
			par->NeoPanelHeight = 480;
			memcpy(info->monspecs.modedb, &mode800x480, sizeof(struct fb_videomode));
		} else {
			// 800x600@60
			par->NeoPanelHeight = 600;
			memcpy(info->monspecs.modedb, &vesa_modes[8], sizeof(struct fb_videomode));
		}
		break;
	case 0x02:
		// 1024x768@60
		par->NeoPanelWidth = 1024;
		par->NeoPanelHeight = 768;
		memcpy(info->monspecs.modedb, &vesa_modes[13], sizeof(struct fb_videomode));
		break;
	case 0x03:
		/* 1280x1024@60 panel support needs to be added */
#ifdef NOT_DONE
		par->NeoPanelWidth = 1280;
		par->NeoPanelHeight = 1024;
		memcpy(info->monspecs.modedb, &vesa_modes[20], sizeof(struct fb_videomode));
		break;
#else
		printk(KERN_ERR
		       "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
		/* don't leak the mode database allocated above */
		kfree(info->monspecs.modedb);
		info->monspecs.modedb = NULL;
		return -1;
#endif
	default:
		// 640x480@60
		par->NeoPanelWidth = 640;
		par->NeoPanelHeight = 480;
		memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode));
		break;
	}

	printk(KERN_INFO "Panel is a %dx%d %s %s display\n",
	       par->NeoPanelWidth,
	       par->NeoPanelHeight,
	       (type & 0x02) ? "color" : "monochrome",
	       (type & 0x10) ? "TFT" : "dual scan");
	return 0;
}
/* neo_init_hw - unlock the chip and derive per-chip limits.
 *
 * Sets par->maxClock and par->cursorOff from the chip generation and, for
 * the NM2200 family, points par->neo2200 at the already-mapped register
 * aperture.  Returns the amount of video RAM in bytes (videoRam * 1024);
 * never fails. */
static int __devinit neo_init_hw(struct fb_info *info)
{
struct neofb_par *par = info->par;
int videoRam = 896;
int maxClock = 65000;
int CursorMem = 1024;
int CursorOff = 0x100;
DBG("neo_init_hw");
neoUnlock();
#if 0
printk(KERN_DEBUG "--- Neo extended register dump ---\n");
for (int w = 0; w < 0x85; w++)
printk(KERN_DEBUG "CR %p: %p\n", (void *) w,
(void *) vga_rcrt(NULL, w));
for (int w = 0; w < 0xC7; w++)
printk(KERN_DEBUG "GR %p: %p\n", (void *) w,
(void *) vga_rgfx(NULL, w));
#endif
/* Video RAM size and maximum pixel clock per chip. */
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
videoRam = 896;
maxClock = 65000;
break;
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
case FB_ACCEL_NEOMAGIC_NM2097:
videoRam = 1152;
maxClock = 80000;
break;
case FB_ACCEL_NEOMAGIC_NM2160:
videoRam = 2048;
maxClock = 90000;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
videoRam = 2560;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
videoRam = 3008;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
videoRam = 4096;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
videoRam = 6144;
maxClock = 110000;
break;
}
/* Cursor memory layout per chip; the 2200 family also gets its
 * acceleration register block pointer set here. */
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
CursorMem = 2048;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
CursorMem = 1024;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
CursorMem = 1024;
CursorOff = 0x1000;
par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
break;
}
/*
info->sprite.size = CursorMem;
info->sprite.scan_align = 1;
info->sprite.buf_align = 1;
info->sprite.flags = FB_PIXMAP_IO;
info->sprite.outbuf = neofb_draw_cursor;
*/
par->maxClock = maxClock;
par->cursorOff = CursorOff;
return videoRam * 1024;
}
/*
 * neo_alloc_fb_info - allocate and pre-populate the fb_info/neofb_par pair.
 *
 * Copies the module parameters into the private data, names the device
 * after the detected chip, and enables the hardware acceleration flags
 * for the NM2200 family.  Returns NULL on allocation failure; callers
 * free with neo_free_fb_info().
 *
 * Cleanup: info->fix.accel was assigned id->driver_data twice (once at
 * the top, once at the bottom) -- the duplicate is gone; the identical
 * HWACCEL flag triple repeated in four switch cases is now set once.
 */
static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const struct
						   pci_device_id *id)
{
	struct fb_info *info;
	struct neofb_par *par;

	info = framebuffer_alloc(sizeof(struct neofb_par), &dev->dev);
	if (!info)
		return NULL;

	par = info->par;

	info->fix.accel = id->driver_data;

	/* Module/boot options. */
	par->pci_burst = !nopciburst;
	par->lcd_stretch = !nostretch;
	par->libretto = libretto;
	par->internal_display = internal;
	par->external_display = external;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 128");
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 128V");
		break;
	case FB_ACCEL_NEOMAGIC_NM2093:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 128ZV");
		break;
	case FB_ACCEL_NEOMAGIC_NM2097:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 128ZV+");
		break;
	case FB_ACCEL_NEOMAGIC_NM2160:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 128XD");
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 256AV");
		break;
	case FB_ACCEL_NEOMAGIC_NM2230:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 256AV+");
		break;
	case FB_ACCEL_NEOMAGIC_NM2360:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 256ZX");
		break;
	case FB_ACCEL_NEOMAGIC_NM2380:
		snprintf(info->fix.id, sizeof(info->fix.id),
			 "MagicGraph 256XL+");
		break;
	}

	/* Only the NM2200 family has the 2D engine this driver drives. */
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
			FBINFO_HWACCEL_COPYAREA |
			FBINFO_HWACCEL_FILLRECT;
		break;
	}

	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 0;
	info->fix.ypanstep = 4;
	info->fix.ywrapstep = 0;

	info->fbops = &neofb_ops;
	info->pseudo_palette = par->palette;
	return info;
}
/* Release an fb_info obtained from neo_alloc_fb_info(); safe on NULL. */
static void neo_free_fb_info(struct fb_info *info)
{
if (info) {
/*
* Free the colourmap
*/
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
}
/* --------------------------------------------------------------------- */
/*
 * neofb_probe - PCI probe: bring up one NeoMagic adapter end to end.
 *
 * Enables the device, allocates the fb_info, maps MMIO and video memory,
 * detects the panel, selects a video mode and registers the frame buffer.
 * All failures unwind through the goto ladder at the bottom.
 *
 * Bug fix: the fb_find_mode and fb_alloc_cmap failure paths jumped to
 * the cleanup ladder with err still holding 0 from the last successful
 * call, so probe reported success after failing.  err is now set to a
 * proper negative errno on both paths.
 */
static int __devinit neofb_probe(struct pci_dev *dev,
				 const struct pci_device_id *id)
{
	struct fb_info *info;
	u_int h_sync, v_sync;
	int video_len, err;

	DBG("neofb_probe");

	err = pci_enable_device(dev);
	if (err)
		return err;

	err = -ENOMEM;
	info = neo_alloc_fb_info(dev, id);
	if (!info)
		return err;

	err = neo_map_mmio(info, dev);
	if (err)
		goto err_map_mmio;

	err = neo_scan_monitor(info);
	if (err)
		goto err_scan_monitor;

	video_len = neo_init_hw(info);
	if (video_len < 0) {
		err = video_len;
		goto err_init_hw;
	}

	err = neo_map_video(info, dev, video_len);
	if (err)
		goto err_init_hw;

	if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
			  info->monspecs.modedb, 16)) {
		printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
		err = -EINVAL;
		goto err_map_video;
	}

	/*
	 * Calculate the hsync and vsync frequencies.  Note that
	 * we split the 1e12 constant up so that we can preserve
	 * the precision and fit the results into 32-bit registers.
	 *  (1953125000 * 512 = 1e12)
	 */
	h_sync = 1953125000 / info->var.pixclock;
	h_sync =
	    h_sync * 512 / (info->var.xres + info->var.left_margin +
			    info->var.right_margin + info->var.hsync_len);
	v_sync =
	    h_sync / (info->var.yres + info->var.upper_margin +
		      info->var.lower_margin + info->var.vsync_len);

	printk(KERN_INFO "neofb v" NEOFB_VERSION
	       ": %dkB VRAM, using %dx%d, %d.%03dkHz, %dHz\n",
	       info->fix.smem_len >> 10, info->var.xres,
	       info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);

	err = fb_alloc_cmap(&info->cmap, 256, 0);
	if (err < 0)
		goto err_map_video;

	err = register_framebuffer(info);
	if (err < 0)
		goto err_reg_fb;

	printk(KERN_INFO "fb%d: %s frame buffer device\n",
	       info->node, info->fix.id);

	/*
	 * Our driver data
	 */
	pci_set_drvdata(dev, info);
	return 0;

err_reg_fb:
	fb_dealloc_cmap(&info->cmap);
err_map_video:
	neo_unmap_video(info);
err_init_hw:
	fb_destroy_modedb(info->monspecs.modedb);
err_scan_monitor:
	neo_unmap_mmio(info);
err_map_mmio:
	neo_free_fb_info(info);
	return err;
}
/* PCI remove: unregister the frame buffer and unwind everything probe
 * set up, in reverse order. */
static void __devexit neofb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
DBG("neofb_remove");
if (info) {
/*
* If unregister_framebuffer fails, then
* we will be leaving hooks that could cause
* oopsen laying around.
*/
if (unregister_framebuffer(info))
printk(KERN_WARNING
"neofb: danger danger! Oopsen imminent!\n");
neo_unmap_video(info);
fb_destroy_modedb(info->monspecs.modedb);
neo_unmap_mmio(info);
neo_free_fb_info(info);
/*
* Ensure that the driver data is no longer
* valid.
*/
pci_set_drvdata(dev, NULL);
}
}
/* PCI IDs we bind to; driver_data carries the FB_ACCEL_* chip identifier
 * that the rest of the driver switches on. */
static struct pci_device_id neofb_devices[] = {
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2090,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2090},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2093,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2093},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2097,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2097},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2160},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2200},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2230,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2230},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2360,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2360},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2380,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2380},
{0, 0, 0, 0, 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, neofb_devices);
/* PCI driver glue: registered from neofb_init(). */
static struct pci_driver neofb_driver = {
.name = "neofb",
.id_table = neofb_devices,
.probe = neofb_probe,
.remove = __devexit_p(neofb_remove)
};
/* ************************* init in-kernel code ************************** */
#ifndef MODULE
/*
 * Parse the "video=neofb:..." command-line options.  Known flags set
 * their module-parameter globals; anything unrecognised is treated as
 * the video mode string.
 */
static int __init neofb_setup(char *options)
{
	char *opt;

	DBG("neofb_setup");

	if (!options || !*options)
		return 0;

	while ((opt = strsep(&options, ",")) != NULL) {
		if (!*opt)
			continue;

		if (!strncmp(opt, "libretto", 8))
			libretto = 1;
		else if (!strncmp(opt, "nostretch", 9))
			nostretch = 1;
		else if (!strncmp(opt, "nopciburst", 10))
			nopciburst = 1;
		else if (!strncmp(opt, "internal", 8))
			internal = 1;
		else if (!strncmp(opt, "external", 8))
			external = 1;
		else
			mode_option = opt;
	}
	return 0;
}
#endif /* MODULE */
/* Module init: pick up boot options when built in, then register the
 * PCI driver. */
static int __init neofb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("neofb", &option))
return -ENODEV;
neofb_setup(option);
#endif
return pci_register_driver(&neofb_driver);
}
module_init(neofb_init);
#ifdef MODULE
/* Module exit: unregister the PCI driver (modular builds only). */
static void __exit neofb_exit(void)
{
pci_unregister_driver(&neofb_driver);
}
module_exit(neofb_exit);
#endif /* MODULE */
| gpl-2.0 |
houst0nn/android_kernel_lge_geeb | drivers/pci/rom.c | 4724 | 7450 | /*
* drivers/pci/rom.c
*
* (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
* (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
*
* PCI ROM access routines
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "pci.h"
/**
* pci_enable_rom - enable ROM decoding for a PCI device
* @pdev: PCI device to enable
*
* Enable ROM decoding on @dev. This involves simply turning on the last
* bit of the PCI ROM BAR. Note that some cards may share address decoders
* between the ROM and other resources, so enabling it may disable access
* to MMIO registers or other card memory.
*/
/*
 * Turn on the enable bit of the ROM BAR so the expansion ROM decodes.
 * Returns 0 on success, -1 when the device has no ROM resource.
 *
 * Fix: "&region" had been mangled into the mojibake "(R)ion"
 * (an HTML-entity corruption of the address-of expression), which does
 * not compile; restored the correct argument.
 */
int pci_enable_rom(struct pci_dev *pdev)
{
	struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
	struct pci_bus_region region;
	u32 rom_addr;

	if (!res->flags)
		return -1;

	/* Program the BAR with the bus address, then set the enable bit. */
	pcibios_resource_to_bus(pdev, &region, res);
	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
	rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
	pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
	return 0;
}
/**
* pci_disable_rom - disable ROM decoding for a PCI device
* @pdev: PCI device to disable
*
* Disable ROM decoding on a PCI device by turning off the last bit in the
* ROM BAR.
*/
/*
 * Clear the enable bit in the ROM BAR so the expansion ROM no longer
 * decodes; the address bits are left untouched.
 */
void pci_disable_rom(struct pci_dev *pdev)
{
	u32 bar;

	pci_read_config_dword(pdev, pdev->rom_base_reg, &bar);
	pci_write_config_dword(pdev, pdev->rom_base_reg,
			       bar & ~PCI_ROM_ADDRESS_ENABLE);
}
/**
* pci_get_rom_size - obtain the actual size of the ROM image
* @pdev: target PCI device
* @rom: kernel virtual pointer to image of ROM
* @size: size of PCI window
* return: size of actual ROM image
*
* Determine the actual length of the ROM image.
* The PCI window size could be much larger than the
* actual image size.
*/
/* Walk the chain of ROM images (each starts with the 55 AA signature and
 * carries a PCI data structure whose "PCIR" block gives the image length
 * in 512-byte units) and sum their sizes.  The walk stops at the image
 * flagged as last, at the first malformed header, or implicitly at the
 * window size cap applied on return. */
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;
image = rom;
do {
void __iomem *pds;
/* Standard PCI ROMs start out with these bytes 55 AA */
if (readb(image) != 0x55) {
dev_err(&pdev->dev, "Invalid ROM contents\n");
break;
}
if (readb(image + 1) != 0xAA)
break;
/* get the PCI data structure and check its signature */
pds = image + readw(image + 24);
if (readb(pds) != 'P')
break;
if (readb(pds + 1) != 'C')
break;
if (readb(pds + 2) != 'I')
break;
if (readb(pds + 3) != 'R')
break;
/* bit 7 of the indicator byte marks the last image in the chain */
last_image = readb(pds + 21) & 0x80;
/* this length is reliable */
image += readw(pds + 16) * 512;
} while (!last_image);
/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
return min((size_t)(image - rom), size);
}
/**
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
*
* Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
* actual ROM.
*/
/* Map the device's expansion ROM and report the usable size in *size.
 * Three sources, in order of preference: the legacy shadow copy at
 * 0xC0000 (IORESOURCE_ROM_SHADOW), an already-made kernel copy
 * (ROM_COPY/ROM_BIOS_COPY, returned directly without a new mapping),
 * or the real ROM BAR, which may need to be assigned and enabled first.
 * Returns NULL on failure; on the real-BAR path a failed ioremap also
 * re-disables ROM decoding. */
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
loff_t start;
void __iomem *rom;
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags &
(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
/* res->start already holds a kernel virtual copy */
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
if (res->parent == NULL &&
pci_assign_resource(pdev,PCI_ROM_RESOURCE))
return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
if (*size == 0)
return NULL;
/* Enable ROM space decodes */
if (pci_enable_rom(pdev))
return NULL;
}
}
rom = ioremap(start, *size);
if (!rom) {
/* restore enable if ioremap fails */
if (!(res->flags & (IORESOURCE_ROM_ENABLE |
IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_COPY)))
pci_disable_rom(pdev);
return NULL;
}
/*
* Try to find the true size of the ROM since sometimes the PCI window
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
return rom;
}
#if 0
/**
* pci_map_rom_copy - map a PCI ROM to kernel space, create a copy
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
*
* Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
* actual ROM.
*/
/* (compiled out via #if 0)  Like pci_map_rom() but leaves a kernel-memory
 * copy behind: the copy's address is stashed in res->start and flagged
 * with IORESOURCE_ROM_COPY so later calls return it directly.  On copy
 * allocation failure it degrades to the plain mapping. */
void __iomem *pci_map_rom_copy(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
void __iomem *rom;
rom = pci_map_rom(pdev, size);
if (!rom)
return NULL;
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_BIOS_COPY))
return rom;
res->start = (unsigned long)kmalloc(*size, GFP_KERNEL);
if (!res->start)
return rom;
res->end = res->start + *size;
memcpy_fromio((void*)(unsigned long)res->start, rom, *size);
pci_unmap_rom(pdev, rom);
res->flags |= IORESOURCE_ROM_COPY;
return (void __iomem *)(unsigned long)res->start;
}
#endif /* 0 */
/**
* pci_unmap_rom - unmap the ROM from kernel space
* @pdev: pointer to pci device struct
* @rom: virtual address of the previous mapping
*
* Remove a mapping of a previously mapped ROM
*/
/* Undo pci_map_rom().  Kernel-memory copies are not iounmap'able and are
 * left alone; otherwise the mapping is dropped and decoding re-disabled
 * unless the ROM is meant to stay enabled (pci=rom) or is a shadow. */
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
iounmap(rom);
/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}
#if 0
/**
* pci_remove_rom - disable the ROM and remove its sysfs attribute
* @pdev: pointer to pci device struct
*
* Remove the rom file in sysfs and disable ROM decoding.
*/
/* (compiled out via #if 0)  Remove the sysfs "rom" attribute and disable
 * decoding unless the ROM must stay enabled or is a shadow/copy. */
void pci_remove_rom(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
if (!(res->flags & (IORESOURCE_ROM_ENABLE |
IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_BIOS_COPY |
IORESOURCE_ROM_COPY)))
pci_disable_rom(pdev);
}
#endif /* 0 */
/**
* pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
* @pdev: pointer to pci device struct
*
* Free the copied ROM if we allocated one.
*/
/*
 * Free the kernel-memory ROM copy made by pci_map_rom_copy(), if any,
 * and reset the resource so it no longer claims to hold one.
 */
void pci_cleanup_rom(struct pci_dev *pdev)
{
	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];

	if (!(res->flags & IORESOURCE_ROM_COPY))
		return;

	kfree((void *)(unsigned long)res->start);
	res->flags &= ~IORESOURCE_ROM_COPY;
	res->start = 0;
	res->end = 0;
}
EXPORT_SYMBOL(pci_map_rom);
EXPORT_SYMBOL(pci_unmap_rom);
EXPORT_SYMBOL_GPL(pci_enable_rom);
EXPORT_SYMBOL_GPL(pci_disable_rom);
| gpl-2.0 |
davidmueller13/Galaxy_Note_3 | drivers/watchdog/ib700wdt.c | 4980 | 8631 | /*
* IB700 Single Board Computer WDT driver
*
* (c) Copyright 2001 Charles Howes <chowes@vsol.net>
*
* Based on advantechwdt.c which is based on acquirewdt.c which
* is based on wdt.c.
*
* (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
*
* Based on acquirewdt.c which is based on wdt.c.
* Original copyright messages:
*
* (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
* Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
* Added timeout module option to override default
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/ioport.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
static struct platform_device *ibwdt_platform_device;
static unsigned long ibwdt_is_open;
static DEFINE_SPINLOCK(ibwdt_lock);
static char expect_close;
/* Module information */
#define DRV_NAME "ib700wdt"
/*
*
* Watchdog Timer Configuration
*
* The function of the watchdog timer is to reset the system
* automatically and is defined at I/O port 0443H. To enable the
* watchdog timer and allow the system to reset, write I/O port 0443H.
* To disable the timer, write I/O port 0441H for the system to stop the
* watchdog function. The timer has a tolerance of 20% for its
* intervals.
*
* The following describes how the timer should be programmed.
*
* Enabling Watchdog:
* MOV AX,000FH (Choose the values from 0 to F)
* MOV DX,0443H
* OUT DX,AX
*
* Disabling Watchdog:
* MOV AX,000FH (Any value is fine.)
* MOV DX,0441H
* OUT DX,AX
*
* Watchdog timer control table:
* Level Value Time/sec | Level Value Time/sec
* 1 F 0 | 9 7 16
* 2 E 2 | 10 6 18
* 3 D 4 | 11 5 20
* 4 C 6 | 12 4 22
* 5 B 8 | 13 3 24
* 6 A 10 | 14 2 26
* 7 9 12 | 15 1 28
* 8 8 14 | 16 0 30
*
*/
#define WDT_STOP 0x441
#define WDT_START 0x443
/* Default timeout */
#define WATCHDOG_TIMEOUT 30 /* 30 seconds +/- 20% */
static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 0<= timeout <=30, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Watchdog Operations
*/
/* Kick the watchdog: convert the timeout in seconds to the hardware's
 * 0..15 countdown value (see the control table in the header comment)
 * and write it to the start port. */
static void ibwdt_ping(void)
{
int wd_margin = 15 - ((timeout + 1) / 2);
spin_lock(&ibwdt_lock);
/* Write a watchdog value */
outb_p(wd_margin, WDT_START);
spin_unlock(&ibwdt_lock);
}
/* Stop the watchdog: any write to the stop port disables the timer. */
static void ibwdt_disable(void)
{
spin_lock(&ibwdt_lock);
outb_p(0, WDT_STOP);
spin_unlock(&ibwdt_lock);
}
/*
 * Validate and store a new heartbeat interval.  The hardware only
 * supports 0..30 seconds; out-of-range values are rejected with
 * -EINVAL and the current setting is kept.
 */
static int ibwdt_set_heartbeat(int t)
{
	if (t >= 0 && t <= 30) {
		timeout = t;
		return 0;
	}
	return -EINVAL;
}
/*
* /dev/watchdog handling
*/
/* /dev/watchdog write: any write pings the timer.  Unless nowayout is
 * set, scan the data for the magic 'V' character, which arms the
 * expect_close flag so a subsequent release really stops the watchdog. */
static ssize_t ibwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
ibwdt_ping();
}
return count;
}
/* Standard watchdog ioctl handler: capability query, status, explicit
 * enable/disable, keepalive, and timeout get/set.  WDIOC_SETTIMEOUT
 * deliberately falls through to WDIOC_GETTIMEOUT so the (possibly
 * rounded) value in effect is returned to the caller. */
static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int new_margin;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
| WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "IB700 WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
/* no status bits to report on this hardware */
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int options, retval = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
ibwdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
ibwdt_ping();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
ibwdt_ping();
break;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, p))
return -EFAULT;
if (ibwdt_set_heartbeat(new_margin))
return -EINVAL;
ibwdt_ping();
/* fall through to report the value now in effect */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
/* Open /dev/watchdog: single-open device; starting it also pins the
 * module when nowayout is set so the timer can never be orphaned. */
static int ibwdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &ibwdt_is_open))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
/* Activate */
ibwdt_ping();
return nonseekable_open(inode, file);
}
/* Release /dev/watchdog: only stop the hardware if the magic-close 'V'
 * was written; otherwise keep it running so an unexpected close still
 * leads to a reset. */
static int ibwdt_close(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
ibwdt_disable();
} else {
pr_crit("WDT device closed unexpectedly.  WDT will not stop!\n");
ibwdt_ping();
}
clear_bit(0, &ibwdt_is_open);
expect_close = 0;
return 0;
}
/*
* Kernel Interfaces
*/
/* File operations backing the /dev/watchdog misc device below. */
static const struct file_operations ibwdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
.open = ibwdt_open,
.release = ibwdt_close,
};
/* Registers as the standard watchdog minor (/dev/watchdog). */
static struct miscdevice ibwdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ibwdt_fops,
};
/*
* Init & exit routines
*/
/* Platform probe: reserve the I/O ports (stop port only when distinct
 * from the start port), clamp the timeout module parameter into range,
 * and register the misc device.  Failures unwind via the goto ladder. */
static int __devinit ibwdt_probe(struct platform_device *dev)
{
int res;
#if WDT_START != WDT_STOP
if (!request_region(WDT_STOP, 1, "IB700 WDT")) {
pr_err("STOP method I/O %X is not available\n", WDT_STOP);
res = -EIO;
goto out_nostopreg;
}
#endif
if (!request_region(WDT_START, 1, "IB700 WDT")) {
pr_err("START method I/O %X is not available\n", WDT_START);
res = -EIO;
goto out_nostartreg;
}
/* Check that the heartbeat value is within it's range ;
* if not reset to the default */
if (ibwdt_set_heartbeat(timeout)) {
ibwdt_set_heartbeat(WATCHDOG_TIMEOUT);
pr_info("timeout value must be 0<=x<=30, using %d\n", timeout);
}
res = misc_register(&ibwdt_miscdev);
if (res) {
pr_err("failed to register misc device\n");
goto out_nomisc;
}
return 0;
out_nomisc:
release_region(WDT_START, 1);
out_nostartreg:
#if WDT_START != WDT_STOP
release_region(WDT_STOP, 1);
#endif
out_nostopreg:
return res;
}
/* Platform remove: unregister the misc device and give back the ports. */
static int __devexit ibwdt_remove(struct platform_device *dev)
{
misc_deregister(&ibwdt_miscdev);
release_region(WDT_START, 1);
#if WDT_START != WDT_STOP
release_region(WDT_STOP, 1);
#endif
return 0;
}
/* Platform shutdown hook: stop the timer so a clean soft shutdown does
 * not get interrupted by a watchdog reset. */
static void ibwdt_shutdown(struct platform_device *dev)
{
/* Turn the WDT off if we have a soft shutdown */
ibwdt_disable();
}
static struct platform_driver ibwdt_driver = {
.probe = ibwdt_probe,
.remove = __devexit_p(ibwdt_remove),
.shutdown = ibwdt_shutdown,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
},
};
/* Module init: register the platform driver, then create the matching
 * platform device; unwind the driver registration if that fails. */
static int __init ibwdt_init(void)
{
int err;
pr_info("WDT driver for IB700 single board computer initialising\n");
err = platform_driver_register(&ibwdt_driver);
if (err)
return err;
ibwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
if (IS_ERR(ibwdt_platform_device)) {
err = PTR_ERR(ibwdt_platform_device);
goto unreg_platform_driver;
}
return 0;
unreg_platform_driver:
platform_driver_unregister(&ibwdt_driver);
return err;
}
/* Module exit: tear down in reverse order of ibwdt_init(). */
static void __exit ibwdt_exit(void)
{
	platform_device_unregister(ibwdt_platform_device);
	platform_driver_unregister(&ibwdt_driver);
	pr_info("Watchdog Module Unloaded\n");
}

module_init(ibwdt_init);
module_exit(ibwdt_exit);

MODULE_AUTHOR("Charles Howes <chowes@vsol.net>");
MODULE_DESCRIPTION("IB700 SBC watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of ib700wdt.c */
| gpl-2.0 |
mathieudevos/linux_kernel_3.2.48 | drivers/staging/vt6656/control.c | 8564 | 2895 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: control.c
*
* Purpose: Handle USB control endpoint
*
* Author: Jerry Chen
*
* Date: Apr. 5, 2004
*
* Functions:
* CONTROLnsRequestOut - Write variable length bytes to MEM/BB/MAC/EEPROM
* CONTROLnsRequestIn - Read variable length bytes from MEM/BB/MAC/EEPROM
* ControlvWriteByte - Write one byte to MEM/BB/MAC/EEPROM
* ControlvReadByte - Read one byte from MEM/BB/MAC/EEPROM
* ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set
* some bits in the same address
*
* Revision History:
* 04-05-2004 Jerry Chen: Initial release
* 11-24-2004 Warren Hsu: Add ControlvWriteByte, ControlvReadByte,
* ControlvMaskByte
*
*/
#include "control.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
/* static int msglevel =MSG_LEVEL_INFO; */
/* static int msglevel =MSG_LEVEL_DEBUG; */
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
 * Write one byte to MEM/BB/MAC/EEPROM through the USB control endpoint.
 * The value is copied to a local so a pointer can be handed to the
 * transfer routine.
 */
void ControlvWriteByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
	BYTE byData)
{
	BYTE byTmp = byData;

	CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, byRegOfs, byRegType,
			    1, &byTmp);
}
/*
 * Read one byte from MEM/BB/MAC/EEPROM through the USB control endpoint.
 *
 * Bug fix: byData1 was previously left uninitialized, so when
 * CONTROLnsRequestIn failed *pbyData received stack garbage (reading an
 * uninitialized automatic variable is undefined behavior).  It is now
 * zero-initialized so callers always get a defined value; the transfer
 * status itself is still not reported because the interface is void.
 */
void ControlvReadByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
	PBYTE pbyData)
{
	int ntStatus;
	BYTE byData1 = 0;	/* defined result even if the read fails */

	ntStatus = CONTROLnsRequestIn(pDevice,
					MESSAGE_TYPE_READ,
					byRegOfs,
					byRegType,
					1,
					&byData1);
	*pbyData = byData1;
}
/*
 * Read-modify-write one byte: the device clears/sets the bits selected
 * by byMask using byData.  Payload layout expected by the firmware is
 * { data, mask }.
 */
void ControlvMaskByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
	BYTE byMask, BYTE byData)
{
	BYTE abyPayload[2] = { byData, byMask };

	CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE_MASK, byRegOfs,
			    byRegType, 2, abyPayload);
}
| gpl-2.0 |
friedrich420/N910G-AEL-Kernel-Lollipop-Sources | sound/soc/codecs/audience/es705-spi.c | 117 | 5521 | /*
* es705-spi.c -- Audience eS705 SPI interface
*
* Copyright 2011 Audience, Inc.
*
* Author: Hemal Meghpara <hmeghpara@audience.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "es705.h"
#include "es705-spi.h"
static int es705_spi_read(struct es705_priv *es705, void *buf, int len)
{
struct spi_device *spi = es705->spi_client;
int rc;
rc = spi_read(spi, buf, len);
if (rc < 0) {
dev_err(&spi->dev, "%s(): error %d reading SR\n",
__func__, rc);
return rc;
}
return rc;
}
static int es705_spi_write(struct es705_priv *es705, const void *buf, int len)
{
struct spi_device *spi = es705->spi_client;
int rc;
rc = spi_write(spi, buf, len);
if (rc != 0)
dev_err(&spi->dev, "%s(): error %d writing SR\n",
__func__, rc);
return rc;
}
/* Issue a command ('buf'/'len') and, if the write succeeded, read back
 * 'match' bytes into 'rspn'.  Returns the first failing status, or the
 * read status. */
static int es705_spi_write_then_read(struct es705_priv *es705,
					const void *buf, int len,
					u32 *rspn, int match)
{
	int ret = es705_spi_write(es705, buf, len);

	return ret ? ret : es705_spi_read(es705, rspn, match);
}
/*
 * Send one 32-bit command (big-endian on the wire) and poll for a
 * non-zero 32-bit response.  'sr' ("suppress response", presumably —
 * TODO confirm) skips the response poll entirely.  Polls up to
 * read_retries times at ~1 ms intervals after an initial 20 ms wait.
 * On success *resp holds the CPU-endian response.
 *
 * NOTE(review): if the very first spi read fails, 'rv' is read
 * uninitialized by the loop condition; and the final dev_dbg() prints
 * *resp even on error paths where it was never written — both are
 * pre-existing quirks left untouched here.
 */
static int es705_spi_cmd(struct es705_priv *es705, u32 cmd, int sr, u32 *resp)
{
	int err;
	u32 rv;
	int read_retries = 15;

	dev_dbg(es705->dev, "%s(): cmd=0x%08x sr=%i\n", __func__, cmd, sr);
	cmd = cpu_to_be32(cmd);
	err = es705_spi_write(es705, &cmd, sizeof(cmd));
	if (err || sr)
		return err;
	/* chip needs time before the response becomes available */
	usleep_range(20000, 20500);
	do {
		err = es705_spi_read(es705, &rv, sizeof(rv));
		dev_dbg(es705->dev, "%s(): spi read err = %d, rv = 0x%08x\n",
			__func__, err, be32_to_cpu(rv));
		if (err < 0)
			break;
		usleep_range(1000, 1050);
	} while (rv == 0 && read_retries--);
	if (rv == 0 && read_retries < 0)
		err = -ETIMEDOUT;
	if (!err)
		*resp = be32_to_cpu(rv);
	dev_dbg(es705->dev, "%s(): resp=0x%08x\n", __func__, *resp);
	return err;
}
#define ES705_SPI_BOOT_MAX_RETRY 15

/*
 * Prepare the chip for firmware download: sync with the SBL (secondary
 * boot loader), switch the driver's mode tracking to SBL, then issue the
 * boot command.  Returns 0 on success or the failing transfer's status.
 */
static int es705_spi_boot_setup(struct es705_priv *es705)
{
	u32 boot_cmd = ES705_BOOT_CMD;
	u32 sync_cmd = (ES705_SYNC_CMD << 16) | ES705_SYNC_POLLING;
	u32 sbl_rspn = ES705_SBL_ACK;
	u32 ack_rspn = ES705_BOOT_ACK;
	int match = 1;
	int rc;

	dev_dbg(es705->dev, "%s(): prepare for fw download\n", __func__);

	rc = es705_spi_write_then_read(es705, &sync_cmd, sizeof(sync_cmd),
					&sbl_rspn, match);
	if (rc) {
		dev_err(es705->dev, "%s(): SYNC_SBL fail\n", __func__);
		return rc;
	}

	es705->mode = SBL;

	rc = es705_spi_write_then_read(es705, &boot_cmd, sizeof(boot_cmd),
					&ack_rspn, match);
	if (rc)
		dev_err(es705->dev, "%s(): BOOT_CMD fail\n", __func__);
	return rc;
}
static int es705_spi_boot_finish(struct es705_priv *es705)
{
u32 sync_cmd;
u32 sync_rspn;
int match = 1;
int rc = 0;
dev_dbg(es705->dev, "%s(): finish fw download\n", __func__);
if (es705->es705_power_state == ES705_SET_POWER_STATE_VS_OVERLAY) {
sync_cmd = (ES705_SYNC_CMD << 16) | ES705_SYNC_INTR_RISING_EDGE;
dev_dbg(es705->dev, "%s(): FW type : VOICESENSE\n", __func__);
} else {
sync_cmd = (ES705_SYNC_CMD << 16) | ES705_SYNC_POLLING;
dev_dbg(es705->dev, "%s(): fw type : STANDARD\n", __func__);
}
sync_rspn = sync_cmd;
/* Give the chip some time to become ready after firmware download. */
msleep(20);
/* finish es705 boot, check es705 readiness */
rc = es705_spi_write_then_read(es705, &sync_cmd, sizeof(sync_cmd),
&sync_rspn, match);
if (rc)
dev_err(es705->dev, "%s(): SYNC fail\n", __func__);
return rc;
}
/*
 * SPI probe: wire the SPI transport callbacks into the shared es705_priv
 * singleton, run the core probe, then boot the firmware.
 */
static int es705_spi_probe(struct spi_device *spi)
{
	int rc;

	es705_priv.spi_client = spi;
	es705_priv.intf = ES705_SPI_INTF;
	es705_priv.dev_read = es705_spi_read;
	es705_priv.dev_write = es705_spi_write;
	es705_priv.dev_write_then_read = es705_spi_write_then_read;
	es705_priv.boot_setup = es705_spi_boot_setup;
	es705_priv.boot_finish = es705_spi_boot_finish;
	es705_priv.cmd = es705_spi_cmd;
	dev_info(&spi->dev, "%s()\n", __func__);
	es705_priv.streamdev = spi_streamdev;
	rc = es705_core_probe(&spi->dev);
	if (rc) {
		dev_err(&spi->dev, "%s(): es705_core_probe() failed %d\n",
			__func__, rc);
		goto es705_core_probe_error;
	}
	rc = es705_bootup(&es705_priv);
	if (rc) {
		dev_err(&spi->dev, "%s(): es705_bootup failed %d\n",
			__func__, rc);
		goto bootup_error;
	}
	return rc;

bootup_error:
es705_core_probe_error:
	dev_dbg(&spi->dev, "%s(): exit with error\n", __func__);
	return rc;
}

/* Streaming interface backed by raw SPI reads. */
struct es_stream_device spi_streamdev = {
	.read = es705_spi_read,
	.intf = ES705_SPI_INTF,
};

static int es705_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	return 0;
}

struct spi_driver es705_spi_driver = {
	.driver = {
		.name = "es705_spi",
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},
/*	.id_table	= m25p_ids,*/
	.probe = es705_spi_probe,
	.remove = es705_spi_remove,
};

/* NOTE(review): init does not register es705_spi_driver, yet exit
 * unregisters it — presumably registration happens in the es705 core
 * (outside this view); verify before changing. */
static int __init es705_spi_init(void)
{
	return 0;
}

static void __exit es705_spi_exit(void)
{
	spi_unregister_driver(&es705_spi_driver);
}

module_init(es705_spi_init);
module_exit(es705_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hemal Meghpara <hmeghpara@audience.com>");
MODULE_DESCRIPTION("es705 SPI driver for es705 ALSA control");
MODULE_ALIAS("platform:es705-codec");
| gpl-2.0 |
rmcc/commtiva-kernel-z71 | arch/x86/kernel/apic/probe_64.c | 373 | 2231 | /*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch probe layer.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/setup.h>
/*
 * APIC driver instances defined in their respective sub-arch files.
 *
 * Bug fix: the UV extern was misspelled "apic_x2xpic_uv_x", leaving
 * apic_x2apic_uv_x — referenced below under CONFIG_X86_UV and in
 * default_setup_apic_routing() — without a declaration.
 */
extern struct apic apic_flat;
extern struct apic apic_physflat;
extern struct apic apic_x2apic_uv_x;
extern struct apic apic_x2apic_phys;
extern struct apic apic_x2apic_cluster;

/* Currently selected APIC driver; defaults to flat logical mode. */
struct apic __read_mostly *apic = &apic_flat;
EXPORT_SYMBOL_GPL(apic);

/* Candidate drivers tried in order by default_acpi_madt_oem_check(). */
static struct apic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_UV
	&apic_x2apic_uv_x,
#endif
#ifdef CONFIG_X86_X2APIC
	&apic_x2apic_phys,
	&apic_x2apic_cluster,
#endif
	&apic_physflat,
	NULL,
};
/* Derive the physical package id from the running CPU's hardware APIC id
 * (used on vSMP boxes where the cached initial id is not reliable). */
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}
/*
 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
 */
void __init default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_X2APIC
	/* In x2apic mode, pick phys or cluster delivery — unless the UV
	 * driver was already selected, which must not be overridden. */
	if (x2apic_mode
#ifdef CONFIG_X86_UV
	    && apic != &apic_x2apic_uv_x
#endif
	    ) {
		if (x2apic_phys)
			apic = &apic_x2apic_phys;
		else
			apic = &apic_x2apic_cluster;
	}
#endif

	/* Flat logical mode only addresses 8 CPUs; fall back to physical. */
	if (apic == &apic_flat && num_possible_cpus() > 8)
		apic = &apic_physflat;

	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);

	if (is_vsmp_box()) {
		/* need to update phys_pkg_id */
		apic->phys_pkg_id = apicid_phys_pkg_id;
	}
}

/* Same for both flat and physical. */
void apic_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}
/*
 * Let each candidate APIC driver inspect the MADT OEM ids; the first one
 * that claims the platform becomes the active driver.  Returns 1 if a
 * driver matched, 0 otherwise.
 */
int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	struct apic **drv;

	for (drv = apic_probe; *drv != NULL; drv++) {
		if (!(*drv)->acpi_madt_oem_check(oem_id, oem_table_id))
			continue;
		apic = *drv;
		printk(KERN_INFO "Setting APIC routing to %s.\n",
		       apic->name);
		return 1;
	}
	return 0;
}
| gpl-2.0 |
CalcProgrammer1/ubuntu-kernel-quincyatt | drivers/video/msm/lcdc_toshiba_wvga_pt.c | 885 | 12170 | /* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#ifdef CONFIG_SPI_QSD
#include <linux/spi/spi.h>
#endif
#include <mach/gpio.h>
#include <mach/pmic.h>
#include "msm_fb.h"
#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
#include "mddihosti.h"
#endif
#ifdef CONFIG_SPI_QSD
#define LCDC_TOSHIBA_SPI_DEVICE_NAME	"lcdc_toshiba_ltm030dd40"
/* Bound by lcdc_toshiba_spi_probe(); NULL until the SPI device appears. */
static struct spi_device *lcdc_toshiba_spi_client;
#else
/* GPIO numbers for the bit-banged SPI bus, set by spi_pin_assign(). */
static int spi_cs;
static int spi_sclk;
static int spi_mosi;
static int spi_miso;
#endif
/* Tracks panel bring-up progress across on/off calls. */
struct toshiba_state_type{
	boolean disp_initialized;
	boolean display_on;
	boolean disp_powered_up;
};
static struct toshiba_state_type toshiba_state = { 0 };
static struct msm_panel_common_pdata *lcdc_toshiba_pdata;
#ifndef CONFIG_SPI_QSD
/*
 * Bit-bang one 9-bit frame: a D/C flag bit (0 = command, 1 = parameter)
 * followed by eight data bits, MSB first.  Data is set up while the
 * clock is low and latched on the rising edge.
 */
static void toshiba_spi_write_byte(char dc, uint8 data)
{
	uint32 mask;
	int nbits;

	gpio_set_value(spi_sclk, 0);	/* clk low */
	/* dc: 0 for command, 1 for parameter */
	gpio_set_value(spi_mosi, dc);
	udelay(1);			/* at least 20 ns */
	gpio_set_value(spi_sclk, 1);	/* clk high */
	udelay(1);			/* at least 20 ns */

	/* shift out the 8 data bits, MSB first */
	for (nbits = 8, mask = 0x80; nbits; nbits--, mask >>= 1) {
		gpio_set_value(spi_sclk, 0);	/* clk low */
		gpio_set_value(spi_mosi, (data & mask) ? 1 : 0);
		udelay(1);
		gpio_set_value(spi_sclk, 1);	/* clk high */
		udelay(1);
	}
}
#endif
/*
 * Send a command byte followed by up to 'num' parameter bytes to the
 * panel, either through the QSD SPI controller (frames packed into one
 * 32-bit word, command bit 0 / parameter bit 1 prefixed per byte) or by
 * bit-banging GPIOs.  Returns 0 or a negative errno (QSD path only).
 *
 * Fix: removed stray double semicolons after "bp = (char *)&data" in
 * both paths (harmless empty statements, but they defeat -Wextra style
 * checks and read as typos).
 */
static int toshiba_spi_write(char cmd, uint32 data, int num)
{
	char *bp;
#ifdef CONFIG_SPI_QSD
	char tx_buf[4];
	int rc, i;
	struct spi_message m;
	struct spi_transfer t;
	uint32 final_data = 0;

	if (!lcdc_toshiba_spi_client) {
		printk(KERN_ERR "%s lcdc_toshiba_spi_client is NULL\n",
			__func__);
		return -EINVAL;
	}
	memset(&t, 0, sizeof t);
	t.tx_buf = tx_buf;
	spi_setup(lcdc_toshiba_spi_client);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	/* command byte first */
	final_data |= cmd << 23;
	t.len = num + 2;
	if (t.len < 4)
		t.bits_per_word = 8 * t.len;

	/* followed by parameter bytes, each prefixed with a '1' D/C bit */
	if (num) {
		bp = (char *)&data;
		bp += (num - 1);
		i = 1;
		while (num) {
			final_data |= 1 << (((4 - i) << 3) - i - 1);
			final_data |= *bp << (((4 - i - 1) << 3) - i - 1);
			num--;
			bp--;
			i++;
		}
	}
	bp = (char *)&final_data;
	for (i = 0; i < t.len; i++)
		tx_buf[i] = bp[3 - i];
	t.rx_buf = NULL;
	rc = spi_sync(lcdc_toshiba_spi_client, &m);
	if (rc)
		printk(KERN_ERR "spi_sync _write failed %d\n", rc);
	return rc;
#else
	gpio_set_value(spi_cs, 1);	/* cs high */

	/* command byte first */
	toshiba_spi_write_byte(0, cmd);

	/* followed by parameter bytes */
	if (num) {
		bp = (char *)&data;
		bp += (num - 1);
		while (num) {
			toshiba_spi_write_byte(1, *bp);
			num--;
			bp--;
		}
	}
	gpio_set_value(spi_cs, 0);	/* cs low */
	udelay(1);
	return 0;
#endif
}
/*
 * Read 'num' bytes of register data after sending command 'cmd'.
 * QSD path: a fixed 5-byte full-duplex transfer; the 24-bit payload is
 * reassembled from the shifted reply bits.  Bit-bang path: clock the
 * command out, skip one dummy D/C bit when reading multi-byte, then
 * sample MISO bit-by-bit into *data (MSB first).
 */
static int toshiba_spi_read_bytes(char cmd, uint32 *data, int num)
{
#ifdef CONFIG_SPI_QSD
	char tx_buf[5];
	char rx_buf[5];
	int rc;
	struct spi_message m;
	struct spi_transfer t;

	if (!lcdc_toshiba_spi_client) {
		printk(KERN_ERR "%s lcdc_toshiba_spi_client is NULL\n",
			__func__);
		return -EINVAL;
	}
	memset(&t, 0, sizeof t);
	t.tx_buf = tx_buf;
	t.rx_buf = rx_buf;
	spi_setup(lcdc_toshiba_spi_client);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	/* command byte first */
	tx_buf[0] = 0 | ((cmd >> 1) & 0x7f);
	tx_buf[1] = (cmd & 0x01) << 7;
	tx_buf[2] = 0;
	tx_buf[3] = 0;
	tx_buf[4] = 0;
	t.len = 5;
	rc = spi_sync(lcdc_toshiba_spi_client, &m);
	*data = 0;
	/* reassemble the reply, which arrives shifted across 4 bytes */
	*data = ((rx_buf[1] & 0x1f) << 19) | (rx_buf[2] << 11) |
		(rx_buf[3] << 3) | ((rx_buf[4] & 0xe0) >> 5);
	if (rc)
		printk(KERN_ERR "spi_sync _read failed %d\n", rc);
	return rc;
#else
	uint32 dbit, bits;
	int bnum;

	gpio_set_value(spi_cs, 1);	/* cs high */

	/* command byte first */
	toshiba_spi_write_byte(0, cmd);

	if (num > 1) {
		/* extra dc bit */
		gpio_set_value(spi_sclk, 0);	/* clk low */
		udelay(1);
		dbit = gpio_get_value(spi_miso);/* dc bit */
		udelay(1);
		gpio_set_value(spi_sclk, 1);	/* clk high */
	}

	/* followed by data bytes */
	bnum = num * 8;	/* number of bits */
	bits = 0;
	while (bnum) {
		bits <<= 1;
		gpio_set_value(spi_sclk, 0);	/* clk low */
		udelay(1);
		dbit = gpio_get_value(spi_miso);
		udelay(1);
		gpio_set_value(spi_sclk, 1);	/* clk high */
		bits |= dbit;
		bnum--;
	}
	*data = bits;
	udelay(1);
	gpio_set_value(spi_cs, 0);	/* cs low */
	udelay(1);
	return 0;
#endif
}
#ifndef CONFIG_SPI_QSD
/* Cache the platform-provided GPIO numbers for the bit-banged SPI bus.
 * Order of gpio_num[] (sclk, cs, mosi, miso) is fixed by the board file. */
static void spi_pin_assign(void)
{
	/* Setting the Default GPIO's */
	spi_sclk = *(lcdc_toshiba_pdata->gpio_num);
	spi_cs   = *(lcdc_toshiba_pdata->gpio_num + 1);
	spi_mosi = *(lcdc_toshiba_pdata->gpio_num + 2);
	spi_miso = *(lcdc_toshiba_pdata->gpio_num + 3);
}
#endif
/* Mark the panel powered (actual DAC power-up is not implemented here). */
static void toshiba_disp_powerup(void)
{
	if (!toshiba_state.disp_powered_up && !toshiba_state.display_on) {
		/* Reset the hardware first */
		/* Include DAC power up implementation here */
		toshiba_state.disp_powered_up = TRUE;
	}
}

/*
 * Run the panel's vendor init sequence over SPI, then exit sleep
 * (0x11) and turn the display on (0x29).  The register values are the
 * panel vendor's magic numbers; delays between writes are required by
 * the panel.  Finishes by reading back the 3-byte panel ID (reg 0x04).
 */
static void toshiba_disp_on(void)
{
	uint32 data;

#ifndef CONFIG_SPI_QSD
	/* park the bit-banged bus in its idle state */
	gpio_set_value(spi_cs, 0);	/* low */
	gpio_set_value(spi_sclk, 1);	/* high */
	gpio_set_value(spi_mosi, 0);
	gpio_set_value(spi_miso, 0);
#endif

	if (toshiba_state.disp_powered_up && !toshiba_state.display_on) {
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0, 0, 0);
		mdelay(7);
		toshiba_spi_write(0xba, 0x11, 1);
		toshiba_spi_write(0x36, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0x3a, 0x60, 1);
		toshiba_spi_write(0xb1, 0x5d, 1);
		mdelay(1);
		toshiba_spi_write(0xb2, 0x33, 1);
		toshiba_spi_write(0xb3, 0x22, 1);
		mdelay(1);
		toshiba_spi_write(0xb4, 0x02, 1);
		toshiba_spi_write(0xb5, 0x1e, 1); /* vcs -- adjust brightness */
		mdelay(1);
		toshiba_spi_write(0xb6, 0x27, 1);
		toshiba_spi_write(0xb7, 0x03, 1);
		mdelay(1);
		toshiba_spi_write(0xb9, 0x24, 1);
		toshiba_spi_write(0xbd, 0xa1, 1);
		mdelay(1);
		toshiba_spi_write(0xbb, 0x00, 1);
		toshiba_spi_write(0xbf, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xbe, 0x00, 1);
		toshiba_spi_write(0xc0, 0x11, 1);
		mdelay(1);
		toshiba_spi_write(0xc1, 0x11, 1);
		toshiba_spi_write(0xc2, 0x11, 1);
		mdelay(1);
		toshiba_spi_write(0xc3, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc4, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc5, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc6, 0x3232, 2);
		mdelay(1);
		toshiba_spi_write(0xc7, 0x6445, 2);
		mdelay(1);
		toshiba_spi_write(0xc8, 0x44, 1);
		toshiba_spi_write(0xc9, 0x52, 1);
		mdelay(1);
		toshiba_spi_write(0xca, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0xec, 0x02a4, 2);	/* 0x02a4 */
		mdelay(1);
		toshiba_spi_write(0xcf, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xd0, 0xc003, 2);	/* c003 */
		mdelay(1);
		toshiba_spi_write(0xd1, 0x01, 1);
		mdelay(1);
		toshiba_spi_write(0xd2, 0x0028, 2);
		mdelay(1);
		toshiba_spi_write(0xd3, 0x0028, 2);
		mdelay(1);
		toshiba_spi_write(0xd4, 0x26a4, 2);
		mdelay(1);
		toshiba_spi_write(0xd5, 0x20, 1);
		mdelay(1);
		toshiba_spi_write(0xef, 0x3200, 2);
		mdelay(32);
		toshiba_spi_write(0xbc, 0x80, 1);	/* wvga pass through */
		toshiba_spi_write(0x3b, 0x00, 1);
		mdelay(1);
		toshiba_spi_write(0xb0, 0x16, 1);
		mdelay(1);
		toshiba_spi_write(0xb8, 0xfff5, 2);
		mdelay(1);
		toshiba_spi_write(0x11, 0, 0);
		mdelay(5);
		toshiba_spi_write(0x29, 0, 0);
		mdelay(5);
		toshiba_state.display_on = TRUE;
	}
	data = 0;
	toshiba_spi_read_bytes(0x04, &data, 3);
	printk(KERN_INFO "toshiba_disp_on: id=%x\n", data);
}
/* Panel-on hook: bring the panel up once; subsequent calls are no-ops
 * until lcdc_toshiba_panel_off() clears disp_initialized. */
static int lcdc_toshiba_panel_on(struct platform_device *pdev)
{
	if (toshiba_state.disp_initialized)
		return 0;

	/* Configure reset GPIO that drives DAC */
	if (lcdc_toshiba_pdata->panel_config_gpio)
		lcdc_toshiba_pdata->panel_config_gpio(1);
	toshiba_disp_powerup();
	toshiba_disp_on();
	toshiba_state.disp_initialized = TRUE;
	return 0;
}
/*
 * Panel-off hook: walk the panel down through display-off, sleep-in and
 * deep standby (delays per the panel datasheet), then release the GPIOs.
 */
static int lcdc_toshiba_panel_off(struct platform_device *pdev)
{
	if (toshiba_state.disp_powered_up && toshiba_state.display_on) {
		/* Main panel power off (Deep standby in) */
		toshiba_spi_write(0x28, 0, 0);	/* display off */
		mdelay(1);
		toshiba_spi_write(0xb8, 0x8002, 2);	/* output control */
		mdelay(1);
		toshiba_spi_write(0x10, 0x00, 1);	/* sleep mode in */
		mdelay(85);		/* wait 85 msec */
		toshiba_spi_write(0xb0, 0x00, 1);	/* deep standby in */
		mdelay(1);
		if (lcdc_toshiba_pdata->panel_config_gpio)
			lcdc_toshiba_pdata->panel_config_gpio(0);
		toshiba_state.display_on = FALSE;
		toshiba_state.disp_initialized = FALSE;
	}
	return 0;
}
/* Program the backlight level via the PMIC LED driver, retrying up to
 * three times (10 ms apart) before giving up with a warning. */
static void lcdc_toshiba_set_backlight(struct msm_fb_data_type *mfd)
{
	int bl_level = mfd->bl_level;
	int attempt;

	for (attempt = 0; attempt < 3; attempt++) {
		if (pmic_set_led_intensity(LED_LCD, bl_level) == 0)
			return;
		msleep(10);
	}

	printk(KERN_WARNING "%s: can't set lcd backlight!\n",
		__func__);
}
/*
 * Platform probe: id 0 carries the board platform data (and, without
 * CONFIG_SPI_QSD, the GPIO assignments); any other id is a framebuffer
 * device to register with msm_fb.
 */
static int __devinit toshiba_probe(struct platform_device *pdev)
{
	if (pdev->id != 0) {
		msm_fb_add_device(pdev);
		return 0;
	}

	lcdc_toshiba_pdata = pdev->dev.platform_data;
#ifndef CONFIG_SPI_QSD
	spi_pin_assign();
#endif
	return 0;
}
#ifdef CONFIG_SPI_QSD
/* Capture the SPI device handle; the panel protocol uses 32-bit words. */
static int __devinit lcdc_toshiba_spi_probe(struct spi_device *spi)
{
	lcdc_toshiba_spi_client = spi;
	lcdc_toshiba_spi_client->bits_per_word = 32;
	return 0;
}
static int __devexit lcdc_toshiba_spi_remove(struct spi_device *spi)
{
	lcdc_toshiba_spi_client = NULL;
	return 0;
}
static struct spi_driver lcdc_toshiba_spi_driver = {
	.driver = {
		.name = LCDC_TOSHIBA_SPI_DEVICE_NAME,
		.owner = THIS_MODULE,
	},
	.probe = lcdc_toshiba_spi_probe,
	.remove = __devexit_p(lcdc_toshiba_spi_remove),
};
#endif

/* Platform driver/device pair plus the msm_fb panel callbacks. */
static struct platform_driver this_driver = {
	.probe = toshiba_probe,
	.driver = {
		.name = "lcdc_toshiba_wvga",
	},
};
static struct msm_fb_panel_data toshiba_panel_data = {
	.on = lcdc_toshiba_panel_on,
	.off = lcdc_toshiba_panel_off,
	.set_backlight = lcdc_toshiba_set_backlight,
};
static struct platform_device this_device = {
	.name = "lcdc_toshiba_wvga",
	.id = 1,
	.dev = {
		.platform_data = &toshiba_panel_data,
	}
};
/*
 * Panel init: optionally bail out if an MDDI client owns the display,
 * fill in the WVGA timing/backlight parameters, and register the
 * platform driver, platform device and (with CONFIG_SPI_QSD) the SPI
 * driver, unwinding on failure.
 */
static int __init lcdc_toshiba_panel_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;

#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
	/* an MDDI client or a detected prism panel takes precedence */
	if (mddi_get_client_id() != 0)
		return 0;
	ret = msm_fb_detect_client("lcdc_toshiba_wvga_pt");
	if (ret)
		return 0;
#endif

	ret = platform_driver_register(&this_driver);
	if (ret)
		return ret;

	pinfo = &toshiba_panel_data.panel_info;
	pinfo->xres = 480;
	pinfo->yres = 800;
	MSM_FB_SINGLE_MODE_PANEL(pinfo);
	pinfo->type = LCDC_PANEL;
	pinfo->pdest = DISPLAY_1;
	pinfo->wait_cycle = 0;
	pinfo->bpp = 18;
	pinfo->fb_num = 2;
	/* 30Mhz mdp_lcdc_pclk and mdp_lcdc_pad_pcl */
	pinfo->clk_rate = 30720000;
	pinfo->bl_max = 15;
	pinfo->bl_min = 1;
	pinfo->lcdc.h_back_porch = 184;	/* hsw = 8 + hbp=184 */
	pinfo->lcdc.h_front_porch = 4;
	pinfo->lcdc.h_pulse_width = 8;
	pinfo->lcdc.v_back_porch = 2;	/* vsw=1 + vbp = 2 */
	pinfo->lcdc.v_front_porch = 3;
	pinfo->lcdc.v_pulse_width = 1;
	pinfo->lcdc.border_clr = 0;	/* blk */
	pinfo->lcdc.underflow_clr = 0xff;	/* blue */
	pinfo->lcdc.hsync_skew = 0;

	ret = platform_device_register(&this_device);
	if (ret) {
		printk(KERN_ERR "%s not able to register the device\n",
			__func__);
		goto fail_driver;
	}
#ifdef CONFIG_SPI_QSD
	ret = spi_register_driver(&lcdc_toshiba_spi_driver);
	if (ret) {
		printk(KERN_ERR "%s not able to register spi\n", __func__);
		goto fail_device;
	}
#endif
	return ret;

#ifdef CONFIG_SPI_QSD
fail_device:
	platform_device_unregister(&this_device);
#endif
fail_driver:
	platform_driver_unregister(&this_driver);
	return ret;
}

device_initcall(lcdc_toshiba_panel_init);
| gpl-2.0 |
CyanideL/android_kernel_oneplus_msm8974 | drivers/misc/ti_drv2667.c | 2165 | 17300 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/i2c/ti_drv2667.h>
#include "../staging/android/timed_output.h"
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#define DRV2667_SUS_LEVEL 1
#endif
#define DRV2667_STATUS_REG 0x00
#define DRV2667_CNTL1_REG 0x01
#define DRV2667_CNTL2_REG 0x02
#define DRV2667_WAV_SEQ3_REG 0x03
#define DRV2667_FIFO_REG 0x0B
#define DRV2667_PAGE_REG 0xFF
#define DRV2667_STANDBY_MASK 0xBF
#define DRV2667_INPUT_MUX_MASK 0x04
#define DRV2667_GAIN_MASK 0xFC
#define DRV2667_GAIN_SHIFT 0
#define DRV2667_TIMEOUT_MASK 0xF3
#define DRV2667_TIMEOUT_SHIFT 2
#define DRV2667_GO_MASK 0x01
#define DRV2667_FIFO_SIZE 100
#define DRV2667_VIB_START_VAL 0x7F
#define DRV2667_REG_PAGE_ID 0x00
#define DRV2667_FIFO_CHUNK_MS 10
#define DRV2667_BYTES_PER_MS 8
#define DRV2667_WAV_SEQ_ID_IDX 0
#define DRV2667_WAV_SEQ_REP_IDX 6
#define DRV2667_WAV_SEQ_FREQ_IDX 8
#define DRV2667_WAV_SEQ_FREQ_MIN 8
#define DRV2667_WAV_SEQ_DUR_IDX 9
#define DRV2667_MIN_IDLE_TIMEOUT_MS 5
#define DRV2667_MAX_IDLE_TIMEOUT_MS 20
#define DRV2667_VTG_MIN_UV 3000000
#define DRV2667_VTG_MAX_UV 5500000
#define DRV2667_VTG_CURR_UA 24000
#define DRV2667_I2C_VTG_MIN_UV 1800000
#define DRV2667_I2C_VTG_MAX_UV 1800000
#define DRV2667_I2C_CURR_UA 9630
/* supports 3 modes in digital - fifo, ram and wave */
enum drv2667_modes {
	FIFO_MODE = 0,		/* stream waveform bytes through the FIFO */
	RAM_SEQ_MODE,		/* play a sequence stored in device RAM */
	WAV_SEQ_MODE,		/* play a programmed wave sequence */
	ANALOG_MODE,		/* analog input drives the actuator */
};

/* Per-device state for the DRV2667 haptic driver. */
struct drv2667_data {
	struct i2c_client *client;
	struct timed_output_dev dev;	/* Android timed_output interface */
	struct hrtimer timer;		/* paces playback in time chunks */
	struct work_struct work;	/* i2c traffic done outside irq ctx */
	struct mutex lock;
	struct regulator *vdd;		/* main supply */
	struct regulator *vdd_i2c;	/* i2c pull-up supply */
	u32 max_runtime_ms;		/* clamp for enable() requests */
	u32 runtime_left;		/* ms of vibration still to play */
	u8 buf[DRV2667_FIFO_SIZE + 1];	/* reg addr + one FIFO chunk */
	u8 cntl2_val;			/* cached CONTROL2 register value */
	enum drv2667_modes mode;
	u32 time_chunk_ms;		/* timer granularity */
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend es;
#endif
};
/* Read one byte register over SMBus; returns the value or a negative
 * errno (logged). */
static int drv2667_read_reg(struct i2c_client *client, u32 reg)
{
	int ret = i2c_smbus_read_byte_data(client, reg);

	if (ret < 0)
		dev_err(&client->dev, "i2c reg read for 0x%x failed\n", reg);
	return ret;
}
/*
 * Write one byte register over SMBus; returns 0 or a negative errno.
 * Fix: the error message read "0x%xfailed" — the missing space made the
 * log line ambiguous (register id fused with "failed").
 */
static int drv2667_write_reg(struct i2c_client *client, u32 reg, u8 val)
{
	int rc;

	rc = i2c_smbus_write_byte_data(client, reg, val);
	if (rc < 0)
		dev_err(&client->dev, "i2c reg write for 0x%x failed\n", reg);
	return rc;
}
/* Debug helper: dump STATUS/CONTROL1/CONTROL2 with a caller label. */
static void drv2667_dump_regs(struct drv2667_data *data, char *label)
{
	dev_dbg(&data->client->dev,
		"%s: reg0x00 = 0x%x, reg0x01 = 0x%x reg0x02 = 0x%x", label,
		drv2667_read_reg(data->client, DRV2667_STATUS_REG),
		drv2667_read_reg(data->client, DRV2667_CNTL1_REG),
		drv2667_read_reg(data->client, DRV2667_CNTL2_REG));
}

/*
 * Playback work: in wave-sequence mode, pulse the GO bit (clear, then
 * set again if runtime remains) to restart the sequence; in FIFO mode,
 * push the next chunk of waveform bytes (8 bytes per ms of playback)
 * from data->buf.  Runs in process context so i2c may sleep.
 */
static void drv2667_worker(struct work_struct *work)
{
	struct drv2667_data *data;
	int rc = 0;
	u8 val;

	data = container_of(work, struct drv2667_data, work);
	if (data->mode == WAV_SEQ_MODE) {
		/* clear go bit */
		val = data->cntl2_val & ~DRV2667_GO_MASK;
		rc = drv2667_write_reg(data->client, DRV2667_CNTL2_REG, val);
		if (rc < 0) {
			dev_err(&data->client->dev, "i2c send msg failed\n");
			return;
		}
		/* restart wave if runtime is left */
		if (data->runtime_left) {
			val = data->cntl2_val | DRV2667_GO_MASK;
			rc = drv2667_write_reg(data->client,
					DRV2667_CNTL2_REG, val);
		}
	} else if (data->mode == FIFO_MODE) {
		/* data is played at 8khz */
		if (data->runtime_left < data->time_chunk_ms)
			val = data->runtime_left * DRV2667_BYTES_PER_MS;
		else
			val = data->time_chunk_ms * DRV2667_BYTES_PER_MS;
		/* buf[0] is the FIFO register address, then 'val' samples */
		rc = i2c_master_send(data->client, data->buf, val + 1);
	}

	if (rc < 0)
		dev_err(&data->client->dev, "i2c send message failed\n");
}
/*
 * timed_output enable hook: clamp the requested runtime, arm the chunk
 * timer for the first period (whole chunk, or the remainder if shorter)
 * and kick the worker to start playback.
 */
static void drv2667_enable(struct timed_output_dev *dev, int runtime)
{
	struct drv2667_data *data = container_of(dev, struct drv2667_data, dev);
	unsigned long period_ns;

	if (runtime > data->max_runtime_ms) {
		dev_dbg(&data->client->dev, "Invalid runtime\n");
		runtime = data->max_runtime_ms;
	}

	mutex_lock(&data->lock);
	hrtimer_cancel(&data->timer);
	data->runtime_left = runtime;
	period_ns = (data->runtime_left < data->time_chunk_ms ?
			runtime : data->time_chunk_ms) * NSEC_PER_MSEC;
	hrtimer_start(&data->timer, ktime_set(0, period_ns), HRTIMER_MODE_REL);
	schedule_work(&data->work);
	mutex_unlock(&data->lock);
}
/* timed_output get_time hook: remaining playback in ms (0 when idle). */
static int drv2667_get_time(struct timed_output_dev *dev)
{
	struct drv2667_data *data = container_of(dev, struct drv2667_data, dev);

	if (!hrtimer_active(&data->timer))
		return 0;

	return data->runtime_left +
		ktime_to_ms(hrtimer_get_remaining(&data->timer));
}
/*
 * Chunk timer callback: consume one time chunk, schedule the worker for
 * the next burst of i2c traffic, and re-arm for the following chunk (or
 * stop once the runtime is exhausted).
 */
static enum hrtimer_restart drv2667_timer(struct hrtimer *timer)
{
	struct drv2667_data *data =
		container_of(timer, struct drv2667_data, timer);
	int period_ns;

	if (data->runtime_left <= data->time_chunk_ms) {
		data->runtime_left = 0;
		schedule_work(&data->work);
		return HRTIMER_NORESTART;
	}

	data->runtime_left -= data->time_chunk_ms;
	period_ns = (data->runtime_left < data->time_chunk_ms ?
			data->runtime_left : data->time_chunk_ms) *
			NSEC_PER_MSEC;
	hrtimer_forward_now(&data->timer, ktime_set(0, period_ns));
	schedule_work(&data->work);
	return HRTIMER_RESTART;
}
/*
 * Acquire (on=true) or release (on=false) the vdd and vdd-i2c
 * regulators and set their voltage ranges.
 *
 * NOTE: the label fall-through at the bottom is intentional and does
 * double duty — the deconfig path (on=false) enters at deconfig_vreg
 * and falls through every label to tear everything down, while the
 * acquisition error paths jump into the middle of the same ladder to
 * undo only what was already acquired.  Keep the label order intact.
 */
static int drv2667_vreg_config(struct drv2667_data *data, bool on)
{
	int rc = 0;

	if (!on)
		goto deconfig_vreg;

	data->vdd = regulator_get(&data->client->dev, "vdd");
	if (IS_ERR(data->vdd)) {
		rc = PTR_ERR(data->vdd);
		dev_err(&data->client->dev, "unable to request vdd\n");
		return rc;
	}

	if (regulator_count_voltages(data->vdd) > 0) {
		rc = regulator_set_voltage(data->vdd,
				DRV2667_VTG_MIN_UV, DRV2667_VTG_MAX_UV);
		if (rc < 0) {
			dev_err(&data->client->dev,
				"vdd set voltage failed(%d)\n", rc);
			goto put_vdd;
		}
	}

	data->vdd_i2c = regulator_get(&data->client->dev, "vdd-i2c");
	if (IS_ERR(data->vdd_i2c)) {
		rc = PTR_ERR(data->vdd_i2c);
		dev_err(&data->client->dev, "unable to request vdd for i2c\n");
		goto reset_vdd_volt;
	}

	if (regulator_count_voltages(data->vdd_i2c) > 0) {
		rc = regulator_set_voltage(data->vdd_i2c,
			DRV2667_I2C_VTG_MIN_UV, DRV2667_I2C_VTG_MAX_UV);
		if (rc < 0) {
			dev_err(&data->client->dev,
				"vdd_i2c set voltage failed(%d)\n", rc);
			goto put_vdd_i2c;
		}
	}

	return rc;

deconfig_vreg:
	if (regulator_count_voltages(data->vdd_i2c) > 0)
		regulator_set_voltage(data->vdd_i2c, 0, DRV2667_I2C_VTG_MAX_UV);
put_vdd_i2c:
	regulator_put(data->vdd_i2c);
reset_vdd_volt:
	if (regulator_count_voltages(data->vdd) > 0)
		regulator_set_voltage(data->vdd, 0, DRV2667_VTG_MAX_UV);
put_vdd:
	regulator_put(data->vdd);
	return rc;
}
/* Set the regulator's expected load, skipping fixed (voltage-less)
 * regulators for which set_optimum_mode is meaningless. */
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
	if (regulator_count_voltages(reg) <= 0)
		return 0;

	return regulator_set_optimum_mode(reg, load_uA);
}
/*
 * Enable (on=true) or disable (on=false) both supplies.
 *
 * Bug fix: when reg_set_optimum_mode_check() failed for vdd_i2c the
 * function returned directly, leaving vdd enabled (regulator refcount
 * leak).  That path now jumps to disable_vdd so vdd is turned back off.
 * The off path intentionally falls through disable_vdd to disable both.
 */
static int drv2667_vreg_on(struct drv2667_data *data, bool on)
{
	int rc = 0;

	if (!on)
		goto vreg_off;

	rc = reg_set_optimum_mode_check(data->vdd, DRV2667_VTG_CURR_UA);
	if (rc < 0) {
		dev_err(&data->client->dev,
			"Regulator vdd set_opt failed rc=%d\n", rc);
		return rc;
	}

	rc = regulator_enable(data->vdd);
	if (rc < 0) {
		dev_err(&data->client->dev, "enable vdd failed\n");
		return rc;
	}

	rc = reg_set_optimum_mode_check(data->vdd_i2c, DRV2667_I2C_CURR_UA);
	if (rc < 0) {
		dev_err(&data->client->dev,
			"Regulator vdd_i2c set_opt failed rc=%d\n", rc);
		goto disable_vdd;	/* was "return rc": leaked vdd */
	}

	rc = regulator_enable(data->vdd_i2c);
	if (rc < 0) {
		dev_err(&data->client->dev, "enable vdd_i2c failed\n");
		goto disable_vdd;
	}

	return rc;

vreg_off:
	regulator_disable(data->vdd_i2c);
disable_vdd:
	regulator_disable(data->vdd);
	return rc;
}
#ifdef CONFIG_PM
/* Suspend: stop playback, put the chip in standby, drop the supplies. */
static int drv2667_suspend(struct device *dev)
{
	struct drv2667_data *data = dev_get_drvdata(dev);
	u8 val;
	int rc;

	hrtimer_cancel(&data->timer);
	cancel_work_sync(&data->work);

	/* set standby */
	val = data->cntl2_val | ~DRV2667_STANDBY_MASK;
	rc = drv2667_write_reg(data->client, DRV2667_CNTL2_REG, val);
	if (rc < 0)
		dev_err(dev, "unable to set standby\n");

	/* turn regulators off */
	drv2667_vreg_on(data, false);
	return 0;
}

/* Resume: re-enable the supplies and take the chip out of standby. */
static int drv2667_resume(struct device *dev)
{
	struct drv2667_data *data = dev_get_drvdata(dev);
	int rc;

	/* turn regulators on */
	rc = drv2667_vreg_on(data, true);
	if (rc < 0) {
		dev_err(dev, "unable to turn regulators on\n");
		return rc;
	}

	/* clear standby */
	rc = drv2667_write_reg(data->client,
			DRV2667_CNTL2_REG, data->cntl2_val);
	if (rc < 0) {
		dev_err(dev, "unable to clear standby\n");
		goto vreg_off;
	}

	return 0;

vreg_off:
	drv2667_vreg_on(data, false);
	return rc;
}

#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend wrappers: with earlysuspend enabled, suspend/resume run
 * at screen-off/on instead of via the standard dev_pm_ops below. */
static void drv2667_early_suspend(struct early_suspend *es)
{
	struct drv2667_data *data = container_of(es, struct drv2667_data, es);

	drv2667_suspend(&data->client->dev);
}

static void drv2667_late_resume(struct early_suspend *es)
{
	struct drv2667_data *data = container_of(es, struct drv2667_data, es);

	drv2667_resume(&data->client->dev);
}
#endif

static const struct dev_pm_ops drv2667_pm_ops = {
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = drv2667_suspend,
	.resume = drv2667_resume,
#endif
};
#endif
#ifdef CONFIG_OF
/*
 * Parse the DRV2667 devicetree node into @pdata.
 *
 * Optional properties fall back to defaults: "ti,label" -> "vibrator",
 * "ti,gain" -> 0, "ti,mode" -> FIFO_MODE, "ti,idle-timeout-ms" ->
 * DRV2667_MIN_IDLE_TIMEOUT_MS, "ti,max-runtime-ms" -> one second.
 * "ti,wav-seq" is mandatory (and must be exactly DRV2667_WAV_SEQ_LEN
 * bytes) only when the mode is WAV_SEQ_MODE.
 *
 * Returns 0 on success or a negative errno.
 */
static int drv2667_parse_dt(struct device *dev, struct drv2667_pdata *pdata)
{
	struct property *prop;
	int rc;
	u32 temp;

	rc = of_property_read_string(dev->of_node, "ti,label", &pdata->name);
	/* set vibrator as default name */
	if (rc < 0)
		pdata->name = "vibrator";

	rc = of_property_read_u32(dev->of_node, "ti,gain", &temp);
	/* set gain as 0 */
	if (rc < 0)
		pdata->gain = 0;
	else
		pdata->gain = (u8) temp;

	rc = of_property_read_u32(dev->of_node, "ti,mode", &temp);
	/* set FIFO mode as default */
	if (rc < 0)
		pdata->mode = FIFO_MODE;
	else
		pdata->mode = (u8) temp;

	/* read wave sequence */
	if (pdata->mode == WAV_SEQ_MODE) {
		/* on return, temp holds the property length in bytes */
		prop = of_find_property(dev->of_node, "ti,wav-seq", &temp);
		if (!prop) {
			dev_err(dev, "wav seq data not found");
			return -ENODEV;
		} else if (temp != DRV2667_WAV_SEQ_LEN) {
			dev_err(dev, "Invalid length of wav seq data\n");
			return -EINVAL;
		}
		memcpy(pdata->wav_seq, prop->value, DRV2667_WAV_SEQ_LEN);
	}

	rc = of_property_read_u32(dev->of_node, "ti,idle-timeout-ms", &temp);
	/* configure minimum idle timeout */
	if (rc < 0)
		pdata->idle_timeout_ms = DRV2667_MIN_IDLE_TIMEOUT_MS;
	else
		/* NOTE(review): value is truncated to u8 here; harmless only
		 * if DRV2667_MAX_IDLE_TIMEOUT_MS fits in a byte -- confirm */
		pdata->idle_timeout_ms = (u8) temp;

	rc = of_property_read_u32(dev->of_node, "ti,max-runtime-ms",
			&pdata->max_runtime_ms);
	/* configure one sec as default time */
	if (rc < 0)
		pdata->max_runtime_ms = MSEC_PER_SEC;

	return 0;
}
#else
/* Non-DT builds: devicetree parsing is unavailable. */
static int drv2667_parse_dt(struct device *dev, struct drv2667_pdata *pdata)
{
	return -ENODEV;
}
#endif
/*
 * Probe: validate platform data (from DT or a board file), bring up the
 * regulators, program the DRV2667 control and waveform registers for
 * the selected mode, and register a timed_output device.
 *
 * Fix: on the "Invalid idle timeout" and "Mode not supported" error
 * paths, @rc still held the non-negative register value returned by the
 * preceding successful drv2667_read_reg(), so probe would return a
 * bogus positive value instead of an error code.  Set rc = -EINVAL
 * before bailing out on those paths.
 */
static int __devinit drv2667_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct drv2667_data *data;
	struct drv2667_pdata *pdata;
	int rc, i;
	u8 val, fifo_seq_val, reg;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "i2c is not supported\n");
		return -EIO;
	}

	if (client->dev.of_node) {
		pdata = devm_kzalloc(&client->dev,
				sizeof(struct drv2667_pdata), GFP_KERNEL);
		if (!pdata) {
			dev_err(&client->dev, "unable to allocate pdata\n");
			return -ENOMEM;
		}
		/* parse DT */
		rc = drv2667_parse_dt(&client->dev, pdata);
		if (rc) {
			dev_err(&client->dev, "DT parsing failed\n");
			return rc;
		}
	} else {
		pdata = client->dev.platform_data;
		if (!pdata) {
			dev_err(&client->dev, "invalid pdata\n");
			return -EINVAL;
		}
	}

	data = devm_kzalloc(&client->dev, sizeof(struct drv2667_data),
				GFP_KERNEL);
	if (!data) {
		dev_err(&client->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	i2c_set_clientdata(client, data);
	data->client = client;
	data->max_runtime_ms = pdata->max_runtime_ms;
	mutex_init(&data->lock);
	INIT_WORK(&data->work, drv2667_worker);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	data->timer.function = drv2667_timer;
	data->mode = pdata->mode;

	/* configure voltage regulators */
	rc = drv2667_vreg_config(data, true);
	if (rc) {
		dev_err(&client->dev, "unable to configure regulators\n");
		goto destroy_mutex;
	}

	/* turn on voltage regulators */
	rc = drv2667_vreg_on(data, true);
	if (rc) {
		dev_err(&client->dev, "unable to turn on regulators\n");
		goto deconfig_vreg;
	}

	rc = drv2667_read_reg(client, DRV2667_CNTL2_REG);
	if (rc < 0)
		goto vreg_off;

	/* set timeout, clear standby */
	val = (u8) rc;
	if (pdata->idle_timeout_ms < DRV2667_MIN_IDLE_TIMEOUT_MS ||
		pdata->idle_timeout_ms > DRV2667_MAX_IDLE_TIMEOUT_MS ||
		(pdata->idle_timeout_ms % DRV2667_MIN_IDLE_TIMEOUT_MS)) {
		dev_err(&client->dev, "Invalid idle timeout\n");
		/* rc held the register value (>= 0); return a real error */
		rc = -EINVAL;
		goto vreg_off;
	}
	val = (val & DRV2667_TIMEOUT_MASK) |
		((pdata->idle_timeout_ms / DRV2667_MIN_IDLE_TIMEOUT_MS - 1) <<
			DRV2667_TIMEOUT_SHIFT);
	val &= DRV2667_STANDBY_MASK;
	rc = drv2667_write_reg(client, DRV2667_CNTL2_REG, val);
	if (rc < 0)
		goto vreg_off;
	/* cache control2 val */
	data->cntl2_val = val;

	/* program drv2667 registers */
	rc = drv2667_read_reg(client, DRV2667_CNTL1_REG);
	if (rc < 0)
		goto vreg_off;
	/* gain and input mode */
	val = (u8) rc;
	/* remove this check after adding support for these modes */
	if (data->mode == ANALOG_MODE || data->mode == RAM_SEQ_MODE) {
		dev_err(&data->client->dev, "Mode not supported\n");
		/* rc held the register value (>= 0); return a real error */
		rc = -EINVAL;
		goto vreg_off;
	} else
		val &= ~DRV2667_INPUT_MUX_MASK; /* set digital mode */
	val = (val & DRV2667_GAIN_MASK) | (pdata->gain << DRV2667_GAIN_SHIFT);
	rc = drv2667_write_reg(client, DRV2667_CNTL1_REG, val);
	if (rc < 0)
		goto vreg_off;

	if (data->mode == FIFO_MODE) {
		/* Load a predefined pattern for FIFO mode */
		data->buf[0] = DRV2667_FIFO_REG;
		fifo_seq_val = DRV2667_VIB_START_VAL;
		for (i = 1; i < DRV2667_FIFO_SIZE - 1; i++, fifo_seq_val++)
			data->buf[i] = fifo_seq_val;
		data->time_chunk_ms = DRV2667_FIFO_CHUNK_MS;
	} else if (data->mode == WAV_SEQ_MODE) {
		u8 freq, rep, dur;

		/* program wave sequence from pdata */
		/* id to wave sequence 3, set page */
		rc = drv2667_write_reg(client, DRV2667_WAV_SEQ3_REG,
				pdata->wav_seq[DRV2667_WAV_SEQ_ID_IDX]);
		if (rc < 0)
			goto vreg_off;
		/* set page to wave form sequence */
		rc = drv2667_write_reg(client, DRV2667_PAGE_REG,
				pdata->wav_seq[DRV2667_WAV_SEQ_ID_IDX]);
		if (rc < 0)
			goto vreg_off;
		/* program waveform sequence */
		for (reg = 0, i = 0; i < DRV2667_WAV_SEQ_LEN - 1; i++, reg++) {
			rc = drv2667_write_reg(client, reg,
					pdata->wav_seq[i+1]);
			if (rc < 0)
				goto vreg_off;
		}
		/* set page back to normal register space */
		rc = drv2667_write_reg(client, DRV2667_PAGE_REG,
				DRV2667_REG_PAGE_ID);
		if (rc < 0)
			goto vreg_off;
		freq = pdata->wav_seq[DRV2667_WAV_SEQ_FREQ_IDX];
		rep = pdata->wav_seq[DRV2667_WAV_SEQ_REP_IDX];
		dur = pdata->wav_seq[DRV2667_WAV_SEQ_DUR_IDX];
		data->time_chunk_ms = (rep * dur * MSEC_PER_SEC) /
					(freq * DRV2667_WAV_SEQ_FREQ_MIN);
	}

	drv2667_dump_regs(data, "new");

	/* register with timed output class */
	data->dev.name = pdata->name;
	data->dev.get_time = drv2667_get_time;
	data->dev.enable = drv2667_enable;
	rc = timed_output_dev_register(&data->dev);
	if (rc) {
		dev_err(&client->dev, "unable to register with timed_output\n");
		goto vreg_off;
	}

#ifdef CONFIG_HAS_EARLYSUSPEND
	data->es.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + DRV2667_SUS_LEVEL;
	data->es.suspend = drv2667_early_suspend;
	data->es.resume = drv2667_late_resume;
	register_early_suspend(&data->es);
#endif

	return 0;

vreg_off:
	drv2667_vreg_on(data, false);
deconfig_vreg:
	drv2667_vreg_config(data, false);
destroy_mutex:
	mutex_destroy(&data->lock);
	return rc;
}
/*
 * Remove: unwind everything probe set up.
 *
 * Fix: the original destroyed data->lock *before* cancelling the timer
 * and flushing the work item, so the worker could still contend on a
 * destroyed mutex.  Destroy the mutex only after the worker can no
 * longer run.
 */
static int __devexit drv2667_remove(struct i2c_client *client)
{
	struct drv2667_data *data = i2c_get_clientdata(client);

#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&data->es);
#endif
	timed_output_dev_unregister(&data->dev);
	hrtimer_cancel(&data->timer);
	cancel_work_sync(&data->work);
	drv2667_vreg_on(data, false);
	drv2667_vreg_config(data, false);
	/* safe now: no timer, no work, nothing else can take the lock */
	mutex_destroy(&data->lock);
	return 0;
}
/* I2C device IDs this driver binds to (non-DT enumeration). */
static const struct i2c_device_id drv2667_id_table[] = {
	{"drv2667", 0},
	{ },
};
MODULE_DEVICE_TABLE(i2c, drv2667_id_table);
#ifdef CONFIG_OF
/* Devicetree match table.
 * NOTE(review): the compatible string contains a space ("ti, drv2667");
 * this must match the DT source exactly -- verify against the board DTS. */
static const struct of_device_id drv2667_of_id_table[] = {
	{.compatible = "ti, drv2667"},
	{ },
};
#else
#define drv2667_of_id_table NULL
#endif
/* Driver registration: PM ops are only present when CONFIG_PM is set
 * (and are empty under early-suspend, where the es hooks take over). */
static struct i2c_driver drv2667_i2c_driver = {
	.driver = {
		.name = "drv2667",
		.owner = THIS_MODULE,
		.of_match_table = drv2667_of_id_table,
#ifdef CONFIG_PM
		.pm = &drv2667_pm_ops,
#endif
	},
	.probe = drv2667_probe,
	.remove = __devexit_p(drv2667_remove),
	.id_table = drv2667_id_table,
};

module_i2c_driver(drv2667_i2c_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI DRV2667 chip driver");
| gpl-2.0 |
aftian/linux-blankon | virt/kvm/ioapic.c | 2165 | 10960 | /*
* Copyright (C) 2001 MandrakeSoft S.A.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* MandrakeSoft S.A.
* 43, rue d'Aboukir
* 75002 Paris - France
* http://www.linux-mandrake.com/
* http://www.mandrakesoft.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Yunhong Jiang <yunhong.jiang@intel.com>
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
* Based on Xen 3.1 code.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
/* Compile-time switch for verbose IOAPIC debugging; disabled by default. */
#if 0
#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif

/* forward declaration: delivery helper is used before its definition */
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
/*
 * Read the register currently selected by IOREGSEL.
 *
 * Security fix (CVE-2013-1798): ioregsel is fully guest-controlled, and
 * the redirection-table index derived from it was only checked with
 * ASSERT(), which compiles away in production builds -- allowing an
 * out-of-bounds read of host memory.  Bounds-check the index and return
 * all-ones (the conventional "no such register" value) when it is out
 * of range, matching the upstream fix.
 */
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			if (redir_index < IOAPIC_NUM_PINS)
				redir_content =
					ioapic->redirtbl[redir_index].bits;
			else
				redir_content = ~0ULL;

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}
/* Deliver the interrupt programmed on pin @idx unless it is masked.
 * Returns the delivery result, or -1 if the pin was masked.  On a
 * successful delivery of a level-triggered pin, latch remote_irr. */
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[idx];
	int ret = -1;

	if (entry->fields.mask)
		return ret;

	ret = ioapic_deliver(ioapic, idx);
	if (ret && entry->fields.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
DECLARE_BITMAP(handled_vectors, 256);
int i;
memset(handled_vectors, 0, sizeof(handled_vectors));
for (i = 0; i < IOAPIC_NUM_PINS; ++i)
__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
memcpy(ioapic->handled_vectors, handled_vectors,
sizeof(handled_vectors));
smp_wmb();
}
/*
 * Write @val to the register selected by IOREGSEL.  Redirection-table
 * writes update one 32-bit half of the 64-bit entry; a low-half write
 * also clears remote_irr.  Mask-change notifiers fire on any mask
 * transition, and a still-pending level interrupt is re-serviced.
 */
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;
		ioapic_debug("change redir index %x val %x\n", index, val);
		/* guest-controlled index: ignore out-of-range writes */
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			/* odd selector: high 32 bits of the entry */
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			/* even selector: low 32 bits; resets remote_irr */
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		/* vector may have changed: refresh handled_vectors first */
		update_handled_vectors(ioapic);
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		/* re-deliver a level interrupt that is still asserted */
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}
/*
 * Translate redirection entry @irq into a local-APIC interrupt request
 * and hand it to the APIC delivery code.  Returns the delivery result
 * from kvm_irq_delivery_to_apic().
 */
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	/* delivery_mode is stored shifted in struct kvm_lapic_irq */
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

#ifdef CONFIG_X86
	/* Always delivery PIT interrupt to vcpu 0 */
	if (irq == 0) {
		irqe.dest_mode = 0; /* Physical mode. */
		/* need to read apic_id from apic regiest since
		 * it can be rewritten */
		irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
	}
#endif
	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
/*
 * Assert or deassert IOAPIC pin @irq.  Edge-triggered pins deliver on a
 * rising IRR bit; level-triggered pins deliver only when remote_irr is
 * clear.  Returns the delivery result, or 0 when the interrupt was
 * coalesced with one already pending; out-of-range irq returns 1.
 */
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
	u32 old_irr;
	u32 mask = 1 << irq;
	union kvm_ioapic_redirect_entry entry;
	int ret = 1;

	spin_lock(&ioapic->lock);
	old_irr = ioapic->irr;
	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
		entry = ioapic->redirtbl[irq];
		/* active-low pins invert the incoming level */
		level ^= entry.fields.polarity;
		if (!level)
			ioapic->irr &= ~mask;
		else {
			int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
			ioapic->irr |= mask;
			if ((edge && old_irr != ioapic->irr) ||
			    (!edge && !entry.fields.remote_irr))
				ret = ioapic_service(ioapic, irq);
			else
				ret = 0; /* report coalesced interrupt */
		}
		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	}
	spin_unlock(&ioapic->lock);

	return ret;
}
/*
 * Handle a guest EOI for @vector: notify ack listeners for every pin
 * carrying that vector, and for level-triggered pins clear remote_irr
 * and re-deliver if the line is still asserted.  Caller holds
 * ioapic->lock; it is dropped around the notifier call (see comment).
 */
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
				     int trigger_mode)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We are dropping lock while calling ack notifiers because ack
		 * notifier callbacks for assigned devices call into IOAPIC
		 * recursively. Since remote_irr is cleared only after call
		 * to notifiers if the same vector will be delivered while lock
		 * is dropped it will be put into irr and will be delivered
		 * after ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		/* line still asserted and unmasked: deliver again */
		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
			ioapic_service(ioapic, i);
	}
}
/*
 * Public EOI entry point.  The lockless handled_vectors test (paired
 * with the smp_wmb() in update_handled_vectors()) filters out vectors
 * no redirection entry can raise before taking the lock.
 */
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	smp_rmb();
	if (!test_bit(vector, ioapic->handled_vectors))
		return;
	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}
/* Recover the kvm_ioapic from its embedded kvm_io_device. */
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}
/* True iff @addr falls inside this IOAPIC's MMIO window. */
static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	gpa_t start = ioapic->base_address;
	gpa_t end = start + IOAPIC_MEM_LENGTH;

	return addr >= start && addr < end;
}
/*
 * MMIO read handler.  Only the IOREGSEL and IOWIN registers are backed;
 * anything else in the window reads as zero.  Returns -EOPNOTSUPP when
 * the address is outside this device's range so the bus tries the next
 * device.
 */
static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			    void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	/* copy out in whatever access width the guest used */
	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}
/*
 * MMIO write handler.  Accepts 4- or 8-byte accesses (8-byte writes use
 * only the low 32 bits); other sizes are logged and ignored.  Returns
 * -EOPNOTSUPP for addresses outside this device's range.
 */
static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			     const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	if (len == 4 || len == 8)
		data = *(u32 *) val;
	else {
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
#ifdef	CONFIG_IA64
	case IOAPIC_REG_EOI:
		/* ia64 exposes EOI as an IOAPIC register */
		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
		break;
#endif

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}
/* Return the IOAPIC to power-on defaults: every pin masked, default
 * base address, no pending interrupts, ID zero. */
void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int pin;

	for (pin = 0; pin < IOAPIC_NUM_PINS; pin++)
		ioapic->redirtbl[pin].fields.mask = 1;

	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->id = 0;
	update_handled_vectors(ioapic);
}
/* MMIO bus callbacks for the emulated IOAPIC. */
static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};
/*
 * Allocate and register the VM's IOAPIC on the MMIO bus.  On bus
 * registration failure the device is freed and kvm->arch.vioapic is
 * cleared again.  Returns 0 or a negative errno.
 */
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}
/* Tear down the VM's IOAPIC, if one was created. */
void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}
/* Snapshot the IOAPIC state for userspace (KVM_GET_IRQCHIP). */
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(*state));
	spin_unlock(&ioapic->lock);
	return 0;
}
/* Restore IOAPIC state from userspace (KVM_SET_IRQCHIP) and rebuild
 * the handled-vector bitmap to match the new redirection table. */
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(*state));
	update_handled_vectors(ioapic);
	spin_unlock(&ioapic->lock);
	return 0;
}
| gpl-2.0 |
EPDCenter/android_kernel_bq_dc_v1 | kernel/auditsc.c | 2165 | 68068 | /* auditsc.c -- System-call auditing support
* Handles all system-call specific auditing features.
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* Copyright 2005 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2005, 2006 IBM Corporation
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
*
* Many of the ideas implemented here are from Stephen C. Tweedie,
* especially the idea of avoiding a copy by using getname.
*
* The method for actual interception of syscall entry and exit (not in
* this file -- see entry.S) is based on a GPL'd patch written by
* okir@suse.de and Copyright 2003 SuSE Linux AG.
*
* POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>,
* 2006.
*
* The support of additional filter rules compares (>, <, >=, <=) was
* added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005.
*
* Modified by Amy Griffis <amy.griffis@hp.com> to collect additional
* filesystem information.
*
* Subject and object context labeling support added by <danjones@us.ibm.com>
* and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
*/
#include <linux/init.h>
#include <asm/types.h>
#include <asm/atomic.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/socket.h>
#include <linux/mqueue.h>
#include <linux/audit.h>
#include <linux/personality.h>
#include <linux/time.h>
#include <linux/netlink.h>
#include <linux/compiler.h>
#include <asm/unistd.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
#include "audit.h"
/* AUDIT_NAMES is the number of slots we reserve in the audit_context
* for saving names from getname(). */
#define AUDIT_NAMES 20
/* Indicates that audit should log the full pathname. */
#define AUDIT_NAME_FULL -1
/* no execve audit message should be longer than this (userspace limits) */
#define MAX_EXECVE_AUDIT_LEN 7500
/* number of audit rules */
int audit_n_rules;
/* determines whether we collect data for signals sent */
int audit_signals;
/* Capability sets recorded for a task or a file; the union reflects
 * that files carry only an effective *bit* while processes carry a
 * full effective set. */
struct audit_cap_data {
	kernel_cap_t		permitted;
	kernel_cap_t		inheritable;
	union {
		unsigned int	fE;		/* effective bit of a file capability */
		kernel_cap_t	effective;	/* effective set of a process */
	};
};
/* When fs/namei.c:getname() is called, we store the pointer in name and
* we don't let putname() free it (instead we free all of the saved
* pointers at syscall exit time).
*
* Further, in fs/namei.c:path_lookup() we store the inode and device. */
struct audit_names {
	const char	*name;
	int		name_len;	/* number of name's characters to log */
	unsigned	name_put;	/* call __putname() for this name */
	unsigned long	ino;		/* inode number (AUDIT_NAME_FULL / -1 when unknown) */
	dev_t		dev;
	umode_t		mode;
	uid_t		uid;
	gid_t		gid;
	dev_t		rdev;
	u32		osid;		/* LSM security id of the object */
	struct audit_cap_data fcap;	/* file capabilities, if any */
	unsigned int	fcap_ver;
};
/* Generic header for the singly-linked list of auxiliary audit records
 * hanging off an audit_context; @type selects the concrete struct. */
struct audit_aux_data {
	struct audit_aux_data	*next;
	int			type;
};

#define AUDIT_AUX_IPCPERM	0

/* Number of target pids per aux struct. */
#define AUDIT_AUX_PIDS	16

/* execve(2): argv/envp counts plus the mm to read them from at exit. */
struct audit_aux_data_execve {
	struct audit_aux_data	d;
	int argc;
	int envc;
	struct mm_struct *mm;
};

/* Signal targets accumulated during the syscall, up to AUDIT_AUX_PIDS
 * per record. */
struct audit_aux_data_pids {
	struct audit_aux_data	d;
	pid_t			target_pid[AUDIT_AUX_PIDS];
	uid_t			target_auid[AUDIT_AUX_PIDS];
	uid_t			target_uid[AUDIT_AUX_PIDS];
	unsigned int		target_sessionid[AUDIT_AUX_PIDS];
	u32			target_sid[AUDIT_AUX_PIDS];
	char 			target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
	int			pid_count;
};

/* Capability transition caused by executing a privileged binary. */
struct audit_aux_data_bprm_fcaps {
	struct audit_aux_data	d;
	struct audit_cap_data	fcap;
	unsigned int		fcap_ver;
	struct audit_cap_data	old_pcap;
	struct audit_cap_data	new_pcap;
};

/* capset(2) arguments. */
struct audit_aux_data_capset {
	struct audit_aux_data	d;
	pid_t			pid;
	struct audit_cap_data	cap;
};

/* One link in the chain of watched-tree chunk references; see the
 * block comment above put_tree_ref() for the chain invariants. */
struct audit_tree_refs {
	struct audit_tree_refs *next;
	struct audit_chunk *c[31];
};
/* The per-task audit context. */
struct audit_context {
int dummy; /* must be the first element */
int in_syscall; /* 1 if task is in a syscall */
enum audit_state state, current_state;
unsigned int serial; /* serial number for record */
int major; /* syscall number */
struct timespec ctime; /* time of syscall entry */
unsigned long argv[4]; /* syscall arguments */
long return_code;/* syscall return code */
u64 prio;
int return_valid; /* return code is valid */
int name_count;
struct audit_names names[AUDIT_NAMES];
char * filterkey; /* key for rule that triggered record */
struct path pwd;
struct audit_context *previous; /* For nested syscalls */
struct audit_aux_data *aux;
struct audit_aux_data *aux_pids;
struct sockaddr_storage *sockaddr;
size_t sockaddr_len;
/* Save things to print about task_struct */
pid_t pid, ppid;
uid_t uid, euid, suid, fsuid;
gid_t gid, egid, sgid, fsgid;
unsigned long personality;
int arch;
pid_t target_pid;
uid_t target_auid;
uid_t target_uid;
unsigned int target_sessionid;
u32 target_sid;
char target_comm[TASK_COMM_LEN];
struct audit_tree_refs *trees, *first_trees;
struct list_head killed_trees;
int tree_count;
int type;
union {
struct {
int nargs;
long args[6];
} socketcall;
struct {
uid_t uid;
gid_t gid;
mode_t mode;
u32 osid;
int has_perm;
uid_t perm_uid;
gid_t perm_gid;
mode_t perm_mode;
unsigned long qbytes;
} ipc;
struct {
mqd_t mqdes;
struct mq_attr mqstat;
} mq_getsetattr;
struct {
mqd_t mqdes;
int sigev_signo;
} mq_notify;
struct {
mqd_t mqdes;
size_t msg_len;
unsigned int msg_prio;
struct timespec abs_timeout;
} mq_sendrecv;
struct {
int oflag;
mode_t mode;
struct mq_attr attr;
} mq_open;
struct {
pid_t pid;
struct audit_cap_data cap;
} capset;
struct {
int fd;
int flags;
} mmap;
};
int fds[2];
#if AUDIT_DEBUG
int put_count;
int ino_count;
#endif
};
/* Map open(2)-style @flags to audit permission bits, filtered by @mask.
 * O_TRUNC and O_CREAT imply write intent on top of the access mode. */
static inline int open_arg(int flags, int mask)
{
	int perms = ACC_MODE(flags);

	if (flags & (O_TRUNC | O_CREAT))
		perms |= AUDIT_PERM_WRITE;

	return perms & mask;
}
/*
 * Decide whether the syscall recorded in @ctx matches the permission
 * @mask of an AUDIT_PERM rule, based on the syscall's audit class
 * (native vs 32-bit-on-biarch) or on its arguments for open/openat/
 * socketcall/execve.  Returns 1 on match, 0 otherwise.
 */
static int audit_match_perm(struct audit_context *ctx, int mask)
{
	unsigned n;

	if (unlikely(!ctx))
		return 0;
	n = ctx->major;

	switch (audit_classify_syscall(ctx->arch, n)) {
	case 0:	/* native */
		if ((mask & AUDIT_PERM_WRITE) &&
		     audit_match_class(AUDIT_CLASS_WRITE, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		     audit_match_class(AUDIT_CLASS_READ, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		     audit_match_class(AUDIT_CLASS_CHATTR, n))
			return 1;
		return 0;
	case 1: /* 32bit on biarch */
		if ((mask & AUDIT_PERM_WRITE) &&
		     audit_match_class(AUDIT_CLASS_WRITE_32, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		     audit_match_class(AUDIT_CLASS_READ_32, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		     audit_match_class(AUDIT_CLASS_CHATTR_32, n))
			return 1;
		return 0;
	case 2: /* open */
		/* derive read/write intent from the flags argument */
		return mask & ACC_MODE(ctx->argv[1]);
	case 3: /* openat */
		return mask & ACC_MODE(ctx->argv[2]);
	case 4: /* socketcall */
		return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);
	case 5: /* execve */
		return mask & AUDIT_PERM_EXEC;
	default:
		return 0;
	}
}
static int audit_match_filetype(struct audit_context *ctx, int which)
{
unsigned index = which & ~S_IFMT;
mode_t mode = which & S_IFMT;
if (unlikely(!ctx))
return 0;
if (index >= ctx->name_count)
return 0;
if (ctx->names[index].ino == -1)
return 0;
if ((ctx->names[index].mode ^ mode) & S_IFMT)
return 0;
return 1;
}
/*
* We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
* ->first_trees points to its beginning, ->trees - to the current end of data.
* ->tree_count is the number of free entries in array pointed to by ->trees.
* Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL,
* "empty" becomes (p, p, 31) afterwards. We don't shrink the list (and seriously,
* it's going to remain 1-element for almost any setup) until we free context itself.
* References in it _are_ dropped - at the same time we free/drop aux stuff.
*/
#ifdef CONFIG_AUDIT_TREE
/* Force this context to be recorded, unless a filter rule already
 * claimed it with a real priority. */
static void audit_set_auditable(struct audit_context *ctx)
{
	if (ctx->prio)
		return;

	ctx->prio = 1;
	ctx->current_state = AUDIT_RECORD_CONTEXT;
}
/*
 * Stash @chunk in the context's tree-refs chain.  Slots in each array
 * are filled from the top (index 30) down; when the current array is
 * full we advance to the next pre-allocated one.  Returns 1 on success,
 * 0 when there is no room (caller must grow_tree_refs() and retry).
 */
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
	struct audit_tree_refs *p = ctx->trees;
	int left = ctx->tree_count;

	if (likely(left)) {
		/* free slot in the current array */
		p->c[--left] = chunk;
		ctx->tree_count = left;
		return 1;
	}
	if (!p)
		return 0;
	/* current array full: move on to the next one, if it exists */
	p = p->next;
	if (p) {
		p->c[30] = chunk;
		ctx->trees = p;
		ctx->tree_count = 30;
		return 1;
	}
	return 0;
}
/* Append a fresh, empty refs array to the context's chain and make it
 * current.  Returns 1 on success, 0 if the allocation failed (the
 * chain is left untouched). */
static int grow_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *tail = ctx->trees;
	struct audit_tree_refs *fresh;

	fresh = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
	if (!fresh)
		return 0;

	if (tail)
		tail->next = fresh;
	else
		ctx->first_trees = fresh;

	ctx->trees = fresh;
	ctx->tree_count = 31;
	return 1;
}
#endif
/*
 * Roll the tree-refs chain back to the state (@p, @count): drop every
 * chunk reference stored after that point and restore ->trees /
 * ->tree_count.  (NULL, 0) means "drop everything".  The arrays
 * themselves are kept for reuse; free_tree_refs() releases them.
 */
static void unroll_tree_refs(struct audit_context *ctx,
		      struct audit_tree_refs *p, int count)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_tree_refs *q;
	int n;

	if (!p) {
		/* we started with empty chain */
		p = ctx->first_trees;
		count = 31;
		/* if the very first allocation has failed, nothing to do */
		if (!p)
			return;
	}
	n = count;
	/* full arrays between the checkpoint and the current one */
	for (q = p; q != ctx->trees; q = q->next, n = 31) {
		while (n--) {
			audit_put_chunk(q->c[n]);
			q->c[n] = NULL;
		}
	}
	/* partially-filled current array: only the slots used since */
	while (n-- > ctx->tree_count) {
		audit_put_chunk(q->c[n]);
		q->c[n] = NULL;
	}
	ctx->trees = p;
	ctx->tree_count = count;
#endif
}
/* Free every refs array in the chain; the chunk references themselves
 * must already have been dropped via unroll_tree_refs(). */
static void free_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *node = ctx->first_trees;

	while (node) {
		struct audit_tree_refs *next = node->next;

		kfree(node);
		node = next;
	}
}
/*
 * Return 1 if any chunk collected in this context belongs to @tree
 * (AUDIT_DIR rule match), 0 otherwise.  Full arrays are scanned
 * completely; the current array only from its first used slot
 * (slots fill from index 30 down to ->tree_count).
 */
static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_tree_refs *p;
	int n;

	if (!tree)
		return 0;
	/* full ones */
	for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
		for (n = 0; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
	/* partial */
	if (p) {
		for (n = ctx->tree_count; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
#endif
	return 0;
}
/* Determine if any context name data matches a rule's watch data */
/* Compare a task_struct with an audit_rule. Return 1 on match, 0
* otherwise.
*
* If task_creation is true, this is an explicit indication that we are
* filtering a task rule at task creation time. This and tsk == current are
* the only situations where tsk->cred may be accessed without an rcu read lock.
*/
/*
 * Evaluate one audit rule against a task (and optionally a syscall
 * context and/or a single collected name).  All fields of the rule
 * must match for the rule to fire.  On a match, the rule's filterkey
 * and priority are copied into the context (higher prio wins) and
 * *state is set from the rule's action.  Returns 1 on match, 0 not.
 */
static int audit_filter_rules(struct task_struct *tsk,
			      struct audit_krule *rule,
			      struct audit_context *ctx,
			      struct audit_names *name,
			      enum audit_state *state,
			      bool task_creation)
{
	const struct cred *cred;
	int i, j, need_sid = 1;
	u32 sid;

	cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);

	for (i = 0; i < rule->field_count; i++) {
		struct audit_field *f = &rule->fields[i];
		int result = 0;

		switch (f->type) {
		case AUDIT_PID:
			result = audit_comparator(tsk->pid, f->op, f->val);
			break;
		case AUDIT_PPID:
			if (ctx) {
				/* ppid is filled lazily on first use */
				if (!ctx->ppid)
					ctx->ppid = sys_getppid();
				result = audit_comparator(ctx->ppid, f->op, f->val);
			}
			break;
		case AUDIT_UID:
			result = audit_comparator(cred->uid, f->op, f->val);
			break;
		case AUDIT_EUID:
			result = audit_comparator(cred->euid, f->op, f->val);
			break;
		case AUDIT_SUID:
			result = audit_comparator(cred->suid, f->op, f->val);
			break;
		case AUDIT_FSUID:
			result = audit_comparator(cred->fsuid, f->op, f->val);
			break;
		case AUDIT_GID:
			result = audit_comparator(cred->gid, f->op, f->val);
			break;
		case AUDIT_EGID:
			result = audit_comparator(cred->egid, f->op, f->val);
			break;
		case AUDIT_SGID:
			result = audit_comparator(cred->sgid, f->op, f->val);
			break;
		case AUDIT_FSGID:
			result = audit_comparator(cred->fsgid, f->op, f->val);
			break;
		case AUDIT_PERS:
			result = audit_comparator(tsk->personality, f->op, f->val);
			break;
		case AUDIT_ARCH:
			if (ctx)
				result = audit_comparator(ctx->arch, f->op, f->val);
			break;

		case AUDIT_EXIT:
			if (ctx && ctx->return_valid)
				result = audit_comparator(ctx->return_code, f->op, f->val);
			break;
		case AUDIT_SUCCESS:
			if (ctx && ctx->return_valid) {
				if (f->val)
					result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS);
				else
					result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE);
			}
			break;
		case AUDIT_DEVMAJOR:
			/* match the given name, or any collected name */
			if (name)
				result = audit_comparator(MAJOR(name->dev),
							  f->op, f->val);
			else if (ctx) {
				for (j = 0; j < ctx->name_count; j++) {
					if (audit_comparator(MAJOR(ctx->names[j].dev),	f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_DEVMINOR:
			if (name)
				result = audit_comparator(MINOR(name->dev),
							  f->op, f->val);
			else if (ctx) {
				for (j = 0; j < ctx->name_count; j++) {
					if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_INODE:
			if (name)
				result = (name->ino == f->val);
			else if (ctx) {
				for (j = 0; j < ctx->name_count; j++) {
					if (audit_comparator(ctx->names[j].ino, f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_WATCH:
			if (name)
				result = audit_watch_compare(rule->watch, name->ino, name->dev);
			break;
		case AUDIT_DIR:
			if (ctx)
				result = match_tree_refs(ctx, rule->tree);
			break;
		case AUDIT_LOGINUID:
			result = 0;
			if (ctx)
				result = audit_comparator(tsk->loginuid, f->op, f->val);
			break;
		case AUDIT_SUBJ_USER:
		case AUDIT_SUBJ_ROLE:
		case AUDIT_SUBJ_TYPE:
		case AUDIT_SUBJ_SEN:
		case AUDIT_SUBJ_CLR:
			/* NOTE: this may return negative values indicating
			   a temporary error.  We simply treat this as a
			   match for now to avoid losing information that
			   may be wanted.   An error message will also be
			   logged upon error */
			if (f->lsm_rule) {
				if (need_sid) {
					/* fetch the task sid once per call */
					security_task_getsecid(tsk, &sid);
					need_sid = 0;
				}
				result = security_audit_rule_match(sid, f->type,
				                                  f->op,
				                                  f->lsm_rule,
				                                  ctx);
			}
			break;
		case AUDIT_OBJ_USER:
		case AUDIT_OBJ_ROLE:
		case AUDIT_OBJ_TYPE:
		case AUDIT_OBJ_LEV_LOW:
		case AUDIT_OBJ_LEV_HIGH:
			/* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
			   also applies here */
			if (f->lsm_rule) {
				/* Find files that match */
				if (name) {
					result = security_audit_rule_match(
					           name->osid, f->type, f->op,
					           f->lsm_rule, ctx);
				} else if (ctx) {
					for (j = 0; j < ctx->name_count; j++) {
						if (security_audit_rule_match(
						      ctx->names[j].osid,
						      f->type, f->op,
						      f->lsm_rule, ctx)) {
							++result;
							break;
						}
					}
				}
				/* Find ipc objects that match */
				if (!ctx || ctx->type != AUDIT_IPC)
					break;
				if (security_audit_rule_match(ctx->ipc.osid,
							      f->type, f->op,
							      f->lsm_rule, ctx))
					++result;
			}
			break;
		case AUDIT_ARG0:
		case AUDIT_ARG1:
		case AUDIT_ARG2:
		case AUDIT_ARG3:
			if (ctx)
				result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);
			break;
		case AUDIT_FILTERKEY:
			/* ignore this field for filtering */
			result = 1;
			break;
		case AUDIT_PERM:
			result = audit_match_perm(ctx, f->val);
			break;
		case AUDIT_FILETYPE:
			result = audit_match_filetype(ctx, f->val);
			break;
		}

		/* every field must match: first miss rejects the rule */
		if (!result)
			return 0;
	}

	if (ctx) {
		/* a higher-priority rule already decided this context */
		if (rule->prio <= ctx->prio)
			return 0;
		if (rule->filterkey) {
			kfree(ctx->filterkey);
			ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
		}
		ctx->prio = rule->prio;
	}
	switch (rule->action) {
	case AUDIT_NEVER:    *state = AUDIT_DISABLED;	    break;
	case AUDIT_ALWAYS:   *state = AUDIT_RECORD_CONTEXT; break;
	}
	return 1;
}
/* At process creation time, we can determine if system-call auditing is
* completely disabled for this task. Since we only have the task
* structure at this point, we can only check uid and gid.
*/
static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
{
struct audit_entry *e;
enum audit_state state;
rcu_read_lock();
list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
&state, true)) {
if (state == AUDIT_RECORD_CONTEXT)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
rcu_read_unlock();
return state;
}
}
rcu_read_unlock();
return AUDIT_BUILD_CONTEXT;
}
/* At syscall entry and exit time, this filter is called if the
* audit_state is not low enough that auditing cannot take place, but is
* also not high enough that we already know we have to write an audit
* record (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT).
*/
static enum audit_state audit_filter_syscall(struct task_struct *tsk,
struct audit_context *ctx,
struct list_head *list)
{
struct audit_entry *e;
enum audit_state state;
if (audit_pid && tsk->tgid == audit_pid)
return AUDIT_DISABLED;
rcu_read_lock();
if (!list_empty(list)) {
int word = AUDIT_WORD(ctx->major);
int bit = AUDIT_BIT(ctx->major);
list_for_each_entry_rcu(e, list, list) {
if ((e->rule.mask[word] & bit) == bit &&
audit_filter_rules(tsk, &e->rule, ctx, NULL,
&state, false)) {
rcu_read_unlock();
ctx->current_state = state;
return state;
}
}
}
rcu_read_unlock();
return AUDIT_BUILD_CONTEXT;
}
/* At syscall exit time, this filter is called if any audit_names[] have been
* collected during syscall processing. We only check rules in sublists at hash
* buckets applicable to the inode numbers in audit_names[].
* Regarding audit_state, same rules apply as for audit_filter_syscall().
*/
void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
{
int i;
struct audit_entry *e;
enum audit_state state;
if (audit_pid && tsk->tgid == audit_pid)
return;
rcu_read_lock();
for (i = 0; i < ctx->name_count; i++) {
int word = AUDIT_WORD(ctx->major);
int bit = AUDIT_BIT(ctx->major);
struct audit_names *n = &ctx->names[i];
int h = audit_hash_ino((u32)n->ino);
struct list_head *list = &audit_inode_hash[h];
if (list_empty(list))
continue;
list_for_each_entry_rcu(e, list, list) {
if ((e->rule.mask[word] & bit) == bit &&
audit_filter_rules(tsk, &e->rule, ctx, n,
&state, false)) {
rcu_read_unlock();
ctx->current_state = state;
return;
}
}
}
rcu_read_unlock();
}
/*
 * Detach the audit context from @tsk, recording whether @return_code is
 * valid.  Returns NULL if the task has no context.  When the task is
 * mid-syscall and the context is not a dummy, the exit-time filters run
 * here so current_state reflects this syscall.  The caller takes
 * ownership of the returned context (and must free it).
 */
static inline struct audit_context *audit_get_context(struct task_struct *tsk,
						      int return_valid,
						      long return_code)
{
	struct audit_context *context = tsk->audit_context;

	if (likely(!context))
		return NULL;
	context->return_valid = return_valid;

	/*
	 * we need to fix up the return code in the audit logs if the actual
	 * return codes are later going to be fixed up by the arch specific
	 * signal handlers
	 *
	 * This is actually a test for:
	 * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
	 * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
	 *
	 * but is faster than a bunch of ||
	 */
	if (unlikely(return_code <= -ERESTARTSYS) &&
	    (return_code >= -ERESTART_RESTARTBLOCK) &&
	    (return_code != -ENOIOCTLCMD))
		context->return_code = -EINTR;
	else
		context->return_code = return_code;

	/* Run the exit filters before handing the context back. */
	if (context->in_syscall && !context->dummy) {
		audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
		audit_filter_inodes(tsk, context);
	}

	/* Detach: ownership of the context transfers to the caller. */
	tsk->audit_context = NULL;
	return context;
}
/*
 * Release every pathname collected by getname() for this context and drop
 * the saved working-directory reference.  Resets name_count to zero.
 */
static inline void audit_free_names(struct audit_context *context)
{
	int i;

#if AUDIT_DEBUG == 2
	/* Sanity-check the put/ino accounting; on a mismatch, dump state
	 * and deliberately leak the names rather than risk a double
	 * __putname(). */
	if (context->put_count + context->ino_count != context->name_count) {
		printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
		       " name_count=%d put_count=%d"
		       " ino_count=%d [NOT freeing]\n",
		       __FILE__, __LINE__,
		       context->serial, context->major, context->in_syscall,
		       context->name_count, context->put_count,
		       context->ino_count);
		for (i = 0; i < context->name_count; i++) {
			printk(KERN_ERR "names[%d] = %p = %s\n", i,
			       context->names[i].name,
			       context->names[i].name ?: "(null)");
		}
		dump_stack();
		return;
	}
#endif
#if AUDIT_DEBUG
	context->put_count = 0;
	context->ino_count = 0;
#endif

	/* Only entries flagged name_put are owned by audit and get freed. */
	for (i = 0; i < context->name_count; i++) {
		if (context->names[i].name && context->names[i].name_put)
			__putname(context->names[i].name);
	}
	context->name_count = 0;
	path_put(&context->pwd);
	context->pwd.dentry = NULL;
	context->pwd.mnt = NULL;
}
/* Free every entry on both singly-linked aux lists of @context. */
static inline void audit_free_aux(struct audit_context *context)
{
	struct audit_aux_data *cursor;

	for (cursor = context->aux; cursor; cursor = context->aux) {
		context->aux = cursor->next;
		kfree(cursor);
	}
	for (cursor = context->aux_pids; cursor; cursor = context->aux_pids) {
		context->aux_pids = cursor->next;
		kfree(cursor);
	}
}
/* Wipe @context and set its initial filtering state and priority. */
static inline void audit_zero_context(struct audit_context *context,
				      enum audit_state state)
{
	memset(context, 0, sizeof(*context));
	context->state = state;
	/* A forced-record context outranks every rule priority. */
	if (state == AUDIT_RECORD_CONTEXT)
		context->prio = ~0ULL;
	else
		context->prio = 0;
}
/* Allocate and initialize a fresh audit context, or return NULL on OOM. */
static inline struct audit_context *audit_alloc_context(enum audit_state state)
{
	struct audit_context *ctx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	audit_zero_context(ctx, state);
	INIT_LIST_HEAD(&ctx->killed_trees);
	return ctx;
}
/**
* audit_alloc - allocate an audit context block for a task
* @tsk: task
*
* Filter on the task information and allocate a per-task audit context
* if necessary. Doing so turns on system call auditing for the
* specified task. This is called from copy_process, so no lock is
* needed.
*/
int audit_alloc(struct task_struct *tsk)
{
	struct audit_context *context;
	enum audit_state state;
	char *key = NULL;

	/* Nothing to do if auditing has never been switched on. */
	if (likely(!audit_ever_enabled))
		return 0;

	state = audit_filter_task(tsk, &key);
	if (likely(state == AUDIT_DISABLED))
		return 0;

	context = audit_alloc_context(state);
	if (!context) {
		kfree(key);
		audit_log_lost("out of memory in audit_alloc");
		return -ENOMEM;
	}
	context->filterkey = key;

	tsk->audit_context = context;
	set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
	return 0;
}
/*
 * Free @context and every older context chained through ->previous
 * (left over from nested syscall entries).  More than one context on the
 * chain indicates unbalanced entry/exit, so it is reported (capped at 10
 * per-context messages).
 */
static inline void audit_free_context(struct audit_context *context)
{
	struct audit_context *previous;
	int		     count = 0;

	do {
		previous = context->previous;
		if (previous || (count &&  count < 10)) {
			++count;
			printk(KERN_ERR "audit(:%d): major=%d name_count=%d:"
			       " freeing multiple contexts (%d)\n",
			       context->serial, context->major,
			       context->name_count, count);
		}
		/* Release everything the context owns before the struct. */
		audit_free_names(context);
		unroll_tree_refs(context, NULL, 0);
		free_tree_refs(context);
		audit_free_aux(context);
		kfree(context->filterkey);
		kfree(context->sockaddr);
		kfree(context);
		context  = previous;
	} while (context);
	if (count >= 10)
		printk(KERN_ERR "audit: freed %d contexts\n", count);
}
/*
 * Append " subj=<security context>" for the current task to @ab.
 * Silently does nothing when the task has no secid; -EINVAL from the
 * secid-to-string conversion is ignored, any other error triggers
 * audit_panic() per policy.
 */
void audit_log_task_context(struct audit_buffer *ab)
{
	char *ctx = NULL;
	unsigned len;
	int error;
	u32 sid;

	security_task_getsecid(current, &sid);
	if (!sid)
		return;

	error = security_secid_to_secctx(sid, &ctx, &len);
	if (error) {
		if (error != -EINVAL)
			audit_panic("error in audit_log_task_context");
		return;
	}

	audit_log_format(ab, " subj=%s", ctx);
	security_release_secctx(ctx, len);
}
EXPORT_SYMBOL(audit_log_task_context);
/*
 * Append the task's command name, the path of its executable (first
 * VM_EXECUTABLE-mapped file, if any) and its LSM subject context to @ab.
 */
static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
{
	char name[sizeof(tsk->comm)];
	struct mm_struct *mm = tsk->mm;
	struct vm_area_struct *vma;

	/* tsk == current */

	get_task_comm(name, tsk);
	audit_log_format(ab, " comm=");
	/* comm may contain arbitrary bytes; escape as untrusted. */
	audit_log_untrustedstring(ab, name);

	if (mm) {
		down_read(&mm->mmap_sem);
		/* Walk the VMA list looking for the mapped executable. */
		vma = mm->mmap;
		while (vma) {
			if ((vma->vm_flags & VM_EXECUTABLE) &&
			    vma->vm_file) {
				audit_log_d_path(ab, "exe=",
						 &vma->vm_file->f_path);
				break;
			}
			vma = vma->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	audit_log_task_context(ab);
}
/*
 * Emit an AUDIT_OBJ_PID record describing a target process (pid, audit
 * uid, uid, session, security context, comm).  Returns 1 when the secid
 * could not be converted to a string (callers escalate to audit_panic
 * policy), 0 otherwise — including when the record could not be started.
 */
static int audit_log_pid_context(struct audit_context *context, pid_t pid,
				 uid_t auid, uid_t uid, unsigned int sessionid,
				 u32 sid, char *comm)
{
	struct audit_buffer *ab;
	char *ctx = NULL;
	u32 len;
	int rc = 0;

	ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID);
	if (!ab)
		return rc;

	audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, auid,
			 uid, sessionid);
	if (security_secid_to_secctx(sid, &ctx, &len)) {
		audit_log_format(ab, " obj=(none)");
		rc = 1;
	} else {
		audit_log_format(ab, " obj=%s", ctx);
		security_release_secctx(ctx, len);
	}
	/* comm may contain arbitrary bytes; escape as untrusted. */
	audit_log_format(ab, " ocomm=");
	audit_log_untrustedstring(ab, comm);
	audit_log_end(ab);

	return rc;
}
/*
* to_send and len_sent accounting are very loose estimates. We aren't
* really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
* within about 500 bytes (next page boundary)
*
* why snprintf? an int is up to 12 digits long. if we just assumed when
* logging that a[%d]= was going to be 16 characters long we would be wasting
* space in every audit message. In one 7500 byte message we can log up to
* about 1000 min size arguments. That comes down to about 50% waste of space
* if we didn't do the snprintf to find out how long arg_num_len was.
*/
/*
 * Log one execve argument, splitting it across multiple AUDIT_EXECVE
 * records when it exceeds MAX_EXECVE_AUDIT_LEN (half that if it contains
 * control characters, which are hex-encoded at 2 output bytes each).
 * Returns the number of user-space bytes consumed (string length plus the
 * trailing NUL) or -1 on an unexpected fault (also SIGKILLs the task,
 * since the strings were copied into this mm by the kernel itself).
 */
static int audit_log_single_execve_arg(struct audit_context *context,
					struct audit_buffer **ab,
					int arg_num,
					size_t *len_sent,
					const char __user *p,
					char *buf)
{
	char arg_num_len_buf[12];
	const char __user *tmp_p = p;
	/* how many digits are in arg_num? 5 is the length of ' a=""' */
	size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
	size_t len, len_left, to_send;
	size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
	unsigned int i, has_cntl = 0, too_long = 0;
	int ret;

	/* strnlen_user includes the null we don't want to send */
	len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;

	/*
	 * We just created this mm, if we can't find the strings
	 * we just copied into it something is _very_ wrong. Similar
	 * for strings that are too long, we should not have created
	 * any.
	 */
	/* len is size_t; strnlen_user() returning 0 (fault) became (size_t)-1
	 * after the subtraction above, which is what "len == -1" catches. */
	if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
		WARN_ON(1);
		send_sig(SIGKILL, current, 0);
		return -1;
	}

	/* walk the whole argument looking for non-ascii chars */
	do {
		if (len_left > MAX_EXECVE_AUDIT_LEN)
			to_send = MAX_EXECVE_AUDIT_LEN;
		else
			to_send = len_left;
		ret = copy_from_user(buf, tmp_p, to_send);
		/*
		 * There is no reason for this copy to be short. We just
		 * copied them here, and the mm hasn't been exposed to user-
		 * space yet.
		 */
		if (ret) {
			WARN_ON(1);
			send_sig(SIGKILL, current, 0);
			return -1;
		}
		buf[to_send] = '\0';
		has_cntl = audit_string_contains_control(buf, to_send);
		if (has_cntl) {
			/*
			 * hex messages get logged as 2 bytes, so we can only
			 * send half as much in each message
			 */
			max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
			break;
		}
		len_left -= to_send;
		tmp_p += to_send;
	} while (len_left > 0);

	len_left = len;

	if (len > max_execve_audit_len)
		too_long = 1;

	/* rewalk the argument actually logging the message */
	for (i = 0; len_left > 0; i++) {
		int room_left;

		if (len_left > max_execve_audit_len)
			to_send = max_execve_audit_len;
		else
			to_send = len_left;

		/* do we have space left to send this argument in this ab? */
		room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
		if (has_cntl)
			room_left -= (to_send * 2);
		else
			room_left -= to_send;
		if (room_left < 0) {
			/* Out of room: finish this record and start a fresh
			 * AUDIT_EXECVE record for the remainder. */
			*len_sent = 0;
			audit_log_end(*ab);
			*ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
			if (!*ab)
				return 0;
		}

		/*
		 * first record needs to say how long the original string was
		 * so we can be sure nothing was lost.
		 */
		if ((i == 0) && (too_long))
			audit_log_format(*ab, " a%d_len=%zu", arg_num,
					 has_cntl ? 2*len : len);

		/*
		 * normally arguments are small enough to fit and we already
		 * filled buf above when we checked for control characters
		 * so don't bother with another copy_from_user
		 */
		if (len >= max_execve_audit_len)
			ret = copy_from_user(buf, p, to_send);
		else
			ret = 0;
		if (ret) {
			WARN_ON(1);
			send_sig(SIGKILL, current, 0);
			return -1;
		}
		buf[to_send] = '\0';

		/* actually log it */
		audit_log_format(*ab, " a%d", arg_num);
		if (too_long)
			audit_log_format(*ab, "[%d]", i);
		audit_log_format(*ab, "=");
		if (has_cntl)
			audit_log_n_hex(*ab, buf, to_send);
		else
			audit_log_string(*ab, buf);

		p += to_send;
		len_left -= to_send;
		*len_sent += arg_num_len;
		if (has_cntl)
			*len_sent += to_send * 2;
		else
			*len_sent += to_send;
	}
	/* include the null we didn't log */
	return len + 1;
}
/*
 * Log all execve arguments for @axi into AUDIT_EXECVE record(s) via
 * audit_log_single_execve_arg().  Only runs when the execve succeeded
 * (current->mm was replaced by axi->mm); otherwise the saved argument
 * pointers are not valid for this task.
 */
static void audit_log_execve_info(struct audit_context *context,
				  struct audit_buffer **ab,
				  struct audit_aux_data_execve *axi)
{
	int i, len;
	size_t len_sent = 0;
	const char __user *p;
	char *buf;

	if (axi->mm != current->mm)
		return; /* execve failed, no additional info */

	p = (const char __user *)axi->mm->arg_start;

	audit_log_format(*ab, "argc=%d", axi->argc);

	/*
	 * we need some kernel buffer to hold the userspace args.  Just
	 * allocate one big one rather than allocating one of the right size
	 * for every single argument inside audit_log_single_execve_arg()
	 * should be <8k allocation so should be pretty safe.
	 */
	buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
	if (!buf) {
		audit_panic("out of memory for argv string\n");
		return;
	}

	for (i = 0; i < axi->argc; i++) {
		len = audit_log_single_execve_arg(context, ab, i,
						  &len_sent, p, buf);
		/* BUGFIX: len must be signed.  The helper returns -1 on
		 * error; stored in the old "size_t len" that wrapped to
		 * SIZE_MAX, so "len <= 0" never caught the failure and
		 * "p += len" jumped wildly.  As an int the check works. */
		if (len <= 0)
			break;
		p += len;
	}
	kfree(buf);
}
/*
 * Append " <prefix>=<hex>" to @ab with the capability mask printed as
 * _KERNEL_CAPABILITY_U32S zero-padded 32-bit words, most-significant
 * word first (hence the reversed index inside the loop).
 */
static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
{
	int i;

	audit_log_format(ab, " %s=", prefix);
	CAP_FOR_EACH_U32(i) {
		audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
	}
}
/*
 * Append the file-capability fields of @name to @ab.  The cap_fe/cap_fver
 * pair is only emitted when at least one capability set is non-empty, so
 * files without fcaps add nothing to the record.
 */
static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
{
	int logged = 0;

	if (!cap_isclear(name->fcap.permitted)) {
		audit_log_cap(ab, "cap_fp", &name->fcap.permitted);
		logged = 1;
	}
	if (!cap_isclear(name->fcap.inheritable)) {
		audit_log_cap(ab, "cap_fi", &name->fcap.inheritable);
		logged = 1;
	}
	if (logged)
		audit_log_format(ab, " cap_fe=%d cap_fver=%x",
				 name->fcap.fE, name->fcap_ver);
}
static void show_special(struct audit_context *context, int *call_panic)
{
struct audit_buffer *ab;
int i;
ab = audit_log_start(context, GFP_KERNEL, context->type);
if (!ab)
return;
switch (context->type) {
case AUDIT_SOCKETCALL: {
int nargs = context->socketcall.nargs;
audit_log_format(ab, "nargs=%d", nargs);
for (i = 0; i < nargs; i++)
audit_log_format(ab, " a%d=%lx", i,
context->socketcall.args[i]);
break; }
case AUDIT_IPC: {
u32 osid = context->ipc.osid;
audit_log_format(ab, "ouid=%u ogid=%u mode=%#o",
context->ipc.uid, context->ipc.gid, context->ipc.mode);
if (osid) {
char *ctx = NULL;
u32 len;
if (security_secid_to_secctx(osid, &ctx, &len)) {
audit_log_format(ab, " osid=%u", osid);
*call_panic = 1;
} else {
audit_log_format(ab, " obj=%s", ctx);
security_release_secctx(ctx, len);
}
}
if (context->ipc.has_perm) {
audit_log_end(ab);
ab = audit_log_start(context, GFP_KERNEL,
AUDIT_IPC_SET_PERM);
audit_log_format(ab,
"qbytes=%lx ouid=%u ogid=%u mode=%#o",
context->ipc.qbytes,
context->ipc.perm_uid,
context->ipc.perm_gid,
context->ipc.perm_mode);
if (!ab)
return;
}
break; }
case AUDIT_MQ_OPEN: {
audit_log_format(ab,
"oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld "
"mq_msgsize=%ld mq_curmsgs=%ld",
context->mq_open.oflag, context->mq_open.mode,
context->mq_open.attr.mq_flags,
context->mq_open.attr.mq_maxmsg,
context->mq_open.attr.mq_msgsize,
context->mq_open.attr.mq_curmsgs);
break; }
case AUDIT_MQ_SENDRECV: {
audit_log_format(ab,
"mqdes=%d msg_len=%zd msg_prio=%u "
"abs_timeout_sec=%ld abs_timeout_nsec=%ld",
context->mq_sendrecv.mqdes,
context->mq_sendrecv.msg_len,
context->mq_sendrecv.msg_prio,
context->mq_sendrecv.abs_timeout.tv_sec,
context->mq_sendrecv.abs_timeout.tv_nsec);
break; }
case AUDIT_MQ_NOTIFY: {
audit_log_format(ab, "mqdes=%d sigev_signo=%d",
context->mq_notify.mqdes,
context->mq_notify.sigev_signo);
break; }
case AUDIT_MQ_GETSETATTR: {
struct mq_attr *attr = &context->mq_getsetattr.mqstat;
audit_log_format(ab,
"mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld "
"mq_curmsgs=%ld ",
context->mq_getsetattr.mqdes,
attr->mq_flags, attr->mq_maxmsg,
attr->mq_msgsize, attr->mq_curmsgs);
break; }
case AUDIT_CAPSET: {
audit_log_format(ab, "pid=%d", context->capset.pid);
audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable);
audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted);
audit_log_cap(ab, "cap_pe", &context->capset.cap.effective);
break; }
case AUDIT_MMAP: {
audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
context->mmap.flags);
break; }
}
audit_log_end(ab);
}
/*
 * Write out every record for one completed syscall: the AUDIT_SYSCALL
 * record itself, then aux records (execve args, bprm fcaps), the
 * "special" record if any, fd pairs, sockaddr, target-pid records, the
 * cwd and one AUDIT_PATH record per collected name, finishing with an
 * AUDIT_EOE marker.  tsk must be current.
 */
static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
{
	const struct cred *cred;
	int i, call_panic = 0;
	struct audit_buffer *ab;
	struct audit_aux_data *aux;
	const char *tty;

	/* tsk == current */
	context->pid = tsk->pid;
	if (!context->ppid)
		context->ppid = sys_getppid();
	/* Snapshot the task's credentials into the context for logging. */
	cred = current_cred();
	context->uid = cred->uid;
	context->gid = cred->gid;
	context->euid = cred->euid;
	context->suid = cred->suid;
	context->fsuid = cred->fsuid;
	context->egid = cred->egid;
	context->sgid = cred->sgid;
	context->fsgid = cred->fsgid;
	context->personality = tsk->personality;

	ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
	if (!ab)
		return;		/* audit_panic has been called */
	audit_log_format(ab, "arch=%x syscall=%d",
			 context->arch, context->major);
	if (context->personality != PER_LINUX)
		audit_log_format(ab, " per=%lx", context->personality);
	if (context->return_valid)
		audit_log_format(ab, " success=%s exit=%ld",
				 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
				 context->return_code);

	/* NOTE(review): tty points into the tty structure and is used after
	 * siglock is dropped — assumes the name stays valid for the rest of
	 * this function; confirm against tty lifetime rules. */
	spin_lock_irq(&tsk->sighand->siglock);
	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
		tty = tsk->signal->tty->name;
	else
		tty = "(none)";
	spin_unlock_irq(&tsk->sighand->siglock);

	audit_log_format(ab,
		  " a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
		  " ppid=%d pid=%d auid=%u uid=%u gid=%u"
		  " euid=%u suid=%u fsuid=%u"
		  " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
		  context->argv[0],
		  context->argv[1],
		  context->argv[2],
		  context->argv[3],
		  context->name_count,
		  context->ppid,
		  context->pid,
		  tsk->loginuid,
		  context->uid,
		  context->gid,
		  context->euid, context->suid, context->fsuid,
		  context->egid, context->sgid, context->fsgid, tty,
		  tsk->sessionid);

	audit_log_task_info(ab, tsk);
	audit_log_key(ab, context->filterkey);
	audit_log_end(ab);

	/* One record per aux entry collected during the syscall. */
	for (aux = context->aux; aux; aux = aux->next) {

		ab = audit_log_start(context, GFP_KERNEL, aux->type);
		if (!ab)
			continue; /* audit_panic has been called */

		switch (aux->type) {

		case AUDIT_EXECVE: {
			struct audit_aux_data_execve *axi = (void *)aux;
			audit_log_execve_info(context, &ab, axi);
			break; }

		case AUDIT_BPRM_FCAPS: {
			struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
			audit_log_format(ab, "fver=%x", axs->fcap_ver);
			audit_log_cap(ab, "fp", &axs->fcap.permitted);
			audit_log_cap(ab, "fi", &axs->fcap.inheritable);
			audit_log_format(ab, " fe=%d", axs->fcap.fE);
			audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted);
			audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable);
			audit_log_cap(ab, "old_pe", &axs->old_pcap.effective);
			audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted);
			audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable);
			audit_log_cap(ab, "new_pe", &axs->new_pcap.effective);
			break; }

		}
		audit_log_end(ab);
	}

	if (context->type)
		show_special(context, &call_panic);

	if (context->fds[0] >= 0) {
		ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR);
		if (ab) {
			audit_log_format(ab, "fd0=%d fd1=%d",
					context->fds[0], context->fds[1]);
			audit_log_end(ab);
		}
	}

	if (context->sockaddr_len) {
		ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR);
		if (ab) {
			audit_log_format(ab, "saddr=");
			audit_log_n_hex(ab, (void *)context->sockaddr,
					context->sockaddr_len);
			audit_log_end(ab);
		}
	}

	/* Records for processes targeted by signals during this syscall. */
	for (aux = context->aux_pids; aux; aux = aux->next) {
		struct audit_aux_data_pids *axs = (void *)aux;

		for (i = 0; i < axs->pid_count; i++)
			if (audit_log_pid_context(context, axs->target_pid[i],
						  axs->target_auid[i],
						  axs->target_uid[i],
						  axs->target_sessionid[i],
						  axs->target_sid[i],
						  axs->target_comm[i]))
				call_panic = 1;
	}

	if (context->target_pid &&
	    audit_log_pid_context(context, context->target_pid,
				  context->target_auid, context->target_uid,
				  context->target_sessionid,
				  context->target_sid, context->target_comm))
			call_panic = 1;

	if (context->pwd.dentry && context->pwd.mnt) {
		ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
		if (ab) {
			audit_log_d_path(ab, "cwd=", &context->pwd);
			audit_log_end(ab);
		}
	}
	/* One AUDIT_PATH record per collected name. */
	for (i = 0; i < context->name_count; i++) {
		struct audit_names *n = &context->names[i];

		ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
		if (!ab)
			continue; /* audit_panic has been called */

		audit_log_format(ab, "item=%d", i);

		if (n->name) {
			switch(n->name_len) {
			case AUDIT_NAME_FULL:
				/* log the full path */
				audit_log_format(ab, " name=");
				audit_log_untrustedstring(ab, n->name);
				break;
			case 0:
				/* name was specified as a relative path and the
				 * directory component is the cwd */
				audit_log_d_path(ab, "name=", &context->pwd);
				break;
			default:
				/* log the name's directory component */
				audit_log_format(ab, " name=");
				audit_log_n_untrustedstring(ab, n->name,
							    n->name_len);
			}
		} else
			audit_log_format(ab, " name=(null)");

		/* ino of -1 means no inode data was captured for this name. */
		if (n->ino != (unsigned long)-1) {
			audit_log_format(ab, " inode=%lu"
					 " dev=%02x:%02x mode=%#o"
					 " ouid=%u ogid=%u rdev=%02x:%02x",
					 n->ino,
					 MAJOR(n->dev),
					 MINOR(n->dev),
					 n->mode,
					 n->uid,
					 n->gid,
					 MAJOR(n->rdev),
					 MINOR(n->rdev));
		}
		if (n->osid != 0) {
			char *ctx = NULL;
			u32 len;
			if (security_secid_to_secctx(
				n->osid, &ctx, &len)) {
				audit_log_format(ab, " osid=%u", n->osid);
				call_panic = 2;
			} else {
				audit_log_format(ab, " obj=%s", ctx);
				security_release_secctx(ctx, len);
			}
		}

		audit_log_fcaps(ab, n);

		audit_log_end(ab);
	}

	/* Send end of event record to help user space know we are finished */
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
	if (ab)
		audit_log_end(ab);
	if (call_panic)
		audit_panic("error converting sid to string");
}
/**
* audit_free - free a per-task audit context
* @tsk: task whose audit context block to free
*
* Called from copy_process and do_exit
*/
void audit_free(struct task_struct *tsk)
{
	struct audit_context *context;

	/* Detach the context from tsk; exit filters run inside. */
	context = audit_get_context(tsk, 0, 0);
	if (likely(!context))
		return;

	/* Check for system calls that do not go through the exit
	 * function (e.g., exit_group), then free context block.
	 * We use GFP_ATOMIC here because we might be doing this
	 * in the context of the idle thread */
	/* that can happen only if we are called from do_exit() */
	if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
		audit_log_exit(context, tsk);
	if (!list_empty(&context->killed_trees))
		audit_kill_trees(&context->killed_trees);

	audit_free_context(context);
}
/**
* audit_syscall_entry - fill in an audit record at syscall entry
* @arch: architecture type
* @major: major syscall type (function)
* @a1: additional syscall register 1
* @a2: additional syscall register 2
* @a3: additional syscall register 3
* @a4: additional syscall register 4
*
* Fill in audit context at syscall entry. This only happens if the
* audit context was created when the task was created and the state or
* filters demand the audit context be built. If the state from the
* per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT,
* then the record will be written at syscall exit time (otherwise, it
* will only be written if another part of the kernel requests that it
* be written).
*/
void audit_syscall_entry(int arch, int major,
			 unsigned long a1, unsigned long a2,
			 unsigned long a3, unsigned long a4)
{
	struct task_struct *tsk = current;
	struct audit_context *context = tsk->audit_context;
	enum audit_state state;

	if (unlikely(!context))
		return;

	/*
	 * This happens only on certain architectures that make system
	 * calls in kernel_thread via the entry.S interface, instead of
	 * with direct calls.  (If you are porting to a new
	 * architecture, hitting this condition can indicate that you
	 * got the _exit/_leave calls backward in entry.S.)
	 *
	 * i386     no
	 * x86_64   no
	 * ppc64    yes (see arch/powerpc/platforms/iseries/misc.S)
	 *
	 * This also happens with vm86 emulation in a non-nested manner
	 * (entries without exits), so this case must be caught.
	 */
	if (context->in_syscall) {
		struct audit_context *newctx;

#if AUDIT_DEBUG
		printk(KERN_ERR
		       "audit(:%d) pid=%d in syscall=%d;"
		       " entering syscall=%d\n",
		       context->serial, tsk->pid, context->major, major);
#endif
		newctx = audit_alloc_context(context->state);
		if (newctx) {
			/* Push the in-flight context; audit_syscall_exit()
			 * pops it via ->previous. */
			newctx->previous = context;
			context = newctx;
			tsk->audit_context = newctx;
		} else {
			/* If we can't alloc a new context, the best we
			 * can do is to leak memory (any pending putname
			 * will be lost).  The only other alternative is
			 * to abandon auditing. */
			audit_zero_context(context, context->state);
		}
	}
	BUG_ON(context->in_syscall || context->name_count);

	if (!audit_enabled)
		return;

	context->arch	    = arch;
	context->major      = major;
	context->argv[0]    = a1;
	context->argv[1]    = a2;
	context->argv[2]    = a3;
	context->argv[3]    = a4;

	state = context->state;
	/* A dummy context has no matching rules; skip entry filtering. */
	context->dummy = !audit_n_rules;
	if (!context->dummy && state == AUDIT_BUILD_CONTEXT) {
		context->prio = 0;
		state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]);
	}
	if (likely(state == AUDIT_DISABLED))
		return;

	/* serial is assigned lazily by the first audit_log_start(). */
	context->serial     = 0;
	context->ctime      = CURRENT_TIME;
	context->in_syscall = 1;
	context->current_state  = state;
	context->ppid       = 0;
}
/*
 * Copy the parent's in-flight syscall audit state into a freshly forked
 * child, so the fork itself is recorded against the child as well.  Only
 * runs when the parent is mid-syscall and that syscall will be recorded.
 */
void audit_finish_fork(struct task_struct *child)
{
	struct audit_context *parent_ctx = current->audit_context;
	struct audit_context *child_ctx = child->audit_context;

	if (!child_ctx || !parent_ctx)
		return;
	if (!parent_ctx->in_syscall ||
	    parent_ctx->current_state != AUDIT_RECORD_CONTEXT)
		return;

	child_ctx->arch = parent_ctx->arch;
	child_ctx->major = parent_ctx->major;
	memcpy(child_ctx->argv, parent_ctx->argv, sizeof(parent_ctx->argv));
	child_ctx->ctime = parent_ctx->ctime;
	child_ctx->dummy = parent_ctx->dummy;
	child_ctx->in_syscall = parent_ctx->in_syscall;
	/* The child needs its own copy of the filter key. */
	child_ctx->filterkey = kstrdup(parent_ctx->filterkey, GFP_KERNEL);
	child_ctx->ppid = current->pid;
	child_ctx->prio = parent_ctx->prio;
	child_ctx->current_state = parent_ctx->current_state;
}
/**
 * audit_syscall_exit - deallocate audit context after a system call
 * @valid: success/failure flag
 * @return_code: syscall return value
 *
 * Tear down after system call.  If the audit context has been marked as
 * auditable (either because of the AUDIT_RECORD_CONTEXT state from
 * filtering, or because some other part of the kernel wrote an audit
 * message), then write out the syscall information.  In all cases,
 * free the names stored from getname().
 */
void audit_syscall_exit(int valid, long return_code)
{
	struct task_struct *tsk = current;
	struct audit_context *context;

	/* Detaches the context; exit filters run inside. */
	context = audit_get_context(tsk, valid, return_code);

	if (likely(!context))
		return;

	if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
		audit_log_exit(context, tsk);

	context->in_syscall = 0;
	context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;

	if (!list_empty(&context->killed_trees))
		audit_kill_trees(&context->killed_trees);

	if (context->previous) {
		/* Pop a context pushed by a nested audit_syscall_entry(). */
		struct audit_context *new_context = context->previous;
		context->previous  = NULL;
		audit_free_context(context);
		tsk->audit_context = new_context;
	} else {
		/* Reset the context in place for reuse on the next syscall. */
		audit_free_names(context);
		unroll_tree_refs(context, NULL, 0);
		audit_free_aux(context);
		context->aux = NULL;
		context->aux_pids = NULL;
		context->target_pid = 0;
		context->target_sid = 0;
		context->sockaddr_len = 0;
		context->type = 0;
		context->fds[0] = -1;
		/* Keep the filterkey only for always-record contexts. */
		if (context->state != AUDIT_RECORD_CONTEXT) {
			kfree(context->filterkey);
			context->filterkey = NULL;
		}
		tsk->audit_context = context;
	}
}
/*
 * Record a watched-tree reference for a single inode.  On ref-table
 * overflow with no memory to grow it, the context is force-marked
 * auditable and the refs gathered since (@p, @count) are rolled back.
 * No-op unless CONFIG_AUDIT_TREE.
 */
static inline void handle_one(const struct inode *inode)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_context *context;
	struct audit_tree_refs *p;
	struct audit_chunk *chunk;
	int count;
	/* Fast path: no fsnotify marks means no tree watch on this inode. */
	if (likely(hlist_empty(&inode->i_fsnotify_marks)))
		return;
	context = current->audit_context;
	p = context->trees;
	count = context->tree_count;
	rcu_read_lock();
	chunk = audit_tree_lookup(inode);
	rcu_read_unlock();
	if (!chunk)
		return;
	if (likely(put_tree_ref(context, chunk)))
		return;
	/* Ref table full: try to grow it, then retry the put. */
	if (unlikely(!grow_tree_refs(context))) {
		printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
		audit_set_auditable(context);
		audit_put_chunk(chunk);
		unroll_tree_refs(context, p, count);
		return;
	}
	put_tree_ref(context, chunk);
#endif
}
/*
 * Walk from @dentry up to its filesystem root, collecting tree-watch
 * references for every marked inode on the path.  rename_lock's seqcount
 * detects a concurrent rename, in which case the partial refs are
 * unrolled and the whole walk restarts.  No-op unless CONFIG_AUDIT_TREE.
 */
static void handle_path(const struct dentry *dentry)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_context *context;
	struct audit_tree_refs *p;
	const struct dentry *d, *parent;
	struct audit_chunk *drop;
	unsigned long seq;
	int count;

	context = current->audit_context;
	p = context->trees;
	count = context->tree_count;
retry:
	drop = NULL;
	d = dentry;
	rcu_read_lock();
	seq = read_seqbegin(&rename_lock);
	for(;;) {
		struct inode *inode = d->d_inode;
		if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) {
			struct audit_chunk *chunk;
			chunk = audit_tree_lookup(inode);
			if (chunk) {
				if (unlikely(!put_tree_ref(context, chunk))) {
					/* Ref table full; remember the chunk
					 * so we can grow and retry below. */
					drop = chunk;
					break;
				}
			}
		}
		parent = d->d_parent;
		if (parent == d)
			break;
		d = parent;
	}
	if (unlikely(read_seqretry(&rename_lock, seq) || drop)) {  /* in this order */
		rcu_read_unlock();
		if (!drop) {
			/* just a race with rename */
			unroll_tree_refs(context, p, count);
			goto retry;
		}
		audit_put_chunk(drop);
		if (grow_tree_refs(context)) {
			/* OK, got more space */
			unroll_tree_refs(context, p, count);
			goto retry;
		}
		/* too bad */
		printk(KERN_WARNING
			"out of memory, audit has lost a tree reference\n");
		unroll_tree_refs(context, p, count);
		audit_set_auditable(context);
		return;
	}
	rcu_read_unlock();
#endif
}
/**
* audit_getname - add a name to the list
* @name: name to add
*
* Add a name to the list of audit names for this context.
* Called from fs/namei.c:getname().
*/
void __audit_getname(const char *name)
{
	struct audit_context *context = current->audit_context;

	if (IS_ERR(name) || !name)
		return;

	if (!context->in_syscall) {
#if AUDIT_DEBUG == 2
		printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n",
		       __FILE__, __LINE__, context->serial, name);
		dump_stack();
#endif
		return;
	}
	BUG_ON(context->name_count >= AUDIT_NAMES);
	/* Take ownership of the name: name_put means audit_free_names()
	 * will __putname() it at syscall exit. */
	context->names[context->name_count].name = name;
	context->names[context->name_count].name_len = AUDIT_NAME_FULL;
	context->names[context->name_count].name_put = 1;
	/* ino of -1 marks "no inode info yet"; __audit_inode() fills it. */
	context->names[context->name_count].ino = (unsigned long)-1;
	context->names[context->name_count].osid = 0;
	++context->name_count;
	/* Capture the cwd once so relative names can be resolved in logs. */
	if (!context->pwd.dentry)
		get_fs_pwd(current->fs, &context->pwd);
}
/* audit_putname - intercept a putname request
* @name: name to intercept and delay for putname
*
* If we have stored the name from getname in the audit context,
* then we delay the putname until syscall exit.
* Called from include/linux/fs.h:putname().
*/
void audit_putname(const char *name)
{
	struct audit_context *context = current->audit_context;

	BUG_ON(!context);
	if (!context->in_syscall) {
		/* Not collecting for a syscall: free the name right away. */
#if AUDIT_DEBUG == 2
		printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
		       __FILE__, __LINE__, context->serial, name);
		if (context->name_count) {
			int i;
			for (i = 0; i < context->name_count; i++)
				printk(KERN_ERR "name[%d] = %p = %s\n", i,
				       context->names[i].name,
				       context->names[i].name ?: "(null)");
		}
#endif
		__putname(name);
	}
#if AUDIT_DEBUG
	else {
		/* Name stays on the context until syscall exit; just track
		 * the put for the accounting check in audit_free_names(). */
		++context->put_count;
		if (context->put_count > context->name_count) {
			printk(KERN_ERR "%s:%d(:%d): major=%d"
			       " in_syscall=%d putname(%p) name_count=%d"
			       " put_count=%d\n",
			       __FILE__, __LINE__,
			       context->serial, context->major,
			       context->in_syscall, name, context->name_count,
			       context->put_count);
			dump_stack();
		}
	}
#endif
}
/*
 * Reserve one more slot in context->names.  Returns 0 on success; returns
 * 1 (and logs which inode's data is being dropped) when the table is full.
 */
static int audit_inc_name_count(struct audit_context *context,
				const struct inode *inode)
{
	if (context->name_count < AUDIT_NAMES) {
		context->name_count++;
#if AUDIT_DEBUG
		context->ino_count++;
#endif
		return 0;
	}

	if (inode)
		printk(KERN_DEBUG "audit: name_count maxed, losing inode data: "
		       "dev=%02x:%02x, inode=%lu\n",
		       MAJOR(inode->i_sb->s_dev),
		       MINOR(inode->i_sb->s_dev),
		       inode->i_ino);
	else
		printk(KERN_DEBUG "name_count maxed, losing inode data\n");
	return 1;
}
/*
 * Fill @name's file-capability fields from @dentry's on-disk xattrs.
 * Starts from "no capabilities" so a NULL dentry or a read failure leaves
 * the record in a clean state.  Returns 0 on success or the error from
 * get_vfs_caps_from_disk().
 */
static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
{
	struct cpu_vfs_cap_data caps;
	int err;

	memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t));
	memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t));
	name->fcap.fE = 0;
	name->fcap_ver = 0;

	if (!dentry)
		return 0;

	err = get_vfs_caps_from_disk(dentry, &caps);
	if (err)
		return err;

	name->fcap.permitted = caps.permitted;
	name->fcap.inheritable = caps.inheritable;
	name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
	name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
	return 0;
}
/* Snapshot the identifying fields of @inode — number, device, mode,
 * ownership, rdev, LSM secid and (via @dentry) file capabilities — into
 * the audit_names record @name. */
static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
			     const struct inode *inode)
{
	name->ino   = inode->i_ino;
	name->dev   = inode->i_sb->s_dev;
	name->mode  = inode->i_mode;
	name->uid   = inode->i_uid;
	name->gid   = inode->i_gid;
	name->rdev  = inode->i_rdev;
	security_inode_getsecid(inode, &name->osid);
	audit_copy_fcaps(name, dentry);
}
/**
 * __audit_inode - store the inode and device from a lookup
 * @name: name being audited
 * @dentry: dentry being audited
 *
 * Called from fs/namei.c:path_lookup().
 */
void __audit_inode(const char *name, const struct dentry *dentry)
{
	int idx;
	struct audit_context *context = current->audit_context;
	const struct inode *inode = dentry->d_inode;
	if (!context->in_syscall)
		return;
	/* @name normally matches the most recently stored name (or the
	 * one before it); the match is by pointer identity, not string
	 * comparison. */
	if (context->name_count
	    && context->names[context->name_count-1].name
	    && context->names[context->name_count-1].name == name)
		idx = context->name_count - 1;
	else if (context->name_count > 1
		 && context->names[context->name_count-2].name
		 && context->names[context->name_count-2].name == name)
		idx = context->name_count - 2;
	else {
		/* FIXME: how much do we care about inodes that have no
		 * associated name? */
		if (audit_inc_name_count(context, inode))
			return;	/* table full - inode data dropped */
		idx = context->name_count - 1;
		context->names[idx].name = NULL;
	}
	handle_path(dentry);
	audit_copy_inode(&context->names[idx], dentry, inode);
}
/**
 * __audit_inode_child - collect inode info for created/removed objects
 * @dentry: dentry being audited
 * @parent: inode of dentry parent
 *
 * For syscalls that create or remove filesystem objects, audit_inode
 * can only collect information for the filesystem object's parent.
 * This call updates the audit context with the child's information.
 * Syscalls that create a new filesystem object must be hooked after
 * the object is created. Syscalls that remove a filesystem object
 * must be hooked prior, in order to capture the target inode during
 * unsuccessful attempts.
 */
void __audit_inode_child(const struct dentry *dentry,
			 const struct inode *parent)
{
	int idx;
	struct audit_context *context = current->audit_context;
	const char *found_parent = NULL, *found_child = NULL;
	const struct inode *inode = dentry->d_inode;
	const char *dname = dentry->d_name.name;
	int dirlen = 0;
	if (!context->in_syscall)
		return;
	if (inode)
		handle_one(inode);
	/* parent is more likely, look for it first */
	for (idx = 0; idx < context->name_count; idx++) {
		struct audit_names *n = &context->names[idx];
		if (!n->name)
			continue;
		/* A stored path whose prefix resolves to the parent inode:
		 * trim it to the directory part in place. */
		if (n->ino == parent->i_ino &&
		    !audit_compare_dname_path(dname, n->name, &dirlen)) {
			n->name_len = dirlen; /* update parent data in place */
			found_parent = n->name;
			goto add_names;
		}
	}
	/* no matching parent, look for matching child */
	for (idx = 0; idx < context->name_count; idx++) {
		struct audit_names *n = &context->names[idx];
		if (!n->name)
			continue;
		/* strcmp() is the more likely scenario */
		if (!strcmp(dname, n->name) ||
		    !audit_compare_dname_path(dname, n->name, &dirlen)) {
			if (inode)
				audit_copy_inode(n, NULL, inode);
			else
				/* object gone (e.g. failed create/remove) */
				n->ino = (unsigned long)-1;
			found_child = n->name;
			goto add_names;
		}
	}
add_names:
	/* Ensure both a parent and a child entry end up in the table,
	 * adding unnamed slots for whichever side was not matched. */
	if (!found_parent) {
		if (audit_inc_name_count(context, parent))
			return;
		idx = context->name_count - 1;
		context->names[idx].name = NULL;
		audit_copy_inode(&context->names[idx], NULL, parent);
	}
	if (!found_child) {
		if (audit_inc_name_count(context, inode))
			return;
		idx = context->name_count - 1;
		/* Re-use the name belonging to the slot for a matching parent
		 * directory. All names for this context are relinquished in
		 * audit_free_names() */
		if (found_parent) {
			context->names[idx].name = found_parent;
			context->names[idx].name_len = AUDIT_NAME_FULL;
			/* don't call __putname() */
			context->names[idx].name_put = 0;
		} else {
			context->names[idx].name = NULL;
		}
		if (inode)
			audit_copy_inode(&context->names[idx], NULL, inode);
		else
			context->names[idx].ino = (unsigned long)-1;
	}
}
EXPORT_SYMBOL_GPL(__audit_inode_child);
/**
 * auditsc_get_stamp - get local copies of audit_context values
 * @ctx: audit_context for the task
 * @t: timespec to store time recorded in the audit_context
 * @serial: serial value that is recorded in the audit_context
 *
 * Also sets the context as auditable.  Returns 0 when not inside a
 * syscall (nothing copied), 1 otherwise.
 */
int auditsc_get_stamp(struct audit_context *ctx,
		       struct timespec *t, unsigned int *serial)
{
	if (!ctx->in_syscall)
		return 0;
	/* Hand out a serial number lazily, the first time it is needed. */
	if (ctx->serial == 0)
		ctx->serial = audit_serial();
	*t = ctx->ctime;	/* struct copy of tv_sec/tv_nsec */
	*serial = ctx->serial;
	if (ctx->prio == 0) {
		ctx->prio = 1;
		ctx->current_state = AUDIT_RECORD_CONTEXT;
	}
	return 1;
}
/* global counter which is incremented every time something logs in */
static atomic_t session_id = ATOMIC_INIT(0);
/**
 * audit_set_loginuid - set a task's audit_context loginuid
 * @task: task whose audit context is being modified
 * @loginuid: loginuid value
 *
 * Returns 0.
 *
 * Called (set) from fs/proc/base.c::proc_loginuid_write().
 */
int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
{
	/* Every loginuid change starts a new audit session. */
	unsigned int sessionid = atomic_inc_return(&session_id);
	struct audit_context *context = task->audit_context;
	if (context && context->in_syscall) {
		struct audit_buffer *ab;
		/* Emit an AUDIT_LOGIN record showing old and new ids; the
		 * ids are updated below whether or not the record could be
		 * allocated. */
		ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
		if (ab) {
			audit_log_format(ab, "login pid=%d uid=%u "
				"old auid=%u new auid=%u"
				" old ses=%u new ses=%u",
				task->pid, task_uid(task),
				task->loginuid, loginuid,
				task->sessionid, sessionid);
			audit_log_end(ab);
		}
	}
	task->sessionid = sessionid;
	task->loginuid = loginuid;
	return 0;
}
/**
 * __audit_mq_open - record audit data for a POSIX MQ open
 * @oflag: open flag
 * @mode: mode bits
 * @attr: queue attributes (may be NULL)
 *
 * Snapshots the mq_open arguments into the current audit context and
 * tags it as an AUDIT_MQ_OPEN event.
 */
void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
{
	struct audit_context *context = current->audit_context;

	if (!attr)
		memset(&context->mq_open.attr, 0, sizeof(context->mq_open.attr));
	else
		context->mq_open.attr = *attr;	/* struct copy */
	context->mq_open.oflag = oflag;
	context->mq_open.mode = mode;
	context->type = AUDIT_MQ_OPEN;
}
/**
 * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive
 * @mqdes: MQ descriptor
 * @msg_len: Message length
 * @msg_prio: Message priority
 * @abs_timeout: Message timeout in absolute time (may be NULL)
 *
 * Stores the send/receive arguments in the current audit context and
 * tags it as an AUDIT_MQ_SENDRECV event.
 */
void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
			const struct timespec *abs_timeout)
{
	struct audit_context *context = current->audit_context;
	struct timespec *p = &context->mq_sendrecv.abs_timeout;

	if (!abs_timeout)
		memset(p, 0, sizeof(*p));
	else
		*p = *abs_timeout;	/* struct copy */
	context->mq_sendrecv.mqdes = mqdes;
	context->mq_sendrecv.msg_len = msg_len;
	context->mq_sendrecv.msg_prio = msg_prio;
	context->type = AUDIT_MQ_SENDRECV;
}
/**
 * __audit_mq_notify - record audit data for a POSIX MQ notify
 * @mqdes: MQ descriptor
 * @notification: Notification event (may be NULL)
 */
void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	struct audit_context *context = current->audit_context;

	/* A NULL notification (un-registration) is recorded as signal 0. */
	context->mq_notify.sigev_signo =
		notification ? notification->sigev_signo : 0;
	context->mq_notify.mqdes = mqdes;
	context->type = AUDIT_MQ_NOTIFY;
}
/**
 * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute
 * @mqdes: MQ descriptor
 * @mqstat: MQ flags
 */
void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
	struct audit_context *ctx = current->audit_context;

	ctx->mq_getsetattr.mqstat = *mqstat;	/* struct copy */
	ctx->mq_getsetattr.mqdes = mqdes;
	ctx->type = AUDIT_MQ_GETSETATTR;
}
/**
 * __audit_ipc_obj - record audit data for ipc object
 * @ipcp: ipc permissions
 */
void __audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
	struct audit_context *ctx = current->audit_context;

	/* Capture ownership and mode of the IPC object ... */
	ctx->ipc.uid = ipcp->uid;
	ctx->ipc.gid = ipcp->gid;
	ctx->ipc.mode = ipcp->mode;
	/* ... with no new permissions yet (see __audit_ipc_set_perm). */
	ctx->ipc.has_perm = 0;
	security_ipc_getsecid(ipcp, &ctx->ipc.osid);
	ctx->type = AUDIT_IPC;
}
/**
 * __audit_ipc_set_perm - record audit data for new ipc permissions
 * @qbytes: msgq bytes
 * @uid: msgq user id
 * @gid: msgq group id
 * @mode: msgq mode (permissions)
 *
 * Called only after audit_ipc_obj().
 */
void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
{
	struct audit_context *ctx = current->audit_context;

	ctx->ipc.qbytes = qbytes;
	ctx->ipc.perm_uid = uid;
	ctx->ipc.perm_gid = gid;
	ctx->ipc.perm_mode = mode;
	/* Flag that the perm_* fields above are now valid. */
	ctx->ipc.has_perm = 1;
}
/*
 * audit_bprm - queue an execve record for auditing
 * @bprm: the program being executed
 *
 * Attaches an AUDIT_EXECVE aux record to the current context; argv/envp
 * contents are read out of @bprm->mm when the record is logged.
 * Returns 0 on success or when auditing is not active, -ENOMEM on
 * allocation failure.
 */
int audit_bprm(struct linux_binprm *bprm)
{
	struct audit_context *context = current->audit_context;
	struct audit_aux_data_execve *ax;

	if (likely(!audit_enabled || !context || context->dummy))
		return 0;

	ax = kmalloc(sizeof(*ax), GFP_KERNEL);
	if (!ax)
		return -ENOMEM;

	ax->argc = bprm->argc;
	ax->envc = bprm->envc;
	ax->mm = bprm->mm;
	ax->d.type = AUDIT_EXECVE;
	/* Push onto the head of the context's aux-record list. */
	ax->d.next = context->aux;
	context->aux = (void *)ax;
	return 0;
}
/**
* audit_socketcall - record audit data for sys_socketcall
* @nargs: number of args
* @args: args array
*
*/
void audit_socketcall(int nargs, unsigned long *args)
{
struct audit_context *context = current->audit_context;
if (likely(!context || context->dummy))
return;
context->type = AUDIT_SOCKETCALL;
context->socketcall.nargs = nargs;
memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
}
/**
 * __audit_fd_pair - record audit data for pipe and socketpair
 * @fd1: the first file descriptor
 * @fd2: the second file descriptor
 */
void __audit_fd_pair(int fd1, int fd2)
{
	struct audit_context *ctx = current->audit_context;

	/* Stash both descriptors for the FD_PAIR record at syscall exit. */
	ctx->fds[0] = fd1;
	ctx->fds[1] = fd2;
}
/**
* audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto
* @len: data length in user space
* @a: data address in kernel space
*
* Returns 0 for success or NULL context or < 0 on error.
*/
int audit_sockaddr(int len, void *a)
{
struct audit_context *context = current->audit_context;
if (likely(!context || context->dummy))
return 0;
if (!context->sockaddr) {
void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
if (!p)
return -ENOMEM;
context->sockaddr = p;
}
context->sockaddr_len = len;
memcpy(context->sockaddr, a, len);
return 0;
}
/*
 * __audit_ptrace - record the target task of a ptrace call
 * @t: task being traced
 */
void __audit_ptrace(struct task_struct *t)
{
	struct audit_context *ctx = current->audit_context;

	ctx->target_pid = t->pid;
	ctx->target_auid = audit_get_loginuid(t);
	ctx->target_uid = task_uid(t);
	ctx->target_sessionid = audit_get_sessionid(t);
	security_task_getsecid(t, &ctx->target_sid);
	memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
}
/**
 * audit_signal_info - record signal info for shutting down audit subsystem
 * @sig: signal value
 * @t: task being signaled
 *
 * If the audit subsystem is being terminated, record the task (pid)
 * and uid that is doing that.
 */
int __audit_signal_info(int sig, struct task_struct *t)
{
	struct audit_aux_data_pids *axp;
	struct task_struct *tsk = current;
	struct audit_context *ctx = tsk->audit_context;
	uid_t uid = current_uid(), t_uid = task_uid(t);
	/* Signal aimed at the audit daemon itself: remember who sent a
	 * termination-class signal, for the shutdown record. */
	if (audit_pid && t->tgid == audit_pid) {
		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
			audit_sig_pid = tsk->pid;
			if (tsk->loginuid != -1)
				audit_sig_uid = tsk->loginuid;
			else
				audit_sig_uid = uid;
			security_task_getsecid(tsk, &audit_sig_sid);
		}
		if (!audit_signals || audit_dummy_context())
			return 0;
	}
	/* optimize the common case by putting first signal recipient directly
	 * in audit_context */
	if (!ctx->target_pid) {
		ctx->target_pid = t->tgid;
		ctx->target_auid = audit_get_loginuid(t);
		ctx->target_uid = t_uid;
		ctx->target_sessionid = audit_get_sessionid(t);
		security_task_getsecid(t, &ctx->target_sid);
		memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
		return 0;
	}
	/* Additional recipients go into AUDIT_OBJ_PID aux records holding
	 * up to AUDIT_AUX_PIDS entries each; allocate a new one when the
	 * current head is full (GFP_ATOMIC: signal-delivery context). */
	axp = (void *)ctx->aux_pids;
	if (!axp || axp->pid_count == AUDIT_AUX_PIDS) {
		axp = kzalloc(sizeof(*axp), GFP_ATOMIC);
		if (!axp)
			return -ENOMEM;
		axp->d.type = AUDIT_OBJ_PID;
		axp->d.next = ctx->aux_pids;
		ctx->aux_pids = (void *)axp;
	}
	BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
	axp->target_pid[axp->pid_count] = t->tgid;
	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
	axp->target_uid[axp->pid_count] = t_uid;
	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
	security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
	memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
	axp->pid_count++;
	return 0;
}
/**
 * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps
 * @bprm: pointer to the bprm being processed
 * @new: the proposed new credentials
 * @old: the old credentials
 *
 * Simply check if the proc already has the caps given by the file and if not
 * store the priv escalation info for later auditing at the end of the syscall
 *
 * -Eric
 */
int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
			   const struct cred *new, const struct cred *old)
{
	struct audit_aux_data_bprm_fcaps *ax;
	struct audit_context *context = current->audit_context;
	struct cpu_vfs_cap_data vcaps;
	struct dentry *dentry;
	ax = kmalloc(sizeof(*ax), GFP_KERNEL);
	if (!ax)
		return -ENOMEM;
	/* Link the new aux record at the head of the context's list. */
	ax->d.type = AUDIT_BPRM_FCAPS;
	ax->d.next = context->aux;
	context->aux = (void *)ax;
	/* NOTE(review): get_vfs_caps_from_disk()'s return value is
	 * ignored; presumably it zeroes vcaps on failure - confirm. */
	dentry = dget(bprm->file->f_dentry);
	get_vfs_caps_from_disk(dentry, &vcaps);
	dput(dentry);
	/* File capabilities as found on disk ... */
	ax->fcap.permitted = vcaps.permitted;
	ax->fcap.inheritable = vcaps.inheritable;
	ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
	ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
	/* ... and the process capability sets before and after exec. */
	ax->old_pcap.permitted = old->cap_permitted;
	ax->old_pcap.inheritable = old->cap_inheritable;
	ax->old_pcap.effective = old->cap_effective;
	ax->new_pcap.permitted = new->cap_permitted;
	ax->new_pcap.inheritable = new->cap_inheritable;
	ax->new_pcap.effective = new->cap_effective;
	return 0;
}
/**
 * __audit_log_capset - store information about the arguments to the capset syscall
 * @pid: target pid of the capset call
 * @new: the new credentials
 * @old: the old (current) credentials
 *
 * Record the arguments userspace sent to sys_capset for later printing by the
 * audit system if applicable
 */
void __audit_log_capset(pid_t pid,
		       const struct cred *new, const struct cred *old)
{
	struct audit_context *context = current->audit_context;
	context->capset.pid = pid;
	context->capset.cap.effective = new->cap_effective;
	/* Fix: this previously copied new->cap_effective, so the logged
	 * "inheritable" set silently duplicated the effective set. */
	context->capset.cap.inheritable = new->cap_inheritable;
	context->capset.cap.permitted = new->cap_permitted;
	context->type = AUDIT_CAPSET;
}
/*
 * __audit_mmap_fd - record the fd/flags pair of an mmap call
 * @fd: mapped file descriptor
 * @flags: mmap flags
 */
void __audit_mmap_fd(int fd, int flags)
{
	struct audit_context *ctx = current->audit_context;

	ctx->mmap.fd = fd;
	ctx->mmap.flags = flags;
	ctx->type = AUDIT_MMAP;
}
/**
 * audit_core_dumps - record information about processes that end abnormally
 * @signr: signal value
 *
 * If a process ends with a core dump, something fishy is going on and we
 * should record the event for investigation.
 */
void audit_core_dumps(long signr)
{
	struct audit_buffer *ab;
	u32 sid;
	uid_t auid = audit_get_loginuid(current), uid;
	gid_t gid;
	unsigned int sessionid = audit_get_sessionid(current);
	if (!audit_enabled)
		return;
	if (signr == SIGQUIT)	/* don't care for those */
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
	/* Fix: bail out if no buffer could be allocated (rate limit,
	 * OOM, ...) instead of issuing a series of no-op log calls. */
	if (unlikely(!ab))
		return;
	current_uid_gid(&uid, &gid);
	audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
			 auid, uid, gid, sessionid);
	security_task_getsecid(current, &sid);
	if (sid) {
		char *ctx = NULL;
		u32 len;
		/* Prefer the symbolic security context; fall back to the
		 * raw sid if it cannot be converted. */
		if (security_secid_to_secctx(sid, &ctx, &len))
			audit_log_format(ab, " ssid=%u", sid);
		else {
			audit_log_format(ab, " subj=%s", ctx);
			security_release_secctx(ctx, len);
		}
	}
	/* comm comes from userspace - log it as untrusted. */
	audit_log_format(ab, " pid=%d comm=", current->pid);
	audit_log_untrustedstring(ab, current->comm);
	audit_log_format(ab, " sig=%ld", signr);
	audit_log_end(ab);
}
/*
 * audit_killed_trees - expose the current context's killed-trees list
 *
 * Returns the list head when the task has an audit context and is
 * inside a syscall, NULL otherwise.
 */
struct list_head *audit_killed_trees(void)
{
	struct audit_context *ctx = current->audit_context;

	if (ctx && ctx->in_syscall)
		return &ctx->killed_trees;
	return NULL;
}
| gpl-2.0 |
akellar/android_kernel_motorola_shamu | drivers/scsi/csiostor/csio_wr.c | 2677 | 43927 | /*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <linux/cache.h>
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_defs.h"
int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
/* Cache the size programmed in SGE_FL_BUFFER_SIZE<reg> into @sge. */
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	/* The per-index size registers are laid out contiguously after
	 * SGE_FL_BUFFER_SIZE0, one 32-bit register apiece. */
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
							reg * sizeof(uint32_t));
}
/* Free list buffer size */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
	/* The buffer-size register index (sreg) is stashed in the low
	 * nibble of the DMA address when the FL is filled (see
	 * csio_wr_fill_fl); recover it to look up the configured size. */
	return sge->sge_fl_buf_size[buf->paddr & 0xF];
}
/* Size of the egress queue status page: 128 or 64 bytes depending on
 * the EGRSTATUSPAGESIZE bit of the cached SGE control register. */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
	if (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1))
		return 128;
	return 64;
}
/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	/*
	 * Ring the doorbell only when we have atleast CSIO_QCREDIT_SZ
	 * number of bytes in the freelist queue. This translates to atleast
	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
	 */
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
			      CSIO_HW_PIDX(hw, flq->inc_idx / 8),
			      MYPF_REG(SGE_PF_KDOORBELL));
		/* Keep the sub-credit remainder for the next ring. */
		flq->inc_idx &= 7;
	}
}
/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	/* CIDXINC(0) consumes no entries; the GTS write only restarts the
	 * interrupt timer for ingress queue @iqid. */
	csio_wr_reg32(hw, CIDXINC(0) |
		      INGRESSQID(iqid) |
		      TIMERREG(X_TIMERREG_RESTART_COUNTER),
		      MYPF_REG(SGE_PF_GTS));
}
/*
* csio_wr_fill_fl - Populate the FL buffers of a FL queue.
* @hw: HW module.
* @flq: Freelist queue.
*
* Fill up freelist buffer entries with buffers of size specified
* in the size register.
*
*/
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
__be64 *d = (__be64 *)(flq->vstart);
struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
uint64_t paddr;
int sreg = flq->un.fl.sreg;
int n = flq->credits;
while (n--) {
buf->len = sge->sge_fl_buf_size[sreg];
buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
&buf->paddr);
if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM;
}
paddr = buf->paddr | (sreg & 0xF);
*d++ = cpu_to_be64(paddr);
buf++;
}
return 0;
}
/*
 * csio_wr_update_fl - Account @n newly posted buffers on a FL queue.
 * @hw: HW module.
 * @flq: Freelist queue.
 * @n: number of buffers posted.
 *
 * Advances the doorbell-pending count and the producer index (pidx),
 * wrapping pidx around the ring, and bumps the refill statistic.
 */
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
	flq->inc_idx += n;
	flq->pidx += n;
	/* pidx is a ring index: wrap once it walks past the end. */
	if (flq->pidx >= flq->credits)
		flq->pidx -= (uint16_t)flq->credits;

	CSIO_INC_STATS(flq, n_flq_refill);
}
/*
 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
 * @hw: HW module
 * @qsize: Size of the queue in bytes
 * @wrsize: Since of WR in this queue, if fixed.
 * @type: Type of queue (Ingress/Egress/Freelist)
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers for FL.
 * @sreg: What is the FL buffer size register?
 * @iq_int_handler: Ingress queue handler in INTx mode.
 *
 * This function allocates and sets up a queue for the caller
 * of size qsize, aligned at the required boundary. This is subject to
 * be free entries being available in the queue array. If one is found,
 * it is initialized with the allocated queue, marked as being used (owner),
 * and a handle returned to the caller in form of the queue's index
 * into the q_arr array.
 * If user has indicated a freelist (by specifying nflb > 0), create
 * another queue (with its own index into q_arr) for the freelist. Allocate
 * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
 * idx in the ingress queue's flq.idx. This is how a Freelist is associated
 * with its owning ingress queue.
 *
 * Returns the queue index (>= 0) on success, -1 on failure.
 *
 * NOTE(review): the later error paths (FL allocation / fill failures)
 * return -1 without releasing q->vstart or earlier allocations -
 * presumably torn down with the HW module; confirm.
 */
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q	*q, *flq;
	int		free_idx = wrm->free_qidx;
	int		ret_idx = free_idx;
	uint32_t	qsz;
	int flq_idx;
	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}
	/* Compute the actual allocation size for each queue type:
	 * rounded to the type's alignment, plus (except for ingress,
	 * whose status page equals one WR) the status page. */
	switch (type) {
	case CSIO_EGRESS:
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				    wrsize);
			return -1;
		}
		/*
		 * Number of elements must be a multiple of 16
		 * So this includes status page size
		 */
		qsz = ALIGN(qsize/wrsize, 16) * wrsize;
		break;
	case CSIO_FREELIST:
		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}
	q = wrm->q_arr[free_idx];
	q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}
	/*
	 * We need to zero out the contents, importantly for ingress,
	 * since we start with a generatiom bit of 1 for ingress.
	 */
	memset(q->vstart, 0, qsz);
	q->type		= type;
	q->owner	= owner;
	q->pidx		= q->cidx = q->inc_idx = 0;
	q->size		= qsz;
	q->wr_sz	= wrsize;	/* If using fixed size WRs */
	wrm->free_qidx++;
	if (type == CSIO_INGRESS) {
		/* Since queue area is set to zero */
		q->un.iq.genbit	= 1;
		/*
		 * Ingress queue status page size is always the size of
		 * the ingress queue entry.
		 */
		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);
		/* Allocate memory for FL if requested */
		if (nflb > 0) {
			/* Recursive call: the FL is itself a queue, with
			 * 8-byte buffer-pointer entries. */
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}
			/* Associate the new FL with the Ingress quue */
			q->un.iq.flq_idx = flq_idx;
			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kzalloc(flq->credits *
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}
			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;
			/* Fill up the free list buffers */
			if (csio_wr_fill_fl(hw, flq))
				return -1;
			/*
			 * Make sure in a FLQ, atleast 1 credit (8 FL buffers)
			 * remains unpopulated,otherwise HW thinks
			 * FLQ is empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}
		/* Associate the IQ INTx handler. */
		q->un.iq.iq_intx_handler = iq_intx_handler;
		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else { /* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}
	return ret_idx;
}
/*
 * csio_wr_iq_create_rsp - Response handler for IQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that got created.
 *
 * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
 * Frees @mbp on all paths.  Returns 0 on success, -EINVAL on failure.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;
	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Record the firmware-assigned ids and reset the ring indices. */
	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx)	= 0;
	/* Actual iq-id. */
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;
	/* Set the iq-id to iq map table. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);
	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);
	/* If this IQ has an associated freelist, record the FL id and
	 * announce the buffers posted at init time to the hardware. */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];
		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		/* Leave 8 buffers (1 credit) unpopulated - see
		 * csio_wr_alloc_q. */
		csio_q_pidx(hw, flq_idx)   = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: MSIX vector.
 * @portid: PCIE Channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * with alloc/write bits set.  If @cbfn is NULL the call is synchronous and
 * the response is handled by csio_wr_iq_create_rsp(); otherwise 0 is
 * returned once the command is issued and @cbfn completes the work.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;
	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}
	/* Choose the interrupt destination based on the interrupt mode:
	 * forwarded interrupts go to another IQ, MSI-X straight to PCIe. */
	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst	= X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex	=
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst		= X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex	= (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Pass in the ingress queue cmd parameters */
	iqp.pfn			= hw->pfn;
	iqp.vfn			= 0;
	iqp.iq_start		= 1;
	iqp.viid		= 0;
	iqp.type		= FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch		= async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus	= X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus	= X_UPDATESCHEDULING_TIMER;
	iqp.iqanud		= X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech		= portid;
	iqp.iqintcntthresh	= (uint8_t)csio_sge_thresh_reg;
	/* Encode the fixed WR size (validated in csio_wr_alloc_q). */
	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}
	iqp.iqsize		= csio_q_size(hw, iq_idx) /
						csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr		= csio_q_pstart(hw, iq_idx);
	/* Describe the associated freelist, if any. */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];
		iqp.fl0paden	= 1;
		iqp.fl0packen	= flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin	= X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax	= X_FETCHBURSTMAX_512B;
		iqp.fl0size	= csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr	= csio_q_pstart(hw, flq_idx);
	}
	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Asynchronous: @cbfn owns completion (and the mailbox). */
	if (cbfn != NULL)
		return 0;
	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ
 * ids and reset the ring indices.  Frees @mbp on all paths.
 * Returns 0 on success, -EINVAL on firmware failure.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;
	int rv = 0;

	memset(&eqp, 0, sizeof(eqp));
	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
	if (retval == FW_SUCCESS) {
		csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
		csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
		csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
		csio_q_inc_idx(hw, eq_idx) = 0;
	} else {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		rv = -EINVAL;
	}
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated ingress queue index.
 * @portid: PCIE channel for this queue.
 * @cbfn: Completion callback (NULL for a synchronous call).
 *
 * This API configures a offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write ) mailbox.  When @cbfn is NULL
 * the response is handled inline by csio_wr_eq_cfg_rsp().
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb  *mbp;
	struct csio_eq_params eqp;
	memset(&eqp, 0, sizeof(struct csio_eq_params));
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}
	/* Build the FW_EQ_OFLD_CMD parameters: completion notifications
	 * are delivered via the status page and the associated IQ. */
	eqp.pfn			= hw->pfn;
	eqp.vfn			= 0;
	eqp.eqstart		= 1;
	eqp.hostfcmode		= X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid		= csio_q_iqid(hw, iq_idx);
	eqp.fbmin		= X_FETCHBURSTMIN_64B;
	eqp.fbmax		= X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh		= 0;
	eqp.pciechn		= portid;
	eqp.eqsize		= csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr		= csio_q_pstart(hw, eq_idx);
	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Asynchronous: @cbfn owns completion (and the mailbox). */
	if (cbfn != NULL)
		return 0;
	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}
/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.  Frees @mbp and returns
 * 0 on firmware success, -EINVAL otherwise.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);

	/* Done with the mailbox either way. */
	mempool_free(mbp, hw->mb_mempool);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.  When @cbfn is NULL the call is synchronous
 * and the response is handled by csio_wr_iq_destroy_rsp().
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb  *mbp;
	struct csio_iq_params iqp;
	int flq_idx;
	memset(&iqp, 0, sizeof(struct csio_iq_params));
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;
	iqp.pfn = hw->pfn;
	iqp.vfn = 0;
	iqp.iqid = csio_q_iqid(hw, iq_idx);
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;
	/* Free the associated FL in the same command; 0xFFFF marks an
	 * unused FL slot. */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;
	iqp.fl1id = 0xFFFF;
	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}
	/* Asynchronous: @cbfn owns completion (and the mailbox). */
	if (cbfn != NULL)
		return 0;
	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ destruction.
* @hw: The HW module.
* @mbp: Mailbox.
* @eq_idx: Egress queue that was freed.
*
* Handle FW_OFLD_EQ_CMD (free) mailbox completion.
*/
/*
 * Translate the FW_EQ_OFLD_CMD (free) completion status into an errno
 * and release the mailbox back to its mempool.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	/* Read FW status before the mailbox is returned to the pool */
	int ret = (csio_mb_fw_retval(mbp) == FW_SUCCESS) ? 0 : -EINVAL;

	mempool_free(mbp, hw->mb_mempool);
	return ret;
}
/*
* csio_wr_eq_destroy - Free an Egress queue.
* @hw: The HW module.
* @priv: Private data object.
* @eq_idx: Egress queue index to destroy
* @cbfn: Completion callback.
*
* This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
* with the free bit set.
*/
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	/* GFP_ATOMIC: may be called from a non-sleepable context. */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn = hw->pfn;
	eqp.vfn = 0;
	eqp.eqid = csio_q_eqid(hw, eq_idx);	/* FW-assigned EQ id to free */

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	/* Asynchronous mode: the callback owns and frees the mailbox. */
	if (cbfn != NULL)
		return 0;

	/* Synchronous mode: handle the FW response inline. */
	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}
/*
* csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
* @hw: HW module
* @qidx: Egress queue index
*
* Cleanup the Egress queue status page.
*/
/*
 * Zero the status page that lives at the wrap point of the given
 * egress queue, so stale consumer-index data is not read back later.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q *eq = csio_hw_to_wrm(hw)->q_arr[qidx];
	struct csio_qstatus_page *page;

	page = (struct csio_qstatus_page *)eq->vwrap;
	memset(page, 0, sizeof(*page));
}
/*
* csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
* @hw: HW module
* @qidx: Ingress queue index
*
* Cleanup the footer entries in the given ingress queue,
* set to 1 the internal copy of genbit.
*/
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *q = wrm->q_arr[qidx];
void *wr;
struct csio_iqwr_footer *ftr;
uint32_t i = 0;
/* set to 1 since we are just about zero out genbit */
q->un.iq.genbit = 1;
for (i = 0; i < q->credits; i++) {
/* Get the WR */
wr = (void *)((uintptr_t)q->vstart +
(i * q->wr_sz));
/* Get the footer */
ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
(q->wr_sz - sizeof(*ftr)));
/* Zero out footer */
memset(ftr, 0, sizeof(*ftr));
}
}
/*
 * csio_wr_destroy_queues - Free all FW queue resources.
 * @hw: HW module.
 * @cmd: When true, issue FW free commands; when false (or once a
 *       command times out / reports busy), only invalidate the
 *       driver-side queue ids.
 */
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				/* FW unresponsive: stop issuing commands for
				 * the remaining queues, just invalidate ids. */
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			/* fall through */
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				/* Attached freelist id goes away with the IQ */
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
			}
			/* fall through */
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return 0;
}
/*
* csio_wr_get - Get requested size of WR entry/entries from queue.
* @hw: HW module.
* @qidx: Index of queue.
* @size: Cumulative size of Work request(s).
* @wrp: Work request pair.
*
* If requested credits are available, return the start address of the
* work request in the work request pair. Set pidx accordingly and
* return.
*
* NOTE about WR pair:
* ==================
* A WR can start towards the end of a queue, and then continue at the
* beginning, since the queue is considered to be circular. This will
* require a pair of address/size to be passed back to the caller -
* hence Work request pair format.
*/
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	/* Candidate WR start: current producer position in the ring */
	void *cwr = (void *)((uintptr_t)(q->vstart) +
			     (q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	/* Snapshot the HW consumer index from the status page */
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	/* Round the request up to whole credits */
	uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits = req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);

	/* Calculate available credits; one slot is always left unused so
	 * that pidx == cidx unambiguously means "empty". */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Check if we have enough credits.
	 * credits = 1 implies queue is full.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}

	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if WR spills over.
	 * If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		/* Split WR: tail of the ring + head of the ring */
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		/* New pidx is just past the wrapped-around portion */
		q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
				     CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		/* Contiguous WR */
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx += (uint16_t)req_credits;

		/* We are at the end of queue, roll back pidx to top of queue */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	/* Remember credits consumed; csio_wr_issue() reports them to HW */
	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}
/*
* csio_wr_copy_to_wrp - Copies given data into WR.
* @data_buf - Data buffer
* @wrp - Work request pair.
* @wr_off - Work request offset.
* @data_len - Data length.
*
* Copies the given data in Work Request. Work request pair(wrp) specifies
* address information of Work request.
* Returns: none
*/
/*
 * Copy @data_len bytes of @data_buf into the work request described by
 * @wrp, starting @wr_off bytes into the first segment. If the data does
 * not fit in segment 1, the remainder continues in segment 2 (the part
 * of the WR that wrapped to the start of the circular queue).
 */
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		    uint32_t wr_off, uint32_t data_len)
{
	uint32_t avail1 = wrp->size1 - wr_off;	/* room left in segment 1 */
	uint32_t chunk1 = (data_len < avail1) ? data_len : avail1;
	uint32_t remainder = data_len - chunk1;

	memcpy((uint8_t *)wrp->addr1 + wr_off, data_buf, chunk1);

	/* Spill-over continues at the top of the circular buffer */
	if (remainder) {
		CSIO_DB_ASSERT(remainder <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *)data_buf + chunk1, remainder);
	}
}
/*
* csio_wr_issue - Notify chip of Work request.
* @hw: HW module.
* @qidx: Index of queue.
* @prio: 0: Low priority, 1: High priority
*
* Rings the SGE Doorbell by writing the current producer index of the passed
* in queue into the register.
*
*/
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	/* Make sure the WR contents are visible to the device before the
	 * doorbell write that tells it to fetch them. */
	wmb();
	/* Ring SGE Doorbell writing q->pidx into it */
	csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
			  CSIO_HW_PIDX(hw, q->inc_idx),
		      MYPF_REG(SGE_PF_KDOORBELL));
	/* Credits reported to HW; reset the pending increment */
	q->inc_idx = 0;

	return 0;
}
/*
 * Number of credits currently in use (between the consumer and
 * producer indices) in the circular queue @q. Zero means empty.
 */
static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
	uint32_t prod = q->pidx;
	uint32_t cons = q->cidx;

	if (prod == cons)
		return 0;		/* empty queue */
	if (prod > cons)
		return prod - cons;
	return q->credits - (cons - prod);	/* producer wrapped */
}
/*
* csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
* @hw: HW module.
* @flq: The freelist queue.
*
* Invalidate the driver's version of a freelist buffer entry,
* without freeing the associated the DMA memory. The entry
* to be invalidated is picked up from the current Free list
* queue cidx.
*
*/
/*
 * Advance the freelist consumer index past the current buffer entry,
 * wrapping back to slot 0 at the end of the ring. The DMA memory of
 * the entry itself is left intact.
 */
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
	if (++flq->cidx == flq->credits) {
		flq->cidx = 0;
		CSIO_INC_STATS(flq, n_qwrap);
	}
}
/*
* csio_wr_process_fl - Process a freelist completion.
* @hw: HW module.
* @q: The ingress queue attached to the Freelist.
* @wr: The freelist completion WR in the ingress queue.
* @len_to_qid: The lower 32-bits of the first flit of the RSP footer
* @iq_handler: Caller's handler for this completion.
* @priv: Private pointer of caller
*
*/
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);

	len = len_to_qid;

	/* NEWBUF: HW started this packet in a fresh freelist buffer.
	 * If we were part-way into a (packed) buffer, retire it first. */
	if (len & IQWRF_NEWBUF) {
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);

	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr = buf->paddr;
		fbuf->vaddr = buf->vaddr;

		flb.offset = flq->un.fl.offset;
		lastlen = min(bufsz, len);
		fbuf->len = lastlen;

		len -= lastlen;
		if (!len)
			break;
		/* More data: move on to the next freelist buffer */
		csio_wr_inval_flq_buf(hw, flq);
	}

	/* Packing enabled: buffers may still hold other packets, so the
	 * handler must not free them immediately. */
	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	if (flq->un.fl.packen)
		/* Keep the buffer; just advance the packing offset */
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);
}
/*
* csio_is_new_iqwr - Is this a new Ingress queue entry ?
* @q: Ingress quueue.
* @ftr: Ingress queue WR SGE footer.
*
* The entry is new if our generation bit matches the corresponding
* bit in the footer of the current WR.
*/
/*
 * True when the generation bit in the WR footer matches the driver's
 * current generation bit for the queue, i.e. the entry was written by
 * hardware since the last wrap and has not been consumed yet.
 */
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
	return ((ftr->u.type_gen >> IQWRF_GEN_SHIFT) == q->un.iq.genbit);
}
/*
* csio_wr_process_iq - Process elements in Ingress queue.
* @hw: HW pointer
* @qidx: Index of queue
* @iq_handler: Handler for this queue
* @priv: Caller's private pointer
*
* This routine walks through every entry of the ingress queue, calling
* the provided iq_handler with the entry, until the generation bit
* flips.
*/
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	/* Current consumer position in the ring */
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
			     wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;

	/* Get the footer */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));

	/*
	 * When q wrapped around last time, driver should have inverted
	 * ic.genbit as well.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
			       (uintptr_t)q->vwrap);
		/* Read the footer's genbit before the WR payload */
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);

		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			/* Payload arrived via freelist buffers */
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			/* Forwarded interrupt: service the indicated queue */
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
				     csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * We are already in the Forward Interrupt
				 * Interrupt Queue Service! Do-not service
				 * again!
				 *
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				  wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}

		/*
		 * Ingress *always* has fixed size WR entries. Therefore,
		 * there should always be complete WRs towards the end of
		 * queue.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {

			/* Roll over to start of queue */
			q->cidx = 0;
			wr = q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr = (void *)((uintptr_t)(q->vstart) +
				      (q->cidx * q->wr_sz));
		}

		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;

	} /* while (q->un.iq.genbit == hdr->genbit) */

	/*
	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
	 * especially in msix mode. With INTx, this may be a common occurrence.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}

	/* Replenish free list buffers if pending falls below low water mark */
	if (flq) {
		uint32_t avail  = csio_wr_avail_qcredits(flq);
		if (avail <= 16) {
			/* Make sure in FLQ, at least 1 credit (8 FL buffers)
			 * remains unpopulated otherwise HW thinks
			 * FLQ is empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}

restart:
	/* Now inform SGE about our incremental index value */
	csio_wr_reg32(hw, CIDXINC(q->inc_idx)		|
			  INGRESSQID(q->un.iq.physiqid)	|
			  TIMERREG(csio_sge_timer_reg),
		      MYPF_REG(SGE_PF_GTS));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}
/*
 * Convenience wrapper: look up the ingress queue by index and hand it
 * to csio_wr_process_iq().
 */
int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
		       void (*iq_handler)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
		       void *priv)
{
	struct csio_q *iq = csio_hw_to_wrm(hw)->q_arr[qidx];

	return csio_wr_process_iq(hw, iq, iq_handler, priv);
}
/*
 * Return the index of the SGE timer register value closest (by
 * absolute difference) to the requested @time in microseconds.
 */
static int
csio_closest_timer(struct csio_sge *s, int time)
{
	int idx, best_idx = 0;
	int best_delta = INT_MAX;

	for (idx = 0; idx < ARRAY_SIZE(s->timer_val); idx++) {
		int delta = time - s->timer_val[idx];

		if (delta < 0)
			delta = -delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_idx = idx;
		}
	}
	return best_idx;
}
/*
 * Return the index of the SGE interrupt counter value closest (by
 * absolute difference) to the requested packet count @cnt.
 */
static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
	int idx, best_idx = 0;
	int best_delta = INT_MAX;

	for (idx = 0; idx < ARRAY_SIZE(s->counter_val); idx++) {
		int delta = cnt - s->counter_val[idx];

		if (delta < 0)
			delta = -delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_idx = idx;
		}
	}
	return best_idx;
}
/*
 * csio_wr_fixup_host_params - Program host-dependent SGE registers
 * (page sizes, freelist alignment, status-page size, packet shift)
 * that the firmware config file cannot know about.
 */
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	/* Host page size is encoded as log2(PAGE_SIZE) - 10 */
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t ingpad = 0;
	uint32_t stat_len = clsz > 64 ? 128 : 64;

	csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
		      HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
		      HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
		      HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
		      SGE_HOST_PAGE_SIZE);

	/* Freelist buffers aligned to cache line (minimum 32 bytes) */
	sge->csio_fl_align = clsz < 32 ? 32 : clsz;
	/* Ingress padding boundary is encoded as log2(align) - 5 */
	ingpad = ilog2(sge->csio_fl_align) - 5;

	csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
			   EGRSTATUSPAGESIZE(1),
			   INGPADBOUNDARY(ingpad) |
			   EGRSTATUSPAGESIZE(stat_len != 64));

	/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);

	/*
	 * If using hard params, the following will get set correctly
	 * in csio_wr_set_sge().
	 */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
		/* Round FL buffer sizes 2 and 3 up to the alignment */
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
			       sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
			      SGE_FL_BUFFER_SIZE2);
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
			       sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
			      SGE_FL_BUFFER_SIZE3);
	}

	csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);

	/* default value of rx_dma_offset of the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
			   PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));

	csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
				    CSUM_HAS_PSEUDO_HDR, 0);
}
/*
 * csio_init_intr_coalesce_parms - Pick the SGE timer/threshold register
 * indices matching the module's interrupt-coalescing parameters.
 */
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		/* Count-based coalescing: use restart-counter timer mode.
		 * NOTE(review): thresh_reg computed above is immediately
		 * overwritten with 0 here -- confirm this is intentional. */
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	/* Time-based coalescing: pick the closest configured timer value */
	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}
/*
* csio_wr_get_sge - Get SGE register values.
* @hw: HW module.
*
* Used by non-master functions and by master-functions relying on config file.
*/
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);

	/* Decode freelist alignment from the ingress padding boundary */
	ingpad = INGPADBOUNDARY_GET(sge->sge_control);

	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}

	/* Cache the configured freelist buffer sizes */
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Read interrupt holdoff timers (two per register), converting
	 * core-clock ticks to microseconds. */
	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);

	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_GET(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_GET(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_GET(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_GET(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_GET(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_GET(timer_value_4_and_5));

	/* Read the four interrupt packet-count thresholds */
	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
	sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}
/*
* csio_wr_set_sge - Initialize SGE registers
* @hw: HW module.
*
* Used by Master function to initialize SGE registers in the absence
* of a config file.
*/
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	int i;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Date to the Free List.
	 */
	csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);

	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
			   HP_INT_THRESH(HP_INT_THRESH_MASK) |
			   CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
			   HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
			   CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));

	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
			   ENABLE_DROP);

	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */

	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	/* Sizes 2 and 3 rounded up to the freelist alignment */
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);

	/* Cache what we actually programmed */
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Initialize interrupt coalescing attributes */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

	csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
		      THRESHOLD_1(sge->counter_val[1]) |
		      THRESHOLD_2(sge->counter_val[2]) |
		      THRESHOLD_3(sge->counter_val[3]),
		      SGE_INGRESS_RX_THRESHOLD);

	/* Timers are programmed in core-clock ticks, two per register */
	csio_wr_reg32(hw,
		      TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		      TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		      SGE_TIMER_VALUE_0_AND_1);

	csio_wr_reg32(hw,
		      TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		      TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		      SGE_TIMER_VALUE_2_AND_3);

	csio_wr_reg32(hw,
		      TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		      TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		      SGE_TIMER_VALUE_4_AND_5);

	csio_init_intr_coalesce_parms(hw);
}
void
csio_wr_sge_init(struct csio_hw *hw)
{
	/*
	 * Mastership of the card, device state and the use (or not) of a
	 * firmware config file are assumed to be decided already.
	 *
	 * Non-master functions may only read and work off of the SGE
	 * values that are already programmed. The master additionally
	 * fixes up host-specific registers when the chip is not yet
	 * initialized, and fully programs the SGE when no config file
	 * (soft params) is in use.
	 */
	if (!csio_is_hw_master(hw)) {
		csio_wr_get_sge(hw);
		return;
	}

	if (hw->fw_state != CSIO_DEV_STATE_INIT)
		csio_wr_fixup_host_params(hw);

	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
		csio_wr_get_sge(hw);
	else
		csio_wr_set_sge(hw);
}
/*
* csio_wrm_init - Initialize Work request module.
* @wrm: WR module
* @hw: HW pointer
*
* Allocates memory for an array of queue pointers starting at q_arr.
*/
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
int i;
if (!wrm->num_q) {
csio_err(hw, "Num queues is not set\n");
return -EINVAL;
}
wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
if (!wrm->q_arr)
goto err;
for (i = 0; i < wrm->num_q; i++) {
wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
if (!wrm->q_arr[i]) {
while (--i >= 0)
kfree(wrm->q_arr[i]);
goto err_free_arr;
}
}
wrm->free_qidx = 0;
return 0;
err_free_arr:
kfree(wrm->q_arr);
err:
return -ENOMEM;
}
/*
 * csio_wrm_exit - Uninitialize Work request module.
* @wrm: WR module
* @hw: HW module
*
* Uninitialize WR module. Free q_arr and pointers in it.
* We have the additional job of freeing the DMA memory associated
* with the queues.
*/
void
csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;
	uint32_t j;
	struct csio_q *q;
	struct csio_dma_buf *buf;

	for (i = 0; i < wrm->num_q; i++) {
		q = wrm->q_arr[i];

		/* DMA memory exists only for queues actually handed out
		 * (indices below free_qidx). */
		if (wrm->free_qidx && (i < wrm->free_qidx)) {
			if (q->type == CSIO_FREELIST) {
				if (!q->un.fl.bufs)
					continue;
				/* NOTE(review): the continue above skips the
				 * ring's pci_free_consistent() and kfree(q)
				 * below -- confirm nothing leaks when a
				 * freelist has no buffer array. */
				for (j = 0; j < q->credits; j++) {
					buf = &q->un.fl.bufs[j];
					if (!buf->vaddr)
						continue;
					pci_free_consistent(hw->pdev, buf->len,
							    buf->vaddr,
							    buf->paddr);
				}
				kfree(q->un.fl.bufs);
			}
			/* Free the queue ring itself */
			pci_free_consistent(hw->pdev, q->size,
					    q->vstart, q->pstart);
		}
		kfree(q);
	}

	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;

	kfree(wrm->q_arr);
}
| gpl-2.0 |
miquelmartos/geeksphone-kernel-zero-3.0 | drivers/net/wimax/i2400m/control.c | 2933 | 43832 | /*
* Intel Wireless WiMAX Connection 2400m
* Miscellaneous control functions for managing the device
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Initial implementation
*
* This is a collection of functions used to control the device (plus
* a few helpers).
*
* There are utilities for handling TLV buffers, hooks on the device's
* reports to act on device changes of state [i2400m_report_hook()],
* on acks to commands [i2400m_msg_ack_hook()], a helper for sending
* commands to the device and blocking until a reply arrives
* [i2400m_msg_to_dev()], a few high level commands for manipulating
* the device state, powersving mode and configuration plus the
* routines to setup the device once communication is stablished with
* it [i2400m_dev_initialize()].
*
* ROADMAP
*
* i2400m_dev_initialize() Called by i2400m_dev_start()
* i2400m_set_init_config()
* i2400m_cmd_get_state()
* i2400m_dev_shutdown() Called by i2400m_dev_stop()
* i2400m_reset()
*
* i2400m_{cmd,get,set}_*()
* i2400m_msg_to_dev()
* i2400m_msg_check_status()
*
* i2400m_report_hook() Called on reception of an event
* i2400m_report_state_hook()
* i2400m_tlv_buffer_walk()
* i2400m_tlv_match()
* i2400m_report_tlv_system_state()
* i2400m_report_tlv_rf_switches_status()
* i2400m_report_tlv_media_status()
* i2400m_cmd_enter_powersave()
*
* i2400m_msg_ack_hook() Called on reception of a reply to a
* command, get or set
*/
#include <stdarg.h>
#include "i2400m.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/wimax/i2400m.h>
#define D_SUBMODULE control
#include "debug-levels.h"
/* Runtime-tunable module parameters (mode 0644: root may change via sysfs) */

static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */
module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
MODULE_PARM_DESC(idle_mode_disabled,
		 "If true, the device will not enable idle mode negotiation "
		 "with the base station (when connected) to save power.");

/* 0 (power saving enabled) by default */
static int i2400m_power_save_disabled;
module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
MODULE_PARM_DESC(power_save_disabled,
		 "If true, the driver will not tell the device to enter "
		 "power saving mode when it reports it is ready for it. "
		 "False by default (so the device is told to do power "
		 "saving).");

static int i2400m_passive_mode;	/* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
		 "If true, the driver will not do any device setup "
		 "and leave it up to user space, who must be properly "
		 "setup.");
/*
* Return if a TLV is of a give type and size
*
* @tlv_hdr: pointer to the TLV
* @tlv_type: type of the TLV we are looking for
* @tlv_size: expected size of the TLV we are looking for (if -1,
* don't check the size). This includes the header
* Returns: 0 if the TLV matches
* < 0 if it doesn't match at all
* > 0 total TLV + payload size, if the type matches, but not
* the size
*/
/*
 * Check whether @tlv has type @tlv_type and (optionally) total size
 * @tlv_size (header included; pass -1 to skip the size check).
 *
 * Returns: 0 on a full match; -1 when the type differs; the TLV's
 * actual total size (> 0) when the type matches but the size does not.
 */
static
ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
			 enum i2400m_tlv tlv_type, ssize_t tlv_size)
{
	if (le16_to_cpu(tlv->type) != tlv_type)	/* Not our type? skip */
		return -1;
	if (tlv_size != -1
	    && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
		size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
		/* %zd for tlv_size: it is ssize_t, not size_t */
		printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
		       "size (got %zu vs %zd expected)\n",
		       tlv_type, size, tlv_size);
		return size;
	}
	return 0;
}
/*
* Given a buffer of TLVs, iterate over them
*
* @i2400m: device instance
* @tlv_buf: pointer to the beginning of the TLV buffer
* @buf_size: buffer size in bytes
* @tlv_pos: seek position; this is assumed to be a pointer returned
* by i2400m_tlv_buffer_walk() [and thus, validated]. The
* TLV returned will be the one following this one.
*
* Usage:
*
* tlv_itr = NULL;
* while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr)) {
* ...
* // Do stuff with tlv_itr, DON'T MODIFY IT
* ...
* }
*/
static
const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
	struct i2400m *i2400m,
	const void *tlv_buf, size_t buf_size,
	const struct i2400m_tlv_hdr *tlv_pos)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
	size_t offset, length, avail_size;
	unsigned type;

	if (tlv_pos == NULL)	/* Take the first one? */
		tlv_pos = tlv_buf;
	else			/* Nope, the next one */
		tlv_pos = (void *) tlv_pos
			+ le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
	if (tlv_pos == tlv_top) {	/* buffer done */
		tlv_pos = NULL;
		goto error_beyond_end;
	}
	if (tlv_pos > tlv_top) {	/* past the end: corrupt buffer */
		tlv_pos = NULL;
		WARN_ON(1);
		goto error_beyond_end;
	}
	/* Validate that the TLV's header and payload fit in the buffer */
	offset = (void *) tlv_pos - (void *) tlv_buf;
	avail_size = buf_size - offset;
	if (avail_size < sizeof(*tlv_pos)) {
		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
			"short header\n", tlv_buf, buf_size, offset);
		goto error_short_header;
	}
	type = le16_to_cpu(tlv_pos->type);
	length = le16_to_cpu(tlv_pos->length);
	if (avail_size < sizeof(*tlv_pos) + length) {
		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
			"tlv type 0x%04x @%zu: "
			"short data (%zu bytes vs %zu needed)\n",
			tlv_buf, buf_size, type, offset, avail_size,
			sizeof(*tlv_pos) + length);
		goto error_short_header;
	}
	/* NOTE(review): on a short header/data the (truncated) tlv_pos is
	 * still returned after logging the HW BUG -- confirm callers
	 * tolerate a TLV whose payload may extend past the buffer. */
error_short_header:
error_beyond_end:
	return tlv_pos;
}
/*
* Find a TLV in a buffer of sequential TLVs
*
* @i2400m: device descriptor
* @tlv_hdr: pointer to the first TLV in the sequence
* @size: size of the buffer in bytes; all TLVs are assumed to fit
* fully in the buffer (otherwise we'll complain).
* @tlv_type: type of the TLV we are looking for
* @tlv_size: expected size of the TLV we are looking for (if -1,
* don't check the size). This includes the header
*
* Returns: NULL if the TLV is not found, otherwise a pointer to
* it. If the sizes don't match, an error is printed and NULL
* returned.
*/
/*
 * Find the first TLV of type @tlv_type in a buffer of sequential TLVs.
 *
 * Returns: pointer to the matching TLV, or NULL when not found. A TLV
 * whose type matches but whose size differs from @tlv_size (when
 * @tlv_size != -1) is skipped with a warning.
 */
static
const struct i2400m_tlv_hdr *i2400m_tlv_find(
	struct i2400m *i2400m,
	const struct i2400m_tlv_hdr *tlv_hdr, size_t size,
	enum i2400m_tlv tlv_type, ssize_t tlv_size)
{
	ssize_t match;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *tlv = NULL;
	while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) {
		match = i2400m_tlv_match(tlv, tlv_type, tlv_size);
		if (match == 0)		/* found it :) */
			break;
		if (match > 0)
			/* %zd for both: match and tlv_size are ssize_t */
			dev_warn(dev, "TLV type 0x%04x found with size "
				 "mismatch (%zd vs %zd needed)\n",
				 tlv_type, match, tlv_size);
	}
	return tlv;
}
static const struct
{
char *msg;
int errno;
} ms_to_errno[I2400M_MS_MAX] = {
[I2400M_MS_DONE_OK] = { "", 0 },
[I2400M_MS_DONE_IN_PROGRESS] = { "", 0 },
[I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS },
[I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ },
[I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL },
[I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG },
[I2400M_MS_VERSION_ERROR] = { "bad version", -EIO },
[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
[I2400M_MS_BUSY] = { "busy", -EBUSY },
[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
[I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
[I2400M_MS_NO_RF] = { "no RF", -EIO },
[I2400M_MS_NOT_READY_FOR_POWERSAVE] =
{ "not ready for powersave", -EACCES },
[I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT },
};
/*
 * i2400m_msg_check_status - translate a message's status code
 *
 * @l3l4_hdr: message header
 * @strbuf: buffer to place a formatted error message (unless NULL).
 * @strbuf_size: max amount of available space; larger messages will
 * be truncated.
 *
 * Returns: errno code corresponding to the status code in @l3l4_hdr
 * and a message in @strbuf describing the error.
 */
int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
			    char *strbuf, size_t strbuf_size)
{
	enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status);
	const char *str = "unknown status code";
	int result = -EBADR;

	if (status == 0)	/* all good, nothing to format */
		return 0;
	/* In-range codes get their table entry; anything else keeps
	 * the "unknown status code"/-EBADR defaults set above. */
	if (status < ARRAY_SIZE(ms_to_errno)) {
		str = ms_to_errno[status].msg;
		result = ms_to_errno[status].errno;
	}
	if (strbuf != NULL)
		snprintf(strbuf, strbuf_size, "%s (%d)", str, status);
	return result;
}
/*
* Act on a TLV System State reported by the device
*
* @i2400m: device descriptor
* @ss: validated System State TLV
*/
static
void i2400m_report_tlv_system_state(struct i2400m *i2400m,
				    const struct i2400m_tlv_system_state *ss)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state);

	d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);

	/* Record the new device state and wake anybody waiting on
	 * state_wq for a state transition. */
	if (i2400m->state != i2400m_state) {
		i2400m->state = i2400m_state;
		wake_up_all(&i2400m->state_wq);
	}
	/* Map the fine-grained device state onto the coarser WiMAX
	 * stack state machine. */
	switch (i2400m_state) {
	case I2400M_SS_UNINITIALIZED:
	case I2400M_SS_INIT:
	case I2400M_SS_CONFIG:
	case I2400M_SS_PRODUCTION:
		wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
		break;
	case I2400M_SS_RF_OFF:
	case I2400M_SS_RF_SHUTDOWN:
		wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);
		break;
	case I2400M_SS_READY:
	case I2400M_SS_STANDBY:
	case I2400M_SS_SLEEPACTIVE:
		wimax_state_change(wimax_dev, WIMAX_ST_READY);
		break;
	case I2400M_SS_CONNECTING:
	case I2400M_SS_WIMAX_CONNECTED:
		wimax_state_change(wimax_dev, WIMAX_ST_READY);
		break;
	case I2400M_SS_SCAN:
	case I2400M_SS_OUT_OF_ZONE:
		wimax_state_change(wimax_dev, WIMAX_ST_SCANNING);
		break;
	case I2400M_SS_IDLE:
		d_printf(1, dev, "entering BS-negotiated idle mode\n");
		/* fall through: BS-idle still counts as connected */
	case I2400M_SS_DISCONNECTING:
	case I2400M_SS_DATA_PATH_CONNECTED:
		wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
		break;
	default:
		/* Huh? just in case, shut it down */
		dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
			i2400m_state);
		i2400m_reset(i2400m, I2400M_RT_WARM);
		break;
	}
	d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
		i2400m, ss, i2400m_state);
}
/*
 * Parse and act on a TLV Media Status sent by the device
 *
 * @i2400m: device descriptor
 * @ms: validated Media Status TLV
 *
 * This will set the carrier up or down based on the device's link
 * report. This is done aside from what the WiMAX stack does based on
 * the device's state, as sometimes we need to do a link-renew (the BS
 * wants us to renew a DHCP lease, for example).
 *
 * In fact, doc says that every time we get a link-up, we should do a
 * DHCP negotiation...
 */
static
void i2400m_report_tlv_media_status(struct i2400m *i2400m,
				    const struct i2400m_tlv_media_status *ms)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct net_device *net_dev = wimax_dev->net_dev;
	enum i2400m_media_status status = le32_to_cpu(ms->media_status);

	d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
	/*
	 * LINK_RENEW is the network telling us we need to retrain the
	 * DHCP lease -- so far, we are trusting the WiMAX Network
	 * Service in user space to pick this up and poke the DHCP
	 * client; from the carrier's point of view the link is up.
	 */
	if (status == I2400M_MEDIA_STATUS_LINK_UP
	    || status == I2400M_MEDIA_STATUS_LINK_RENEW)
		netif_carrier_on(net_dev);
	else if (status == I2400M_MEDIA_STATUS_LINK_DOWN)
		netif_carrier_off(net_dev);
	else
		dev_err(dev, "HW BUG? unknown media status %u\n",
			status);
	d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
		i2400m, ms, status);
}
/*
* Process a TLV from a 'state report'
*
* @i2400m: device descriptor
* @tlv: pointer to the TLV header; it has been already validated for
* consistent size.
* @tag: for error messages
*
* Act on the TLVs from a 'state report'.
*/
static
void i2400m_report_state_parse_tlv(struct i2400m *i2400m,
				   const struct i2400m_tlv_hdr *tlv,
				   const char *tag)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_system_state *ss;
	const struct i2400m_tlv_rf_switches_status *rfss;
	const struct i2400m_tlv_media_status *ms;

	/* A TLV carries exactly one type, so at most one of the
	 * handlers below runs for any given @tlv. */
	if (i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss)) == 0) {
		ss = container_of(tlv, typeof(*ss), hdr);
		d_printf(2, dev, "%s: system state TLV "
			 "found (0x%04x), state 0x%08x\n",
			 tag, I2400M_TLV_SYSTEM_STATE,
			 le32_to_cpu(ss->state));
		i2400m_report_tlv_system_state(i2400m, ss);
	}
	if (i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss)) == 0) {
		rfss = container_of(tlv, typeof(*rfss), hdr);
		d_printf(2, dev, "%s: RF status TLV "
			 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
			 tag, I2400M_TLV_RF_STATUS,
			 le32_to_cpu(rfss->sw_rf_switch),
			 le32_to_cpu(rfss->hw_rf_switch));
		i2400m_report_tlv_rf_switches_status(i2400m, rfss);
	}
	if (i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms)) == 0) {
		ms = container_of(tlv, typeof(*ms), hdr);
		d_printf(2, dev, "%s: Media Status TLV: %u\n",
			 tag, le32_to_cpu(ms->media_status));
		i2400m_report_tlv_media_status(i2400m, ms);
	}
}
/*
* Parse a 'state report' and extract information
*
* @i2400m: device descriptor
* @l3l4_hdr: pointer to message; it has been already validated for
* consistent size.
* @size: size of the message (header + payload). The header length
* declaration is assumed to be congruent with @size (as in
* sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
*
* Walk over the TLVs in a report state and act on them.
*/
static
void i2400m_report_state_hook(struct i2400m *i2400m,
			      const struct i2400m_l3l4_hdr *l3l4_hdr,
			      size_t size, const char *tag)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_hdr *tlv;
	size_t tlv_size = le16_to_cpu(l3l4_hdr->length);

	d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
		  i2400m, l3l4_hdr, size, tag);
	/* Hand each TLV in the message payload to the parser */
	for (tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
					  tlv_size, NULL);
	     tlv != NULL;
	     tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
					  tlv_size, tlv))
		i2400m_report_state_parse_tlv(i2400m, tlv, tag);
	d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
		i2400m, l3l4_hdr, size, tag);
}
/*
 * i2400m_report_hook - (maybe) act on a report
 *
 * @i2400m: device descriptor
 * @l3l4_hdr: pointer to message; it has been already validated for
 * consistent size.
 * @size: size of the message (header + payload). The header length
 * declaration is assumed to be congruent with @size (as in
 * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
 *
 * Extract information we might need (like carrier on/off) from a
 * device report.
 */
void i2400m_report_hook(struct i2400m *i2400m,
			const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned msg_type;

	d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n",
		  i2400m, l3l4_hdr, size);
	/* Chew on the message, we might need some information from
	 * here */
	msg_type = le16_to_cpu(l3l4_hdr->type);
	/* Any report type not listed below is deliberately ignored
	 * here (no default case needed). */
	switch (msg_type) {
	case I2400M_MT_REPORT_STATE:	/* carrier detection... */
		i2400m_report_state_hook(i2400m,
					 l3l4_hdr, size, "REPORT STATE");
		break;
	/* If the device is ready for power save, then ask it to do
	 * it. */
	case I2400M_MT_REPORT_POWERSAVE_READY:	/* zzzzz */
		if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
			if (i2400m_power_save_disabled)
				d_printf(1, dev, "ready for powersave, "
					 "not requesting (disabled by module "
					 "parameter)\n");
			else {
				d_printf(1, dev, "ready for powersave, "
					 "requesting\n");
				i2400m_cmd_enter_powersave(i2400m);
			}
		}
		break;
	}
	d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
		i2400m, l3l4_hdr, size);
}
/*
* i2400m_msg_ack_hook - process cmd/set/get ack for internal status
*
* @i2400m: device descriptor
* @l3l4_hdr: pointer to message; it has been already validated for
* consistent size.
* @size: size of the message
*
* Extract information we might need from acks to commands and act on
* it. This is akin to i2400m_report_hook(). Note most of this
* processing should be done in the function that calls the
* command. This is here for some cases where it can't happen...
*/
static void i2400m_msg_ack_hook(struct i2400m *i2400m,
				const struct i2400m_l3l4_hdr *l3l4_hdr,
				size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	/* ack_status is currently unused: the only consumer is the
	 * compiled-out example below */
	unsigned ack_type, ack_status;
	char strerr[32];

	/* Chew on the message, we might need some information from
	 * here */
	ack_type = le16_to_cpu(l3l4_hdr->type);
	ack_status = le16_to_cpu(l3l4_hdr->status);
	switch (ack_type) {
	case I2400M_MT_CMD_ENTER_POWERSAVE:
		/* This is just left here for the sake of example, as
		 * the processing is done somewhere else. */
		if (0) {
			result = i2400m_msg_check_status(
				l3l4_hdr, strerr, sizeof(strerr));
			if (result >= 0)
				d_printf(1, dev, "ready for power save: %zd\n",
					 size);
		}
		break;
	}
}
/*
* i2400m_msg_size_check() - verify message size and header are congruent
*
* It is ok if the total message size is larger than the expected
* size, as there can be padding.
*/
int i2400m_msg_size_check(struct i2400m *i2400m,
			  const struct i2400m_l3l4_hdr *l3l4_hdr,
			  size_t msg_size)
{
	int result = 0;
	struct device *dev = i2400m_dev(i2400m);
	size_t expected_size;

	d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
		  i2400m, l3l4_hdr, msg_size);
	if (msg_size < sizeof(*l3l4_hdr)) {
		/* Too short to even hold a header; don't touch the
		 * length field at all. */
		dev_err(dev, "bad size for message header "
			"(expected at least %zu, got %zu)\n",
			(size_t) sizeof(*l3l4_hdr), msg_size);
		result = -EIO;
	} else {
		/* Header is readable; check the declared payload fits */
		expected_size = le16_to_cpu(l3l4_hdr->length)
			+ sizeof(*l3l4_hdr);
		if (msg_size < expected_size) {
			dev_err(dev, "bad size for message code 0x%04x "
				"(expected %zu, got %zu)\n",
				le16_to_cpu(l3l4_hdr->type),
				expected_size, msg_size);
			result = -EIO;
		}
	}
	d_fnend(4, dev,
		"(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
		i2400m, l3l4_hdr, msg_size, result);
	return result;
}
/*
* Cancel a wait for a command ACK
*
* @i2400m: device descriptor
* @code: [negative] errno code to cancel with (don't use
* -EINPROGRESS)
*
* If there is an ack already filled out, free it.
*/
void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
{
struct sk_buff *ack_skb;
unsigned long flags;
spin_lock_irqsave(&i2400m->rx_lock, flags);
ack_skb = i2400m->ack_skb;
if (ack_skb && !IS_ERR(ack_skb))
kfree_skb(ack_skb);
i2400m->ack_skb = ERR_PTR(code);
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
/**
* i2400m_msg_to_dev - Send a control message to the device and get a response
*
* @i2400m: device descriptor
*
* @msg_skb: an skb *
*
* @buf: pointer to the buffer containing the message to be sent; it
* has to start with a &struct i2400M_l3l4_hdr and then
* followed by the payload. Once this function returns, the
* buffer can be reused.
*
* @buf_len: buffer size
*
* Returns:
*
* Pointer to skb containing the ack message. You need to check the
* pointer with IS_ERR(), as it might be an error code. Error codes
* could happen because:
*
* - the message wasn't formatted correctly
* - couldn't send the message
* - failed waiting for a response
* - the ack message wasn't formatted correctly
*
* The returned skb has been allocated with wimax_msg_to_user_alloc(),
* it contains the response in a netlink attribute and is ready to be
* passed up to user space with wimax_msg_to_user_send(). To access
* the payload and its length, use wimax_msg_{data,len}() on the skb.
*
* The skb has to be freed with kfree_skb() once done.
*
* Description:
*
* This function delivers a message/command to the device and waits
* for an ack to be received. The format is described in
* linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
* ack.
*
* This function will not check the ack status, that's left up to the
* caller. Once done with the ack skb, it has to be kfree_skb()ed.
*
* The i2400m handles only one message at the same time, thus we need
* the mutex to exclude other players.
*
* We write the message and then wait for an answer to come back. The
* RX path intercepts control messages and handles them in
* i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
* locally and then forwarded (as needed) to user space on the WiMAX
* stack message pipe. Acks are saved and passed back to us through an
* skb in i2400m->ack_skb which is ready to be given to generic
* netlink if need be.
*/
struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
				  const void *buf, size_t buf_len)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
	struct sk_buff *ack_skb;
	const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
	size_t ack_len;
	int ack_timeout;
	unsigned msg_type;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
		  i2400m, buf, buf_len);

	rmb();		/* Make sure we see what i2400m_dev_reset_handle()
			 * wrote to boot_mode */
	if (i2400m->boot_mode)
		return ERR_PTR(-EL3RST);

	msg_l3l4_hdr = buf;
	/* Check msg & payload consistency */
	result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
	if (result < 0)
		goto error_bad_msg;
	msg_type = le16_to_cpu(msg_l3l4_hdr->type);
	d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
		 msg_type, buf_len);
	d_dump(2, dev, buf, buf_len);

	/* Setup the completion, ack_skb ("we are waiting") and send
	 * the message to the device. msg_mutex serializes commands;
	 * the device handles only one message at a time. */
	mutex_lock(&i2400m->msg_mutex);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	/* -EINPROGRESS in ack_skb tells the RX path somebody waits */
	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	init_completion(&i2400m->msg_completion);
	result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
	if (result < 0) {
		dev_err(dev, "can't send message 0x%04x: %d\n",
			le16_to_cpu(msg_l3l4_hdr->type), result);
		goto error_tx;
	}

	/* Some commands take longer to execute because of crypto ops,
	 * so we give them some more leeway on timeout */
	switch (msg_type) {
	case I2400M_MT_GET_TLS_OPERATION_RESULT:
	case I2400M_MT_CMD_SEND_EAP_RESPONSE:
		ack_timeout = 5 * HZ;
		break;
	default:
		ack_timeout = HZ;
	}

	/* Echo the outgoing message to user space if tracing is on */
	if (unlikely(i2400m->trace_msg_from_user))
		wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
	/* The RX path in rx.c will put any response for this message
	 * in i2400m->ack_skb and wake us up. If we cancel the wait,
	 * we need to change the value of i2400m->ack_skb to something
	 * not -EINPROGRESS so RX knows there is no one waiting. */
	result = wait_for_completion_interruptible_timeout(
		&i2400m->msg_completion, ack_timeout);
	if (result == 0) {
		dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
			msg_type);
		result = -ETIMEDOUT;
		i2400m_msg_to_dev_cancel_wait(i2400m, result);
		goto error_wait_for_completion;
	} else if (result < 0) {
		dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
			msg_type, result);
		i2400m_msg_to_dev_cancel_wait(i2400m, result);
		goto error_wait_for_completion;
	}

	/* Pull out the ack data from i2400m->ack_skb -- see if it is
	 * an error and act accordingly */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	ack_skb = i2400m->ack_skb;
	if (IS_ERR(ack_skb))
		result = PTR_ERR(ack_skb);
	else
		result = 0;
	i2400m->ack_skb = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (result < 0)
		goto error_ack_status;
	ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);

	/* Check the ack and deliver it if it is ok */
	if (unlikely(i2400m->trace_msg_from_user))
		wimax_msg(&i2400m->wimax_dev, "echo",
			  ack_l3l4_hdr, ack_len, GFP_KERNEL);
	result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
	if (result < 0) {
		dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
			msg_type, result);
		goto error_bad_ack_len;
	}
	/* The ack's type must match the command it answers */
	if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
		dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
			le16_to_cpu(ack_l3l4_hdr->type), msg_type);
		result = -EIO;
		goto error_bad_ack_type;
	}
	i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
	mutex_unlock(&i2400m->msg_mutex);
	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
		i2400m, buf, buf_len, ack_skb);
	return ack_skb;

error_bad_ack_type:
error_bad_ack_len:
	kfree_skb(ack_skb);
error_ack_status:
error_wait_for_completion:
error_tx:
	mutex_unlock(&i2400m->msg_mutex);
error_bad_msg:
	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
		i2400m, buf, buf_len, result);
	return ERR_PTR(result);
}
/*
 * Definitions for the Enter Power Save command
 *
 * The Enter Power Save command requests the device to go into power
 * saving mode. The device will ack or nak the command depending on it
 * being ready for it. If it acks, we tell the USB subsystem it may
 * autosuspend the device.
 *
 * As well, the device might request to go into power saving mode by
 * sending a report (REPORT_POWERSAVE_READY), in which case, we issue
 * this command. The hookups in the RX code allow this to happen.
 */
/* Values for the wakeup-mode TLV (type 144) carried by the Enter
 * Power Save command below */
enum {
	I2400M_WAKEUP_ENABLED = 0x01,
	I2400M_WAKEUP_DISABLED = 0x02,
	I2400M_TLV_TYPE_WAKEUP_MODE = 144,
};

/* Wire format of the Enter Power Save command: L3L4 header followed
 * by a single wakeup-mode TLV (hence __packed) */
struct i2400m_cmd_enter_power_save {
	struct i2400m_l3l4_hdr hdr;
	struct i2400m_tlv_hdr tlv;
	__le32 val;
} __packed;
/*
* Request entering power save
*
* This command is (mainly) executed when the device indicates that it
* is ready to go into powersave mode via a REPORT_POWERSAVE_READY.
*/
int i2400m_cmd_enter_powersave(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_cmd_enter_power_save *cmd;
	char strerr[32];

	result = -ENOMEM;
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* Build the command: header + one wakeup-mode TLV */
	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE);
	cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
	cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE);
	cmd->tlv.length = cpu_to_le16(sizeof(cmd->val));
	cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	/* PTR_ERR() taken unconditionally; only meaningful (and used)
	 * when IS_ERR() below is true */
	result = PTR_ERR(ack_skb);
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'Enter power save' command: %d\n",
			result);
		goto error_msg_to_dev;
	}
	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
					 strerr, sizeof(strerr));
	if (result == -EACCES)
		/* Device nak'ed: not ready for powersave, not an error */
		d_printf(1, dev, "Cannot enter power save mode\n");
	else if (result < 0)
		dev_err(dev, "'Enter power save' (0x%04x) command failed: "
			"%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE,
			result, strerr);
	else
		d_printf(1, dev, "device ready to power save\n");
	kfree_skb(ack_skb);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave);
/*
* Definitions for getting device information
*/
enum {
	/* TLV (in the 'get device info' ack) holding the detailed
	 * device information blob */
	I2400M_TLV_DETAILED_DEVICE_INFO = 140
};
/**
* i2400m_get_device_info - Query the device for detailed device information
*
* @i2400m: device descriptor
*
* Returns: an skb whose skb->data points to a 'struct
* i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
* skb is *guaranteed* to contain the whole TLV data structure.
*
* On error, IS_ERR(skb) is true and ERR_PTR(skb) is the error
* code.
*/
struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_l3l4_hdr *cmd;
	const struct i2400m_l3l4_hdr *ack;
	size_t ack_len;
	const struct i2400m_tlv_hdr *tlv;
	const struct i2400m_tlv_detailed_device_info *ddi;
	char strerr[32];

	ack_skb = ERR_PTR(-ENOMEM);
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* Zero-payload 'get device info' command */
	cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
	cmd->length = 0;
	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
			PTR_ERR(ack_skb));
		/* ack_skb already holds the ERR_PTR we'll return */
		goto error_msg_to_dev;
	}
	ack = wimax_msg_data_len(ack_skb, &ack_len);
	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
	if (result < 0) {
		dev_err(dev, "'get device info' (0x%04x) command failed: "
			"%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
			strerr);
		goto error_cmd_failed;
	}
	/* Find the device-info TLV in the ack's payload */
	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
			      I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
	if (tlv == NULL) {
		dev_err(dev, "GET DEVICE INFO: "
			"detailed device info TLV not found (0x%04x)\n",
			I2400M_TLV_DETAILED_DEVICE_INFO);
		result = -EIO;
		goto error_no_tlv;
	}
	/* Success: trim the skb so skb->data points at the TLV, then
	 * fall into the shared cleanup below, which only frees the
	 * command buffer and returns the skb. */
	skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return ack_skb;

error_no_tlv:
error_cmd_failed:
	kfree_skb(ack_skb);
	kfree(cmd);
	return ERR_PTR(result);
}
/* Firmware interface versions we support */
enum {
	I2400M_HDIv_MAJOR = 9,		/* required major version */
	I2400M_HDIv_MINOR = 1,		/* oldest tested minor */
	I2400M_HDIv_MINOR_2 = 2,	/* newest tested minor */
};
/**
* i2400m_firmware_check - check firmware versions are compatible with
* the driver
*
* @i2400m: device descriptor
*
* Returns: 0 if ok, < 0 errno code an error and a message in the
* kernel log.
*
* Long function, but quite simple; first chunk launches the command
* and double checks the reply for the right TLV. Then we process the
* TLV (where the meat is).
*
* Once we process the TLV that gives us the firmware's interface
* version, we encode it and save it in i2400m->fw_version for future
* reference.
*/
int i2400m_firmware_check(struct i2400m *i2400m)
{
int result;
struct device *dev = i2400m_dev(i2400m);
struct sk_buff *ack_skb;
struct i2400m_l3l4_hdr *cmd;
const struct i2400m_l3l4_hdr *ack;
size_t ack_len;
const struct i2400m_tlv_hdr *tlv;
const struct i2400m_tlv_l4_message_versions *l4mv;
char strerr[32];
unsigned major, minor, branch;
result = -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
goto error_alloc;
cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION);
cmd->length = 0;
cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
if (IS_ERR(ack_skb)) {
result = PTR_ERR(ack_skb);
dev_err(dev, "Failed to issue 'get lm version' command: %-d\n",
result);
goto error_msg_to_dev;
}
ack = wimax_msg_data_len(ack_skb, &ack_len);
result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
if (result < 0) {
dev_err(dev, "'get lm version' (0x%04x) command failed: "
"%d - %s\n", I2400M_MT_GET_LM_VERSION, result,
strerr);
goto error_cmd_failed;
}
tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
if (tlv == NULL) {
dev_err(dev, "get lm version: TLV not found (0x%04x)\n",
I2400M_TLV_L4_MESSAGE_VERSIONS);
result = -EIO;
goto error_no_tlv;
}
l4mv = container_of(tlv, typeof(*l4mv), hdr);
major = le16_to_cpu(l4mv->major);
minor = le16_to_cpu(l4mv->minor);
branch = le16_to_cpu(l4mv->branch);
result = -EINVAL;
if (major != I2400M_HDIv_MAJOR) {
dev_err(dev, "unsupported major fw version "
"%u.%u.%u\n", major, minor, branch);
goto error_bad_major;
}
result = 0;
if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR)
dev_warn(dev, "untested minor fw version %u.%u.%u\n",
major, minor, branch);
/* Yes, we ignore the branch -- we don't have to track it */
i2400m->fw_version = major << 16 | minor;
dev_info(dev, "firmware interface version %u.%u.%u\n",
major, minor, branch);
error_bad_major:
error_no_tlv:
error_cmd_failed:
kfree_skb(ack_skb);
error_msg_to_dev:
kfree(cmd);
error_alloc:
return result;
}
/*
* Send an DoExitIdle command to the device to ask it to go out of
* basestation-idle mode.
*
* @i2400m: device descriptor
*
* This starts a renegotiation with the basestation that might involve
* another crypto handshake with user space.
*
* Returns: 0 if ok, < 0 errno code on error.
*/
int i2400m_cmd_exit_idle(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_l3l4_hdr *cmd;
	char strerr[32];

	result = -ENOMEM;
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* Zero-payload 'exit idle' command */
	cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE);
	cmd->length = 0;
	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	/* PTR_ERR() taken unconditionally; only used when IS_ERR() */
	result = PTR_ERR(ack_skb);
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'exit idle' command: %d\n",
			result);
		goto error_msg_to_dev;
	}
	/* Pass the device-reported status straight to the caller */
	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
					 strerr, sizeof(strerr));
	kfree_skb(ack_skb);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return result;
}
/*
* Query the device for its state, update the WiMAX stack's idea of it
*
* @i2400m: device descriptor
*
* Returns: 0 if ok, < 0 errno code on error.
*
* Executes a 'Get State' command and parses the returned
* TLVs.
*
* Because this is almost identical to a 'Report State', we use
* i2400m_report_state_hook() to parse the answer. This will set the
* carrier state, as well as the RF Kill switches state.
*/
static int i2400m_cmd_get_state(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_l3l4_hdr *cmd;
	const struct i2400m_l3l4_hdr *ack;
	size_t ack_len;
	char strerr[32];

	result = -ENOMEM;
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* Zero-payload 'get state' command */
	cmd->type = cpu_to_le16(I2400M_MT_GET_STATE);
	cmd->length = 0;
	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'get state' command: %ld\n",
			PTR_ERR(ack_skb));
		result = PTR_ERR(ack_skb);
		goto error_msg_to_dev;
	}
	ack = wimax_msg_data_len(ack_skb, &ack_len);
	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
	if (result < 0) {
		dev_err(dev, "'get state' (0x%04x) command failed: "
			"%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
		goto error_cmd_failed;
	}
	/* Same payload layout as a 'report state', so reuse the hook */
	i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack),
				 "GET STATE");
	result = 0;
error_cmd_failed:
	/* Free the ack on both the success and the command-failed
	 * paths; the old code leaked it when the device reported an
	 * error status. */
	kfree_skb(ack_skb);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return result;
}
/**
 * Set basic configuration settings
 *
 * @i2400m: device descriptor
 * @arg: array of pointers to the TLV headers to send for
 *     configuration (each followed by its payload).
 *     TLV headers and payloads must be properly initialized, with the
 *     right endianness (LE).
 * @args: number of pointers in the @arg array
 */
static int i2400m_set_init_config(struct i2400m *i2400m,
				  const struct i2400m_tlv_hdr **arg,
				  size_t args)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct i2400m_l3l4_hdr *cmd;
	char strerr[32];
	unsigned argc, argsize, tlv_size;
	const struct i2400m_tlv_hdr *tlv_hdr;
	void *buf, *itr;

	d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args);
	result = 0;
	if (args == 0)	/* nothing to configure */
		goto none;
	/* Compute the size of all the TLVs, so we can alloc a
	 * contiguous command block to copy them. */
	argsize = 0;
	for (argc = 0; argc < args; argc++) {
		tlv_hdr = arg[argc];
		argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
	}
	/* NOTE(review): fires at argc == 9 too; confirm whether the
	 * hw limit of 9 TLVs is inclusive */
	WARN_ON(argc >= 9);	/* As per hw spec */
	/* Alloc the space for the command and TLVs*/
	result = -ENOMEM;
	buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL);
	if (buf == NULL)
		goto error_alloc;
	cmd = buf;
	cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG);
	cmd->length = cpu_to_le16(argsize);
	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
	/* Copy the TLVs */
	itr = buf + sizeof(*cmd);
	for (argc = 0; argc < args; argc++) {
		tlv_hdr = arg[argc];
		tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
		memcpy(itr, tlv_hdr, tlv_size);
		itr += tlv_size;
	}
	/* Send the message! */
	ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize);
	/* PTR_ERR() taken unconditionally; only used when IS_ERR() */
	result = PTR_ERR(ack_skb);
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'init config' command: %d\n",
			result);
		goto error_msg_to_dev;
	}
	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
					 strerr, sizeof(strerr));
	if (result < 0)
		dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n",
			I2400M_MT_SET_INIT_CONFIG, result, strerr);
	kfree_skb(ack_skb);
error_msg_to_dev:
	kfree(buf);
error_alloc:
none:
	d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n",
		i2400m, arg, args, result);
	return result;
}
/**
* i2400m_set_idle_timeout - Set the device's idle mode timeout
*
* @i2400m: i2400m device descriptor
*
* @msecs: milliseconds for the timeout to enter idle mode. Between
* 100 to 300000 (5m); 0 to disable. In increments of 100.
*
* After this @msecs of the link being idle (no data being sent or
* received), the device will negotiate with the basestation entering
* idle mode for saving power. The connection is maintained, but
* getting out of it (done in tx.c) will require some negotiation,
* possible crypto re-handshake and a possible DHCP re-lease.
*
* Only available if fw_version >= 0x00090002.
*
* Returns: 0 if ok, < 0 errno code on error.
*/
int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *ack_skb;
	struct {
		struct i2400m_l3l4_hdr hdr;
		struct i2400m_tlv_config_idle_timeout cit;
	} *cmd;
	const struct i2400m_l3l4_hdr *ack;
	size_t ack_len;
	char strerr[32];

	/* Only firmwares >= v1.4 support the idle-timeout TLV */
	result = -ENOSYS;
	if (i2400m_le_v1_3(i2400m))
		goto error_alloc;
	result = -ENOMEM;
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_alloc;
	/* NOTE(review): message type here is I2400M_MT_GET_STATE,
	 * which looks like a copy-paste from i2400m_cmd_get_state();
	 * left untouched to preserve behavior -- verify against the
	 * HDI spec before changing. */
	cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE);
	cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);

	cmd->cit.hdr.type =
		cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
	cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout));
	cmd->cit.timeout = cpu_to_le32(msecs);

	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
	if (IS_ERR(ack_skb)) {
		dev_err(dev, "Failed to issue 'set idle timeout' command: "
			"%ld\n", PTR_ERR(ack_skb));
		result = PTR_ERR(ack_skb);
		goto error_msg_to_dev;
	}
	ack = wimax_msg_data_len(ack_skb, &ack_len);
	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
	if (result < 0) {
		dev_err(dev, "'set idle timeout' (0x%04x) command failed: "
			"%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
		goto error_cmd_failed;
	}
	result = 0;
error_cmd_failed:
	/* Free the ack on both the success and the command-failed
	 * paths; the old code leaked it when the device nak'ed. */
	kfree_skb(ack_skb);
error_msg_to_dev:
	kfree(cmd);
error_alloc:
	return result;
}
/**
* i2400m_dev_initialize - Initialize the device once communications are ready
*
* @i2400m: device descriptor
*
* Returns: 0 if ok, < 0 errno code on error.
*
* Configures the device to work the way we like it.
*
* At the point of this call, the device is registered with the WiMAX
* and netdev stacks, firmware is uploaded and we can talk to the
* device normally.
*/
int i2400m_dev_initialize(struct i2400m *i2400m)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_tlv_config_idle_parameters idle_params;
	struct i2400m_tlv_config_idle_timeout idle_timeout;
	struct i2400m_tlv_config_d2h_data_format df;
	struct i2400m_tlv_config_dl_host_reorder dlhr;
	/* Up to 9 config TLVs, the hw limit enforced in
	 * i2400m_set_init_config() */
	const struct i2400m_tlv_hdr *args[9];
	unsigned argc = 0;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	/* In passive mode we don't configure anything, just query state */
	if (i2400m_passive_mode)
		goto out_passive;
	/* Disable idle mode? (enabled by default) */
	if (i2400m_idle_mode_disabled) {
		if (i2400m_le_v1_3(i2400m)) {
			/* Old firmware: idle-parameters TLV */
			idle_params.hdr.type =
				cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS);
			idle_params.hdr.length = cpu_to_le16(
				sizeof(idle_params) - sizeof(idle_params.hdr));
			idle_params.idle_timeout = 0;
			idle_params.idle_paging_interval = 0;
			args[argc++] = &idle_params.hdr;
		} else {
			/* Newer firmware: idle-timeout TLV, 0 = disabled */
			idle_timeout.hdr.type =
				cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
			idle_timeout.hdr.length = cpu_to_le16(
				sizeof(idle_timeout) - sizeof(idle_timeout.hdr));
			idle_timeout.timeout = 0;
			args[argc++] = &idle_timeout.hdr;
		}
	}
	if (i2400m_ge_v1_4(i2400m)) {
		/* Enable extended RX data format? */
		df.hdr.type =
			cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT);
		df.hdr.length = cpu_to_le16(
			sizeof(df) - sizeof(df.hdr));
		df.format = 1;
		args[argc++] = &df.hdr;

		/* Enable RX data reordering?
		 * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */
		if (i2400m->rx_reorder) {
			dlhr.hdr.type =
				cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER);
			dlhr.hdr.length = cpu_to_le16(
				sizeof(dlhr) - sizeof(dlhr.hdr));
			dlhr.reorder = 1;
			args[argc++] = &dlhr.hdr;
		}
	}
	result = i2400m_set_init_config(i2400m, args, argc);
	if (result < 0)
		goto error;
out_passive:
	/*
	 * Update state: Here it just calls a get state; parsing the
	 * result (System State TLV and RF Status TLV [done in the rx
	 * path hooks]) will set the hardware and software RF-Kill
	 * status.
	 */
	result = i2400m_cmd_get_state(i2400m);
error:
	if (result < 0)
		dev_err(dev, "failed to initialize the device: %d\n", result);
	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
	return result;
}
/**
 * i2400m_dev_shutdown - Shutdown a running device
 *
 * @i2400m: device descriptor
 *
 * Release resources acquired during the running of the device; in
 * theory, should also tell the device to go to sleep, switch off the
 * radio, all that, but at this point, in most cases (driver
 * disconnection, reset handling) we can't even talk to the device.
 *
 * Currently a trace-only placeholder: nothing is actually released
 * here.
 */
void i2400m_dev_shutdown(struct i2400m *i2400m)
{
        struct device *dev = i2400m_dev(i2400m);

        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
| gpl-2.0 |
GustavoRD78/78Kernel-ZL-230 | drivers/media/video/cx231xx/cx231xx-audio.c | 2933 | 19234 | /*
* Conexant Cx231xx audio extension
*
* Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
* Based on em28xx driver
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/init.h>
#include <linux/sound.h>
#include <linux/spinlock.h>
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <media/v4l2-common.h>
#include "cx231xx.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
#define dprintk(fmt, arg...) do { \
if (debug) \
printk(KERN_INFO "cx231xx-audio %s: " fmt, \
__func__, ##arg); \
} while (0)
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
/*
 * Stop and release all isochronous audio URBs and their transfer
 * buffers.  Safe to call from any context: when interrupts are
 * disabled we only unlink (asynchronous), otherwise we kill
 * (synchronous).  Always returns 0.
 */
static int cx231xx_isoc_audio_deinit(struct cx231xx *dev)
{
        int i;

        dprintk("Stopping isoc\n");

        for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
                struct urb *urb = dev->adev.urb[i];

                if (urb == NULL)
                        continue;

                if (irqs_disabled())
                        usb_unlink_urb(urb);
                else
                        usb_kill_urb(urb);

                usb_free_urb(urb);
                dev->adev.urb[i] = NULL;

                kfree(dev->adev.transfer_buffer[i]);
                dev->adev.transfer_buffer[i] = NULL;
        }

        return 0;
}
/*
 * Stop and release all bulk audio URBs and their transfer buffers.
 * Mirror image of cx231xx_isoc_audio_deinit(): unlink when interrupts
 * are off, kill otherwise.  Always returns 0.
 */
static int cx231xx_bulk_audio_deinit(struct cx231xx *dev)
{
        int i;

        dprintk("Stopping bulk\n");

        for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
                struct urb *urb = dev->adev.urb[i];

                if (urb == NULL)
                        continue;

                if (irqs_disabled())
                        usb_unlink_urb(urb);
                else
                        usb_kill_urb(urb);

                usb_free_urb(urb);
                dev->adev.urb[i] = NULL;

                kfree(dev->adev.transfer_buffer[i]);
                dev->adev.transfer_buffer[i] = NULL;
        }

        return 0;
}
/*
 * Isochronous URB completion handler.  Copies each packet's samples
 * into the ALSA capture ring buffer (wrapping at buffer_size),
 * advances the hardware pointer under the PCM stream lock and then
 * resubmits the URB.  Runs in interrupt context.
 */
static void cx231xx_audio_isocirq(struct urb *urb)
{
        struct cx231xx *dev = urb->context;
        int i;
        unsigned int oldptr;
        int period_elapsed = 0;
        int status;
        unsigned char *cp;
        unsigned int stride;
        struct snd_pcm_substream *substream;
        struct snd_pcm_runtime *runtime;

        if (dev->state & DEV_DISCONNECTED)
                return;

        switch (urb->status) {
        case 0:                 /* success */
        case -ETIMEDOUT:        /* NAK */
                break;
        case -ECONNRESET:       /* kill */
        case -ENOENT:
        case -ESHUTDOWN:
                /* URB is being torn down: do not resubmit */
                return;
        default:                /* error */
                dprintk("urb completition error %d.\n", urb->status);
                break;
        }

        if (atomic_read(&dev->stream_started) == 0)
                return;

        if (dev->adev.capture_pcm_substream) {
                substream = dev->adev.capture_pcm_substream;
                runtime = substream->runtime;
                stride = runtime->frame_bits >> 3;      /* bytes per frame */

                for (i = 0; i < urb->number_of_packets; i++) {
                        int length = urb->iso_frame_desc[i].actual_length /
                                        stride;
                        cp = (unsigned char *)urb->transfer_buffer +
                                        urb->iso_frame_desc[i].offset;

                        if (!length)
                                continue;

                        oldptr = dev->adev.hwptr_done_capture;
                        if (oldptr + length >= runtime->buffer_size) {
                                /* copy wraps: split at end of ring buffer */
                                unsigned int cnt;
                                cnt = runtime->buffer_size - oldptr;
                                memcpy(runtime->dma_area + oldptr * stride, cp,
                                       cnt * stride);
                                memcpy(runtime->dma_area, cp + cnt * stride,
                                       length * stride - cnt * stride);
                        } else {
                                memcpy(runtime->dma_area + oldptr * stride, cp,
                                       length * stride);
                        }

                        /* pointer updates must be done under the stream lock */
                        snd_pcm_stream_lock(substream);

                        dev->adev.hwptr_done_capture += length;
                        if (dev->adev.hwptr_done_capture >=
                                                runtime->buffer_size)
                                dev->adev.hwptr_done_capture -=
                                                runtime->buffer_size;

                        dev->adev.capture_transfer_done += length;
                        if (dev->adev.capture_transfer_done >=
                                                runtime->period_size) {
                                dev->adev.capture_transfer_done -=
                                                runtime->period_size;
                                period_elapsed = 1;
                        }
                        snd_pcm_stream_unlock(substream);
                }
                /* notify ALSA outside the stream lock */
                if (period_elapsed)
                        snd_pcm_period_elapsed(substream);
        }
        urb->status = 0;

        status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status < 0) {
                cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
                               status);
        }
        return;
}
/*
 * Bulk URB completion handler.  Same ring-buffer copy logic as the
 * isoc handler, but a bulk URB carries one contiguous payload
 * (urb->actual_length) instead of per-packet descriptors.  Runs in
 * interrupt context.
 */
static void cx231xx_audio_bulkirq(struct urb *urb)
{
        struct cx231xx *dev = urb->context;
        unsigned int oldptr;
        int period_elapsed = 0;
        int status;
        unsigned char *cp;
        unsigned int stride;
        struct snd_pcm_substream *substream;
        struct snd_pcm_runtime *runtime;

        if (dev->state & DEV_DISCONNECTED)
                return;

        switch (urb->status) {
        case 0:                 /* success */
        case -ETIMEDOUT:        /* NAK */
                break;
        case -ECONNRESET:       /* kill */
        case -ENOENT:
        case -ESHUTDOWN:
                /* URB is being torn down: do not resubmit */
                return;
        default:                /* error */
                dprintk("urb completition error %d.\n", urb->status);
                break;
        }

        if (atomic_read(&dev->stream_started) == 0)
                return;

        if (dev->adev.capture_pcm_substream) {
                substream = dev->adev.capture_pcm_substream;
                runtime = substream->runtime;
                stride = runtime->frame_bits >> 3;      /* bytes per frame */

                /* single "packet": kept as a block to mirror the isoc loop */
                if (1) {
                        int length = urb->actual_length /
                                        stride;
                        cp = (unsigned char *)urb->transfer_buffer;

                        oldptr = dev->adev.hwptr_done_capture;
                        if (oldptr + length >= runtime->buffer_size) {
                                /* copy wraps: split at end of ring buffer */
                                unsigned int cnt;
                                cnt = runtime->buffer_size - oldptr;
                                memcpy(runtime->dma_area + oldptr * stride, cp,
                                       cnt * stride);
                                memcpy(runtime->dma_area, cp + cnt * stride,
                                       length * stride - cnt * stride);
                        } else {
                                memcpy(runtime->dma_area + oldptr * stride, cp,
                                       length * stride);
                        }

                        /* pointer updates must be done under the stream lock */
                        snd_pcm_stream_lock(substream);

                        dev->adev.hwptr_done_capture += length;
                        if (dev->adev.hwptr_done_capture >=
                                                runtime->buffer_size)
                                dev->adev.hwptr_done_capture -=
                                                runtime->buffer_size;

                        dev->adev.capture_transfer_done += length;
                        if (dev->adev.capture_transfer_done >=
                                                runtime->period_size) {
                                dev->adev.capture_transfer_done -=
                                                runtime->period_size;
                                period_elapsed = 1;
                        }
                        snd_pcm_stream_unlock(substream);
                }
                /* notify ALSA outside the stream lock */
                if (period_elapsed)
                        snd_pcm_period_elapsed(substream);
        }
        urb->status = 0;

        status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status < 0) {
                cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
                               status);
        }
        return;
}
static int cx231xx_init_audio_isoc(struct cx231xx *dev)
{
int i, errCode;
int sb_size;
cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__);
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
sb_size = CX231XX_ISO_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
struct urb *urb;
int j, k;
dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
if (!dev->adev.transfer_buffer[i])
return -ENOMEM;
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
cx231xx_errdev("usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
}
return -ENOMEM;
}
urb->dev = dev->udev;
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
urb->number_of_packets = CX231XX_ISO_NUM_AUDIO_PACKETS;
urb->transfer_buffer_length = sb_size;
for (j = k = 0; j < CX231XX_ISO_NUM_AUDIO_PACKETS;
j++, k += dev->adev.max_pkt_size) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = dev->adev.max_pkt_size;
}
dev->adev.urb[i] = urb;
}
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode < 0) {
cx231xx_isoc_audio_deinit(dev);
return errCode;
}
}
return errCode;
}
static int cx231xx_init_audio_bulk(struct cx231xx *dev)
{
int i, errCode;
int sb_size;
cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__);
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
struct urb *urb;
int j;
dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
if (!dev->adev.transfer_buffer[i])
return -ENOMEM;
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
cx231xx_errdev("usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
}
return -ENOMEM;
}
urb->dev = dev->udev;
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
dev->adev.urb[i] = urb;
}
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode < 0) {
cx231xx_bulk_audio_deinit(dev);
return errCode;
}
}
return errCode;
}
/*
 * Ensure the PCM runtime has a vmalloc()ed DMA area of at least @size
 * bytes.  Reuses a sufficiently large existing buffer, otherwise frees
 * it and allocates a new one.  Returns 0 or -ENOMEM.
 */
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
                                        size_t size)
{
        struct snd_pcm_runtime *rt = subs->runtime;

        dprintk("Allocating vbuffer\n");

        /* existing buffer already big enough: keep it */
        if (rt->dma_area && rt->dma_bytes > size)
                return 0;

        vfree(rt->dma_area);            /* vfree(NULL) is a no-op */
        rt->dma_area = vmalloc(size);
        if (rt->dma_area == NULL)
                return -ENOMEM;

        rt->dma_bytes = size;
        return 0;
}
/*
 * ALSA capture capabilities: the hardware delivers fixed 48 kHz,
 * 2-channel, S16_LE interleaved samples; buffer/period limits follow
 * the values used by usbaudio.c.
 */
static struct snd_pcm_hardware snd_cx231xx_hw_capture = {
        .info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
            SNDRV_PCM_INFO_MMAP |
            SNDRV_PCM_INFO_INTERLEAVED |
            SNDRV_PCM_INFO_MMAP_VALID,

        .formats = SNDRV_PCM_FMTBIT_S16_LE,

        .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT,

        .rate_min = 48000,
        .rate_max = 48000,
        .channels_min = 2,
        .channels_max = 2,
        .buffer_bytes_max = 62720 * 8,  /* just about the value in usbaudio.c */
        .period_bytes_min = 64,         /* 12544/2, */
        .period_bytes_max = 12544,
        .periods_min = 2,
        .periods_max = 98,              /* 12544, */
};
/*
 * PCM open callback: select the audio alternate setting, start the
 * hardware stream and register the substream.  Returns 0 or a
 * negative errno.
 *
 * Fix over the original: the result of cx231xx_capture_start() was
 * ignored, so a failed start still counted as an active user and the
 * open "succeeded" with no stream running.
 */
static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
{
        struct cx231xx *dev = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
        int ret = 0;

        dprintk("opening device and trying to acquire exclusive lock\n");

        if (!dev) {
                cx231xx_errdev("BUG: cx231xx can't find device struct."
                               " Can't proceed with open\n");
                return -ENODEV;
        }

        if (dev->state & DEV_DISCONNECTED) {
                cx231xx_errdev("Can't open. the device was removed.\n");
                return -ENODEV;
        }

        /* Sets volume, mute, etc */
        dev->mute = 0;

        /* set alternate setting for audio interface */
        /* 1 - 48000 samples per sec */
        mutex_lock(&dev->lock);
        if (dev->USE_ISO)
                ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 1);
        else
                ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
        mutex_unlock(&dev->lock);
        if (ret < 0) {
                cx231xx_errdev("failed to set alternate setting !\n");
                return ret;
        }

        runtime->hw = snd_cx231xx_hw_capture;

        mutex_lock(&dev->lock);
        /* inform hardware to start streaming */
        ret = cx231xx_capture_start(dev, 1, Audio);
        if (ret < 0) {
                /* fix: don't count a failed open as an active user */
                mutex_unlock(&dev->lock);
                cx231xx_errdev("failed to start audio capture !\n");
                return ret;
        }
        dev->adev.users++;
        mutex_unlock(&dev->lock);

        snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
        dev->adev.capture_pcm_substream = substream;
        runtime->private_data = dev;

        return 0;
}
/*
 * PCM close callback: stop hardware streaming, return the audio
 * interface to the zero-bandwidth alternate setting and drop this
 * stream's user reference.  If we were the last user and a shutdown
 * was requested, the URBs are torn down via the trigger workqueue.
 */
static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
{
        int ret;
        struct cx231xx *dev = snd_pcm_substream_chip(substream);

        dprintk("closing device\n");

        /* inform hardware to stop streaming */
        mutex_lock(&dev->lock);
        ret = cx231xx_capture_start(dev, 0, Audio);
        /* NOTE(review): the stop result above is discarded — ret is
         * immediately reused for the alt-setting call; confirm that is
         * intentional (close should proceed even if the stop fails). */

        /* set alternate setting for audio interface */
        /* 1 - 48000 samples per sec */
        ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
        if (ret < 0) {
                cx231xx_errdev("failed to set alternate setting !\n");
                mutex_unlock(&dev->lock);
                return ret;
        }

        dev->mute = 1;
        dev->adev.users--;
        mutex_unlock(&dev->lock);

        if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
                dprintk("audio users: %d\n", dev->adev.users);
                dprintk("disabling audio stream!\n");
                dev->adev.shutdown = 0;
                dprintk("released lock\n");
                /* request asynchronous stream teardown */
                if (atomic_read(&dev->stream_started) > 0) {
                        atomic_set(&dev->stream_started, 0);
                        schedule_work(&dev->wq_trigger);
                }
        }
        return 0;
}
/*
 * hw_params callback: allocate (or grow) the vmalloc'd capture buffer.
 * Returns 0 or a negative errno.
 *
 * Fix over the original: the allocation result was silently ignored,
 * so an -ENOMEM from snd_pcm_alloc_vmalloc_buffer() let the stream
 * proceed with no DMA area.
 */
static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
                                         struct snd_pcm_hw_params *hw_params)
{
        unsigned int channels, rate, format;
        int ret;

        dprintk("Setting capture parameters\n");

        ret = snd_pcm_alloc_vmalloc_buffer(substream,
                                           params_buffer_bytes(hw_params));
        if (ret < 0)            /* fix: propagate allocation failure */
                return ret;

        /* currently only read for debugging; see TODO below */
        format = params_format(hw_params);
        rate = params_rate(hw_params);
        channels = params_channels(hw_params);

        /* TODO: set up cx231xx audio chip to deliver the correct audio format,
           current default is 48000hz multiplexed => 96000hz mono
           which shouldn't matter since analogue TV only supports mono */
        return 0;
}
/*
 * hw_free callback: if a capture stream is still running, request an
 * asynchronous stop through the trigger workqueue.  Always returns 0.
 */
static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
{
        struct cx231xx *dev = snd_pcm_substream_chip(substream);

        dprintk("Stop capture, if needed\n");

        if (atomic_read(&dev->stream_started) <= 0)
                return 0;

        atomic_set(&dev->stream_started, 0);
        schedule_work(&dev->wq_trigger);

        return 0;
}
/*
 * prepare callback: rewind the capture bookkeeping to the start of the
 * ring buffer before (re)starting the stream.
 */
static int snd_cx231xx_prepare(struct snd_pcm_substream *substream)
{
        struct cx231xx *chip = snd_pcm_substream_chip(substream);

        chip->adev.hwptr_done_capture = 0;
        chip->adev.capture_transfer_done = 0;

        return 0;
}
/*
 * Workqueue handler that starts or stops the USB audio stream outside
 * of the (atomic) ALSA trigger callback.
 *
 * Consistency fix over the original: stopping always called the isoc
 * deinit even when the device streams over bulk.  The two deinit
 * routines are currently identical, so behavior is unchanged, but the
 * teardown now matches the transfer mode that was started.
 */
static void audio_trigger(struct work_struct *work)
{
        struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);

        if (atomic_read(&dev->stream_started)) {
                dprintk("starting capture");
                /* make sure the cx25840 firmware is loaded first */
                if (is_fw_load(dev) == 0)
                        cx25840_call(dev, core, load_fw);
                if (dev->USE_ISO)
                        cx231xx_init_audio_isoc(dev);
                else
                        cx231xx_init_audio_bulk(dev);
        } else {
                dprintk("stopping capture");
                /* fix: tear down with the deinit matching the mode */
                if (dev->USE_ISO)
                        cx231xx_isoc_audio_deinit(dev);
                else
                        cx231xx_bulk_audio_deinit(dev);
        }
}
/*
 * ALSA trigger callback (atomic context): flip the stream_started flag
 * and defer the actual URB start/stop to the workqueue.
 *
 * Fix over the original: `retval` was assigned -EINVAL for unsupported
 * commands but never returned (and was uninitialized otherwise), so
 * every command — valid or not — reported success and scheduled work.
 */
static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
                                       int cmd)
{
        struct cx231xx *dev = snd_pcm_substream_chip(substream);
        int retval = 0;

        if (dev->state & DEV_DISCONNECTED)
                return -ENODEV;

        spin_lock(&dev->adev.slock);
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                atomic_set(&dev->stream_started, 1);
                break;
        case SNDRV_PCM_TRIGGER_STOP:
                atomic_set(&dev->stream_started, 0);
                break;
        default:
                retval = -EINVAL;
                break;
        }
        spin_unlock(&dev->adev.slock);

        /* only kick the worker for commands we actually handled */
        if (retval == 0)
                schedule_work(&dev->wq_trigger);

        return retval;
}
/*
 * pointer callback: report the current capture position in frames,
 * read under the adev spinlock for consistency with the URB handlers.
 */
static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
                                                     *substream)
{
        struct cx231xx *dev = snd_pcm_substream_chip(substream);
        snd_pcm_uframes_t pos;
        unsigned long flags;

        spin_lock_irqsave(&dev->adev.slock, flags);
        pos = dev->adev.hwptr_done_capture;
        spin_unlock_irqrestore(&dev->adev.slock, flags);

        return pos;
}
/*
 * page callback: translate an offset in the vmalloc'd DMA area to its
 * backing struct page so ALSA can mmap the buffer.
 */
static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
                                             unsigned long offset)
{
        return vmalloc_to_page(subs->runtime->dma_area + offset);
}
/* ALSA PCM capture callback table. */
static struct snd_pcm_ops snd_cx231xx_pcm_capture = {
        .open = snd_cx231xx_capture_open,
        .close = snd_cx231xx_pcm_close,
        .ioctl = snd_pcm_lib_ioctl,
        .hw_params = snd_cx231xx_hw_capture_params,
        .hw_free = snd_cx231xx_hw_capture_free,
        .prepare = snd_cx231xx_prepare,
        .trigger = snd_cx231xx_capture_trigger,
        .pointer = snd_cx231xx_capture_pointer,
        .page = snd_pcm_get_vmalloc_page,
};
/*
 * Probe-time init of the ALSA card for one cx231xx device: create and
 * register the snd_card/PCM, look up the audio USB interface and cache
 * its endpoint address and per-altsetting max packet sizes.
 *
 * Fixes over the original:
 *  - the registered snd_card was leaked when the alt_max_pkt_size
 *    allocation failed (returned -ENOMEM without snd_card_free());
 *  - bEndpointAddress is a single byte, so the le16_to_cpu() applied
 *    to it was wrong on big-endian hosts.
 */
static int cx231xx_audio_init(struct cx231xx *dev)
{
        struct cx231xx_audio *adev = &dev->adev;
        struct snd_pcm *pcm;
        struct snd_card *card;
        static int devnr;
        int err;
        struct usb_interface *uif;
        int i, isoc_pipe = 0;

        if (dev->has_alsa_audio != 1) {
                /* This device does not support the extension (in this case
                   the device is expecting the snd-usb-audio module or
                   doesn't have analog audio support at all) */
                return 0;
        }

        cx231xx_info("cx231xx-audio.c: probing for cx231xx "
                     "non standard usbaudio\n");

        err = snd_card_create(index[devnr], "Cx231xx Audio", THIS_MODULE,
                              0, &card);
        if (err < 0)
                return err;

        spin_lock_init(&adev->slock);
        err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
        if (err < 0)
                goto err_free_card;

        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
                        &snd_cx231xx_pcm_capture);
        pcm->info_flags = 0;
        pcm->private_data = dev;
        strcpy(pcm->name, "Conexant cx231xx Capture");
        snd_card_set_dev(card, &dev->udev->dev);
        strcpy(card->driver, "Cx231xx-Audio");
        strcpy(card->shortname, "Cx231xx Audio");
        strcpy(card->longname, "Conexant cx231xx Audio");

        INIT_WORK(&dev->wq_trigger, audio_trigger);

        err = snd_card_register(card);
        if (err < 0)
                goto err_free_card;

        adev->sndcard = card;
        adev->udev = dev->udev;

        /* compute alternate max packet sizes for Audio */
        uif =
            dev->udev->actconfig->interface[dev->current_pcb_config.
                                            hs_config_info[0].interface_info.
                                            audio_index + 1];

        /* fix: bEndpointAddress is u8, no endianness conversion needed */
        adev->end_point_addr =
            uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;

        adev->num_alt = uif->num_altsetting;
        cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
                     adev->end_point_addr, adev->num_alt);

        adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
        if (adev->alt_max_pkt_size == NULL) {
                cx231xx_errdev("out of memory!\n");
                err = -ENOMEM;
                goto err_free_card;     /* fix: don't leak the card */
        }

        for (i = 0; i < adev->num_alt; i++) {
                u16 tmp =
                    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
                                wMaxPacketSize);
                /* bits 10..0 = packet size, bits 12..11 = extra transactions */
                adev->alt_max_pkt_size[i] =
                    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
                cx231xx_info("Alternate setting %i, max size= %i\n", i,
                             adev->alt_max_pkt_size[i]);
        }

        return 0;

err_free_card:
        snd_card_free(card);
        adev->sndcard = NULL;
        return err;
}
/*
 * Tear down the ALSA card for one device.  No-op when the device never
 * registered the audio extension.  Always returns 0.
 */
static int cx231xx_audio_fini(struct cx231xx *dev)
{
        /* extension inactive: nothing was ever allocated */
        if (dev == NULL || dev->has_alsa_audio != 1)
                return 0;

        if (dev->adev.sndcard != NULL) {
                snd_card_free(dev->adev.sndcard);
                kfree(dev->adev.alt_max_pkt_size);
                dev->adev.sndcard = NULL;
        }

        return 0;
}
/* Extension hooks invoked by the cx231xx core for each probed device. */
static struct cx231xx_ops audio_ops = {
        .id = CX231XX_AUDIO,
        .name = "Cx231xx Audio Extension",
        .init = cx231xx_audio_init,
        .fini = cx231xx_audio_fini,
};
/* Module init: hook the audio extension into the cx231xx core. */
static int __init cx231xx_alsa_register(void)
{
        return cx231xx_register_extension(&audio_ops);
}
/* Module exit: detach the audio extension from the cx231xx core. */
static void __exit cx231xx_alsa_unregister(void)
{
        cx231xx_unregister_extension(&audio_ops);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
MODULE_DESCRIPTION("Cx231xx Audio driver");
module_init(cx231xx_alsa_register);
module_exit(cx231xx_alsa_unregister);
| gpl-2.0 |
rcoscali/nethunter-kernel-samsung-tuna | drivers/pci/htirq.c | 2933 | 4386 | /*
* File: htirq.c
* Purpose: Hypertransport Interrupt Capability
*
* Copyright (C) 2006 Linux Networx
* Copyright (C) Eric Biederman <ebiederman@lnxi.com>
*/
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/htirq.h>
/* Global ht irq lock.
*
* This is needed to serialize access to the data port in hypertransport
* irq capability.
*
* With multiple simultaneous hypertransport irq devices it might pay
* to make this more fine grained. But start with simple, stupid, and correct.
*/
static DEFINE_SPINLOCK(ht_irq_lock);
/*
 * Per-irq state for a hypertransport interrupt: the owning PCI device,
 * the location of the HT IRQ capability in config space, and a shadow
 * copy of the message last written to the hardware.
 */
struct ht_irq_cfg {
        struct pci_dev *dev;
         /* Update callback used to cope with buggy hardware */
        ht_irq_update_t *update;
        unsigned pos;           /* config-space offset of the HT IRQ capability */
        unsigned idx;           /* dataport index of this irq's low address word */
        struct ht_irq_msg msg;  /* shadow of the message currently in hardware */
};
/*
 * Write an HT irq message to the hardware, using the shadow copy to
 * skip config-space writes for words that did not change.  The shared
 * index/data port pair is serialized by ht_irq_lock.
 */
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
        unsigned long flags;

        spin_lock_irqsave(&ht_irq_lock, flags);
        if (cfg->msg.address_lo != msg->address_lo) {
                /* select the low address word, then write it */
                pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
                pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
        }
        if (cfg->msg.address_hi != msg->address_hi) {
                /* the high word lives at the next dataport index */
                pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
                pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
        }
        if (cfg->update)
                cfg->update(cfg->dev, irq, msg);
        spin_unlock_irqrestore(&ht_irq_lock, flags);
        /* NOTE(review): the shadow update happens outside the lock —
         * confirm no concurrent writers target the same irq. */
        cfg->msg = *msg;
}
/* Return the shadow copy of the message last written for @irq. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);

        *msg = cfg->msg;
}
/* Mask the irq by setting bit 0 of the low address word. */
void mask_ht_irq(struct irq_data *data)
{
        struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
        struct ht_irq_msg msg = cfg->msg;

        msg.address_lo |= 1;    /* bit 0 = mask */
        write_ht_irq_msg(data->irq, &msg);
}
/* Unmask the irq by clearing bit 0 of the low address word. */
void unmask_ht_irq(struct irq_data *data)
{
        struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
        struct ht_irq_msg msg = cfg->msg;

        msg.address_lo &= ~1;   /* bit 0 = mask */
        write_ht_irq_msg(data->irq, &msg);
}
/**
 * __ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 * @update: Function to be called when changing the htirq message
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
{
        struct ht_irq_cfg *cfg;
        unsigned long flags;
        u32 data;
        int max_irq;
        int pos;
        int irq;
        int node;

        pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
        if (!pos)
                return -EINVAL;

        /* Verify the idx I want to use is in range */
        spin_lock_irqsave(&ht_irq_lock, flags);
        /* dataport index 1 holds the capability info word */
        pci_write_config_byte(dev, pos + 2, 1);
        pci_read_config_dword(dev, pos + 4, &data);
        spin_unlock_irqrestore(&ht_irq_lock, flags);

        /* bits 23:16 = highest supported message number */
        max_irq = (data >> 16) & 0xff;
        if ( idx > max_irq)
                return -EINVAL;

        cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
        if (!cfg)
                return -ENOMEM;

        cfg->dev = dev;
        cfg->update = update;
        cfg->pos = pos;
        /* message words start at dataport index 0x10, two per irq */
        cfg->idx = 0x10 + (idx * 2);
        /* Initialize msg to a value that will never match the first write. */
        cfg->msg.address_lo = 0xffffffff;
        cfg->msg.address_hi = 0xffffffff;

        node = dev_to_node(&dev->dev);
        irq = create_irq_nr(0, node);

        if (irq <= 0) {
                kfree(cfg);
                return -EBUSY;
        }
        /* cfg's ownership passes to the irq; ht_destroy_irq() frees it */
        irq_set_handler_data(irq, cfg);

        if (arch_setup_ht_irq(irq, dev) < 0) {
                ht_destroy_irq(irq);
                return -EBUSY;
        }

        return irq;
}
/**
 * ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 *
 * ht_create_irq needs to be called for all hypertransport devices
 * that generate irqs.
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int ht_create_irq(struct pci_dev *dev, int idx)
{
        /* convenience wrapper: no hardware-workaround update callback */
        return __ht_create_irq(dev, idx, NULL);
}
/**
 * ht_destroy_irq - destroy an irq created with ht_create_irq
 * @irq: irq to be destroyed
 *
 * This reverses ht_create_irq removing the specified irq from
 * existence. The irq should be free before this happens.
 */
void ht_destroy_irq(unsigned int irq)
{
        struct ht_irq_cfg *cfg;

        /* grab cfg before clearing the handler data that references it */
        cfg = irq_get_handler_data(irq);
        irq_set_chip(irq, NULL);
        irq_set_handler_data(irq, NULL);
        destroy_irq(irq);

        kfree(cfg);
}
EXPORT_SYMBOL(__ht_create_irq);
EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);
| gpl-2.0 |
TheNotOnly/android_kernel_lge_jagnm_kk | arch/m68k/platform/coldfire/timers.c | 4469 | 5415 | /***************************************************************************/
/*
* timers.c -- generic ColdFire hardware timer support.
*
* Copyright (C) 1999-2008, Greg Ungerer <gerg@snapgear.com>
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <asm/io.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcftimer.h>
#include <asm/mcfsim.h>
/***************************************************************************/
/*
* By default use timer1 as the system clock timer.
*/
#define FREQ (MCF_BUSCLK / 16)
#define TA(a) (MCFTIMER_BASE1 + (a))
/*
* These provide the underlying interrupt vector support.
* Unfortunately it is a little different on each ColdFire.
*/
void coldfire_profile_init(void);
#if defined(CONFIG_M532x)
#define __raw_readtrr __raw_readl
#define __raw_writetrr __raw_writel
#else
#define __raw_readtrr __raw_readw
#define __raw_writetrr __raw_writew
#endif
static u32 mcftmr_cycles_per_jiffy;
static u32 mcftmr_cnt;
static irq_handler_t timer_interrupt;
/***************************************************************************/
/*
 * Route the timer interrupts on ColdFire parts with the classic
 * interrupt controller (those defining MCFSIM_ICR_AUTOVEC); on other
 * parts the routing is fixed and this is a no-op.
 */
static void init_timer_irq(void)
{
#ifdef MCFSIM_ICR_AUTOVEC
        /* Timer1 is always used as system timer */
        writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
                MCF_MBAR + MCFSIM_TIMER1ICR);
        mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);

#ifdef CONFIG_HIGHPROFILE
        /* Timer2 is to be used as a high speed profile timer  */
        writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
                MCF_MBAR + MCFSIM_TIMER2ICR);
        mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
#endif
#endif /* MCFSIM_ICR_AUTOVEC */
}
/***************************************************************************/
/*
 * System tick interrupt: acknowledge the timer event in hardware,
 * account the elapsed cycles for the clocksource and chain to the
 * generic kernel timer handler installed via hw_timer_init().
 */
static irqreturn_t mcftmr_tick(int irq, void *dummy)
{
        /* Reset the ColdFire timer */
        __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));

        mcftmr_cnt += mcftmr_cycles_per_jiffy;
        return timer_interrupt(irq, dummy);
}
/***************************************************************************/
/* irqaction for the timer1 system tick. */
static struct irqaction mcftmr_timer_irq = {
        .name    = "timer",
        .flags   = IRQF_DISABLED | IRQF_TIMER,
        .handler = mcftmr_tick,
};
/***************************************************************************/
/*
 * Clocksource read: combine the software cycle accumulator (advanced
 * once per tick in mcftmr_tick()) with the live hardware counter.
 * Interrupts are disabled so the two reads are mutually consistent.
 * NOTE(review): a timer rollover that is pending but not yet serviced
 * between the reads is not compensated for — confirm acceptable.
 */
static cycle_t mcftmr_read_clk(struct clocksource *cs)
{
        unsigned long flags;
        u32 cycles;
        u16 tcn;

        local_irq_save(flags);
        tcn = __raw_readw(TA(MCFTIMER_TCN));
        cycles = mcftmr_cnt;
        local_irq_restore(flags);

        return cycles + tcn;
}
/***************************************************************************/
/* 32-bit free-running clocksource backed by timer1. */
static struct clocksource mcftmr_clk = {
        .name   = "tmr",
        .rating = 250,
        .read   = mcftmr_read_clk,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
/***************************************************************************/
/*
 * Program timer1 as the periodic system tick (FREQ/HZ cycles per
 * tick), register it as a clocksource and install @handler as the
 * generic tick callback.  Register write order matters: the timer is
 * disabled before TRR is loaded and only then (re)enabled.
 */
void hw_timer_init(irq_handler_t handler)
{
        __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
        mcftmr_cycles_per_jiffy = FREQ / HZ;
        /*
         * The coldfire timer runs from 0 to TRR included, then 0
         * again and so on.  It counts thus actually TRR + 1 steps
         * for 1 tick, not TRR.  So if you want n cycles,
         * initialize TRR with n - 1.
         */
        __raw_writetrr(mcftmr_cycles_per_jiffy - 1, TA(MCFTIMER_TRR));
        __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));

        clocksource_register_hz(&mcftmr_clk, FREQ);

        timer_interrupt = handler;
        init_timer_irq();
        setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);

#ifdef CONFIG_HIGHPROFILE
        coldfire_profile_init();
#endif
}
/***************************************************************************/
#ifdef CONFIG_HIGHPROFILE
/***************************************************************************/
/*
* By default use timer2 as the profiler clock timer.
*/
#define PA(a) (MCFTIMER_BASE2 + (a))
/*
* Choose a reasonably fast profile timer. Make it an odd value to
* try and get good coverage of kernel operations.
*/
#define PROFILEHZ 1013
/*
* Use the other timer to provide high accuracy profiling info.
*/
/*
 * High-rate profile interrupt from timer2: acknowledge the event and
 * feed the kernel profiler (skipping the idle task, pid 0).
 */
irqreturn_t coldfire_profile_tick(int irq, void *dummy)
{
        /* Reset ColdFire timer2 */
        __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, PA(MCFTIMER_TER));
        if (current->pid)
                profile_tick(CPU_PROFILING);
        return IRQ_HANDLED;
}
/***************************************************************************/
/* irqaction for the timer2 profiling tick. */
static struct irqaction coldfire_profile_irq = {
        .name    = "profile timer",
        .flags   = IRQF_DISABLED | IRQF_TIMER,
        .handler = coldfire_profile_tick,
};
/*
 * Program timer2 as a free-running PROFILEHZ interrupt source for the
 * kernel profiler and install its interrupt handler.
 */
void coldfire_profile_init(void)
{
        printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n",
               PROFILEHZ);

        /* Set up TIMER 2 as high speed profile clock */
        __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
        __raw_writetrr(((MCF_BUSCLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
        __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));

        setup_irq(MCF_IRQ_PROFILER, &coldfire_profile_irq);
}
/***************************************************************************/
#endif /* CONFIG_HIGHPROFILE */
/***************************************************************************/
| gpl-2.0 |
mifl/android_kernel_pantech_oscar | drivers/input/keyboard/jornada720_kbd.c | 4981 | 5450 | /*
* drivers/input/keyboard/jornada720_kbd.c
*
* HP Jornada 720 keyboard platform driver
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@Gmail.com>
*
* Copyright (C) 2006 jornada 720 kbd driver by
Filip Zyzniewsk <Filip.Zyzniewski@tefnet.plX
* based on (C) 2004 jornada 720 kbd driver by
Alex Lange <chicken@handhelds.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <mach/jornada720.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
MODULE_LICENSE("GPL v2");
/*
 * Scancode -> keycode translation table for the Jornada 7xx keyboard,
 * indexed by the 7-bit scancode reported by the keyboard controller.
 */
static unsigned short jornada_std_keymap[128] = {					/* ROW */
	0, KEY_ESC, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,	/* #1  */
	KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,	/* -> */
	0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9,	/* #2  */
	KEY_0, KEY_MINUS, KEY_EQUAL,0, 0, 0,					/* -> */
	0, KEY_Q, KEY_W, KEY_E, KEY_R, KEY_T, KEY_Y, KEY_U, KEY_I, KEY_O,	/* #3  */
	KEY_P, KEY_BACKSLASH, KEY_BACKSPACE, 0, 0, 0,				/* -> */
	0, KEY_A, KEY_S, KEY_D, KEY_F, KEY_G, KEY_H, KEY_J, KEY_K, KEY_L,	/* #4  */
	KEY_SEMICOLON, KEY_LEFTBRACE, KEY_RIGHTBRACE, 0, 0, 0,			/* -> */
	0, KEY_Z, KEY_X, KEY_C, KEY_V, KEY_B, KEY_N, KEY_M, KEY_COMMA,		/* #5  */
	KEY_DOT, KEY_KPMINUS, KEY_APOSTROPHE, KEY_ENTER, 0, 0,0,		/* -> */
	0, KEY_TAB, 0, KEY_LEFTSHIFT, 0, KEY_APOSTROPHE, 0, 0, 0, 0,		/* #6  */
	KEY_UP, 0, KEY_RIGHTSHIFT, 0, 0, 0,0, 0, 0, 0, 0, KEY_LEFTALT, KEY_GRAVE,	/* -> */
	0, 0, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0,0, KEY_KPASTERISK,	/* -> */
	KEY_LEFTCTRL, 0, KEY_SPACE, 0, 0, 0, KEY_SLASH, KEY_DELETE, 0, 0,	/* -> */
	0, 0, 0, KEY_POWER,							/* -> */
};
/* Per-device state: a private (remappable) copy of the keymap plus the
 * registered input device. */
struct jornadakbd {
	unsigned short keymap[ARRAY_SIZE(jornada_std_keymap)];	/* active scancode map */
	struct input_dev *input;				/* input core handle */
};
/*
 * Keyboard IRQ handler.  Drains every pending scancode from the keyboard
 * microcontroller over the SSP bus and forwards it to the input core.
 * Raw byte layout: bit 7 = release flag (1 = key up), bits 6..0 = scancode
 * used to index jornadakbd->keymap.
 */
static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
	struct input_dev *input = jornadakbd->input;
	u8 count, kbd_data, scan_code;

	/* startup ssp with spinlock */
	jornada_ssp_start();

	if (jornada_ssp_inout(GETSCANKEYCODE) != TXDUMMY) {
		printk(KERN_DEBUG
			"jornada720_kbd: "
			"GetKeycode command failed with ETIMEDOUT, "
			"flushed bus\n");
	} else {
		/* How many keycodes are waiting for us? */
		count = jornada_ssp_byte(TXDUMMY);

		/* Lets drag them out one at a time */
		while (count--) {
			/* Exchange TxDummy for location (keymap[kbddata]) */
			kbd_data = jornada_ssp_byte(TXDUMMY);
			scan_code = kbd_data & 0x7f;

			input_event(input, EV_MSC, MSC_SCAN, scan_code);
			input_report_key(input, jornadakbd->keymap[scan_code],
					 !(kbd_data & 0x80));
			input_sync(input);
		}
	}

	/* release spinlock and turn off ssp */
	jornada_ssp_end();

	return IRQ_HANDLED;
};
/*
 * Probe: allocate driver state and an input device, populate the keymap
 * and capability bits, grab the keyboard IRQ (GPIO0, falling edge), then
 * register with the input core.  On failure everything acquired so far is
 * released in reverse order via the fail2/fail1 labels.
 * Returns 0 on success or a negative errno.
 */
static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
{
	struct jornadakbd *jornadakbd;
	struct input_dev *input_dev;
	int i, err;

	/* both allocations are checked together; either NULL aborts */
	jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!jornadakbd || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	platform_set_drvdata(pdev, jornadakbd);

	/* private copy so userspace can remap keys via EVIOCSKEYCODE */
	memcpy(jornadakbd->keymap, jornada_std_keymap,
		sizeof(jornada_std_keymap));
	jornadakbd->input = input_dev;

	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
	input_dev->name = "HP Jornada 720 keyboard";
	input_dev->phys = "jornadakbd/input0";
	input_dev->keycode = jornadakbd->keymap;
	input_dev->keycodesize = sizeof(unsigned short);
	input_dev->keycodemax = ARRAY_SIZE(jornada_std_keymap);
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;

	/* advertise every key present in the map, then drop the gaps */
	for (i = 0; i < ARRAY_SIZE(jornadakbd->keymap); i++)
		__set_bit(jornadakbd->keymap[i], input_dev->keybit);
	__clear_bit(KEY_RESERVED, input_dev->keybit);

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);

	err = request_irq(IRQ_GPIO0,
			  jornada720_kbd_interrupt,
			  IRQF_TRIGGER_FALLING,
			  "jornadakbd", pdev);
	if (err) {
		printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n");
		goto fail1;
	}

	err = input_register_device(jornadakbd->input);
	if (err)
		goto fail2;

	return 0;

 fail2:	/* IRQ, DEVICE, MEMORY */
	free_irq(IRQ_GPIO0, pdev);
 fail1:	/* DEVICE, MEMORY */
	platform_set_drvdata(pdev, NULL);
	input_free_device(input_dev);
	kfree(jornadakbd);
	return err;
};
/*
 * Remove: undo probe in reverse — release the IRQ first so the handler
 * can no longer touch the input device, then unregister the device
 * (which also frees it) and free the driver state.
 */
static int __devexit jornada720_kbd_remove(struct platform_device *pdev)
{
	struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);

	free_irq(IRQ_GPIO0, pdev);
	platform_set_drvdata(pdev, NULL);
	input_unregister_device(jornadakbd->input);
	kfree(jornadakbd);

	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:jornada720_kbd");

/* Platform driver glue: binds to the "jornada720_kbd" platform device. */
static struct platform_driver jornada720_kbd_driver = {
	.driver	 = {
		.name    = "jornada720_kbd",
		.owner	= THIS_MODULE,
	 },
	.probe   = jornada720_kbd_probe,
	.remove  = __devexit_p(jornada720_kbd_remove),
};
module_platform_driver(jornada720_kbd_driver);
| gpl-2.0 |
LiquidSmooth-Devices/android_kernel_samsung_hlte | sound/isa/wavefront/wavefront_fx.c | 5237 | 6319 | /*
* Copyright (c) 1998-2002 by Paul Davis <pbd@op.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/snd_wavefront.h>
#include <sound/initval.h>
/* Control bits for the Load Control Register
 */

#define FX_LSB_TRANSFER 0x01    /* transfer after DSP LSB byte written */
#define FX_MSB_TRANSFER 0x02    /* transfer after DSP MSB byte written */
#define FX_AUTO_INCR    0x04    /* auto-increment DSP address after transfer */

/* Sentinel byte in the firmware image: wait for the DSP to go idle
 * instead of writing a register. */
#define WAIT_IDLE	0xff
/*
 * Poll the FX status port until the DSP drops its busy bit (0x80).
 * Gives up after 1000 reads.  Returns 1 when idle, 0 when the device
 * never became idle (an error is logged in that case).
 */
static int
wavefront_fx_idle (snd_wavefront_t *dev)
{
	unsigned int status = 0x80;
	int attempt;

	for (attempt = 0; attempt < 1000; attempt++) {
		status = inb (dev->fx_status);
		if (!(status & 0x80))
			break;
	}

	if (status & 0x80) {
		snd_printk ("FX device never idle.\n");
		return 0;
	}

	return 1;
}
/*
 * Mute (onoff != 0) or unmute the FX output by writing the mute bit to
 * the op register.  Silently does nothing if the DSP never goes idle.
 */
static void
wavefront_fx_mute (snd_wavefront_t *dev, int onoff)
{
	if (wavefront_fx_idle (dev))
		outb (onoff ? 0x02 : 0x00, dev->fx_op);
}
/*
 * Write cnt 16-bit words from data[] into FX DSP memory at page:addr.
 * page must be 0..7 and addr 0..0x7f.  A single word is written with an
 * LSB-triggered transfer; multiple words use the DSP's auto-increment
 * mode, polling for idle between words.  Returns 0 on success, -EINVAL
 * for bad page/addr, -EIO if the DSP stopped responding mid-transfer.
 * The exact port-write ordering (lcr, page, addr, MSB, LSB) is part of
 * the hardware protocol — do not reorder.
 */
static int
wavefront_fx_memset (snd_wavefront_t *dev,
		     int page,
		     int addr,
		     int cnt,
		     unsigned short *data)
{
	if (page < 0 || page > 7) {
		snd_printk ("FX memset: "
			"page must be >= 0 and <= 7\n");
		return -(EINVAL);
	}

	if (addr < 0 || addr > 0x7f) {
		snd_printk ("FX memset: "
			"addr must be >= 0 and <= 7f\n");
		return -(EINVAL);
	}

	if (cnt == 1) {

		outb (FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);
		outb ((data[0] >> 8), dev->fx_dsp_msb);
		outb ((data[0] & 0xff), dev->fx_dsp_lsb);

		snd_printk ("FX: addr %d:%x set to 0x%x\n",
			    page, addr, data[0]);

	} else {
		int i;

		outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);

		for (i = 0; i < cnt; i++) {
			outb ((data[i] >> 8), dev->fx_dsp_msb);
			outb ((data[i] & 0xff), dev->fx_dsp_lsb);
			if (!wavefront_fx_idle (dev)) {
				break;
			}
		}

		if (i != cnt) {
			snd_printk ("FX memset "
				    "(0x%x, 0x%x, 0x%lx, %d) incomplete\n",
				    page, addr, (unsigned long) data, cnt);
			return -(EIO);
		}
	}

	return 0;
}
/*
 * Probe for a YSS225 FX processor.  Boards without one (Maui, Tropez)
 * never report idle, so a set busy bit means "no FX device here".
 * Returns 0 when an FX device appears present, -1 otherwise.
 */
int
snd_wavefront_fx_detect (snd_wavefront_t *dev)

{
	/* This is a crude check, but its the best one I have for now.
	   Certainly on the Maui and the Tropez, wavefront_fx_idle() will
	   report "never idle", which suggests that this test should
	   work OK.
	*/

	if ((inb (dev->fx_status) & 0x80) != 0) {
		snd_printk ("Hmm, probably a Maui or Tropez.\n");
		return -1;
	}

	return 0;
}
/*
 * hwdep open hook: pin the owning card module for the lifetime of the
 * open file and stash the hwdep handle for later callbacks.
 * Returns 0 on success, -EFAULT if the module reference cannot be taken
 * (historical errno choice, kept for compatibility).
 */
int
snd_wavefront_fx_open (struct snd_hwdep *hw, struct file *file)

{
	int pinned = try_module_get(hw->card->module);

	if (!pinned)
		return -EFAULT;

	file->private_data = hw;
	return 0;
}
/* hwdep release hook: drop the module reference taken in open. */
int
snd_wavefront_fx_release (struct snd_hwdep *hw, struct file *file)

{
	module_put(hw->card->module);
	return 0;
}
/*
 * hwdep ioctl entry point for FX control.  Copies a wavefront_fx_info
 * request from userspace and dispatches it:
 *   WFFX_MUTE   - toggles muting; NOTE(review): returns -EIO even on
 *                 success — long-standing upstream behavior, kept as-is.
 *   WFFX_MEMSET - writes up to 256 16-bit words into DSP memory; a
 *                 single word is passed inline in r.data[3], larger
 *                 transfers are copied from the user pointer there.
 * Returns 0/-errno; unknown requests yield -ENOTTY.
 */
int
snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
			unsigned int cmd, unsigned long arg)

{
	struct snd_card *card;
	snd_wavefront_card_t *acard;
	snd_wavefront_t *dev;
	wavefront_fx_info r;
	unsigned short *page_data = NULL;
	unsigned short *pd;
	int err = 0;

	card = sdev->card;
	if (snd_BUG_ON(!card))
		return -ENODEV;
	if (snd_BUG_ON(!card->private_data))
		return -ENODEV;

	acard = card->private_data;
	dev = &acard->wavefront;

	if (copy_from_user (&r, (void __user *)arg, sizeof (wavefront_fx_info)))
		return -EFAULT;

	switch (r.request) {
	case WFFX_MUTE:
		wavefront_fx_mute (dev, r.data[0]);
		return -EIO;

	case WFFX_MEMSET:
		if (r.data[2] <= 0) {
			snd_printk ("cannot write "
				"<= 0 bytes to FX\n");
			return -EIO;
		} else if (r.data[2] == 1) {
			/* single word travels inline in the request */
			pd = (unsigned short *) &r.data[3];
		} else {
			if (r.data[2] > 256) {
				snd_printk ("cannot write "
					"> 512 bytes to FX\n");
				return -EIO;
			}
			page_data = memdup_user((unsigned char __user *)
						r.data[3],
						r.data[2] * sizeof(short));
			if (IS_ERR(page_data))
				return PTR_ERR(page_data);
			pd = page_data;
		}

		err = wavefront_fx_memset (dev,
			     r.data[0], /* page */
			     r.data[1], /* addr */
			     r.data[2], /* cnt */
			     pd);
		kfree(page_data);
		break;

	default:
		snd_printk ("FX: ioctl %d not yet supported\n",
			    r.request);
		return -ENOTTY;
	}
	return err;
}
/* YSS225 initialization.

   This code was developed using DOSEMU. The Turtle Beach SETUPSND
   utility was run with I/O tracing in DOSEMU enabled, and a reconstruction
   of the port I/O done, using the Yamaha faxback document as a guide
   to add more logic to the code. Its really pretty weird.

   This is the approach of just dumping the whole I/O
   sequence as a series of port/value pairs and a simple loop
   that outputs it.
*/

/*
 * Replay the captured YSS225 register dump from firmware
 * "yamaha/yss225_registers.bin".  The blob is a sequence of
 * (offset, value) byte pairs; offsets 8..15 are written to
 * dev->base + offset, WAIT_IDLE (0xff) means poll for DSP idle, and
 * anything else is rejected as corrupt data.  Idempotent: returns 0
 * immediately once dev->fx_initialized is set.  Returns 0 or -1.
 */
int __devinit
snd_wavefront_fx_start (snd_wavefront_t *dev)
{
	unsigned int i;
	int err;
	const struct firmware *firmware = NULL;

	if (dev->fx_initialized)
		return 0;

	err = request_firmware(&firmware, "yamaha/yss225_registers.bin",
			       dev->card->dev);
	if (err < 0) {
		err = -1;
		goto out;
	}

	/* i + 1 < size: never read a value byte past the end of the blob */
	for (i = 0; i + 1 < firmware->size; i += 2) {
		if (firmware->data[i] >= 8 && firmware->data[i] < 16) {
			outb(firmware->data[i + 1],
			     dev->base + firmware->data[i]);
		} else if (firmware->data[i] == WAIT_IDLE) {
			if (!wavefront_fx_idle(dev)) {
				err = -1;
				goto out;
			}
		} else {
			snd_printk(KERN_ERR "invalid address"
				   " in register data\n");
			err = -1;
			goto out;
		}
	}

	dev->fx_initialized = 1;
	err = 0;
out:
	release_firmware(firmware);
	return err;
}
MODULE_FIRMWARE("yamaha/yss225_registers.bin");
| gpl-2.0 |
mythos234/NamelessN910F-LL | drivers/mtd/chips/map_ram.c | 8309 | 3692 | /*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
/* Forward declarations for the mtd_info operations filled in by
 * map_ram_probe(). */
static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
					  unsigned long, unsigned long);

/* Chip driver registered with the map layer; probe() attaches us to a
 * map that turns out to be plain RAM. */
static struct mtd_chip_driver mapram_chipdrv = {
	.probe	= map_ram_probe,
	.name	= "map_ram",
	.module	= THIS_MODULE
};
/*
 * Build an mtd_info describing the mapped region as MTD_RAM.  The
 * read-back write test is compiled out (#if 0) because it would corrupt
 * live data; the map is simply assumed to be RAM.  The erasesize starts
 * at PAGE_SIZE and is halved until it divides the map size evenly.
 * Returns the new mtd_info (caller owns it) or NULL on allocation
 * failure.  Takes a module reference on success.
 */
static struct mtd_info *map_ram_probe(struct map_info *map)
{
	struct mtd_info *mtd;

	/* Check the first byte is RAM */
#if 0
	map_write8(map, 0x55, 0);
	if (map_read8(map, 0) != 0x55)
		return NULL;

	map_write8(map, 0xAA, 0);
	if (map_read8(map, 0) != 0xAA)
		return NULL;

	/* Check the last byte is RAM */
	map_write8(map, 0x55, map->size-1);
	if (map_read8(map, map->size-1) != 0x55)
		return NULL;

	map_write8(map, 0xAA, map->size-1);
	if (map_read8(map, map->size-1) != 0xAA)
		return NULL;
#endif
	/* OK. It seems to be RAM. */

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;

	map->fldrv = &mapram_chipdrv;
	mtd->priv = map;
	mtd->name = map->name;
	mtd->type = MTD_RAM;
	mtd->size = map->size;
	mtd->_erase = mapram_erase;
	mtd->_get_unmapped_area = mapram_unmapped_area;
	mtd->_read = mapram_read;
	mtd->_write = mapram_write;
	mtd->_sync = mapram_nop;
	mtd->flags = MTD_CAP_RAM;
	mtd->writesize = 1;

	/* largest power of two <= PAGE_SIZE that divides the map size */
	mtd->erasesize = PAGE_SIZE;
	while(mtd->size & (mtd->erasesize - 1))
		mtd->erasesize >>= 1;

	__module_get(THIS_MODULE);
	return mtd;
}
/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
					  unsigned long len,
					  unsigned long offset,
					  unsigned long flags)
{
	struct map_info *map = mtd->priv;
	/* RAM is directly addressable: just offset into the mapped window */
	return (unsigned long) map->virt + offset;
}
/* Read len bytes at offset 'from' straight out of the mapped RAM.
 * Always succeeds and reports the full length back via *retlen. */
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *info = mtd->priv;

	map_copy_from(info, buf, from, len);
	*retlen = len;
	return 0;
}
/* Write len bytes at offset 'to' straight into the mapped RAM.
 * Always succeeds and reports the full length back via *retlen. */
static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *info = mtd->priv;

	map_copy_to(info, to, buf, len);
	*retlen = len;
	return 0;
}
/* "Erase" a region by filling it with all-ones words, one bank-width
 * write at a time.  Inefficient, but still far faster than a real flash
 * erase.  Always succeeds. */
static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	map_word fill;
	unsigned long off;

	fill = map_word_ff(map);
	for (off = 0; off < instr->len; off += map_bankwidth(map))
		map_write(map, fill, instr->addr + off);

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
	return 0;
}
/* sync() hook: RAM is always consistent, so there is nothing to flush. */
static void mapram_nop(struct mtd_info *mtd)
{
	/* Nothing to see here */
}
/* Module init: register this chip driver with the map layer. */
static int __init map_ram_init(void)
{
	register_mtd_chip_driver(&mapram_chipdrv);
	return 0;
}

/* Module exit: unregister from the map layer. */
static void __exit map_ram_exit(void)
{
	unregister_mtd_chip_driver(&mapram_chipdrv);
}
module_init(map_ram_init);
module_exit(map_ram_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD chip driver for RAM chips");
| gpl-2.0 |
blastagator/LGG2_TWRP_Kernel | sound/pci/oxygen/oxygen_pcm.c | 8565 | 22254 | /*
* C-Media CMI8788 driver - PCM code
*
* Copyright (c) Clemens Ladisch <clemens@ladisch.de>
*
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this driver; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/pci.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "oxygen.h"
/* most DMA channels have a 16-bit counter for 32-bit words */
#define BUFFER_BYTES_MAX		((1 << 16) * 4)
/* the multichannel DMA channel has a 24-bit counter */
#define BUFFER_BYTES_MAX_MULTICH	((1 << 24) * 4)

#define PERIOD_BYTES_MIN		64

#define DEFAULT_BUFFER_BYTES		(BUFFER_BYTES_MAX / 2)
#define DEFAULT_BUFFER_BYTES_MULTICH	(1024 * 1024)

/* Capabilities of the stereo record/playback DMA channels (A/B/C/SPDIF). */
static const struct snd_pcm_hardware oxygen_stereo_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		   SNDRV_PCM_FMTBIT_S32_LE,
	.rates = SNDRV_PCM_RATE_32000 |
		 SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000 |
		 SNDRV_PCM_RATE_64000 |
		 SNDRV_PCM_RATE_88200 |
		 SNDRV_PCM_RATE_96000 |
		 SNDRV_PCM_RATE_176400 |
		 SNDRV_PCM_RATE_192000,
	.rate_min = 32000,
	.rate_max = 192000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
};

/* Capabilities of the multichannel playback DMA channel (up to 8 ch). */
static const struct snd_pcm_hardware oxygen_multichannel_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		   SNDRV_PCM_FMTBIT_S32_LE,
	.rates = SNDRV_PCM_RATE_32000 |
		 SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000 |
		 SNDRV_PCM_RATE_64000 |
		 SNDRV_PCM_RATE_88200 |
		 SNDRV_PCM_RATE_96000 |
		 SNDRV_PCM_RATE_176400 |
		 SNDRV_PCM_RATE_192000,
	.rate_min = 32000,
	.rate_max = 192000,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = BUFFER_BYTES_MAX_MULTICH,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX_MULTICH,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX_MULTICH / PERIOD_BYTES_MIN,
};

/* Capabilities of the AC'97 channel: fixed 48 kHz 16-bit stereo. */
static const struct snd_pcm_hardware oxygen_ac97_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
};

/* Hardware descriptor per DMA channel index. */
static const struct snd_pcm_hardware *const oxygen_hardware[PCM_COUNT] = {
	[PCM_A] = &oxygen_stereo_hardware,
	[PCM_B] = &oxygen_stereo_hardware,
	[PCM_C] = &oxygen_stereo_hardware,
	[PCM_SPDIF] = &oxygen_stereo_hardware,
	[PCM_MULTICH] = &oxygen_multichannel_hardware,
	[PCM_AC97] = &oxygen_ac97_hardware,
};
static inline unsigned int
oxygen_substream_channel(struct snd_pcm_substream *substream)
{
return (unsigned int)(uintptr_t)substream->runtime->private_data;
}
/*
 * Common open routine for all PCM devices.  Stores the channel index in
 * runtime->private_data, selects the right hardware descriptor (channel B
 * may actually capture from the second AC'97 codec), applies per-channel
 * and model-specific constraints, and marks the channel active.  Opening
 * the S/PDIF channel also activates the S/PDIF PCM mixer control.
 * Returns 0 or a negative error from the constraint helpers.
 */
static int oxygen_open(struct snd_pcm_substream *substream,
		       unsigned int channel)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	runtime->private_data = (void *)(uintptr_t)channel;
	if (channel == PCM_B && chip->has_ac97_1 &&
	    (chip->model.device_config & CAPTURE_2_FROM_AC97_1))
		runtime->hw = oxygen_ac97_hardware;
	else
		runtime->hw = *oxygen_hardware[channel];
	switch (channel) {
	case PCM_C:
		/* channel C cannot do the low rates */
		runtime->hw.rates &= ~(SNDRV_PCM_RATE_32000 |
				       SNDRV_PCM_RATE_64000);
		runtime->hw.rate_min = 44100;
		break;
	case PCM_MULTICH:
		runtime->hw.channels_max = chip->model.dac_channels_pcm;
		break;
	}
	if (chip->model.pcm_hardware_filter)
		chip->model.pcm_hardware_filter(channel, &runtime->hw);
	/* DMA counters work in 32-byte steps */
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
	if (err < 0)
		return err;
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
	if (err < 0)
		return err;
	if (runtime->hw.formats & SNDRV_PCM_FMTBIT_S32_LE) {
		/* only 24 of the 32 bits carry audio */
		err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
		if (err < 0)
			return err;
	}
	if (runtime->hw.channels_max > 2) {
		/* multichannel counts must be even */
		err = snd_pcm_hw_constraint_step(runtime, 0,
						 SNDRV_PCM_HW_PARAM_CHANNELS,
						 2);
		if (err < 0)
			return err;
	}
	snd_pcm_set_sync(substream);
	chip->streams[channel] = substream;

	mutex_lock(&chip->mutex);
	chip->pcm_active |= 1 << channel;
	if (channel == PCM_SPDIF) {
		chip->spdif_pcm_bits = chip->spdif_bits;
		chip->controls[CONTROL_SPDIF_PCM]->vd[0].access &=
			~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
			       SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->controls[CONTROL_SPDIF_PCM]->id);
	}
	mutex_unlock(&chip->mutex);

	return 0;
}
/* Thin per-device open callbacks: each binds its substream to one fixed
 * DMA channel via the common oxygen_open(). */
static int oxygen_rec_a_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_A);
}

static int oxygen_rec_b_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_B);
}

static int oxygen_rec_c_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_C);
}

static int oxygen_spdif_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_SPDIF);
}

static int oxygen_multich_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_MULTICH);
}

static int oxygen_ac97_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_AC97);
}
/*
 * Common close routine: marks the channel inactive, deactivates the
 * S/PDIF PCM mixer control when the S/PDIF channel closes, and lets the
 * S/PDIF source selection be re-evaluated when an output channel goes
 * away.  Always returns 0.
 */
static int oxygen_close(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);

	mutex_lock(&chip->mutex);
	chip->pcm_active &= ~(1 << channel);
	if (channel == PCM_SPDIF) {
		chip->controls[CONTROL_SPDIF_PCM]->vd[0].access |=
			SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
			       SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->controls[CONTROL_SPDIF_PCM]->id);
	}
	if (channel == PCM_SPDIF || channel == PCM_MULTICH)
		oxygen_update_spdif_source(chip);
	mutex_unlock(&chip->mutex);

	chip->streams[channel] = NULL;
	return 0;
}
/* Map the ALSA sample format to the chip's format field:
 * S32_LE carries 24-bit audio, everything else is 16-bit. */
static unsigned int oxygen_format(struct snd_pcm_hw_params *hw_params)
{
	return params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE
		? OXYGEN_FORMAT_24 : OXYGEN_FORMAT_16;
}
/* Map the requested sample rate to the chip's rate field;
 * unsupported values fall back to 48000. */
static unsigned int oxygen_rate(struct snd_pcm_hw_params *hw_params)
{
	switch (params_rate(hw_params)) {
	case 32000:
		return OXYGEN_RATE_32000;
	case 44100:
		return OXYGEN_RATE_44100;
	case 64000:
		return OXYGEN_RATE_64000;
	case 88200:
		return OXYGEN_RATE_88200;
	case 96000:
		return OXYGEN_RATE_96000;
	case 176400:
		return OXYGEN_RATE_176400;
	case 192000:
		return OXYGEN_RATE_192000;
	default: /* 48000 */
		return OXYGEN_RATE_48000;
	}
}
/* Map the ALSA sample format to the I2S word length field. */
static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params)
{
	return params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE
		? OXYGEN_I2S_BITS_24 : OXYGEN_I2S_BITS_16;
}
/* Map the channel count to the multichannel playback register field;
 * anything other than 4/6/8 is treated as stereo. */
static unsigned int oxygen_play_channels(struct snd_pcm_hw_params *hw_params)
{
	switch (params_channels(hw_params)) {
	case 4:
		return OXYGEN_PLAY_CHANNELS_4;
	case 6:
		return OXYGEN_PLAY_CHANNELS_6;
	case 8:
		return OXYGEN_PLAY_CHANNELS_8;
	default: /* 2 */
		return OXYGEN_PLAY_CHANNELS_2;
	}
}
/* Base address register for each DMA channel; the buffer size and period
 * size registers sit at fixed offsets (+4 / +6) for the stereo channels. */
static const unsigned int channel_base_registers[PCM_COUNT] = {
	[PCM_A] = OXYGEN_DMA_A_ADDRESS,
	[PCM_B] = OXYGEN_DMA_B_ADDRESS,
	[PCM_C] = OXYGEN_DMA_C_ADDRESS,
	[PCM_SPDIF] = OXYGEN_DMA_SPDIF_ADDRESS,
	[PCM_MULTICH] = OXYGEN_DMA_MULTICH_ADDRESS,
	[PCM_AC97] = OXYGEN_DMA_AC97_ADDRESS,
};
/*
 * Common hw_params: allocate the DMA buffer and program the channel's
 * address, buffer-count and period-count registers.  The counters are in
 * 32-bit words minus one; the multichannel channel has 32-bit count
 * registers, the others 16-bit ones at base+4/base+6.
 * Returns 0 or a negative error from the buffer allocation.
 */
static int oxygen_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);
	int err;

	err = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (err < 0)
		return err;

	oxygen_write32(chip, channel_base_registers[channel],
		       (u32)substream->runtime->dma_addr);
	if (channel == PCM_MULTICH) {
		oxygen_write32(chip, OXYGEN_DMA_MULTICH_COUNT,
			       params_buffer_bytes(hw_params) / 4 - 1);
		oxygen_write32(chip, OXYGEN_DMA_MULTICH_TCOUNT,
			       params_period_bytes(hw_params) / 4 - 1);
	} else {
		oxygen_write16(chip, channel_base_registers[channel] + 4,
			       params_buffer_bytes(hw_params) / 4 - 1);
		oxygen_write16(chip, channel_base_registers[channel] + 6,
			       params_period_bytes(hw_params) / 4 - 1);
	}
	return 0;
}
/*
 * Compute the I2S master clock field for the given channel and rate.
 * The model packs three MCLK ratios (for the <=48k, <=96k and >96k rate
 * bands) into one value; select the band by shifting 0, 2 or 4 bits.
 */
static u16 get_mclk(struct oxygen *chip, unsigned int channel,
		    struct snd_pcm_hw_params *params)
{
	unsigned int rate = params_rate(params);
	unsigned int mclks = (channel == PCM_MULTICH)
		? chip->model.dac_mclks : chip->model.adc_mclks;
	unsigned int shift = (rate <= 48000) ? 0 : ((rate <= 96000) ? 2 : 4);

	return OXYGEN_I2S_MCLK(mclks >> shift);
}
/*
 * hw_params for record channel A: program the common DMA registers, then
 * the record format and the I2S A port (rate, format, MCLK, word length)
 * under the register spinlock, and finally let the model configure its
 * ADC under the mutex.
 */
static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;

	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;

	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_A_SHIFT,
			     OXYGEN_REC_FORMAT_A_MASK);
	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT,
			      oxygen_rate(hw_params) |
			      chip->model.adc_i2s_format |
			      get_mclk(chip, PCM_A, hw_params) |
			      oxygen_i2s_bits(hw_params),
			      OXYGEN_I2S_RATE_MASK |
			      OXYGEN_I2S_FORMAT_MASK |
			      OXYGEN_I2S_MCLK_MASK |
			      OXYGEN_I2S_BITS_MASK);
	spin_unlock_irq(&chip->reg_lock);

	mutex_lock(&chip->mutex);
	chip->model.set_adc_params(chip, hw_params);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * hw_params for record channel B.  Like channel A, except that when the
 * channel actually captures from the second AC'97 codec there is no I2S
 * port to program and no ADC parameters to set.
 */
static int oxygen_rec_b_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int is_ac97;
	int err;

	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;

	is_ac97 = chip->has_ac97_1 &&
		(chip->model.device_config & CAPTURE_2_FROM_AC97_1);

	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_B_SHIFT,
			     OXYGEN_REC_FORMAT_B_MASK);
	if (!is_ac97)
		oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT,
				      oxygen_rate(hw_params) |
				      chip->model.adc_i2s_format |
				      get_mclk(chip, PCM_B, hw_params) |
				      oxygen_i2s_bits(hw_params),
				      OXYGEN_I2S_RATE_MASK |
				      OXYGEN_I2S_FORMAT_MASK |
				      OXYGEN_I2S_MCLK_MASK |
				      OXYGEN_I2S_BITS_MASK);
	spin_unlock_irq(&chip->reg_lock);

	if (!is_ac97) {
		mutex_lock(&chip->mutex);
		chip->model.set_adc_params(chip, hw_params);
		mutex_unlock(&chip->mutex);
	}
	return 0;
}
/*
 * hw_params for record channel C: only the record format field needs
 * programming; channel C has no configurable I2S port here.
 */
static int oxygen_rec_c_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;

	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;

	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_C_SHIFT,
			     OXYGEN_REC_FORMAT_C_MASK);
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}
/*
 * hw_params for the S/PDIF playback channel: disable the S/PDIF output
 * while reprogramming its format and rate, then re-evaluate the output
 * source.  The mutex serializes against mixer/control updates, the
 * spinlock against the interrupt handler.
 */
static int oxygen_spdif_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;

	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;

	mutex_lock(&chip->mutex);
	spin_lock_irq(&chip->reg_lock);
	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
			    OXYGEN_SPDIF_OUT_ENABLE);
	oxygen_write8_masked(chip, OXYGEN_PLAY_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_SPDIF_FORMAT_SHIFT,
			     OXYGEN_SPDIF_FORMAT_MASK);
	oxygen_write32_masked(chip, OXYGEN_SPDIF_CONTROL,
			      oxygen_rate(hw_params) << OXYGEN_SPDIF_OUT_RATE_SHIFT,
			      OXYGEN_SPDIF_OUT_RATE_MASK);
	oxygen_update_spdif_source(chip);
	spin_unlock_irq(&chip->reg_lock);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * hw_params for the multichannel playback channel: program channel
 * count, playback format and the multichannel I2S port, refresh the
 * S/PDIF source, then let the model set its DAC parameters and update
 * the DAC routing.
 */
static int oxygen_multich_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;

	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;

	mutex_lock(&chip->mutex);
	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_PLAY_CHANNELS,
			     oxygen_play_channels(hw_params),
			     OXYGEN_PLAY_CHANNELS_MASK);
	oxygen_write8_masked(chip, OXYGEN_PLAY_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_MULTICH_FORMAT_SHIFT,
			     OXYGEN_MULTICH_FORMAT_MASK);
	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
			      oxygen_rate(hw_params) |
			      chip->model.dac_i2s_format |
			      get_mclk(chip, PCM_MULTICH, hw_params) |
			      oxygen_i2s_bits(hw_params),
			      OXYGEN_I2S_RATE_MASK |
			      OXYGEN_I2S_FORMAT_MASK |
			      OXYGEN_I2S_MCLK_MASK |
			      OXYGEN_I2S_BITS_MASK);
	oxygen_update_spdif_source(chip);
	spin_unlock_irq(&chip->reg_lock);

	chip->model.set_dac_params(chip, hw_params);
	oxygen_update_dac_routing(chip);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * Common hw_free: mask the channel's interrupt, pulse the DMA flush bit
 * to reset the channel, then release the DMA buffer.
 */
static int oxygen_hw_free(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);
	unsigned int channel_mask = 1 << channel;

	spin_lock_irq(&chip->reg_lock);
	chip->interrupt_mask &= ~channel_mask;
	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);

	/* set-then-clear flushes the channel's FIFO */
	oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
	oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
	spin_unlock_irq(&chip->reg_lock);

	return snd_pcm_lib_free_pages(substream);
}
/* hw_free for the S/PDIF channel: additionally disable the S/PDIF
 * output before the common teardown. */
static int oxygen_spdif_hw_free(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
			    OXYGEN_SPDIF_OUT_ENABLE);
	spin_unlock_irq(&chip->reg_lock);

	return oxygen_hw_free(substream);
}
/*
 * prepare: reset the channel's DMA engine via the flush bit, and enable
 * or disable its period interrupt depending on whether the stream was
 * opened with no_period_wakeup.
 */
static int oxygen_prepare(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);
	unsigned int channel_mask = 1 << channel;

	spin_lock_irq(&chip->reg_lock);
	oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
	oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);

	if (substream->runtime->no_period_wakeup)
		chip->interrupt_mask &= ~channel_mask;
	else
		chip->interrupt_mask |= channel_mask;
	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}
/*
 * trigger: start/stop/suspend toggles the channels' running bits in the
 * DMA status register; pause push/release toggles the pause bits.  All
 * substreams linked to this one (on the same chip) are collected into a
 * single mask so they start and stop together.  Called with the stream
 * lock held, hence the plain spin_lock.  Returns 0 or -EINVAL for an
 * unsupported command.
 */
static int oxygen_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_substream *s;
	unsigned int mask = 0;
	int pausing;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		pausing = 0;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		pausing = 1;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (snd_pcm_substream_chip(s) == chip) {
			mask |= 1 << oxygen_substream_channel(s);
			snd_pcm_trigger_done(s, substream);
		}
	}

	spin_lock(&chip->reg_lock);
	if (!pausing) {
		if (cmd == SNDRV_PCM_TRIGGER_START)
			chip->pcm_running |= mask;
		else
			chip->pcm_running &= ~mask;
		oxygen_write8(chip, OXYGEN_DMA_STATUS, chip->pcm_running);
	} else {
		if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
			oxygen_set_bits8(chip, OXYGEN_DMA_PAUSE, mask);
		else
			oxygen_clear_bits8(chip, OXYGEN_DMA_PAUSE, mask);
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
/*
 * pointer: the hardware's current DMA address minus the buffer base,
 * converted to frames.  The 32-bit register read is atomic, so no lock
 * is needed.
 */
static snd_pcm_uframes_t oxygen_pointer(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int channel = oxygen_substream_channel(substream);
	u32 curr_addr;

	/* no spinlock, this read should be atomic */
	curr_addr = oxygen_read32(chip, channel_base_registers[channel]);
	return bytes_to_frames(runtime, curr_addr - (u32)runtime->dma_addr);
}
/* Per-channel PCM operation tables.  They differ only in the open and
 * hw_params/hw_free callbacks; everything else is shared. */
static struct snd_pcm_ops oxygen_rec_a_ops = {
	.open      = oxygen_rec_a_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_rec_a_hw_params,
	.hw_free   = oxygen_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};

static struct snd_pcm_ops oxygen_rec_b_ops = {
	.open      = oxygen_rec_b_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_rec_b_hw_params,
	.hw_free   = oxygen_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};

static struct snd_pcm_ops oxygen_rec_c_ops = {
	.open      = oxygen_rec_c_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_rec_c_hw_params,
	.hw_free   = oxygen_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};

static struct snd_pcm_ops oxygen_spdif_ops = {
	.open      = oxygen_spdif_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_spdif_hw_params,
	.hw_free   = oxygen_spdif_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};

static struct snd_pcm_ops oxygen_multich_ops = {
	.open      = oxygen_multich_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_multich_hw_params,
	.hw_free   = oxygen_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};

/* AC'97 needs no special hw_params handling beyond the common one. */
static struct snd_pcm_ops oxygen_ac97_ops = {
	.open      = oxygen_ac97_open,
	.close     = oxygen_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = oxygen_hw_params,
	.hw_free   = oxygen_hw_free,
	.prepare   = oxygen_prepare,
	.trigger   = oxygen_trigger,
	.pointer   = oxygen_pointer,
};
/* private_free callback: release the preallocated DMA buffers when the
 * PCM device is destroyed. */
static void oxygen_pcm_free(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
/*
 * oxygen_pcm_init - create the card's PCM devices
 * @chip: the chip instance
 *
 * Based on the flags in chip->model.device_config, creates up to three
 * PCM devices and preallocates DMA buffers for every stream created:
 *   device 0 "Multichannel": I2S playback and/or capture from I2S in 1/2
 *   device 1 "Digital":      S/PDIF playback and/or capture
 *   device 2 "AC97"/"Analog2": AC97 front panel, or a second analog input
 *                              when no AC97 codec 1 is present
 *
 * Returns 0 on success or a negative error code from snd_pcm_new().
 */
int oxygen_pcm_init(struct oxygen *chip)
{
struct snd_pcm *pcm;
int outs, ins;
int err;
/* device 0: multichannel playback / I2S capture */
outs = !!(chip->model.device_config & PLAYBACK_0_TO_I2S);
ins = !!(chip->model.device_config & (CAPTURE_0_FROM_I2S_1 |
CAPTURE_0_FROM_I2S_2));
if (outs | ins) {
err = snd_pcm_new(chip->card, "Multichannel",
0, outs, ins, &pcm);
if (err < 0)
return err;
if (outs)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_multich_ops);
/* capture comes from either I2S input, never both on device 0 */
if (chip->model.device_config & CAPTURE_0_FROM_I2S_1)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_a_ops);
else if (chip->model.device_config & CAPTURE_0_FROM_I2S_2)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, "Multichannel");
/* multichannel playback gets a larger buffer than the other streams */
if (outs)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
DEFAULT_BUFFER_BYTES_MULTICH,
BUFFER_BYTES_MAX_MULTICH);
if (ins)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
/* device 1: S/PDIF */
outs = !!(chip->model.device_config & PLAYBACK_1_TO_SPDIF);
ins = !!(chip->model.device_config & CAPTURE_1_FROM_SPDIF);
if (outs | ins) {
err = snd_pcm_new(chip->card, "Digital", 1, outs, ins, &pcm);
if (err < 0)
return err;
if (outs)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_spdif_ops);
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_c_ops);
pcm->private_data = chip;
pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, "Digital");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
/* device 2: AC97 front panel when codec 1 exists, else second analog input */
if (chip->has_ac97_1) {
outs = !!(chip->model.device_config & PLAYBACK_2_TO_AC97_1);
ins = !!(chip->model.device_config & CAPTURE_2_FROM_AC97_1);
} else {
outs = 0;
ins = !!(chip->model.device_config & CAPTURE_2_FROM_I2S_2);
}
if (outs | ins) {
err = snd_pcm_new(chip->card, outs ? "AC97" : "Analog2",
2, outs, ins, &pcm);
if (err < 0)
return err;
if (outs) {
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_ac97_ops);
/* route recording channel B to the AC97 codec */
oxygen_write8_masked(chip, OXYGEN_REC_ROUTING,
OXYGEN_REC_B_ROUTE_AC97_1,
OXYGEN_REC_B_ROUTE_MASK);
}
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
pcm->private_free = oxygen_pcm_free;
strcpy(pcm->name, outs ? "Front Panel" : "Analog 2");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
return 0;
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/kernel_lge_mako | drivers/pci/pcie/portdrv_bus.c | 9333 | 1231 | /*
* File: portdrv_bus.c
* Purpose: PCI Express Port Bus Driver's Bus Overloading Functions
*
* Copyright (C) 2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pcieport_if.h>
#include "portdrv.h"
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
/*
 * The "pci_express" bus.  Port service devices created by the port driver
 * hang off this bus and are bound to service drivers via
 * pcie_port_bus_match().
 */
struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
/*
 * pcie_port_bus_match - decide whether @drv can handle @dev
 *
 * A match requires that both the device and the driver belong to the
 * PCI Express port bus, that the driver implements the device's service,
 * and that the driver either accepts any port type or exactly the type
 * of the device's port.  Returns 1 on a match, 0 otherwise.
 */
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	/* Reject anything that is not on the PCIe port bus. */
	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	return driver->service == pciedev->service &&
	       (driver->port_type == PCIE_ANY_PORT ||
		driver->port_type == pciedev->port->pcie_type);
}
/* Register the PCI Express port bus type with the driver core. */
int pcie_port_bus_register(void)
{
return bus_register(&pcie_port_bus_type);
}
/* Remove the PCI Express port bus type from the driver core. */
void pcie_port_bus_unregister(void)
{
bus_unregister(&pcie_port_bus_type);
}
| gpl-2.0 |
garwynn/SMN900P_MI5_Kernel | fs/fscache/histogram.c | 12661 | 2857 | /* FS-Cache latency histogram
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#define FSCACHE_DEBUG_LEVEL THREAD
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_obj_instantiate_histogram[HZ];
atomic_t fscache_objs_histogram[HZ];
atomic_t fscache_ops_histogram[HZ];
atomic_t fscache_retrieval_delay_histogram[HZ];
atomic_t fscache_retrieval_histogram[HZ];
/*
* display the time-taken histogram
*/
/*
 * Emit one line of /proc/fs/fscache/histogram.  Iterator positions 1 and
 * 2 produce the two header lines; position N + 3 prints the counters for
 * the N-jiffy latency bucket.  Buckets whose five counters are all zero
 * produce no output.
 */
static int fscache_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned n[5], t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS "
" RETRV DLY RETRIEVLS\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= ========="
" ========= =========\n");
return 0;
default:
index = (unsigned long) v - 3;
n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]);
n[1] = atomic_read(&fscache_ops_histogram[index]);
n[2] = atomic_read(&fscache_objs_histogram[index]);
n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]);
n[4] = atomic_read(&fscache_retrieval_histogram[index]);
/* skip buckets that collected no samples at all */
if (!(n[0] | n[1] | n[2] | n[3] | n[4]))
return 0;
/* bucket time in milliseconds, printed as fractional seconds */
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n",
index, t, n[0], n[1], n[2], n[3], n[4]);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
/*
 * Position the iterator for a read.  Valid positions run from 1 to
 * HZ + 2 (two header lines plus HZ buckets); position 0 is promoted to
 * the first line, anything past the end terminates the sequence.
 */
static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos)
{
	unsigned long long pos = *_pos;

	if (pos >= HZ + 2)
		return NULL;
	if (pos == 0)
		pos = *_pos = 1;
	return (void *)(unsigned long) pos;
}
/*
* move to the next line
*/
/*
 * Advance the iterator to the next line, or end the sequence once the
 * position moves beyond HZ + 2.
 */
static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	if ((unsigned long long)*pos > HZ + 2)
		return NULL;
	return (void *)(unsigned long) *pos;
}
/*
* clean up after reading
*/
/* No per-read state is allocated, so there is nothing to release here. */
static void fscache_histogram_stop(struct seq_file *m, void *v)
{
}
/* seq_file iterator over the HZ + 2 virtual lines of the histogram table. */
static const struct seq_operations fscache_histogram_ops = {
.start = fscache_histogram_start,
.stop = fscache_histogram_stop,
.next = fscache_histogram_next,
.show = fscache_histogram_show,
};
/*
* open "/proc/fs/fscache/histogram" to provide latency data
*/
/* Attach the histogram seq_file iterator to a newly opened proc file. */
static int fscache_histogram_open(struct inode *inode, struct file *file)
{
return seq_open(file, &fscache_histogram_ops);
}
/* File operations for /proc/fs/fscache/histogram (standard seq_file plumbing). */
const struct file_operations fscache_histogram_fops = {
.owner = THIS_MODULE,
.open = fscache_histogram_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
| gpl-2.0 |
cryptickid/android_kernel_oneplus_msm8974 | drivers/video/aty/mach64_gx.c | 14709 | 20884 |
/*
* ATI Mach64 GX Support
*/
#include <linux/delay.h>
#include <linux/fb.h>
#include <asm/io.h>
#include <video/mach64.h>
#include "atyfb.h"
/* Definitions for the ICS 2595 == ATI 18818_1 Clockchip */
#define REF_FREQ_2595 1432 /* 14.33 MHz (exact 14.31818) */
#define REF_DIV_2595 46 /* really 43 on ICS 2595 !!! */
/* ohne Prescaler */
#define MAX_FREQ_2595 15938 /* 159.38 MHz (really 170.486) */
#define MIN_FREQ_2595 8000 /* 80.00 MHz ( 85.565) */
/* mit Prescaler 2, 4, 8 */
#define ABS_MIN_FREQ_2595 1000 /* 10.00 MHz (really 10.697) */
#define N_ADJ_2595 257
#define STOP_BITS_2595 0x1800
#define MIN_N_408 2
#define MIN_N_1703 6
#define MIN_M 2
#define MAX_M 30
#define MIN_N 35
#define MAX_N 255-8
/*
* Support Functions
*/
/*
 * Burn a few DAC register-port cycles: one read of DAC_REGS followed by
 * four dummy reads of DAC_REGS + 2.  Presumably the RAMDAC needs these
 * accesses before it will accept the next real command — the exact
 * requirement is not documented here.
 */
static void aty_dac_waste4(const struct atyfb_par *par)
{
	int i;

	(void) aty_ld_8(DAC_REGS, par);
	for (i = 0; i < 4; i++)
		(void) aty_ld_8(DAC_REGS + 2, par);
}
/*
 * Pulse the clock chip's strobe line: after a short settling delay,
 * rewrite CLOCK_CNTL (at the chip-specific write offset) with
 * CLOCK_STROBE set.
 */
static void aty_StrobeClock(const struct atyfb_par *par)
{
	u8 clock_cntl;

	udelay(26);

	clock_cntl = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
		 clock_cntl | CLOCK_STROBE, par);
}
/*
* IBM RGB514 DAC and Clock Chip
*/
/*
 * Write @val into the IBM RGB514's indexed register @offset.  The 16-bit
 * index is loaded low byte first (via DAC_W_INDEX) then high byte (via
 * DAC_DATA), and the value itself goes through DAC_MASK.  DAC_CNTL is
 * set to 1 for the duration of the access and cleared afterwards.
 */
static void aty_st_514(int offset, u8 val, const struct atyfb_par *par)
{
aty_st_8(DAC_CNTL, 1, par);
/* right addr byte */
aty_st_8(DAC_W_INDEX, offset & 0xff, par);
/* left addr byte */
aty_st_8(DAC_DATA, (offset >> 8) & 0xff, par);
aty_st_8(DAC_MASK, val, par);
aty_st_8(DAC_CNTL, 0, par);
}
/*
 * Program the IBM RGB514 DAC for the requested pixel depth.  Only the
 * per-depth table row differs: 8 bpp, 16 bpp (5:5:5) and 32 bpp (xRGB)
 * are handled; any other depth falls back to the 8 bpp settings.
 * Always returns 0.
 */
static int aty_set_dac_514(const struct fb_info *info,
const union aty_pll *pll, u32 bpp, u32 accel)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
static struct {
u8 pixel_dly;
u8 misc2_cntl;
u8 pixel_rep;
u8 pixel_cntl_index;
u8 pixel_cntl_v1;
} tab[3] = {
{
0, 0x41, 0x03, 0x71, 0x45}, /* 8 bpp */
{
0, 0x45, 0x04, 0x0c, 0x01}, /* 555 */
{
0, 0x45, 0x06, 0x0e, 0x00}, /* XRGB */
};
int i;
/* select the table row for the depth */
switch (bpp) {
case 8:
default:
i = 0;
break;
case 16:
i = 1;
break;
case 32:
i = 2;
break;
}
aty_st_514(0x90, 0x00, par); /* VRAM Mask Low */
aty_st_514(0x04, tab[i].pixel_dly, par); /* Horizontal Sync Control */
aty_st_514(0x05, 0x00, par); /* Power Management */
aty_st_514(0x02, 0x01, par); /* Misc Clock Control */
aty_st_514(0x71, tab[i].misc2_cntl, par); /* Misc Control 2 */
aty_st_514(0x0a, tab[i].pixel_rep, par); /* Pixel Format */
aty_st_514(tab[i].pixel_cntl_index, tab[i].pixel_cntl_v1, par);
/* Misc Control 2 / 16 BPP Control / 32 BPP Control */
return 0;
}
/*
 * Pick IBM RGB514 PLL parameters for the requested pixel clock period
 * (@vclk_per, in ps): scan a small fixed table of known-good m/n pairs
 * and take the first entry whose rounding limit covers the request.
 * Returns 0 on success, -EINVAL if the period exceeds every entry.
 */
static int aty_var_to_pll_514(const struct fb_info *info, u32 vclk_per,
			      u32 bpp, union aty_pll *pll)
{
	/*
	 * FIXME: use real calculations instead of using fixed values from the old
	 * driver
	 */
	static struct {
		u32 limit;	/* pixlock rounding limit (arbitrary) */
		u8 m;		/* (df<<6) | vco_div_count */
		u8 n;		/* ref_div_count */
	} RGB514_clocks[7] = {
		{  8000, (3 << 6) | 20, 9 },	/* 7395 ps / 135.2273 MHz */
		{ 10000, (1 << 6) | 19, 3 },	/* 9977 ps / 100.2273 MHz */
		{ 13000, (1 << 6) |  2, 3 },	/* 12509 ps / 79.9432 MHz */
		{ 14000, (2 << 6) |  8, 7 },	/* 13394 ps / 74.6591 MHz */
		{ 16000, (1 << 6) | 44, 6 },	/* 15378 ps / 65.0284 MHz */
		{ 25000, (1 << 6) | 15, 5 },	/* 17460 ps / 57.2727 MHz */
		{ 50000, (0 << 6) | 53, 7 },	/* 33145 ps / 30.1705 MHz */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(RGB514_clocks); i++) {
		if (vclk_per > RGB514_clocks[i].limit)
			continue;
		pll->ibm514.m = RGB514_clocks[i].m;
		pll->ibm514.n = RGB514_clocks[i].n;
		return 0;
	}
	return -EINVAL;
}
/*
 * Convert stored RGB514 PLL parameters back into a pixel clock period.
 * The m field packs the divider flag in its top two bits and the VCO
 * divider count in the low six; n is the reference divider count.
 */
static u32 aty_pll_514_to_var(const struct fb_info *info,
			      const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u8 df = pll->ibm514.m >> 6;		/* divider flag */
	u8 vco_div_count = pll->ibm514.m & 0x3f;
	u8 ref_div_count = pll->ibm514.n;

	return ((par->ref_clk_per * ref_div_count) << (3 - df)) /
		(vco_div_count + 65);
}
/*
 * Load the precomputed m/n dividers into the RGB514's PLL registers
 * (F0/M0 and F1/N0) after setting up the DAC operation, PLL control and
 * power management registers.
 */
static void aty_set_pll_514(const struct fb_info *info,
const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
aty_st_514(0x06, 0x02, par); /* DAC Operation */
aty_st_514(0x10, 0x01, par); /* PLL Control 1 */
aty_st_514(0x70, 0x01, par); /* Misc Control 1 */
aty_st_514(0x8f, 0x1f, par); /* PLL Ref. Divider Input */
aty_st_514(0x03, 0x00, par); /* Sync Control */
aty_st_514(0x05, 0x00, par); /* Power Management */
aty_st_514(0x20, pll->ibm514.m, par); /* F0 / M0 */
aty_st_514(0x21, pll->ibm514.n, par); /* F1 / N0 */
}
/* DAC operations for boards with an IBM RGB514. */
const struct aty_dac_ops aty_dac_ibm514 = {
.set_dac = aty_set_dac_514,
};
/* PLL operations for boards with an IBM RGB514. */
const struct aty_pll_ops aty_pll_ibm514 = {
.var_to_pll = aty_var_to_pll_514,
.pll_to_var = aty_pll_514_to_var,
.set_pll = aty_set_pll_514,
};
/*
* ATI 68860-B DAC
*/
/*
 * Program the ATI 68860-B DAC: pick the graphics-mode and device-setup
 * register values by pixel depth (unaccelerated modes use a fixed VGA
 * fallback), add the memory-size field, and load the registers through
 * the extended DAC register-select lines (RS2/RS3).  Always returns 0.
 */
static int aty_set_dac_ATI68860_B(const struct fb_info *info,
const union aty_pll *pll, u32 bpp,
u32 accel)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 gModeReg, devSetupRegA, temp, mask;
gModeReg = 0;
devSetupRegA = 0;
switch (bpp) {
case 8:
gModeReg = 0x83;
devSetupRegA =
0x60 | 0x00 /*(info->mach64DAC8Bit ? 0x00 : 0x01) */ ;
break;
case 15:
gModeReg = 0xA0;
devSetupRegA = 0x60;
break;
case 16:
gModeReg = 0xA1;
devSetupRegA = 0x60;
break;
case 24:
gModeReg = 0xC0;
devSetupRegA = 0x60;
break;
case 32:
gModeReg = 0xE3;
devSetupRegA = 0x60;
break;
}
/* unaccelerated modes override the depth-specific values */
if (!accel) {
gModeReg = 0x80;
devSetupRegA = 0x61;
}
temp = aty_ld_8(DAC_CNTL, par);
aty_st_8(DAC_CNTL, (temp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3,
par);
aty_st_8(DAC_REGS + 2, 0x1D, par);
aty_st_8(DAC_REGS + 3, gModeReg, par);
aty_st_8(DAC_REGS, 0x02, par);
temp = aty_ld_8(DAC_CNTL, par);
aty_st_8(DAC_CNTL, temp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par);
/* memory-size field: less than, exactly, or more than 1 MB */
if (info->fix.smem_len < ONE_MB)
mask = 0x04;
else if (info->fix.smem_len == ONE_MB)
mask = 0x08;
else
mask = 0x0C;
/* The following assumes that the BIOS has correctly set R7 of the
 * Device Setup Register A at boot time.
 */
#define A860_DELAY_L 0x80
temp = aty_ld_8(DAC_REGS, par);
aty_st_8(DAC_REGS, (devSetupRegA | mask) | (temp & A860_DELAY_L),
par);
temp = aty_ld_8(DAC_CNTL, par);
aty_st_8(DAC_CNTL, (temp & ~(DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3)),
par);
aty_st_le32(BUS_CNTL, 0x890e20f1, par);
aty_st_le32(DAC_CNTL, 0x47052100, par);
return 0;
}
/* DAC operations for boards with an ATI 68860-B. */
const struct aty_dac_ops aty_dac_ati68860b = {
.set_dac = aty_set_dac_ATI68860_B,
};
/*
* AT&T 21C498 DAC
*/
/*
 * Program the AT&T 21C498 DAC's mode register for the given depth.
 * Returns the mux mode: 1 when 8 bpp at dot clocks above 80 MHz (8000
 * in 0.01 MHz units) needs pixel multiplexing, 0 otherwise.
 */
static int aty_set_dac_ATT21C498(const struct fb_info *info,
const union aty_pll *pll, u32 bpp,
u32 accel)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 dotClock;
int muxmode = 0;
int DACMask = 0;
/* dot clock in 0.01 MHz units, from the period in ps */
dotClock = 100000000 / pll->ics2595.period_in_ps;
switch (bpp) {
case 8:
if (dotClock > 8000) {
DACMask = 0x24;
muxmode = 1;
} else
DACMask = 0x04;
break;
case 15:
DACMask = 0x16;
break;
case 16:
DACMask = 0x36;
break;
case 24:
DACMask = 0xE6;
break;
case 32:
DACMask = 0xE6;
break;
}
/* 8-bit DAC path is hard-enabled; the mach64DAC8Bit test was disabled */
if (1 /* info->mach64DAC8Bit */ )
DACMask |= 0x02;
aty_dac_waste4(par);
aty_st_8(DAC_REGS + 2, DACMask, par);
aty_st_le32(BUS_CNTL, 0x890e20f1, par);
aty_st_le32(DAC_CNTL, 0x00072000, par);
return muxmode;
}
/* DAC operations for boards with an AT&T 21C498. */
const struct aty_dac_ops aty_dac_att21c498 = {
.set_dac = aty_set_dac_ATT21C498,
};
/*
* ATI 18818 / ICS 2595 Clock Chip
*/
/*
 * Compute the ICS 2595 programming word for the requested pixel clock
 * period (@vclk_per in ps).  The target frequency (in 0.01 MHz units)
 * is doubled — along with the post divider — until it reaches the
 * chip's VCO range, then scaled by the reference divider/frequency and
 * turned into an N value.  The post-divider selection and stop bits are
 * merged into the program word.  Returns 0 on success, or -EINVAL when
 * the request is outside the chip's overall frequency range.
 */
static int aty_var_to_pll_18818(const struct fb_info *info, u32 vclk_per,
u32 bpp, union aty_pll *pll)
{
u32 MHz100; /* in 0.01 MHz */
u32 program_bits;
u32 post_divider;
/* Calculate the programming word */
MHz100 = 100000000 / vclk_per;
/* -1 in a u32 acts as a "not computed yet" sentinel, tested below */
program_bits = -1;
post_divider = 1;
if (MHz100 > MAX_FREQ_2595) {
MHz100 = MAX_FREQ_2595;
return -EINVAL;
} else if (MHz100 < ABS_MIN_FREQ_2595) {
program_bits = 0; /* MHz100 = 257 */
return -EINVAL;
} else {
/* double frequency and post divider until within VCO range */
while (MHz100 < MIN_FREQ_2595) {
MHz100 *= 2;
post_divider *= 2;
}
}
/* scale by the reference divider with rounding to 0.01 MHz */
MHz100 *= 1000;
MHz100 = (REF_DIV_2595 * MHz100) / REF_FREQ_2595;
MHz100 += 500; /* + 0.5 round */
MHz100 /= 1000;
if (program_bits == -1) {
program_bits = MHz100 - N_ADJ_2595;
/* encode the post divider in bits 9..10 */
switch (post_divider) {
case 1:
program_bits |= 0x0600;
break;
case 2:
program_bits |= 0x0400;
break;
case 4:
program_bits |= 0x0200;
break;
case 8:
default:
break;
}
}
program_bits |= STOP_BITS_2595;
pll->ics2595.program_bits = program_bits;
pll->ics2595.locationAddr = 0;
pll->ics2595.post_divider = post_divider;
pll->ics2595.period_in_ps = vclk_per;
return 0;
}
/*
 * Map the stored PLL state back to a pixel clock period; the period
 * recorded by var_to_pll is simply returned unchanged for now.
 */
static u32 aty_pll_18818_to_var(const struct fb_info *info,
				const union aty_pll *pll)
{
	return pll->ics2595.period_in_ps;
}
/*
 * Bit-bang one data bit into the ICS 2595: present the bit on
 * CLOCK_CNTL bit 2, then drive the serial clock (bit 3) low and high,
 * strobing the chip after each edge.
 */
static void aty_ICS2595_put1bit(u8 data, const struct atyfb_par *par)
{
u8 tmp;
data &= 0x01;
/* data bit on bit 2 */
tmp = aty_ld_8(CLOCK_CNTL, par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
(tmp & ~0x04) | (data << 2), par);
/* clock low */
tmp = aty_ld_8(CLOCK_CNTL, par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (0 << 3),
par);
aty_StrobeClock(par);
/* clock high */
tmp = aty_ld_8(CLOCK_CNTL, par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (1 << 3),
par);
aty_StrobeClock(par);
return;
}
/*
 * Serially program the ATI 18818 / ICS 2595 clock chip: blank the
 * display via CRTC_EXT_DISP_EN, clock in the start/command bits, the
 * 5-bit location address and the 13-bit program word, then restore the
 * CRTC and latch the new frequency with a final strobe.
 */
static void aty_set_pll18818(const struct fb_info *info,
const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 program_bits;
u32 locationAddr;
u32 i;
u8 old_clock_cntl;
u8 old_crtc_ext_disp;
old_clock_cntl = aty_ld_8(CLOCK_CNTL, par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par);
/* blank the display while reprogramming the clock */
old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
aty_st_8(CRTC_GEN_CNTL + 3,
old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);
mdelay(15); /* delay for 50 (15) ms */
program_bits = pll->ics2595.program_bits;
locationAddr = pll->ics2595.locationAddr;
/* Program the clock chip */
aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par); /* Strobe = 0 */
aty_StrobeClock(par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 1, par); /* Strobe = 0 */
aty_StrobeClock(par);
aty_ICS2595_put1bit(1, par); /* Send start bits */
aty_ICS2595_put1bit(0, par); /* Start bit */
aty_ICS2595_put1bit(0, par); /* Read / ~Write */
for (i = 0; i < 5; i++) { /* Location 0..4 */
aty_ICS2595_put1bit(locationAddr & 1, par);
locationAddr >>= 1;
}
/* 13 program-word bits, LSB first */
for (i = 0; i < 8 + 1 + 2 + 2; i++) {
aty_ICS2595_put1bit(program_bits & 1, par);
program_bits >>= 1;
}
mdelay(1); /* delay for 1 ms */
(void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */
/* restore CRTC state and latch the new clock selection */
aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);
aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
old_clock_cntl | CLOCK_STROBE, par);
mdelay(50); /* delay for 50 (15) ms */
aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
((pll->ics2595.locationAddr & 0x0F) | CLOCK_STROBE), par);
return;
}
/* PLL operations for boards with an ATI 18818 / ICS 2595 clock chip. */
const struct aty_pll_ops aty_pll_ati18818_1 = {
.var_to_pll = aty_var_to_pll_18818,
.pll_to_var = aty_pll_18818_to_var,
.set_pll = aty_set_pll18818,
};
/*
* STG 1703 Clock Chip
*/
/*
 * Compute the STG 1703 programming word for @vclk_per (in ps).  The
 * requested frequency (0.01 MHz units) is clamped to the chip's range
 * and doubled until it reaches the VCO range, accumulating the shift in
 * the divider's high bits; a small search over N values then picks the
 * divider pair with the smallest remainder against the reference clock.
 * Always returns 0; the result is stored in pll->ics2595.
 */
static int aty_var_to_pll_1703(const struct fb_info *info, u32 vclk_per,
u32 bpp, union aty_pll *pll)
{
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
u32 temp, tempB;
u16 remainder, preRemainder;
short divider = 0, tempA;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
/* Calculate program word */
if (mhz100 == 0)
program_bits = 0xE0;
else {
/* clamp into the chip's usable range */
if (mhz100 < mach64MinFreq)
mhz100 = mach64MinFreq;
if (mhz100 > mach64MaxFreq)
mhz100 = mach64MaxFreq;
divider = 0;
/* scale up into the VCO range; shift count lands in bits 5+ */
while (mhz100 < (mach64MinFreq << 3)) {
mhz100 <<= 1;
divider += 0x20;
}
temp = (unsigned int) (mhz100);
temp = (unsigned int) (temp * (MIN_N_1703 + 2));
temp -= (short) (mach64RefFreq << 1);
tempA = MIN_N_1703;
preRemainder = 0xffff;
/* search N values, keeping the pair with the smallest remainder */
do {
tempB = temp;
remainder = tempB % mach64RefFreq;
tempB = tempB / mach64RefFreq;
if ((tempB & 0xffff) <= 127
&& (remainder <= preRemainder)) {
preRemainder = remainder;
divider &= ~0x1f;
divider |= tempA;
divider =
(divider & 0x00ff) +
((tempB & 0xff) << 8);
}
temp += mhz100;
tempA++;
} while (tempA <= (MIN_N_1703 << 1));
program_bits = divider;
}
pll->ics2595.program_bits = program_bits;
pll->ics2595.locationAddr = 0;
pll->ics2595.post_divider = divider; /* fuer nix */
pll->ics2595.period_in_ps = vclk_per;
return 0;
}
/*
 * Map the stored PLL state back to a pixel clock period; the period
 * recorded by var_to_pll is simply returned unchanged for now.
 */
static u32 aty_pll_1703_to_var(const struct fb_info *info,
			       const union aty_pll *pll)
{
	return pll->ics2595.period_in_ps;
}
/*
 * Load the precomputed program word into the STG 1703: blank the
 * display, write the location address and both program-word bytes
 * through the DAC register port, then restore the CRTC.
 */
static void aty_set_pll_1703(const struct fb_info *info,
const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 program_bits;
u32 locationAddr;
char old_crtc_ext_disp;
/* blank the display while reprogramming the clock */
old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
aty_st_8(CRTC_GEN_CNTL + 3,
old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);
program_bits = pll->ics2595.program_bits;
locationAddr = pll->ics2595.locationAddr;
/* Program clock */
aty_dac_waste4(par);
(void) aty_ld_8(DAC_REGS + 2, par);
aty_st_8(DAC_REGS + 2, (locationAddr << 1) + 0x20, par);
aty_st_8(DAC_REGS + 2, 0, par);
aty_st_8(DAC_REGS + 2, (program_bits & 0xFF00) >> 8, par);
aty_st_8(DAC_REGS + 2, (program_bits & 0xFF), par);
(void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */
aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);
return;
}
/* PLL operations for boards with an STG 1703 clock chip. */
const struct aty_pll_ops aty_pll_stg1703 = {
.var_to_pll = aty_var_to_pll_1703,
.pll_to_var = aty_pll_1703_to_var,
.set_pll = aty_set_pll_1703,
};
/*
* Chrontel 8398 Clock Chip
*/
/*
 * Compute the Chrontel 8398 programming word for @vclk_per (in ps).
 * The requested frequency is clamped and scaled into the VCO range
 * (counting the shift in k), then an exhaustive search over all m/n
 * divider pairs picks the combination whose 8-bit-scaled output
 * frequency is closest to the target.  Always returns 0; the result is
 * stored in pll->ics2595.
 */
static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
u32 bpp, union aty_pll *pll)
{
u32 tempA, tempB, fOut, longMHz100, diff, preDiff;
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
u16 m, n, k = 0, save_m, save_n, twoToKth;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
save_m = 0;
save_n = 0;
/* Calculate program word */
if (mhz100 == 0)
program_bits = 0xE0;
else {
if (mhz100 < mach64MinFreq)
mhz100 = mach64MinFreq;
if (mhz100 > mach64MaxFreq)
mhz100 = mach64MaxFreq;
longMHz100 = mhz100 * 256 / 100; /* 8 bit scale this */
/* scale up into the VCO range, counting the shift in k */
while (mhz100 < (mach64MinFreq << 3)) {
mhz100 <<= 1;
k++;
}
twoToKth = 1 << k;
diff = 0;
preDiff = 0xFFFFFFFF;
/* exhaustive m/n search minimizing |fOut - target| */
for (m = MIN_M; m <= MAX_M; m++) {
for (n = MIN_N; n <= MAX_N; n++) {
tempA = 938356; /* 14.31818 * 65536 */
tempA *= (n + 8); /* 43..256 */
tempB = twoToKth * 256;
tempB *= (m + 2); /* 4..32 */
fOut = tempA / tempB; /* 8 bit scale */
if (longMHz100 > fOut)
diff = longMHz100 - fOut;
else
diff = fOut - longMHz100;
if (diff < preDiff) {
save_m = m;
save_n = n;
preDiff = diff;
}
}
}
program_bits = (k << 6) + (save_m) + (save_n << 8);
}
pll->ics2595.program_bits = program_bits;
pll->ics2595.locationAddr = 0;
pll->ics2595.post_divider = 0;
pll->ics2595.period_in_ps = vclk_per;
return 0;
}
/*
 * Map the stored PLL state back to a pixel clock period; the period
 * recorded by var_to_pll is simply returned unchanged for now.
 */
static u32 aty_pll_8398_to_var(const struct fb_info *info,
			       const union aty_pll *pll)
{
	return pll->ics2595.period_in_ps;
}
/*
 * Load the precomputed program word into the Chrontel 8398: blank the
 * display, select the extended DAC registers (RS2/RS3), write the
 * location address and both program-word bytes, then restore the
 * register selects and the CRTC.
 */
static void aty_set_pll_8398(const struct fb_info *info,
const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 program_bits;
u32 locationAddr;
char old_crtc_ext_disp;
char tmp;
/* blank the display while reprogramming the clock */
old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
aty_st_8(CRTC_GEN_CNTL + 3,
old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);
program_bits = pll->ics2595.program_bits;
locationAddr = pll->ics2595.locationAddr;
/* Program clock */
tmp = aty_ld_8(DAC_CNTL, par);
aty_st_8(DAC_CNTL, tmp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par);
aty_st_8(DAC_REGS, locationAddr, par);
aty_st_8(DAC_REGS + 1, (program_bits & 0xff00) >> 8, par);
aty_st_8(DAC_REGS + 1, (program_bits & 0xff), par);
tmp = aty_ld_8(DAC_CNTL, par);
aty_st_8(DAC_CNTL, (tmp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3,
par);
(void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */
aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);
return;
}
/* PLL operations for boards with a Chrontel 8398 clock chip. */
const struct aty_pll_ops aty_pll_ch8398 = {
.var_to_pll = aty_var_to_pll_8398,
.pll_to_var = aty_pll_8398_to_var,
.set_pll = aty_set_pll_8398,
};
/*
* AT&T 20C408 Clock Chip
*/
/*
 * Compute the AT&T 20C408 programming word for @vclk_per (in ps).
 * Structure mirrors the STG 1703 routine: clamp into range, scale into
 * the VCO range (shift count in the divider's high bits), then search
 * N values for the divider pair with the smallest remainder against
 * the reference clock.  Always returns 0; result lands in pll->ics2595.
 */
static int aty_var_to_pll_408(const struct fb_info *info, u32 vclk_per,
u32 bpp, union aty_pll *pll)
{
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
u32 temp, tempB;
u16 remainder, preRemainder;
short divider = 0, tempA;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
/* Calculate program word */
if (mhz100 == 0)
program_bits = 0xFF;
else {
/* clamp into the chip's usable range */
if (mhz100 < mach64MinFreq)
mhz100 = mach64MinFreq;
if (mhz100 > mach64MaxFreq)
mhz100 = mach64MaxFreq;
/* scale up into the VCO range; shift count lands in bits 6+ */
while (mhz100 < (mach64MinFreq << 3)) {
mhz100 <<= 1;
divider += 0x40;
}
temp = (unsigned int) mhz100;
temp = (unsigned int) (temp * (MIN_N_408 + 2));
temp -= ((short) (mach64RefFreq << 1));
tempA = MIN_N_408;
preRemainder = 0xFFFF;
/* search N values, keeping the pair with the smallest remainder */
do {
tempB = temp;
remainder = tempB % mach64RefFreq;
tempB = tempB / mach64RefFreq;
if (((tempB & 0xFFFF) <= 255)
&& (remainder <= preRemainder)) {
preRemainder = remainder;
divider &= ~0x3f;
divider |= tempA;
divider =
(divider & 0x00FF) +
((tempB & 0xFF) << 8);
}
temp += mhz100;
tempA++;
} while (tempA <= 32);
program_bits = divider;
}
pll->ics2595.program_bits = program_bits;
pll->ics2595.locationAddr = 0;
pll->ics2595.post_divider = divider; /* fuer nix */
pll->ics2595.period_in_ps = vclk_per;
return 0;
}
/*
 * Map the stored PLL state back to a pixel clock period; the period
 * recorded by var_to_pll is simply returned unchanged for now.
 */
static u32 aty_pll_408_to_var(const struct fb_info *info,
			      const union aty_pll *pll)
{
	return pll->ics2595.period_in_ps;
}
/*
 * Load the precomputed program word into the AT&T 20C408: blank the
 * display, unlock the chip through its control register, write the two
 * program-word bytes plus a fixed control byte at the location address
 * (each phase separated by a 400 us settle delay), then re-lock and
 * restore the CRTC.
 */
static void aty_set_pll_408(const struct fb_info *info,
const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 program_bits;
u32 locationAddr;
u8 tmpA, tmpB, tmpC;
char old_crtc_ext_disp;
/* blank the display while reprogramming the clock */
old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
aty_st_8(CRTC_GEN_CNTL + 3,
old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);
program_bits = pll->ics2595.program_bits;
locationAddr = pll->ics2595.locationAddr;
/* Program clock */
aty_dac_waste4(par);
tmpB = aty_ld_8(DAC_REGS + 2, par) | 1;
aty_dac_waste4(par);
aty_st_8(DAC_REGS + 2, tmpB, par);
tmpA = tmpB;
tmpC = tmpA;
tmpA |= 8;
tmpB = 1;
aty_st_8(DAC_REGS, tmpB, par);
aty_st_8(DAC_REGS + 2, tmpA, par);
udelay(400); /* delay for 400 us */
/* write high byte, low byte and control byte at the location address */
locationAddr = (locationAddr << 2) + 0x40;
tmpB = locationAddr;
tmpA = program_bits >> 8;
aty_st_8(DAC_REGS, tmpB, par);
aty_st_8(DAC_REGS + 2, tmpA, par);
tmpB = locationAddr + 1;
tmpA = (u8) program_bits;
aty_st_8(DAC_REGS, tmpB, par);
aty_st_8(DAC_REGS + 2, tmpA, par);
tmpB = locationAddr + 2;
tmpA = 0x77;
aty_st_8(DAC_REGS, tmpB, par);
aty_st_8(DAC_REGS + 2, tmpA, par);
udelay(400); /* delay for 400 us */
/* re-lock the chip (clear the bits set during unlock) */
tmpA = tmpC & (~(1 | 8));
tmpB = 1;
aty_st_8(DAC_REGS, tmpB, par);
aty_st_8(DAC_REGS + 2, tmpA, par);
(void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */
aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);
return;
}
/* PLL operations for boards with an AT&T 20C408 clock chip. */
const struct aty_pll_ops aty_pll_att20c408 = {
.var_to_pll = aty_var_to_pll_408,
.pll_to_var = aty_pll_408_to_var,
.set_pll = aty_set_pll_408,
};
/*
* Unsupported DAC and Clock Chip
*/
/*
 * Fallback for unrecognized DACs: load conservative BUS_CNTL/DAC_CNTL
 * values.  Note that the second pair of writes (added in 2.2.3p1)
 * overrides the first; both are kept as in the historical driver.
 * Always returns 0.
 */
static int aty_set_dac_unsupported(const struct fb_info *info,
const union aty_pll *pll, u32 bpp,
u32 accel)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
aty_st_le32(BUS_CNTL, 0x890e20f1, par);
aty_st_le32(DAC_CNTL, 0x47052100, par);
/* new in 2.2.3p1 from Geert. ???????? */
aty_st_le32(BUS_CNTL, 0x590e10ff, par);
aty_st_le32(DAC_CNTL, 0x47012100, par);
return 0;
}
/* No-op stand-in for the unsupported clock-chip operations; reports success. */
static int dummy(void) { return 0; }
/* DAC operations used when the DAC type is not recognized. */
const struct aty_dac_ops aty_dac_unsupported = {
.set_dac = aty_set_dac_unsupported,
};
/* PLL operations used when the clock chip is not recognized; all no-ops. */
const struct aty_pll_ops aty_pll_unsupported = {
.var_to_pll = (void *) dummy,
.pll_to_var = (void *) dummy,
.set_pll = (void *) dummy,
};
| gpl-2.0 |
AxFab/newlib | newlib/libm/machine/aarch64/sf_trunc.c | 118 | 1626 | /* sf_trunc.c -- define truncf
Copyright (c) 2011, 2012 ARM Ltd. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
/*
 * truncf: round @x toward zero to an integral value, using the AArch64
 * "frintz" (round to integral, toward zero) instruction on the
 * single-precision register view.
 */
float
truncf (float x)
{
float result;
asm ("frintz\t%s0, %s1" : "=w" (result) : "w" (x));
return result;
}
| gpl-2.0 |
fards/ainol_elfii_kernel | arch/arm/mach-omap2/clock34xx.c | 886 | 4195 | /*
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2010 Nokia Corporation
*
* Paul Walmsley
* Jouni Högander
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
* Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <plat/clock.h>
#include "clock.h"
#include "clock34xx.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
{
u32 r;
/* derive the CM_IDLEST address from the enable register: clear the
 * CLKEN offset bits and select register offset 0x20 instead */
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* Clock ops for SSI on 3430ES2+: default enable/disable, custom idle check. */
const struct clkops clkops_omap3430es2_ssi_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap3430es2_clk_ssi_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* Some OMAP modules on OMAP3 ES2+ chips have both initiator and
* target IDLEST bits. For our purposes, we are concerned with the
* target IDLEST bits, which exist at a different bit position than
* the *CLKEN bit position for these modules (DSS and USBHOST) (The
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
{
u32 r;
/* derive the CM_IDLEST address from the enable register: clear the
 * CLKEN offset bits and select register offset 0x20 instead */
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
/* USBHOST_IDLE has same shift */
*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* Clock ops for DSS/USBHOST on 3430ES2+: default enable/disable, custom idle check. */
const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
{
u32 r;
/* derive the CM_IDLEST address from the enable register: clear the
 * CLKEN offset bits and select register offset 0x20 instead */
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/*
 * clkops for the OMAP3430 ES2+ HSOTGUSB clock: default enable/disable,
 * with idle status read from the HSOTGUSB target IDLEST bit.
 */
const struct clkops clkops_omap3430es2_hsotgusb_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
jumpnow/linux | tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c | 886 | 2738 | /******************************************************************************
*
* Copyright FUJITSU LIMITED 2010
* Copyright KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* DESCRIPTION
* Wait on uninitialized heap. It shold be zero and FUTEX_WAIT should
* return immediately. This test is intent to test zero page handling in
* futex.
*
* AUTHOR
* KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
*
* HISTORY
* 2010-Jan-6: Initial version by KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
*
*****************************************************************************/
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <syscall.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <linux/futex.h>
#include <libgen.h>
#include "logging.h"
#include "futextest.h"
#define WAIT_US 5000000
static int child_blocked = 1;
static int child_ret;
void *buf;
/* Print the command-line help for this selftest. */
void usage(char *prog)
{
	printf("Usage: %s\n", prog);
	printf(" -c Use color\n"
	       " -h Display this help message\n"
	       " -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
	       VQUIET, VCRITICAL, VINFO);
}
/*
 * Child thread: FUTEX_WAIT on the zero-filled anonymous page while
 * expecting value 1.  Because the page reads 0, the kernel should
 * return immediately with EWOULDBLOCK rather than block; any other
 * outcome is reported via the child_ret/child_blocked globals.
 */
void *wait_thread(void *arg)
{
	int res;

	child_ret = RET_PASS;
	res = futex_wait(buf, 1, NULL, 0);
	child_blocked = 0;

	/* immediate EWOULDBLOCK is the expected (passing) result */
	if (res != 0 && errno != EWOULDBLOCK) {
		error("futex failure\n", errno);
		child_ret = RET_ERROR;
	}
	pthread_exit(NULL);
}
/*
 * Map a fresh anonymous (zero) page, then have a child thread
 * FUTEX_WAIT on it; the wait must return immediately.  If the child is
 * still blocked after WAIT_US the zero-page handling in futex is broken.
 */
int main(int argc, char **argv)
{
	int c, ret = RET_PASS;
	long page_size;
	pthread_t thr;

	while ((c = getopt(argc, argv, "chv:")) != -1) {
		switch (c) {
		case 'c':
			log_color(1);
			break;
		case 'h':
			usage(basename(argv[0]));
			exit(0);
		case 'v':
			log_verbosity(atoi(optarg));
			break;
		default:
			usage(basename(argv[0]));
			exit(1);
		}
	}

	page_size = sysconf(_SC_PAGESIZE);

	/*
	 * MAP_ANONYMOUS portably requires fd == -1 (Linux tolerates 0,
	 * other systems do not), and mmap failure is signalled by
	 * MAP_FAILED, not a bare (void *)-1 comparison.
	 */
	buf = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
		   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		error("mmap\n", errno);
		exit(1);
	}

	printf("%s: Test the uninitialized futex value in FUTEX_WAIT\n",
	       basename(argv[0]));

	ret = pthread_create(&thr, NULL, wait_thread, NULL);
	if (ret) {
		/*
		 * pthread_create() returns the error number directly and
		 * does not set errno, so report its return value.
		 */
		error("pthread_create\n", ret);
		ret = RET_ERROR;
		goto out;
	}

	info("waiting %dus for child to return\n", WAIT_US);
	usleep(WAIT_US);

	ret = child_ret;
	if (child_blocked) {
		fail("child blocked in kernel\n");
		ret = RET_FAIL;
	}

 out:
	print_result(ret);
	return ret;
}
| gpl-2.0 |
Pure4Team/desire820 | drivers/char/hw_random/bcm2835-rng.c | 2422 | 2754 | /**
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
* Copyright (c) 2013 Lubomir Rintel
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License ("GPL")
* version 2, as published by the Free Software Foundation.
*/
#include <linux/hw_random.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
#define RNG_DATA 0x8
/* enable rng */
#define RNG_RBGEN 0x1
/* the initial numbers generated are "less random" so will be discarded */
#define RNG_WARMUP_COUNT 0x40000
/*
 * hwrng ->read callback.  The top byte of RNG_STATUS (bits 31:24) is
 * polled until non-zero, then one 32-bit word is pulled from RNG_DATA.
 * When !wait and no data is ready, returns 0 without blocking.
 *
 * NOTE(review): assumes the hwrng core never calls with max < 4 and
 * that bits 31:24 of RNG_STATUS indicate data availability — confirm
 * against the BCM2835 peripheral documentation.
 */
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
			       bool wait)
{
	void __iomem *rng_base = (void __iomem *)rng->priv;

	while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
		if (!wait)
			return 0;
		cpu_relax();
	}

	*(u32 *)buf = __raw_readl(rng_base + RNG_DATA);
	return sizeof(u32);
}
/* hwrng descriptor; .priv carries the ioremapped register base */
static struct hwrng bcm2835_rng_ops = {
	.name	= "bcm2835",
	.read	= bcm2835_rng_read,
};
/*
 * Probe: map the RNG registers from the device tree, bring the block
 * up (warm-up count + enable), then register with the hwrng core.
 *
 * The hardware is enabled *before* hwrng_register() because the hwrng
 * core may invoke ->read as soon as registration succeeds; with the
 * RNG still disabled that read would spin forever on RNG_STATUS.
 */
static int bcm2835_rng_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	void __iomem *rng_base;
	int err;

	/* map peripheral */
	rng_base = of_iomap(np, 0);
	if (!rng_base) {
		dev_err(dev, "failed to remap rng regs");
		return -ENODEV;
	}
	bcm2835_rng_ops.priv = (unsigned long)rng_base;

	/* set warm-up count & enable before the core can call ->read */
	__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
	__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);

	/* register driver */
	err = hwrng_register(&bcm2835_rng_ops);
	if (err) {
		dev_err(dev, "hwrng registration failed\n");
		/* undo the hardware enable and the mapping */
		__raw_writel(0, rng_base + RNG_CTRL);
		iounmap(rng_base);
	} else {
		dev_info(dev, "hwrng registered\n");
	}

	return err;
}
static int bcm2835_rng_remove(struct platform_device *pdev)
{
void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
/* disable rng hardware */
__raw_writel(0, rng_base + RNG_CTRL);
/* unregister driver */
hwrng_unregister(&bcm2835_rng_ops);
iounmap(rng_base);
return 0;
}
/* Device-tree match table: binds to "brcm,bcm2835-rng" nodes. */
static const struct of_device_id bcm2835_rng_of_match[] = {
	{ .compatible = "brcm,bcm2835-rng", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);

/* Platform driver glue; probe/remove bound via the OF match table. */
static struct platform_driver bcm2835_rng_driver = {
	.driver = {
		.name = "bcm2835-rng",
		.owner = THIS_MODULE,
		.of_match_table = bcm2835_rng_of_match,
	},
	.probe		= bcm2835_rng_probe,
	.remove		= bcm2835_rng_remove,
};
module_platform_driver(bcm2835_rng_driver);

MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
WarheadsSE/OX820-3.1-Linux | arch/arm/mach-s3c2410/mach-bast.c | 2678 | 15694 | /* linux/arch/arm/mach-s3c2410/mach-bast.c
*
* Copyright 2003-2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://www.simtec.co.uk/products/EB2410ITX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/dm9000.h>
#include <linux/ata_platform.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <net/ax88796.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/bast-map.h>
#include <mach/bast-irq.h>
#include <mach/bast-cpld.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
//#include <asm/debug-ll.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <plat/hwmon.h>
#include <plat/nand.h>
#include <plat/iic.h>
#include <mach/fb.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/serial_8250.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
#include <plat/gpio-cfg.h>
#include <plat/audio-simtec.h>
#include "usb-simtec.h"
#include "nor-simtec.h"
#define COPYRIGHT ", Copyright 2004-2008 Simtec Electronics"
/* macros for virtual address mods for the io space entries */
#define VA_C5(item) ((unsigned long)(item) + BAST_VAM_CS5)
#define VA_C4(item) ((unsigned long)(item) + BAST_VAM_CS4)
#define VA_C3(item) ((unsigned long)(item) + BAST_VAM_CS3)
#define VA_C2(item) ((unsigned long)(item) + BAST_VAM_CS2)
/* macros to modify the physical addresses for io space */
#define PA_CS2(item) (__phys_to_pfn((item) + S3C2410_CS2))
#define PA_CS3(item) (__phys_to_pfn((item) + S3C2410_CS3))
#define PA_CS4(item) (__phys_to_pfn((item) + S3C2410_CS4))
#define PA_CS5(item) (__phys_to_pfn((item) + S3C2410_CS5))
static struct map_desc bast_iodesc[] __initdata = {
/* ISA IO areas */
{
.virtual = (u32)S3C24XX_VA_ISA_BYTE,
.pfn = PA_CS2(BAST_PA_ISAIO),
.length = SZ_16M,
.type = MT_DEVICE,
}, {
.virtual = (u32)S3C24XX_VA_ISA_WORD,
.pfn = PA_CS3(BAST_PA_ISAIO),
.length = SZ_16M,
.type = MT_DEVICE,
},
/* bast CPLD control registers, and external interrupt controls */
{
.virtual = (u32)BAST_VA_CTRL1,
.pfn = __phys_to_pfn(BAST_PA_CTRL1),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)BAST_VA_CTRL2,
.pfn = __phys_to_pfn(BAST_PA_CTRL2),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)BAST_VA_CTRL3,
.pfn = __phys_to_pfn(BAST_PA_CTRL3),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)BAST_VA_CTRL4,
.pfn = __phys_to_pfn(BAST_PA_CTRL4),
.length = SZ_1M,
.type = MT_DEVICE,
},
/* PC104 IRQ mux */
{
.virtual = (u32)BAST_VA_PC104_IRQREQ,
.pfn = __phys_to_pfn(BAST_PA_PC104_IRQREQ),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)BAST_VA_PC104_IRQRAW,
.pfn = __phys_to_pfn(BAST_PA_PC104_IRQRAW),
.length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (u32)BAST_VA_PC104_IRQMASK,
.pfn = __phys_to_pfn(BAST_PA_PC104_IRQMASK),
.length = SZ_1M,
.type = MT_DEVICE,
},
/* peripheral space... one for each of fast/slow/byte/16bit */
/* note, ide is only decoded in word space, even though some registers
* are only 8bit */
/* slow, byte */
{ VA_C2(BAST_VA_ISAIO), PA_CS2(BAST_PA_ISAIO), SZ_16M, MT_DEVICE },
{ VA_C2(BAST_VA_ISAMEM), PA_CS2(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C2(BAST_VA_SUPERIO), PA_CS2(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
/* slow, word */
{ VA_C3(BAST_VA_ISAIO), PA_CS3(BAST_PA_ISAIO), SZ_16M, MT_DEVICE },
{ VA_C3(BAST_VA_ISAMEM), PA_CS3(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C3(BAST_VA_SUPERIO), PA_CS3(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
/* fast, byte */
{ VA_C4(BAST_VA_ISAIO), PA_CS4(BAST_PA_ISAIO), SZ_16M, MT_DEVICE },
{ VA_C4(BAST_VA_ISAMEM), PA_CS4(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C4(BAST_VA_SUPERIO), PA_CS4(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
/* fast, word */
{ VA_C5(BAST_VA_ISAIO), PA_CS5(BAST_PA_ISAIO), SZ_16M, MT_DEVICE },
{ VA_C5(BAST_VA_ISAMEM), PA_CS5(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C5(BAST_VA_SUPERIO), PA_CS5(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
};
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
static struct s3c24xx_uart_clksrc bast_serial_clocks[] = {
[0] = {
.name = "uclk",
.divisor = 1,
.min_baud = 0,
.max_baud = 0,
},
[1] = {
.name = "pclk",
.divisor = 1,
.min_baud = 0,
.max_baud = 0,
}
};
static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = bast_serial_clocks,
.clocks_size = ARRAY_SIZE(bast_serial_clocks),
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = bast_serial_clocks,
.clocks_size = ARRAY_SIZE(bast_serial_clocks),
},
/* port 2 is not actually used */
[2] = {
.hwport = 2,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = bast_serial_clocks,
.clocks_size = ARRAY_SIZE(bast_serial_clocks),
}
};
/* NAND Flash on BAST board */
#ifdef CONFIG_PM
/*
 * Suspend hook: drive GPA21 high as a plain GPIO output so that the
 * pin does not emit an nRESET while suspended/resuming.
 */
static int bast_pm_suspend(void)
{
	/* ensure that an nRESET is not generated on resume. */
	gpio_direction_output(S3C2410_GPA(21), 1);
	return 0;
}

/* Resume hook: restore GPA21 to its nRSTOUT special function. */
static void bast_pm_resume(void)
{
	s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
}

#else
#define bast_pm_suspend NULL
#define bast_pm_resume NULL
#endif

/* Registered with register_syscore_ops() in bast_init(). */
static struct syscore_ops bast_pm_syscore_ops = {
	.suspend	= bast_pm_suspend,
	.resume		= bast_pm_resume,
};
static int smartmedia_map[] = { 0 };
static int chip0_map[] = { 1 };
static int chip1_map[] = { 2 };
static int chip2_map[] = { 3 };
static struct mtd_partition __initdata bast_default_nand_part[] = {
[0] = {
.name = "Boot Agent",
.size = SZ_16K,
.offset = 0,
},
[1] = {
.name = "/boot",
.size = SZ_4M - SZ_16K,
.offset = SZ_16K,
},
[2] = {
.name = "user",
.offset = SZ_4M,
.size = MTDPART_SIZ_FULL,
}
};
/* the bast has 4 selectable slots for nand-flash, the three
* on-board chip areas, as well as the external SmartMedia
* slot.
*
* Note, there is no current hot-plug support for the SmartMedia
* socket.
*/
static struct s3c2410_nand_set __initdata bast_nand_sets[] = {
[0] = {
.name = "SmartMedia",
.nr_chips = 1,
.nr_map = smartmedia_map,
.options = NAND_SCAN_SILENT_NODEV,
.nr_partitions = ARRAY_SIZE(bast_default_nand_part),
.partitions = bast_default_nand_part,
},
[1] = {
.name = "chip0",
.nr_chips = 1,
.nr_map = chip0_map,
.nr_partitions = ARRAY_SIZE(bast_default_nand_part),
.partitions = bast_default_nand_part,
},
[2] = {
.name = "chip1",
.nr_chips = 1,
.nr_map = chip1_map,
.options = NAND_SCAN_SILENT_NODEV,
.nr_partitions = ARRAY_SIZE(bast_default_nand_part),
.partitions = bast_default_nand_part,
},
[3] = {
.name = "chip2",
.nr_chips = 1,
.nr_map = chip2_map,
.options = NAND_SCAN_SILENT_NODEV,
.nr_partitions = ARRAY_SIZE(bast_default_nand_part),
.partitions = bast_default_nand_part,
}
};
/*
 * Select one of the four NAND slots (SmartMedia + three on-board chips)
 * through CPLD control register 2.  The two low bits of CTRL2 pick the
 * slot; of the previous register state only the IDE-reset bit is kept,
 * and the WNAND bit is set alongside the new slot number.
 */
static void bast_nand_select(struct s3c2410_nand_set *set, int slot)
{
	unsigned int tmp;

	slot = set->nr_map[slot] & 3;
	pr_debug("bast_nand: selecting slot %d (set %p,%p)\n",
		 slot, set, set->nr_map);

	tmp = __raw_readb(BAST_VA_CTRL2);
	tmp &= BAST_CPLD_CTLR2_IDERST;	/* preserve only the IDE reset bit */
	tmp |= slot;
	tmp |= BAST_CPLD_CTRL2_WNAND;

	pr_debug("bast_nand: ctrl2 now %02x\n", tmp);

	__raw_writeb(tmp, BAST_VA_CTRL2);
}
static struct s3c2410_platform_nand __initdata bast_nand_info = {
.tacls = 30,
.twrph0 = 60,
.twrph1 = 60,
.nr_sets = ARRAY_SIZE(bast_nand_sets),
.sets = bast_nand_sets,
.select_chip = bast_nand_select,
};
/* DM9000 */
static struct resource bast_dm9k_resource[] = {
[0] = {
.start = S3C2410_CS5 + BAST_PA_DM9000,
.end = S3C2410_CS5 + BAST_PA_DM9000 + 3,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = S3C2410_CS5 + BAST_PA_DM9000 + 0x40,
.end = S3C2410_CS5 + BAST_PA_DM9000 + 0x40 + 0x3f,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = IRQ_DM9000,
.end = IRQ_DM9000,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
}
};
/* for the moment we limit ourselves to 16bit IO until some
* better IO routines can be written and tested
*/
static struct dm9000_plat_data bast_dm9k_platdata = {
.flags = DM9000_PLATF_16BITONLY,
};
static struct platform_device bast_device_dm9k = {
.name = "dm9000",
.id = 0,
.num_resources = ARRAY_SIZE(bast_dm9k_resource),
.resource = bast_dm9k_resource,
.dev = {
.platform_data = &bast_dm9k_platdata,
}
};
/* serial devices */
#define SERIAL_BASE (S3C2410_CS2 + BAST_PA_SUPERIO)
#define SERIAL_FLAGS (UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SHARE_IRQ)
#define SERIAL_CLK (1843200)
static struct plat_serial8250_port bast_sio_data[] = {
[0] = {
.mapbase = SERIAL_BASE + 0x2f8,
.irq = IRQ_PCSERIAL1,
.flags = SERIAL_FLAGS,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = SERIAL_CLK,
},
[1] = {
.mapbase = SERIAL_BASE + 0x3f8,
.irq = IRQ_PCSERIAL2,
.flags = SERIAL_FLAGS,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = SERIAL_CLK,
},
{ }
};
static struct platform_device bast_sio = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = &bast_sio_data,
},
};
/* we have devices on the bus which cannot work much over the
* standard 100KHz i2c bus frequency
*/
static struct s3c2410_platform_i2c __initdata bast_i2c_info = {
.flags = 0,
.slave_addr = 0x10,
.frequency = 100*1000,
};
/* Asix AX88796 10/100 ethernet controller */
static struct ax_plat_data bast_asix_platdata = {
.flags = AXFLG_MAC_FROMDEV,
.wordlength = 2,
.dcr_val = 0x48,
.rcr_val = 0x40,
};
static struct resource bast_asix_resource[] = {
[0] = {
.start = S3C2410_CS5 + BAST_PA_ASIXNET,
.end = S3C2410_CS5 + BAST_PA_ASIXNET + (0x18 * 0x20) - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = S3C2410_CS5 + BAST_PA_ASIXNET + (0x1f * 0x20),
.end = S3C2410_CS5 + BAST_PA_ASIXNET + (0x1f * 0x20),
.flags = IORESOURCE_MEM,
},
[2] = {
.start = IRQ_ASIX,
.end = IRQ_ASIX,
.flags = IORESOURCE_IRQ
}
};
static struct platform_device bast_device_asix = {
.name = "ax88796",
.id = 0,
.num_resources = ARRAY_SIZE(bast_asix_resource),
.resource = bast_asix_resource,
.dev = {
.platform_data = &bast_asix_platdata
}
};
/* Asix AX88796 10/100 ethernet controller parallel port */
static struct resource bast_asixpp_resource[] = {
[0] = {
.start = S3C2410_CS5 + BAST_PA_ASIXNET + (0x18 * 0x20),
.end = S3C2410_CS5 + BAST_PA_ASIXNET + (0x1b * 0x20) - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device bast_device_axpp = {
.name = "ax88796-pp",
.id = 0,
.num_resources = ARRAY_SIZE(bast_asixpp_resource),
.resource = bast_asixpp_resource,
};
/* LCD/VGA controller */
static struct s3c2410fb_display __initdata bast_lcd_info[] = {
{
.type = S3C2410_LCDCON1_TFT,
.width = 640,
.height = 480,
.pixclock = 33333,
.xres = 640,
.yres = 480,
.bpp = 4,
.left_margin = 40,
.right_margin = 20,
.hsync_len = 88,
.upper_margin = 30,
.lower_margin = 32,
.vsync_len = 3,
.lcdcon5 = 0x00014b02,
},
{
.type = S3C2410_LCDCON1_TFT,
.width = 640,
.height = 480,
.pixclock = 33333,
.xres = 640,
.yres = 480,
.bpp = 8,
.left_margin = 40,
.right_margin = 20,
.hsync_len = 88,
.upper_margin = 30,
.lower_margin = 32,
.vsync_len = 3,
.lcdcon5 = 0x00014b02,
},
{
.type = S3C2410_LCDCON1_TFT,
.width = 640,
.height = 480,
.pixclock = 33333,
.xres = 640,
.yres = 480,
.bpp = 16,
.left_margin = 40,
.right_margin = 20,
.hsync_len = 88,
.upper_margin = 30,
.lower_margin = 32,
.vsync_len = 3,
.lcdcon5 = 0x00014b02,
},
};
/* LCD/VGA controller */
static struct s3c2410fb_mach_info __initdata bast_fb_info = {
.displays = bast_lcd_info,
.num_displays = ARRAY_SIZE(bast_lcd_info),
.default_display = 1,
};
/* I2C devices fitted. */
static struct i2c_board_info bast_i2c_devs[] __initdata = {
{
I2C_BOARD_INFO("tlv320aic23", 0x1a),
}, {
I2C_BOARD_INFO("simtec-pmu", 0x6b),
}, {
I2C_BOARD_INFO("ch7013", 0x75),
},
};
static struct s3c_hwmon_pdata bast_hwmon_info = {
/* LCD contrast (0-6.6V) */
.in[0] = &(struct s3c_hwmon_chcfg) {
.name = "lcd-contrast",
.mult = 3300,
.div = 512,
},
/* LED current feedback */
.in[1] = &(struct s3c_hwmon_chcfg) {
.name = "led-feedback",
.mult = 3300,
.div = 1024,
},
/* LCD feedback (0-6.6V) */
.in[2] = &(struct s3c_hwmon_chcfg) {
.name = "lcd-feedback",
.mult = 3300,
.div = 512,
},
/* Vcore (1.8-2.0V), Vref 3.3V */
.in[3] = &(struct s3c_hwmon_chcfg) {
.name = "vcore",
.mult = 3300,
.div = 1024,
},
};
/* Standard BAST devices */
// cat /sys/devices/platform/s3c24xx-adc/s3c-hwmon/in_0
/* Platform devices registered en masse by bast_init(). */
static struct platform_device *bast_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_lcd,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_rtc,
	&s3c_device_nand,
	&s3c_device_adc,
	&s3c_device_hwmon,
	&bast_device_dm9k,
	&bast_device_asix,
	&bast_device_axpp,
	&bast_sio,
};
static struct clk *bast_clocks[] __initdata = {
&s3c24xx_dclk0,
&s3c24xx_dclk1,
&s3c24xx_clkout0,
&s3c24xx_clkout1,
&s3c24xx_uclk,
};
static struct s3c_cpufreq_board __initdata bast_cpufreq = {
.refresh = 7800, /* 7.8usec */
.auto_io = 1,
.need_io = 1,
};
static struct s3c24xx_audio_simtec_pdata __initdata bast_audio = {
.have_mic = 1,
.have_lout = 1,
};
/*
 * Early machine setup: wire the board clock tree (both DCLKs fed from
 * UPLL, CLKOUT0/1 from the DCLKs, UART uclk from CLKOUT1), register
 * those clocks and the hwmon channel config, install the static I/O
 * mappings, then initialise core clocks and the three UARTs.
 */
static void __init bast_map_io(void)
{
	/* initialise the clocks */

	s3c24xx_dclk0.parent = &clk_upll;
	s3c24xx_dclk0.rate   = 12*1000*1000;

	s3c24xx_dclk1.parent = &clk_upll;
	s3c24xx_dclk1.rate   = 24*1000*1000;

	s3c24xx_clkout0.parent  = &s3c24xx_dclk0;
	s3c24xx_clkout1.parent  = &s3c24xx_dclk1;

	s3c24xx_uclk.parent  = &s3c24xx_clkout1;

	s3c24xx_register_clocks(bast_clocks, ARRAY_SIZE(bast_clocks));

	s3c_hwmon_set_platdata(&bast_hwmon_info);

	s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc));
	s3c24xx_init_clocks(0);
	s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs));
}
/*
 * Board init: hook up PM syscore ops and platform data (I2C, NAND,
 * framebuffer), register the platform devices and I2C board info,
 * bring up the Simtec USB/NOR/audio helpers, claim the nRESET GPIO
 * used by the PM hooks, and hand the cpufreq layer its board limits.
 */
static void __init bast_init(void)
{
	register_syscore_ops(&bast_pm_syscore_ops);

	s3c_i2c0_set_platdata(&bast_i2c_info);
	s3c_nand_set_platdata(&bast_nand_info);
	s3c24xx_fb_set_platdata(&bast_fb_info);
	platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices));

	i2c_register_board_info(0, bast_i2c_devs,
				ARRAY_SIZE(bast_i2c_devs));

	usb_simtec_init();
	nor_simtec_init();
	simtec_audio_add(NULL, true, &bast_audio);

	/* GPA21 is driven by the suspend/resume hooks above */
	WARN_ON(gpio_request(S3C2410_GPA(21), "bast nreset"));

	s3c_cpufreq_setboard(&bast_cpufreq);
}
/* Machine descriptor for the Simtec EB2410ITX ("BAST") board. */
MACHINE_START(BAST, "Simtec-BAST")
	/* Maintainer: Ben Dooks <ben@simtec.co.uk> */
	.boot_params	= S3C2410_SDRAM_PA + 0x100,
	.map_io		= bast_map_io,
	.init_irq	= s3c24xx_init_irq,
	.init_machine	= bast_init,
	.timer		= &s3c24xx_timer,
MACHINE_END
| gpl-2.0 |
nbars/Custom-Kernel-SM-P600 | kernel-src/drivers/md/persistent-data/dm-space-map-common.c | 3958 | 16086 | /*
* Copyright (C) 2011 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"
#include <linux/bitops.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "space map common"
/*----------------------------------------------------------------*/
/*
* Index validator.
*/
#define INDEX_CSUM_XOR 160478
/* Stamp the block number and checksum into an index block pre-write. */
static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	size_t csum_len = block_size - sizeof(__le32);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding, csum_len,
						 INDEX_CSUM_XOR));
}
/*
 * Validate an index block read from disk: the stored block number must
 * match the location it was read from (-ENOTBLK otherwise) and the
 * checksum over the body must match (-EILSEQ otherwise).
 */
static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	/* a mismatched blocknr means the block was read from the wrong place */
	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR("index_check failed blocknr %llu wanted %llu",
		      le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR("index_check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}
/* Validator applied to metadata index blocks on read and write. */
static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};
/*----------------------------------------------------------------*/
/*
* Bitmap validator
*/
#define BITMAP_CSUM_XOR 240779
/* Stamp the block number and checksum into a bitmap block pre-write. */
static void bitmap_prepare_for_write(struct dm_block_validator *v,
				     struct dm_block *b,
				     size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	size_t csum_len = block_size - sizeof(__le32);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       csum_len,
						       BITMAP_CSUM_XOR));
}
/*
 * Validate a bitmap block read from disk: block number must match the
 * read location (-ENOTBLK) and the body checksum must match (-EILSEQ).
 */
static int bitmap_check(struct dm_block_validator *v,
			struct dm_block *b,
			size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR("bitmap check failed blocknr %llu wanted %llu",
		      le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR("bitmap check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}
/* Validator applied to space-map bitmap blocks on read and write. */
static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = bitmap_prepare_for_write,
	.check = bitmap_check
};
/*----------------------------------------------------------------*/
#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT 5
static void *dm_bitmap_data(struct dm_block *b)
{
return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}
#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
/*
 * Return non-zero when every 2-bit entry in the 64-bit word containing
 * entry @b is non-zero, i.e. the word holds no free entries.  Used by
 * sm_find_free() to skip fully-used words 32 entries at a time.
 *
 * NOTE(review): the carry-based trick (add WORD_MASK_HIGH + 1, mask,
 * then test surviving bits in ~bits) detects a zero pair per field —
 * confirm against the persistent-data design notes.
 */
static unsigned bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}
/*
 * Read the 2-bit reference count entry for block @b from the bitmap at
 * @addr.  Each entry occupies two adjacent little-endian bits (hi, lo),
 * giving values 0..3; 3 means "count lives in the overflow btree".
 */
static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	/* bit offset of this entry's pair within the word */
	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}
/*
 * Write the 2-bit entry for block @b in the bitmap at @addr.  @val is
 * 0..3; bit 1 of @val goes to the first (hi) bit of the pair, bit 0 to
 * the second (lo) bit, mirroring sm_lookup_bitmap().
 */
static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}
/*
 * Scan bitmap entries [begin, end) for one whose 2-bit count is zero.
 * Stores the index in *result and returns 0, or -ENOSPC if none.
 */
static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	unsigned b = begin;

	while (b < end) {
		/* at a word boundary, skip whole words with no free entries */
		if (!(b & (ENTRIES_PER_WORD - 1)) &&
		    bitmap_word_used(addr, b)) {
			b += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, b)) {
			*result = b;
			return 0;
		}

		b++;
	}

	return -ENOSPC;
}
/*----------------------------------------------------------------*/
/*
 * Common initialisation for a low-level space map: set up the btree
 * infos for the bitmap index and the ref-count overflow tree, and
 * derive entries_per_block from the transaction manager's block size.
 * Returns -EINVAL for implausibly large block sizes.
 */
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	/* data bytes after the header, times four 2-bit entries per byte */
	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
		ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;

	return 0;
}
/*
 * Grow the space map by @extra_blocks data blocks: allocate a fresh
 * bitmap block for each newly-needed index slot, record it (fully
 * free) via save_ie, and bump nr_blocks.  Fails with -EINVAL if the
 * index would exceed the implementation's maximum entry count.
 */
int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/* one new bitmap block per additional index slot */
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;
		idx.blocknr = cpu_to_le64(dm_block_location(b));

		r = dm_tm_unlock(ll->tm, b);
		if (r < 0)
			return r;

		/* brand new block: every entry free, first free at 0 */
		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	ll->nr_blocks = nr_blocks;
	return 0;
}
/*
 * Read the raw 2-bit bitmap entry (0..3) for block @b.  A result of 3
 * means the true count lives in the overflow btree — see sm_ll_lookup().
 */
int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	/* split b into (index slot, entry within that bitmap block) */
	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	return dm_tm_unlock(ll->tm, blk);
}
/*
 * Full reference-count lookup for block @b: read the 2-bit bitmap
 * entry and, when it holds the overflow sentinel (3), fetch the real
 * count from the ref-count btree.
 */
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	__le32 le_rc;

	r = sm_ll_lookup_bitmap(ll, b, result);
	if (r || *result != 3)
		return r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}
/*
 * Find a block with a zero reference count in the range [begin, end).
 * Walks the index entries, skipping bitmap blocks whose cached free
 * count is zero, and within each candidate block starts from the
 * none_free_before hint.  Stores the block number in *result and
 * returns 0, or -ENOSPC if the range contains no free block.
 */
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	/* convert the range endpoints to (index slot, in-block offset) */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		/* whole bitmap block is full; move on */
		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		/* only the last block is bounded by the caller's `end` */
		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;

		} else if (r < 0) {
			dm_tm_unlock(ll->tm, blk);
			return r;
		}

		r = dm_tm_unlock(ll->tm, blk);
		if (r < 0)
			return r;

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}
/*
 * Set the reference count of block @b to @ref_count.
 *
 * Counts 0..2 live directly in the 2-bit bitmap entry; counts >= 3
 * store the sentinel 3 in the bitmap and the real value in the
 * overflow btree keyed by block number.  The bitmap block is updated
 * via a shadow (copy-on-write) so the index entry's blocknr is
 * refreshed, its free count and none_free_before hint are kept in
 * step, and *ev reports SM_ALLOC (0 -> non-zero) or SM_FREE
 * (non-zero -> 0) transitions.
 */
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	/* the shadow may live at a new location; remember it */
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		/* the previous count overflowed; drop its btree entry */
		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
		/* the hint pointed at this entry; push it past us */
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	}

	return ll->save_ie(ll, index, &ie_disk);
}
/* Increment block @b's reference count by one. */
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	uint32_t rc;
	int r = sm_ll_lookup(ll, b, &rc);

	if (r)
		return r;

	return sm_ll_insert(ll, b, rc + 1, ev);
}
/* Decrement block @b's reference count; -EINVAL if it is already 0. */
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	uint32_t rc;
	int r = sm_ll_lookup(ll, b, &rc);

	if (r)
		return r;

	if (!rc)
		return -EINVAL;

	return sm_ll_insert(ll, b, rc - 1, ev);
}
/* Flush in-core space-map state via the flavour-specific commit hook. */
int sm_ll_commit(struct ll_disk *ll)
{
	return ll->commit(ll);
}
/*----------------------------------------------------------------*/
/* Metadata flavour: index entries are cached in-core in ll->mi_le. */
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	*ie = ll->mi_le.index[index];
	return 0;
}
/* Metadata flavour: store an index entry into the in-core copy. */
static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->mi_le.index[index] = *ie;
	return 0;
}
/*
 * Write the in-core index (ll->mi_le) to a fresh on-disk block and
 * record that block's location as the bitmap root.
 */
static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	return dm_tm_unlock(ll->tm, b);
}
/* Load the on-disk index block at bitmap_root into the in-core copy. */
static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	return dm_tm_unlock(ll->tm, block);
}
/* Metadata flavour's index capacity: fixed at MAX_METADATA_BITMAPS. */
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}
/*
 * Commit the metadata flavour: shadow the index block, write the
 * in-core index into it, and update bitmap_root to the new location.
 */
static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	return dm_tm_unlock(ll->tm, b);
}
/*
 * Create a brand new, empty metadata space map on transaction manager
 * @tm: wire up the metadata flavour of the index-entry hooks, create an
 * empty index block and an empty reference-count btree.
 */
int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
int r;
r = sm_ll_init(ll, tm);
if (r < 0)
return r;
ll->load_ie = metadata_ll_load_ie;
ll->save_ie = metadata_ll_save_ie;
ll->init_index = metadata_ll_init_index;
ll->open_index = metadata_ll_open;
ll->max_entries = metadata_ll_max_entries;
ll->commit = metadata_ll_commit;
ll->nr_blocks = 0;
ll->nr_allocated = 0;
r = ll->init_index(ll);
if (r < 0)
return r;
r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
if (r < 0)
return r;
return 0;
}
/*
 * Open an existing metadata space map from a packed little-endian root.
 * @root_le must hold at least sizeof(struct disk_sm_root) bytes.
 *
 * NOTE(review): -ENOMEM for a short root is odd (this is a format
 * problem, not an allocation failure) - kept for compatibility.
 */
int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
void *root_le, size_t len)
{
int r;
struct disk_sm_root *smr = root_le;
if (len < sizeof(struct disk_sm_root)) {
DMERR("sm_metadata root too small");
return -ENOMEM;
}
r = sm_ll_init(ll, tm);
if (r < 0)
return r;
ll->load_ie = metadata_ll_load_ie;
ll->save_ie = metadata_ll_save_ie;
ll->init_index = metadata_ll_init_index;
ll->open_index = metadata_ll_open;
ll->max_entries = metadata_ll_max_entries;
ll->commit = metadata_ll_commit;
ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
return ll->open_index(ll);
}
/*----------------------------------------------------------------*/

/*
 * Disk flavour of the index-entry hooks.  The index is too large to fit
 * in a single block, so entries live in a btree keyed by bitmap index.
 */
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}
static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
/* entries are stored little-endian; bless before handing to the btree */
__dm_bless_for_disk(ie);
return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
&index, ie, &ll->bitmap_root);
}
static int disk_ll_init_index(struct ll_disk *ll)
{
return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}
static int disk_ll_open(struct ll_disk *ll)
{
/* nothing to do */
return 0;
}
/* The btree-backed index is effectively unbounded. */
static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
return -1ULL;
}
static int disk_ll_commit(struct ll_disk *ll)
{
/* btree updates go through the tm directly; nothing cached to flush */
return 0;
}
/*
 * Create a brand new, empty disk space map on transaction manager @tm:
 * wire up the disk (btree-backed) flavour of the index-entry hooks and
 * create empty bitmap-index and reference-count btrees.
 */
int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
int r;
r = sm_ll_init(ll, tm);
if (r < 0)
return r;
ll->load_ie = disk_ll_load_ie;
ll->save_ie = disk_ll_save_ie;
ll->init_index = disk_ll_init_index;
ll->open_index = disk_ll_open;
ll->max_entries = disk_ll_max_entries;
ll->commit = disk_ll_commit;
ll->nr_blocks = 0;
ll->nr_allocated = 0;
r = ll->init_index(ll);
if (r < 0)
return r;
r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
if (r < 0)
return r;
return 0;
}
/*
 * Open an existing disk space map from a packed little-endian root.
 * @root_le must hold at least sizeof(struct disk_sm_root) bytes.
 *
 * NOTE(review): -ENOMEM for a short root is odd (this is a format
 * problem, not an allocation failure) - kept for compatibility.
 */
int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		/* Fixed: message previously said "sm_metadata", copied from
		 * the metadata variant, which made diagnostics misleading. */
		DMERR("sm_disk root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	/* btree-backed (disk) flavour of the index-entry hooks */
	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	/* restore counters and roots saved by the matching copy_root() */
	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}
/*----------------------------------------------------------------*/
| gpl-2.0 |
jmztaylor/android_kernel_htc_a3ul_new | arch/unicore32/kernel/pci.c | 4470 | 10218 | /*
* linux/arch/unicore32/kernel/pci.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* PCI bios-type initialisation for PCI machines
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
/* set by the "debug" option in pcibios_setup() */
static int debug_pci;
/*
 * Build a type-0 configuration address: enable bit, bus number,
 * devfn and the dword-aligned register offset.
 */
#define CONFIG_CMD(bus, devfn, where) \
(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
/*
 * Read @size bytes from PCI config space.  The controller always
 * transfers a whole dword; sub-dword reads shift and mask the result.
 * @size is trusted to be 1, 2 or 4 (enforced by the PCI core).
 */
static int
puv3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
switch (size) {
case 1:
*value = (readl(PCICFG_DATA) >> ((where & 3) * 8)) & 0xFF;
break;
case 2:
*value = (readl(PCICFG_DATA) >> ((where & 2) * 8)) & 0xFFFF;
break;
case 4:
*value = readl(PCICFG_DATA);
break;
}
return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes to PCI config space.  Sub-dword writes do a
 * read-modify-write of the containing dword.
 */
static int
puv3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
switch (size) {
case 1:
writel((readl(PCICFG_DATA) & ~FMASK(8, (where&3)*8))
| FIELD(value, 8, (where&3)*8), PCICFG_DATA);
break;
case 2:
writel((readl(PCICFG_DATA) & ~FMASK(16, (where&2)*8))
| FIELD(value, 16, (where&2)*8), PCICFG_DATA);
break;
case 4:
writel(value, PCICFG_DATA);
break;
}
return PCIBIOS_SUCCESSFUL;
}
/* config accessors handed to the PCI core via pci_scan_bus() */
struct pci_ops pci_puv3_ops = {
.read = puv3_read_config,
.write = puv3_write_config,
};
/*
 * One-time bridge setup: program the AHB->PCI and PCI->AHB windows
 * (BARs, address masks, translation targets) and enable I/O + memory
 * decoding on the bridge.  Register/value pairs follow the PKUnity
 * bridge layout; do not reorder without consulting the SoC manual.
 */
void pci_puv3_preinit(void)
{
printk(KERN_DEBUG "PCI: PKUnity PCI Controller Initializing ...\n");
/* config PCI bridge base */
writel(io_v2p(PKUNITY_PCIBRI_BASE), PCICFG_BRIBASE);
writel(0, PCIBRI_AHBCTL0);
writel(io_v2p(PKUNITY_PCIBRI_BASE) | PCIBRI_BARx_MEM, PCIBRI_AHBBAR0);
writel(0xFFFF0000, PCIBRI_AHBAMR0);
writel(0, PCIBRI_AHBTAR0);
writel(PCIBRI_CTLx_AT, PCIBRI_AHBCTL1);
writel(io_v2p(PKUNITY_PCILIO_BASE) | PCIBRI_BARx_IO, PCIBRI_AHBBAR1);
writel(0xFFFF0000, PCIBRI_AHBAMR1);
writel(0x00000000, PCIBRI_AHBTAR1);
writel(PCIBRI_CTLx_PREF, PCIBRI_AHBCTL2);
writel(io_v2p(PKUNITY_PCIMEM_BASE) | PCIBRI_BARx_MEM, PCIBRI_AHBBAR2);
writel(0xF8000000, PCIBRI_AHBAMR2);
writel(0, PCIBRI_AHBTAR2);
writel(io_v2p(PKUNITY_PCIAHB_BASE) | PCIBRI_BARx_MEM, PCIBRI_BAR1);
writel(PCIBRI_CTLx_AT | PCIBRI_CTLx_PREF, PCIBRI_PCICTL0);
writel(io_v2p(PKUNITY_PCIAHB_BASE) | PCIBRI_BARx_MEM, PCIBRI_PCIBAR0);
writel(0xF8000000, PCIBRI_PCIAMR0);
writel(PKUNITY_SDRAM_BASE, PCIBRI_PCITAR0);
writel(readl(PCIBRI_CMD) | PCIBRI_CMD_IO | PCIBRI_CMD_MEM, PCIBRI_CMD);
}
/*
 * Board-specific routing from a bus-0 devfn to a PKUnity PCI interrupt.
 * Returns -1 when no routing is known for the device.
 */
static int __init pci_puv3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	if (dev->bus->number == 0) {
#ifdef CONFIG_ARCH_FPGA /* 4 pci slots */
		switch (dev->devfn) {
		case 0x00:
			return IRQ_PCIINTA;
		case 0x08:
			return IRQ_PCIINTB;
		case 0x10:
			return IRQ_PCIINTC;
		case 0x18:
			return IRQ_PCIINTD;
		}
#endif
#ifdef CONFIG_PUV3_DB0913 /* 3 pci slots */
		switch (dev->devfn) {
		case 0x30:
			return IRQ_PCIINTB;
		case 0x60:
			return IRQ_PCIINTC;
		case 0x58:
			return IRQ_PCIINTD;
		}
#endif
#if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919)
		/* only support 2 pci devices */
		if (dev->devfn == 0x00)
			return IRQ_PCIINTC; /* sata */
#endif
	}
	return -1;
}
/*
* Only first 128MB of memory can be accessed via PCI.
* We use GFP_DMA to allocate safe buffers to do map/unmap.
* This is really ugly and we need a better way of specifying
* DMA-capable regions of memory.
*/
/*
 * Split memory so that everything beyond the first 128MB lands in a
 * separate (non-DMA) zone; only the low 128MB is PCI-addressable here.
 */
void __init puv3_pci_adjust_zones(unsigned long *zone_size,
unsigned long *zhole_size)
{
unsigned int sz = SZ_128M >> PAGE_SHIFT;
/*
 * Only adjust if > 128M on current system
 */
if (zone_size[0] <= sz)
return;
/* move the excess pages (and any holes) into zone 1 */
zone_size[1] = zone_size[0] - sz;
zone_size[0] = sz;
zhole_size[1] = zhole_size[0];
zhole_size[0] = 0;
}
/* Record the routed IRQ in the device's config space. */
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{
if (debug_pci)
printk(KERN_DEBUG "PCI: Assigning IRQ %02d to %s\n",
irq, pci_name(dev));
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
/*
 * If the bus contains any of these devices, then we must not turn on
 * parity checking of any kind.
 */
/* No known-bad devices on this platform, so always report "fine". */
static inline int pdev_bad_for_parity(struct pci_dev *dev)
{
return 0;
}
/*
* pcibios_fixup_bus - Called after each bus is probed,
* but before its children are examined.
*/
/*
 * Two-pass fixup: first scan all devices to decide which PCI_COMMAND
 * features (SERR/parity/fast back-to-back) the whole bus can support,
 * then apply the agreed feature set to every device.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
u16 features = PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
| PCI_COMMAND_FAST_BACK;
/* the root bus spans the platform's full I/O and memory ranges */
bus->resource[0] = &ioport_resource;
bus->resource[1] = &iomem_resource;
/*
 * Walk the devices on this bus, working out what we can
 * and can't support.
 */
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 status;
pci_read_config_word(dev, PCI_STATUS, &status);
/*
 * If any device on this bus does not support fast back
 * to back transfers, then the bus as a whole is not able
 * to support them. Having fast back to back transfers
 * on saves us one PCI cycle per transaction.
 */
if (!(status & PCI_STATUS_FAST_BACK))
features &= ~PCI_COMMAND_FAST_BACK;
if (pdev_bad_for_parity(dev))
features &= ~(PCI_COMMAND_SERR
| PCI_COMMAND_PARITY);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
/* enable parity/master-abort reporting on bridges,
 * clear bus reset and (conservatively) fast b2b */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
status |= PCI_BRIDGE_CTL_PARITY
| PCI_BRIDGE_CTL_MASTER_ABORT;
status &= ~(PCI_BRIDGE_CTL_BUS_RESET
| PCI_BRIDGE_CTL_FAST_BACK);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
break;
case PCI_CLASS_BRIDGE_CARDBUS:
pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL,
&status);
status |= PCI_CB_BRIDGE_CTL_PARITY
| PCI_CB_BRIDGE_CTL_MASTER_ABORT;
pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL,
status);
break;
}
}
/*
 * Now walk the devices again, this time setting them up.
 */
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= features;
pci_write_config_word(dev, PCI_COMMAND, cmd);
/* cache line size register is in units of dwords */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
L1_CACHE_BYTES >> 2);
}
/*
 * Propagate the flags to the PCI bridge.
 */
if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
if (features & PCI_COMMAND_FAST_BACK)
bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
if (features & PCI_COMMAND_PARITY)
bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
}
/*
 * Report what we did for this bus
 */
printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_fixup_bus);
#endif
/*
 * Bring up the PCI subsystem: init the bridge, scan bus 0, route
 * interrupts and (unless the firmware's setup is trusted via
 * PCI_PROBE_ONLY) size and assign resources before adding devices.
 */
static int __init pci_common_init(void)
{
struct pci_bus *puv3_bus;
pci_puv3_preinit();
puv3_bus = pci_scan_bus(0, &pci_puv3_ops, NULL);
/* no PCI means an unusable system here, hence panic not error */
if (!puv3_bus)
panic("PCI: unable to scan bus!");
pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
if (!pci_has_flag(PCI_PROBE_ONLY)) {
/*
 * Size the bridge windows.
 */
pci_bus_size_bridges(puv3_bus);
/*
 * Assign resources.
 */
pci_bus_assign_resources(puv3_bus);
}
/*
 * Tell drivers about devices found.
 */
pci_bus_add_devices(puv3_bus);
return 0;
}
subsys_initcall(pci_common_init);
/*
 * Parse PCI kernel-parameter options understood by this port:
 *   "debug"    - verbose PCI setup messages
 *   "firmware" - trust the firmware's resource assignment (probe only)
 * Returns NULL when the option was consumed, else the untouched string.
 */
char * __devinit pcibios_setup(char *str)
{
	if (strcmp(str, "debug") == 0) {
		debug_pci = 1;
		return NULL;
	}
	if (strcmp(str, "firmware") == 0) {
		pci_add_flags(PCI_PROBE_ONLY);
		return NULL;
	}
	return str;
}
/* Required arch hook; this bridge needs no extra bus-master setup. */
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
/*
* From arch/i386/kernel/pci-i386.c:
*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff..
*/
/*
 * Choose an aligned start address for @res, keeping I/O allocations out
 * of the 0x100-0x3ff window of each 1K page (see the comment above:
 * some ISA cards decode only the low 10 address bits).
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	/* bump I/O starts that land in the mirrored 0x100-0x3ff window */
	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
		start = (start + 0x3ff) & ~0x3ff;

	/* round up to the requested power-of-two alignment */
	return (start + align - 1) & ~(align - 1);
}
/**
* pcibios_enable_device - Enable I/O and memory.
* @dev: PCI device to be enabled
*/
/*
 * Turn on I/O and/or memory decoding for the resources selected by
 * @mask, refusing if any requested BAR was left unassigned.
 * NOTE(review): only BARs 0-5 are checked here (idx < 6); ROM and
 * bridge windows are not covered - confirm that is intentional.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx = 0; idx < 6; idx++) {
/* Only set up the requested stuff */
if (!(mask & (1 << idx)))
continue;
r = dev->resource + idx;
/* start == 0 with a non-zero end marks a failed assignment */
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because"
" of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
/*
 * Bridges (eg, cardbus bridges) need to be fully enabled
 */
if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
/* only touch config space when something actually changed */
if (cmd != old_cmd) {
printk("PCI: enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/*
 * mmap() support for PCI resources.  Only memory-space mappings are
 * supported; I/O-space requests are rejected.
 * NOTE(review): vma->vm_pgoff is used directly as the physical PFN -
 * assumes the caller has already shifted the offset; confirm.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long phys;
if (mmap_state == pci_mmap_io)
return -EINVAL;
phys = vma->vm_pgoff;
/*
 * Mark this as IO
 */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, phys,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_goldfish | arch/s390/kernel/topology.c | 4470 | 10170 | /*
* Copyright IBM Corp. 2007,2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
/* function codes for the PTF (Perform Topology Function) instruction */
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
#define PTF_CHECK (2UL)
/* one node in a singly-linked chain of topology containers */
struct mask_info {
struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
/* cleared by the "topology=off" early parameter */
static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
/* per-core and per-book container chains plus derived per-cpu maps */
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization[NR_CPUS];
/*
 * Return the set of cpus sharing a topology container with @cpu.
 * Falls back to a singleton mask when topology support is disabled or
 * @cpu is not found in any container on the chain.
 */
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	struct mask_info *p;
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	for (p = info; p; p = p->next) {
		if (cpumask_test_cpu(cpu, &p->mask)) {
			mask = p->mask;
			break;
		}
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}
/*
 * Record the cpus described by one topology cpu entry in the current
 * book and core containers.  Returns the (possibly advanced) core
 * container: with @one_core_per_cpu (z10 layout) every cpu consumes
 * its own core entry.
 */
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
struct mask_info *core,
int one_core_per_cpu)
{
unsigned int cpu;
/* walk every set bit of the entry's cpu mask */
for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
cpu < TOPOLOGY_CPU_BITS;
cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
{
unsigned int rcpu;
int lcpu;
/* the mask is bit-reversed relative to cpu addresses, offset by
 * the entry's origin */
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
lcpu = smp_find_processor_id(rcpu);
if (lcpu >= 0) {
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
if (one_core_per_cpu) {
cpu_core_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_core_id[lcpu] = core->id;
}
cpu_set_polarization(lcpu, tl_cpu->pp);
}
}
return core;
}
static void clear_masks(void)
{
struct mask_info *info;
info = &core_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
info = &book_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
}
/*
 * Advance to the next topology-list entry.  CPU entries (nl == 0) and
 * container entries have different sizes, so step by the right one.
 */
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (tle->nl)
		return (union topology_entry *)((struct topology_container *)tle + 1);
	return (union topology_entry *)((struct topology_cpu *)tle + 1);
}
/*
 * Generic topology-list decode: nesting level 2 starts a new book,
 * level 1 a new core, level 0 lists cpus.  Any other level means the
 * list is malformed, so all state is dropped.
 */
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
struct mask_info *core = &core_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
case 2:
book = book->next;
book->id = tle->container.id;
break;
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
add_cpus_to_mask(&tle->cpu, book, core, 0);
break;
default:
clear_masks();
return;
}
tle = next_tle(tle);
}
}
/*
 * z10 variant: only two levels are present (1 = book, 0 = cpus), and
 * each cpu gets its own core entry (one_core_per_cpu).
 */
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
struct mask_info *core = &core_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
case 1:
book = book->next;
book->id = tle->container.id;
break;
case 0:
core = add_cpus_to_mask(&tle->cpu, book, core, 1);
break;
default:
clear_masks();
return;
}
tle = next_tle(tle);
}
}
/*
 * Rebuild the container chains from a topology SYSIB, dispatching on
 * machine type (0x2097/0x2098 use the z10 layout).
 */
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
struct cpuid cpu_id;
get_cpu_id(&cpu_id);
spin_lock_irq(&topology_lock);
clear_masks();
switch (cpu_id.machine) {
case 0x2097:
case 0x2098:
__tl_to_cores_z10(info);
break;
default:
__tl_to_cores_generic(info);
}
spin_unlock_irq(&topology_lock);
}
/* Without topology support every cpu is horizontally polarized. */
static void topology_update_polarization_simple(void)
{
int cpu;
mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
cpu_set_polarization(cpu, POLARIZATION_HRZ);
mutex_unlock(&smp_cpu_state_mutex);
}
/*
 * Execute the PTF instruction with function code @fc and return its
 * condition code (extracted via ipm/srl).
 */
static int ptf(unsigned long fc)
{
int rc;
asm volatile(
" .insn rre,0xb9a20000,%1,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc)
: "d" (fc) : "cc");
return rc;
}
/*
 * Switch cpu polarization: @fc nonzero requests vertical, zero
 * horizontal.  All cpus are marked POLARIZATION_UNKNOWN afterwards
 * until the next topology update reports the real state.
 * Returns 0 on success, -EOPNOTSUPP without topology support or
 * -EBUSY when the PTF instruction rejects the change.
 */
int topology_set_cpu_management(int fc)
{
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (ptf(fc ? PTF_VERTICAL : PTF_HORIZONTAL))
		return -EBUSY;
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return 0;
}
/* Recompute the per-cpu core and book maps from the container chains. */
static void update_cpu_core_map(void)
{
unsigned long flags;
int cpu;
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
}
spin_unlock_irqrestore(&topology_lock, flags);
}
/*
 * Fetch the topology SYSIB: prefer STSI 15.1.3; if that function is
 * not provided (-ENOSYS) fall back to 15.1.2.
 */
void store_topology(struct sysinfo_15_1_x *info)
{
int rc;
rc = stsi(info, 15, 1, 3);
if (rc != -ENOSYS)
return;
stsi(info, 15, 1, 2);
}
/*
 * Scheduler hook: refresh the topology maps and notify userspace via
 * a change uevent per online cpu.  Returns 1 when real topology data
 * was (re)read, 0 when only the simple fallback was applied.
 */
int arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
struct device *dev;
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
update_cpu_core_map();
topology_update_polarization_simple();
return 0;
}
store_topology(info);
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return 1;
}
/* Deferred work: rebuilding sched domains may sleep, so not in timer. */
static void topology_work_fn(struct work_struct *work)
{
rebuild_sched_domains();
}
void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
/* Periodic poll: ask PTF whether the topology changed and rearm. */
static void topology_timer_fn(unsigned long ignored)
{
if (ptf(PTF_CHECK))
topology_schedule_update();
set_topology_timer();
}
static struct timer_list topology_timer =
TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
/* countdown of fast (100ms) polls still owed after an expected change */
static atomic_t topology_poll = ATOMIC_INIT(0);
static void set_topology_timer(void)
{
/* poll fast while a change is expected, else once a minute */
if (atomic_add_unless(&topology_poll, -1, 0))
mod_timer(&topology_timer, jiffies + HZ / 10);
else
mod_timer(&topology_timer, jiffies + HZ * 60);
}
/* Request a burst of fast polling because a topology change is likely. */
void topology_expect_change(void)
{
if (!MACHINE_HAS_TOPOLOGY)
return;
/* This is racy, but it doesn't matter since it is just a heuristic.
 * Worst case is that we poll in a higher frequency for a bit longer.
 */
if (atomic_read(&topology_poll) > 60)
return;
atomic_add(60, &topology_poll);
set_topology_timer();
}
/*
 * Early parameter: "topology=off" disables use of the hardware
 * topology; any other value (or none) leaves it enabled.
 */
static int __init early_parse_topology(char *p)
{
	/* early_param handlers can be called with a NULL argument when the
	 * option is given without "=value"; guard before strncmp(). */
	if (!p || strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);
/*
 * Pre-allocate a mask_info chain long enough for every container at
 * the nesting level selected by @offset (1 = cores, 2 = books): the
 * count is the product of the magnitudes below that level.
 */
static void __init alloc_masks(struct sysinfo_15_1_x *info,
struct mask_info *mask, int offset)
{
int i, nr_masks;
nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
for (i = 0; i < info->mnest - offset; i++)
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
/* always provide at least one entry */
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = alloc_bootmem(sizeof(struct mask_info));
mask = mask->next;
}
}
/*
 * Boot-time setup: read the topology SYSIB once, report the machine's
 * magnitude vector and allocate the core and book container chains.
 */
void __init s390_init_cpu_topology(void)
{
struct sysinfo_15_1_x *info;
int i;
if (!MACHINE_HAS_TOPOLOGY)
return;
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1);
alloc_masks(info, &book_info, 2);
}
static int cpu_management;
/* sysfs "dispatching" read: 1 when vertical cpu management is active. */
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t rc;

	mutex_lock(&smp_cpu_state_mutex);
	rc = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}
/*
 * sysfs "dispatching" write: accept exactly "0" or "1" and switch the
 * cpu management mode accordingly.
 */
static ssize_t dispatching_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int val, rc;
char delim;
/* the %c probe rejects trailing non-whitespace input (returns 2) */
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
/* no-op when already in the requested mode */
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (rc)
goto out;
cpu_management = val;
topology_expect_change();
out:
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
/* sysfs per-cpu "polarization" read: report the state as text. */
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	const char *text;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (cpu_read_polarization(cpu)) {
	case POLARIZATION_HRZ:
		text = "horizontal";
		break;
	case POLARIZATION_VL:
		text = "vertical:low";
		break;
	case POLARIZATION_VM:
		text = "vertical:medium";
		break;
	case POLARIZATION_VH:
		text = "vertical:high";
		break;
	default:
		text = "unknown";
		break;
	}
	count = sprintf(buf, "%s\n", text);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
/* per-cpu sysfs attributes exposed under the topology group */
static struct attribute *topology_cpu_attrs[] = {
&dev_attr_polarization.attr,
NULL,
};
static struct attribute_group topology_cpu_attr_group = {
.attrs = topology_cpu_attrs,
};
/* Hook for the cpu subsystem: attach the topology attribute group. */
int topology_cpu_init(struct cpu *cpu)
{
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
/*
 * Late init: start the PTF poll timer (or fall back to simple
 * polarization), build the initial maps and expose "dispatching".
 */
static int __init topology_init(void)
{
if (!MACHINE_HAS_TOPOLOGY) {
topology_update_polarization_simple();
goto out;
}
set_topology_timer();
out:
update_cpu_core_map();
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
| gpl-2.0 |
CyanogenMod/android_kernel_google_steelhead | crypto/serpent.c | 4982 | 20267 | /*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* Added tnepres support: Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
* Based on code by hvr
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation.
 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
 */
#define SERPENT_MIN_KEY_SIZE 0
#define SERPENT_MAX_KEY_SIZE 32
/* 132 32-bit words = 33 round subkeys of 4 words each */
#define SERPENT_EXPKEY_WORDS 132
#define SERPENT_BLOCK_SIZE 16
/* key-schedule constant (golden ratio fraction) */
#define PHI 0x9e3779b9UL
/* one key-schedule step: mix four earlier key words with PHI and the
 * round index i, rotate left by 11, store the result as k[j] */
#define keyiter(a,b,c,d,i,j) \
b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
/* load / store a 4-word group of the expanded key */
#define loadkeys(x0,x1,x2,x3,i) \
x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
#define storekeys(x0,x1,x2,x3,i) \
k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
/* xor round subkey i (4 words at k[4*i]) into the working state */
#define K(x0,x1,x2,x3,i) \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0];
#define LK(x0,x1,x2,x3,x4,i) \
x0=rol32(x0,13);\
x2=rol32(x2,3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1=rol32(x1,1); x3 ^= x4; \
x3=rol32(x3,7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0=rol32(x0,5); x2=rol32(x2,22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2];
#define KL(x0,x1,x2,x3,x4,i) \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0=ror32(x0,5); x2=ror32(x2,22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1=ror32(x1,1); \
x2 ^= x4; x3=ror32(x3,7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0=ror32(x0,13);\
x1 ^= x2; x3 ^= x2; x2=ror32(x2,3);
#define S0(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 =~ x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 =~ x1; x2 ^= x4; \
x1 ^= x2;
#define S1(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 =~ x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 =~ x0; \
x0 ^= x2; x4 ^= x1;
#define S2(x0,x1,x2,x3,x4) \
x3 =~ x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 =~ x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2;
#define S3(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2;
#define S4(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 =~ x1; \
x3 ^= x0;
#define S5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 =~ x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2;
#define S6(x0,x1,x2,x3,x4) \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 =~ x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2;
#define S7(x0,x1,x2,x3,x4) \
x1 =~ x1; \
x4 = x1; x0 =~ x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1;
#define SI0(x0,x1,x2,x3,x4) \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 =~ x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3;
#define SI1(x0,x1,x2,x3,x4) \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 =~ x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1;
#define SI2(x0,x1,x2,x3,x4) \
x2 ^= x1; x4 = x3; x3 =~ x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0;
#define SI3(x0,x1,x2,x3,x4) \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1;
#define SI4(x0,x1,x2,x3,x4) \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 =~ x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3;
#define SI5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 =~ x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3;
#define SI6(x0,x1,x2,x3,x4) \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 =~ x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0;
#define SI7(x0,x1,x2,x3,x4) \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 =~ x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2;
/* per-tfm state: the expanded key (SERPENT_EXPKEY_WORDS = 132 words) */
struct serpent_ctx {
u32 expkey[SERPENT_EXPKEY_WORDS];
};
/*
 * Expand a 0..SERPENT_MAX_KEY_SIZE byte user key into the full round-key
 * schedule in ctx->expkey.  The key is padded per the Serpent spec
 * (a single 0x01 byte followed by zeros), expanded with the keyiter()
 * recurrence, and finally run through the S-boxes.  Always returns 0;
 * keylen range checking is assumed to be done by the crypto API via
 * cia_min/max_keysize (NOTE(review): confirm against the caller).
 */
static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *k = ctx->expkey;
	u8  *k8 = (u8 *)k;
	u32 r0,r1,r2,r3,r4;
	int i;

	/* Copy key, add padding */

	for (i = 0; i < keylen; ++i)
		k8[i] = key[i];
	if (i < SERPENT_MAX_KEY_SIZE)
		k8[i++] = 1;		/* spec-mandated 0x01 pad byte */
	while (i < SERPENT_MAX_KEY_SIZE)
		k8[i++] = 0;

	/* Expand key using polynomial */
	/* key words are interpreted little-endian, hence le32_to_cpu */

	r0 = le32_to_cpu(k[3]);
	r1 = le32_to_cpu(k[4]);
	r2 = le32_to_cpu(k[5]);
	r3 = le32_to_cpu(k[6]);
	r4 = le32_to_cpu(k[7]);

	keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
	keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
	keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
	keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
	keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
	keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
	keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
	keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);

	keyiter(k[  0],r3,r2,r0,  8,  8); keyiter(k[  1],r4,r3,r1,  9,  9);
	keyiter(k[  2],r0,r4,r2, 10, 10); keyiter(k[  3],r1,r0,r3, 11, 11);
	keyiter(k[  4],r2,r1,r4, 12, 12); keyiter(k[  5],r3,r2,r0, 13, 13);
	keyiter(k[  6],r4,r3,r1, 14, 14); keyiter(k[  7],r0,r4,r2, 15, 15);
	keyiter(k[  8],r1,r0,r3, 16, 16); keyiter(k[  9],r2,r1,r4, 17, 17);
	keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
	keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
	keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
	keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
	keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
	keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
	keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);

	/* k is stepped so keyiter's relative indices stay within range */
	k += 50;

	keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
	keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
	keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
	keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
	keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
	keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
	keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
	keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
	keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
	keyiter(k[ -8],r0,r4,r2, 50,  0); keyiter(k[ -7],r1,r0,r3, 51,  1);
	keyiter(k[ -6],r2,r1,r4, 52,  2); keyiter(k[ -5],r3,r2,r0, 53,  3);
	keyiter(k[ -4],r4,r3,r1, 54,  4); keyiter(k[ -3],r0,r4,r2, 55,  5);
	keyiter(k[ -2],r1,r0,r3, 56,  6); keyiter(k[ -1],r2,r1,r4, 57,  7);
	keyiter(k[  0],r3,r2,r0, 58,  8); keyiter(k[  1],r4,r3,r1, 59,  9);
	keyiter(k[  2],r0,r4,r2, 60, 10); keyiter(k[  3],r1,r0,r3, 61, 11);
	keyiter(k[  4],r2,r1,r4, 62, 12); keyiter(k[  5],r3,r2,r0, 63, 13);
	keyiter(k[  6],r4,r3,r1, 64, 14); keyiter(k[  7],r0,r4,r2, 65, 15);
	keyiter(k[  8],r1,r0,r3, 66, 16); keyiter(k[  9],r2,r1,r4, 67, 17);
	keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
	keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
	keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
	keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
	keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
	keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
	keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);

	k += 50;

	keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
	keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
	keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
	keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
	keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
	keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
	keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
	keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
	keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
	keyiter(k[ -8],r0,r4,r2,100,  0); keyiter(k[ -7],r1,r0,r3,101,  1);
	keyiter(k[ -6],r2,r1,r4,102,  2); keyiter(k[ -5],r3,r2,r0,103,  3);
	keyiter(k[ -4],r4,r3,r1,104,  4); keyiter(k[ -3],r0,r4,r2,105,  5);
	keyiter(k[ -2],r1,r0,r3,106,  6); keyiter(k[ -1],r2,r1,r4,107,  7);
	keyiter(k[  0],r3,r2,r0,108,  8); keyiter(k[  1],r4,r3,r1,109,  9);
	keyiter(k[  2],r0,r4,r2,110, 10); keyiter(k[  3],r1,r0,r3,111, 11);
	keyiter(k[  4],r2,r1,r4,112, 12); keyiter(k[  5],r3,r2,r0,113, 13);
	keyiter(k[  6],r4,r3,r1,114, 14); keyiter(k[  7],r0,r4,r2,115, 15);
	keyiter(k[  8],r1,r0,r3,116, 16); keyiter(k[  9],r2,r1,r4,117, 17);
	keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
	keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
	keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
	keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
	keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
	keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
	keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);

	/* Apply S-boxes */
	/* run the prekeys through the S-boxes to produce the round keys */

	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 28); loadkeys(r1,r2,r4,r3, 24);
	S4(r1,r2,r4,r3,r0); storekeys(r2,r4,r3,r0, 24); loadkeys(r2,r4,r3,r0, 20);
	S5(r2,r4,r3,r0,r1); storekeys(r1,r2,r4,r0, 20); loadkeys(r1,r2,r4,r0, 16);
	S6(r1,r2,r4,r0,r3); storekeys(r4,r3,r2,r0, 16); loadkeys(r4,r3,r2,r0, 12);
	S7(r4,r3,r2,r0,r1); storekeys(r1,r2,r0,r4, 12); loadkeys(r1,r2,r0,r4,  8);
	S0(r1,r2,r0,r4,r3); storekeys(r0,r2,r4,r1,  8); loadkeys(r0,r2,r4,r1,  4);
	S1(r0,r2,r4,r1,r3); storekeys(r3,r4,r1,r0,  4); loadkeys(r3,r4,r1,r0,  0);
	S2(r3,r4,r1,r0,r2); storekeys(r2,r4,r3,r0,  0); loadkeys(r2,r4,r3,r0, -4);
	S3(r2,r4,r3,r0,r1); storekeys(r0,r1,r4,r2, -4); loadkeys(r0,r1,r4,r2, -8);
	S4(r0,r1,r4,r2,r3); storekeys(r1,r4,r2,r3, -8); loadkeys(r1,r4,r2,r3,-12);
	S5(r1,r4,r2,r3,r0); storekeys(r0,r1,r4,r3,-12); loadkeys(r0,r1,r4,r3,-16);
	S6(r0,r1,r4,r3,r2); storekeys(r4,r2,r1,r3,-16); loadkeys(r4,r2,r1,r3,-20);
	S7(r4,r2,r1,r3,r0); storekeys(r0,r1,r3,r4,-20); loadkeys(r0,r1,r3,r4,-24);
	S0(r0,r1,r3,r4,r2); storekeys(r3,r1,r4,r0,-24); loadkeys(r3,r1,r4,r0,-28);
	k -= 50;
	S1(r3,r1,r4,r0,r2); storekeys(r2,r4,r0,r3, 22); loadkeys(r2,r4,r0,r3, 18);
	S2(r2,r4,r0,r3,r1); storekeys(r1,r4,r2,r3, 18); loadkeys(r1,r4,r2,r3, 14);
	S3(r1,r4,r2,r3,r0); storekeys(r3,r0,r4,r1, 14); loadkeys(r3,r0,r4,r1, 10);
	S4(r3,r0,r4,r1,r2); storekeys(r0,r4,r1,r2, 10); loadkeys(r0,r4,r1,r2,  6);
	S5(r0,r4,r1,r2,r3); storekeys(r3,r0,r4,r2,  6); loadkeys(r3,r0,r4,r2,  2);
	S6(r3,r0,r4,r2,r1); storekeys(r4,r1,r0,r2,  2); loadkeys(r4,r1,r0,r2, -2);
	S7(r4,r1,r0,r2,r3); storekeys(r3,r0,r2,r4, -2); loadkeys(r3,r0,r2,r4, -6);
	S0(r3,r0,r2,r4,r1); storekeys(r2,r0,r4,r3, -6); loadkeys(r2,r0,r4,r3,-10);
	S1(r2,r0,r4,r3,r1); storekeys(r1,r4,r3,r2,-10); loadkeys(r1,r4,r3,r2,-14);
	S2(r1,r4,r3,r2,r0); storekeys(r0,r4,r1,r2,-14); loadkeys(r0,r4,r1,r2,-18);
	S3(r0,r4,r1,r2,r3); storekeys(r2,r3,r4,r0,-18); loadkeys(r2,r3,r4,r0,-22);
	k -= 50;
	S4(r2,r3,r4,r0,r1); storekeys(r3,r4,r0,r1, 28); loadkeys(r3,r4,r0,r1, 24);
	S5(r3,r4,r0,r1,r2); storekeys(r2,r3,r4,r1, 24); loadkeys(r2,r3,r4,r1, 20);
	S6(r2,r3,r4,r1,r0); storekeys(r4,r0,r3,r1, 20); loadkeys(r4,r0,r3,r1, 16);
	S7(r4,r0,r3,r1,r2); storekeys(r2,r3,r1,r4, 16); loadkeys(r2,r3,r1,r4, 12);
	S0(r2,r3,r1,r4,r0); storekeys(r1,r3,r4,r2, 12); loadkeys(r1,r3,r4,r2,  8);
	S1(r1,r3,r4,r2,r0); storekeys(r0,r4,r2,r1,  8); loadkeys(r0,r4,r2,r1,  4);
	S2(r0,r4,r2,r1,r3); storekeys(r3,r4,r0,r1,  4); loadkeys(r3,r4,r0,r1,  0);
	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3,  0);

	return 0;
}
/*
 * Encrypt one 16-byte block: 32 rounds of key-mix (K/LK) and S-box
 * application, fully unrolled.  Input/output words are little-endian.
 */
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

/*
 * Note: The conversions between u8* and u32* might cause trouble
 * on architectures with stricter alignment rules than x86
 */

	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

					K(r0,r1,r2,r3,0);
	S0(r0,r1,r2,r3,r4);		LK(r2,r1,r3,r0,r4,1);
	S1(r2,r1,r3,r0,r4);		LK(r4,r3,r0,r2,r1,2);
	S2(r4,r3,r0,r2,r1);		LK(r1,r3,r4,r2,r0,3);
	S3(r1,r3,r4,r2,r0);		LK(r2,r0,r3,r1,r4,4);
	S4(r2,r0,r3,r1,r4);		LK(r0,r3,r1,r4,r2,5);
	S5(r0,r3,r1,r4,r2);		LK(r2,r0,r3,r4,r1,6);
	S6(r2,r0,r3,r4,r1);		LK(r3,r1,r0,r4,r2,7);
	S7(r3,r1,r0,r4,r2);		LK(r2,r0,r4,r3,r1,8);
	S0(r2,r0,r4,r3,r1);		LK(r4,r0,r3,r2,r1,9);
	S1(r4,r0,r3,r2,r1);		LK(r1,r3,r2,r4,r0,10);
	S2(r1,r3,r2,r4,r0);		LK(r0,r3,r1,r4,r2,11);
	S3(r0,r3,r1,r4,r2);		LK(r4,r2,r3,r0,r1,12);
	S4(r4,r2,r3,r0,r1);		LK(r2,r3,r0,r1,r4,13);
	S5(r2,r3,r0,r1,r4);		LK(r4,r2,r3,r1,r0,14);
	S6(r4,r2,r3,r1,r0);		LK(r3,r0,r2,r1,r4,15);
	S7(r3,r0,r2,r1,r4);		LK(r4,r2,r1,r3,r0,16);
	S0(r4,r2,r1,r3,r0);		LK(r1,r2,r3,r4,r0,17);
	S1(r1,r2,r3,r4,r0);		LK(r0,r3,r4,r1,r2,18);
	S2(r0,r3,r4,r1,r2);		LK(r2,r3,r0,r1,r4,19);
	S3(r2,r3,r0,r1,r4);		LK(r1,r4,r3,r2,r0,20);
	S4(r1,r4,r3,r2,r0);		LK(r4,r3,r2,r0,r1,21);
	S5(r4,r3,r2,r0,r1);		LK(r1,r4,r3,r0,r2,22);
	S6(r1,r4,r3,r0,r2);		LK(r3,r2,r4,r0,r1,23);
	S7(r3,r2,r4,r0,r1);		LK(r1,r4,r0,r3,r2,24);
	S0(r1,r4,r0,r3,r2);		LK(r0,r4,r3,r1,r2,25);
	S1(r0,r4,r3,r1,r2);		LK(r2,r3,r1,r0,r4,26);
	S2(r2,r3,r1,r0,r4);		LK(r4,r3,r2,r0,r1,27);
	S3(r4,r3,r2,r0,r1);		LK(r0,r1,r3,r4,r2,28);
	S4(r0,r1,r3,r4,r2);		LK(r1,r3,r4,r2,r0,29);
	S5(r1,r3,r4,r2,r0);		LK(r0,r1,r3,r2,r4,30);
	S6(r0,r1,r3,r2,r4);		LK(r3,r4,r1,r2,r0,31);
	S7(r3,r4,r1,r2,r0);		K(r0,r1,r2,r3,32);

	d[0] = cpu_to_le32(r0);
	d[1] = cpu_to_le32(r1);
	d[2] = cpu_to_le32(r2);
	d[3] = cpu_to_le32(r3);
}
/*
 * Decrypt one 16-byte block: the inverse of serpent_encrypt - 32 rounds
 * of key-mix (K/KL) and inverse S-boxes (SI7..SI0), fully unrolled.
 * Input/output words are little-endian.
 *
 * Fix: dropped the redundant `(struct serpent_ctx *)` cast of `ctx`,
 * which already has that type (matches serpent_encrypt).
 */
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

					K(r0,r1,r2,r3,32);
	SI7(r0,r1,r2,r3,r4);		KL(r1,r3,r0,r4,r2,31);
	SI6(r1,r3,r0,r4,r2);		KL(r0,r2,r4,r1,r3,30);
	SI5(r0,r2,r4,r1,r3);		KL(r2,r3,r0,r4,r1,29);
	SI4(r2,r3,r0,r4,r1);		KL(r2,r0,r1,r4,r3,28);
	SI3(r2,r0,r1,r4,r3);		KL(r1,r2,r3,r4,r0,27);
	SI2(r1,r2,r3,r4,r0);		KL(r2,r0,r4,r3,r1,26);
	SI1(r2,r0,r4,r3,r1);		KL(r1,r0,r4,r3,r2,25);
	SI0(r1,r0,r4,r3,r2);		KL(r4,r2,r0,r1,r3,24);
	SI7(r4,r2,r0,r1,r3);		KL(r2,r1,r4,r3,r0,23);
	SI6(r2,r1,r4,r3,r0);		KL(r4,r0,r3,r2,r1,22);
	SI5(r4,r0,r3,r2,r1);		KL(r0,r1,r4,r3,r2,21);
	SI4(r0,r1,r4,r3,r2);		KL(r0,r4,r2,r3,r1,20);
	SI3(r0,r4,r2,r3,r1);		KL(r2,r0,r1,r3,r4,19);
	SI2(r2,r0,r1,r3,r4);		KL(r0,r4,r3,r1,r2,18);
	SI1(r0,r4,r3,r1,r2);		KL(r2,r4,r3,r1,r0,17);
	SI0(r2,r4,r3,r1,r0);		KL(r3,r0,r4,r2,r1,16);
	SI7(r3,r0,r4,r2,r1);		KL(r0,r2,r3,r1,r4,15);
	SI6(r0,r2,r3,r1,r4);		KL(r3,r4,r1,r0,r2,14);
	SI5(r3,r4,r1,r0,r2);		KL(r4,r2,r3,r1,r0,13);
	SI4(r4,r2,r3,r1,r0);		KL(r4,r3,r0,r1,r2,12);
	SI3(r4,r3,r0,r1,r2);		KL(r0,r4,r2,r1,r3,11);
	SI2(r0,r4,r2,r1,r3);		KL(r4,r3,r1,r2,r0,10);
	SI1(r4,r3,r1,r2,r0);		KL(r0,r3,r1,r2,r4,9);
	SI0(r0,r3,r1,r2,r4);		KL(r1,r4,r3,r0,r2,8);
	SI7(r1,r4,r3,r0,r2);		KL(r4,r0,r1,r2,r3,7);
	SI6(r4,r0,r1,r2,r3);		KL(r1,r3,r2,r4,r0,6);
	SI5(r1,r3,r2,r4,r0);		KL(r3,r0,r1,r2,r4,5);
	SI4(r3,r0,r1,r2,r4);		KL(r3,r1,r4,r2,r0,4);
	SI3(r3,r1,r4,r2,r0);		KL(r4,r3,r0,r2,r1,3);
	SI2(r4,r3,r0,r2,r1);		KL(r3,r1,r2,r0,r4,2);
	SI1(r3,r1,r2,r0,r4);		KL(r4,r1,r2,r0,r3,1);
	SI0(r4,r1,r2,r0,r3);		K(r2,r3,r1,r4,0);

	d[0] = cpu_to_le32(r2);
	d[1] = cpu_to_le32(r3);
	d[2] = cpu_to_le32(r1);
	d[3] = cpu_to_le32(r4);
}
/* crypto_alg registration for standard-byte-order Serpent ("serpent").
 * cra_alignmask = 3 guarantees 32-bit-aligned src/dst for the u32 casts
 * in serpent_encrypt/serpent_decrypt. */
static struct crypto_alg serpent_alg = {
	.cra_name		=	"serpent",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	SERPENT_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct serpent_ctx),
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(serpent_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	SERPENT_MIN_KEY_SIZE,
	.cia_max_keysize	=	SERPENT_MAX_KEY_SIZE,
	.cia_setkey		=	serpent_setkey,
	.cia_encrypt		=	serpent_encrypt,
	.cia_decrypt		=	serpent_decrypt } }
};
/*
 * "tnepres" is Serpent with the key bytes in reversed order (kerneli.org
 * compatibility).  Build the reversed key on the stack, then hand it to
 * the regular serpent_setkey().  Short keys are fine: serpent_setkey()
 * applies the spec's padding itself.
 */
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	u8 rev_key[SERPENT_MAX_KEY_SIZE];
	const u8 *p = key + keylen;
	unsigned int i;

	for (i = 0; i < keylen; i++)
		rev_key[i] = *--p;

	return serpent_setkey(tfm, rev_key, keylen);
}
/*
 * tnepres encryption: byte-swap each word and reverse the word order
 * around a regular serpent_encrypt() call (kerneli.org byte-order
 * compatibility).
 */
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 *s = (const u32 *)src;
	u32 *d = (u32 *)dst;
	u32 rs[4], rd[4];
	int i;

	/* reorder on the way in ... */
	for (i = 0; i < 4; i++)
		rs[i] = swab32(s[3 - i]);

	serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);

	/* ... and again on the way out */
	for (i = 0; i < 4; i++)
		d[i] = swab32(rd[3 - i]);
}
/*
 * tnepres decryption: same word/byte reordering as tnepres_encrypt(),
 * wrapped around serpent_decrypt().
 */
static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 *s = (const u32 *)src;
	u32 *d = (u32 *)dst;
	u32 rs[4], rd[4];
	int i;

	/* reorder on the way in ... */
	for (i = 0; i < 4; i++)
		rs[i] = swab32(s[3 - i]);

	serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);

	/* ... and again on the way out */
	for (i = 0; i < 4; i++)
		d[i] = swab32(rd[3 - i]);
}
static struct crypto_alg tnepres_alg = {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
/* Register both algorithms; if "tnepres" fails to register, roll back
 * the already-registered "serpent" so the module loads all-or-nothing. */
static int __init serpent_mod_init(void)
{
	int ret;

	ret = crypto_register_alg(&serpent_alg);
	if (ret)
		return ret;

	ret = crypto_register_alg(&tnepres_alg);
	if (ret)
		crypto_unregister_alg(&serpent_alg);

	return ret;
}
/* Unregister in reverse order of registration. */
static void __exit serpent_mod_fini(void)
{
	crypto_unregister_alg(&tnepres_alg);
	crypto_unregister_alg(&serpent_alg);
}
/* Module plumbing; "tnepres" alias lets request_module find this file. */
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS("tnepres");
| gpl-2.0 |
parheliamm/android_kernel_nubia_nx403a | drivers/media/video/timblogiw.c | 4982 | 21326 | /*
* timblogiw.c timberdale FPGA LogiWin Video In driver
* Copyright (c) 2009-2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA LogiWin Video In
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf-dma-contig.h>
#include <media/timb_video.h>
#define DRIVER_NAME "timb-video"
#define TIMBLOGIWIN_NAME "Timberdale Video-In"
#define TIMBLOGIW_VERSION_CODE 0x04
#define TIMBLOGIW_LINES_PER_DESC 44
#define TIMBLOGIW_MAX_VIDEO_MEM 16
#define TIMBLOGIW_HAS_DECODER(lw) (lw->pdata.encoder.module_name)
/* Per-device state for one LogiWin video-in instance. */
struct timblogiw {
	struct video_device	video_dev;
	struct v4l2_device	v4l2_dev; /* mutual exclusion */
	struct mutex		lock;	/* serializes open/close and ioctls */
	struct device		*dev;	/* DMA-capable parent, used for videobuf */
	struct timb_video_platform_data pdata;
	struct v4l2_subdev	*sd_enc;	/* encoder */
	bool			opened;	/* single-open policy flag */
};

/* One supported TV standard and its frame geometry/rate. */
struct timblogiw_tvnorm {
	v4l2_std_id std;
	u16     width;
	u16     height;
	u8	fps;
};

/* Per-open-file state: its videobuf queue, current norm and DMA channel. */
struct timblogiw_fh {
	struct videobuf_queue	vb_vidq;
	struct timblogiw_tvnorm const	*cur_norm;
	struct list_head	capture;	/* queued/active buffers */
	struct dma_chan		*chan;
	spinlock_t		queue_lock; /* mutual exclusion */
	unsigned int		frame_count;
};

/* Driver-private buffer wrapper around a videobuf buffer. */
struct timblogiw_buffer {
	/* common v4l buffer stuff -- must be first */
	struct videobuf_buffer	vb;
	struct scatterlist	sg[16];	/* one entry per DMA descriptor chunk */
	dma_cookie_t		cookie;	/* in-flight DMA cookie, -1 when idle */
	struct timblogiw_fh	*fh;
};
/* Supported norms; the first entry (PAL) is the fallback default in
 * timblogiw_get_norm(). */
const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
	{
		.std			= V4L2_STD_PAL,
		.width			= 720,
		.height			= 576,
		.fps			= 25
	},
	{
		.std			= V4L2_STD_NTSC,
		.width			= 720,
		.height			= 480,
		.fps			= 30
	}
};
/* Bytes per scanline for the given norm (UYVY 4:2:2 = 2 bytes/pixel). */
static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
{
	return 2 * norm->width;
}
/* Total frame size in bytes: scanlines times bytes per line. */
static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
{
	return timblogiw_bytes_per_line(norm) * norm->height;
}
static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
{
int i;
for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
if (timblogiw_tvnorms[i].std & std)
return timblogiw_tvnorms + i;
/* default to first element */
return timblogiw_tvnorms;
}
/* DMA-engine completion callback: mark the finished buffer DONE, wake
 * its waiter, and activate the next queued buffer.  Runs in the DMA
 * driver's callback context under fh->queue_lock. */
static void timblogiw_dma_cb(void *data)
{
	struct timblogiw_buffer *buf = data;
	struct timblogiw_fh *fh = buf->fh;
	struct videobuf_buffer *vb = &buf->vb;

	spin_lock(&fh->queue_lock);

	/* mark the transfer done */
	buf->cookie = -1;

	fh->frame_count++;

	if (vb->state != VIDEOBUF_ERROR) {
		list_del(&vb->queue);
		do_gettimeofday(&vb->ts);
		vb->field_count = fh->frame_count * 2;	/* two fields per frame */
		vb->state = VIDEOBUF_DONE;

		wake_up(&vb->done);

		/* NOTE(review): this block only promotes the next buffer when
		 * the completed one was not in error - presumably intentional */
		if (!list_empty(&fh->capture)) {
			vb = list_entry(fh->capture.next,
				struct videobuf_buffer, queue);
			vb->state = VIDEOBUF_ACTIVE;
		}
	}

	spin_unlock(&fh->queue_lock);
}
static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
return chan->chan_id == (uintptr_t)filter_param;
}
/* IOCTL functions */
/* VIDIOC_G_FMT: report the fixed UYVY format at the current norm's
 * geometry.  cur_norm is read under lw->lock for consistency with s_std. */
static int timblogiw_g_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n",  __func__);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	mutex_lock(&lw->lock);

	format->fmt.pix.width = fh->cur_norm->width;
	format->fmt.pix.height = fh->cur_norm->height;
	format->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	format->fmt.pix.bytesperline = timblogiw_bytes_per_line(fh->cur_norm);
	format->fmt.pix.sizeimage = timblogiw_frame_size(fh->cur_norm);
	format->fmt.pix.field = V4L2_FIELD_NONE;

	mutex_unlock(&lw->lock);

	return 0;
}
/* VIDIOC_TRY_FMT: the hardware supports exactly one configuration
 * (capture, progressive, UYVY); anything else is rejected with -EINVAL. */
static int timblogiw_try_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_pix_format *pix = &format->fmt.pix;

	dev_dbg(&vdev->dev,
		"%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
		"bytes per line %d, size image: %d, colorspace: %d\n",
		__func__,
		pix->width, pix->height, pix->pixelformat, pix->field,
		pix->bytesperline, pix->sizeimage, pix->colorspace);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    pix->field != V4L2_FIELD_NONE ||
	    pix->pixelformat != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	return 0;
}
/* VIDIOC_S_FMT: validate via try_fmt, refuse while the queue is busy,
 * then report back the (fixed) geometry of the current norm.  The
 * format itself cannot actually be changed on this hardware. */
static int timblogiw_s_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	struct v4l2_pix_format *pix = &format->fmt.pix;
	int err;

	mutex_lock(&lw->lock);

	err = timblogiw_try_fmt(file, priv, format);
	if (err)
		goto out;

	if (videobuf_queue_is_busy(&fh->vb_vidq)) {
		dev_err(&vdev->dev, "%s queue busy\n", __func__);
		err = -EBUSY;
		goto out;
	}

	pix->width = fh->cur_norm->width;
	pix->height = fh->cur_norm->height;

out:
	mutex_unlock(&lw->lock);
	return err;
}
/* VIDIOC_QUERYCAP: identify the driver and advertise capture,
 * streaming and read() support.
 *
 * Fix: use strlcpy (guaranteed NUL-termination) for card and driver,
 * consistent with the existing bus_info copy; strncpy with sizeof-1
 * relied on the preceding memset for termination. */
static int timblogiw_querycap(struct file *file, void  *priv,
	struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
	memset(cap, 0, sizeof(*cap));
	strlcpy(cap->card, TIMBLOGIWIN_NAME, sizeof(cap->card));
	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->bus_info, vdev->name, sizeof(cap->bus_info));
	cap->version = TIMBLOGIW_VERSION_CODE;
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
		V4L2_CAP_READWRITE;

	return 0;
}
/* VIDIOC_ENUM_FMT: exactly one format exists (index 0, UYVY).
 *
 * Fix: the human-readable description said "YUYV" while the advertised
 * pixelformat is V4L2_PIX_FMT_UYVY - corrected to match. */
static int timblogiw_enum_fmt(struct file *file, void  *priv,
	struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s, index: %d\n",  __func__, fmt->index);

	if (fmt->index != 0)
		return -EINVAL;
	memset(fmt, 0, sizeof(*fmt));
	fmt->index = 0;
	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	strncpy(fmt->description, "4:2:2, packed, UYVY",
		sizeof(fmt->description)-1);
	fmt->pixelformat = V4L2_PIX_FMT_UYVY;

	return 0;
}
/* VIDIOC_G_PARM: report the frame rate of the current norm as
 * 1/fps seconds per frame. */
static int timblogiw_g_parm(struct file *file, void *priv,
	struct v4l2_streamparm *sp)
{
	struct timblogiw_fh *fh = priv;
	struct v4l2_captureparm *cp = &sp->parm.capture;

	cp->capability = V4L2_CAP_TIMEPERFRAME;
	cp->timeperframe.numerator = 1;
	cp->timeperframe.denominator = fh->cur_norm->fps;

	return 0;
}
/* VIDIOC_REQBUFS: delegate to the videobuf core. */
static int timblogiw_reqbufs(struct file *file, void  *priv,
	struct v4l2_requestbuffers *rb)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_reqbufs(&fh->vb_vidq, rb);
}
/* VIDIOC_QUERYBUF: delegate to the videobuf core. */
static int timblogiw_querybuf(struct file *file, void  *priv,
	struct v4l2_buffer *b)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_querybuf(&fh->vb_vidq, b);
}
/* VIDIOC_QBUF: delegate to the videobuf core. */
static int timblogiw_qbuf(struct file *file, void  *priv, struct v4l2_buffer *b)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_qbuf(&fh->vb_vidq, b);
}
/* VIDIOC_DQBUF: delegate to the videobuf core, honouring O_NONBLOCK. */
static int timblogiw_dqbuf(struct file *file, void  *priv,
	struct v4l2_buffer *b)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
}
/* VIDIOC_G_STD: return the standard of the currently selected norm. */
static int timblogiw_g_std(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	*std = fh->cur_norm->std;

	return 0;
}
/* VIDIOC_S_STD: forward the standard to the decoder subdev (if any),
 * and on success switch cur_norm accordingly.  Serialized by lw->lock. */
static int timblogiw_s_std(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	int err = 0;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	mutex_lock(&lw->lock);

	if (TIMBLOGIW_HAS_DECODER(lw))
		err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);

	if (!err)
		fh->cur_norm = timblogiw_get_norm(*std);

	mutex_unlock(&lw->lock);

	return err;
}
/* VIDIOC_ENUMINPUT: a single camera input (index 0) supporting the
 * union of all norms in the table. */
static int timblogiw_enuminput(struct file *file, void  *priv,
	struct v4l2_input *inp)
{
	struct video_device *vdev = video_devdata(file);
	int i;

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	if (inp->index != 0)
		return -EINVAL;

	inp->index = 0;

	strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
	inp->type = V4L2_INPUT_TYPE_CAMERA;

	inp->std = 0;
	for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
		inp->std |= timblogiw_tvnorms[i].std;

	return 0;
}
/* VIDIOC_G_INPUT: the hardware has exactly one input. */
static int timblogiw_g_input(struct file *file, void  *priv,
	unsigned int *input)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	*input = 0;

	return 0;
}
/* VIDIOC_S_INPUT: only input 0 exists; selecting it is a no-op. */
static int timblogiw_s_input(struct file *file, void  *priv, unsigned int input)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	return (input != 0) ? -EINVAL : 0;
}
/* VIDIOC_STREAMON: reset the frame counter and start the videobuf
 * queue (buffers begin flowing through buffer_queue/timblogiw_dma_cb). */
static int timblogiw_streamon(struct file *file, void  *priv, unsigned int type)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
		return -EINVAL;
	}

	fh->frame_count = 0;
	return videobuf_streamon(&fh->vb_vidq);
}
/* VIDIOC_STREAMOFF: stop the videobuf queue. */
static int timblogiw_streamoff(struct file *file, void  *priv,
	unsigned int type)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n",  __func__);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	return videobuf_streamoff(&fh->vb_vidq);
}
/* VIDIOC_QUERYSTD: ask the decoder subdev to detect the standard, or -
 * with no decoder - just report the currently selected norm. */
static int timblogiw_querystd(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n",  __func__);

	if (TIMBLOGIW_HAS_DECODER(lw))
		return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
	else {
		*std = fh->cur_norm->std;
		return 0;
	}
}
/* VIDIOC_ENUM_FRAMESIZES: a single discrete size - the geometry of the
 * current norm - for the UYVY format. */
static int timblogiw_enum_framesizes(struct file *file, void  *priv,
	struct v4l2_frmsizeenum *fsize)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n",  __func__,
		fsize->index, fsize->pixel_format);

	if ((fsize->index != 0) ||
		(fsize->pixel_format != V4L2_PIX_FMT_UYVY))
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = fh->cur_norm->width;
	fsize->discrete.height = fh->cur_norm->height;

	return 0;
}
/* Video buffer functions */
/* videobuf buf_setup: report the per-buffer size for the current norm
 * and clamp the buffer count so total memory stays within
 * TIMBLOGIW_MAX_VIDEO_MEM megabytes. */
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
	unsigned int *size)
{
	struct timblogiw_fh *fh = vq->priv_data;

	*size = timblogiw_frame_size(fh->cur_norm);

	if (!*count)
		*count = 32;	/* default when the caller did not ask for a count */

	while (*size * *count > TIMBLOGIW_MAX_VIDEO_MEM * 1024 * 1024)
		(*count)--;

	return 0;
}
/* videobuf buf_prepare: size-check the buffer, map it for contiguous
 * DMA on first use, and pre-build the scatterlist that buffer_queue
 * later hands to the DMA engine (one entry per TIMBLOGIW_LINES_PER_DESC
 * lines; the final entry is trimmed to the frame size). */
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
	enum v4l2_field field)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	unsigned int data_size = timblogiw_frame_size(fh->cur_norm);
	int err = 0;

	if (vb->baddr && vb->bsize < data_size)
		/* User provided buffer, but it is too small */
		return -ENOMEM;

	vb->size = data_size;
	vb->width = fh->cur_norm->width;
	vb->height = fh->cur_norm->height;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int i;
		unsigned int size;
		unsigned int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
			timblogiw_bytes_per_line(fh->cur_norm);
		dma_addr_t addr;

		sg_init_table(buf->sg, ARRAY_SIZE(buf->sg));

		err = videobuf_iolock(vq, vb, NULL);
		if (err)
			goto err;

		addr = videobuf_to_dma_contig(vb);
		for (i = 0, size = 0; size < data_size; i++) {
			sg_dma_address(buf->sg + i) = addr + size;
			size += bytes_per_desc;
			sg_dma_len(buf->sg + i) = (size > data_size) ?
				(bytes_per_desc - (size - data_size)) :
				bytes_per_desc;
		}

		vb->state = VIDEOBUF_PREPARED;
		buf->cookie = -1;	/* no DMA in flight yet */
		buf->fh = fh;
	}

	return 0;

err:
	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
	return err;
}
/* videobuf buf_queue: enqueue the buffer and submit its scatterlist to
 * the DMA engine.  Called by videobuf with fh->queue_lock held and irqs
 * off (evidenced by the unlock/lock pair below); the lock is dropped
 * around the potentially-sleeping descriptor preparation. */
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	struct dma_async_tx_descriptor *desc;
	int sg_elems;
	int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
		timblogiw_bytes_per_line(fh->cur_norm);

	/* number of scatterlist entries, rounding up for a partial tail */
	sg_elems = timblogiw_frame_size(fh->cur_norm) / bytes_per_desc;
	sg_elems +=
		(timblogiw_frame_size(fh->cur_norm) % bytes_per_desc) ? 1 : 0;

	if (list_empty(&fh->capture))
		vb->state = VIDEOBUF_ACTIVE;
	else
		vb->state = VIDEOBUF_QUEUED;

	list_add_tail(&vb->queue, &fh->capture);

	spin_unlock_irq(&fh->queue_lock);

	desc = dmaengine_prep_slave_sg(fh->chan,
		buf->sg, sg_elems, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
	if (!desc) {
		/* prep failed: undo the enqueue and revert the state */
		spin_lock_irq(&fh->queue_lock);
		list_del_init(&vb->queue);
		vb->state = VIDEOBUF_PREPARED;
		return;
	}

	desc->callback_param = buf;
	desc->callback = timblogiw_dma_cb;

	buf->cookie = desc->tx_submit(desc);

	spin_lock_irq(&fh->queue_lock);
}
/* videobuf buf_release: wait out any consumer and any in-flight DMA
 * (cookie >= 0 means a transfer was submitted), then free the mapping. */
static void buffer_release(struct videobuf_queue *vq,
	struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);

	videobuf_waiton(vq, vb, 0, 0);
	if (buf->cookie >= 0)
		dma_sync_wait(fh->chan, buf->cookie);

	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}
/* videobuf queue operations wired to the buffer_* helpers above. */
static struct videobuf_queue_ops timblogiw_video_qops = {
	.buf_setup	= buffer_setup,
	.buf_prepare	= buffer_prepare,
	.buf_queue	= buffer_queue,
	.buf_release	= buffer_release,
};
/* Device Operations functions */
/* open(): single-open device.  On first open, lazily bind the decoder
 * subdev over I2C (if the platform data names one), detect the current
 * standard, grab the dedicated DMA channel and initialize the per-file
 * videobuf queue.  All under lw->lock. */
static int timblogiw_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh;
	v4l2_std_id std;
	dma_cap_mask_t mask;
	int err = 0;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	mutex_lock(&lw->lock);
	if (lw->opened) {
		err = -EBUSY;
		goto out;
	}

	if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
		struct i2c_adapter *adapt;

		/* find the video decoder */
		adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
		if (!adapt) {
			dev_err(&vdev->dev, "No I2C bus #%d\n",
				lw->pdata.i2c_adapter);
			err = -ENODEV;
			goto out;
		}

		/* now find the encoder */
		lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
			lw->pdata.encoder.info, NULL);

		i2c_put_adapter(adapt);

		if (!lw->sd_enc) {
			dev_err(&vdev->dev, "Failed to get encoder: %s\n",
				lw->pdata.encoder.module_name);
			err = -ENODEV;
			goto out;
		}
	}

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (!fh) {
		err = -ENOMEM;
		goto out;
	}

	/* start from the table default, then refine via standard detection */
	fh->cur_norm = timblogiw_tvnorms;
	timblogiw_querystd(file, fh, &std);
	fh->cur_norm = timblogiw_get_norm(std);

	INIT_LIST_HEAD(&fh->capture);
	spin_lock_init(&fh->queue_lock);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	/* find the DMA channel */
	fh->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
			(void *)(uintptr_t)lw->pdata.dma_channel);
	if (!fh->chan) {
		dev_err(&vdev->dev, "Failed to get DMA channel\n");
		kfree(fh);
		err = -ENODEV;
		goto out;
	}

	file->private_data = fh;
	videobuf_queue_dma_contig_init(&fh->vb_vidq,
		&timblogiw_video_qops, lw->dev, &fh->queue_lock,
		V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
		sizeof(struct timblogiw_buffer), fh, NULL);

	lw->opened = true;
out:
	mutex_unlock(&lw->lock);

	return err;
}
/* release(): stop streaming, tear down the videobuf queue and mappings,
 * return the DMA channel, and clear the single-open flag. */
static int timblogiw_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	videobuf_stop(&fh->vb_vidq);
	videobuf_mmap_free(&fh->vb_vidq);

	dma_release_channel(fh->chan);

	kfree(fh);

	mutex_lock(&lw->lock);
	lw->opened = false;
	mutex_unlock(&lw->lock);
	return 0;
}
/* read(): stream frames out through the videobuf read helper. */
static ssize_t timblogiw_read(struct file *file, char __user *data,
	size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
		file->f_flags & O_NONBLOCK);
}
/* poll(): delegate to the videobuf core. */
static unsigned int timblogiw_poll(struct file *file,
	struct poll_table_struct *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
/* mmap(): delegate to the videobuf core. */
static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	return videobuf_mmap_mapper(&fh->vb_vidq, vma);
}
/* Platform device functions */
/* V4L2 ioctl dispatch table for the capture device. */
static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
	.vidioc_querycap		= timblogiw_querycap,
	.vidioc_enum_fmt_vid_cap	= timblogiw_enum_fmt,
	.vidioc_g_fmt_vid_cap		= timblogiw_g_fmt,
	.vidioc_try_fmt_vid_cap		= timblogiw_try_fmt,
	.vidioc_s_fmt_vid_cap		= timblogiw_s_fmt,
	.vidioc_g_parm			= timblogiw_g_parm,
	.vidioc_reqbufs			= timblogiw_reqbufs,
	.vidioc_querybuf		= timblogiw_querybuf,
	.vidioc_qbuf			= timblogiw_qbuf,
	.vidioc_dqbuf			= timblogiw_dqbuf,
	.vidioc_g_std			= timblogiw_g_std,
	.vidioc_s_std			= timblogiw_s_std,
	.vidioc_enum_input		= timblogiw_enuminput,
	.vidioc_g_input			= timblogiw_g_input,
	.vidioc_s_input			= timblogiw_s_input,
	.vidioc_streamon		= timblogiw_streamon,
	.vidioc_streamoff		= timblogiw_streamoff,
	.vidioc_querystd		= timblogiw_querystd,
	.vidioc_enum_framesizes		= timblogiw_enum_framesizes,
};
/* File operations; ioctls are routed through video_ioctl2 into the
 * dispatch table above. */
static __devinitconst struct v4l2_file_operations timblogiw_fops = {
	.owner		= THIS_MODULE,
	.open		= timblogiw_open,
	.release	= timblogiw_close,
	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
	.mmap		= timblogiw_mmap,
	.read		= timblogiw_read,
	.poll		= timblogiw_poll,
};
static __devinitconst struct video_device timblogiw_template = {
.name = TIMBLOGIWIN_NAME,
.fops = &timblogiw_fops,
.ioctl_ops = &timblogiw_ioctl_ops,
.release = video_device_release_empty,
.minor = -1,
.tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
};
/* Probe one LogiWin instance: allocate driver state, register the
 * v4l2_device and then the video device node.  Runs without a decoder
 * when the platform data names no encoder module.  Error paths unwind
 * in reverse order of the steps taken. */
static int __devinit timblogiw_probe(struct platform_device *pdev)
{
	int err;
	struct timblogiw *lw = NULL;
	struct timb_video_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		err = -EINVAL;
		goto err;
	}

	if (!pdata->encoder.module_name)
		dev_info(&pdev->dev, "Running without decoder\n");

	lw = kzalloc(sizeof(*lw), GFP_KERNEL);
	if (!lw) {
		err = -ENOMEM;
		goto err;
	}

	/* Prefer the parent (bus) device when there is one. */
	if (pdev->dev.parent)
		lw->dev = pdev->dev.parent;
	else
		lw->dev = &pdev->dev;

	/* Keep a private copy; pdata may not outlive the probe. */
	memcpy(&lw->pdata, pdata, sizeof(lw->pdata));

	mutex_init(&lw->lock);

	lw->video_dev = timblogiw_template;

	strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
	/* NOTE(review): registered with a NULL parent device, relying on
	 * the name set just above - confirm this is intentional. */
	err = v4l2_device_register(NULL, &lw->v4l2_dev);
	if (err)
		goto err_register;

	lw->video_dev.v4l2_dev = &lw->v4l2_dev;

	platform_set_drvdata(pdev, lw);
	video_set_drvdata(&lw->video_dev, lw);

	err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
	if (err) {
		dev_err(&pdev->dev, "Error reg video: %d\n", err);
		goto err_request;
	}

	return 0;

err_request:
	platform_set_drvdata(pdev, NULL);
	v4l2_device_unregister(&lw->v4l2_dev);
err_register:
	kfree(lw);
err:
	dev_err(&pdev->dev, "Failed to register: %d\n", err);

	return err;
}
static int __devexit timblogiw_remove(struct platform_device *pdev)
{
	struct timblogiw *lw = platform_get_drvdata(pdev);

	/* Tear down in reverse order of probe. */
	video_unregister_device(&lw->video_dev);
	v4l2_device_unregister(&lw->v4l2_dev);

	platform_set_drvdata(pdev, NULL);
	kfree(lw);

	return 0;
}
/* Platform driver glue; matched against DRIVER_NAME ("timb-video"). */
static struct platform_driver timblogiw_platform_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= timblogiw_probe,
	.remove		= __devexit_p(timblogiw_remove),
};
module_platform_driver(timblogiw_platform_driver);
MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);
| gpl-2.0 |
dmitriy103/amethyst-bravo-kernel | drivers/watchdog/pika_wdt.c | 4982 | 6967 | /*
* PIKA FPGA based Watchdog Timer
*
* Copyright (c) 2008 PIKA Technologies
* Sean MacLennan <smaclennan@pikatech.com>
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#define DRV_NAME "PIKA-WDT"
#define PFX DRV_NAME ": "
/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2
/* Timer heartbeat (500ms) */
#define WDT_TIMEOUT (HZ/2)
/* User land timeout */
#define WDT_HEARTBEAT 15
static int heartbeat = WDT_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
"(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* Single-instance driver state (the FPGA provides one watchdog). */
static struct {
	void __iomem *fpga;		/* mapped FPGA register window */
	unsigned long next_heartbeat;	/* the next_heartbeat for the timer */
	unsigned long open;		/* bit 0 set while /dev/watchdog is open */
	char expect_close;		/* 42 after magic 'V' was written */
	int bootstatus;			/* WDIOF_CARDRESET if wdt caused last reset */
	struct timer_list timer;	/* The timer that pings the watchdog */
} pikawdt_private;
/* Capabilities reported through WDIOC_GETSUPPORT; firmware_version is
 * filled in from the FPGA at init time (hence not const). */
static struct watchdog_info ident = {
	.identity	= DRV_NAME,
	.options	= WDIOF_CARDRESET |
			  WDIOF_SETTIMEOUT |
			  WDIOF_KEEPALIVEPING |
			  WDIOF_MAGICCLOSE,
};
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
static inline void pikawdt_reset(void)
{
	/* -- FPGA: Reset Control Register (32bit R/W) (Offset: 0x14) --
	 * Bit 7, WTCHDG_EN: When set to 1, the watchdog timer is enabled.
	 *	Once enabled, it cannot be disabled.  The watchdog can be
	 *	kicked by performing any write access to the reset
	 *	control register (this register).
	 * Bit 8-11, WTCHDG_TIMEOUT_SEC: Sets the watchdog timeout value in
	 *	seconds. Valid ranges are 1 to 15 seconds. The value can
	 *	be modified dynamically.
	 */
	unsigned reset = in_be32(pikawdt_private.fpga + 0x14);

	/* Enable the watchdog with a WDT_HW_TIMEOUT (2) second hardware
	 * timeout; the write itself also kicks the timer.  (The old
	 * comment claimed "max timeout - 15 seconds", which contradicted
	 * the code.)  Use '|' - these are independent bit fields, not
	 * arithmetic quantities. */
	reset |= (1 << 7) | (WDT_HW_TIMEOUT << 8);

	out_be32(pikawdt_private.fpga + 0x14, reset);
}
/*
* Timer tick
*/
static void pikawdt_ping(unsigned long data)
{
	int userland_alive = time_before(jiffies,
					 pikawdt_private.next_heartbeat);
	int unattended = !nowayout && !pikawdt_private.open;

	/* Keep patting the hardware while user space is alive, or while
	 * nobody holds the device open (and nowayout is off). */
	if (!userland_alive && !unattended) {
		printk(KERN_CRIT PFX "I will reset your machine !\n");
		return;
	}

	pikawdt_reset();
	mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
}
/* Push the user-space deadline 'heartbeat' seconds into the future. */
static void pikawdt_keepalive(void)
{
	pikawdt_private.next_heartbeat = jiffies + heartbeat * HZ;
}
/* Arm the user-space deadline, then start the 500 ms ping timer; the
 * first ping enables the hardware watchdog (irreversibly). */
static void pikawdt_start(void)
{
	pikawdt_keepalive();
	mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
}
/*
* Watchdog device is opened, and watchdog starts running.
*/
static int pikawdt_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &pikawdt_private.open))
		return -EBUSY;

	/* Starts the kernel ping timer; the hardware watchdog is enabled
	 * by the first ping and cannot be disabled afterwards. */
	pikawdt_start();

	return nonseekable_open(inode, file);
}
/*
* Close the watchdog device.
*/
static int pikawdt_release(struct inode *inode, struct file *file)
{
	/* stop internal ping - but only on an unclean close (no magic
	 * 'V' written, so expect_close is 0).  Since the hardware timer
	 * cannot be disabled, deleting the ping timer lets it fire and
	 * reset the machine.  After a magic close the ping timer keeps
	 * running and feeds the hardware indefinitely. */
	if (!pikawdt_private.expect_close)
		del_timer(&pikawdt_private.timer);

	clear_bit(0, &pikawdt_private.open);
	pikawdt_private.expect_close = 0;
	return 0;
}
/*
* Pat the watchdog whenever device is written to.
*/
static ssize_t pikawdt_write(struct file *file, const char __user *data,
			     size_t len, loff_t *ppos)
{
	if (!len)
		return 0;

	/* Scan for magic character */
	if (!nowayout) {
		size_t i;

		/* Any write first revokes a previously seen 'V'. */
		pikawdt_private.expect_close = 0;

		for (i = 0; i < len; i++) {
			char c;
			if (get_user(c, data + i))
				return -EFAULT;
			if (c == 'V') {
				/* Magic close: release() will keep the
				 * ping timer running. */
				pikawdt_private.expect_close = 42;
				break;
			}
		}
	}

	/* Every write pats the watchdog. */
	pikawdt_keepalive();

	return len;
}
/*
* Handle commands from user-space.
*/
static long pikawdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int new_value;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
return put_user(pikawdt_private.bootstatus, p);
case WDIOC_KEEPALIVE:
pikawdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_value, p))
return -EFAULT;
heartbeat = new_value;
pikawdt_keepalive();
return put_user(new_value, p); /* return current value */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
}
return -ENOTTY;
}
/* Character-device interface for /dev/watchdog. */
static const struct file_operations pikawdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= pikawdt_open,
	.release	= pikawdt_release,
	.write		= pikawdt_write,
	.unlocked_ioctl	= pikawdt_ioctl,
};
/* Registers as the standard misc watchdog device (minor 130). */
static struct miscdevice pikawdt_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &pikawdt_fops,
};
/* Map the FPGA, read firmware/POST status, and register the misc
 * device.  The fpga-sd window is only needed transiently to read the
 * POST result and is unmapped again before returning. */
static int __init pikawdt_init(void)
{
	struct device_node *np;
	void __iomem *fpga;
	/* FIX: was 'static u32 post1' - a needless static local in
	 * __init code that would occupy .bss forever; it is assigned
	 * before use, so an automatic variable is correct. */
	u32 post1;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
	if (np == NULL) {
		printk(KERN_ERR PFX "Unable to find fpga.\n");
		return -ENOENT;
	}

	pikawdt_private.fpga = of_iomap(np, 0);
	of_node_put(np);
	if (pikawdt_private.fpga == NULL) {
		printk(KERN_ERR PFX "Unable to map fpga.\n");
		return -ENOMEM;
	}

	ident.firmware_version = in_be32(pikawdt_private.fpga + 0x1c) & 0xffff;

	/* POST information is in the sd area. */
	np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
	if (np == NULL) {
		printk(KERN_ERR PFX "Unable to find fpga-sd.\n");
		ret = -ENOENT;
		goto out;
	}

	fpga = of_iomap(np, 0);
	of_node_put(np);
	if (fpga == NULL) {
		printk(KERN_ERR PFX "Unable to map fpga-sd.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* -- FPGA: POST Test Results Register 1 (32bit R/W) (Offset: 0x4040) --
	 * Bit 31, WDOG: Set to 1 when the last reset was caused by a watchdog
	 *	timeout.  (0x40 below is the offset within the sd window.)
	 */
	post1 = in_be32(fpga + 0x40);
	if (post1 & 0x80000000)
		pikawdt_private.bootstatus = WDIOF_CARDRESET;

	iounmap(fpga);

	setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);

	ret = misc_register(&pikawdt_miscdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register miscdev.\n");
		goto out;
	}

	printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
		heartbeat, nowayout);
	return 0;

out:
	iounmap(pikawdt_private.fpga);
	return ret;
}
/* Module teardown: deregister the device before unmapping so no new
 * opens can race against the iounmap. */
static void __exit pikawdt_exit(void)
{
	misc_deregister(&pikawdt_miscdev);
	iounmap(pikawdt_private.fpga);
}
module_init(pikawdt_init);
module_exit(pikawdt_exit);
MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>");
MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
deafnote/android_kernel_huawei_u88251 | crypto/serpent.c | 4982 | 20267 | /*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* Added tnepres support: Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
* Based on code by hvr
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
*/
#define SERPENT_MIN_KEY_SIZE 0
#define SERPENT_MAX_KEY_SIZE 32
#define SERPENT_EXPKEY_WORDS 132
#define SERPENT_BLOCK_SIZE 16
#define PHI 0x9e3779b9UL
#define keyiter(a,b,c,d,i,j) \
b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
#define loadkeys(x0,x1,x2,x3,i) \
x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
#define storekeys(x0,x1,x2,x3,i) \
k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
#define K(x0,x1,x2,x3,i) \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0];
#define LK(x0,x1,x2,x3,x4,i) \
x0=rol32(x0,13);\
x2=rol32(x2,3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1=rol32(x1,1); x3 ^= x4; \
x3=rol32(x3,7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0=rol32(x0,5); x2=rol32(x2,22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2];
#define KL(x0,x1,x2,x3,x4,i) \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0=ror32(x0,5); x2=ror32(x2,22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1=ror32(x1,1); \
x2 ^= x4; x3=ror32(x3,7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0=ror32(x0,13);\
x1 ^= x2; x3 ^= x2; x2=ror32(x2,3);
#define S0(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 =~ x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 =~ x1; x2 ^= x4; \
x1 ^= x2;
#define S1(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 =~ x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 =~ x0; \
x0 ^= x2; x4 ^= x1;
#define S2(x0,x1,x2,x3,x4) \
x3 =~ x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 =~ x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2;
#define S3(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2;
#define S4(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 =~ x1; \
x3 ^= x0;
#define S5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 =~ x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2;
#define S6(x0,x1,x2,x3,x4) \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 =~ x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2;
#define S7(x0,x1,x2,x3,x4) \
x1 =~ x1; \
x4 = x1; x0 =~ x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1;
#define SI0(x0,x1,x2,x3,x4) \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 =~ x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3;
#define SI1(x0,x1,x2,x3,x4) \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 =~ x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1;
#define SI2(x0,x1,x2,x3,x4) \
x2 ^= x1; x4 = x3; x3 =~ x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0;
#define SI3(x0,x1,x2,x3,x4) \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1;
#define SI4(x0,x1,x2,x3,x4) \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 =~ x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3;
#define SI5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 =~ x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3;
#define SI6(x0,x1,x2,x3,x4) \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 =~ x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0;
#define SI7(x0,x1,x2,x3,x4) \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 =~ x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2;
/* Per-tfm context: the fully expanded round-key schedule (33 * 4 words). */
struct serpent_ctx {
	u32 expkey[SERPENT_EXPKEY_WORDS];
};
/* Expand 'key' (up to 256 bits) into the 132-word round-key schedule in
 * the tfm context.  Keys shorter than 256 bits are padded with a single
 * 1 bit then zeros, per the Serpent specification.  Always returns 0.
 * The in-place pointer arithmetic on k (k += 50 / k -= 50) slides the
 * working window over the schedule so the keyiter offsets stay small. */
static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *k = ctx->expkey;
	u8 *k8 = (u8 *)k;
	u32 r0,r1,r2,r3,r4;
	int i;

	/* Copy key, add padding */
	for (i = 0; i < keylen; ++i)
		k8[i] = key[i];
	if (i < SERPENT_MAX_KEY_SIZE)
		k8[i++] = 1;
	while (i < SERPENT_MAX_KEY_SIZE)
		k8[i++] = 0;

	/* Expand key using polynomial */
	r0 = le32_to_cpu(k[3]);
	r1 = le32_to_cpu(k[4]);
	r2 = le32_to_cpu(k[5]);
	r3 = le32_to_cpu(k[6]);
	r4 = le32_to_cpu(k[7]);

	/* Affine recurrence (keyiter) over all 132 words, PHI-mixed. */
	keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
	keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
	keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
	keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
	keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
	keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
	keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
	keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);

	keyiter(k[ 0],r3,r2,r0, 8, 8); keyiter(k[ 1],r4,r3,r1, 9, 9);
	keyiter(k[ 2],r0,r4,r2, 10, 10); keyiter(k[ 3],r1,r0,r3, 11, 11);
	keyiter(k[ 4],r2,r1,r4, 12, 12); keyiter(k[ 5],r3,r2,r0, 13, 13);
	keyiter(k[ 6],r4,r3,r1, 14, 14); keyiter(k[ 7],r0,r4,r2, 15, 15);
	keyiter(k[ 8],r1,r0,r3, 16, 16); keyiter(k[ 9],r2,r1,r4, 17, 17);
	keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
	keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
	keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
	keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
	keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
	keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
	keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);

	/* Slide the window forward 50 words. */
	k += 50;

	keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
	keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
	keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
	keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
	keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
	keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
	keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
	keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
	keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
	keyiter(k[ -8],r0,r4,r2, 50, 0); keyiter(k[ -7],r1,r0,r3, 51, 1);
	keyiter(k[ -6],r2,r1,r4, 52, 2); keyiter(k[ -5],r3,r2,r0, 53, 3);
	keyiter(k[ -4],r4,r3,r1, 54, 4); keyiter(k[ -3],r0,r4,r2, 55, 5);
	keyiter(k[ -2],r1,r0,r3, 56, 6); keyiter(k[ -1],r2,r1,r4, 57, 7);
	keyiter(k[ 0],r3,r2,r0, 58, 8); keyiter(k[ 1],r4,r3,r1, 59, 9);
	keyiter(k[ 2],r0,r4,r2, 60, 10); keyiter(k[ 3],r1,r0,r3, 61, 11);
	keyiter(k[ 4],r2,r1,r4, 62, 12); keyiter(k[ 5],r3,r2,r0, 63, 13);
	keyiter(k[ 6],r4,r3,r1, 64, 14); keyiter(k[ 7],r0,r4,r2, 65, 15);
	keyiter(k[ 8],r1,r0,r3, 66, 16); keyiter(k[ 9],r2,r1,r4, 67, 17);
	keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
	keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
	keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
	keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
	keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
	keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
	keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);

	/* Slide the window forward again. */
	k += 50;

	keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
	keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
	keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
	keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
	keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
	keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
	keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
	keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
	keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
	keyiter(k[ -8],r0,r4,r2,100, 0); keyiter(k[ -7],r1,r0,r3,101, 1);
	keyiter(k[ -6],r2,r1,r4,102, 2); keyiter(k[ -5],r3,r2,r0,103, 3);
	keyiter(k[ -4],r4,r3,r1,104, 4); keyiter(k[ -3],r0,r4,r2,105, 5);
	keyiter(k[ -2],r1,r0,r3,106, 6); keyiter(k[ -1],r2,r1,r4,107, 7);
	keyiter(k[ 0],r3,r2,r0,108, 8); keyiter(k[ 1],r4,r3,r1,109, 9);
	keyiter(k[ 2],r0,r4,r2,110, 10); keyiter(k[ 3],r1,r0,r3,111, 11);
	keyiter(k[ 4],r2,r1,r4,112, 12); keyiter(k[ 5],r3,r2,r0,113, 13);
	keyiter(k[ 6],r4,r3,r1,114, 14); keyiter(k[ 7],r0,r4,r2,115, 15);
	keyiter(k[ 8],r1,r0,r3,116, 16); keyiter(k[ 9],r2,r1,r4,117, 17);
	keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
	keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
	keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
	keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
	keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
	keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
	keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);

	/* Apply S-boxes */
	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 28); loadkeys(r1,r2,r4,r3, 24);
	S4(r1,r2,r4,r3,r0); storekeys(r2,r4,r3,r0, 24); loadkeys(r2,r4,r3,r0, 20);
	S5(r2,r4,r3,r0,r1); storekeys(r1,r2,r4,r0, 20); loadkeys(r1,r2,r4,r0, 16);
	S6(r1,r2,r4,r0,r3); storekeys(r4,r3,r2,r0, 16); loadkeys(r4,r3,r2,r0, 12);
	S7(r4,r3,r2,r0,r1); storekeys(r1,r2,r0,r4, 12); loadkeys(r1,r2,r0,r4, 8);
	S0(r1,r2,r0,r4,r3); storekeys(r0,r2,r4,r1, 8); loadkeys(r0,r2,r4,r1, 4);
	S1(r0,r2,r4,r1,r3); storekeys(r3,r4,r1,r0, 4); loadkeys(r3,r4,r1,r0, 0);
	S2(r3,r4,r1,r0,r2); storekeys(r2,r4,r3,r0, 0); loadkeys(r2,r4,r3,r0, -4);
	S3(r2,r4,r3,r0,r1); storekeys(r0,r1,r4,r2, -4); loadkeys(r0,r1,r4,r2, -8);
	S4(r0,r1,r4,r2,r3); storekeys(r1,r4,r2,r3, -8); loadkeys(r1,r4,r2,r3,-12);
	S5(r1,r4,r2,r3,r0); storekeys(r0,r1,r4,r3,-12); loadkeys(r0,r1,r4,r3,-16);
	S6(r0,r1,r4,r3,r2); storekeys(r4,r2,r1,r3,-16); loadkeys(r4,r2,r1,r3,-20);
	S7(r4,r2,r1,r3,r0); storekeys(r0,r1,r3,r4,-20); loadkeys(r0,r1,r3,r4,-24);
	S0(r0,r1,r3,r4,r2); storekeys(r3,r1,r4,r0,-24); loadkeys(r3,r1,r4,r0,-28);
	/* Slide the window back for the middle third of the schedule. */
	k -= 50;
	S1(r3,r1,r4,r0,r2); storekeys(r2,r4,r0,r3, 22); loadkeys(r2,r4,r0,r3, 18);
	S2(r2,r4,r0,r3,r1); storekeys(r1,r4,r2,r3, 18); loadkeys(r1,r4,r2,r3, 14);
	S3(r1,r4,r2,r3,r0); storekeys(r3,r0,r4,r1, 14); loadkeys(r3,r0,r4,r1, 10);
	S4(r3,r0,r4,r1,r2); storekeys(r0,r4,r1,r2, 10); loadkeys(r0,r4,r1,r2, 6);
	S5(r0,r4,r1,r2,r3); storekeys(r3,r0,r4,r2, 6); loadkeys(r3,r0,r4,r2, 2);
	S6(r3,r0,r4,r2,r1); storekeys(r4,r1,r0,r2, 2); loadkeys(r4,r1,r0,r2, -2);
	S7(r4,r1,r0,r2,r3); storekeys(r3,r0,r2,r4, -2); loadkeys(r3,r0,r2,r4, -6);
	S0(r3,r0,r2,r4,r1); storekeys(r2,r0,r4,r3, -6); loadkeys(r2,r0,r4,r3,-10);
	S1(r2,r0,r4,r3,r1); storekeys(r1,r4,r3,r2,-10); loadkeys(r1,r4,r3,r2,-14);
	S2(r1,r4,r3,r2,r0); storekeys(r0,r4,r1,r2,-14); loadkeys(r0,r4,r1,r2,-18);
	S3(r0,r4,r1,r2,r3); storekeys(r2,r3,r4,r0,-18); loadkeys(r2,r3,r4,r0,-22);
	/* Slide back once more for the first third. */
	k -= 50;
	S4(r2,r3,r4,r0,r1); storekeys(r3,r4,r0,r1, 28); loadkeys(r3,r4,r0,r1, 24);
	S5(r3,r4,r0,r1,r2); storekeys(r2,r3,r4,r1, 24); loadkeys(r2,r3,r4,r1, 20);
	S6(r2,r3,r4,r1,r0); storekeys(r4,r0,r3,r1, 20); loadkeys(r4,r0,r3,r1, 16);
	S7(r4,r0,r3,r1,r2); storekeys(r2,r3,r1,r4, 16); loadkeys(r2,r3,r1,r4, 12);
	S0(r2,r3,r1,r4,r0); storekeys(r1,r3,r4,r2, 12); loadkeys(r1,r3,r4,r2, 8);
	S1(r1,r3,r4,r2,r0); storekeys(r0,r4,r2,r1, 8); loadkeys(r0,r4,r2,r1, 4);
	S2(r0,r4,r2,r1,r3); storekeys(r3,r4,r0,r1, 4); loadkeys(r3,r4,r0,r1, 0);
	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 0);

	return 0;
}
/* Encrypt one 16-byte block: 32 rounds, each an S-box (S0..S7 cycling)
 * followed by the linear transform LK; the final round mixes in the
 * last round key with K instead.  Input/output are little-endian. */
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

/*
 * Note: The conversions between u8* and u32* might cause trouble
 * on architectures with stricter alignment rules than x86
 */

	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

	K(r0,r1,r2,r3,0);
	S0(r0,r1,r2,r3,r4);		LK(r2,r1,r3,r0,r4,1);
	S1(r2,r1,r3,r0,r4);		LK(r4,r3,r0,r2,r1,2);
	S2(r4,r3,r0,r2,r1);		LK(r1,r3,r4,r2,r0,3);
	S3(r1,r3,r4,r2,r0);		LK(r2,r0,r3,r1,r4,4);
	S4(r2,r0,r3,r1,r4);		LK(r0,r3,r1,r4,r2,5);
	S5(r0,r3,r1,r4,r2);		LK(r2,r0,r3,r4,r1,6);
	S6(r2,r0,r3,r4,r1);		LK(r3,r1,r0,r4,r2,7);
	S7(r3,r1,r0,r4,r2);		LK(r2,r0,r4,r3,r1,8);
	S0(r2,r0,r4,r3,r1);		LK(r4,r0,r3,r2,r1,9);
	S1(r4,r0,r3,r2,r1);		LK(r1,r3,r2,r4,r0,10);
	S2(r1,r3,r2,r4,r0);		LK(r0,r3,r1,r4,r2,11);
	S3(r0,r3,r1,r4,r2);		LK(r4,r2,r3,r0,r1,12);
	S4(r4,r2,r3,r0,r1);		LK(r2,r3,r0,r1,r4,13);
	S5(r2,r3,r0,r1,r4);		LK(r4,r2,r3,r1,r0,14);
	S6(r4,r2,r3,r1,r0);		LK(r3,r0,r2,r1,r4,15);
	S7(r3,r0,r2,r1,r4);		LK(r4,r2,r1,r3,r0,16);
	S0(r4,r2,r1,r3,r0);		LK(r1,r2,r3,r4,r0,17);
	S1(r1,r2,r3,r4,r0);		LK(r0,r3,r4,r1,r2,18);
	S2(r0,r3,r4,r1,r2);		LK(r2,r3,r0,r1,r4,19);
	S3(r2,r3,r0,r1,r4);		LK(r1,r4,r3,r2,r0,20);
	S4(r1,r4,r3,r2,r0);		LK(r4,r3,r2,r0,r1,21);
	S5(r4,r3,r2,r0,r1);		LK(r1,r4,r3,r0,r2,22);
	S6(r1,r4,r3,r0,r2);		LK(r3,r2,r4,r0,r1,23);
	S7(r3,r2,r4,r0,r1);		LK(r1,r4,r0,r3,r2,24);
	S0(r1,r4,r0,r3,r2);		LK(r0,r4,r3,r1,r2,25);
	S1(r0,r4,r3,r1,r2);		LK(r2,r3,r1,r0,r4,26);
	S2(r2,r3,r1,r0,r4);		LK(r4,r3,r2,r0,r1,27);
	S3(r4,r3,r2,r0,r1);		LK(r0,r1,r3,r4,r2,28);
	S4(r0,r1,r3,r4,r2);		LK(r1,r3,r4,r2,r0,29);
	S5(r1,r3,r4,r2,r0);		LK(r0,r1,r3,r2,r4,30);
	S6(r0,r1,r3,r2,r4);		LK(r3,r4,r1,r2,r0,31);
	S7(r3,r4,r1,r2,r0);		K(r0,r1,r2,r3,32);

	d[0] = cpu_to_le32(r0);
	d[1] = cpu_to_le32(r1);
	d[2] = cpu_to_le32(r2);
	d[3] = cpu_to_le32(r3);
}
/* Decrypt one 16-byte block: the inverse of serpent_encrypt, applying
 * KL then the inverse S-boxes (SI7..SI0 cycling) backwards through the
 * round keys. */
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	/* FIX: dropped the redundant '((struct serpent_ctx *)ctx)' cast -
	 * ctx already has that type; now consistent with serpent_encrypt. */
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

	K(r0,r1,r2,r3,32);
	SI7(r0,r1,r2,r3,r4);	KL(r1,r3,r0,r4,r2,31);
	SI6(r1,r3,r0,r4,r2);	KL(r0,r2,r4,r1,r3,30);
	SI5(r0,r2,r4,r1,r3);	KL(r2,r3,r0,r4,r1,29);
	SI4(r2,r3,r0,r4,r1);	KL(r2,r0,r1,r4,r3,28);
	SI3(r2,r0,r1,r4,r3);	KL(r1,r2,r3,r4,r0,27);
	SI2(r1,r2,r3,r4,r0);	KL(r2,r0,r4,r3,r1,26);
	SI1(r2,r0,r4,r3,r1);	KL(r1,r0,r4,r3,r2,25);
	SI0(r1,r0,r4,r3,r2);	KL(r4,r2,r0,r1,r3,24);
	SI7(r4,r2,r0,r1,r3);	KL(r2,r1,r4,r3,r0,23);
	SI6(r2,r1,r4,r3,r0);	KL(r4,r0,r3,r2,r1,22);
	SI5(r4,r0,r3,r2,r1);	KL(r0,r1,r4,r3,r2,21);
	SI4(r0,r1,r4,r3,r2);	KL(r0,r4,r2,r3,r1,20);
	SI3(r0,r4,r2,r3,r1);	KL(r2,r0,r1,r3,r4,19);
	SI2(r2,r0,r1,r3,r4);	KL(r0,r4,r3,r1,r2,18);
	SI1(r0,r4,r3,r1,r2);	KL(r2,r4,r3,r1,r0,17);
	SI0(r2,r4,r3,r1,r0);	KL(r3,r0,r4,r2,r1,16);
	SI7(r3,r0,r4,r2,r1);	KL(r0,r2,r3,r1,r4,15);
	SI6(r0,r2,r3,r1,r4);	KL(r3,r4,r1,r0,r2,14);
	SI5(r3,r4,r1,r0,r2);	KL(r4,r2,r3,r1,r0,13);
	SI4(r4,r2,r3,r1,r0);	KL(r4,r3,r0,r1,r2,12);
	SI3(r4,r3,r0,r1,r2);	KL(r0,r4,r2,r1,r3,11);
	SI2(r0,r4,r2,r1,r3);	KL(r4,r3,r1,r2,r0,10);
	SI1(r4,r3,r1,r2,r0);	KL(r0,r3,r1,r2,r4,9);
	SI0(r0,r3,r1,r2,r4);	KL(r1,r4,r3,r0,r2,8);
	SI7(r1,r4,r3,r0,r2);	KL(r4,r0,r1,r2,r3,7);
	SI6(r4,r0,r1,r2,r3);	KL(r1,r3,r2,r4,r0,6);
	SI5(r1,r3,r2,r4,r0);	KL(r3,r0,r1,r2,r4,5);
	SI4(r3,r0,r1,r2,r4);	KL(r3,r1,r4,r2,r0,4);
	SI3(r3,r1,r4,r2,r0);	KL(r4,r3,r0,r2,r1,3);
	SI2(r4,r3,r0,r2,r1);	KL(r3,r1,r2,r0,r4,2);
	SI1(r3,r1,r2,r0,r4);	KL(r4,r1,r2,r0,r3,1);
	SI0(r4,r1,r2,r0,r3);	K(r2,r3,r1,r4,0);

	d[0] = cpu_to_le32(r2);
	d[1] = cpu_to_le32(r3);
	d[2] = cpu_to_le32(r1);
	d[3] = cpu_to_le32(r4);
}
/* Standard serpent single-block cipher registration. */
static struct crypto_alg serpent_alg = {
	.cra_name		=	"serpent",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	SERPENT_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct serpent_ctx),
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	/* self-referential list head - correct here */
	.cra_list		=	LIST_HEAD_INIT(serpent_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	SERPENT_MIN_KEY_SIZE,
	.cia_max_keysize	=	SERPENT_MAX_KEY_SIZE,
	.cia_setkey		=	serpent_setkey,
	.cia_encrypt		=	serpent_encrypt,
	.cia_decrypt		=	serpent_decrypt } }
};
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	u8 rev_key[SERPENT_MAX_KEY_SIZE];
	const u8 *p = key + keylen;
	u8 *q = rev_key;

	/* tnepres is serpent with the key bytes in reverse order
	 * (keylen <= 32 is guaranteed by cia_max_keysize). */
	while (p != key)
		*q++ = *--p;

	return serpent_setkey(tfm, rev_key, keylen);
}
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 *in = (const u32 *)src;
	u32 *out = (u32 *)dst;
	u32 tmp_in[4], tmp_out[4];
	int i;

	/* Byte-swap and word-reverse the block, run plain serpent, then
	 * undo the transformation on the result. */
	for (i = 0; i < 4; i++)
		tmp_in[i] = swab32(in[3 - i]);

	serpent_encrypt(tfm, (u8 *)tmp_out, (u8 *)tmp_in);

	for (i = 0; i < 4; i++)
		out[i] = swab32(tmp_out[3 - i]);
}
static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 *in = (const u32 *)src;
	u32 *out = (u32 *)dst;
	u32 tmp_in[4], tmp_out[4];
	int i;

	/* Same word-reverse/byte-swap wrapping as tnepres_encrypt, but
	 * around the serpent decryption primitive. */
	for (i = 0; i < 4; i++)
		tmp_in[i] = swab32(in[3 - i]);

	serpent_decrypt(tfm, (u8 *)tmp_out, (u8 *)tmp_in);

	for (i = 0; i < 4; i++)
		out[i] = swab32(tmp_out[3 - i]);
}
static struct crypto_alg tnepres_alg = {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
static int __init serpent_mod_init(void)
{
	int ret;

	/* Register serpent first; tnepres depends on it conceptually,
	 * and is rolled back if its own registration fails. */
	ret = crypto_register_alg(&serpent_alg);
	if (ret)
		return ret;

	ret = crypto_register_alg(&tnepres_alg);
	if (ret)
		crypto_unregister_alg(&serpent_alg);

	return ret;
}
/* Unregister both algorithms (reverse order of registration). */
static void __exit serpent_mod_fini(void)
{
	crypto_unregister_alg(&tnepres_alg);
	crypto_unregister_alg(&serpent_alg);
}
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS("tnepres");
| gpl-2.0 |
CandyDevices/kernel_motorola_msm8226 | drivers/media/video/timblogiw.c | 4982 | 21326 | /*
* timblogiw.c timberdale FPGA LogiWin Video In driver
* Copyright (c) 2009-2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA LogiWin Video In
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf-dma-contig.h>
#include <media/timb_video.h>
#define DRIVER_NAME "timb-video"
#define TIMBLOGIWIN_NAME "Timberdale Video-In"
#define TIMBLOGIW_VERSION_CODE 0x04
#define TIMBLOGIW_LINES_PER_DESC 44
#define TIMBLOGIW_MAX_VIDEO_MEM 16
#define TIMBLOGIW_HAS_DECODER(lw) (lw->pdata.encoder.module_name)
/* Per-device driver state for one LogiWin Video-In instance. */
struct timblogiw {
	struct video_device video_dev;		/* registered V4L2 node */
	struct v4l2_device v4l2_dev;
	struct mutex lock;	/* mutual exclusion */
	struct device *dev;			/* parent (bus) device if any */
	struct timb_video_platform_data pdata;	/* private copy of platform data */
	struct v4l2_subdev *sd_enc;	/* encoder */
	bool opened;				/* single-open guard */
};
/* Geometry and frame rate for one supported TV norm. */
struct timblogiw_tvnorm {
	v4l2_std_id std;
	u16 width;	/* active pixels per line */
	u16 height;	/* lines per frame */
	u8 fps;		/* frames per second */
};
/* Per-open-file state: videobuf queue, selected norm and DMA channel. */
struct timblogiw_fh {
	struct videobuf_queue		vb_vidq;
	struct timblogiw_tvnorm const	*cur_norm;	/* currently selected norm */
	struct list_head		capture;	/* queued capture buffers */
	struct dma_chan			*chan;
	spinlock_t			queue_lock; /* mutual exclusion */
	unsigned int			frame_count;	/* frames completed so far */
};
/* One capture buffer with its DMA bookkeeping. */
struct timblogiw_buffer {
	/* common v4l buffer stuff -- must be first */
	struct videobuf_buffer	vb;
	struct scatterlist	sg[16];		/* DMA scatter list */
	dma_cookie_t		cookie;		/* -1 once the transfer is done */
	struct timblogiw_fh	*fh;		/* owning file handle */
};
/* Supported analog TV norms; the first entry (PAL) is the default.
 * NOTE(review): not static - presumably nothing outside this file
 * references the table; confirm before narrowing the linkage. */
const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
	{
		.std			= V4L2_STD_PAL,
		.width			= 720,
		.height			= 576,
		.fps			= 25
	},
	{
		.std			= V4L2_STD_NTSC,
		.width			= 720,
		.height			= 480,
		.fps			= 30
	}
};
static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
{
	/* UYVY: two bytes per pixel, so the stride is twice the width. */
	return 2 * norm->width;
}
static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
{
	/* Whole-frame size in bytes: stride times line count. */
	return timblogiw_bytes_per_line(norm) * norm->height;
}
static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
{
int i;
for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
if (timblogiw_tvnorms[i].std & std)
return timblogiw_tvnorms + i;
/* default to first element */
return timblogiw_tvnorms;
}
/* DMA completion callback for one frame: marks the buffer done, wakes
 * any waiter, and promotes the next queued buffer to active.  Runs
 * under the file handle's queue_lock. */
static void timblogiw_dma_cb(void *data)
{
	struct timblogiw_buffer *buf = data;
	struct timblogiw_fh *fh = buf->fh;
	struct videobuf_buffer *vb = &buf->vb;

	spin_lock(&fh->queue_lock);

	/* mark the transfer done */
	buf->cookie = -1;

	fh->frame_count++;

	if (vb->state != VIDEOBUF_ERROR) {
		list_del(&vb->queue);
		do_gettimeofday(&vb->ts);
		/* two fields per completed frame */
		vb->field_count = fh->frame_count * 2;
		vb->state = VIDEOBUF_DONE;

		wake_up(&vb->done);
	}

	if (!list_empty(&fh->capture)) {
		vb = list_entry(fh->capture.next, struct videobuf_buffer,
			queue);
		vb->state = VIDEOBUF_ACTIVE;
	}

	spin_unlock(&fh->queue_lock);
}
static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
return chan->chan_id == (uintptr_t)filter_param;
}
/* IOCTL functions */
/* VIDIOC_G_FMT: report the current capture format, which is fully
 * determined by the currently selected TV norm (UYVY, progressive). */
static int timblogiw_g_fmt(struct file *file, void *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n", __func__);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* cur_norm may change under s_std; read it under the lock. */
	mutex_lock(&lw->lock);

	format->fmt.pix.width = fh->cur_norm->width;
	format->fmt.pix.height = fh->cur_norm->height;
	format->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	format->fmt.pix.bytesperline = timblogiw_bytes_per_line(fh->cur_norm);
	format->fmt.pix.sizeimage = timblogiw_frame_size(fh->cur_norm);
	format->fmt.pix.field = V4L2_FIELD_NONE;

	mutex_unlock(&lw->lock);

	return 0;
}
/*
 * VIDIOC_TRY_FMT: validate a requested format.
 * Only progressive, packed UYVY video capture is accepted; anything
 * else is rejected with -EINVAL.  The request is not modified.
 */
static int timblogiw_try_fmt(struct file *file, void *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_pix_format *pix = &format->fmt.pix;

	dev_dbg(&vdev->dev,
		"%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
		"bytes per line %d, size image: %d, colorspace: %d\n",
		__func__,
		pix->width, pix->height, pix->pixelformat, pix->field,
		pix->bytesperline, pix->sizeimage, pix->colorspace);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    pix->field != V4L2_FIELD_NONE ||
	    pix->pixelformat != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	return 0;
}
/*
 * VIDIOC_S_FMT: "set" the capture format.
 * Validation is delegated to timblogiw_try_fmt(); since the hardware
 * format is fixed by the current norm, the only effect is clamping the
 * caller's width/height to that norm.  Fails with -EBUSY while buffers
 * are queued.
 */
static int timblogiw_s_fmt(struct file *file, void *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	struct v4l2_pix_format *pix = &format->fmt.pix;
	int err;

	mutex_lock(&lw->lock);

	err = timblogiw_try_fmt(file, priv, format);
	if (err)
		goto out;

	if (videobuf_queue_is_busy(&fh->vb_vidq)) {
		dev_err(&vdev->dev, "%s queue busy\n", __func__);
		err = -EBUSY;
		goto out;
	}

	/* geometry is dictated by the current norm, not by the caller */
	pix->width = fh->cur_norm->width;
	pix->height = fh->cur_norm->height;

out:
	mutex_unlock(&lw->lock);
	return err;
}
/*
 * VIDIOC_QUERYCAP: report driver identity and capabilities.
 *
 * Fix: use strlcpy() for every string field.  The previous mix of
 * strncpy() (card, driver) and strlcpy() (bus_info) was inconsistent,
 * and strncpy() does not NUL-terminate on truncation; strlcpy() always
 * terminates the destination.
 */
static int timblogiw_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	memset(cap, 0, sizeof(*cap));
	strlcpy(cap->card, TIMBLOGIWIN_NAME, sizeof(cap->card));
	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->bus_info, vdev->name, sizeof(cap->bus_info));
	cap->version = TIMBLOGIW_VERSION_CODE;
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
		V4L2_CAP_READWRITE;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: enumerate the single supported pixel format.
 *
 * Fix: the human-readable description said "YUYV" although the fourcc
 * actually advertised (and produced by the hardware) is
 * V4L2_PIX_FMT_UYVY; the text now matches.  Also use strlcpy() so the
 * description is always NUL-terminated.
 */
static int timblogiw_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s, index: %d\n", __func__, fmt->index);

	/* exactly one format exists */
	if (fmt->index != 0)
		return -EINVAL;

	memset(fmt, 0, sizeof(*fmt));
	fmt->index = 0;
	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	strlcpy(fmt->description, "4:2:2, packed, UYVY",
		sizeof(fmt->description));
	fmt->pixelformat = V4L2_PIX_FMT_UYVY;

	return 0;
}
/*
 * VIDIOC_G_PARM: report the fixed frame interval of the current norm
 * (1/fps seconds per frame).
 */
static int timblogiw_g_parm(struct file *file, void *priv,
	struct v4l2_streamparm *sp)
{
	struct timblogiw_fh *handle = priv;
	struct v4l2_captureparm *capture = &sp->parm.capture;

	capture->capability = V4L2_CAP_TIMEPERFRAME;
	capture->timeperframe.denominator = handle->cur_norm->fps;
	capture->timeperframe.numerator = 1;

	return 0;
}
/* VIDIOC_REQBUFS: buffer allocation is handled entirely by videobuf. */
static int timblogiw_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *rb)
{
	struct timblogiw_fh *handle = priv;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_reqbufs(&handle->vb_vidq, rb);
}
/* VIDIOC_QUERYBUF: forwarded to videobuf. */
static int timblogiw_querybuf(struct file *file, void *priv,
	struct v4l2_buffer *b)
{
	struct timblogiw_fh *handle = priv;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_querybuf(&handle->vb_vidq, b);
}
/* VIDIOC_QBUF: forwarded to videobuf. */
static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct timblogiw_fh *handle = priv;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_qbuf(&handle->vb_vidq, b);
}
/* VIDIOC_DQBUF: forwarded to videobuf, honouring O_NONBLOCK. */
static int timblogiw_dqbuf(struct file *file, void *priv,
	struct v4l2_buffer *b)
{
	struct timblogiw_fh *handle = priv;
	struct video_device *vdev = video_devdata(file);
	int nonblocking = file->f_flags & O_NONBLOCK;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_dqbuf(&handle->vb_vidq, b, nonblocking);
}
/* VIDIOC_G_STD: return the standard cached on this file handle. */
static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct timblogiw_fh *handle = priv;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	*std = handle->cur_norm->std;

	return 0;
}
/*
 * VIDIOC_S_STD: switch TV standard.
 * When an external decoder subdev is configured it is asked first; the
 * handle's cached norm is only updated if the decoder accepted the new
 * standard (or when no decoder exists at all).
 */
static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	int err = 0;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	mutex_lock(&lw->lock);

	if (TIMBLOGIW_HAS_DECODER(lw))
		err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);

	if (!err)
		fh->cur_norm = timblogiw_get_norm(*std);

	mutex_unlock(&lw->lock);

	return err;
}
/*
 * VIDIOC_ENUMINPUT: describe the single hard-wired camera input,
 * advertising every standard known to the norm table.
 */
static int timblogiw_enuminput(struct file *file, void *priv,
	struct v4l2_input *inp)
{
	struct video_device *vdev = video_devdata(file);
	int n;

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	/* there is exactly one input */
	if (inp->index != 0)
		return -EINVAL;

	inp->index = 0;
	inp->type = V4L2_INPUT_TYPE_CAMERA;
	strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);

	inp->std = 0;
	for (n = 0; n < ARRAY_SIZE(timblogiw_tvnorms); n++)
		inp->std |= timblogiw_tvnorms[n].std;

	return 0;
}
/* VIDIOC_G_INPUT: the only input is index 0. */
static int timblogiw_g_input(struct file *file, void *priv,
	unsigned int *input)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	*input = 0;

	return 0;
}
/* VIDIOC_S_INPUT: accept only input 0 (nothing to actually switch). */
static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	return (input == 0) ? 0 : -EINVAL;
}
/*
 * VIDIOC_STREAMON: reset the per-stream frame counter and start the
 * videobuf queue.
 */
static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
		return -EINVAL;
	}

	handle->frame_count = 0;

	return videobuf_streamon(&handle->vb_vidq);
}
/* VIDIOC_STREAMOFF: stop the videobuf queue. */
static int timblogiw_streamoff(struct file *file, void *priv,
	unsigned int type)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s entry\n", __func__);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	return videobuf_streamoff(&handle->vb_vidq);
}
/*
 * VIDIOC_QUERYSTD: detect the incoming standard.
 * With a decoder subdev present the query is forwarded to it (and its
 * return value propagated); without one, the handle's current norm is
 * reported unchanged.
 */
static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n", __func__);

	if (TIMBLOGIW_HAS_DECODER(lw))
		/* NOTE(review): on subdev error *std is left untouched */
		return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
	else {
		*std = fh->cur_norm->std;
		return 0;
	}
}
/*
 * VIDIOC_ENUM_FRAMESIZES: one discrete size per norm, UYVY only.
 */
static int timblogiw_enum_framesizes(struct file *file, void *priv,
	struct v4l2_frmsizeenum *fsize)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n", __func__,
		fsize->index, fsize->pixel_format);

	if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = handle->cur_norm->width;
	fsize->discrete.height = handle->cur_norm->height;

	return 0;
}
/* Video buffer functions */
/*
 * videobuf .buf_setup: report the per-buffer size (one full frame for
 * the current norm) and clamp the buffer count so the total allocation
 * stays within TIMBLOGIW_MAX_VIDEO_MEM megabytes.
 */
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
	unsigned int *size)
{
	struct timblogiw_fh *fh = vq->priv_data;

	*size = timblogiw_frame_size(fh->cur_norm);

	if (!*count)
		*count = 32;

	while (*size * *count > TIMBLOGIW_MAX_VIDEO_MEM * 1024 * 1024)
		(*count)--;

	return 0;
}
/*
 * videobuf .buf_prepare: size/validate the buffer and, on first use
 * (VIDEOBUF_NEEDS_INIT), map it and build the scatterlist.
 *
 * The contiguous frame is split into DMA descriptors covering
 * TIMBLOGIW_LINES_PER_DESC lines each; the last entry is trimmed to the
 * remaining bytes.  Worst case for the PAL norm is 14 entries, which
 * fits the fixed 16-entry sg[] in struct timblogiw_buffer.
 */
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
	enum v4l2_field field)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	unsigned int data_size = timblogiw_frame_size(fh->cur_norm);
	int err = 0;

	if (vb->baddr && vb->bsize < data_size)
		/* User provided buffer, but it is too small */
		return -ENOMEM;

	vb->size = data_size;
	vb->width = fh->cur_norm->width;
	vb->height = fh->cur_norm->height;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int i;
		unsigned int size;
		unsigned int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
			timblogiw_bytes_per_line(fh->cur_norm);
		dma_addr_t addr;

		sg_init_table(buf->sg, ARRAY_SIZE(buf->sg));

		err = videobuf_iolock(vq, vb, NULL);
		if (err)
			goto err;

		addr = videobuf_to_dma_contig(vb);
		for (i = 0, size = 0; size < data_size; i++) {
			sg_dma_address(buf->sg + i) = addr + size;
			size += bytes_per_desc;
			/* trim the final descriptor to the frame size */
			sg_dma_len(buf->sg + i) = (size > data_size) ?
				(bytes_per_desc - (size - data_size)) :
				bytes_per_desc;
		}

		vb->state = VIDEOBUF_PREPARED;
		buf->cookie = -1;	/* no DMA in flight yet */
		buf->fh = fh;
	}

	return 0;

err:
	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
	return err;
}
/*
 * videobuf .buf_queue: link the buffer onto the capture list and submit
 * its scatterlist to the DMA engine.
 *
 * Entered with fh->queue_lock held by videobuf (the queue was
 * initialised with that lock), so the lock is dropped around the
 * dmaengine calls and re-taken before returning.  On prep failure the
 * buffer is unlinked again and left PREPARED.
 */
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	struct dma_async_tx_descriptor *desc;
	int sg_elems;
	int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
		timblogiw_bytes_per_line(fh->cur_norm);

	/* number of sg entries, rounding up for a partial last chunk */
	sg_elems = timblogiw_frame_size(fh->cur_norm) / bytes_per_desc;
	sg_elems +=
		(timblogiw_frame_size(fh->cur_norm) % bytes_per_desc) ? 1 : 0;

	if (list_empty(&fh->capture))
		vb->state = VIDEOBUF_ACTIVE;
	else
		vb->state = VIDEOBUF_QUEUED;

	list_add_tail(&vb->queue, &fh->capture);

	spin_unlock_irq(&fh->queue_lock);

	desc = dmaengine_prep_slave_sg(fh->chan,
		buf->sg, sg_elems, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
	if (!desc) {
		/* undo the enqueue: preparation of the transfer failed */
		spin_lock_irq(&fh->queue_lock);
		list_del_init(&vb->queue);
		vb->state = VIDEOBUF_PREPARED;
		return;
	}

	desc->callback_param = buf;
	desc->callback = timblogiw_dma_cb;
	buf->cookie = desc->tx_submit(desc);

	spin_lock_irq(&fh->queue_lock);
}
/*
 * videobuf .buf_release: wait for the buffer to leave the queue, drain
 * any DMA still in flight (cookie >= 0), then free the contiguous
 * memory and reset the buffer state.
 */
static void buffer_release(struct videobuf_queue *vq,
	struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);

	videobuf_waiton(vq, vb, 0, 0);

	if (buf->cookie >= 0)
		dma_sync_wait(fh->chan, buf->cookie);

	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}
/* videobuf operations wired into the queue in timblogiw_open(). */
static struct videobuf_queue_ops timblogiw_video_qops = {
	.buf_setup = buffer_setup,
	.buf_prepare = buffer_prepare,
	.buf_queue = buffer_queue,
	.buf_release = buffer_release,
};
/* Device Operations functions */
/*
 * Open the video device.  Only a single concurrent open is allowed
 * (guarded by lw->opened under lw->lock).  On the first open with a
 * configured decoder, the decoder subdev is registered lazily over I2C.
 * A private DMA channel is requested per handle and the videobuf queue
 * is initialised before the handle is published.
 */
static int timblogiw_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh;
	v4l2_std_id std;
	dma_cap_mask_t mask;
	int err = 0;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	mutex_lock(&lw->lock);
	if (lw->opened) {
		err = -EBUSY;
		goto out;
	}

	if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
		struct i2c_adapter *adapt;

		/* find the video decoder */
		adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
		if (!adapt) {
			dev_err(&vdev->dev, "No I2C bus #%d\n",
				lw->pdata.i2c_adapter);
			err = -ENODEV;
			goto out;
		}

		/* now find the encoder */
		lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
			lw->pdata.encoder.info, NULL);

		i2c_put_adapter(adapt);

		if (!lw->sd_enc) {
			dev_err(&vdev->dev, "Failed to get encoder: %s\n",
				lw->pdata.encoder.module_name);
			err = -ENODEV;
			goto out;
		}
	}

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (!fh) {
		err = -ENOMEM;
		goto out;
	}

	/* start from the default norm, then refine via standard detection */
	fh->cur_norm = timblogiw_tvnorms;
	/* NOTE(review): querystd's return is ignored; if the decoder's
	 * querystd fails, 'std' may be uninitialised here -- confirm. */
	timblogiw_querystd(file, fh, &std);
	fh->cur_norm = timblogiw_get_norm(std);

	INIT_LIST_HEAD(&fh->capture);
	spin_lock_init(&fh->queue_lock);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	/* find the DMA channel */
	fh->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
		(void *)(uintptr_t)lw->pdata.dma_channel);
	if (!fh->chan) {
		dev_err(&vdev->dev, "Failed to get DMA channel\n");
		kfree(fh);
		err = -ENODEV;
		goto out;
	}

	file->private_data = fh;

	videobuf_queue_dma_contig_init(&fh->vb_vidq,
		&timblogiw_video_qops, lw->dev, &fh->queue_lock,
		V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
		sizeof(struct timblogiw_buffer), fh, NULL);

	lw->opened = true;

out:
	mutex_unlock(&lw->lock);

	return err;
}
/*
 * Release the device: stop streaming and free buffers before releasing
 * the DMA channel, then clear the single-open guard.
 */
static int timblogiw_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	videobuf_stop(&fh->vb_vidq);
	videobuf_mmap_free(&fh->vb_vidq);

	dma_release_channel(fh->chan);

	kfree(fh);

	mutex_lock(&lw->lock);
	lw->opened = false;
	mutex_unlock(&lw->lock);

	return 0;
}
/* read(2): stream frames to userspace via videobuf's read helper. */
static ssize_t timblogiw_read(struct file *file, char __user *data,
	size_t count, loff_t *ppos)
{
	struct timblogiw_fh *handle = file->private_data;
	struct video_device *vdev = video_devdata(file);
	int nonblocking = file->f_flags & O_NONBLOCK;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_read_stream(&handle->vb_vidq, data, count, ppos, 0,
		nonblocking);
}
/* poll(2): delegate readiness reporting to videobuf. */
static unsigned int timblogiw_poll(struct file *file,
	struct poll_table_struct *wait)
{
	struct timblogiw_fh *handle = file->private_data;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_poll_stream(file, &handle->vb_vidq, wait);
}
/* mmap(2): map videobuf buffers into userspace. */
static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct timblogiw_fh *handle = file->private_data;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_mmap_mapper(&handle->vb_vidq, vma);
}
/* Platform device functions */
/* V4L2 ioctl dispatch table; wired into timblogiw_template below. */
static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
	.vidioc_querycap = timblogiw_querycap,
	.vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
	.vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
	.vidioc_try_fmt_vid_cap = timblogiw_try_fmt,
	.vidioc_s_fmt_vid_cap = timblogiw_s_fmt,
	.vidioc_g_parm = timblogiw_g_parm,
	.vidioc_reqbufs = timblogiw_reqbufs,
	.vidioc_querybuf = timblogiw_querybuf,
	.vidioc_qbuf = timblogiw_qbuf,
	.vidioc_dqbuf = timblogiw_dqbuf,
	.vidioc_g_std = timblogiw_g_std,
	.vidioc_s_std = timblogiw_s_std,
	.vidioc_enum_input = timblogiw_enuminput,
	.vidioc_g_input = timblogiw_g_input,
	.vidioc_s_input = timblogiw_s_input,
	.vidioc_streamon = timblogiw_streamon,
	.vidioc_streamoff = timblogiw_streamoff,
	.vidioc_querystd = timblogiw_querystd,
	.vidioc_enum_framesizes = timblogiw_enum_framesizes,
};
/* File operations for the /dev/video node. */
static __devinitconst struct v4l2_file_operations timblogiw_fops = {
	.owner = THIS_MODULE,
	.open = timblogiw_open,
	.release = timblogiw_close,
	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
	.mmap = timblogiw_mmap,
	.read = timblogiw_read,
	.poll = timblogiw_poll,
};
/* Template video_device, copied into each timblogiw instance in probe(). */
static __devinitconst struct video_device timblogiw_template = {
	.name = TIMBLOGIWIN_NAME,
	.fops = &timblogiw_fops,
	.ioctl_ops = &timblogiw_ioctl_ops,
	.release = video_device_release_empty,
	.minor = -1,	/* let the core pick a minor */
	.tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
};
/*
 * Platform probe: validate platform data, allocate the device state,
 * register the v4l2_device and the video node.  The DMA-capable parent
 * (pdev->dev.parent, when present) is used for buffer allocation.
 * Unwinds with goto-cleanup on failure.
 */
static int __devinit timblogiw_probe(struct platform_device *pdev)
{
	int err;
	struct timblogiw *lw = NULL;
	struct timb_video_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		err = -EINVAL;
		goto err;
	}

	if (!pdata->encoder.module_name)
		dev_info(&pdev->dev, "Running without decoder\n");

	lw = kzalloc(sizeof(*lw), GFP_KERNEL);
	if (!lw) {
		err = -ENOMEM;
		goto err;
	}

	/* prefer the parent (bus) device for DMA mappings */
	if (pdev->dev.parent)
		lw->dev = pdev->dev.parent;
	else
		lw->dev = &pdev->dev;

	memcpy(&lw->pdata, pdata, sizeof(lw->pdata));

	mutex_init(&lw->lock);

	lw->video_dev = timblogiw_template;

	strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
	err = v4l2_device_register(NULL, &lw->v4l2_dev);
	if (err)
		goto err_register;

	lw->video_dev.v4l2_dev = &lw->v4l2_dev;

	platform_set_drvdata(pdev, lw);
	video_set_drvdata(&lw->video_dev, lw);

	err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
	if (err) {
		dev_err(&pdev->dev, "Error reg video: %d\n", err);
		goto err_request;
	}

	return 0;

err_request:
	platform_set_drvdata(pdev, NULL);
	v4l2_device_unregister(&lw->v4l2_dev);
err_register:
	kfree(lw);
err:
	dev_err(&pdev->dev, "Failed to register: %d\n", err);

	return err;
}
/* Platform remove: tear everything down in reverse order of probe. */
static int __devexit timblogiw_remove(struct platform_device *pdev)
{
	struct timblogiw *lw = platform_get_drvdata(pdev);

	video_unregister_device(&lw->video_dev);
	v4l2_device_unregister(&lw->v4l2_dev);

	kfree(lw);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue; registered via module_platform_driver(). */
static struct platform_driver timblogiw_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
	.probe = timblogiw_probe,
	.remove = __devexit_p(timblogiw_remove),
};
/* Register the platform driver and standard module metadata. */
module_platform_driver(timblogiw_platform_driver);

MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);
| gpl-2.0 |
Team-Hydra/android_kernel_htc_msm8x60 | drivers/media/video/timblogiw.c | 4982 | 21326 | /*
* timblogiw.c timberdale FPGA LogiWin Video In driver
* Copyright (c) 2009-2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA LogiWin Video In
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf-dma-contig.h>
#include <media/timb_video.h>
#define DRIVER_NAME "timb-video"
#define TIMBLOGIWIN_NAME "Timberdale Video-In"
#define TIMBLOGIW_VERSION_CODE 0x04
#define TIMBLOGIW_LINES_PER_DESC 44
#define TIMBLOGIW_MAX_VIDEO_MEM 16
#define TIMBLOGIW_HAS_DECODER(lw) (lw->pdata.encoder.module_name)
/* Per-device driver state. */
struct timblogiw {
	struct video_device video_dev;
	struct v4l2_device v4l2_dev;	/* parent v4l2 device */
	struct mutex lock; /* mutual exclusion: opened, sd_enc, cur_norm */
	struct device *dev;		/* device used for DMA mappings */
	struct timb_video_platform_data pdata;
	struct v4l2_subdev *sd_enc;	/* encoder */
	bool opened;			/* single-open guard */
};
struct timblogiw_tvnorm {
v4l2_std_id std;
u16 width;
u16 height;
u8 fps;
};
struct timblogiw_fh {
struct videobuf_queue vb_vidq;
struct timblogiw_tvnorm const *cur_norm;
struct list_head capture;
struct dma_chan *chan;
spinlock_t queue_lock; /* mutual exclusion */
unsigned int frame_count;
};
struct timblogiw_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
struct scatterlist sg[16];
dma_cookie_t cookie;
struct timblogiw_fh *fh;
};
const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
{
.std = V4L2_STD_PAL,
.width = 720,
.height = 576,
.fps = 25
},
{
.std = V4L2_STD_NTSC,
.width = 720,
.height = 480,
.fps = 30
}
};
static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
{
return norm->width * 2;
}
static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
{
return norm->height * timblogiw_bytes_per_line(norm);
}
static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
{
int i;
for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
if (timblogiw_tvnorms[i].std & std)
return timblogiw_tvnorms + i;
/* default to first element */
return timblogiw_tvnorms;
}
static void timblogiw_dma_cb(void *data)
{
struct timblogiw_buffer *buf = data;
struct timblogiw_fh *fh = buf->fh;
struct videobuf_buffer *vb = &buf->vb;
spin_lock(&fh->queue_lock);
/* mark the transfer done */
buf->cookie = -1;
fh->frame_count++;
if (vb->state != VIDEOBUF_ERROR) {
list_del(&vb->queue);
do_gettimeofday(&vb->ts);
vb->field_count = fh->frame_count * 2;
vb->state = VIDEOBUF_DONE;
wake_up(&vb->done);
}
if (!list_empty(&fh->capture)) {
vb = list_entry(fh->capture.next, struct videobuf_buffer,
queue);
vb->state = VIDEOBUF_ACTIVE;
}
spin_unlock(&fh->queue_lock);
}
static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
return chan->chan_id == (uintptr_t)filter_param;
}
/* IOCTL functions */
static int timblogiw_g_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s entry\n", __func__);
if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
mutex_lock(&lw->lock);
format->fmt.pix.width = fh->cur_norm->width;
format->fmt.pix.height = fh->cur_norm->height;
format->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
format->fmt.pix.bytesperline = timblogiw_bytes_per_line(fh->cur_norm);
format->fmt.pix.sizeimage = timblogiw_frame_size(fh->cur_norm);
format->fmt.pix.field = V4L2_FIELD_NONE;
mutex_unlock(&lw->lock);
return 0;
}
static int timblogiw_try_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_pix_format *pix = &format->fmt.pix;
dev_dbg(&vdev->dev,
"%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
"bytes per line %d, size image: %d, colorspace: %d\n",
__func__,
pix->width, pix->height, pix->pixelformat, pix->field,
pix->bytesperline, pix->sizeimage, pix->colorspace);
if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (pix->field != V4L2_FIELD_NONE)
return -EINVAL;
if (pix->pixelformat != V4L2_PIX_FMT_UYVY)
return -EINVAL;
return 0;
}
static int timblogiw_s_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh = priv;
struct v4l2_pix_format *pix = &format->fmt.pix;
int err;
mutex_lock(&lw->lock);
err = timblogiw_try_fmt(file, priv, format);
if (err)
goto out;
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
dev_err(&vdev->dev, "%s queue busy\n", __func__);
err = -EBUSY;
goto out;
}
pix->width = fh->cur_norm->width;
pix->height = fh->cur_norm->height;
out:
mutex_unlock(&lw->lock);
return err;
}
/*
 * VIDIOC_QUERYCAP: report driver identity and capabilities.
 *
 * Fix: use strlcpy() for every string field.  The previous mix of
 * strncpy() (card, driver) and strlcpy() (bus_info) was inconsistent,
 * and strncpy() does not NUL-terminate on truncation; strlcpy() always
 * terminates the destination.
 */
static int timblogiw_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	memset(cap, 0, sizeof(*cap));
	strlcpy(cap->card, TIMBLOGIWIN_NAME, sizeof(cap->card));
	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->bus_info, vdev->name, sizeof(cap->bus_info));
	cap->version = TIMBLOGIW_VERSION_CODE;
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
		V4L2_CAP_READWRITE;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: enumerate the single supported pixel format.
 *
 * Fix: the human-readable description said "YUYV" although the fourcc
 * actually advertised is V4L2_PIX_FMT_UYVY; the text now matches.
 * Also use strlcpy() so the description is always NUL-terminated.
 */
static int timblogiw_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s, index: %d\n", __func__, fmt->index);

	/* exactly one format exists */
	if (fmt->index != 0)
		return -EINVAL;

	memset(fmt, 0, sizeof(*fmt));
	fmt->index = 0;
	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	strlcpy(fmt->description, "4:2:2, packed, UYVY",
		sizeof(fmt->description));
	fmt->pixelformat = V4L2_PIX_FMT_UYVY;

	return 0;
}
static int timblogiw_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct timblogiw_fh *fh = priv;
struct v4l2_captureparm *cp = &sp->parm.capture;
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
cp->timeperframe.denominator = fh->cur_norm->fps;
return 0;
}
static int timblogiw_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_reqbufs(&fh->vb_vidq, rb);
}
static int timblogiw_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_querybuf(&fh->vb_vidq, b);
}
static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_qbuf(&fh->vb_vidq, b);
}
static int timblogiw_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
}
static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
*std = fh->cur_norm->std;
return 0;
}
static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh = priv;
int err = 0;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
mutex_lock(&lw->lock);
if (TIMBLOGIW_HAS_DECODER(lw))
err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
if (!err)
fh->cur_norm = timblogiw_get_norm(*std);
mutex_unlock(&lw->lock);
return err;
}
static int timblogiw_enuminput(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct video_device *vdev = video_devdata(file);
int i;
dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
if (inp->index != 0)
return -EINVAL;
inp->index = 0;
strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = 0;
for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
inp->std |= timblogiw_tvnorms[i].std;
return 0;
}
static int timblogiw_g_input(struct file *file, void *priv,
unsigned int *input)
{
struct video_device *vdev = video_devdata(file);
dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
*input = 0;
return 0;
}
static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
{
struct video_device *vdev = video_devdata(file);
dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
if (input != 0)
return -EINVAL;
return 0;
}
static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
return -EINVAL;
}
fh->frame_count = 0;
return videobuf_streamon(&fh->vb_vidq);
}
static int timblogiw_streamoff(struct file *file, void *priv,
unsigned int type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s entry\n", __func__);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
return videobuf_streamoff(&fh->vb_vidq);
}
static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s entry\n", __func__);
if (TIMBLOGIW_HAS_DECODER(lw))
return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
else {
*std = fh->cur_norm->std;
return 0;
}
}
static int timblogiw_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n", __func__,
fsize->index, fsize->pixel_format);
if ((fsize->index != 0) ||
(fsize->pixel_format != V4L2_PIX_FMT_UYVY))
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fh->cur_norm->width;
fsize->discrete.height = fh->cur_norm->height;
return 0;
}
/* Video buffer functions */
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct timblogiw_fh *fh = vq->priv_data;
*size = timblogiw_frame_size(fh->cur_norm);
if (!*count)
*count = 32;
while (*size * *count > TIMBLOGIW_MAX_VIDEO_MEM * 1024 * 1024)
(*count)--;
return 0;
}
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct timblogiw_fh *fh = vq->priv_data;
struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
vb);
unsigned int data_size = timblogiw_frame_size(fh->cur_norm);
int err = 0;
if (vb->baddr && vb->bsize < data_size)
/* User provided buffer, but it is too small */
return -ENOMEM;
vb->size = data_size;
vb->width = fh->cur_norm->width;
vb->height = fh->cur_norm->height;
vb->field = field;
if (vb->state == VIDEOBUF_NEEDS_INIT) {
int i;
unsigned int size;
unsigned int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
timblogiw_bytes_per_line(fh->cur_norm);
dma_addr_t addr;
sg_init_table(buf->sg, ARRAY_SIZE(buf->sg));
err = videobuf_iolock(vq, vb, NULL);
if (err)
goto err;
addr = videobuf_to_dma_contig(vb);
for (i = 0, size = 0; size < data_size; i++) {
sg_dma_address(buf->sg + i) = addr + size;
size += bytes_per_desc;
sg_dma_len(buf->sg + i) = (size > data_size) ?
(bytes_per_desc - (size - data_size)) :
bytes_per_desc;
}
vb->state = VIDEOBUF_PREPARED;
buf->cookie = -1;
buf->fh = fh;
}
return 0;
err:
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
return err;
}
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct timblogiw_fh *fh = vq->priv_data;
struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
vb);
struct dma_async_tx_descriptor *desc;
int sg_elems;
int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
timblogiw_bytes_per_line(fh->cur_norm);
sg_elems = timblogiw_frame_size(fh->cur_norm) / bytes_per_desc;
sg_elems +=
(timblogiw_frame_size(fh->cur_norm) % bytes_per_desc) ? 1 : 0;
if (list_empty(&fh->capture))
vb->state = VIDEOBUF_ACTIVE;
else
vb->state = VIDEOBUF_QUEUED;
list_add_tail(&vb->queue, &fh->capture);
spin_unlock_irq(&fh->queue_lock);
desc = dmaengine_prep_slave_sg(fh->chan,
buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
list_del_init(&vb->queue);
vb->state = VIDEOBUF_PREPARED;
return;
}
desc->callback_param = buf;
desc->callback = timblogiw_dma_cb;
buf->cookie = desc->tx_submit(desc);
spin_lock_irq(&fh->queue_lock);
}
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct timblogiw_fh *fh = vq->priv_data;
struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
vb);
videobuf_waiton(vq, vb, 0, 0);
if (buf->cookie >= 0)
dma_sync_wait(fh->chan, buf->cookie);
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
}
static struct videobuf_queue_ops timblogiw_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
/* Device Operations functions */
static int timblogiw_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh;
v4l2_std_id std;
dma_cap_mask_t mask;
int err = 0;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
mutex_lock(&lw->lock);
if (lw->opened) {
err = -EBUSY;
goto out;
}
if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
struct i2c_adapter *adapt;
/* find the video decoder */
adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
if (!adapt) {
dev_err(&vdev->dev, "No I2C bus #%d\n",
lw->pdata.i2c_adapter);
err = -ENODEV;
goto out;
}
/* now find the encoder */
lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
lw->pdata.encoder.info, NULL);
i2c_put_adapter(adapt);
if (!lw->sd_enc) {
dev_err(&vdev->dev, "Failed to get encoder: %s\n",
lw->pdata.encoder.module_name);
err = -ENODEV;
goto out;
}
}
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (!fh) {
err = -ENOMEM;
goto out;
}
fh->cur_norm = timblogiw_tvnorms;
timblogiw_querystd(file, fh, &std);
fh->cur_norm = timblogiw_get_norm(std);
INIT_LIST_HEAD(&fh->capture);
spin_lock_init(&fh->queue_lock);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_PRIVATE, mask);
/* find the DMA channel */
fh->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
(void *)(uintptr_t)lw->pdata.dma_channel);
if (!fh->chan) {
dev_err(&vdev->dev, "Failed to get DMA channel\n");
kfree(fh);
err = -ENODEV;
goto out;
}
file->private_data = fh;
videobuf_queue_dma_contig_init(&fh->vb_vidq,
&timblogiw_video_qops, lw->dev, &fh->queue_lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct timblogiw_buffer), fh, NULL);
lw->opened = true;
out:
mutex_unlock(&lw->lock);
return err;
}
/*
 * release() file operation: stop streaming, free mapped buffers and
 * the private DMA channel, then mark the device free for the next
 * opener.
 */
static int timblogiw_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: Entry\n", __func__);

	videobuf_stop(&fh->vb_vidq);
	videobuf_mmap_free(&fh->vb_vidq);

	dma_release_channel(fh->chan);

	kfree(fh);

	mutex_lock(&lw->lock);
	lw->opened = false;
	mutex_unlock(&lw->lock);
	return 0;
}
/*
 * read() file operation: stream captured frames to userspace through
 * the videobuf helper, honouring O_NONBLOCK.
 */
static ssize_t timblogiw_read(struct file *file, char __user *data,
	size_t count, loff_t *ppos)
{
	struct timblogiw_fh *fh = file->private_data;
	struct video_device *vdev = video_devdata(file);
	int nonblocking = file->f_flags & O_NONBLOCK;

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
		nonblocking);
}
/* poll() file operation: delegate readiness checks to videobuf. */
static unsigned int timblogiw_poll(struct file *file,
	struct poll_table_struct *wait)
{
	struct timblogiw_fh *fh = file->private_data;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
/* mmap() file operation: map capture buffers via the videobuf layer. */
static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct timblogiw_fh *fh = file->private_data;
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: entry\n", __func__);

	return videobuf_mmap_mapper(&fh->vb_vidq, vma);
}
/* Platform device functions */
/* V4L2 ioctl dispatch table; handlers are defined earlier in this file */
static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
	.vidioc_querycap		= timblogiw_querycap,
	.vidioc_enum_fmt_vid_cap	= timblogiw_enum_fmt,
	.vidioc_g_fmt_vid_cap		= timblogiw_g_fmt,
	.vidioc_try_fmt_vid_cap		= timblogiw_try_fmt,
	.vidioc_s_fmt_vid_cap		= timblogiw_s_fmt,
	.vidioc_g_parm			= timblogiw_g_parm,
	.vidioc_reqbufs			= timblogiw_reqbufs,
	.vidioc_querybuf		= timblogiw_querybuf,
	.vidioc_qbuf			= timblogiw_qbuf,
	.vidioc_dqbuf			= timblogiw_dqbuf,
	.vidioc_g_std			= timblogiw_g_std,
	.vidioc_s_std			= timblogiw_s_std,
	.vidioc_enum_input		= timblogiw_enuminput,
	.vidioc_g_input			= timblogiw_g_input,
	.vidioc_s_input			= timblogiw_s_input,
	.vidioc_streamon		= timblogiw_streamon,
	.vidioc_streamoff		= timblogiw_streamoff,
	.vidioc_querystd		= timblogiw_querystd,
	.vidioc_enum_framesizes		= timblogiw_enum_framesizes,
};

/* file_operations: all ioctls are routed through video_ioctl2 */
static __devinitconst struct v4l2_file_operations timblogiw_fops = {
	.owner		= THIS_MODULE,
	.open		= timblogiw_open,
	.release	= timblogiw_close,
	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
	.mmap		= timblogiw_mmap,
	.read		= timblogiw_read,
	.poll		= timblogiw_poll,
};

/* template copied into lw->video_dev by timblogiw_probe() */
static __devinitconst struct video_device timblogiw_template = {
	.name		= TIMBLOGIWIN_NAME,
	.fops		= &timblogiw_fops,
	.ioctl_ops	= &timblogiw_ioctl_ops,
	.release	= video_device_release_empty,
	.minor		= -1,
	.tvnorms	= V4L2_STD_PAL | V4L2_STD_NTSC
};
/*
 * Platform probe: allocate driver state, register the V4L2 device and
 * the video capture device.  The decoder subdevice (if any) is bound
 * lazily on first open().
 */
static int __devinit timblogiw_probe(struct platform_device *pdev)
{
	int err;
	struct timblogiw *lw = NULL;
	struct timb_video_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		err = -EINVAL;
		goto err;
	}

	if (!pdata->encoder.module_name)
		dev_info(&pdev->dev, "Running without decoder\n");

	lw = kzalloc(sizeof(*lw), GFP_KERNEL);
	if (!lw) {
		err = -ENOMEM;
		goto err;
	}

	/* prefer the parent (MFD) device for DMA work, if there is one */
	if (pdev->dev.parent)
		lw->dev = pdev->dev.parent;
	else
		lw->dev = &pdev->dev;

	memcpy(&lw->pdata, pdata, sizeof(lw->pdata));

	mutex_init(&lw->lock);

	lw->video_dev = timblogiw_template;

	strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
	err = v4l2_device_register(NULL, &lw->v4l2_dev);
	if (err)
		goto err_register;

	lw->video_dev.v4l2_dev = &lw->v4l2_dev;

	platform_set_drvdata(pdev, lw);
	video_set_drvdata(&lw->video_dev, lw);

	err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
	if (err) {
		dev_err(&pdev->dev, "Error reg video: %d\n", err);
		goto err_request;
	}

	return 0;

err_request:
	platform_set_drvdata(pdev, NULL);
	v4l2_device_unregister(&lw->v4l2_dev);
err_register:
	kfree(lw);
err:
	dev_err(&pdev->dev, "Failed to register: %d\n", err);
	return err;
}
/* Undo timblogiw_probe(): unregister the video and V4L2 devices. */
static int __devexit timblogiw_remove(struct platform_device *pdev)
{
	struct timblogiw *lw = platform_get_drvdata(pdev);

	video_unregister_device(&lw->video_dev);
	v4l2_device_unregister(&lw->v4l2_dev);

	platform_set_drvdata(pdev, NULL);
	kfree(lw);

	return 0;
}
static struct platform_driver timblogiw_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = timblogiw_probe,
.remove = __devexit_p(timblogiw_remove),
};
module_platform_driver(timblogiw_platform_driver);
MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);
| gpl-2.0 |
Zaaap/android_kernel_lge_d620 | arch/powerpc/sysdev/ppc4xx_cpm.c | 7798 | 7793 | /*
* PowerPC 4xx Clock and Power Management
*
* Copyright (C) 2010, Applied Micro Circuits Corporation
* Victor Gallardo (vgallardo@apm.com)
*
* Based on arch/powerpc/platforms/44x/idle.c:
* Jerone Young <jyoung5@us.ibm.com>
* Copyright 2008 IBM Corp.
*
* Based on arch/powerpc/sysdev/fsl_pmc.c:
* Anton Vorontsov <avorontsov@ru.mvista.com>
* Copyright 2009 MontaVista Software, Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/suspend.h>
#include <asm/dcr.h>
#include <asm/dcr-native.h>
#include <asm/machdep.h>
/* DCR register indices within the CPM block (see cpm.dcr_offset[]) */
#define CPM_ER	0	/* enable */
#define CPM_FR	1	/* force */
#define CPM_SR	2	/* status */

/* indices into idle_mode[] */
#define CPM_IDLE_WAIT	0
#define CPM_IDLE_DOZE	1

/*
 * Driver state: the mapped DCR range plus the device-tree supplied
 * masks of units to gate in each power state.
 */
struct cpm {
	dcr_host_t	dcr_host;	/* mapped DCR range */
	unsigned int	dcr_offset[3];	/* per-register offsets, indexed by CPM_* */
	unsigned int	powersave_off;	/* set by the "powersave=off" boot arg */
	unsigned int	unused;		/* units gated permanently at init */
	unsigned int	idle_doze;	/* units gated while dozing */
	unsigned int	standby;	/* units gated in standby */
	unsigned int	suspend;	/* units gated in suspend-to-mem */
};

static struct cpm cpm;

/* one selectable CPU idle strategy, exposed through sysfs */
struct cpm_idle_mode {
	unsigned int enabled;	/* non-zero for the active mode */
	const char  *name;	/* sysfs token */
};

static struct cpm_idle_mode idle_mode[] = {
	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
	[CPM_IDLE_DOZE] = { 0, "doze" },
};
/*
 * OR @mask into the CPM register selected by @cpm_reg (CPM_ER/FR/SR)
 * and return the register's previous value so the caller can restore
 * it later.
 */
static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
{
	unsigned int value;

	/* CPM controller supports 3 different types of sleep interface
	 * known as class 1, 2 and 3. For class 1 units, they are
	 * unconditionally put to sleep when the corresponding CPM bit is
	 * set. For class 2 and 3 units this is not case; if they can be
	 * put to to sleep, they will. Here we do not verify, we just
	 * set them and expect them to eventually go off when they can.
	 */
	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);

	/* return old state, to restore later if needed */
	return value;
}
/*
 * Enter the PowerPC "wait" state: set MSR[WE] with external, critical
 * and debug interrupts enabled so any interrupt resumes execution,
 * then restore the original MSR.
 */
static void cpm_idle_wait(void)
{
	unsigned long msr_save;

	/* save off initial state */
	msr_save = mfmsr();

	/* sync required when CPM0_ER[CPU] is set */
	mb();

	/* set wait state MSR */
	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
	isync();

	/* return to initial state */
	mtmsr(msr_save);
	isync();
}
/*
 * Gate the units in @mask via CPM_ER for the duration of one wait
 * period, then restore the previous enable state.
 */
static void cpm_idle_sleep(unsigned int mask)
{
	unsigned int er_save;

	/* update CPM_ER state */
	er_save = cpm_set(CPM_ER, mask);

	/* go to wait state so that CPM0_ER[CPU] can take effect */
	cpm_idle_wait();

	/* restore CPM_ER state */
	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
}

/* "doze" idle: wait with the device-tree "idle-doze" units gated */
static void cpm_idle_doze(void)
{
	cpm_idle_sleep(cpm.idle_doze);
}
/*
 * Make @mode the one and only enabled idle mode; a no-op when the
 * requested mode is already active.
 */
static void cpm_idle_config(int mode)
{
	int idx;

	if (idle_mode[mode].enabled)
		return;

	for (idx = 0; idx < ARRAY_SIZE(idle_mode); idx++)
		idle_mode[idx].enabled = (idx == mode);
}
/*
 * Sysfs show handler: list every idle mode, with the active one in
 * square brackets, e.g. "[wait] doze\n".  Returns bytes written.
 */
static ssize_t cpm_idle_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	char *s = buf;
	int i;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		if (idle_mode[i].enabled)
			s += sprintf(s, "[%s] ", idle_mode[i].name);
		else
			s += sprintf(s, "%s ", idle_mode[i].name);
	}

	/* idle_mode[] is never empty, so s > buf here */
	*(s-1) = '\n'; /* convert the last space to a newline */

	return s - buf;
}
/*
 * Sysfs store handler: accept one of the idle_mode[] names
 * (optionally newline-terminated, as echo(1) produces) and make it
 * the active idle mode.
 *
 * Returns the number of bytes consumed, or -EINVAL for input that
 * does not exactly match a known mode name.
 */
static ssize_t cpm_idle_store(struct kobject *kobj,
	struct kobj_attribute *attr,
	const char *buf, size_t n)
{
	int i;
	char *p;
	int len;

	/* strip a single trailing newline, if any */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		/*
		 * Require an exact match: the previous strncmp()-only
		 * test accepted any prefix of a mode name, including
		 * an empty write (which silently picked idle_mode[0]).
		 */
		if (len == strlen(idle_mode[i].name) &&
		    strncmp(buf, idle_mode[i].name, len) == 0) {
			cpm_idle_config(i);
			return n;
		}
	}

	return -EINVAL;
}
static struct kobj_attribute cpm_idle_attr =
__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
/*
 * Expose the idle-mode selector as a sysfs attribute on CPU 0.
 * Failure is non-fatal — the kernel still idles using the default
 * mode — so we only warn.
 */
static void cpm_idle_config_sysfs(void)
{
	struct device *dev;
	int ret;	/* sysfs_create_file() returns int, not unsigned long */

	dev = get_cpu_device(0);

	ret = sysfs_create_file(&dev->kobj,
		&cpm_idle_attr.attr);
	if (ret)
		printk(KERN_WARNING
			"cpm: failed to create idle sysfs entry\n");
}
/* ppc_md.power_save hook: dispatch to the currently enabled idle mode */
static void cpm_idle(void)
{
	if (idle_mode[CPM_IDLE_DOZE].enabled)
		cpm_idle_doze();
	else
		cpm_idle_wait();
}

/*
 * suspend_ops.valid: a state is supported only when the device tree
 * supplied a unit mask for it (cpm.standby / cpm.suspend).
 */
static int cpm_suspend_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return !!cpm.standby;
	case PM_SUSPEND_MEM:
		return !!cpm.suspend;
	default:
		return 0;
	}
}
/*
 * Sleep with @mask gated while the decrementer interrupt is held off,
 * so only an external wake event ends the sleep.
 */
static void cpm_suspend_standby(unsigned int mask)
{
	unsigned long tcr_save;

	/* disable decrement interrupt */
	tcr_save = mfspr(SPRN_TCR);
	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);

	/* go to sleep state */
	cpm_idle_sleep(mask);

	/* restore decrement interrupt */
	mtspr(SPRN_TCR, tcr_save);
}

/*
 * suspend_ops.enter: standby and suspend-to-mem use the same
 * mechanism, differing only in which units are gated.
 */
static int cpm_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		cpm_suspend_standby(cpm.standby);
		break;
	case PM_SUSPEND_MEM:
		cpm_suspend_standby(cpm.suspend);
		break;
	}

	return 0;
}
static struct platform_suspend_ops cpm_suspend_ops = {
.valid = cpm_suspend_valid,
.enter = cpm_suspend_enter,
};
/*
 * Read a u32 device-tree property; returns 0 when it is absent or
 * shorter than a u32.  0 therefore means both "missing" and a zero
 * value — the callers here only act on non-zero masks, so that is
 * acceptable.
 */
static int cpm_get_uint_property(struct device_node *np,
	const char *name)
{
	int len;
	const unsigned int *prop = of_get_property(np, name, &len);

	if (prop == NULL || len < sizeof(u32))
		return 0;

	return *prop;
}
/*
 * Late-init entry point: install the idle hook, map the CPM DCRs
 * described by the "ibm,cpm" device-tree node, permanently gate any
 * unused units, and register the sysfs/suspend interfaces the device
 * tree enables.  Returns 0, or -EINVAL for a missing/bad node.
 */
static int __init cpm_init(void)
{
	struct device_node *np;
	int dcr_base, dcr_len;
	int ret = 0;

	/* the wait-idle hook is useful even without a CPM node */
	if (!cpm.powersave_off) {
		cpm_idle_config(CPM_IDLE_WAIT);
		ppc_md.power_save = &cpm_idle;
	}

	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
	if (!np) {
		ret = -EINVAL;
		goto out;
	}

	dcr_base = dcr_resource_start(np, 0);
	dcr_len = dcr_resource_len(np, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
			np->full_name);
		ret = -EINVAL;
		goto out;
	}

	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);

	if (!DCR_MAP_OK(cpm.dcr_host)) {
		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
			np->full_name);
		ret = -EINVAL;
		goto out;
	}

	/* All 4xx SoCs with a CPM controller have one of two
	 * different order for the CPM registers. Some have the
	 * CPM registers in the following order (ER,FR,SR). The
	 * others have them in the following order (SR,ER,FR).
	 */
	if (cpm_get_uint_property(np, "er-offset") == 0) {
		cpm.dcr_offset[CPM_ER] = 0;
		cpm.dcr_offset[CPM_FR] = 1;
		cpm.dcr_offset[CPM_SR] = 2;
	} else {
		cpm.dcr_offset[CPM_ER] = 1;
		cpm.dcr_offset[CPM_FR] = 2;
		cpm.dcr_offset[CPM_SR] = 0;
	}

	/* Now let's see what IPs to turn off for the following modes */
	cpm.unused = cpm_get_uint_property(np, "unused-units");
	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
	cpm.standby = cpm_get_uint_property(np, "standby");
	cpm.suspend = cpm_get_uint_property(np, "suspend");

	/* If some IPs are unused let's turn them off now */
	if (cpm.unused) {
		cpm_set(CPM_ER, cpm.unused);
		cpm_set(CPM_FR, cpm.unused);
	}

	/* Now let's export interfaces */
	if (!cpm.powersave_off && cpm.idle_doze)
		cpm_idle_config_sysfs();

	if (cpm.standby || cpm.suspend)
		suspend_set_ops(&cpm_suspend_ops);
out:
	if (np)
		of_node_put(np);
	return ret;
}
late_initcall(cpm_init);
/*
 * Boot-argument handler for "powersave=off".
 *
 * Returning 1 tells the boot-option parser that the argument has
 * been consumed; the previous "return 0" caused the option to be
 * treated as unknown and passed on to init.
 */
static int __init cpm_powersave_off(char *arg)
{
	cpm.powersave_off = 1;
	return 1;
}
__setup("powersave=off", cpm_powersave_off);
| gpl-2.0 |
forumber/temiz_kernel_g2 | arch/arm/mach-ks8695/irq.c | 8822 | 4284 | /*
* arch/arm/mach-ks8695/irq.c
*
* Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
* Copyright (C) 2006 Simtec Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/regs-irq.h>
#include <mach/regs-gpio.h>
/* Mask an interrupt by clearing its bit in the INTEN register. */
static void ks8695_irq_mask(struct irq_data *d)
{
	unsigned long inten;

	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
	inten &= ~(1 << d->irq);

	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}

/* Unmask an interrupt by setting its bit in the INTEN register. */
static void ks8695_irq_unmask(struct irq_data *d)
{
	unsigned long inten;

	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
	inten |= (1 << d->irq);

	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
}

/* Acknowledge (clear) a latched interrupt in the status register. */
static void ks8695_irq_ack(struct irq_data *d)
{
	__raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST);
}
static struct irq_chip ks8695_irq_level_chip;
static struct irq_chip ks8695_irq_edge_chip;
/*
 * irq_set_type for the four external (GPIO) interrupt lines: program
 * the trigger-mode bits in the IOPC register and switch the IRQ to
 * the level- or edge-flow handler accordingly.  Returns -EINVAL for
 * an unsupported trigger type or a non-external IRQ.
 */
static int ks8695_irq_set_type(struct irq_data *d, unsigned int type)
{
	unsigned long ctrl, mode;
	unsigned short level_triggered = 0;

	ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);

	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		mode = IOPC_TM_HIGH;
		level_triggered = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		mode = IOPC_TM_LOW;
		level_triggered = 1;
		break;
	case IRQ_TYPE_EDGE_RISING:
		mode = IOPC_TM_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		mode = IOPC_TM_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		mode = IOPC_TM_EDGE;
		break;
	default:
		return -EINVAL;
	}

	/* each external line has its own mode field in IOPC */
	switch (d->irq) {
	case KS8695_IRQ_EXTERN0:
		ctrl &= ~IOPC_IOEINT0TM;
		ctrl |= IOPC_IOEINT0_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN1:
		ctrl &= ~IOPC_IOEINT1TM;
		ctrl |= IOPC_IOEINT1_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN2:
		ctrl &= ~IOPC_IOEINT2TM;
		ctrl |= IOPC_IOEINT2_MODE(mode);
		break;
	case KS8695_IRQ_EXTERN3:
		ctrl &= ~IOPC_IOEINT3TM;
		ctrl |= IOPC_IOEINT3_MODE(mode);
		break;
	default:
		return -EINVAL;
	}

	/* pick the matching flow handler before writing the new mode */
	if (level_triggered) {
		irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip,
					 handle_level_irq);
	}
	else {
		irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip,
					 handle_edge_irq);
	}

	__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);

	return 0;
}
static struct irq_chip ks8695_irq_level_chip = {
.irq_ack = ks8695_irq_mask,
.irq_mask = ks8695_irq_mask,
.irq_unmask = ks8695_irq_unmask,
.irq_set_type = ks8695_irq_set_type,
};
static struct irq_chip ks8695_irq_edge_chip = {
.irq_ack = ks8695_irq_ack,
.irq_mask = ks8695_irq_mask,
.irq_unmask = ks8695_irq_unmask,
.irq_set_type = ks8695_irq_set_type,
};
/*
 * Initialise the KS8695 interrupt controller: mask and clear all
 * sources, then install per-IRQ chips/handlers.  External GPIO
 * interrupts may later be re-typed via ks8695_irq_set_type().
 */
void __init ks8695_init_irq(void)
{
	unsigned int irq;

	/* Disable all interrupts initially */
	__raw_writel(0, KS8695_IRQ_VA + KS8695_INTMC);
	__raw_writel(0, KS8695_IRQ_VA + KS8695_INTEN);

	for (irq = 0; irq < NR_IRQS; irq++) {
		switch (irq) {
		/* Level-triggered interrupts */
		case KS8695_IRQ_BUS_ERROR:
		case KS8695_IRQ_UART_MODEM_STATUS:
		case KS8695_IRQ_UART_LINE_STATUS:
		case KS8695_IRQ_UART_RX:
		case KS8695_IRQ_COMM_TX:
		case KS8695_IRQ_COMM_RX:
			irq_set_chip_and_handler(irq,
				&ks8695_irq_level_chip, handle_level_irq);
			break;

		/* Edge-triggered interrupts */
		default:
			/* clear pending bit */
			ks8695_irq_ack(irq_get_irq_data(irq));
			irq_set_chip_and_handler(irq,
				&ks8695_irq_edge_chip, handle_edge_irq);
		}

		set_irq_flags(irq, IRQF_VALID);
	}
}
| gpl-2.0 |
Backspace-Dev/x920d-jp | drivers/net/ethernet/cisco/enic/vnic_intr.c | 9590 | 2028 | /*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
/* Drop the cached pointer to the interrupt control registers. */
void vnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}

/*
 * Bind @intr to the @index-th INTR_CTRL register block of @vdev.
 * Returns 0 on success or -EINVAL when the resource does not exist.
 */
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
	unsigned int index)
{
	intr->index = index;
	intr->vdev = vdev;

	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
	if (!intr->ctrl) {
		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
		return -EINVAL;
	}

	return 0;
}

/*
 * Program the coalescing timer/type and mask-on-assertion policy,
 * then clear any outstanding interrupt credits.
 */
void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	vnic_intr_coalescing_timer_set(intr, coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	iowrite32(0, &intr->ctrl->int_credits);
}

/* Convert @coalescing_timer from usec to hardware units and program it. */
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
	u32 coalescing_timer)
{
	iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
		coalescing_timer), &intr->ctrl->coalescing_timer);
}

/* Discard any pending interrupt credits. */
void vnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
| gpl-2.0 |
DevriesL/SM-G9208_ImageBreaker | drivers/pci/hotplug/cpqphp_sysfs.c | 10614 | 5826 | /*
* Compaq Hot Plug Controller Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include "cpqphp.h"
static DEFINE_MUTEX(cpqphp_mutex);
/*
 * Dump the controller's free-resource lists (memory, prefetchable
 * memory, I/O, bus numbers) into @buf, at most 11 entries per list.
 * Returns the number of bytes written.
 *
 * NOTE(review): output is not bounded by a buffer size; the 4-page
 * buffer allocated in open() is assumed to be large enough.
 */
static int show_ctrl (struct controller *ctrl, char *buf)
{
	char *out = buf;
	int index;
	struct pci_resource *res;

	out += sprintf(buf, "Free resources: memory\n");
	index = 11;
	res = ctrl->mem_head;
	while (res && index--) {
		out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
		res = res->next;
	}
	out += sprintf(out, "Free resources: prefetchable memory\n");
	index = 11;
	res = ctrl->p_mem_head;
	while (res && index--) {
		out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
		res = res->next;
	}
	out += sprintf(out, "Free resources: IO\n");
	index = 11;
	res = ctrl->io_head;
	while (res && index--) {
		out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
		res = res->next;
	}
	out += sprintf(out, "Free resources: bus numbers\n");
	index = 11;
	res = ctrl->bus_head;
	while (res && index--) {
		out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
		res = res->next;
	}

	return out - buf;
}
/*
 * Dump the per-slot assigned-resource lists into @buf, walking every
 * slot on the controller; at most 11 entries per list are printed.
 * Returns the number of bytes written.
 */
static int show_dev (struct controller *ctrl, char *buf)
{
	char * out = buf;
	int index;
	struct pci_resource *res;
	struct pci_func *new_slot;
	struct slot *slot;

	slot = ctrl->slot;

	while (slot) {
		new_slot = cpqhp_slot_find(slot->bus, slot->device, 0);
		if (!new_slot)
			break;
		out += sprintf(out, "assigned resources: memory\n");
		index = 11;
		res = new_slot->mem_head;
		while (res && index--) {
			out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
			res = res->next;
		}
		out += sprintf(out, "assigned resources: prefetchable memory\n");
		index = 11;
		res = new_slot->p_mem_head;
		while (res && index--) {
			out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
			res = res->next;
		}
		out += sprintf(out, "assigned resources: IO\n");
		index = 11;
		res = new_slot->io_head;
		while (res && index--) {
			out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
			res = res->next;
		}
		out += sprintf(out, "assigned resources: bus numbers\n");
		index = 11;
		res = new_slot->bus_head;
		while (res && index--) {
			out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
			res = res->next;
		}
		slot=slot->next;
	}

	return out - buf;
}
/*
 * Fill @data with the controller and per-slot debug dumps and return
 * the number of bytes written.
 *
 * The old arithmetic ("used = size - show_ctrl(...)") wrote the slot
 * dump near the *end* of the buffer and returned n1 - n2, leaving a
 * gap of uninitialised bytes in the middle and a bogus length.
 * Simply append the second dump after the first.
 *
 * NOTE(review): neither show_ctrl() nor show_dev() bounds its output
 * by @size; the 4-page buffer is assumed sufficient, as before.
 */
static int spew_debug_info(struct controller *ctrl, char *data, int size)
{
	int used;

	used = show_ctrl(ctrl, data);
	used += show_dev(ctrl, &data[used]);
	return used;
}
struct ctrl_dbg {
int size;
char *data;
struct controller *ctrl;
};
#define MAX_OUTPUT (4*PAGE_SIZE)
/*
 * debugfs open: snapshot the controller's debug dump into a per-open
 * buffer so subsequent read/lseek calls see a consistent image.
 */
static int open(struct inode *inode, struct file *file)
{
	struct controller *ctrl = inode->i_private;
	struct ctrl_dbg *dbg;
	int retval = -ENOMEM;

	mutex_lock(&cpqphp_mutex);
	dbg = kmalloc(sizeof(*dbg), GFP_KERNEL);
	if (!dbg)
		goto exit;
	dbg->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
	if (!dbg->data) {
		kfree(dbg);
		goto exit;
	}
	dbg->size = spew_debug_info(ctrl, dbg->data, MAX_OUTPUT);
	file->private_data = dbg;
	retval = 0;
exit:
	mutex_unlock(&cpqphp_mutex);
	return retval;
}
/*
 * debugfs llseek: only SEEK_SET (0) and SEEK_CUR (1) are supported;
 * any other whence leaves 'new' at -1 and falls through to -EINVAL,
 * as do offsets outside [0, dbg->size].
 */
static loff_t lseek(struct file *file, loff_t off, int whence)
{
	struct ctrl_dbg *dbg;
	loff_t new = -1;

	mutex_lock(&cpqphp_mutex);
	dbg = file->private_data;

	switch (whence) {
	case 0:
		new = off;
		break;
	case 1:
		new = file->f_pos + off;
		break;
	}
	if (new < 0 || new > dbg->size) {
		mutex_unlock(&cpqphp_mutex);
		return -EINVAL;
	}
	mutex_unlock(&cpqphp_mutex);
	return (file->f_pos = new);
}
/* debugfs read: serve bytes from the snapshot taken at open time. */
static ssize_t read(struct file *file, char __user *buf,
	size_t nbytes, loff_t *ppos)
{
	struct ctrl_dbg *dbg = file->private_data;
	return simple_read_from_buffer(buf, nbytes, ppos, dbg->data, dbg->size);
}

/* debugfs release: free the per-open snapshot. */
static int release(struct inode *inode, struct file *file)
{
	struct ctrl_dbg *dbg = file->private_data;

	kfree(dbg->data);
	kfree(dbg);
	return 0;
}
static const struct file_operations debug_ops = {
.owner = THIS_MODULE,
.open = open,
.llseek = lseek,
.read = read,
.release = release,
};
static struct dentry *root;
/* Create the "cpqhp" debugfs directory (idempotent). */
void cpqhp_initialize_debugfs(void)
{
	if (!root)
		root = debugfs_create_dir("cpqhp", NULL);
}

/* Tear down the "cpqhp" debugfs directory. */
void cpqhp_shutdown_debugfs(void)
{
	debugfs_remove(root);
	/*
	 * Clear the cached dentry so a later cpqhp_initialize_debugfs()
	 * recreates the directory instead of reusing a stale pointer.
	 */
	root = NULL;
}
void cpqhp_create_debugfs_files(struct controller *ctrl)
{
ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
S_IRUGO, root, ctrl, &debug_ops);
}
/* Remove a controller's debugfs file; safe to call when none exists. */
void cpqhp_remove_debugfs_files(struct controller *ctrl)
{
	/* debugfs_remove(NULL) is a no-op, so no guard is needed */
	debugfs_remove(ctrl->dentry);
	ctrl->dentry = NULL;
}
| gpl-2.0 |
IADcodes/cubebone_kernel_nexus7_grouper | samples/trace_events/trace-events-sample.c | 13174 | 1053 | #include <linux/module.h>
#include <linux/kthread.h>
/*
* Any file that uses trace points, must include the header.
* But only one file, must include the header by defining
* CREATE_TRACE_POINTS first. This will make the C code that
* creates the handles for the trace points.
*/
#define CREATE_TRACE_POINTS
#include "trace-events-sample.h"
/*
 * One iteration of the sample thread: sleep roughly one second in
 * interruptible state, then fire the foo_bar trace event with the
 * iteration count.
 */
static void simple_thread_func(int cnt)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);
	trace_foo_bar("hello", cnt);
}

/* kthread body: emit one sample event per second until stopped */
static int simple_thread(void *arg)
{
	int cnt = 0;

	while (!kthread_should_stop())
		simple_thread_func(cnt++);

	return 0;
}
static struct task_struct *simple_tsk;
/*
 * Module init: start the sample kthread that emits the foo_bar trace
 * event.  Returns 0 on success, or the kthread_run() error code (the
 * old code collapsed every failure to -1, hiding the real errno).
 */
static int __init trace_event_init(void)
{
	simple_tsk = kthread_run(simple_thread, NULL, "event-sample");
	if (IS_ERR(simple_tsk))
		return PTR_ERR(simple_tsk);

	return 0;
}
static void __exit trace_event_exit(void)
{
kthread_stop(simple_tsk);
}
module_init(trace_event_init);
module_exit(trace_event_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("trace-events-sample");
MODULE_LICENSE("GPL");
| gpl-2.0 |
nunogil/lge-kernel-sniper | drivers/char/hw_random/omap-rng.c | 1655 | 4965 | /*
* omap-rng.c - RNG driver for TI OMAP CPU family
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Mostly based on original driver:
*
* Copyright (C) 2005 Nokia Corporation
* Author: Juha Yrjölä <juha.yrjola@nokia.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <asm/io.h>
#define RNG_OUT_REG 0x00 /* Output register */
#define RNG_STAT_REG 0x04 /* Status register
[0] = STAT_BUSY */
#define RNG_ALARM_REG 0x24 /* Alarm register
[7:0] = ALARM_COUNTER */
#define RNG_CONFIG_REG 0x28 /* Configuration register
[11:6] = RESET_COUNT
[5:3] = RING2_DELAY
[2:0] = RING1_DELAY */
#define RNG_REV_REG 0x3c /* Revision register
[7:0] = REV_NB */
#define RNG_MASK_REG 0x40 /* Mask and reset register
[2] = IT_EN
[1] = SOFTRESET
[0] = AUTOIDLE */
#define RNG_SYSSTATUS 0x44 /* System status
[0] = RESETDONE */
static void __iomem *rng_base;
static struct clk *rng_ick;
static struct platform_device *rng_dev;
/* Read the 32-bit RNG register at byte offset @reg. */
static inline u32 omap_rng_read_reg(int reg)
{
	return __raw_readl(rng_base + reg);
}

/* Write @val to the 32-bit RNG register at byte offset @reg. */
static inline void omap_rng_write_reg(int reg, u32 val)
{
	__raw_writel(val, rng_base + reg);
}

/*
 * hwrng .data_present callback: poll the busy flag in RNG_STAT_REG up
 * to 20 times (10 us apart when @wait is set).  Returns 1 when a
 * random word is ready, 0 otherwise.
 */
static int omap_rng_data_present(struct hwrng *rng, int wait)
{
	int data, i;

	for (i = 0; i < 20; i++) {
		data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
		if (data || !wait)
			break;
		/* RNG produces data fast enough (2+ MBit/sec, even
		 * during "rngtest" loads, that these delays don't
		 * seem to trigger. We *could* use the RNG IRQ, but
		 * that'd be higher overhead ... so why bother?
		 */
		udelay(10);
	}
	return data;
}

/* hwrng .data_read callback: fetch one 32-bit random word (4 bytes). */
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = omap_rng_read_reg(RNG_OUT_REG);

	return 4;
}
static struct hwrng omap_rng_ops = {
.name = "omap",
.data_present = omap_rng_data_present,
.data_read = omap_rng_data_read,
};
/*
 * Probe: map the RNG registers, register with the hwrng core and
 * unmask the RNG engine.
 *
 * Fix vs. the original: a missing platform MEM resource no longer
 * returns straight out of the function, which leaked the enabled
 * (and referenced) interface clock on OMAP24xx.
 */
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
	struct resource *res, *mem;
	int ret;

	/*
	 * A bit ugly, and it will never actually happen but there can
	 * be only one RNG and this catches any bork
	 */
	if (rng_dev)
		return -EBUSY;

	if (cpu_is_omap24xx()) {
		rng_ick = clk_get(&pdev->dev, "ick");
		if (IS_ERR(rng_ick)) {
			dev_err(&pdev->dev, "Could not get rng_ick\n");
			return PTR_ERR(rng_ick);
		}
		clk_enable(rng_ick);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		/* was: bare "return -ENOENT", leaking rng_ick on 24xx */
		ret = -ENOENT;
		goto err_region;
	}

	mem = request_mem_region(res->start, resource_size(res),
				 pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto err_region;
	}

	dev_set_drvdata(&pdev->dev, mem);
	rng_base = ioremap(res->start, resource_size(res));
	if (!rng_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = hwrng_register(&omap_rng_ops);
	if (ret)
		goto err_register;

	dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
		 omap_rng_read_reg(RNG_REV_REG));
	/* unmask/enable the RNG engine */
	omap_rng_write_reg(RNG_MASK_REG, 0x1);

	rng_dev = pdev;

	return 0;

err_register:
	iounmap(rng_base);
	rng_base = NULL;
err_ioremap:
	release_resource(mem);
err_region:
	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}
	return ret;
}
/*
 * Tear down in reverse order of probe: unregister from the hwrng
 * core, mask the RNG engine, unmap the registers, release the clock
 * (OMAP24xx only) and the memory region.
 */
static int __exit omap_rng_remove(struct platform_device *pdev)
{
	struct resource *mem = dev_get_drvdata(&pdev->dev);

	hwrng_unregister(&omap_rng_ops);

	/* mask/disable the RNG engine */
	omap_rng_write_reg(RNG_MASK_REG, 0x0);

	iounmap(rng_base);

	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}

	release_resource(mem);
	rng_base = NULL;

	return 0;
}
#ifdef CONFIG_PM
static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
{
omap_rng_write_reg(RNG_MASK_REG, 0x0);
return 0;
}
static int omap_rng_resume(struct platform_device *pdev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x1);
return 0;
}
#else
#define omap_rng_suspend NULL
#define omap_rng_resume NULL
#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap_rng");
static struct platform_driver omap_rng_driver = {
.driver = {
.name = "omap_rng",
.owner = THIS_MODULE,
},
.probe = omap_rng_probe,
.remove = __exit_p(omap_rng_remove),
.suspend = omap_rng_suspend,
.resume = omap_rng_resume
};
static int __init omap_rng_init(void)
{
if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
return -ENODEV;
return platform_driver_register(&omap_rng_driver);
}
static void __exit omap_rng_exit(void)
{
platform_driver_unregister(&omap_rng_driver);
}
module_init(omap_rng_init);
module_exit(omap_rng_exit);
MODULE_AUTHOR("Deepak Saxena (and others)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fulcrum7/training_a | fs/nls/nls_cp1250.c | 2935 | 15395 | /*
* linux/fs/nls/nls_cp1250.c
*
* Charset cp1250 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x20ac, 0x0000, 0x201a, 0x0000,
0x201e, 0x2026, 0x2020, 0x2021,
0x0000, 0x2030, 0x0160, 0x2039,
0x015a, 0x0164, 0x017d, 0x0179,
/* 0x90*/
0x0000, 0x2018, 0x2019, 0x201c,
0x201d, 0x2022, 0x2013, 0x2014,
0x0000, 0x2122, 0x0161, 0x203a,
0x015b, 0x0165, 0x017e, 0x017a,
/* 0xa0*/
0x00a0, 0x02c7, 0x02d8, 0x0141,
0x00a4, 0x0104, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x015e, 0x00ab,
0x00ac, 0x00ad, 0x00ae, 0x017b,
/* 0xb0*/
0x00b0, 0x00b1, 0x02db, 0x0142,
0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x0105, 0x015f, 0x00bb,
0x013d, 0x02dd, 0x013e, 0x017c,
/* 0xc0*/
0x0154, 0x00c1, 0x00c2, 0x0102,
0x00c4, 0x0139, 0x0106, 0x00c7,
0x010c, 0x00c9, 0x0118, 0x00cb,
0x011a, 0x00cd, 0x00ce, 0x010e,
/* 0xd0*/
0x0110, 0x0143, 0x0147, 0x00d3,
0x00d4, 0x0150, 0x00d6, 0x00d7,
0x0158, 0x016e, 0x00da, 0x0170,
0x00dc, 0x00dd, 0x0162, 0x00df,
/* 0xe0*/
0x0155, 0x00e1, 0x00e2, 0x0103,
0x00e4, 0x013a, 0x0107, 0x00e7,
0x010d, 0x00e9, 0x0119, 0x00eb,
0x011b, 0x00ed, 0x00ee, 0x010f,
/* 0xf0*/
0x0111, 0x0144, 0x0148, 0x00f3,
0x00f4, 0x0151, 0x00f6, 0x00f7,
0x0159, 0x016f, 0x00fa, 0x0171,
0x00fc, 0x00fd, 0x0163, 0x02d9,
};
/*
 * Reverse table: Unicode page 0x00 (U+0000..U+00FF) -> cp1250 byte.
 * An entry of 0x00 means "no cp1250 equivalent" (uni2char() rejects
 * such lookups with -EINVAL).
 */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */
	0xb0, 0xb1, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
	0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */
	0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
	0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Reverse table: Unicode page 0x01 (U+0100..U+01FF) -> cp1250 byte.
 * Only the first 0x80 entries are initialized; the remaining slots are
 * implicitly zero, i.e. unmappable.
 */
static const unsigned char page01[256] = {
	0x00, 0x00, 0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */
	0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0xc5, 0xe5, 0x00, 0x00, 0xbc, 0xbe, 0x00, /* 0x38-0x3f */
	0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */
	0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */
	0xd8, 0xf8, 0x8c, 0x9c, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */
	0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */
	0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e, 0x00, /* 0x78-0x7f */
};
/*
 * Reverse table: Unicode page 0x02 (U+0200..U+02FF) -> cp1250 byte.
 * Initialized only up to 0xdf; the few non-zero entries are the
 * spacing-modifier characters cp1250 provides.
 */
static const unsigned char page02[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */
};
/*
 * Reverse table: Unicode page 0x20 (general punctuation,
 * U+2000..U+20FF) -> cp1250 byte. Covers dashes, quotation marks,
 * daggers, per-mille, single guillemets and the euro sign (0x80).
 */
static const unsigned char page20[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */
	0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
};
/*
 * Reverse table: Unicode page 0x21 -> cp1250 byte. The only mapped
 * entry is U+2122 (trade mark sign) -> 0x99.
 */
static const unsigned char page21[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
};
/*
 * Index by the high byte of a Unicode code point to get the per-page
 * reverse table, or NULL if nothing on that page maps to cp1250.
 */
static const unsigned char *const page_uni2charset[256] = {
	page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
};
/*
 * cp1250 byte -> lowercase cp1250 byte. Identity where no case
 * mapping exists; entries of 0x00 mark code points that appear to be
 * undefined in cp1250 (e.g. 0x81, 0x83) — TODO confirm against the
 * charset definition.
 */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x00, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xb9, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbe, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/*
 * cp1250 byte -> uppercase cp1250 byte. Identity where no case
 * mapping exists; 0x00 marks entries with no mapping (note 0xdf,
 * sharp s, has no single-byte uppercase form).
 */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x00, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xa5, 0xaa, 0xbb, 0xbc, 0xbd, 0xbc, 0xaf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0x00, /* 0xd8-0xdf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * Convert one cp1250 byte to its Unicode code point.
 * Returns 1 (bytes consumed) on success, -EINVAL for a byte with no
 * Unicode mapping. *uni is written unconditionally, as before.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[rawstring[0]];
	return (*uni == 0x0000) ? -EINVAL : 1;
}
/* NLS operations record for cp1250, handed to register_nls() below. */
static struct nls_table table = {
	.charset	= "cp1250",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
};
/* Module entry point: register the cp1250 table with the NLS core. */
static int __init init_nls_cp1250(void)
{
	return register_nls(&table);
}
/* Module exit point: unregister the cp1250 table. */
static void __exit exit_nls_cp1250(void)
{
	unregister_nls(&table);
}
/* Standard module boilerplate. */
module_init(init_nls_cp1250)
module_exit(exit_nls_cp1250)

MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
MoKee/android_kernel_nubia_nx505j | arch/arm/mach-sa1100/h3600.c | 4727 | 4151 | /*
* Support for Compaq iPAQ H3600 handheld computer
*
* Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks)
* Copyright (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <video/sa1100fb.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/irda.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* helper for sa1100fb
*/
/*
 * Switch the LCD supply rails on or off for sa1100fb.
 *
 * All four control egpios are requested, driven to @enable, then
 * released; request failures unwind in reverse order via the labels.
 * Fix: the error messages for LCD_PCI and LCD_5V_ON named the wrong
 * macros (H3XXX_* instead of the H3600_* actually requested).
 */
static void h3600_lcd_power(int enable)
{
	if (gpio_request(H3XXX_EGPIO_LCD_ON, "LCD power")) {
		pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__);
		goto err1;
	}
	if (gpio_request(H3600_EGPIO_LCD_PCI, "LCD control")) {
		pr_err("%s: can't request H3600_EGPIO_LCD_PCI\n", __func__);
		goto err2;
	}
	if (gpio_request(H3600_EGPIO_LCD_5V_ON, "LCD 5v")) {
		pr_err("%s: can't request H3600_EGPIO_LCD_5V_ON\n", __func__);
		goto err3;
	}
	if (gpio_request(H3600_EGPIO_LVDD_ON, "LCD 9v/-6.5v")) {
		pr_err("%s: can't request H3600_EGPIO_LVDD_ON\n", __func__);
		goto err4;
	}

	gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable);
	gpio_direction_output(H3600_EGPIO_LCD_PCI, enable);
	gpio_direction_output(H3600_EGPIO_LCD_5V_ON, enable);
	gpio_direction_output(H3600_EGPIO_LVDD_ON, enable);

	gpio_free(H3600_EGPIO_LVDD_ON);
err4:	gpio_free(H3600_EGPIO_LCD_5V_ON);
err3:	gpio_free(H3600_EGPIO_LCD_PCI);
err2:	gpio_free(H3XXX_EGPIO_LCD_ON);
err1:	return;
}
/* 16bpp RGB bitfield layout for the H3600 panel (4 bits per channel,
 * offsets 12/7/1, no transparency). */
static const struct sa1100fb_rgb h3600_rgb_16 = {
	.red	= { .offset = 12,	.length = 4, },
	.green	= { .offset = 7,	.length = 4, },
	.blue	= { .offset = 1,	.length = 4, },
	.transp	= { .offset = 0,	.length = 0, },
};
/* Panel description for sa1100fb: 320x240, 16bpp, static colormap;
 * power switching is delegated to h3600_lcd_power(). */
static struct sa1100fb_mach_info h3600_lcd_info = {
	.pixclock	= 174757,	.bpp		= 16,
	.xres		= 320,		.yres		= 240,

	.hsync_len	= 3,		.vsync_len	= 3,
	.left_margin	= 12,		.upper_margin	= 10,
	.right_margin	= 17,		.lower_margin	= 1,

	.cmap_static	= 1,

	.lccr0		= LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
	.lccr3		= LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),

	.rgb[RGB_16] = &h3600_rgb_16,

	.lcd_power = h3600_lcd_power,
};
/* Static I/O mapping: the H3600 needs nothing beyond the common
 * h3xxx mapping. */
static void __init h3600_map_io(void)
{
	h3xxx_map_io();
}
/*
* This turns the IRDA power on or off on the Compaq H3600
*/
/* Drive the IR transceiver power egpio; always succeeds. */
static int h3600_irda_set_power(struct device *dev, unsigned int state)
{
	gpio_set_value(H3600_EGPIO_IR_ON, state);
	return 0;
}
/* Select the transceiver speed mode: FSEL is asserted for speeds of
 * 4 Mbps and above (FIR), deasserted below that. */
static void h3600_irda_set_speed(struct device *dev, unsigned int speed)
{
	gpio_set_value(H3600_EGPIO_IR_FSEL, !(speed < 4000000));
}
/*
 * Claim and initialise (driven low) the two IrDA control egpios.
 * On failure, previously acquired GPIOs are released in reverse order
 * via the error labels; returns 0 on success or the gpio_* error code.
 */
static int h3600_irda_startup(struct device *dev)
{
	int err = gpio_request(H3600_EGPIO_IR_ON, "IrDA power");
	if (err)
		goto err1;
	err = gpio_direction_output(H3600_EGPIO_IR_ON, 0);
	if (err)
		goto err2;
	err = gpio_request(H3600_EGPIO_IR_FSEL, "IrDA fsel");
	if (err)
		goto err2;
	err = gpio_direction_output(H3600_EGPIO_IR_FSEL, 0);
	if (err)
		goto err3;
	return 0;

err3:	gpio_free(H3600_EGPIO_IR_FSEL);
err2:	gpio_free(H3600_EGPIO_IR_ON);
err1:	return err;
}
/* Release the GPIOs acquired in h3600_irda_startup(). */
static void h3600_irda_shutdown(struct device *dev)
{
	gpio_free(H3600_EGPIO_IR_ON);
	gpio_free(H3600_EGPIO_IR_FSEL);
}
/* Platform hooks handed to the sa11x0 IrDA driver. */
static struct irda_platform_data h3600_irda_data = {
	.set_power	= h3600_irda_set_power,
	.set_speed	= h3600_irda_set_speed,
	.startup	= h3600_irda_startup,
	.shutdown	= h3600_irda_shutdown,
};
/* Initial state for the serial handshake GPIOs: DCD/CTS as inputs,
 * RTS driven low. */
static struct gpio_default_state h3600_default_gpio[] = {
	{ H3XXX_GPIO_COM_DCD, GPIO_MODE_IN, "COM DCD" },
	{ H3XXX_GPIO_COM_CTS, GPIO_MODE_IN, "COM CTS" },
	{ H3XXX_GPIO_COM_RTS, GPIO_MODE_OUT0, "COM RTS" },
};
/* Board init: common h3xxx setup, then register the LCD panel and the
 * IrDA platform data. */
static void __init h3600_mach_init(void)
{
	h3xxx_init_gpio(h3600_default_gpio, ARRAY_SIZE(h3600_default_gpio));
	h3xxx_mach_init();

	sa11x0_register_lcd(&h3600_lcd_info);
	sa11x0_register_irda(&h3600_irda_data);
}
/* Machine descriptor for the SA-1100 based Compaq iPAQ H3600. */
MACHINE_START(H3600, "Compaq iPAQ H3600")
	.atag_offset	= 0x100,
	.map_io		= h3600_map_io,
	.nr_irqs	= SA1100_NR_IRQS,
	.init_irq	= sa1100_init_irq,
	.timer		= &sa1100_timer,
	.init_machine	= h3600_mach_init,
	.restart	= sa11x0_restart,
MACHINE_END
| gpl-2.0 |
Kali-/htc-kernel-msm7x30-exp | arch/m68k/apollo/config.c | 4727 | 6157 | #include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/rtc.h>
#include <linux/vt_kern.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/apollohw.h>
#include <asm/irq.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
/* Physical addresses of the on-board peripherals; filled in by
 * dn_setup_model() according to the detected Apollo model. */
u_long sio01_physaddr;
u_long sio23_physaddr;
u_long rtc_physaddr;
u_long pica_physaddr;
u_long picb_physaddr;
u_long cpuctrl_physaddr;
u_long timer_physaddr;
/* Model tag from the bootinfo record (see apollo_parse_bootinfo()). */
u_long apollo_model;
/* Forward declarations; most of these are implemented later in this
 * file and wired into the mach_* vectors by config_apollo(). */
extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
extern unsigned long dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
#endif
static irqreturn_t dn_timer_int(int irq,void *);
static void dn_get_model(char *model);
/* Human-readable model names, indexed by (model - APOLLO_DN3000). */
static const char *apollo_models[] = {
	[APOLLO_DN3000-APOLLO_DN3000] = "DN3000 (Otter)",
	[APOLLO_DN3010-APOLLO_DN3000] = "DN3010 (Otter)",
	[APOLLO_DN3500-APOLLO_DN3000] = "DN3500 (Cougar II)",
	[APOLLO_DN4000-APOLLO_DN3000] = "DN4000 (Mink)",
	[APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)"
};
/*
 * Consume one bootinfo record. Records tagged BI_APOLLO_MODEL set the
 * global apollo_model. Returns 0 when the record was recognised,
 * 1 otherwise.
 */
int apollo_parse_bootinfo(const struct bi_record *record)
{
	const unsigned long *data = record->data;

	if (record->tag == BI_APOLLO_MODEL) {
		apollo_model = *data;
		return 0;
	}

	return 1;	/* not ours */
}
/*
 * Identify the Apollo model and record the physical addresses of its
 * peripherals (SAU8 layout for DN3000/3010, SAU7 for DN3500/4000).
 * Panics on unknown or unsupported models.
 *
 * Fix: apollo_models[] was indexed with (apollo_model - APOLLO_DN3000)
 * before apollo_model had been validated, reading out of bounds for
 * APOLLO_UNKNOWN; the name is now only printed for in-range models.
 */
void dn_setup_model(void) {

	printk("Apollo hardware found: ");
	if (apollo_model >= APOLLO_DN3000 && apollo_model <= APOLLO_DN4500)
		printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]);
	else
		printk("[unknown]\n");

	switch(apollo_model) {
		case APOLLO_UNKNOWN:
			panic("Unknown apollo model");
			break;
		case APOLLO_DN3000:
		case APOLLO_DN3010:
			sio01_physaddr=SAU8_SIO01_PHYSADDR;
			rtc_physaddr=SAU8_RTC_PHYSADDR;
			pica_physaddr=SAU8_PICA;
			picb_physaddr=SAU8_PICB;
			cpuctrl_physaddr=SAU8_CPUCTRL;
			timer_physaddr=SAU8_TIMER;
			break;
		case APOLLO_DN4000:
			sio01_physaddr=SAU7_SIO01_PHYSADDR;
			sio23_physaddr=SAU7_SIO23_PHYSADDR;
			rtc_physaddr=SAU7_RTC_PHYSADDR;
			pica_physaddr=SAU7_PICA;
			picb_physaddr=SAU7_PICB;
			cpuctrl_physaddr=SAU7_CPUCTRL;
			timer_physaddr=SAU7_TIMER;
			break;
		case APOLLO_DN4500:
			panic("Apollo model not yet supported");
			break;
		case APOLLO_DN3500:
			sio01_physaddr=SAU7_SIO01_PHYSADDR;
			sio23_physaddr=SAU7_SIO23_PHYSADDR;
			rtc_physaddr=SAU7_RTC_PHYSADDR;
			pica_physaddr=SAU7_PICA;
			picb_physaddr=SAU7_PICB;
			cpuctrl_physaddr=SAU7_CPUCTRL;
			timer_physaddr=SAU7_TIMER;
			break;
		default:
			panic("Undefined apollo model");
			break;
	}
}
/* Busy-wait until the serial port has a received byte (status bit 0),
 * then return it. */
int dn_serial_console_wait_key(struct console *co) {

	while(!(sio01.srb_csrb & 1))
		barrier();
	return sio01.rhrb_thrb;
}
/*
 * Console write hook: emit @count bytes to the on-board serial port,
 * expanding '\n' to "\r\n". Each byte spin-waits on status bit 0x4
 * (presumably transmitter-ready — TODO confirm against the SIO docs).
 * Same transmit loop as dn_serial_print() below.
 */
void dn_serial_console_write (struct console *co, const char *str,unsigned int count)
{
	while(count--) {
		if (*str == '\n') {
			sio01.rhrb_thrb = (unsigned char)'\r';
			while (!(sio01.srb_csrb & 0x4))
				;
		}
		sio01.rhrb_thrb = (unsigned char)*str++;
		while (!(sio01.srb_csrb & 0x4))
			;
	}
}
/* Emit a NUL-terminated string on the serial port, expanding '\n' to
 * "\r\n"; used for early/emergency output (see dn_dummy_reset()). */
void dn_serial_print (const char *str)
{
	while (*str) {
		if (*str == '\n') {
			sio01.rhrb_thrb = (unsigned char)'\r';
			while (!(sio01.srb_csrb & 0x4))
				;
		}
		sio01.rhrb_thrb = (unsigned char)*str++;
		while (!(sio01.srb_csrb & 0x4))
			;
	}
}
/*
 * Machine setup entry point: detect the model, install the Apollo
 * implementations of the generic m68k machine vectors, then initialise
 * the CPU control register and clear the DMA translation table.
 */
void __init config_apollo(void)
{
	int i;

	dn_setup_model();

	mach_sched_init=dn_sched_init; /* */
	mach_init_IRQ=dn_init_IRQ;
	mach_gettimeoffset   = dn_gettimeoffset;
	mach_max_dma_address = 0xffffffff;
	mach_hwclk           = dn_dummy_hwclk; /* */
	mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
	mach_reset	     = dn_dummy_reset;  /* */
#ifdef CONFIG_HEARTBEAT
	mach_heartbeat = dn_heartbeat;
#endif
	mach_get_model       = dn_get_model;

	cpuctrl=0xaa00;

	/* clear DMA translation table */
	for(i=0;i<0x400;i++)
		addr_xlat_map[i]=0;
}
/*
 * Timer interrupt: dev_id is the generic timer routine registered in
 * dn_sched_init(), so it is both called and passed through as its own
 * dev_id. The two volatile reads of timer+3/timer+5 presumably
 * acknowledge/clear the timer interrupt — TODO confirm against the
 * hardware documentation.
 */
irqreturn_t dn_timer_int(int irq, void *dev_id)
{
	irq_handler_t timer_handler = dev_id;

	volatile unsigned char x;

	timer_handler(irq, dev_id);

	x = *(volatile unsigned char *)(timer+3);
	x = *(volatile unsigned char *)(timer+5);

	return IRQ_HANDLED;
}
/*
 * Set up the scheduler tick: program timer 1 (register values taken
 * as-is from the original code — meaning not documented here), unmask
 * the timer line on PIC B, and register dn_timer_int() with the
 * generic timer routine as dev_id.
 */
void dn_sched_init(irq_handler_t timer_routine)
{
	/* program timer 1 */
	*(volatile unsigned char *)(timer+3)=0x01;
	*(volatile unsigned char *)(timer+1)=0x40;
	*(volatile unsigned char *)(timer+5)=0x09;
	*(volatile unsigned char *)(timer+7)=0xc4;

	/* enable IRQ of PIC B */
	*(volatile unsigned char *)(pica+1)&=(~8);

#if 0
	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
#endif

	if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
		pr_err("Couldn't register timer interrupt\n");
}
/* Placeholder: a real implementation would report microseconds since
 * the last tick; this returns a recognisable dummy value instead. */
unsigned long dn_gettimeoffset(void) {
	return 0xdeadbeef;
}
/*
 * Read (op == 0) or write the hardware RTC registers to/from *t.
 * Fields are copied verbatim; day_of_week is only written when the
 * caller supplies one (tm_wday != -1). Always returns 0.
 */
int dn_dummy_hwclk(int op, struct rtc_time *t) {

	if(!op) { /* read */
		t->tm_sec=rtc->second;
		t->tm_min=rtc->minute;
		t->tm_hour=rtc->hours;
		t->tm_mday=rtc->day_of_month;
		t->tm_wday=rtc->day_of_week;
		t->tm_mon=rtc->month;
		t->tm_year=rtc->year;
	} else {
		rtc->second=t->tm_sec;
		rtc->minute=t->tm_min;
		rtc->hours=t->tm_hour;
		rtc->day_of_month=t->tm_mday;
		if(t->tm_wday!=-1)
			rtc->day_of_week=t->tm_wday;
		rtc->month=t->tm_mon;
		rtc->year=t->tm_year;
	}

	return 0;
}
/* Stub: logs the call and pretends success; the RTC is not updated. */
int dn_dummy_set_clock_mmss(unsigned long nowtime) {
	printk("set_clock_mmss\n");
	return 0;
}
/* Stub reset: announce on the serial port, then hang forever. */
void dn_dummy_reset(void) {
	dn_serial_print("The end !\n");
	for(;;);
}
/* Stub: only announces itself; does not actually wait for anything. */
void dn_dummy_waitbut(void) {
	dn_serial_print("waitbut\n");
}
/* Fill @model with "Apollo " plus the detected model name; the name is
 * only appended when apollo_model indexes apollo_models[] in range.
 * NOTE(review): the caller must supply a buffer large enough for the
 * longest name — no bound is checked here. */
static void dn_get_model(char *model)
{
	strcpy(model, "Apollo ");
	if (apollo_model >= APOLLO_DN3000 && apollo_model <= APOLLO_DN4500)
		strcat(model, apollo_models[apollo_model - APOLLO_DN3000]);
}
#ifdef CONFIG_HEARTBEAT
/* Shadow of the CPU control register used for the CONFIG_HEARTBEAT
 * indicator; bit 0x100 is cleared when the beat is "on" and set when
 * it is "off", then the whole value is written back to cpuctrl. */
static int dn_cpuctrl = 0xff00;

static void dn_heartbeat(int on)
{
	if (on)
		dn_cpuctrl &= ~0x100;
	else
		dn_cpuctrl |= 0x100;

	cpuctrl = dn_cpuctrl;
}
#endif
| gpl-2.0 |
KylinUI/android_kernel_lge_mako | scripts/basic/fixdep.c | 4983 | 10782 | /*
* "Optimize" a list of dependencies as spit out by gcc -MD
* for the kernel build
* ===========================================================================
*
* Author Kai Germaschewski
* Copyright 2002 by Kai Germaschewski <kai.germaschewski@gmx.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*
* Introduction:
*
* gcc produces a very nice and correct list of dependencies which
* tells make when to remake a file.
*
* To use this list as-is however has the drawback that virtually
* every file in the kernel includes autoconf.h.
*
* If the user re-runs make *config, autoconf.h will be
* regenerated. make notices that and will rebuild every file which
* includes autoconf.h, i.e. basically all files. This is extremely
* annoying if the user just changed CONFIG_HIS_DRIVER from n to m.
*
* So we play the same trick that "mkdep" played before. We replace
* the dependency on autoconf.h by a dependency on every config
* option which is mentioned in any of the listed prerequisites.
*
* kconfig populates a tree in include/config/ with an empty file
* for each config symbol and when the configuration is updated
* the files representing changed config options are touched
* which then let make pick up the changes and the files that use
* the config symbols are rebuilt.
*
* So if the user changes his CONFIG_HIS_DRIVER option, only the objects
* which depend on "include/linux/config/his/driver.h" will be rebuilt,
* so most likely only his driver ;-)
*
* The idea above dates, by the way, back to Michael E Chastain, AFAIK.
*
* So to get dependencies right, there are two issues:
* o if any of the files the compiler read changed, we need to rebuild
* o if the command line given to the compile the file changed, we
* better rebuild as well.
*
* The former is handled by using the -MD output, the later by saving
* the command line used to compile the old object and comparing it
* to the one we would now use.
*
* Again, also this idea is pretty old and has been discussed on
* kbuild-devel a long time ago. I don't have a sensibly working
* internet connection right now, so I rather don't mention names
* without double checking.
*
* This code here has been partially based on mkdep.c, which
* says the following about its history:
*
* Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
* This is a C version of syncdep.pl by Werner Almesberger.
*
*
* It is invoked as
*
* fixdep <depfile> <target> <cmdline>
*
* and will read the dependency file <depfile>
*
* The transformed dependency snippet is written to stdout.
*
* It first generates a line
*
* cmd_<target> = <cmdline>
*
* and then basically copies the .<target>.d file to stdout, in the
* process filtering out the dependency on autoconf.h and adding
* dependencies on include/config/my/option.h for every
* CONFIG_MY_OPTION encountered in any of the prerequisites.
*
* It will also filter out all the dependencies on *.ver. We need
* to make sure that the generated version checksum are globally up
* to date before even starting the recursive build, so it's too late
* at this point anyway.
*
* The algorithm to grep for "CONFIG_..." is bit unusual, but should
* be fast ;-) We don't even try to really parse the header files, but
* merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will
* be picked up as well. It's not a problem with respect to
* correctness, since that can only give too many dependencies, thus
* we cannot miss a rebuild. Since people tend to not mention totally
* unrelated CONFIG_ options all over the place, it's not an
* efficiency problem either.
*
* (Note: it'd be easy to port over the complete mkdep state machine,
* but I don't think the added complexity is worth it)
*/
/*
* Note 2: if somebody writes HELLO_CONFIG_BOOM in a file, it will depend onto
* CONFIG_BOOM. This could seem a bug (not too hard to fix), but please do not
* fix it! Some UserModeLinux files (look at arch/um/) call CONFIG_BOOM as
* UML_CONFIG_BOOM, to avoid conflicts with /usr/include/linux/autoconf.h,
* through arch/um/include/uml-config.h; this fixdep "bug" makes sure that
* those files will have correct dependencies.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <ctype.h>
#include <arpa/inet.h>
/* The four-byte windows "CONF", "ONFI", "NFIG", "FIG_" as native-endian
 * ints; parse_config_file() scans one machine word at a time for these
 * fragments of "CONFIG_". */
#define INT_CONF ntohl(0x434f4e46)
#define INT_ONFI ntohl(0x4f4e4649)
#define INT_NFIG ntohl(0x4e464947)
#define INT_FIG_ ntohl(0x4649475f)

char *target;	/* object file the dependencies are generated for (argv[2]) */
char *depfile;	/* gcc -MD output file to rewrite (argv[1]) */
char *cmdline;	/* command line used to build the target (argv[3]) */
/* Print the invocation synopsis and terminate with status 1. */
static void usage(void)
{
	fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
	exit(1);
}
/*
 * Print out the commandline prefixed with cmd_<target filename> :=
 */
static void print_cmdline(void)
{
	printf("cmd_%s := %s\n\n", target, cmdline);
}
/* Hash-table node recording one already-seen CONFIG_ symbol; the name
 * is stored inline in the trailing flexible-style array. */
struct item {
	struct item	*next;	/* chain within one bucket */
	unsigned int	len;	/* length of name (not NUL-terminated) */
	unsigned int	hash;	/* full hash, compared before memcmp */
	char		name[0];
};

#define HASHSZ 256
/* Buckets of seen CONFIG_ symbols; reset per target by clear_config(). */
static struct item *hashtab[HASHSZ];
/* 32-bit FNV-1a hash over the first @sz bytes of @str. */
static unsigned int strhash(const char *str, unsigned int sz)
{
	unsigned int hash = 2166136261U;	/* FNV offset basis */
	unsigned int i;

	for (i = 0; i < sz; i++) {
		hash ^= str[i];
		hash *= 0x01000193;		/* FNV prime */
	}
	return hash;
}
/*
 * Lookup a value in the configuration string.
 * Returns 1 if (name, len) was already recorded via define_config(),
 * 0 otherwise; the precomputed @hash selects the bucket and prunes
 * comparisons.
 */
static int is_defined_config(const char *name, int len, unsigned int hash)
{
	struct item *aux;

	for (aux = hashtab[hash % HASHSZ]; aux; aux = aux->next) {
		if (aux->hash == hash && aux->len == len &&
		    memcmp(aux->name, name, len) == 0)
			return 1;
	}
	return 0;
}
/*
 * Record (name, len) as seen by pushing a new item onto the front of
 * its hash bucket. Allocation failure is fatal (exit 1); the name is
 * copied, not NUL-terminated.
 */
static void define_config(const char *name, int len, unsigned int hash)
{
	struct item *item = malloc(sizeof(*item) + len);

	if (!item) {
		perror("fixdep:malloc");
		exit(1);
	}
	memcpy(item->name, name, len);
	item->len = len;
	item->hash = hash;
	item->next = hashtab[hash % HASHSZ];
	hashtab[hash % HASHSZ] = item;
}
/*
 * Free every recorded CONFIG_ symbol and empty all buckets, so the
 * next target starts from a clean set.
 */
static void clear_config(void)
{
	unsigned int i;

	for (i = 0; i < HASHSZ; i++) {
		struct item *walk = hashtab[i];

		while (walk) {
			struct item *gone = walk;

			walk = walk->next;
			free(gone);
		}
		hashtab[i] = NULL;
	}
}
/*
 * Record the use of a CONFIG_* word.
 * Emits one "$(wildcard include/config/...)" dependency for the symbol
 * (lowercased, '_' turned into '/'), exactly once per symbol thanks to
 * the is_defined_config() check.
 */
static void use_config(const char *m, int slen)
{
	unsigned int hash = strhash(m, slen);
	int c, i;

	if (is_defined_config(m, slen, hash))
	    return;

	define_config(m, slen, hash);

	printf("    $(wildcard include/config/");
	for (i = 0; i < slen; i++) {
		c = m[i];
		if (c == '_')
			c = '/';
		else
			c = tolower(c);
		putchar(c);
	}
	printf(".h) \\\n");
}
/*
 * Scan a mapped file for CONFIG_<IDENT> tokens and report each via
 * use_config(). The buffer is walked one int at a time, matching any
 * 4-byte window of "CONFIG_" (INT_CONF..INT_FIG_) and backing up to
 * the candidate start; a full "CONFIG_" check and an identifier scan
 * follow. A trailing "_MODULE" suffix is stripped before reporting.
 * NOTE(review): relies on int-sized aliased loads from a char buffer;
 * traps() checks sizeof(int) == 4 at startup.
 */
static void parse_config_file(const char *map, size_t len)
{
	const int *end = (const int *) (map + len);
	/* start at +1, so that p can never be < map */
	const int *m   = (const int *) map + 1;
	const char *p, *q;

	for (; m < end; m++) {
		if (*m == INT_CONF) { p = (char *) m  ; goto conf; }
		if (*m == INT_ONFI) { p = (char *) m-1; goto conf; }
		if (*m == INT_NFIG) { p = (char *) m-2; goto conf; }
		if (*m == INT_FIG_) { p = (char *) m-3; goto conf; }
		continue;
	conf:
		if (p > map + len - 7)
			continue;
		if (memcmp(p, "CONFIG_", 7))
			continue;
		for (q = p + 7; q < map + len; q++) {
			if (!(isalnum(*q) || *q == '_'))
				goto found;
		}
		continue;

	found:
		if (!memcmp(q - 7, "_MODULE", 7))
			q -= 7;
		if( (q-p-7) < 0 )
			continue;
		use_config(p+7, q-p-7);
	}
}
/*
 * Test whether @s ends in @sub.
 * Returns 0 on a match (mirroring the str*cmp convention) and nonzero
 * otherwise. Parameters are now const-qualified — neither string is
 * modified — so string literals pass cleanly.
 */
static int strrcmp(const char *s, const char *sub)
{
	int slen = strlen(s);
	int sublen = strlen(sub);

	if (sublen > slen)
		return 1;

	return memcmp(s + slen - sublen, sub, sublen);
}
/*
 * mmap @filename and scan it for CONFIG_ symbols (parse_config_file()).
 * Empty files are skipped; open failures are fatal (exit 2).
 *
 * Fixes: the fstat() return value was previously ignored, so a failure
 * left st.st_size indeterminate (now fatal, matching print_deps()), and
 * the mmap failure test used the non-portable "(long) map == -1"
 * instead of MAP_FAILED.
 */
static void do_config_file(const char *filename)
{
	struct stat st;
	int fd;
	void *map;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "fixdep: error opening config file: ");
		perror(filename);
		exit(2);
	}
	if (fstat(fd, &st) < 0) {
		fprintf(stderr, "fixdep: error fstat'ing config file: ");
		perror(filename);
		exit(2);
	}
	if (st.st_size == 0) {
		close(fd);
		return;
	}
	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("fixdep: mmap");
		close(fd);
		return;
	}

	parse_config_file(map, st.st_size);

	munmap(map, st.st_size);

	close(fd);
}
/*
 * Important: The below generated source_foo.o and deps_foo.o variable
 * assignments are parsed not only by make, but also by the rather simple
 * parser in scripts/mod/sumversion.c.
 */
/*
 * Rewrite one "<target>: dep dep ..." file from the mapped buffer.
 * The first dependency becomes source_<target>; the rest are emitted
 * under deps_<target>, each additionally scanned for CONFIG_ symbols
 * via do_config_file(). autoconf.h, uml-config.h, kconfig.h and *.ver
 * prerequisites are filtered out (see the file-top comment for why).
 * NOTE(review): the buffer is not NUL-terminated; the initial strchr()
 * relies on the file containing a ':'.
 */
static void parse_dep_file(void *map, size_t len)
{
	char *m = map;
	char *end = m + len;
	char *p;
	char s[PATH_MAX];
	int first;

	p = strchr(m, ':');
	if (!p) {
		fprintf(stderr, "fixdep: parse error\n");
		exit(1);
	}
	memcpy(s, m, p-m); s[p-m] = 0;
	m = p+1;

	clear_config();

	first = 1;
	while (m < end) {
		while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
			m++;
		p = m;
		while (p < end && *p != ' ') p++;
		if (p == end) {
			do p--; while (!isalnum(*p));
			p++;
		}
		memcpy(s, m, p-m); s[p-m] = 0;
		if (strrcmp(s, "include/generated/autoconf.h") &&
		    strrcmp(s, "arch/um/include/uml-config.h") &&
		    strrcmp(s, "include/linux/kconfig.h") &&
		    strrcmp(s, ".ver")) {
			/*
			 * Do not list the source file as dependency, so that
			 * kbuild is not confused if a .c file is rewritten
			 * into .S or vice versa. Storing it in source_* is
			 * needed for modpost to compute srcversions.
			 */
			if (first) {
				printf("source_%s := %s\n\n", target, s);
				printf("deps_%s := \\\n", target);
			} else
				printf("  %s \\\n", s);
			do_config_file(s);
		}
		first = 0;
		m = p + 1;
	}
	printf("\n%s: $(deps_%s)\n\n", target, target);
	printf("$(deps_%s):\n", target);
}
/*
 * mmap the depfile and hand it to parse_dep_file(). Open/fstat
 * failures are fatal (exit 2); an empty or unmappable depfile is
 * reported and skipped.
 *
 * Fix: the mmap failure test used the non-portable "(long) map == -1"
 * instead of comparing against MAP_FAILED.
 */
static void print_deps(void)
{
	struct stat st;
	int fd;
	void *map;

	fd = open(depfile, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "fixdep: error opening depfile: ");
		perror(depfile);
		exit(2);
	}
	if (fstat(fd, &st) < 0) {
		fprintf(stderr, "fixdep: error fstat'ing depfile: ");
		perror(depfile);
		exit(2);
	}
	if (st.st_size == 0) {
		fprintf(stderr,"fixdep: %s is empty\n",depfile);
		close(fd);
		return;
	}
	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("fixdep: mmap");
		close(fd);
		return;
	}

	parse_dep_file(map, st.st_size);

	munmap(map, st.st_size);

	close(fd);
}
/*
 * Sanity check at start-up: fixdep's CONFIG-symbol hashing assumes a
 * 4-byte int with the same byte order the INT_CONF constant was built
 * for.  Bail out loudly if that does not hold on this host.
 */
static void traps(void)
{
	static char probe[] __attribute__((aligned(sizeof(int)))) = "CONF";
	int word = *(int *)probe;

	if (word != INT_CONF) {
		fprintf(stderr,
			"fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
			word);
		exit(2);
	}
}
/*
 * fixdep <depfile> <target> <cmdline>
 *
 * Rewrites gcc's dependency file for <target> so that it also depends on
 * the CONFIG_* symbols it uses; the result is printed on stdout.
 */
int main(int argc, char *argv[])
{
	traps();	/* verify int-size/endianness assumptions first */

	if (argc != 4)
		usage();

	depfile = argv[1];
	target = argv[2];
	cmdline = argv[3];

	print_cmdline();
	print_deps();

	return 0;
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/android_kernel_lge_v500 | drivers/video/backlight/vgg2432a4.c | 4983 | 6383 | /* drivers/video/backlight/vgg2432a4.c
*
* VGG2432A4 (ILI9320) LCD controller driver.
*
* Copyright 2007 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <video/ili9320.h>
#include "ili9320.h"
/* Device initialisation sequences */
/* Initialisation phase 1: power-control registers are first written with
 * zeroed drive/boost values, before vgg_init2 raises them (see the staged
 * sequence with delays in vgg2432a4_lcd_init()). */
static struct ili9320_reg vgg_init1[] = {
	{
		.address = ILI9320_POWER1,
		.value = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
	}, {
		.address = ILI9320_POWER2,
		.value = (ILI9320_POWER2_VC(7) |
			  ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
	}, {
		.address = ILI9320_POWER3,
		.value = ILI9320_POWER3_VRH(0),
	}, {
		.address = ILI9320_POWER4,
		.value = ILI9320_POWER4_VREOUT(0),
	},
};
/* Initialisation phase 2: raise the POWER1/POWER2 settings after the
 * zeroed phase-1 values have had time to settle (300ms delay between the
 * two phases in vgg2432a4_lcd_init()). */
static struct ili9320_reg vgg_init2[] = {
	{
		.address = ILI9320_POWER1,
		.value = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
			  ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
	}, {
		.address = ILI9320_POWER2,
		.value = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
	}
};
/* Gamma adjustment register values for this panel.  Written as one block
 * by vgg2432a4_lcd_init().  (GAMMA10's 0x807 is the same as the 0x0807
 * notation used for the other entries.) */
static struct ili9320_reg vgg_gamma[] = {
	{
		.address = ILI9320_GAMMA1,
		.value = 0x0000,
	}, {
		.address = ILI9320_GAMMA2,
		.value = 0x0505,
	}, {
		.address = ILI9320_GAMMA3,
		.value = 0x0004,
	}, {
		.address = ILI9320_GAMMA4,
		.value = 0x0006,
	}, {
		.address = ILI9320_GAMMA5,
		.value = 0x0707,
	}, {
		.address = ILI9320_GAMMA6,
		.value = 0x0105,
	}, {
		.address = ILI9320_GAMMA7,
		.value = 0x0002,
	}, {
		.address = ILI9320_GAMMA8,
		.value = 0x0707,
	}, {
		.address = ILI9320_GAMMA9,
		.value = 0x0704,
	}, {
		.address = ILI9320_GAMMA10,
		.value = 0x807,
	}
};
/* Initialisation phase 0: driver direction/scan mode, drive waveform,
 * entry mode and resizing.  Written before the power sequencing blocks
 * (vgg_init1/vgg_init2) in vgg2432a4_lcd_init(). */
static struct ili9320_reg vgg_init0[] = {
	[0] = {
		/* set direction and scan mode gate */
		.address = ILI9320_DRIVER,
		.value = ILI9320_DRIVER_SS,
	}, {
		.address = ILI9320_DRIVEWAVE,
		.value = (ILI9320_DRIVEWAVE_MUSTSET |
			  ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
	}, {
		.address = ILI9320_ENTRYMODE,
		.value = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
	}, {
		.address = ILI9320_RESIZING,
		.value = 0x0,
	},
};
/*
 * Program the ILI9320 controller behind the VGG2432A4 panel: VCore set,
 * oscillator start, init0 block, display/RGB-interface setup from the
 * platform data @cfg, staged power-up with delays, gamma table, display
 * window and scan geometry, then enable the display.
 *
 * Returns 0 on success or the error from the checked writes; note that
 * many of the intermediate ili9320_write() calls are not checked, and
 * all error labels fall through to the same plain return.
 */
static int vgg2432a4_lcd_init(struct ili9320 *lcd,
			      struct ili9320_platdata *cfg)
{
	unsigned int addr;
	int ret;

	/* Set VCore before anything else (VGG243237-6UFLWA) */
	ret = ili9320_write(lcd, 0x00e5, 0x8000);
	if (ret)
		goto err_initial;

	/* Start the oscillator up before we can do anything else. */
	ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
	if (ret)
		goto err_initial;

	/* must wait at-lesat 10ms after starting */
	mdelay(15);

	ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
	if (ret != 0)
		goto err_initial;

	ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
	ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
	ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);

	ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
	ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
	ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);

	/* Staged power-up: zeroed values, settle, then raise the supplies. */
	ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
	if (ret != 0)
		goto err_vgg;

	mdelay(300);

	ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
	if (ret != 0)
		goto err_vgg2;

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER3, 0x13c);

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
	ili9320_write(lcd, ILI9320_POWER7, 0x000e);

	mdelay(100);

	/* Reset the GRAM address counter to the origin. */
	ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
	ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);

	ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
	if (ret != 0)
		goto err_vgg3;

	/* Display window covers the whole cfg->hsize x cfg->vsize panel. */
	ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
	ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
	ili9320_write(lcd, ILI9320_VERT_START, 0x0);
	ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);

	ili9320_write(lcd, ILI9320_DRIVER2,
		      ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));

	ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
	ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);

	/* Clear all partial-display position/area registers. */
	for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
	     addr++) {
		ili9320_write(lcd, addr, 0x0);
	}

	ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
	ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
	ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
	ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
	ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
	ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);

	/* Remember the display-on value so suspend/resume paths can reuse it. */
	lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
			 ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
			 0x40);

	ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);

	return 0;

 err_vgg3:
 err_vgg2:
 err_vgg:
 err_initial:
	return ret;
}
#ifdef CONFIG_PM
/* Power management: forward suspend/resume to the shared ili9320 core,
 * looking up the per-device state from the SPI device's driver data. */
static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
{
	return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
}

static int vgg2432a4_resume(struct spi_device *spi)
{
	return ili9320_resume(dev_get_drvdata(&spi->dev));
}
#else
/* No PM support configured: leave the spi_driver callbacks empty. */
#define vgg2432a4_suspend NULL
#define vgg2432a4_resume NULL
#endif
/* Panel description handed to the shared ili9320 core at probe time;
 * .init runs the VGG2432A4-specific register sequence above. */
static struct ili9320_client vgg2432a4_client = {
	.name = "VGG2432A4",
	.init = vgg2432a4_lcd_init,
};
/* Device probe */
/* Bind the SPI device to the ili9320 core using our client description. */
static int __devinit vgg2432a4_probe(struct spi_device *spi)
{
	int ret = ili9320_probe_spi(spi, &vgg2432a4_client);

	if (ret) {
		dev_err(&spi->dev, "failed to initialise ili9320\n");
		return ret;
	}

	return 0;
}
/* Hand the stored driver data back to the ili9320 core for teardown. */
static int __devexit vgg2432a4_remove(struct spi_device *spi)
{
	void *lcd = dev_get_drvdata(&spi->dev);

	return ili9320_remove(lcd);
}
/* System shutdown/reboot hook: delegate to the ili9320 core. */
static void vgg2432a4_shutdown(struct spi_device *spi)
{
	ili9320_shutdown(dev_get_drvdata(&spi->dev));
}
/* SPI driver glue; devices match on the "VGG2432A4" name (see the
 * MODULE_ALIAS below).  suspend/resume are NULL without CONFIG_PM. */
static struct spi_driver vgg2432a4_driver = {
	.driver = {
		.name = "VGG2432A4",
		.owner = THIS_MODULE,
	},
	.probe = vgg2432a4_probe,
	.remove = __devexit_p(vgg2432a4_remove),
	.shutdown = vgg2432a4_shutdown,
	.suspend = vgg2432a4_suspend,
	.resume = vgg2432a4_resume,
};
module_spi_driver(vgg2432a4_driver);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:VGG2432A4");
| gpl-2.0 |
davidevinavil/kernel_s500_jb | drivers/net/wan/pci200syn.c | 4983 | 11507 | /*
* Goramo PCI200SYN synchronous serial card driver for Linux
*
* Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
* PLX Technology Inc. PCI9052 Data Book
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hd64572.h"
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
/*
* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
/* Layout mirrors the PCI9052 memory-mapped register map (offsets in the
 * per-field comments) - fields must not be reordered or resized. */
typedef struct {
	u32 loc_addr_range[4];	/* 00-0Ch : Local Address Ranges */
	u32 loc_rom_range;	/* 10h : Local ROM Range */
	u32 loc_addr_base[4];	/* 14-20h : Local Address Base Addrs */
	u32 loc_rom_base;	/* 24h : Local ROM Base */
	u32 loc_bus_descr[4];	/* 28-34h : Local Bus Descriptors */
	u32 rom_bus_descr;	/* 38h : ROM Bus Descriptor */
	u32 cs_base[4];		/* 3C-48h : Chip Select Base Addrs */
	u32 intr_ctrl_stat;	/* 4Ch : Interrupt Control/Status */
	u32 init_ctrl;		/* 50h : EEPROM ctrl, Init Ctrl, etc */
}plx9052;
/* Per-channel state; the card has two of these (chan 0 and 1). */
typedef struct port_s {
	struct napi_struct napi;	/* NAPI context */
	struct net_device *netdev;	/* HDLC net device for this channel */
	struct card_s *card;	/* owning card; NULL if not registered */
	spinlock_t lock;	/* TX lock */
	sync_serial_settings settings;	/* current line config (ioctl) */
	int rxpart;		/* partial frame received, next frame invalid*/
	unsigned short encoding;
	unsigned short parity;
	u16 rxin;		/* rx ring buffer 'in' pointer */
	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
	u16 txlast;
	u8 rxs, txs, tmc;	/* SCA registers */
	u8 chan;		/* physical port # - 0 or 1 */
}port_t;
/* Per-card state, stored as PCI driver data. */
typedef struct card_s {
	u8 __iomem *rambase;	/* buffer memory base (virtual) */
	u8 __iomem *scabase;	/* SCA memory base (virtual) */
	plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
	u16 rx_ring_buffers;	/* number of buffers in a ring */
	u16 tx_ring_buffers;
	u16 buff_offset;	/* offset of first buffer of first channel */
	u8 irq;			/* interrupt request level */
	port_t ports[2];	/* the two synchronous serial channels */
}card_t;
#define get_port(card, port)	(&card->ports[port])

/*
 * Flush posted PCI writes by reading an SCA register.  The original
 * definition carried a trailing ';' inside the macro, which expanded to
 * an extra empty statement at every call site and would break use in an
 * unbraced if/else - the semicolon belongs at the call site only.
 */
#define sca_flush(card)		(sca_in(IER0, card))
/*
 * Copy to card memory in chunks of at most 256 bytes, reading back one
 * byte after each chunk - the readb() appears intended to force the
 * posted write burst out to the card before the next chunk (same idiom
 * as sca_flush()).  Note the do/while: the readb() runs at least once
 * even for length == 0.
 */
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
	int len;

	do {
		len = length > 256 ? 256 : length;
		memcpy_toio(dest, src, len);
		dest += len;
		src += len;
		length -= len;
		readb(dest);
	} while (len);
}

/* Make the shared SCA code (hd64572.c, textually included below) use the
 * chunked copy above instead of the stock memcpy_toio(). */
#undef memcpy_toio
#define memcpy_toio new_memcpy_toio

#include "hd64572.c"
/*
 * Program the SCA clock routing for @port from port->settings.clock_type.
 * Each case selects RX/TX clock sources (BRG output, line clock, or the
 * RX clock reused for TX); results are cached in port->rxs/txs and
 * written to the MSCI RXS/TXS registers before sca_set_port().
 */
static void pci200_set_iface(port_t *port)
{
	card_t *card = port->card;
	u16 msci = get_msci(port);
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
		port->card);
	switch(port->settings.clock_type) {
	case CLOCK_INT:
		rxs |= CLK_BRG; /* BRG output */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	case CLOCK_TXINT:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
		break;

	case CLOCK_TXFROMRX:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	default:	/* EXTernal clock */
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
		break;
	}

	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, msci + RXS, card);
	sca_out(txs, msci + TXS, card);
	sca_set_port(port);
}
/* ndo_open: bring up the HDLC layer, then the SCA channel and clocks. */
static int pci200_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	int err;

	err = hdlc_open(dev);
	if (err)
		return err;

	sca_open(dev);
	pci200_set_iface(port);
	sca_flush(port->card);
	return 0;
}
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
sca_flush(dev_to_port(dev)->card);
hdlc_close(dev);
return 0;
}
/*
 * SIOCWANDEV handler: get/set the synchronous serial line settings.
 * SIOCDEVPRIVATE dumps the descriptor rings when DEBUG_RINGS is defined;
 * anything else is passed through to the generic hdlc_ioctl().
 */
static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	port_t *port = dev_to_port(dev);

#ifdef DEBUG_RINGS
	if (cmd == SIOCDEVPRIVATE) {
		sca_dump_rings(dev);
		return 0;
	}
#endif
	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		/* Report current settings; caller's buffer must be big enough. */
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &port->settings, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_V35:
	case IF_IFACE_SYNC_SERIAL:
		/* Changing line settings requires admin capability. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* Only the four modes handled by pci200_set_iface(). */
		if (new_line.clock_type != CLOCK_EXT &&
		    new_line.clock_type != CLOCK_TXFROMRX &&
		    new_line.clock_type != CLOCK_INT &&
		    new_line.clock_type != CLOCK_TXINT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		memcpy(&port->settings, &new_line, size); /* Update settings */
		pci200_set_iface(port);
		sca_flush(port->card);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/*
 * Tear down a card in reverse order of pci200_pci_init_one().  Also used
 * by the probe error paths, so every step is guarded: only ports whose
 * ->card is set were successfully registered, and the IRQ/mappings may
 * not have been set up yet.
 */
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
	int i;
	card_t *card = pci_get_drvdata(pdev);

	for (i = 0; i < 2; i++)
		if (card->ports[i].card)
			unregister_hdlc_device(card->ports[i].netdev);

	if (card->irq)
		free_irq(card->irq, card);

	if (card->rambase)
		iounmap(card->rambase);
	if (card->scabase)
		iounmap(card->scabase);
	if (card->plxbase)
		iounmap(card->plxbase);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	if (card->ports[0].netdev)
		free_netdev(card->ports[0].netdev);
	if (card->ports[1].netdev)
		free_netdev(card->ports[1].netdev);
	kfree(card);
}
/* net_device callbacks; MTU changes and TX start come from generic HDLC. */
static const struct net_device_ops pci200_ops = {
	.ndo_open       = pci200_open,
	.ndo_stop       = pci200_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = pci200_ioctl,
};
/*
 * PCI probe: enable the device, map the three BARs (PLX bridge, SCA,
 * packet RAM), reset the PLX, size the on-card RAM into TX/RX rings,
 * hook up the interrupt and register one HDLC device per channel.
 * All error paths delegate cleanup to pci200_pci_remove_one().
 */
static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
					 const struct pci_device_id *ent)
{
	card_t *card;
	u32 __iomem *p;
	int i;
	u32 ramsize;
	u32 ramphys;	/* buffer memory base */
	u32 scaphys;	/* SCA memory base */
	u32 plxphys;	/* PLX registers memory base */

	i = pci_enable_device(pdev);
	if (i)
		return i;

	i = pci_request_regions(pdev, "PCI200SYN");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}
	pci_set_drvdata(pdev, card);
	card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
	card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
	if (!card->ports[0].netdev || !card->ports[1].netdev) {
		pr_err("unable to allocate memory\n");
		pci200_pci_remove_one(pdev);
		return -ENOMEM;
	}

	/* BAR sizes are burned into the card's EEPROM; sanity check them. */
	if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
	    pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
	    pci_resource_len(pdev, 3) < 16384) {
		pr_err("invalid card EEPROM parameters\n");
		pci200_pci_remove_one(pdev);
		return -EFAULT;
	}

	plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
	card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);

	scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
	card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);

	ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
	card->rambase = pci_ioremap_bar(pdev, 3);

	if (card->plxbase == NULL ||
	    card->scabase == NULL ||
	    card->rambase == NULL) {
		pr_err("ioremap() failed\n");
		pci200_pci_remove_one(pdev);
		return -EFAULT;
	}

	/* Reset PLX */
	p = &card->plxbase->init_ctrl;
	writel(readl(p) | 0x40000000, p);
	readl(p);	/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(readl(p) & ~0x40000000, p);
	readl(p);	/* Flush the write - do not use sca_flush */
	udelay(1);

	ramsize = sca_detect_ram(card, card->rambase,
				 pci_resource_len(pdev, 3));

	/* number of TX + RX buffers for one port - this is dual port card */
	i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
						    card->rx_ring_buffers);

	pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
		ramsize / 1024, ramphys,
		pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);

	if (card->tx_ring_buffers < 1) {
		pr_err("RAM test failed\n");
		pci200_pci_remove_one(pdev);
		return -EFAULT;
	}

	/* Enable interrupts on the PCI bridge */
	p = &card->plxbase->intr_ctrl_stat;
	writew(readw(p) | 0x0040, p);

	/* Allocate IRQ */
	if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
		pr_warn("could not allocate IRQ%d\n", pdev->irq);
		pci200_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	sca_init(card, 0);

	/* Set up and register both channels. */
	for (i = 0; i < 2; i++) {
		port_t *port = &card->ports[i];
		struct net_device *dev = port->netdev;
		hdlc_device *hdlc = dev_to_hdlc(dev);
		port->chan = i;

		spin_lock_init(&port->lock);
		dev->irq = card->irq;
		dev->mem_start = ramphys;
		dev->mem_end = ramphys + ramsize - 1;
		dev->tx_queue_len = 50;
		dev->netdev_ops = &pci200_ops;
		hdlc->attach = sca_attach;
		hdlc->xmit = sca_xmit;
		port->settings.clock_type = CLOCK_EXT;
		port->card = card;
		sca_init_port(port);
		if (register_hdlc_device(dev)) {
			pr_err("unable to register hdlc device\n");
			/* mark unregistered so remove_one skips this port */
			port->card = NULL;
			pci200_pci_remove_one(pdev);
			return -ENOBUFS;
		}

		netdev_info(dev, "PCI200SYN channel %d\n", port->chan);
	}

	sca_flush(card);
	return 0;
}
/* Match the PLX 9050 bridge carrying Goramo's PCI200SYN subsystem IDs. */
static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
	  PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
	{ 0, }
};
/* PCI driver glue. */
static struct pci_driver pci200_pci_driver = {
	.name     = "PCI200SYN",
	.id_table = pci200_pci_tbl,
	.probe    = pci200_pci_init_one,
	.remove   = pci200_pci_remove_one,
};
/* Validate the pci_clock_freq module parameter (1..80 MHz) before
 * registering the PCI driver. */
static int __init pci200_init_module(void)
{
	if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
		pr_err("Invalid PCI clock frequency\n");
		return -EINVAL;
	}
	return pci_register_driver(&pci200_pci_driver);
}
/* Module unload: unregistering the driver triggers remove_one per card. */
static void __exit pci200_cleanup_module(void)
{
	pci_unregister_driver(&pci200_pci_driver);
}
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
module_param(pci_clock_freq, int, 0444);
MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
module_init(pci200_init_module);
module_exit(pci200_cleanup_module);
| gpl-2.0 |
jollaman999/jolla-kernel_G_Gen3 | drivers/video/vga16fb.c | 5239 | 38314 | /*
* linux/drivers/video/vga16.c -- VGA 16-color framebuffer driver
*
* Copyright 1999 Ben Pfaff <pfaffben@debian.org> and Petr Vandrovec <VANDROVE@vc.cvut.cz>
* Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <asm/io.h>
#include <video/vga.h>
#define VGA_FB_PHYS 0xA0000
#define VGA_FB_PHYS_LEN 65536
#define MODE_SKIP4 1
#define MODE_8BPP 2
#define MODE_CFB 4
#define MODE_TEXT 8
/* --------------------------------------------------------------------- */
/*
* card parameters
*/
/* Per-device state for the vga16fb driver. */
struct vga16fb_par {
	/* structure holding original VGA register settings when the
	   screen is blanked */
	struct {
		unsigned char SeqCtrlIndex;	/* Sequencer Index reg. */
		unsigned char CrtCtrlIndex;	/* CRT-Contr. Index reg. */
		unsigned char CrtMiscIO;	/* Miscellaneous register */
		unsigned char HorizontalTotal;	/* CRT-Controller:00h */
		unsigned char HorizDisplayEnd;	/* CRT-Controller:01h */
		unsigned char StartHorizRetrace;/* CRT-Controller:04h */
		unsigned char EndHorizRetrace;	/* CRT-Controller:05h */
		unsigned char Overflow;		/* CRT-Controller:07h */
		unsigned char StartVertRetrace;	/* CRT-Controller:10h */
		unsigned char EndVertRetrace;	/* CRT-Controller:11h */
		unsigned char ModeControl;	/* CRT-Controller:17h */
		unsigned char ClockingMode;	/* Seq-Controller:01h */
	} vga_state;
	struct vgastate state;	/* full VGA state saved on first open */
	unsigned int ref_count;	/* open count; state restored on last release */
	int palette_blanked, vesa_blanked, mode, isVGA;
	u8 misc, pel_msk, vss, clkdiv;	/* values computed by check_var */
	u8 crtc[VGA_CRT_C];	/* CRTC register shadow built by check_var */
};
/* --------------------------------------------------------------------- */
/* Default video mode: 640x480, 4 bpp, 39721ps dot clock (25.175 MHz in
 * the vgaclocks table below). */
static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
	.xres		= 640,
	.yres		= 480,
	.xres_virtual	= 640,
	.yres_virtual	= 480,
	.bits_per_pixel	= 4,
	.activate	= FB_ACTIVATE_TEST,
	.height		= -1,
	.width		= -1,
	.pixclock	= 39721,
	.left_margin	= 48,
	.right_margin	= 16,
	.upper_margin	= 33,
	.lower_margin	= 10,
	.hsync_len	= 96,
	.vsync_len	= 2,
	.vmode		= FB_VMODE_NONINTERLACED,
};
/* name should not depend on EGA/VGA */
/* name should not depend on EGA/VGA */
/* Fixed framebuffer parameters for the default 4bpp planar mode; the
 * type/line_length fields are recomputed in vga16fb_update_fix(). */
static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
	.id		= "VGA16 VGA",
	.smem_start	= VGA_FB_PHYS,
	.smem_len	= VGA_FB_PHYS_LEN,
	.type		= FB_TYPE_VGA_PLANES,
	.type_aux	= FB_AUX_VGA_PLANES_VGA4,
	.visual		= FB_VISUAL_PSEUDOCOLOR,
	.xpanstep	= 8,
	.ypanstep	= 1,
	.line_length	= 640 / 8,
	.accel		= FB_ACCEL_NONE
};
/* The VGA's weird architecture often requires that we read a byte and
write a byte to the same location. It doesn't matter *what* byte
we write, however. This is because all the action goes on behind
the scenes in the VGA's 32-bit latch register, and reading and writing
video memory just invokes latch behavior.
To avoid race conditions (is this necessary?), reading and writing
the memory byte should be done with a single instruction. One
suitable instruction is the x86 bitwise OR. The following
read-modify-write routine should optimize to one such bitwise
OR. */
/* Read-modify-write of one framebuffer byte - see the block comment
 * above: the read loads the VGA latches, the written value is ignored. */
static inline void rmw(volatile char __iomem *p)
{
	readb(p);
	writeb(1, p);
}
/* Program the Graphics Mode Register (bits 0-1 write mode, bit 3 read
 * mode) and hand back its previous contents so callers can restore it. */
static inline int setmode(int mode)
{
	int prev = vga_io_rgfx(VGA_GFX_MODE);

	vga_io_w(VGA_GFX_D, mode);
	return prev;
}
/* Select the Bit Mask Register and return its value.  Leaves the index
   pointing at VGA_GFX_BIT_MASK, which setmask() relies on. */
static inline int selectmask(void)
{
	return vga_io_rgfx(VGA_GFX_BIT_MASK);
}
/* Set the value of the Bit Mask Register. It must already have been
   selected with selectmask() - only the data port is written here. */
static inline void setmask(int mask)
{
	vga_io_w(VGA_GFX_D, mask);
}
/* Program the Data Rotate Register (bits 0-2 rotate count, bits 3-4 ALU
 * operation: 0=NOP, 1=AND, 2=OR, 3=XOR) and return its previous value. */
static inline int setop(int op)
{
	int prev = vga_io_rgfx(VGA_GFX_DATA_ROTATE);

	vga_io_w(VGA_GFX_D, op);
	return prev;
}
/* Program the Enable Set/Reset Register (this driver always uses 0xf)
 * and return its previous value. */
static inline int setsr(int sr)
{
	int prev = vga_io_rgfx(VGA_GFX_SR_ENABLE);

	vga_io_w(VGA_GFX_D, sr);
	return prev;
}
/* Program the Set/Reset (colour) Register and return its previous value. */
static inline int setcolor(int color)
{
	int prev = vga_io_rgfx(VGA_GFX_SR_VALUE);

	vga_io_w(VGA_GFX_D, color);
	return prev;
}
/* Return the value in the Graphics Address Register (current index). */
static inline int getindex(void)
{
	return vga_io_r(VGA_GFX_I);
}
/* Set the value in the Graphics Address Register (select a register). */
static inline void setindex(int index)
{
	vga_io_w(VGA_GFX_I, index);
}
/*
 * Program the CRTC start address and the attribute-controller pixel
 * panning register from @var's x/y offsets.  The address shift depends
 * on the mode: >>2 for 8bpp, >>3 for text (with a fudged 16-line font)
 * and planar modes; nonstd 4bpp backs xoffset up by one pixel first.
 */
static void vga16fb_pan_var(struct fb_info *info,
			    struct fb_var_screeninfo *var)
{
	struct vga16fb_par *par = info->par;
	u32 xoffset, pos;

	xoffset = var->xoffset;
	if (info->var.bits_per_pixel == 8) {
		pos = (info->var.xres_virtual * var->yoffset + xoffset) >> 2;
	} else if (par->mode & MODE_TEXT) {
		int fh = 16; // FIXME !!! font height. Fugde for now.
		pos = (info->var.xres_virtual * (var->yoffset / fh) + xoffset) >> 3;
	} else {
		if (info->var.nonstd)
			xoffset--;
		pos = (info->var.xres_virtual * var->yoffset + xoffset) >> 3;
	}
	vga_io_wcrt(VGA_CRTC_START_HI, pos >> 8);
	vga_io_wcrt(VGA_CRTC_START_LO, pos & 0xFF);
	/* if we support CFB4, then we must! support xoffset with pixel
	 * granularity if someone supports xoffset in bit resolution */
	vga_io_r(VGA_IS1_RC);		/* reset flip-flop */
	vga_io_w(VGA_ATT_IW, VGA_ATC_PEL);
	if (info->var.bits_per_pixel == 8)
		vga_io_w(VGA_ATT_IW, (xoffset & 3) << 1);
	else
		vga_io_w(VGA_ATT_IW, xoffset & 7);
	vga_io_r(VGA_IS1_RC);
	vga_io_w(VGA_ATT_IW, 0x20);	/* re-enable video output (ATC bit 5) */
}
static void vga16fb_update_fix(struct fb_info *info)
{
if (info->var.bits_per_pixel == 4) {
if (info->var.nonstd) {
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.line_length = info->var.xres_virtual / 2;
} else {
info->fix.type = FB_TYPE_VGA_PLANES;
info->fix.type_aux = FB_AUX_VGA_PLANES_VGA4;
info->fix.line_length = info->var.xres_virtual / 8;
}
} else if (info->var.bits_per_pixel == 0) {
info->fix.type = FB_TYPE_TEXT;
info->fix.type_aux = FB_AUX_TEXT_CGA;
info->fix.line_length = info->var.xres_virtual / 4;
} else { /* 8bpp */
if (info->var.nonstd) {
info->fix.type = FB_TYPE_VGA_PLANES;
info->fix.type_aux = FB_AUX_VGA_PLANES_CFB8;
info->fix.line_length = info->var.xres_virtual / 4;
} else {
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.line_length = info->var.xres_virtual;
}
}
}
/*
 * Pick the standard VGA dot clock closest to the requested @pixclock
 * (scaled by @mul/@div - 8bpp modes run the VGA clock at twice the pixel
 * rate) and record the matching MISC-output and sequencer clocking-mode
 * bits in @par.
 */
static void vga16fb_clock_chip(struct vga16fb_par *par,
			       unsigned int pixclock,
			       const struct fb_info *info,
			       int mul, int div)
{
	static const struct {
		u32 pixclock;
		u8 misc;
		u8 seq_clock_mode;
	} *ptr, *best, vgaclocks[] = {
		{ 79442 /* 12.587 */, 0x00, 0x08},
		{ 70616 /* 14.161 */, 0x04, 0x08},
		{ 39721 /* 25.175 */, 0x00, 0x00},
		{ 35308 /* 28.322 */, 0x04, 0x00},
		{     0 /* bad */,    0x00, 0x00}};
	int err;

	pixclock = (pixclock * mul) / div;
	best = vgaclocks;
	err = pixclock - best->pixclock;
	if (err < 0)
		err = -err;
	for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
		int tmp;

		tmp = pixclock - ptr->pixclock;
		if (tmp < 0)
			tmp = -tmp;
		if (tmp < err) {
			err = tmp;
			best = ptr;
		}
	}
	par->misc |= best->misc;
	par->clkdiv = best->seq_clock_mode;
	/*
	 * The original code ended with
	 *	pixclock = (best->pixclock * div) / mul;
	 * but 'pixclock' is a by-value parameter, so that was a dead store -
	 * the rounded value was never seen by the caller.  (Mainline later
	 * changed the parameter to a pointer so the value is reported back.)
	 */
}
#define FAIL(X) return -EINVAL
/* fb open hook: the first opener snapshots the VGA fonts, mode and cmap
 * so vga16fb_release() can restore them when the last user goes away. */
static int vga16fb_open(struct fb_info *info, int user)
{
	struct vga16fb_par *par = info->par;

	if (par->ref_count == 0) {
		memset(&par->state, 0, sizeof(par->state));
		par->state.flags = VGA_SAVE_FONTS | VGA_SAVE_MODE |
			VGA_SAVE_CMAP;
		save_vga(&par->state);
	}
	par->ref_count++;

	return 0;
}
/* fb release hook: the last closer restores the VGA state saved on the
 * first open; a release without a matching open is rejected. */
static int vga16fb_release(struct fb_info *info, int user)
{
	struct vga16fb_par *par = info->par;

	if (par->ref_count == 0)
		return -EINVAL;
	if (par->ref_count == 1)
		restore_vga(&par->state);
	par->ref_count--;

	return 0;
}
/*
 * Validate and round the requested mode in @var, and compute the CRTC
 * register shadow (par->crtc[]), misc-output and clock settings needed
 * to realise it.  Nothing is written to the hardware here - that happens
 * in vga16fb_set_par().  Returns 0 or a negative errno if the mode
 * cannot be represented.
 */
static int vga16fb_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct vga16fb_par *par = info->par;
	u32 xres, right, hslen, left, xtotal;
	u32 yres, lower, vslen, upper, ytotal;
	u32 vxres, xoffset, vyres, yoffset;
	u32 pos;
	u8 r7, rMode;
	int shift;
	int mode;
	u32 maxmem;

	par->pel_msk = 0xFF;

	/* Select mode flags, address shift, PEL mask and the amount of
	 * addressable video memory from bpp/nonstd. */
	if (var->bits_per_pixel == 4) {
		if (var->nonstd) {
			if (!par->isVGA)
				return -EINVAL;
			shift = 3;
			mode = MODE_SKIP4 | MODE_CFB;
			maxmem = 16384;
			par->pel_msk = 0x0F;
		} else {
			shift = 3;
			mode = 0;
			maxmem = 65536;
		}
	} else if (var->bits_per_pixel == 8) {
		if (!par->isVGA)
			return -EINVAL; /* no support on EGA */
		shift = 2;
		if (var->nonstd) {
			mode = MODE_8BPP | MODE_CFB;
			maxmem = 65536;
		} else {
			mode = MODE_SKIP4 | MODE_8BPP | MODE_CFB;
			maxmem = 16384;
		}
	} else
		return -EINVAL;

	/* Round the horizontal timings to the 8-pixel character clock. */
	xres = (var->xres + 7) & ~7;
	vxres = (var->xres_virtual + 0xF) & ~0xF;
	xoffset = (var->xoffset + 7) & ~7;
	left = (var->left_margin + 7) & ~7;
	right = (var->right_margin + 7) & ~7;
	hslen = (var->hsync_len + 7) & ~7;

	if (vxres < xres)
		vxres = xres;
	if (xres + xoffset > vxres)
		xoffset = vxres - xres;

	/* Write the rounded values back so userspace sees the real mode. */
	var->xres = xres;
	var->right_margin = right;
	var->hsync_len = hslen;
	var->left_margin = left;
	var->xres_virtual = vxres;
	var->xoffset = xoffset;

	/* From here on, horizontal values are in character clocks. */
	xres >>= shift;
	right >>= shift;
	hslen >>= shift;
	left >>= shift;
	vxres >>= shift;
	xtotal = xres + right + hslen + left;
	if (xtotal >= 256)
		FAIL("xtotal too big");
	if (hslen > 32)
		FAIL("hslen too big");
	if (right + hslen + left > 64)
		FAIL("hblank too big");
	par->crtc[VGA_CRTC_H_TOTAL] = xtotal - 5;
	par->crtc[VGA_CRTC_H_BLANK_START] = xres - 1;
	par->crtc[VGA_CRTC_H_DISP] = xres - 1;
	pos = xres + right;
	par->crtc[VGA_CRTC_H_SYNC_START] = pos;
	pos += hslen;
	par->crtc[VGA_CRTC_H_SYNC_END] = pos & 0x1F;
	pos += left - 2; /* blank_end + 2 <= total + 5 */
	par->crtc[VGA_CRTC_H_BLANK_END] = (pos & 0x1F) | 0x80;
	if (pos & 0x20)
		par->crtc[VGA_CRTC_H_SYNC_END] |= 0x80;

	/* Vertical timings. */
	yres = var->yres;
	lower = var->lower_margin;
	vslen = var->vsync_len;
	upper = var->upper_margin;
	vyres = var->yres_virtual;
	yoffset = var->yoffset;

	if (yres > vyres)
		vyres = yres;
	/* Shrink the virtual height to what the video memory can hold. */
	if (vxres * vyres > maxmem) {
		vyres = maxmem / vxres;
		if (vyres < yres)
			return -ENOMEM;
	}
	if (yoffset + yres > vyres)
		yoffset = vyres - yres;

	var->yres = yres;
	var->lower_margin = lower;
	var->vsync_len = vslen;
	var->upper_margin = upper;
	var->yres_virtual = vyres;
	var->yoffset = yoffset;

	if (var->vmode & FB_VMODE_DOUBLE) {
		yres <<= 1;
		lower <<= 1;
		vslen <<= 1;
		upper <<= 1;
	}
	ytotal = yres + lower + vslen + upper;
	/* Too tall: halve everything and enable the CRTC clock divider. */
	if (ytotal > 1024) {
		ytotal >>= 1;
		yres >>= 1;
		lower >>= 1;
		vslen >>= 1;
		upper >>= 1;
		rMode = 0x04;
	} else
		rMode = 0x00;
	if (ytotal > 1024)
		FAIL("ytotal too big");
	if (vslen > 16)
		FAIL("vslen too big");
	par->crtc[VGA_CRTC_V_TOTAL] = ytotal - 2;
	/* r7 collects the overflow (bit 8/9) pieces of the vertical values. */
	r7 = 0x10;	/* disable linecompare */
	if (ytotal & 0x100) r7 |= 0x01;
	if (ytotal & 0x200) r7 |= 0x20;
	par->crtc[VGA_CRTC_PRESET_ROW] = 0;
	par->crtc[VGA_CRTC_MAX_SCAN] = 0x40;	/* 1 scanline, no linecmp */
	if (var->vmode & FB_VMODE_DOUBLE)
		par->crtc[VGA_CRTC_MAX_SCAN] |= 0x80;
	par->crtc[VGA_CRTC_CURSOR_START] = 0x20;
	par->crtc[VGA_CRTC_CURSOR_END] = 0x00;
	if ((mode & (MODE_CFB | MODE_8BPP)) == MODE_CFB)
		xoffset--;
	pos = yoffset * vxres + (xoffset >> shift);
	par->crtc[VGA_CRTC_START_HI] = pos >> 8;
	par->crtc[VGA_CRTC_START_LO] = pos & 0xFF;
	par->crtc[VGA_CRTC_CURSOR_HI] = 0x00;
	par->crtc[VGA_CRTC_CURSOR_LO] = 0x00;
	pos = yres - 1;
	par->crtc[VGA_CRTC_V_DISP_END] = pos & 0xFF;
	par->crtc[VGA_CRTC_V_BLANK_START] = pos & 0xFF;
	if (pos & 0x100)
		r7 |= 0x0A;	/* 0x02 -> DISP_END, 0x08 -> BLANK_START */
	if (pos & 0x200) {
		r7 |= 0x40;	/* 0x40 -> DISP_END */
		par->crtc[VGA_CRTC_MAX_SCAN] |= 0x20; /* BLANK_START */
	}
	pos += lower;
	par->crtc[VGA_CRTC_V_SYNC_START] = pos & 0xFF;
	if (pos & 0x100)
		r7 |= 0x04;
	if (pos & 0x200)
		r7 |= 0x80;
	pos += vslen;
	par->crtc[VGA_CRTC_V_SYNC_END] = (pos & 0x0F) & ~0x10; /* disabled IRQ */
	pos += upper - 1; /* blank_end + 1 <= ytotal + 2 */
	par->crtc[VGA_CRTC_V_BLANK_END] = pos & 0xFF;	/* 0x7F for original VGA,
		but some SVGA chips requires all 8 bits to set */
	if (vxres >= 512)
		FAIL("vxres too long");
	par->crtc[VGA_CRTC_OFFSET] = vxres >> 1;
	if (mode & MODE_SKIP4)
		par->crtc[VGA_CRTC_UNDERLINE] = 0x5F;	/* 256, cfb8 */
	else
		par->crtc[VGA_CRTC_UNDERLINE] = 0x1F;	/* 16, vgap */
	par->crtc[VGA_CRTC_MODE] = rMode | ((mode & MODE_TEXT) ? 0xA3 : 0xE3);
	par->crtc[VGA_CRTC_LINE_COMPARE] = 0xFF;
	par->crtc[VGA_CRTC_OVERFLOW] = r7;

	par->vss = 0x00;	/* 3DA */

	par->misc = 0xE3;	/* enable CPU, ports 0x3Dx, positive sync */
	if (var->sync & FB_SYNC_HOR_HIGH_ACT)
		par->misc &= ~0x40;
	if (var->sync & FB_SYNC_VERT_HIGH_ACT)
		par->misc &= ~0x80;

	par->mode = mode;

	if (mode & MODE_8BPP)
		/* pixel clock == vga clock / 2 */
		vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
	else
		/* pixel clock == vga clock */
		vga16fb_clock_chip(par, var->pixclock, info, 1, 1);

	/* Report the colour layout for this pseudocolor mode. */
	var->red.offset = var->green.offset = var->blue.offset =
	var->transp.offset = 0;
	var->red.length = var->green.length = var->blue.length =
		(par->isVGA) ? 6 : 2;
	var->transp.length = 0;
	var->activate = FB_ACTIVATE_NOW;
	var->height = -1;
	var->width = -1;
	var->accel_flags = 0;
	return 0;
}
#undef FAIL
/*
 * fb_ops.fb_set_par hook: program the VGA hardware for the mode that
 * vga16fb_check_var() previously validated and cached in par->mode /
 * par->crtc.  Builds register images for the sequencer, graphics
 * controller and attribute controller, then writes them out in the
 * order the hardware requires (sync reset, CRTC unprotect, etc.).
 * Always returns 0.
 */
static int vga16fb_set_par(struct fb_info *info)
{
	struct vga16fb_par *par = info->par;
	u8 gdc[VGA_GFX_C];	/* graphics controller register image */
	u8 seq[VGA_SEQ_C];	/* sequencer register image */
	u8 atc[VGA_ATT_C];	/* attribute controller register image */
	int fh, i;

	seq[VGA_SEQ_CLOCK_MODE] = 0x01 | par->clkdiv;
	if (par->mode & MODE_TEXT)
		seq[VGA_SEQ_PLANE_WRITE] = 0x03;
	else
		seq[VGA_SEQ_PLANE_WRITE] = 0x0F;	/* enable all four planes */
	seq[VGA_SEQ_CHARACTER_MAP] = 0x00;
	if (par->mode & MODE_TEXT)
		seq[VGA_SEQ_MEMORY_MODE] = 0x03;
	else if (par->mode & MODE_SKIP4)
		seq[VGA_SEQ_MEMORY_MODE] = 0x0E;
	else
		seq[VGA_SEQ_MEMORY_MODE] = 0x06;
	gdc[VGA_GFX_SR_VALUE] = 0x00;
	gdc[VGA_GFX_SR_ENABLE] = 0x00;
	gdc[VGA_GFX_COMPARE_VALUE] = 0x00;
	gdc[VGA_GFX_DATA_ROTATE] = 0x00;
	gdc[VGA_GFX_PLANE_READ] = 0;
	if (par->mode & MODE_TEXT) {
		gdc[VGA_GFX_MODE] = 0x10;
		gdc[VGA_GFX_MISC] = 0x06;
	} else {
		if (par->mode & MODE_CFB)
			gdc[VGA_GFX_MODE] = 0x40;
		else
			gdc[VGA_GFX_MODE] = 0x00;
		gdc[VGA_GFX_MISC] = 0x05;
	}
	gdc[VGA_GFX_COMPARE_MASK] = 0x0F;
	gdc[VGA_GFX_BIT_MASK] = 0xFF;
	/* identity map the 16 attribute palette entries */
	for (i = 0x00; i < 0x10; i++)
		atc[i] = i;
	if (par->mode & MODE_TEXT)
		atc[VGA_ATC_MODE] = 0x04;
	else if (par->mode & MODE_8BPP)
		atc[VGA_ATC_MODE] = 0x41;
	else
		atc[VGA_ATC_MODE] = 0x81;
	atc[VGA_ATC_OVERSCAN] = 0x00; /* 0 for EGA, 0xFF for VGA */
	atc[VGA_ATC_PLANE_ENABLE] = 0x0F;
	/* sub-character horizontal panning from the current xoffset */
	if (par->mode & MODE_8BPP)
		atc[VGA_ATC_PEL] = (info->var.xoffset & 3) << 1;
	else
		atc[VGA_ATC_PEL] = info->var.xoffset & 7;
	atc[VGA_ATC_COLOR_PAGE] = 0x00;
	if (par->mode & MODE_TEXT) {
		fh = 16; // FIXME !!! Fudge font height.
		par->crtc[VGA_CRTC_MAX_SCAN] = (par->crtc[VGA_CRTC_MAX_SCAN]
			& ~0x1F) | (fh - 1);
	}
	vga_io_w(VGA_MIS_W, vga_io_r(VGA_MIS_R) | 0x01);
	/* Enable graphics register modification */
	if (!par->isVGA) {
		vga_io_w(EGA_GFX_E0, 0x00);
		vga_io_w(EGA_GFX_E1, 0x01);
	}
	/* update misc output register */
	vga_io_w(VGA_MIS_W, par->misc);
	/* synchronous reset on */
	vga_io_wseq(0x00, 0x01);
	if (par->isVGA)
		vga_io_w(VGA_PEL_MSK, par->pel_msk);
	/* write sequencer registers */
	vga_io_wseq(VGA_SEQ_CLOCK_MODE, seq[VGA_SEQ_CLOCK_MODE] | 0x20);
	for (i = 2; i < VGA_SEQ_C; i++) {
		vga_io_wseq(i, seq[i]);
	}
	/* synchronous reset off */
	vga_io_wseq(0x00, 0x03);
	/* deprotect CRT registers 0-7 */
	vga_io_wcrt(VGA_CRTC_V_SYNC_END, par->crtc[VGA_CRTC_V_SYNC_END]);
	/* write CRT registers */
	for (i = 0; i < VGA_CRTC_REGS; i++) {
		vga_io_wcrt(i, par->crtc[i]);
	}
	/* write graphics controller registers */
	for (i = 0; i < VGA_GFX_C; i++) {
		vga_io_wgfx(i, gdc[i]);
	}
	/* write attribute controller registers */
	for (i = 0; i < VGA_ATT_C; i++) {
		vga_io_r(VGA_IS1_RC); /* reset flip-flop */
		vga_io_wattr(i, atc[i]);
	}
	/* Wait for screen to stabilize. */
	mdelay(50);
	vga_io_wseq(VGA_SEQ_CLOCK_MODE, seq[VGA_SEQ_CLOCK_MODE]);
	vga_io_r(VGA_IS1_RC);
	vga_io_w(VGA_ATT_IW, 0x20);
	vga16fb_update_fix(info);
	return 0;
}
/*
 * Program one of the 16 EGA attribute-controller palette entries.
 * Each channel contributes two bits; the octal-valued map {0,1,8,9}
 * spreads a channel's top two bits across the primary (bit 0) and
 * secondary/intensity (bit 3) positions before the per-channel shifts.
 */
static void ega16_setpalette(int regno, unsigned red, unsigned green, unsigned blue)
{
	/* NB: values are intentionally octal (0, 1, 8, 9 decimal) */
	static const unsigned char map[] = { 000, 001, 010, 011 };
	int val;

	if (regno >= 16)
		return;
	val = map[red>>14] | ((map[green>>14]) << 1) | ((map[blue>>14]) << 2);
	vga_io_r(VGA_IS1_RC);   /* ! 0x3BA */
	vga_io_wattr(regno, val);
	vga_io_r(VGA_IS1_RC);   /* some clones need it */
	vga_io_w(VGA_ATT_IW, 0x20); /* unblank screen */
}
/*
 * Load one VGA DAC palette entry: write the index register, then the
 * red, green and blue components in sequence.  The >> 10 keeps the top
 * 6 bits of each 16-bit fb component, matching the 6-bit VGA DAC.
 */
static void vga16_setpalette(int regno, unsigned red, unsigned green, unsigned blue)
{
	outb(regno, VGA_PEL_IW);
	outb(red >> 10, VGA_PEL_D);
	outb(green >> 10, VGA_PEL_D);
	outb(blue >> 10, VGA_PEL_D);
}
/*
 * fb_ops.fb_setcolreg hook: load a single palette register.
 * The component values arrive already rounded to the hardware's
 * precision (per the `var' bitfields).  Returns non-zero for an
 * invalid regno.  `transp' is accepted but unused (no alpha here).
 */
static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			     unsigned blue, unsigned transp,
			     struct fb_info *info)
{
	struct vga16fb_par *par = info->par;

	if (regno >= 256)
		return 1;

	if (info->var.grayscale) {
		/* luminance = 0.30*R + 0.59*G + 0.11*B (8-bit fixed point) */
		unsigned lum = (red * 77 + green * 151 + blue * 28) >> 8;

		red = lum;
		green = lum;
		blue = lum;
	}

	/* VGA has a real DAC; EGA only has the attribute palette. */
	if (par->isVGA)
		vga16_setpalette(regno, red, green, blue);
	else
		ega16_setpalette(regno, red, green, blue);

	return 0;
}
/*
 * fb_ops.fb_pan_display hook: delegate the display-start update to
 * vga16fb_pan_var().  Always reports success.
 */
static int vga16fb_pan_display(struct fb_var_screeninfo *var,
			       struct fb_info *info)
{
	vga16fb_pan_var(info, var);
	return 0;
}
/* The following VESA blanking code is taken from vgacon.c. The VGA
blanking code was originally by Huang shi chao, and modified by
Christoph Rimek (chrimek@toppoint.de) and todd j. derr
(tjd@barefoot.org) for Linux. */
/*
 * VESA-style blanking: suspend the vertical and/or horizontal sync
 * pulses depending on `mode' (FB_BLANK_* level).  On first entry the
 * affected CRTC/sequencer registers are saved into par->vga_state so
 * vga_vesa_unblank() can restore them.  The index registers are saved
 * and restored so a concurrent register access pattern is not upset.
 */
static void vga_vesa_blank(struct vga16fb_par *par, int mode)
{
	unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
	unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);

	/* save original values of VGA controller registers */
	if(!par->vesa_blanked) {
		par->vga_state.CrtMiscIO = vga_io_r(VGA_MIS_R);
		//sti();
		par->vga_state.HorizontalTotal = vga_io_rcrt(0x00);	/* HorizontalTotal */
		par->vga_state.HorizDisplayEnd = vga_io_rcrt(0x01);	/* HorizDisplayEnd */
		par->vga_state.StartHorizRetrace = vga_io_rcrt(0x04);	/* StartHorizRetrace */
		par->vga_state.EndHorizRetrace = vga_io_rcrt(0x05);	/* EndHorizRetrace */
		par->vga_state.Overflow = vga_io_rcrt(0x07);		/* Overflow */
		par->vga_state.StartVertRetrace = vga_io_rcrt(0x10);	/* StartVertRetrace */
		par->vga_state.EndVertRetrace = vga_io_rcrt(0x11);	/* EndVertRetrace */
		par->vga_state.ModeControl = vga_io_rcrt(0x17);		/* ModeControl */
		par->vga_state.ClockingMode = vga_io_rseq(0x01);	/* ClockingMode */
	}

	/* assure that video is enabled */
	/* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
	vga_io_wseq(0x01, par->vga_state.ClockingMode | 0x20);

	/* test for vertical retrace in process.... */
	if ((par->vga_state.CrtMiscIO & 0x80) == 0x80)
		vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO & 0xef);

	/*
	 * Set <End of vertical retrace> to minimum (0) and
	 * <Start of vertical Retrace> to maximum (incl. overflow)
	 * Result: turn off vertical sync (VSync) pulse.
	 */
	if (mode & FB_BLANK_VSYNC_SUSPEND) {
		vga_io_wcrt(VGA_CRTC_V_SYNC_START, 0xff);
		vga_io_wcrt(VGA_CRTC_V_SYNC_END, 0x40);
		/* bits 9,10 of vert. retrace */
		vga_io_wcrt(VGA_CRTC_OVERFLOW, par->vga_state.Overflow | 0x84);
	}

	if (mode & FB_BLANK_HSYNC_SUSPEND) {
		/*
		 * Set <End of horizontal retrace> to minimum (0) and
		 * <Start of horizontal Retrace> to maximum
		 * Result: turn off horizontal sync (HSync) pulse.
		 */
		vga_io_wcrt(VGA_CRTC_H_SYNC_START, 0xff);
		vga_io_wcrt(VGA_CRTC_H_SYNC_END, 0x00);
	}

	/* restore both index registers */
	outb_p(SeqCtrlIndex, VGA_SEQ_I);
	outb_p(CrtCtrlIndex, VGA_CRT_IC);
}
/*
 * Undo vga_vesa_blank(): restore every CRTC/sequencer register that
 * was saved into par->vga_state, re-establishing the sync pulses.
 */
static void vga_vesa_unblank(struct vga16fb_par *par)
{
	unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
	unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);

	/* restore original values of VGA controller registers */
	vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO);

	/* HorizontalTotal */
	vga_io_wcrt(0x00, par->vga_state.HorizontalTotal);
	/* HorizDisplayEnd */
	vga_io_wcrt(0x01, par->vga_state.HorizDisplayEnd);
	/* StartHorizRetrace */
	vga_io_wcrt(0x04, par->vga_state.StartHorizRetrace);
	/* EndHorizRetrace */
	vga_io_wcrt(0x05, par->vga_state.EndHorizRetrace);
	/* Overflow */
	vga_io_wcrt(0x07, par->vga_state.Overflow);
	/* StartVertRetrace */
	vga_io_wcrt(0x10, par->vga_state.StartVertRetrace);
	/* EndVertRetrace */
	vga_io_wcrt(0x11, par->vga_state.EndVertRetrace);
	/* ModeControl */
	vga_io_wcrt(0x17, par->vga_state.ModeControl);
	/* ClockingMode */
	vga_io_wseq(0x01, par->vga_state.ClockingMode);

	/* restore index/control registers */
	vga_io_w(VGA_SEQ_I, SeqCtrlIndex);
	vga_io_w(VGA_CRT_IC, CrtCtrlIndex);
}
/*
 * Soft blank: program the first 16 DAC palette entries to black.
 * The timing signals keep running; only the visible colors go dark.
 */
static void vga_pal_blank(void)
{
	int i;

	for (i=0; i<16; i++) {
		outb_p(i, VGA_PEL_IW);
		outb_p(0, VGA_PEL_D);
		outb_p(0, VGA_PEL_D);
		outb_p(0, VGA_PEL_D);
	}
}
/* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
/*
 * fb_ops.fb_blank hook.  Blank levels: 0 unblank, 1 soft blank via
 * the palette, 2+ (vsync/hsync suspend, powerdown) via VESA-style
 * sync suppression.  Tracks which method is active in `par' so that
 * unblanking only undoes what was actually done.  Always returns 0.
 */
static int vga16fb_blank(int blank, struct fb_info *info)
{
	struct vga16fb_par *par = info->par;

	if (blank == FB_BLANK_UNBLANK) {
		/* Undo whichever blanking method is currently in effect. */
		if (par->vesa_blanked) {
			vga_vesa_unblank(par);
			par->vesa_blanked = 0;
		}
		if (par->palette_blanked)
			par->palette_blanked = 0;
	} else if (blank == FB_BLANK_NORMAL) {
		/* Soft blank: black out the palette only. */
		vga_pal_blank();
		par->palette_blanked = 1;
	} else {
		/* Deeper levels suppress sync pulses via the CRTC. */
		vga_vesa_blank(par, blank);
		par->vesa_blanked = 1;
	}
	return 0;
}
/*
 * Fill a rectangle in the 8-plane (mode-X style) layout, where four
 * pixels share one byte address across planes, so x coordinates and
 * widths are divided by 4.  Saves and restores the graphics-controller
 * state (index, mode, mask, op, set/reset) around the operation.
 * ROP_COPY writes the color directly; any other rop is handled as an
 * XOR via latched read-modify-write cycles.
 */
static void vga_8planes_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	u32 dx = rect->dx, width = rect->width;
	char oldindex = getindex();
	char oldmode = setmode(0x40);
	char oldmask = selectmask();
	int line_ofs, height;
	char oldop, oldsr;
	char __iomem *where;

	dx /= 4;	/* four pixels per byte address */
	where = info->screen_base + dx + rect->dy * info->fix.line_length;

	if (rect->rop == ROP_COPY) {
		oldop = setop(0);
		oldsr = setsr(0);
		width /= 4;
		line_ofs = info->fix.line_length - width;
		setmask(0xff);
		height = rect->height;
		while (height--) {
			int x;

			/* we can do memset... */
			for (x = width; x > 0; --x) {
				writeb(rect->color, where);
				where++;
			}
			where += line_ofs;
		}
	} else {
		char oldcolor = setcolor(0xf);
		int y;

		oldop = setop(0x18);	/* XOR function */
		oldsr = setsr(0xf);
		setmask(0x0F);
		for (y = 0; y < rect->height; y++) {
			rmw(where);
			rmw(where+1);
			where += info->fix.line_length;
		}
		setcolor(oldcolor);
	}
	setmask(oldmask);
	setsr(oldsr);
	setop(oldop);
	setmode(oldmode);
	setindex(oldindex);
}
/*
 * fb_ops.fb_fillrect hook: dispatch on the framebuffer layout.
 * 4-plane VGA rectangles are drawn with set/reset hardware (COPY) or
 * latched XOR cycles; the 8-plane layout goes through
 * vga_8planes_fillrect(); packed-pixel modes fall back to the generic
 * cfb_fillrect().
 */
static void vga16fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	int x, x2, y2, vxres, vyres, width, height, line_ofs;
	char __iomem *dst;

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	if (!rect->width || !rect->height || rect->dx > vxres || rect->dy > vyres)
		return;

	/* We could use hardware clipping but on many cards you get around
	 * hardware clipping by writing to framebuffer directly. */
	x2 = rect->dx + rect->width;
	y2 = rect->dy + rect->height;
	x2 = x2 < vxres ? x2 : vxres;
	y2 = y2 < vyres ? y2 : vyres;
	width = x2 - rect->dx;

	switch (info->fix.type) {
	case FB_TYPE_VGA_PLANES:
		if (info->fix.type_aux == FB_AUX_VGA_PLANES_VGA4) {
			height = y2 - rect->dy;
			/* NOTE(review): this overwrites the clipped width
			 * computed above with the unclipped rect->width/8 —
			 * looks suspicious; verify against upstream before
			 * changing. */
			width = rect->width/8;
			line_ofs = info->fix.line_length - width;
			dst = info->screen_base + (rect->dx/8) + rect->dy * info->fix.line_length;
			switch (rect->rop) {
			case ROP_COPY:
				setmode(0);
				setop(0);
				setsr(0xf);
				setcolor(rect->color);
				selectmask();
				setmask(0xff);
				while (height--) {
					for (x = 0; x < width; x++) {
						writeb(0, dst);
						dst++;
					}
					dst += line_ofs;
				}
				break;
			case ROP_XOR:
				setmode(0);
				setop(0x18);
				setsr(0xf);
				setcolor(0xf);
				selectmask();
				setmask(0xff);
				while (height--) {
					for (x = 0; x < width; x++) {
						rmw(dst);
						dst++;
					}
					dst += line_ofs;
				}
				break;
			}
		} else
			vga_8planes_fillrect(info, rect);
		break;
	case FB_TYPE_PACKED_PIXELS:
	default:
		cfb_fillrect(info, rect);
		break;
	}
}
/*
 * Screen-to-screen copy in the 8-plane (mode-X style) layout: four
 * pixels per byte address, so all x coordinates/widths are divided
 * by 4.  Data moves through the VGA latches (readb loads the latches,
 * writeb stores them), which copies all planes in one cycle.  Copies
 * forward or backward depending on whether the regions might overlap.
 */
static void vga_8planes_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	char oldindex = getindex();
	char oldmode = setmode(0x41);
	char oldop = setop(0);
	char oldsr = setsr(0xf);
	int height, line_ofs, x;
	u32 sx, dx, width;
	char __iomem *dest;
	char __iomem *src;

	height = area->height;
	sx = area->sx / 4;
	dx = area->dx / 4;
	width = area->width / 4;

	if (area->dy < area->sy || (area->dy == area->sy && dx < sx)) {
		/* destination is above/left of source: copy top-down */
		line_ofs = info->fix.line_length - width;
		dest = info->screen_base + dx + area->dy * info->fix.line_length;
		src = info->screen_base + sx + area->sy * info->fix.line_length;
		while (height--) {
			for (x = 0; x < width; x++) {
				readb(src);	/* load latches */
				writeb(0, dest);	/* store latches */
				src++;
				dest++;
			}
			src += line_ofs;
			dest += line_ofs;
		}
	} else {
		/* possible overlap the other way: copy bottom-up */
		line_ofs = info->fix.line_length - width;
		dest = info->screen_base + dx + width +
			(area->dy + height - 1) * info->fix.line_length;
		src = info->screen_base + sx + width +
			(area->sy + height - 1) * info->fix.line_length;
		while (height--) {
			for (x = 0; x < width; x++) {
				--src;
				--dest;
				readb(src);
				writeb(0, dest);
			}
			src -= line_ofs;
			dest -= line_ofs;
		}
	}

	setsr(oldsr);
	setop(oldop);
	setmode(oldmode);
	setindex(oldindex);
}
/*
 * fb_ops.fb_copyarea hook: clip the destination rectangle against the
 * virtual screen, adjust the source accordingly, and dispatch on the
 * framebuffer layout.  4-plane VGA copies go through the latches
 * (readb loads all four planes, writeb stores them) with direction
 * chosen to handle overlapping regions; 8-plane goes through
 * vga_8planes_copyarea(); packed-pixel falls back to cfb_copyarea().
 *
 * Fix vs. original: dropped the no-op self-assignment `height = height;`.
 */
static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
	int x, x2, y2, old_dx, old_dy, vxres, vyres;
	int height, width, line_ofs;
	char __iomem *dst = NULL;
	char __iomem *src = NULL;

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	if (area->dx > vxres || area->sx > vxres || area->dy > vyres ||
	    area->sy > vyres)
		return;

	/* clip the destination */
	old_dx = area->dx;
	old_dy = area->dy;

	/*
	 * We could use hardware clipping but on many cards you get around
	 * hardware clipping by writing to framebuffer directly.
	 */
	x2 = area->dx + area->width;
	y2 = area->dy + area->height;
	dx = area->dx > 0 ? area->dx : 0;
	dy = area->dy > 0 ? area->dy : 0;
	x2 = x2 < vxres ? x2 : vxres;
	y2 = y2 < vyres ? y2 : vyres;
	width = x2 - dx;
	height = y2 - dy;

	if (sx + dx < old_dx || sy + dy < old_dy)
		return;

	/* update sx1,sy1 to match the clipped destination */
	sx += (dx - old_dx);
	sy += (dy - old_dy);

	/* the source must be completely inside the virtual screen */
	if (sx + width > vxres || sy + height > vyres)
		return;

	switch (info->fix.type) {
	case FB_TYPE_VGA_PLANES:
		if (info->fix.type_aux == FB_AUX_VGA_PLANES_VGA4) {
			width = width/8;	/* 8 pixels per byte address */
			line_ofs = info->fix.line_length - width;

			setmode(1);	/* latched copy mode */
			setop(0);
			setsr(0xf);

			if (dy < sy || (dy == sy && dx < sx)) {
				/* no forward overlap: copy top-down */
				dst = info->screen_base + (dx/8) + dy * info->fix.line_length;
				src = info->screen_base + (sx/8) + sy * info->fix.line_length;
				while (height--) {
					for (x = 0; x < width; x++) {
						readb(src);	/* fill latches */
						writeb(0, dst);	/* flush latches */
						dst++;
						src++;
					}
					src += line_ofs;
					dst += line_ofs;
				}
			} else {
				/* possible overlap: copy bottom-up */
				dst = info->screen_base + (dx/8) + width +
					(dy + height - 1) * info->fix.line_length;
				src = info->screen_base + (sx/8) + width +
					(sy + height - 1) * info->fix.line_length;
				while (height--) {
					for (x = 0; x < width; x++) {
						dst--;
						src--;
						readb(src);
						writeb(0, dst);
					}
					src -= line_ofs;
					dst -= line_ofs;
				}
			}
		} else
			vga_8planes_copyarea(info, area);
		break;
	case FB_TYPE_PACKED_PIXELS:
	default:
		cfb_copyarea(info, area);
		break;
	}
}
/*
 * Nibble bit-reversal tables used by vga_8planes_imageblit() to turn a
 * font byte into the 16-bit plane-mask word expected by the hardware.
 * Low/high halves are swapped between endiannesses so the 16-bit write
 * lands in the right byte order.
 */
#define TRANS_MASK_LOW {0x0,0x8,0x4,0xC,0x2,0xA,0x6,0xE,0x1,0x9,0x5,0xD,0x3,0xB,0x7,0xF}
#define TRANS_MASK_HIGH {0x000, 0x800, 0x400, 0xC00, 0x200, 0xA00, 0x600, 0xE00, \
			 0x100, 0x900, 0x500, 0xD00, 0x300, 0xB00, 0x700, 0xF00}
#if defined(__LITTLE_ENDIAN)
static const u16 transl_l[] = TRANS_MASK_LOW;
static const u16 transl_h[] = TRANS_MASK_HIGH;
#elif defined(__BIG_ENDIAN)
static const u16 transl_l[] = TRANS_MASK_HIGH;
static const u16 transl_h[] = TRANS_MASK_LOW;
#else
#error "Only __BIG_ENDIAN and __LITTLE_ENDIAN are supported in vga-planes"
#endif
/*
 * Expand a 1bpp image (e.g. a font glyph) into the 8-plane layout.
 * The background color is written first with all planes enabled, then
 * the plane mask is restricted to fg^bg and each font row is expanded
 * via the transl_l/transl_h lookup tables into a 16-bit masked write.
 * Graphics-controller state is saved and restored around the blit.
 */
static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *image)
{
	char oldindex = getindex();
	char oldmode = setmode(0x40);
	char oldop = setop(0);
	char oldsr = setsr(0);
	char oldmask = selectmask();
	const char *cdat = image->data;
	u32 dx = image->dx;
	char __iomem *where;
	int y;

	dx /= 4;	/* four pixels per byte address */
	where = info->screen_base + dx + image->dy * info->fix.line_length;

	/* fill the cell with the background color first */
	setmask(0xff);
	writeb(image->bg_color, where);
	readb(where);

	/* then only touch the pixels where fg differs from bg */
	selectmask();
	setmask(image->fg_color ^ image->bg_color);
	setmode(0x42);
	setop(0x18);
	for (y = 0; y < image->height; y++, where += info->fix.line_length)
		writew(transl_h[cdat[y]&0xF] | transl_l[cdat[y] >> 4], where);

	setmask(oldmask);
	setsr(oldsr);
	setop(oldop);
	setmode(oldmode);
	setindex(oldindex);
}
/*
 * Expand a 1bpp (monochrome) image into the framebuffer: set bits get
 * fg_color, clear bits get bg_color.  On VGA the hardware write modes
 * 2/3 do the expansion; on EGA each byte is drawn with two masked
 * read-modify-write passes (background, then foreground).  8-plane
 * layouts are delegated to vga_8planes_imageblit(), packed-pixel to
 * the generic cfb_imageblit().
 */
static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *image)
{
	char __iomem *where = info->screen_base + (image->dx/8) +
		image->dy * info->fix.line_length;
	struct vga16fb_par *par = info->par;
	char *cdat = (char *) image->data;
	char __iomem *dst;
	int x, y;

	switch (info->fix.type) {
	case FB_TYPE_VGA_PLANES:
		if (info->fix.type_aux == FB_AUX_VGA_PLANES_VGA4) {
			if (par->isVGA) {
				setmode(2);
				setop(0);
				setsr(0xf);
				setcolor(image->fg_color);
				selectmask();

				setmask(0xff);
				writeb(image->bg_color, where);
				rmb();
				readb(where); /* fill latches */
				setmode(3);
				wmb();
				for (y = 0; y < image->height; y++) {
					dst = where;
					for (x = image->width/8; x--;)
						writeb(*cdat++, dst++);
					where += info->fix.line_length;
				}
				wmb();
			} else {
				setmode(0);
				setop(0);
				setsr(0xf);
				setcolor(image->bg_color);
				selectmask();

				setmask(0xff);
				for (y = 0; y < image->height; y++) {
					dst = where;
					for (x=image->width/8; x--;){
						rmw(dst);
						setcolor(image->fg_color);
						selectmask();
						/* NOTE(review): when *cdat is 0
						 * neither cdat nor dst advance,
						 * which looks wrong for multi-
						 * byte rows — verify against
						 * upstream before changing. */
						if (*cdat) {
							setmask(*cdat++);
							rmw(dst++);
						}
					}
					where += info->fix.line_length;
				}
			}
		} else
			vga_8planes_imageblit(info, image);
		break;
	case FB_TYPE_PACKED_PIXELS:
	default:
		cfb_imageblit(info, image);
		break;
	}
}
/*
 * Blit a color (depth > 1) image, e.g. the boot logo: one source byte
 * per pixel, written pixel-by-pixel through set/reset with a one-bit
 * mask.  Only implemented for 4-plane VGA (not EGA, not 8-plane);
 * packed-pixel modes use the generic cfb_imageblit().
 */
static void vga_imageblit_color(struct fb_info *info, const struct fb_image *image)
{
	/*
	 * Draw logo
	 */
	struct vga16fb_par *par = info->par;
	char __iomem *where =
		info->screen_base + image->dy * info->fix.line_length +
		image->dx/8;
	const char *cdat = image->data;
	char __iomem *dst;
	int x, y;

	switch (info->fix.type) {
	case FB_TYPE_VGA_PLANES:
		if (info->fix.type_aux == FB_AUX_VGA_PLANES_VGA4 &&
		    par->isVGA) {
			setsr(0xf);
			setop(0);
			setmode(0);

			for (y = 0; y < image->height; y++) {
				for (x = 0; x < image->width; x++) {
					dst = where + x/8;

					setcolor(*cdat);
					selectmask();
					setmask(1 << (7 - (x % 8)));
					fb_readb(dst);	/* load latches */
					fb_writeb(0, dst);

					cdat++;
				}
				where += info->fix.line_length;
			}
		}
		break;
	case FB_TYPE_PACKED_PIXELS:
		cfb_imageblit(info, image);
		break;
	default:
		break;
	}
}
/*
 * fb_ops.fb_imageblit hook: depth-1 images are monochrome bitmaps to
 * be expanded with fg/bg colors; anything else is a full-color image.
 */
static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	if (image->depth != 1) {
		vga_imageblit_color(info, image);
		return;
	}
	vga_imageblit_expand(info, image);
}
/*
 * fb_ops.fb_destroy hook: called when the last reference to the
 * fb_info is dropped.  Unmaps the framebuffer, frees the colormap,
 * clears the platform drvdata and releases the fb_info itself.
 */
static void vga16fb_destroy(struct fb_info *info)
{
	struct platform_device *dev = container_of(info->device, struct platform_device, dev);

	iounmap(info->screen_base);
	fb_dealloc_cmap(&info->cmap);
	/* XXX unshare VGA regions */
	platform_set_drvdata(dev, NULL);
	framebuffer_release(info);
}
/* Framebuffer operations exported to the fbdev core. */
static struct fb_ops vga16fb_ops = {
	.owner		= THIS_MODULE,
	.fb_open        = vga16fb_open,
	.fb_release     = vga16fb_release,
	.fb_destroy	= vga16fb_destroy,
	.fb_check_var	= vga16fb_check_var,
	.fb_set_par	= vga16fb_set_par,
	.fb_setcolreg 	= vga16fb_setcolreg,
	.fb_pan_display = vga16fb_pan_display,
	.fb_blank 	= vga16fb_blank,
	.fb_fillrect	= vga16fb_fillrect,
	.fb_copyarea	= vga16fb_copyarea,
	.fb_imageblit	= vga16fb_imageblit,
};
#ifndef MODULE
static int __init vga16fb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
}
return 0;
}
#endif
/*
 * Platform probe: allocate the fb_info, map the legacy VGA aperture
 * at VGA_FB_PHYS, fill in the default var/fix from the detected
 * EGA/VGA type, allocate the colormap, validate the default mode and
 * register the framebuffer.  Error paths unwind in reverse order of
 * acquisition.  Returns 0 on success or a negative errno.
 *
 * Fix vs. original: the pixmap.blit_x bits use 1U shifts — `1 << 31`
 * is undefined behavior (signed left-shift overflow) in C.
 */
static int __devinit vga16fb_probe(struct platform_device *dev)
{
	struct fb_info *info;
	struct vga16fb_par *par;
	int i;
	int ret = 0;

	printk(KERN_DEBUG "vga16fb: initializing\n");
	info = framebuffer_alloc(sizeof(struct vga16fb_par), &dev->dev);

	if (!info) {
		ret = -ENOMEM;
		goto err_fb_alloc;
	}

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	/* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
	info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);

	if (!info->screen_base) {
		printk(KERN_ERR "vga16fb: unable to map device\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
	par = info->par;

	par->isVGA = screen_info.orig_video_isVGA;
	par->palette_blanked = 0;
	par->vesa_blanked = 0;

	/* EGA DACs only resolve 2 bits per channel, VGA 6 */
	i = par->isVGA? 6 : 2;

	vga16fb_defined.red.length   = i;
	vga16fb_defined.green.length = i;
	vga16fb_defined.blue.length  = i;

	/* name should not depend on EGA/VGA */
	info->fbops = &vga16fb_ops;
	info->var = vga16fb_defined;
	info->fix = vga16fb_fix;
	/* supports rectangles with widths of multiples of 8 */
	info->pixmap.blit_x = 1U << 7 | 1U << 15 | 1U << 23 | 1U << 31;
	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
		FBINFO_HWACCEL_YPAN;

	i = (info->var.bits_per_pixel == 8) ? 256 : 16;
	ret = fb_alloc_cmap(&info->cmap, i, 0);
	if (ret) {
		printk(KERN_ERR "vga16fb: unable to allocate colormap\n");
		ret = -ENOMEM;
		goto err_alloc_cmap;
	}

	if (vga16fb_check_var(&info->var, info)) {
		printk(KERN_ERR "vga16fb: unable to validate variable\n");
		ret = -EINVAL;
		goto err_check_var;
	}

	vga16fb_update_fix(info);

	info->apertures->ranges[0].base = VGA_FB_PHYS;
	info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;

	if (register_framebuffer(info) < 0) {
		printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
		ret = -EINVAL;
		goto err_check_var;
	}

	printk(KERN_INFO "fb%d: %s frame buffer device\n",
	       info->node, info->fix.id);
	platform_set_drvdata(dev, info);

	return 0;

 err_check_var:
	fb_dealloc_cmap(&info->cmap);
 err_alloc_cmap:
	iounmap(info->screen_base);
 err_ioremap:
	framebuffer_release(info);
 err_fb_alloc:
	return ret;
}
/*
 * Platform remove: unregister the framebuffer if one was registered.
 * The remaining teardown happens in vga16fb_destroy() once the last
 * reference is dropped.
 */
static int __devexit vga16fb_remove(struct platform_device *dev)
{
	struct fb_info *info = platform_get_drvdata(dev);

	if (info)
		unregister_framebuffer(info);

	return 0;
}
/* Platform driver glue and the single device instance we register. */
static struct platform_driver vga16fb_driver = {
	.probe = vga16fb_probe,
	.remove = __devexit_p(vga16fb_remove),
	.driver = {
		.name = "vga16fb",
	},
};

static struct platform_device *vga16fb_device;
/*
 * Module init: parse command-line options (built-in only), register
 * the platform driver, then create and add the single platform device
 * that triggers the probe.  On any failure the partially-registered
 * pieces are torn down before returning the error.
 */
static int __init vga16fb_init(void)
{
	int ret;
#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("vga16fb", &option))
		return -ENODEV;

	vga16fb_setup(option);
#endif
	ret = platform_driver_register(&vga16fb_driver);

	if (!ret) {
		vga16fb_device = platform_device_alloc("vga16fb", 0);

		if (vga16fb_device)
			ret = platform_device_add(vga16fb_device);
		else
			ret = -ENOMEM;

		if (ret) {
			/* put is safe even if alloc returned NULL */
			platform_device_put(vga16fb_device);
			platform_driver_unregister(&vga16fb_driver);
		}
	}

	return ret;
}
/* Module exit: tear down the device, then the driver. */
static void __exit vga16fb_exit(void)
{
	platform_device_unregister(vga16fb_device);
	platform_driver_unregister(&vga16fb_driver);
}
MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
MODULE_LICENSE("GPL");
module_init(vga16fb_init);
module_exit(vga16fb_exit);
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
thewisenerd/android_kernel_xiaomi_armani | drivers/video/imsttfb.c | 5239 | 44531 | /*
* drivers/video/imsttfb.c -- frame buffer device for IMS TwinTurbo
*
* This file is derived from the powermac console "imstt" driver:
* Copyright (C) 1997 Sigurdur Asgeirsson
* With additional hacking by Jeffrey Kuskin (jsk@mojave.stanford.edu)
* Modified by Danilo Beuche 1998
* Some register values added by Damien Doligez, INRIA Rocquencourt
* Various cleanups by Paul Mundt (lethal@chaoticdreams.org)
*
* This file was written by Ryan Nielsen (ran@krazynet.com)
* Most of the frame buffer device stuff was copied from atyfb.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#if defined(CONFIG_PPC)
#include <linux/nvram.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include "macmodes.h"
#endif
#ifndef __powerpc__
#define eieio() /* Enforce In-order Execution of I/O */
#endif
/* TwinTurbo (Cosmo) registers */
enum {
S1SA = 0, /* 0x00 */
S2SA = 1, /* 0x04 */
SP = 2, /* 0x08 */
DSA = 3, /* 0x0C */
CNT = 4, /* 0x10 */
DP_OCTL = 5, /* 0x14 */
CLR = 6, /* 0x18 */
BI = 8, /* 0x20 */
MBC = 9, /* 0x24 */
BLTCTL = 10, /* 0x28 */
/* Scan Timing Generator Registers */
HES = 12, /* 0x30 */
HEB = 13, /* 0x34 */
HSB = 14, /* 0x38 */
HT = 15, /* 0x3C */
VES = 16, /* 0x40 */
VEB = 17, /* 0x44 */
VSB = 18, /* 0x48 */
VT = 19, /* 0x4C */
HCIV = 20, /* 0x50 */
VCIV = 21, /* 0x54 */
TCDR = 22, /* 0x58 */
VIL = 23, /* 0x5C */
STGCTL = 24, /* 0x60 */
/* Screen Refresh Generator Registers */
SSR = 25, /* 0x64 */
HRIR = 26, /* 0x68 */
SPR = 27, /* 0x6C */
CMR = 28, /* 0x70 */
SRGCTL = 29, /* 0x74 */
/* RAM Refresh Generator Registers */
RRCIV = 30, /* 0x78 */
RRSC = 31, /* 0x7C */
RRCR = 34, /* 0x88 */
/* System Registers */
GIOE = 32, /* 0x80 */
GIO = 33, /* 0x84 */
SCR = 35, /* 0x8C */
SSTATUS = 36, /* 0x90 */
PRC = 37, /* 0x94 */
#if 0
/* PCI Registers */
DVID = 0x00000000L,
SC = 0x00000004L,
CCR = 0x00000008L,
OG = 0x0000000CL,
BARM = 0x00000010L,
BARER = 0x00000030L,
#endif
};
/* IBM 624 RAMDAC Direct Registers */
enum {
PADDRW = 0x00,
PDATA = 0x04,
PPMASK = 0x08,
PADDRR = 0x0c,
PIDXLO = 0x10,
PIDXHI = 0x14,
PIDXDATA= 0x18,
PIDXCTL = 0x1c
};
/* IBM 624 RAMDAC Indirect Registers */
enum {
CLKCTL = 0x02, /* (0x01) Miscellaneous Clock Control */
SYNCCTL = 0x03, /* (0x00) Sync Control */
HSYNCPOS = 0x04, /* (0x00) Horizontal Sync Position */
PWRMNGMT = 0x05, /* (0x00) Power Management */
DACOP = 0x06, /* (0x02) DAC Operation */
PALETCTL = 0x07, /* (0x00) Palette Control */
SYSCLKCTL = 0x08, /* (0x01) System Clock Control */
PIXFMT = 0x0a, /* () Pixel Format [bpp >> 3 + 2] */
BPP8 = 0x0b, /* () 8 Bits/Pixel Control */
BPP16 = 0x0c, /* () 16 Bits/Pixel Control [bit 1=1 for 565] */
BPP24 = 0x0d, /* () 24 Bits/Pixel Control */
BPP32 = 0x0e, /* () 32 Bits/Pixel Control */
PIXCTL1 = 0x10, /* (0x05) Pixel PLL Control 1 */
PIXCTL2 = 0x11, /* (0x00) Pixel PLL Control 2 */
SYSCLKN = 0x15, /* () System Clock N (System PLL Reference Divider) */
SYSCLKM = 0x16, /* () System Clock M (System PLL VCO Divider) */
SYSCLKP = 0x17, /* () System Clock P */
SYSCLKC = 0x18, /* () System Clock C */
/*
* Dot clock rate is 20MHz * (m + 1) / ((n + 1) * (p ? 2 * p : 1)
* c is charge pump bias which depends on the VCO frequency
*/
PIXM0 = 0x20, /* () Pixel M 0 */
PIXN0 = 0x21, /* () Pixel N 0 */
PIXP0 = 0x22, /* () Pixel P 0 */
PIXC0 = 0x23, /* () Pixel C 0 */
CURSCTL = 0x30, /* (0x00) Cursor Control */
CURSXLO = 0x31, /* () Cursor X position, low 8 bits */
CURSXHI = 0x32, /* () Cursor X position, high 8 bits */
CURSYLO = 0x33, /* () Cursor Y position, low 8 bits */
CURSYHI = 0x34, /* () Cursor Y position, high 8 bits */
CURSHOTX = 0x35, /* () Cursor Hot Spot X */
CURSHOTY = 0x36, /* () Cursor Hot Spot Y */
CURSACCTL = 0x37, /* () Advanced Cursor Control Enable */
CURSACATTR = 0x38, /* () Advanced Cursor Attribute */
CURS1R = 0x40, /* () Cursor 1 Red */
CURS1G = 0x41, /* () Cursor 1 Green */
CURS1B = 0x42, /* () Cursor 1 Blue */
CURS2R = 0x43, /* () Cursor 2 Red */
CURS2G = 0x44, /* () Cursor 2 Green */
CURS2B = 0x45, /* () Cursor 2 Blue */
CURS3R = 0x46, /* () Cursor 3 Red */
CURS3G = 0x47, /* () Cursor 3 Green */
CURS3B = 0x48, /* () Cursor 3 Blue */
BORDR = 0x60, /* () Border Color Red */
BORDG = 0x61, /* () Border Color Green */
BORDB = 0x62, /* () Border Color Blue */
MISCTL1 = 0x70, /* (0x00) Miscellaneous Control 1 */
MISCTL2 = 0x71, /* (0x00) Miscellaneous Control 2 */
MISCTL3 = 0x72, /* (0x00) Miscellaneous Control 3 */
KEYCTL = 0x78 /* (0x00) Key Control/DB Operation */
};
/* TI TVP 3030 RAMDAC Direct Registers */
enum {
TVPADDRW = 0x00, /* 0 Palette/Cursor RAM Write Address/Index */
TVPPDATA = 0x04, /* 1 Palette Data RAM Data */
TVPPMASK = 0x08, /* 2 Pixel Read-Mask */
TVPPADRR = 0x0c, /* 3 Palette/Cursor RAM Read Address */
TVPCADRW = 0x10, /* 4 Cursor/Overscan Color Write Address */
TVPCDATA = 0x14, /* 5 Cursor/Overscan Color Data */
/* 6 reserved */
TVPCADRR = 0x1c, /* 7 Cursor/Overscan Color Read Address */
/* 8 reserved */
TVPDCCTL = 0x24, /* 9 Direct Cursor Control */
TVPIDATA = 0x28, /* 10 Index Data */
TVPCRDAT = 0x2c, /* 11 Cursor RAM Data */
TVPCXPOL = 0x30, /* 12 Cursor-Position X LSB */
TVPCXPOH = 0x34, /* 13 Cursor-Position X MSB */
TVPCYPOL = 0x38, /* 14 Cursor-Position Y LSB */
TVPCYPOH = 0x3c, /* 15 Cursor-Position Y MSB */
};
/* TI TVP 3030 RAMDAC Indirect Registers */
enum {
TVPIRREV = 0x01, /* Silicon Revision [RO] */
TVPIRICC = 0x06, /* Indirect Cursor Control (0x00) */
TVPIRBRC = 0x07, /* Byte Router Control (0xe4) */
TVPIRLAC = 0x0f, /* Latch Control (0x06) */
TVPIRTCC = 0x18, /* True Color Control (0x80) */
TVPIRMXC = 0x19, /* Multiplex Control (0x98) */
TVPIRCLS = 0x1a, /* Clock Selection (0x07) */
TVPIRPPG = 0x1c, /* Palette Page (0x00) */
TVPIRGEC = 0x1d, /* General Control (0x00) */
TVPIRMIC = 0x1e, /* Miscellaneous Control (0x00) */
TVPIRPLA = 0x2c, /* PLL Address */
TVPIRPPD = 0x2d, /* Pixel Clock PLL Data */
TVPIRMPD = 0x2e, /* Memory Clock PLL Data */
TVPIRLPD = 0x2f, /* Loop Clock PLL Data */
TVPIRCKL = 0x30, /* Color-Key Overlay Low */
TVPIRCKH = 0x31, /* Color-Key Overlay High */
TVPIRCRL = 0x32, /* Color-Key Red Low */
TVPIRCRH = 0x33, /* Color-Key Red High */
TVPIRCGL = 0x34, /* Color-Key Green Low */
TVPIRCGH = 0x35, /* Color-Key Green High */
TVPIRCBL = 0x36, /* Color-Key Blue Low */
TVPIRCBH = 0x37, /* Color-Key Blue High */
TVPIRCKC = 0x38, /* Color-Key Control (0x00) */
TVPIRMLC = 0x39, /* MCLK/Loop Clock Control (0x18) */
TVPIRSEN = 0x3a, /* Sense Test (0x00) */
TVPIRTMD = 0x3b, /* Test Mode Data */
TVPIRRML = 0x3c, /* CRC Remainder LSB [RO] */
TVPIRRMM = 0x3d, /* CRC Remainder MSB [RO] */
TVPIRRMS = 0x3e, /* CRC Bit Select [WO] */
TVPIRDID = 0x3f, /* Device ID [RO] (0x30) */
TVPIRRES = 0xff /* Software Reset [WO] */
};
struct initvalues {
__u8 addr, value;
};
static struct initvalues ibm_initregs[] __devinitdata = {
{ CLKCTL, 0x21 },
{ SYNCCTL, 0x00 },
{ HSYNCPOS, 0x00 },
{ PWRMNGMT, 0x00 },
{ DACOP, 0x02 },
{ PALETCTL, 0x00 },
{ SYSCLKCTL, 0x01 },
/*
* Note that colors in X are correct only if all video data is
* passed through the palette in the DAC. That is, "indirect
* color" must be configured. This is the case for the IBM DAC
* used in the 2MB and 4MB cards, at least.
*/
{ BPP8, 0x00 },
{ BPP16, 0x01 },
{ BPP24, 0x00 },
{ BPP32, 0x00 },
{ PIXCTL1, 0x05 },
{ PIXCTL2, 0x00 },
{ SYSCLKN, 0x08 },
{ SYSCLKM, 0x4f },
{ SYSCLKP, 0x00 },
{ SYSCLKC, 0x00 },
{ CURSCTL, 0x00 },
{ CURSACCTL, 0x01 },
{ CURSACATTR, 0xa8 },
{ CURS1R, 0xff },
{ CURS1G, 0xff },
{ CURS1B, 0xff },
{ CURS2R, 0xff },
{ CURS2G, 0xff },
{ CURS2B, 0xff },
{ CURS3R, 0xff },
{ CURS3G, 0xff },
{ CURS3B, 0xff },
{ BORDR, 0xff },
{ BORDG, 0xff },
{ BORDB, 0xff },
{ MISCTL1, 0x01 },
{ MISCTL2, 0x45 },
{ MISCTL3, 0x00 },
{ KEYCTL, 0x00 }
};
static struct initvalues tvp_initregs[] __devinitdata = {
{ TVPIRICC, 0x00 },
{ TVPIRBRC, 0xe4 },
{ TVPIRLAC, 0x06 },
{ TVPIRTCC, 0x80 },
{ TVPIRMXC, 0x4d },
{ TVPIRCLS, 0x05 },
{ TVPIRPPG, 0x00 },
{ TVPIRGEC, 0x00 },
{ TVPIRMIC, 0x08 },
{ TVPIRCKL, 0xff },
{ TVPIRCKH, 0xff },
{ TVPIRCRL, 0xff },
{ TVPIRCRH, 0xff },
{ TVPIRCGL, 0xff },
{ TVPIRCGH, 0xff },
{ TVPIRCBL, 0xff },
{ TVPIRCBH, 0xff },
{ TVPIRCKC, 0x00 },
{ TVPIRPLA, 0x00 },
{ TVPIRPPD, 0xc0 },
{ TVPIRPPD, 0xd5 },
{ TVPIRPPD, 0xea },
{ TVPIRPLA, 0x00 },
{ TVPIRMPD, 0xb9 },
{ TVPIRMPD, 0x3a },
{ TVPIRMPD, 0xb1 },
{ TVPIRPLA, 0x00 },
{ TVPIRLPD, 0xc1 },
{ TVPIRLPD, 0x3d },
{ TVPIRLPD, 0xf3 },
};
struct imstt_regvals {
__u32 pitch;
__u16 hes, heb, hsb, ht, ves, veb, vsb, vt, vil;
__u8 pclk_m, pclk_n, pclk_p;
/* Values of the tvp which change depending on colormode x resolution */
__u8 mlc[3]; /* Memory Loop Config 0x39 */
__u8 lckl_p[3]; /* P value of LCKL PLL */
};
struct imstt_par {
struct imstt_regvals init;
__u32 __iomem *dc_regs;
unsigned long cmap_regs_phys;
__u8 *cmap_regs;
__u32 ramdac;
__u32 palette[16];
};
enum {
IBM = 0,
TVP = 1
};
#define USE_NV_MODES 1
#define INIT_BPP 8
#define INIT_XRES 640
#define INIT_YRES 480
static int inverse = 0;
static char fontname[40] __initdata = { 0 };
#if defined(CONFIG_PPC)
static signed char init_vmode __devinitdata = -1, init_cmode __devinitdata = -1;
#endif
/*
 * Canned mode tables for the TVP DAC, one per supported horizontal
 * resolution (the suffix is not the xres).  Initializers are positional
 * and must follow struct imstt_regvals field order; the leading value
 * is presumably the pitch in pixels (it always equals the xres) —
 * the struct's first field is declared above this chunk, so confirm.
 */
static struct imstt_regvals tvp_reg_init_2 = {
	512,
	0x0002, 0x0006, 0x0026, 0x0028, 0x0003, 0x0016, 0x0196, 0x0197, 0x0196,
	0xec, 0x2a, 0xf3,
	{ 0x3c, 0x3b, 0x39 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_6 = {
	640,
	0x0004, 0x0009, 0x0031, 0x0036, 0x0003, 0x002a, 0x020a, 0x020d, 0x020a,
	0xef, 0x2e, 0xb2,
	{ 0x39, 0x39, 0x38 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_12 = {
	800,
	0x0005, 0x000e, 0x0040, 0x0042, 0x0003, 0x018, 0x270, 0x271, 0x270,
	0xf6, 0x2e, 0xf2,
	{ 0x3a, 0x39, 0x38 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_13 = {
	832,
	0x0004, 0x0011, 0x0045, 0x0048, 0x0003, 0x002a, 0x029a, 0x029b, 0x0000,
	0xfe, 0x3e, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

static struct imstt_regvals tvp_reg_init_17 = {
	1024,
	0x0006, 0x0210, 0x0250, 0x0053, 0x1003, 0x0021, 0x0321, 0x0324, 0x0000,
	0xfc, 0x3a, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

static struct imstt_regvals tvp_reg_init_18 = {
	1152,
	0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
	0xfd, 0x3a, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

/* 1280 wide: _19 is used for 1280x960, _20 for other heights. */
static struct imstt_regvals tvp_reg_init_19 = {
	1280,
	0x0009, 0x0016, 0x0066, 0x0069, 0x0003, 0x0027, 0x03e7, 0x03e8, 0x03e7,
	0xf7, 0x36, 0xf0,
	{ 0x38, 0x38, 0x38 }, { 0xf3, 0xf2, 0xf1 }
};

static struct imstt_regvals tvp_reg_init_20 = {
	1280,
	0x0009, 0x0018, 0x0068, 0x006a, 0x0003, 0x0029, 0x0429, 0x042a, 0x0000,
	0xf0, 0x2d, 0xf0,
	{ 0x38, 0x38, 0x38 }, { 0xf3, 0xf2, 0xf1 }
};
/*
* PCI driver prototypes
*/
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void imsttfb_remove(struct pci_dev *pdev);
/*
* Register access
*/
/*
 * Read a 32-bit little-endian chip register.  On big-endian PowerPC the
 * byte-swapping in_le32() accessor is used; elsewhere plain readl().
 */
static inline u32 read_reg_le32(volatile u32 __iomem *base, int regindex)
{
#ifdef __powerpc__
	return in_le32(base + regindex);
#else
	return readl(base + regindex);
#endif
}
/* Write a 32-bit little-endian chip register (counterpart of read_reg_le32). */
static inline void write_reg_le32(volatile u32 __iomem *base, int regindex, u32 val)
{
#ifdef __powerpc__
	out_le32(base + regindex, val);
#else
	writel(val, base + regindex);
#endif
}
/*
 * Derive the pixel clock in MHz from the cached PLL divider values:
 * f = 20 * (M + 1) / ((N + 1) * 2P), where P == 0 means divide-by-1.
 */
static __u32
getclkMHz(struct imstt_par *par)
{
	__u32 m = par->init.pclk_m;
	__u32 n = par->init.pclk_n;
	__u32 p = par->init.pclk_p;
	__u32 divisor = (n + 1) * (p ? 2 * p : 1);

	return 20 * (m + 1) / divisor;
}
/*
 * Find PLL multiplier/divider values M and N such that
 * 20 * (M + 1) / (N + 1) lands exactly on the requested frequency.
 * The search bumps M until the frequency overshoots, then bumps N to
 * bring it back down, alternating until an exact hit.  P is forced to 0
 * (no post-divide).  Note: only terminates on an exact integer match.
 */
static void
setclkMHz(struct imstt_par *par, __u32 MHz)
{
	__u32 m = 0, n = 0, freq;
	__u32 overshot = 0, bump_n = 0;

	while (1) {
		if (bump_n)
			n++;
		else
			m++;
		freq = 20 * (m + 1) / (n + 1);
		if (freq == MHz)
			break;
		if (freq > MHz) {
			/* too fast: remember we overshot, grow the divider */
			overshot = 1;
			bump_n = 1;
		} else if (overshot) {
			/* undershot again after an overshoot: grow M once more */
			bump_n = 0;
		}
	}

	par->init.pclk_m = m;
	par->init.pclk_n = n;
	par->init.pclk_p = 0;
}
/*
 * Build CRTC timings for the IBM DAC from a per-resolution seed table.
 * Returns a pointer to par->init on success, NULL for an unsupported xres.
 */
static struct imstt_regvals *
compute_imstt_regvals_ibm(struct imstt_par *par, int xres, int yres)
{
	struct imstt_regvals *regs = &par->init;
	__u32 freq, hsync_end, hblank_end, vblank_end, hporch, vporch;

	if (xres == 640) {
		hsync_end = 0x0008; hblank_end = 0x0012; vblank_end = 0x002a;
		hporch = 10; vporch = 2;
		freq = 30 /* .25 */;
	} else if (xres == 832) {
		hsync_end = 0x0005; hblank_end = 0x0020; vblank_end = 0x0028;
		hporch = 8; vporch = 3;
		freq = 57 /* .27_ */;
	} else if (xres == 1024) {
		hsync_end = 0x000a; hblank_end = 0x001c; vblank_end = 0x0020;
		hporch = 8; vporch = 3;
		freq = 80;
	} else if (xres == 1152) {
		hsync_end = 0x0012; hblank_end = 0x0022; vblank_end = 0x0031;
		hporch = 4; vporch = 3;
		freq = 101 /* .6_ */;
	} else if (xres == 1280) {
		hsync_end = 0x0012; hblank_end = 0x002f; vblank_end = 0x0029;
		hporch = 4; vporch = 1;
		freq = (yres == 960) ? 126 : 135;
	} else if (xres == 1600) {
		hsync_end = 0x0018; hblank_end = 0x0040; vblank_end = 0x002a;
		hporch = 4; vporch = 3;
		freq = 200;
	} else {
		return NULL;
	}

	setclkMHz(par, freq);

	/* Horizontal: end-of-sync, end-of-blank, start-of-blank, total. */
	regs->hes = hsync_end;
	regs->heb = hblank_end;
	regs->hsb = hblank_end + (xres >> 3);
	regs->ht  = regs->hsb + hporch;
	/* Vertical: fixed sync end, then blank end / start / total / interrupt. */
	regs->ves = 0x0003;
	regs->veb = vblank_end;
	regs->vsb = vblank_end + yres;
	regs->vt  = regs->vsb + vporch;
	regs->vil = regs->vsb;

	regs->pitch = xres;
	return regs;
}
/*
 * Pick the canned TVP register set matching the requested resolution and
 * copy it into par->init.  Returns the chosen preset, or NULL when the
 * resolution has no table.
 */
static struct imstt_regvals *
compute_imstt_regvals_tvp(struct imstt_par *par, int xres, int yres)
{
	struct imstt_regvals *preset = NULL;

	if (xres == 512)
		preset = &tvp_reg_init_2;
	else if (xres == 640)
		preset = &tvp_reg_init_6;
	else if (xres == 800)
		preset = &tvp_reg_init_12;
	else if (xres == 832)
		preset = &tvp_reg_init_13;
	else if (xres == 1024)
		preset = &tvp_reg_init_17;
	else if (xres == 1152)
		preset = &tvp_reg_init_18;
	else if (xres == 1280)
		preset = (yres == 960) ? &tvp_reg_init_19 : &tvp_reg_init_20;

	if (!preset)
		return NULL;

	par->init = *preset;
	return preset;
}
/* Dispatch mode computation to the routine for the fitted RAMDAC. */
static struct imstt_regvals *
compute_imstt_regvals (struct imstt_par *par, u_int xres, u_int yres)
{
	return (par->ramdac == IBM)
		? compute_imstt_regvals_ibm(par, xres, yres)
		: compute_imstt_regvals_tvp(par, xres, yres);
}
/*
 * Program the IBM DAC for the current mode: pixel PLL M/N/P dividers and
 * the pixel format code.  Registers are indexed — select through
 * PIDXHI/PIDXLO, then write PIDXDATA; eieio() orders the uncached stores
 * on PowerPC.  The write order below is the hardware's expected sequence.
 */
static void
set_imstt_regvals_ibm (struct imstt_par *par, u_int bpp)
{
	struct imstt_regvals *init = &par->init;
	__u8 pformat = (bpp >> 3) + 2;	/* 8->3, 16->4, 24->5, 32->6 */

	par->cmap_regs[PIDXHI] = 0;		eieio();
	par->cmap_regs[PIDXLO] = PIXM0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_m;eieio();
	par->cmap_regs[PIDXLO] = PIXN0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_n;eieio();
	par->cmap_regs[PIDXLO] = PIXP0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_p;eieio();
	par->cmap_regs[PIDXLO] = PIXC0;		eieio();
	par->cmap_regs[PIDXDATA] = 0x02;	eieio();

	par->cmap_regs[PIDXLO] = PIXFMT;	eieio();
	par->cmap_regs[PIDXDATA] = pformat;	eieio();
}
/*
 * Program the TVP DAC for the current mode.  Per-depth values for the
 * true-colour control (tcc), multiplex control (mxc), loop-clock PLL and
 * memory-loop config come partly from constants and partly from the
 * mode's canned table (mlc[]/lckl_p[] indexed by depth).  Registers are
 * indexed via TVPADDRW/TVPIDATA; eieio() orders the uncached stores.
 */
static void
set_imstt_regvals_tvp (struct imstt_par *par, u_int bpp)
{
	struct imstt_regvals *init = &par->init;
	__u8 tcc, mxc, lckl_n, mic;
	__u8 mlc, lckl_p;

	switch (bpp) {
		default:
		case 8:
			tcc = 0x80;
			mxc = 0x4d;
			lckl_n = 0xc1;
			mlc = init->mlc[0];
			lckl_p = init->lckl_p[0];
			break;
		case 16:
			tcc = 0x44;
			mxc = 0x55;
			lckl_n = 0xe1;
			mlc = init->mlc[1];
			lckl_p = init->lckl_p[1];
			break;
		case 24:
			tcc = 0x5e;
			mxc = 0x5d;
			lckl_n = 0xf1;
			mlc = init->mlc[2];
			lckl_p = init->lckl_p[2];
			break;
		case 32:	/* 24 and 32bpp share the depth-2 table slot */
			tcc = 0x46;
			mxc = 0x5d;
			lckl_n = 0xf1;
			mlc = init->mlc[2];
			lckl_p = init->lckl_p[2];
			break;
	}
	mic = 0x08;

	/* Pixel PLL: select PLL address 0, then write M, N, P in sequence. */
	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x00;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_m;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_n;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_p;	eieio();

	par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
	par->cmap_regs[TVPIDATA] = tcc;		eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMXC;	eieio();
	par->cmap_regs[TVPIDATA] = mxc;		eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMIC;	eieio();
	par->cmap_regs[TVPIDATA] = mic;		eieio();

	/* Loop-clock PLL: N value at PLL address 0, P value at 0x2a. */
	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x00;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRLPD;	eieio();
	par->cmap_regs[TVPIDATA] = lckl_n;	eieio();

	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x15;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMLC;	eieio();
	par->cmap_regs[TVPIDATA] = mlc;		eieio();

	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x2a;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRLPD;	eieio();
	par->cmap_regs[TVPIDATA] = lckl_p;	eieio();
}
/*
 * Program the display controller for the current mode: DAC setup first,
 * then CRTC timings, refresh parameters, and the control/pitch/byte-swap
 * registers whose values depend on colour depth and VRAM size.
 */
static void
set_imstt_regvals (struct fb_info *info, u_int bpp)
{
	struct imstt_par *par = info->par;
	struct imstt_regvals *init = &par->init;
	__u32 ctl, pitch, byteswap, scr;

	if (par->ramdac == IBM)
		set_imstt_regvals_ibm(par, bpp);
	else
		set_imstt_regvals_tvp(par, bpp);

  /*
   * From what I (jsk) can gather poking around with MacsBug,
   * bits 8 and 9 in the SCR register control endianness
   * correction (byte swapping).  These bits must be set according
   * to the color depth as follows:
   *     Color depth    Bit 9   Bit 8
   *     ==========     =====   =====
   *        8bpp          0       0
   *       16bpp          0       1
   *       32bpp          1       1
   */
	switch (bpp) {
		default:
		case 8:
			ctl = 0x17b1;
			pitch = init->pitch >> 2;	/* pitch in 32-bit words */
			byteswap = 0x000;
			break;
		case 16:
			ctl = 0x17b3;
			pitch = init->pitch >> 1;
			byteswap = 0x100;
			break;
		case 24:
			ctl = 0x17b9;
			pitch = init->pitch - (init->pitch >> 2);	/* 3/4 of pitch */
			byteswap = 0x200;
			break;
		case 32:
			ctl = 0x17b5;
			pitch = init->pitch;
			byteswap = 0x300;
			break;
	}
	if (par->ramdac == TVP)
		ctl -= 0x30;	/* TVP boards use a slightly different control value */

	/* CRTC timing block computed by compute_imstt_regvals(). */
	write_reg_le32(par->dc_regs, HES, init->hes);
	write_reg_le32(par->dc_regs, HEB, init->heb);
	write_reg_le32(par->dc_regs, HSB, init->hsb);
	write_reg_le32(par->dc_regs, HT, init->ht);
	write_reg_le32(par->dc_regs, VES, init->ves);
	write_reg_le32(par->dc_regs, VEB, init->veb);
	write_reg_le32(par->dc_regs, VSB, init->vsb);
	write_reg_le32(par->dc_regs, VT, init->vt);
	write_reg_le32(par->dc_regs, VIL, init->vil);
	write_reg_le32(par->dc_regs, HCIV, 1);
	write_reg_le32(par->dc_regs, VCIV, 1);
	write_reg_le32(par->dc_regs, TCDR, 4);
	write_reg_le32(par->dc_regs, RRCIV, 1);
	write_reg_le32(par->dc_regs, RRSC, 0x980);
	write_reg_le32(par->dc_regs, RRCR, 0x11);

	/* DAC-specific refresh/sync settings. */
	if (par->ramdac == IBM) {
		write_reg_le32(par->dc_regs, HRIR, 0x0100);
		write_reg_le32(par->dc_regs, CMR, 0x00ff);
		write_reg_le32(par->dc_regs, SRGCTL, 0x0073);
	} else {
		write_reg_le32(par->dc_regs, HRIR, 0x0200);
		write_reg_le32(par->dc_regs, CMR, 0x01ff);
		write_reg_le32(par->dc_regs, SRGCTL, 0x0003);
	}

	/* SCR and pitch depend on the amount of VRAM fitted. */
	switch (info->fix.smem_len) {
		case 0x200000:
			scr = 0x059d | byteswap;
			break;
		/* case 0x400000:
		   case 0x800000: */
		default:
			pitch >>= 1;
			scr = 0x150dd | byteswap;
			break;
	}

	write_reg_le32(par->dc_regs, SCR, scr);
	write_reg_le32(par->dc_regs, SPR, pitch);

	write_reg_le32(par->dc_regs, STGCTL, ctl);
}
/*
 * Program the framebuffer start offset (panning) into SSR.  Both the
 * line and pixel contributions are shifted right by 3, i.e. the register
 * presumably takes the offset in 8-byte units.
 */
static inline void
set_offset (struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 off = var->yoffset * (info->fix.line_length >> 3)
		    + ((var->xoffset * (info->var.bits_per_pixel >> 3)) >> 3);
	write_reg_le32(par->dc_regs, SSR, off);
}
/* Switch the DAC's 16bpp decoding to RGB 555 (5-bit green). */
static inline void
set_555 (struct imstt_par *par)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;		eieio();
		par->cmap_regs[PIDXLO] = BPP16;		eieio();
		par->cmap_regs[PIDXDATA] = 0x01;	eieio();
	} else {
		par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
		par->cmap_regs[TVPIDATA] = 0x44;	eieio();
	}
}
/* Switch the DAC's 16bpp decoding to RGB 565 (6-bit green). */
static inline void
set_565 (struct imstt_par *par)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;		eieio();
		par->cmap_regs[PIDXLO] = BPP16;		eieio();
		par->cmap_regs[PIDXDATA] = 0x03;	eieio();
	} else {
		par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
		par->cmap_regs[TVPIDATA] = 0x45;	eieio();
	}
}
static int
imsttfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
if ((var->bits_per_pixel != 8 && var->bits_per_pixel != 16
&& var->bits_per_pixel != 24 && var->bits_per_pixel != 32)
|| var->xres_virtual < var->xres || var->yres_virtual < var->yres
|| var->nonstd
|| (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
return -EINVAL;
if ((var->xres * var->yres) * (var->bits_per_pixel >> 3) > info->fix.smem_len
|| (var->xres_virtual * var->yres_virtual) * (var->bits_per_pixel >> 3) > info->fix.smem_len)
return -EINVAL;
switch (var->bits_per_pixel) {
case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 16: /* RGB 555 or 565 */
if (var->green.length != 6)
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 5;
if (var->green.length != 6)
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 24: /* RGB 888 */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 32: /* RGBA 8888 */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
}
if (var->yres == var->yres_virtual) {
__u32 vram = (info->fix.smem_len - (PAGE_SIZE << 2));
var->yres_virtual = ((vram << 3) / var->bits_per_pixel) / var->xres_virtual;
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
var->height = -1;
var->width = -1;
var->vmode = FB_VMODE_NONINTERLACED;
var->left_margin = var->right_margin = 16;
var->upper_margin = var->lower_margin = 16;
var->hsync_len = var->vsync_len = 8;
return 0;
}
static int
imsttfb_set_par(struct fb_info *info)
{
struct imstt_par *par = info->par;
if (!compute_imstt_regvals(par, info->var.xres, info->var.yres))
return -EINVAL;
if (info->var.green.length == 6)
set_565(par);
else
set_555(par);
set_imstt_regvals(info, info->var.bits_per_pixel);
info->var.pixclock = 1000000 / getclkMHz(par);
return 0;
}
/*
 * fb_setcolreg: load one palette entry into the DAC and mirror the first
 * 16 entries into the software pseudo-palette that fbcon uses in
 * direct-colour (>8bpp) modes.
 */
static int
imsttfb_setcolreg (u_int regno, u_int red, u_int green, u_int blue,
		   u_int transp, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	u_int bpp = info->var.bits_per_pixel;

	if (regno > 255)
		return 1;

	/* fbdev supplies 16-bit colour components; the DAC takes 8. */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	/* PADDRW/PDATA are the same as TVPPADDRW/TVPPDATA */
	/* The shifted-index variant is deliberately disabled ("screws up X"). */
	if (0 && bpp == 16)	/* screws up X */
		par->cmap_regs[PADDRW] = regno << 3;
	else
		par->cmap_regs[PADDRW] = regno;		eieio();

	par->cmap_regs[PDATA] = red;	eieio();
	par->cmap_regs[PDATA] = green;	eieio();
	par->cmap_regs[PDATA] = blue;	eieio();

	/* Pseudo-palette entry: the index replicated into each component. */
	if (regno < 16)
		switch (bpp) {
			case 16:
				par->palette[regno] =
					(regno << (info->var.green.length ==
					5 ? 10 : 11)) | (regno << 5) | regno;
				break;
			case 24:
				par->palette[regno] =
					(regno << 16) | (regno << 8) | regno;
				break;
			case 32: {
				int i = (regno << 8) | regno;
				par->palette[regno] = (i << 16) |i;
				break;
			}
		}
	return 0;
}
/*
 * fb_pan_display: move the visible window within the virtual screen.
 * Rejects offsets that would push the window outside the virtual extent.
 */
static int
imsttfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct fb_var_screeninfo *cur = &info->var;

	if (var->xoffset + cur->xres > cur->xres_virtual)
		return -EINVAL;
	if (var->yoffset + cur->yres > cur->yres_virtual)
		return -EINVAL;

	cur->xoffset = var->xoffset;
	cur->yoffset = var->yoffset;
	set_offset(var, info);
	return 0;
}
/*
 * fb_blank: blank/unblank the display by gating sync/video bits in STGCTL
 * and, on IBM DACs, also powering parts of the DAC up or down through its
 * indexed control registers.
 */
static int
imsttfb_blank(int blank, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 ctrl;

	ctrl = read_reg_le32(par->dc_regs, STGCTL);
	if (blank > 0) {
		switch (blank) {
		case FB_BLANK_NORMAL:
		case FB_BLANK_POWERDOWN:
			/* Drop video output entirely; power down the IBM DAC. */
			ctrl &= ~0x00000380;
			if (par->ramdac == IBM) {
				par->cmap_regs[PIDXHI] = 0;		eieio();
				par->cmap_regs[PIDXLO] = MISCTL2;	eieio();
				par->cmap_regs[PIDXDATA] = 0x55;	eieio();
				par->cmap_regs[PIDXLO] = MISCTL1;	eieio();
				par->cmap_regs[PIDXDATA] = 0x11;	eieio();
				par->cmap_regs[PIDXLO] = SYNCCTL;	eieio();
				par->cmap_regs[PIDXDATA] = 0x0f;	eieio();
				par->cmap_regs[PIDXLO] = PWRMNGMT;	eieio();
				par->cmap_regs[PIDXDATA] = 0x1f;	eieio();
				par->cmap_regs[PIDXLO] = CLKCTL;	eieio();
				par->cmap_regs[PIDXDATA] = 0xc0;
			}
			break;
		case FB_BLANK_VSYNC_SUSPEND:
			ctrl &= ~0x00000020;	/* gate vertical sync */
			break;
		case FB_BLANK_HSYNC_SUSPEND:
			ctrl &= ~0x00000010;	/* gate horizontal sync */
			break;
		}
	} else {
		/* Unblank: restore sync/video bits and re-power the DAC. */
		if (par->ramdac == IBM) {
			ctrl |= 0x000017b0;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = CLKCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x01;	eieio();
			par->cmap_regs[PIDXLO] = PWRMNGMT;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
			par->cmap_regs[PIDXLO] = SYNCCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
			par->cmap_regs[PIDXLO] = MISCTL1;	eieio();
			par->cmap_regs[PIDXDATA] = 0x01;	eieio();
			par->cmap_regs[PIDXLO] = MISCTL2;	eieio();
			par->cmap_regs[PIDXDATA] = 0x45;	eieio();
		} else
			ctrl |= 0x00001780;
	}
	write_reg_le32(par->dc_regs, STGCTL, ctrl);
	return 0;
}
/*
 * fb_fillrect: fill a rectangle using the blitter.
 * ROP_COPY uses a solid-colour blit; anything else falls back to a
 * source==destination XOR-style blit (BLTCTL 0x40005).
 * The CNT register takes (height-1, width-1); addresses are in bytes.
 * NOTE(review): bgc replicates rect->color across byte lanes, which
 * treats it as a raw 8-bit pattern even in direct-colour modes where
 * rect->color is a palette index — confirm against cfb_fillrect usage.
 */
static void
imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct imstt_par *par = info->par;
	__u32 Bpp, line_pitch, bgc, dx, dy, width, height;

	/* Replicate the 8-bit fill colour across all four byte lanes. */
	bgc = rect->color;
	bgc |= (bgc << 8);
	bgc |= (bgc << 16);

	/* (was a comma operator here — two statements accidentally fused) */
	Bpp = info->var.bits_per_pixel >> 3;
	line_pitch = info->fix.line_length;

	dy = rect->dy * line_pitch;
	dx = rect->dx * Bpp;
	height = rect->height;
	height--;
	width = rect->width * Bpp;
	width--;

	if (rect->rop == ROP_COPY) {
		/* Wait for the engine to go idle before programming it. */
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		write_reg_le32(par->dc_regs, DSA, dy + dx);
		write_reg_le32(par->dc_regs, CNT, (height << 16) | width);
		write_reg_le32(par->dc_regs, DP_OCTL, line_pitch);
		write_reg_le32(par->dc_regs, BI, 0xffffffff);
		write_reg_le32(par->dc_regs, MBC, 0xffffffff);
		write_reg_le32(par->dc_regs, CLR, bgc);
		write_reg_le32(par->dc_regs, BLTCTL, 0x840); /* 0x200000 */
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
	} else {
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		write_reg_le32(par->dc_regs, DSA, dy + dx);
		write_reg_le32(par->dc_regs, S1SA, dy + dx);
		write_reg_le32(par->dc_regs, CNT, (height << 16) | width);
		write_reg_le32(par->dc_regs, DP_OCTL, line_pitch);
		write_reg_le32(par->dc_regs, SP, line_pitch);
		write_reg_le32(par->dc_regs, BLTCTL, 0x40005);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
	}
}
/*
 * fb_copyarea: screen-to-screen blit using the engine.
 * Handles overlap by reversing the scan direction: bottom-up when the
 * destination is below the source (negative pitch in SP/DP_OCTL),
 * right-to-left when it is to the right (BLTCTL bit 0x80, negative
 * width in CNT).  CNT takes (height-1, width-1); addresses are bytes.
 */
static void
imsttfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	struct imstt_par *par = info->par;
	__u32 Bpp, line_pitch, fb_offset_old, fb_offset_new, sp, dp_octl;
	__u32 cnt, bltctl, sx, sy, dx, dy, height, width;

	/* (was a comma operator here — two statements accidentally fused) */
	Bpp = info->var.bits_per_pixel >> 3;

	sx = area->sx * Bpp;
	sy = area->sy;
	dx = area->dx * Bpp;
	dy = area->dy;
	height = area->height;
	height--;
	width = area->width * Bpp;
	width--;

	line_pitch = info->fix.line_length;
	bltctl = 0x05;
	sp = line_pitch << 16;
	cnt = height << 16;

	if (sy < dy) {
		/* Copying downward: start from the last line, step backward. */
		sy += height;
		dy += height;
		sp |= -(line_pitch) & 0xffff;
		dp_octl = -(line_pitch) & 0xffff;
	} else {
		sp |= line_pitch;
		dp_octl = line_pitch;
	}
	if (sx < dx) {
		/* Copying rightward: start from the last byte, step backward. */
		sx += width;
		dx += width;
		bltctl |= 0x80;
		cnt |= -(width) & 0xffff;
	} else {
		cnt |= width;
	}
	fb_offset_old = sy * line_pitch + sx;
	fb_offset_new = dy * line_pitch + dx;

	/* Wait for the engine, program the blit, then wait for completion. */
	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
	write_reg_le32(par->dc_regs, S1SA, fb_offset_old);
	write_reg_le32(par->dc_regs, SP, sp);
	write_reg_le32(par->dc_regs, DSA, fb_offset_new);
	write_reg_le32(par->dc_regs, CNT, cnt);
	write_reg_le32(par->dc_regs, DP_OCTL, dp_octl);
	write_reg_le32(par->dc_regs, BLTCTL, bltctl);
	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
}
#if 0
/*
 * NOTE: this function is inside an "#if 0" block — it is compiled out.
 * Loads a solid width x height cursor image into the DAC's cursor RAM and
 * sets all three cursor colours to the single 8-bit value fgc.
 */
static int
imsttfb_load_cursor_image(struct imstt_par *par, int width, int height, __u8 fgc)
{
	u_int x, y;

	if (width > 32 || height > 32)
		return -EINVAL;

	if (par->ramdac == IBM) {
		/* Clear the first 256 bytes of cursor RAM (index page 1). */
		par->cmap_regs[PIDXHI] = 1;	eieio();
		for (x = 0; x < 0x100; x++) {
			par->cmap_regs[PIDXLO] = x;		eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
		}
		/* Fill the image area (4 pixels per byte, 8 bytes per row). */
		par->cmap_regs[PIDXHI] = 1;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 2; x++) {
				par->cmap_regs[PIDXLO] = x + y * 8;	eieio();
				par->cmap_regs[PIDXDATA] = 0xff;	eieio();
			}
		/* All three cursor colour registers get the same grey value. */
		par->cmap_regs[PIDXHI] = 0;	eieio();
		par->cmap_regs[PIDXLO] = CURS1R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS1G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS1B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS2R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS2G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS2B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS3R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS3G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
		par->cmap_regs[PIDXLO] = CURS3B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;	eieio();
	} else {
		/* TVP path: clear then fill both cursor RAM planes. */
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] &= 0x03;	eieio();
		par->cmap_regs[TVPADDRW] = 0;	eieio();
		for (x = 0; x < 0x200; x++) {
			par->cmap_regs[TVPCRDAT] = 0x00;	eieio();
		}
		for (x = 0; x < 0x200; x++) {
			par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
		}
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] &= 0x03;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 3; x++) {
				par->cmap_regs[TVPADDRW] = x + y * 8;	eieio();
				par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
			}
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] |= 0x08;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 3; x++) {
				par->cmap_regs[TVPADDRW] = x + y * 8;	eieio();
				par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
			}
		/* Cursor colour RAM: 12 bytes, all set to fgc. */
		par->cmap_regs[TVPCADRW] = 0x00;	eieio();
		for (x = 0; x < 12; x++) {
			par->cmap_regs[TVPCDATA] = fgc;
			eieio();
		}
	}
	return 1;
}
/*
 * NOTE: this function is inside an "#if 0" block — it is compiled out.
 * Enable/disable the hardware cursor and program its screen position.
 * The TVP position registers carry a +0x40 bias (offset origin).
 */
static void
imstt_set_cursor(struct imstt_par *par, struct fb_image *d, int on)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;	eieio();
		if (!on) {
			par->cmap_regs[PIDXLO] = CURSCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
		} else {
			par->cmap_regs[PIDXLO] = CURSXHI;	eieio();
			par->cmap_regs[PIDXDATA] = d->dx >> 8;	eieio();
			par->cmap_regs[PIDXLO] = CURSXLO;	eieio();
			par->cmap_regs[PIDXDATA] = d->dx & 0xff;eieio();
			par->cmap_regs[PIDXLO] = CURSYHI;	eieio();
			par->cmap_regs[PIDXDATA] = d->dy >> 8;	eieio();
			par->cmap_regs[PIDXLO] = CURSYLO;	eieio();
			par->cmap_regs[PIDXDATA] = d->dy & 0xff;eieio();
			par->cmap_regs[PIDXLO] = CURSCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x02;	eieio();
		}
	} else {
		if (!on) {
			par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
			par->cmap_regs[TVPIDATA] = 0x00;	eieio();
		} else {
			__u16 x = d->dx + 0x40, y = d->dy + 0x40;

			par->cmap_regs[TVPCXPOH] = x >> 8;	eieio();
			par->cmap_regs[TVPCXPOL] = x & 0xff;	eieio();
			par->cmap_regs[TVPCYPOH] = y >> 8;	eieio();
			par->cmap_regs[TVPCYPOL] = y & 0xff;	eieio();
			par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
			par->cmap_regs[TVPIDATA] = 0x02;	eieio();
		}
	}
}
/*
 * NOTE: this function is inside an "#if 0" block — it is compiled out,
 * and as written it would not compile: i, j, s_idx, d_idx, data, mask,
 * byte_rev, MAX_CURS and fgc are never declared, and imstt_set_cursor()
 * is called with `info`/`cursor` where it expects `par`/`fb_image`.
 * It is an unfinished fb_cursor skeleton kept for reference.
 */
static int
imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct imstt_par *par = info->par;
	u32 flags = cursor->set, fg, bg, xx, yy;

	if (cursor->dest == NULL && cursor->rop == ROP_XOR)
		return 1;

	imstt_set_cursor(info, cursor, 0);

	if (flags & FB_CUR_SETPOS) {
		xx = cursor->image.dx - info->var.xoffset;
		yy = cursor->image.dy - info->var.yoffset;
	}

	if (flags & FB_CUR_SETSIZE) {
	}

	if (flags & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP)) {
		int fg_idx = cursor->image.fg_color;
		int width = (cursor->image.width+7)/8;
		u8 *dat = (u8 *) cursor->image.data;
		u8 *dst = (u8 *) cursor->dest;
		u8 *msk = (u8 *) cursor->mask;

		switch (cursor->rop) {
		case ROP_XOR:
			for (i = 0; i < cursor->image.height; i++) {
				for (j = 0; j < width; j++) {
					d_idx = i * MAX_CURS/8  + j;
					data[d_idx] =  byte_rev[dat[s_idx] ^
								dst[s_idx]];
					mask[d_idx] = byte_rev[msk[s_idx]];
					s_idx++;
				}
			}
			break;
		case ROP_COPY:
		default:
			for (i = 0; i < cursor->image.height; i++) {
				for (j = 0; j < width; j++) {
					d_idx = i * MAX_CURS/8 + j;
					data[d_idx] = byte_rev[dat[s_idx]];
					mask[d_idx] = byte_rev[msk[s_idx]];
					s_idx++;
				}
			}
			break;
		}

		fg = ((info->cmap.red[fg_idx] & 0xf8) << 7) |
		     ((info->cmap.green[fg_idx] & 0xf8) << 2) |
		     ((info->cmap.blue[fg_idx] & 0xf8) >> 3) | 1 << 15;

		imsttfb_load_cursor_image(par, xx, yy, fgc);
	}

	if (cursor->enable)
		imstt_set_cursor(info, cursor, 1);
	return 0;
}
#endif
/* Private ioctls: raw access to DC, DAC and indexed-DAC registers (debug). */
#define FBIMSTT_SETREG		0x545401
#define FBIMSTT_GETREG		0x545402
#define FBIMSTT_SETCMAPREG	0x545403
#define FBIMSTT_GETCMAPREG	0x545404
#define FBIMSTT_SETIDXREG	0x545405
#define FBIMSTT_GETIDXREG	0x545406
/*
 * fb_ioctl: private register peek/poke interface.
 * Register indices from userspace are bounds-checked against the 4KB
 * (0x1000-byte) register windows before use.
 *
 * Fix: the address-of operator in both copy_to_user() calls had been
 * mangled to "®[1]" (HTML-entity corruption of "&reg[1]"), which does
 * not compile; restored to &reg[1].
 */
static int
imsttfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
	struct imstt_par *par = info->par;
	void __user *argp = (void __user *)arg;
	__u32 reg[2];
	__u8 idx[2];

	switch (cmd) {
		case FBIMSTT_SETREG:
			if (copy_from_user(reg, argp, 8) || reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			write_reg_le32(par->dc_regs, reg[0], reg[1]);
			return 0;
		case FBIMSTT_GETREG:
			if (copy_from_user(reg, argp, 4) || reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			reg[1] = read_reg_le32(par->dc_regs, reg[0]);
			if (copy_to_user((void __user *)(arg + 4), &reg[1], 4))
				return -EFAULT;
			return 0;
		case FBIMSTT_SETCMAPREG:
			if (copy_from_user(reg, argp, 8) || reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			write_reg_le32(((u_int __iomem *)par->cmap_regs), reg[0], reg[1]);
			return 0;
		case FBIMSTT_GETCMAPREG:
			if (copy_from_user(reg, argp, 4) || reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			reg[1] = read_reg_le32(((u_int __iomem *)par->cmap_regs), reg[0]);
			if (copy_to_user((void __user *)(arg + 4), &reg[1], 4))
				return -EFAULT;
			return 0;
		case FBIMSTT_SETIDXREG:
			/* idx[0] = DAC index, idx[1] = value to store. */
			if (copy_from_user(idx, argp, 2))
				return -EFAULT;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = idx[0];	eieio();
			par->cmap_regs[PIDXDATA] = idx[1];	eieio();
			return 0;
		case FBIMSTT_GETIDXREG:
			if (copy_from_user(idx, argp, 1))
				return -EFAULT;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = idx[0];	eieio();
			idx[1] = par->cmap_regs[PIDXDATA];
			if (copy_to_user((void __user *)(arg + 1), &idx[1], 1))
				return -EFAULT;
			return 0;
		default:
			return -ENOIOCTLCMD;
	}
}
/* PCI IDs handled by this driver; driver_data records the fitted RAMDAC. */
static struct pci_device_id imsttfb_pci_tbl[] = {
	{ PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT128,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, IBM },
	{ PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT3D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TVP },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, imsttfb_pci_tbl);

/* PCI driver glue: probe/remove are defined below. */
static struct pci_driver imsttfb_pci_driver = {
	.name =		"imsttfb",
	.id_table =	imsttfb_pci_tbl,
	.probe =	imsttfb_probe,
	.remove =	__devexit_p(imsttfb_remove),
};
/* fbdev entry points: accelerated fill/copy, generic imageblit. */
static struct fb_ops imsttfb_ops = {
	.owner 		= THIS_MODULE,
	.fb_check_var	= imsttfb_check_var,
	.fb_set_par 	= imsttfb_set_par,
	.fb_setcolreg 	= imsttfb_setcolreg,
	.fb_pan_display = imsttfb_pan_display,
	.fb_blank 	= imsttfb_blank,
	.fb_fillrect	= imsttfb_fillrect,
	.fb_copyarea	= imsttfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_ioctl	= imsttfb_ioctl,
};
/*
 * One-time hardware and fb_info initialization, called from probe after
 * the register windows have been mapped: sizes VRAM, clears it, programs
 * the DAC defaults, picks the initial mode (NVRAM on PPC32, compiled-in
 * defaults otherwise) and registers the framebuffer.
 *
 * NOTE(review): on the two error paths below this calls
 * framebuffer_release(info) and returns, but the caller (imsttfb_probe)
 * continues to use `info` (pci_set_drvdata) — looks like a potential
 * use-after-free; confirm before relying on the error behaviour.
 */
static void __devinit
init_imstt(struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 i, tmp, *ip, *end;

	/* IBM boards report 2MB vs 4MB via a PRC bit; TVP boards have 8MB. */
	tmp = read_reg_le32(par->dc_regs, PRC);
	if (par->ramdac == IBM)
		info->fix.smem_len = (tmp & 0x0004) ? 0x400000 : 0x200000;
	else
		info->fix.smem_len = 0x800000;

	/* Clear all of VRAM with CPU stores. */
	ip = (__u32 *)info->screen_base;
	end = (__u32 *)(info->screen_base + info->fix.smem_len);
	while (ip < end)
		*ip++ = 0;

	/* initialize the card */
	tmp = read_reg_le32(par->dc_regs, STGCTL);
	write_reg_le32(par->dc_regs, STGCTL, tmp & ~0x1);
	write_reg_le32(par->dc_regs, SSR, 0);

	/* set default values for DAC registers */
	if (par->ramdac == IBM) {
		par->cmap_regs[PPMASK] = 0xff;
		eieio();
		par->cmap_regs[PIDXHI] = 0;
		eieio();
		for (i = 0; i < ARRAY_SIZE(ibm_initregs); i++) {
			par->cmap_regs[PIDXLO] = ibm_initregs[i].addr;
			eieio();
			par->cmap_regs[PIDXDATA] = ibm_initregs[i].value;
			eieio();
		}
	} else {
		for (i = 0; i < ARRAY_SIZE(tvp_initregs); i++) {
			par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr;
			eieio();
			par->cmap_regs[TVPIDATA] = tvp_initregs[i].value;
			eieio();
		}
	}

#if USE_NV_MODES && defined(CONFIG_PPC32)
	{
		/* Command-line vmode/cmode win; otherwise consult NVRAM. */
		int vmode = init_vmode, cmode = init_cmode;

		if (vmode == -1) {
			vmode = nvram_read_byte(NV_VMODE);
			if (vmode <= 0 || vmode > VMODE_MAX)
				vmode = VMODE_640_480_67;
		}
		if (cmode == -1) {
			cmode = nvram_read_byte(NV_CMODE);
			if (cmode < CMODE_8 || cmode > CMODE_32)
				cmode = CMODE_8;
		}
		if (mac_vmode_to_var(vmode, cmode, &info->var)) {
			info->var.xres = info->var.xres_virtual = INIT_XRES;
			info->var.yres = info->var.yres_virtual = INIT_YRES;
			info->var.bits_per_pixel = INIT_BPP;
		}
	}
#else
	info->var.xres = info->var.xres_virtual = INIT_XRES;
	info->var.yres = info->var.yres_virtual = INIT_YRES;
	info->var.bits_per_pixel = INIT_BPP;
#endif

	if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
	    || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
		printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
		framebuffer_release(info);
		return;
	}

	sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
	info->fix.mmio_len = 0x1000;
	info->fix.accel = FB_ACCEL_IMS_TWINTURBO;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = info->var.bits_per_pixel == 8 ? FB_VISUAL_PSEUDOCOLOR
							 : FB_VISUAL_DIRECTCOLOR;
	info->fix.line_length = info->var.xres * (info->var.bits_per_pixel >> 3);
	info->fix.xpanstep = 8;
	info->fix.ypanstep = 1;
	info->fix.ywrapstep = 0;

	info->var.accel_flags = FB_ACCELF_TEXT;

//	if (par->ramdac == IBM)
//		imstt_cursor_init(info);

	if (info->var.green.length == 6)
		set_565(par);
	else
		set_555(par);
	set_imstt_regvals(info, info->var.bits_per_pixel);

	info->var.pixclock = 1000000 / getclkMHz(par);

	info->fbops = &imsttfb_ops;
	info->flags = FBINFO_DEFAULT |
                      FBINFO_HWACCEL_COPYAREA |
	              FBINFO_HWACCEL_FILLRECT |
	              FBINFO_HWACCEL_YPAN;

	fb_alloc_cmap(&info->cmap, 0, 0);

	if (register_framebuffer(info) < 0) {
		framebuffer_release(info);
		return;
	}

	tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
	printk("fb%u: %s frame buffer; %uMB vram; chip version %u\n",
		info->node, info->fix.id, info->fix.smem_len >> 20, tmp);
}
/*
 * PCI probe: allocate the fb_info, reserve and map the three register/VRAM
 * windows (framebuffer at BAR0, display controller at +0x800000, DAC at
 * +0x840000), then hand off to init_imstt().
 *
 * Fix: the three ioremap() results were never checked; a mapping failure
 * previously led straight to a NULL dereference in init_imstt().
 */
static int __devinit
imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long addr, size;
	struct imstt_par *par;
	struct fb_info *info;
	int ret = -ENODEV;
#ifdef CONFIG_PPC_OF
	struct device_node *dp;

	dp = pci_device_to_OF_node(pdev);
	if(dp)
		printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name);
	else
		printk(KERN_ERR "imsttfb: no OF node for pci device\n");
#endif /* CONFIG_PPC_OF */

	info = framebuffer_alloc(sizeof(struct imstt_par), &pdev->dev);
	if (!info) {
		printk(KERN_ERR "imsttfb: Can't allocate memory\n");
		return -ENOMEM;
	}

	par = info->par;

	addr = pci_resource_start (pdev, 0);
	size = pci_resource_len (pdev, 0);

	if (!request_mem_region(addr, size, "imsttfb")) {
		printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
		framebuffer_release(info);
		return -ENODEV;
	}

	switch (pdev->device) {
		case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */
			par->ramdac = IBM;
#ifdef CONFIG_PPC_OF
			/* Some tt128mb8 variants carry a TVP DAC instead. */
			if (dp && ((strcmp(dp->name, "IMS,tt128mb8") == 0) ||
				   (strcmp(dp->name, "IMS,tt128mb8A") == 0)))
				par->ramdac = TVP;
#endif /* CONFIG_PPC_OF */
			break;
		case PCI_DEVICE_ID_IMS_TT3D:  /* IMS,tt3d */
			par->ramdac = TVP;
			break;
		default:
			printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
					 "contact maintainer.\n", pdev->device);
			goto err_release_region;
	}

	info->fix.smem_start = addr;
	info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
					    0x400000 : 0x800000);
	if (!info->screen_base) {
		ret = -ENOMEM;
		goto err_release_region;
	}
	info->fix.mmio_start = addr + 0x800000;
	par->dc_regs = ioremap(addr + 0x800000, 0x1000);
	if (!par->dc_regs) {
		ret = -ENOMEM;
		goto err_unmap_screen;
	}
	par->cmap_regs_phys = addr + 0x840000;
	par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
	if (!par->cmap_regs) {
		ret = -ENOMEM;
		goto err_unmap_dc;
	}
	info->pseudo_palette = par->palette;
	/* NOTE(review): init_imstt() may framebuffer_release(info) on
	 * failure while we still use it below — confirm and fix jointly. */
	init_imstt(info);

	pci_set_drvdata(pdev, info);
	return 0;

err_unmap_dc:
	iounmap(par->dc_regs);
err_unmap_screen:
	iounmap(info->screen_base);
err_release_region:
	release_mem_region(addr, size);
	framebuffer_release(info);
	return ret;
}
/*
 * PCI remove: tear down in reverse order of probe — unregister the
 * framebuffer, unmap the three windows, then release BAR0 and the info.
 */
static void __devexit
imsttfb_remove(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct imstt_par *par = info->par;
	int size = pci_resource_len(pdev, 0);

	unregister_framebuffer(info);
	iounmap(par->cmap_regs);
	iounmap(par->dc_regs);
	iounmap(info->screen_base);
	release_mem_region(info->fix.smem_start, size);
	framebuffer_release(info);
}
#ifndef MODULE
/*
 * Parse the "video=imsttfb:..." option string: comma-separated tokens
 * "font:<name>", "inverse", and (PPC) "vmode:<n>" / "cmode:<n>".
 *
 * Fix: the font-name length scan never advanced `p`, so it tested the
 * same character 39 times and copied either nothing or the whole
 * 39-byte buffer; `p` now steps with the loop counter.
 */
static int __init
imsttfb_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!strncmp(this_opt, "font:", 5)) {
			char *p;
			int i;

			p = this_opt + 5;
			/* Length of the name: up to NUL, blank or comma,
			 * capped at sizeof(fontname) - 1. */
			for (i = 0; i < sizeof(fontname) - 1; i++, p++)
				if (!*p || *p == ' ' || *p == ',')
					break;
			memcpy(fontname, this_opt + 5, i);
			fontname[i] = 0;
		} else if (!strncmp(this_opt, "inverse", 7)) {
			inverse = 1;
			fb_invert_cmaps();
		}
#if defined(CONFIG_PPC)
		else if (!strncmp(this_opt, "vmode:", 6)) {
			int vmode = simple_strtoul(this_opt+6, NULL, 0);
			if (vmode > 0 && vmode <= VMODE_MAX)
				init_vmode = vmode;
		} else if (!strncmp(this_opt, "cmode:", 6)) {
			int cmode = simple_strtoul(this_opt+6, NULL, 0);
			/* Accept either CMODE_* codes or plain bit depths. */
			switch (cmode) {
				case CMODE_8:
				case 8:
					init_cmode = CMODE_8;
					break;
				case CMODE_16:
				case 15:
				case 16:
					init_cmode = CMODE_16;
					break;
				case CMODE_32:
				case 24:
				case 32:
					init_cmode = CMODE_32;
					break;
			}
		}
#endif
	}
	return 0;
}
#endif /* MODULE */
/* Module init: consume "imsttfb" boot options, then register the driver. */
static int __init imsttfb_init(void)
{
#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("imsttfb", &option))
		return -ENODEV;

	imsttfb_setup(option);
#endif
	return pci_register_driver(&imsttfb_pci_driver);
}
/* Module exit: unregister the PCI driver (remove() handles per-device teardown). */
static void __exit imsttfb_exit(void)
{
	pci_unregister_driver(&imsttfb_pci_driver);
}
MODULE_LICENSE("GPL");
module_init(imsttfb_init);
module_exit(imsttfb_exit);
| gpl-2.0 |
GustavoRD78/78Kernel-5.1.1-23.4.A.0.546 | drivers/staging/wlan-ng/prism2usb.c | 5239 | 9974 | #include "hfa384x_usb.c"
#include "prism2mgmt.c"
#include "prism2mib.c"
#include "prism2sta.c"
#include "prism2fw.c"
/* USB match-table entry whose driver_info carries a human-readable name. */
#define PRISM_USB_DEVICE(vid, pid, name) \
	USB_DEVICE(vid, pid), \
	.driver_info = (unsigned long) name
static struct usb_device_id usb_prism_tbl[] = {
{PRISM_USB_DEVICE(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS")},
{PRISM_USB_DEVICE(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11")},
{PRISM_USB_DEVICE(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x067c, 0x1022, "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x049f, 0x0033,
"Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter")},
{PRISM_USB_DEVICE
(0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter")},
{PRISM_USB_DEVICE
(0x08de, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")},
{PRISM_USB_DEVICE
(0x8086, 0x1111, "Intel PRO/Wireless 2011B LAN USB Adapter")},
{PRISM_USB_DEVICE
(0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")},
{PRISM_USB_DEVICE
(0x045e, 0x006e, "Microsoft MN510 Wireless USB Adapter")},
{PRISM_USB_DEVICE(0x0967, 0x0204, "Acer Warplink USB Adapter")},
{PRISM_USB_DEVICE
(0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated")},
{PRISM_USB_DEVICE
(0x0cde, 0x0005, "Z-Com Xl735 Wireless 802.11b USB Adapter")},
{PRISM_USB_DEVICE
(0x413c, 0x8100, "Dell TrueMobile 1180 Wireless USB Adapter")},
{PRISM_USB_DEVICE
(0x0b3b, 0x1601, "ALLNET 0193 11Mbps WLAN USB Adapter")},
{PRISM_USB_DEVICE
(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 Wireless USB Adapter")},
{PRISM_USB_DEVICE
(0x0baf, 0x00eb, "USRobotics USR1120 Wireless USB Adapter")},
{PRISM_USB_DEVICE
(0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter")},
{PRISM_USB_DEVICE
(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter")},
{PRISM_USB_DEVICE(0x0846, 0x4110, "NetGear MA111")},
{PRISM_USB_DEVICE(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter")},
{PRISM_USB_DEVICE(0x2821, 0x3300, "ASUS-WL140 Wireless USB Adapter")},
{PRISM_USB_DEVICE(0x2001, 0x3700, "DWL-122 Wireless USB Adapter")},
{PRISM_USB_DEVICE
(0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter")},
{PRISM_USB_DEVICE(0x50c2, 0x4013, "Averatec USB WLAN Adapter")},
{PRISM_USB_DEVICE(0x2c02, 0x14ea, "Planex GW-US11H WLAN USB Adapter")},
{PRISM_USB_DEVICE(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter")},
{PRISM_USB_DEVICE(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter")},
{PRISM_USB_DEVICE(0x2821, 0x3300, "Hawking HighDB USB Adapter")},
{PRISM_USB_DEVICE
(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter")},
{PRISM_USB_DEVICE(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter")},
{PRISM_USB_DEVICE
(0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter")},
{PRISM_USB_DEVICE(0x0bb2, 0x0302, "Ambit Microsystems Corp.")},
{PRISM_USB_DEVICE
(0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter")},
{PRISM_USB_DEVICE
(0x0543, 0x0f01, "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)")},
{ /* terminator */ }
};
MODULE_DEVICE_TABLE(usb, usb_prism_tbl);
/*----------------------------------------------------------------
* prism2sta_probe_usb
*
* Probe routine called by the USB subsystem.
*
* Arguments:
* dev ptr to the usb_device struct
* ifnum interface number being offered
*
* Returns:
* NULL - we're not claiming the device+interface
* non-NULL - we are claiming the device+interface and
* this is a ptr to the data we want back
* when disconnect is called.
*
* Side effects:
*
* Call context:
* I'm not sure, assume it's interrupt.
*
----------------------------------------------------------------*/
/*
 * prism2sta_probe_usb
 *
 * USB probe: allocate the wlandevice/hw pair, bind it to the USB
 * interface, optionally core-reset the MAC, try a firmware load,
 * enable the card, and register the netdevice.
 *
 * Fixes over the original error handling:
 *  - a failed hfa384x_corereset() no longer calls unregister_wlandev()
 *    on a device that was never registered (and no longer destroys hw
 *    twice via hfa384x_destroy() + kfree());
 *  - a failed register_wlandev() now drops the reference taken by
 *    usb_get_dev();
 *  - wlan_setup() is undone via wlan_unsetup() on the later failures.
 */
static int prism2sta_probe_usb(struct usb_interface *interface,
			       const struct usb_device_id *id)
{
	struct usb_device *dev;
	wlandevice_t *wlandev = NULL;
	hfa384x_t *hw = NULL;
	int result = 0;

	dev = interface_to_usbdev(interface);

	wlandev = create_wlan();
	if (wlandev == NULL) {
		printk(KERN_ERR "%s: Memory allocation failure.\n", dev_info);
		result = -EIO;
		goto failed;
	}
	hw = wlandev->priv;

	if (wlan_setup(wlandev, &(interface->dev)) != 0) {
		printk(KERN_ERR "%s: wlan_setup() failed.\n", dev_info);
		result = -EIO;
		goto failed;
	}

	/* Initialize the hw data */
	hfa384x_create(hw, dev);
	hw->wlandev = wlandev;

	/* Register the wlandev, this gets us a name and registers the
	 * linux netdevice.
	 */
	SET_NETDEV_DEV(wlandev->netdev, &(interface->dev));

	/* Do a chip-level reset on the MAC */
	if (prism2_doreset) {
		result = hfa384x_corereset(hw,
					   prism2_reset_holdtime,
					   prism2_reset_settletime, 0);
		if (result != 0) {
			result = -EIO;
			printk(KERN_ERR
			       "%s: hfa384x_corereset() failed.\n", dev_info);
			goto failed_reset;
		}
	}

	usb_get_dev(dev);

	wlandev->msdstate = WLAN_MSD_HWPRESENT;

	/* Try and load firmware, then enable card before we register */
	prism2_fwtry(dev, wlandev);
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);

	if (register_wlandev(wlandev) != 0) {
		printk(KERN_ERR "%s: register_wlandev() failed.\n", dev_info);
		result = -EIO;
		goto failed_register;
	}

	goto done;

failed_register:
	usb_put_dev(dev);	/* balance the usb_get_dev() above */
failed_reset:
	wlan_unsetup(wlandev);	/* undo wlan_setup() */
failed:
	kfree(wlandev);
	kfree(hw);
	wlandev = NULL;

done:
	/* NULL on failure, so disconnect() sees nothing to tear down. */
	usb_set_intfdata(interface, wlandev);
	return result;
}
/*----------------------------------------------------------------
* prism2sta_disconnect_usb
*
* Called when a device previously claimed by probe is removed
* from the USB.
*
* Arguments:
* dev ptr to the usb_device struct
* ptr ptr returned by probe() when the device
* was claimed.
*
* Returns:
* Nothing
*
* Side effects:
*
* Call context:
* process
----------------------------------------------------------------*/
/* Tear down a device claimed by prism2sta_probe_usb(). The ordering
 * below is deliberate: the CTLX queues are emptied under the lock
 * first, then timers/URBs/tasklets are stopped, and only then are the
 * queued CTLXs completed and freed. */
static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
	wlandevice_t *wlandev;
	wlandev = (wlandevice_t *) usb_get_intfdata(interface);
	if (wlandev != NULL) {
		LIST_HEAD(cleanlist);
		struct list_head *entry;
		struct list_head *temp;
		unsigned long flags;
		hfa384x_t *hw = wlandev->priv;
		if (!hw)
			goto exit;
		spin_lock_irqsave(&hw->ctlxq.lock, flags);
		/* Mark the hardware gone before splicing out all pending
		 * CTLX work, so nothing new is queued behind our back. */
		p80211netdev_hwremoved(wlandev);
		list_splice_init(&hw->ctlxq.reapable, &cleanlist);
		list_splice_init(&hw->ctlxq.completing, &cleanlist);
		list_splice_init(&hw->ctlxq.pending, &cleanlist);
		list_splice_init(&hw->ctlxq.active, &cleanlist);
		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
		/* There's no hardware to shutdown, but the driver
		 * might have some tasks or tasklets that must be
		 * stopped before we can tear everything down.
		 */
		prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
		del_singleshot_timer_sync(&hw->throttle);
		del_singleshot_timer_sync(&hw->reqtimer);
		del_singleshot_timer_sync(&hw->resptimer);
		/* Unlink all the URBs. This "removes the wheels"
		 * from the entire CTLX handling mechanism.
		 */
		usb_kill_urb(&hw->rx_urb);
		usb_kill_urb(&hw->tx_urb);
		usb_kill_urb(&hw->ctlx_urb);
		tasklet_kill(&hw->completion_bh);
		tasklet_kill(&hw->reaper_bh);
		flush_scheduled_work();
		/* Now we complete any outstanding commands
		 * and tell everyone who is waiting for their
		 * responses that we have shut down.
		 */
		list_for_each(entry, &cleanlist) {
			hfa384x_usbctlx_t *ctlx;
			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
			complete(&ctlx->done);
		}
		/* Give any outstanding synchronous commands
		 * a chance to complete. All they need to do
		 * is "wake up", so that's easy.
		 * (I'd like a better way to do this, really.)
		 */
		msleep(100);
		/* Now delete the CTLXs, because no-one else can now. */
		list_for_each_safe(entry, temp, &cleanlist) {
			hfa384x_usbctlx_t *ctlx;
			ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
			kfree(ctlx);
		}
		/* Unhook the wlandev */
		unregister_wlandev(wlandev);
		wlan_unsetup(wlandev);
		/* Drop the reference taken in probe, then free both the
		 * hw state and the wlandev itself. */
		usb_put_dev(hw->usb);
		hfa384x_destroy(hw);
		kfree(hw);
		kfree(wlandev);
	}
exit:
	usb_set_intfdata(interface, NULL);
}
#ifdef CONFIG_PM
/* PM suspend callback: quiesce the interface and cancel every
 * in-flight USB transfer so nothing touches the bus while asleep. */
static int prism2sta_suspend(struct usb_interface *interface,
			     pm_message_t message)
{
	wlandevice_t *wlandev = (wlandevice_t *) usb_get_intfdata(interface);
	hfa384x_t *hw;

	if (!wlandev)
		return -ENODEV;

	hw = wlandev->priv;
	if (!hw)
		return -ENODEV;

	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);

	usb_kill_urb(&hw->rx_urb);
	usb_kill_urb(&hw->tx_urb);
	usb_kill_urb(&hw->ctlx_urb);

	return 0;
}
/* PM resume callback: optionally core-reset the MAC, then re-enable
 * the interface.
 *
 * Fix: on a failed hfa384x_corereset() the original freed wlandev/hw
 * but left the interface data pointing at the freed memory, so a later
 * disconnect would use-after-free; the interface data is now cleared
 * before returning. */
static int prism2sta_resume(struct usb_interface *interface)
{
	int result = 0;
	hfa384x_t *hw = NULL;
	wlandevice_t *wlandev;
	wlandev = (wlandevice_t *) usb_get_intfdata(interface);
	if (!wlandev)
		return -ENODEV;
	hw = wlandev->priv;
	if (!hw)
		return -ENODEV;
	/* Do a chip-level reset on the MAC */
	if (prism2_doreset) {
		result = hfa384x_corereset(hw,
					   prism2_reset_holdtime,
					   prism2_reset_settletime, 0);
		if (result != 0) {
			unregister_wlandev(wlandev);
			hfa384x_destroy(hw);
			printk(KERN_ERR
			       "%s: hfa384x_corereset() failed.\n", dev_info);
			kfree(wlandev);
			kfree(hw);
			wlandev = NULL;
			/* Don't leave a dangling pointer for disconnect. */
			usb_set_intfdata(interface, NULL);
			return -ENODEV;
		}
	}
	prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);
	return 0;
}
#else
#define prism2sta_suspend NULL
#define prism2sta_resume NULL
#endif /* CONFIG_PM */
/* USB driver glue: resume and reset_resume share the same handler, so
 * a post-reset device gets the same chip-level re-init as a normal
 * resume. */
static struct usb_driver prism2_usb_driver = {
	.name = "prism2_usb",
	.probe = prism2sta_probe_usb,
	.disconnect = prism2sta_disconnect_usb,
	.id_table = usb_prism_tbl,
	.suspend = prism2sta_suspend,
	.resume = prism2sta_resume,
	.reset_resume = prism2sta_resume,
	/* fops, minor? */
};
module_usb_driver(prism2_usb_driver);
| gpl-2.0 |
android-legacy/android_kernel_exynos | drivers/acpi/utils.c | 9079 | 9468 | /*
* acpi_utils.c - ACPI Utility Functions ($Revision: 10 $)
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "internal.h"
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("utils");
/* --------------------------------------------------------------------------
Object Evaluation Helpers
-------------------------------------------------------------------------- */
/* Log a failed ACPI evaluation: resolve the handle to its full
 * pathname and print "path.method: exception". Compiles to a no-op
 * unless ACPI debug output is built in. */
static void
acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
{
#ifdef ACPI_DEBUG_OUTPUT
	char path[80] = {'\0'};
	struct acpi_buffer path_buf = {sizeof(path), path};

	acpi_get_name(h, ACPI_FULL_PATHNAME, &path_buf);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
			 (char *) path, p, acpi_format_exception(s)));
#endif
}
acpi_status
acpi_extract_package(union acpi_object *package,
struct acpi_buffer *format, struct acpi_buffer *buffer)
{
u32 size_required = 0;
u32 tail_offset = 0;
char *format_string = NULL;
u32 format_count = 0;
u32 i = 0;
u8 *head = NULL;
u8 *tail = NULL;
if (!package || (package->type != ACPI_TYPE_PACKAGE)
|| (package->package.count < 1)) {
printk(KERN_WARNING PREFIX "Invalid package argument\n");
return AE_BAD_PARAMETER;
}
if (!format || !format->pointer || (format->length < 1)) {
printk(KERN_WARNING PREFIX "Invalid format argument\n");
return AE_BAD_PARAMETER;
}
if (!buffer) {
printk(KERN_WARNING PREFIX "Invalid buffer argument\n");
return AE_BAD_PARAMETER;
}
format_count = (format->length / sizeof(char)) - 1;
if (format_count > package->package.count) {
printk(KERN_WARNING PREFIX "Format specifies more objects [%d]"
" than exist in package [%d].\n",
format_count, package->package.count);
return AE_BAD_DATA;
}
format_string = format->pointer;
/*
* Calculate size_required.
*/
for (i = 0; i < format_count; i++) {
union acpi_object *element = &(package->package.elements[i]);
if (!element) {
return AE_BAD_DATA;
}
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
size_required += sizeof(u64);
tail_offset += sizeof(u64);
break;
case 'S':
size_required +=
sizeof(char *) + sizeof(u64) +
sizeof(char);
tail_offset += sizeof(char *);
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d]: got number, expecing"
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
size_required +=
sizeof(char *) +
(element->string.length * sizeof(char)) +
sizeof(char);
tail_offset += sizeof(char *);
break;
case 'B':
size_required +=
sizeof(u8 *) +
(element->buffer.length * sizeof(u8));
tail_offset += sizeof(u8 *);
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d] got string/buffer,"
" expecing [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
}
break;
case ACPI_TYPE_PACKAGE:
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found unsupported element at index=%d\n",
i));
/* TBD: handle nested packages... */
return AE_SUPPORT;
break;
}
}
/*
* Validate output buffer.
*/
if (buffer->length < size_required) {
buffer->length = size_required;
return AE_BUFFER_OVERFLOW;
} else if (buffer->length != size_required || !buffer->pointer) {
return AE_BAD_PARAMETER;
}
head = buffer->pointer;
tail = buffer->pointer + tail_offset;
/*
* Extract package data.
*/
for (i = 0; i < format_count; i++) {
u8 **pointer = NULL;
union acpi_object *element = &(package->package.elements[i]);
if (!element) {
return AE_BAD_DATA;
}
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
*((u64 *) head) =
element->integer.value;
head += sizeof(u64);
break;
case 'S':
pointer = (u8 **) head;
*pointer = tail;
*((u64 *) tail) =
element->integer.value;
head += sizeof(u64 *);
tail += sizeof(u64);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->string.pointer,
element->string.length);
head += sizeof(char *);
tail += element->string.length * sizeof(char);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
case 'B':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->buffer.pointer,
element->buffer.length);
head += sizeof(u8 *);
tail += element->buffer.length * sizeof(u8);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_PACKAGE:
/* TBD: handle nested packages... */
default:
/* Should never get here */
break;
}
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_extract_package);
/* Evaluate @pathname relative to @handle and return its value in
 * *@data; fails with AE_BAD_DATA unless the result is an integer. */
acpi_status
acpi_evaluate_integer(acpi_handle handle,
		      acpi_string pathname,
		      struct acpi_object_list *arguments, unsigned long long *data)
{
	union acpi_object obj;
	struct acpi_buffer obj_buffer = { sizeof(union acpi_object), &obj };
	acpi_status status;

	if (!data)
		return AE_BAD_PARAMETER;

	status = acpi_evaluate_object(handle, pathname, arguments, &obj_buffer);
	if (ACPI_FAILURE(status)) {
		acpi_util_eval_error(handle, pathname, status);
		return status;
	}

	if (obj.type != ACPI_TYPE_INTEGER) {
		acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
		return AE_BAD_DATA;
	}

	*data = obj.integer.value;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
	return AE_OK;
}
EXPORT_SYMBOL(acpi_evaluate_integer);
/**
 * acpi_evaluate_reference - evaluate an object returning a package of
 * local references and collect the referenced handles into @list.
 *
 * On any failure, list->count is zeroed and the evaluation buffer
 * (ACPI_ALLOCATE_BUFFER, owned by us) is freed.
 *
 * Fix: the "too many elements" check used to "return AE_NO_MEMORY"
 * directly, leaking buffer.pointer and leaving list->count stale; it
 * now routes through the common cleanup path.
 */
acpi_status
acpi_evaluate_reference(acpi_handle handle,
			acpi_string pathname,
			struct acpi_object_list *arguments,
			struct acpi_handle_list *list)
{
	acpi_status status = AE_OK;
	union acpi_object *package = NULL;
	union acpi_object *element = NULL;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	u32 i = 0;
	if (!list) {
		return AE_BAD_PARAMETER;
	}
	/* Evaluate object. */
	status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
	if (ACPI_FAILURE(status))
		goto end;
	package = buffer.pointer;
	if ((buffer.length == 0) || !package) {
		printk(KERN_ERR PREFIX "No return object (len %X ptr %p)\n",
		       (unsigned)buffer.length, package);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}
	if (package->type != ACPI_TYPE_PACKAGE) {
		printk(KERN_ERR PREFIX "Expecting a [Package], found type %X\n",
		       package->type);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}
	if (!package->package.count) {
		printk(KERN_ERR PREFIX "[Package] has zero elements (%p)\n",
		       package);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}
	if (package->package.count > ACPI_MAX_HANDLES) {
		/* list->handles is a fixed-size array; go through the
		 * common exit so buffer.pointer is freed. */
		status = AE_NO_MEMORY;
		goto end;
	}
	list->count = package->package.count;
	/* Extract package data: every element must be a local reference. */
	for (i = 0; i < list->count; i++) {
		element = &(package->package.elements[i]);
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
			status = AE_BAD_DATA;
			printk(KERN_ERR PREFIX
			       "Expecting a [Reference] package element, found type %X\n",
			       element->type);
			acpi_util_eval_error(handle, pathname, status);
			break;
		}
		if (!element->reference.handle) {
			printk(KERN_WARNING PREFIX "Invalid reference in"
			       " package %s\n", pathname);
			status = AE_NULL_ENTRY;
			break;
		}
		/* Get the acpi_handle. */
		list->handles[i] = element->reference.handle;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
				  list->handles[i]));
	}
end:
	if (ACPI_FAILURE(status))
		list->count = 0;
	kfree(buffer.pointer);
	return status;
}
EXPORT_SYMBOL(acpi_evaluate_reference);
| gpl-2.0 |
g3tsum/kernel_msm8974 | arch/powerpc/sysdev/fsl_85xx_cache_sram.c | 9079 | 4394 | /*
* Copyright 2009-2010 Freescale Semiconductor, Inc.
*
* Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM
*
* Author: Vivek Mahajan <vivek.mahajan@freescale.com>
*
* This file is derived from the original work done
* by Sylvain Munaut for the Bestcomm SRAM allocator.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <asm/pgtable.h>
#include <asm/fsl_85xx_cache_sram.h>
#include "fsl_85xx_cache_ctlr.h"
struct mpc85xx_cache_sram *cache_sram;
/* Allocate @size bytes from the cache-SRAM remote heap at the given
 * power-of-two @align. Returns the virtual address and stores the
 * physical address in *@phys, or returns NULL on any error. */
void *mpc85xx_cache_sram_alloc(unsigned int size,
			       phys_addr_t *phys, unsigned int align)
{
	unsigned long block_off;
	unsigned long irq_flags;

	/* Not initialized yet (or already removed). */
	if (unlikely(cache_sram == NULL))
		return NULL;

	if (!size || (size > cache_sram->size) || (align > cache_sram->size)) {
		pr_err("%s(): size(=%x) or align(=%x) zero or too big\n",
			__func__, size, align);
		return NULL;
	}

	/* Alignment must be a power of two greater than one. */
	if ((align & (align - 1)) || align <= 1) {
		pr_err("%s(): align(=%x) must be power of two and >1\n",
			__func__, align);
		return NULL;
	}

	spin_lock_irqsave(&cache_sram->lock, irq_flags);
	block_off = rh_alloc_align(cache_sram->rh, size, align, NULL);
	spin_unlock_irqrestore(&cache_sram->lock, irq_flags);

	if (IS_ERR_VALUE(block_off))
		return NULL;

	*phys = cache_sram->base_phys + block_off;
	return (unsigned char *)cache_sram->base_virt + block_off;
}
EXPORT_SYMBOL(mpc85xx_cache_sram_alloc);
/* Return a block previously handed out by mpc85xx_cache_sram_alloc()
 * to the remote heap. @ptr must be non-NULL. */
void mpc85xx_cache_sram_free(void *ptr)
{
	unsigned long irq_flags;

	BUG_ON(!ptr);

	spin_lock_irqsave(&cache_sram->lock, irq_flags);
	/* The heap tracks offsets, not virtual addresses. */
	rh_free(cache_sram->rh, ptr - cache_sram->base_virt);
	spin_unlock_irqrestore(&cache_sram->lock, irq_flags);
}
EXPORT_SYMBOL(mpc85xx_cache_sram_free);
/* Set up the single global cache-SRAM instance: claim the physical
 * region, map it, and build a remote heap over it. Returns 0 or a
 * negative errno; unwinds fully on failure. */
int __init instantiate_cache_sram(struct platform_device *dev,
		struct sram_parameters sram_params)
{
	int ret = 0;
	/* Only one instance is supported. */
	if (cache_sram) {
		dev_err(&dev->dev, "Already initialized cache-sram\n");
		return -EBUSY;
	}
	cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
	if (!cache_sram) {
		dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
		return -ENOMEM;
	}
	cache_sram->base_phys = sram_params.sram_offset;
	cache_sram->size = sram_params.sram_size;
	if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
				"fsl_85xx_cache_sram")) {
		dev_err(&dev->dev, "%s: request memory failed\n",
				dev->dev.of_node->full_name);
		ret = -ENXIO;
		goto out_free;
	}
	/* Map cache-coherent so CPU accesses see the SRAM contents. */
	cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
				cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
	if (!cache_sram->base_virt) {
		dev_err(&dev->dev, "%s: ioremap_prot failed\n",
				dev->dev.of_node->full_name);
		ret = -ENOMEM;
		goto out_release;
	}
	/* The remote heap manages offsets within the mapped region. */
	cache_sram->rh = rh_create(sizeof(unsigned int));
	if (IS_ERR(cache_sram->rh)) {
		dev_err(&dev->dev, "%s: Unable to create remote heap\n",
				dev->dev.of_node->full_name);
		ret = PTR_ERR(cache_sram->rh);
		goto out_unmap;
	}
	rh_attach_region(cache_sram->rh, 0, cache_sram->size);
	spin_lock_init(&cache_sram->lock);
	dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
		(unsigned long long)cache_sram->base_phys, cache_sram->size);
	return 0;
out_unmap:
	iounmap(cache_sram->base_virt);
out_release:
	release_mem_region(cache_sram->base_phys, cache_sram->size);
out_free:
	kfree(cache_sram);
	/* NOTE(review): cache_sram is left pointing at freed memory here;
	 * callers must not retry without it being reset — confirm whether
	 * it should be NULLed on failure as well. */
	return ret;
}
/* Tear down the global cache-SRAM instance in reverse order of
 * instantiate_cache_sram(): heap, mapping, region, then the struct. */
void remove_cache_sram(struct platform_device *dev)
{
	BUG_ON(!cache_sram);
	rh_detach_region(cache_sram->rh, 0, cache_sram->size);
	rh_destroy(cache_sram->rh);
	iounmap(cache_sram->base_virt);
	release_mem_region(cache_sram->base_phys, cache_sram->size);
	kfree(cache_sram);
	/* Reset so a later instantiate (or alloc) sees no stale state. */
	cache_sram = NULL;
	dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n");
}
| gpl-2.0 |
pvanhoof/kernel | drivers/isdn/hysdn/hysdn_sched.c | 9847 | 7095 | /* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
*
* Linux driver for HYSDN cards
* scheduler routines for handling exchange card <-> pc.
*
* Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
* Copyright 1999 by Werner Cornelius (werner@titro.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hysdn_defs.h"
/*****************************************************************************/
/* hysdn_sched_rx is called from the cards handler to announce new data is */
/* available from the card. The routine has to handle the data and return */
/* with a nonzero code if the data could be worked (or even thrown away), if */
/* no room to buffer the data is available a zero return tells the card */
/* to keep the data until later. */
/*****************************************************************************/
/* Dispatch one received message from the card by channel number.
 * Network and CAPI data are only forwarded when the corresponding
 * per-card enable bit is set; error-log entries may re-arm a fetch.
 * Always returns 1: the data is consumed (or dropped) either way. */
int
hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
	       unsigned short chan)
{
	if (chan == CHAN_NDIS_DATA) {
		/* forward to the network handler when enabled for this card */
		if (hynet_enable & (1 << card->myid))
			hysdn_rx_netpkt(card, buf, len);
	} else if (chan == CHAN_ERRLOG) {
		hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
		if (card->err_log_state == ERRLOG_STATE_ON)
			card->err_log_state = ERRLOG_STATE_START;	/* start new fetch */
#ifdef CONFIG_HYSDN_CAPI
	} else if (chan == CHAN_CAPI) {
		/* forward to the CAPI handler when enabled for this card */
		if (hycapi_enable & (1 << card->myid))
			hycapi_rx_capipkt(card, buf, len);
#endif /* CONFIG_HYSDN_CAPI */
	} else {
		printk(KERN_INFO "irq message channel %d len %d unhandled \n", chan, len);
	}
	return (1);		/* always handled */
}				/* hysdn_sched_rx */
/*****************************************************************************/
/* hysdn_sched_tx is called from the cards handler to announce that there is */
/* room in the tx-buffer to the card and data may be sent if needed. */
/* If the routine wants to send data it must fill buf, len and chan with the */
/* appropriate data and return a nonzero value. With a zero return no new */
/* data to send is assumed. maxlen specifies the buffer size available for */
/* sending. */
/*****************************************************************************/
/* Pick the next message to hand to the card, in strict priority order:
 * ack a completed net packet, then async (config) requests, then
 * error-log start/stop commands, then network data, then CAPI data.
 * Fills *len/*chan and returns 1 when there is something to send,
 * 0 otherwise. */
int
hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
	       unsigned short volatile *len, unsigned short volatile *chan,
	       unsigned short maxlen)
{
	struct sk_buff *skb;
	if (card->net_tx_busy) {
		card->net_tx_busy = 0;	/* reset flag */
		hysdn_tx_netack(card);	/* acknowledge packet send */
	}			/* a network packet has completely been transferred */
	/* first of all async requests are handled */
	if (card->async_busy) {
		if (card->async_len <= maxlen) {
			memcpy(buf, card->async_data, card->async_len);
			*len = card->async_len;
			*chan = card->async_channel;
			card->async_busy = 0;	/* reset request */
			return (1);
		}
		/* NOTE: an oversized async request is silently dropped here */
		card->async_busy = 0;	/* in case of length error */
	}			/* async request */
	if ((card->err_log_state == ERRLOG_STATE_START) &&
	    (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
		strcpy(buf, ERRLOG_CMD_REQ);	/* copy the command */
		*len = ERRLOG_CMD_REQ_SIZE;	/* buffer length */
		*chan = CHAN_ERRLOG;	/* and channel */
		card->err_log_state = ERRLOG_STATE_ON;	/* new state is on */
		return (1);	/* tell that data should be send */
	}			/* error log start and able to send */
	if ((card->err_log_state == ERRLOG_STATE_STOP) &&
	    (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
		strcpy(buf, ERRLOG_CMD_STOP);	/* copy the command */
		*len = ERRLOG_CMD_STOP_SIZE;	/* buffer length */
		*chan = CHAN_ERRLOG;	/* and channel */
		card->err_log_state = ERRLOG_STATE_OFF;	/* new state is off */
		return (1);	/* tell that data should be send */
	}			/* error log start and able to send */
	/* now handle network interface packets */
	if ((hynet_enable & (1 << card->myid)) &&
	    (skb = hysdn_tx_netget(card)) != NULL)
	{
		if (skb->len <= maxlen) {
			/* copy the packet to the buffer */
			skb_copy_from_linear_data(skb, buf, skb->len);
			*len = skb->len;
			*chan = CHAN_NDIS_DATA;
			card->net_tx_busy = 1;	/* we are busy sending network data */
			return (1);	/* go and send the data */
		} else
			hysdn_tx_netack(card);	/* aknowledge packet -> throw away */
	}			/* send a network packet if available */
#ifdef CONFIG_HYSDN_CAPI
	if (((hycapi_enable & (1 << card->myid))) &&
	    ((skb = hycapi_tx_capiget(card)) != NULL))
	{
		if (skb->len <= maxlen) {
			skb_copy_from_linear_data(skb, buf, skb->len);
			*len = skb->len;
			*chan = CHAN_CAPI;
			hycapi_tx_capiack(card);
			return (1);	/* go and send the data */
		}
		/* NOTE(review): an oversized CAPI packet is neither sent nor
		 * acked here — confirm whether it should be discarded like
		 * the network case above. */
	}
#endif /* CONFIG_HYSDN_CAPI */
	return (0);		/* nothing to send */
}				/* hysdn_sched_tx */
/*****************************************************************************/
/* send one config line to the card and return 0 if successful, otherwise a */
/* negative error code. */
/* The function works with timeouts perhaps not giving the greatest speed */
/* sending the line, but this should be meaningless because only some lines */
/* are to be sent and this happens very seldom. */
/*****************************************************************************/
/* Queue one NUL-terminated config line for asynchronous transmission
 * on channel @chan and wait (polling, 20 ms steps, ~1 s total) until
 * the card has taken it. Returns 0 on success or -ERR_ASYNC_TIME on
 * timeout. Must be callable from process context (sleeps). */
int
hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
{
	int cnt = 50;		/* timeout intervalls */
	unsigned long flags;
	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
	/* Wait for any previous async request to be picked up. */
	while (card->async_busy) {
		if (card->debug_flags & LOG_SCHED_ASYN)
			hysdn_addlog(card, "async tx-cfg delayed");
		msleep_interruptible(20);	/* Timeout 20ms */
		if (!--cnt)
			return (-ERR_ASYNC_TIME);	/* timed out */
	}			/* wait for buffer to become free */
	spin_lock_irqsave(&card->hysdn_lock, flags);
	/* NOTE(review): unbounded strcpy — assumes every config line fits
	 * in card->async_data; confirm the buffer size against callers. */
	strcpy(card->async_data, line);
	card->async_len = strlen(line) + 1;
	card->async_channel = chan;
	card->async_busy = 1;	/* request transfer */
	/* now queue the task */
	schedule_work(&card->irq_queue);
	spin_unlock_irqrestore(&card->hysdn_lock, flags);
	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg data queued");
	cnt++;			/* short delay */
	/* Now wait again until the scheduler has handed the line to the
	 * card (async_busy cleared by hysdn_sched_tx). */
	while (card->async_busy) {
		if (card->debug_flags & LOG_SCHED_ASYN)
			hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
		msleep_interruptible(20);	/* Timeout 20ms */
		if (!--cnt)
			return (-ERR_ASYNC_TIME);	/* timed out */
	}			/* wait for buffer to become free again */
	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg data send");
	return (0);		/* line send correctly */
}				/* hysdn_tx_cfgline */
| gpl-2.0 |
pec0ra/android_kernel_sony_msm8x60 | arch/sparc/kernel/psycho_common.c | 11895 | 14509 | /* psycho_common.c: Code common to PSYCHO and derivative PCI controllers.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002ULL
#define PSYCHO_STCERR_WRITE 0x0000000000000002ULL
#define PSYCHO_STCERR_READ 0x0000000000000001ULL
#define PSYCHO_STCTAG_PPN 0x0fffffff00000000ULL
#define PSYCHO_STCTAG_VPN 0x00000000ffffe000ULL
#define PSYCHO_STCTAG_VALID 0x0000000000000002ULL
#define PSYCHO_STCTAG_WRITE 0x0000000000000001ULL
#define PSYCHO_STCLINE_LINDX 0x0000000001e00000ULL
#define PSYCHO_STCLINE_SPTR 0x00000000001f8000ULL
#define PSYCHO_STCLINE_LADDR 0x0000000000007f00ULL
#define PSYCHO_STCLINE_EPTR 0x00000000000000fcULL
#define PSYCHO_STCLINE_VALID 0x0000000000000002ULL
#define PSYCHO_STCLINE_FOFN 0x0000000000000001ULL
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
/* Dump the streaming-cache (STC) error/tag/line diagnostic state for
 * @pbm. The registers are snapshotted into the static stc_*_buf arrays
 * under stc_buf_lock (buffers are shared across controllers), cleared,
 * and only then decoded and printed. */
static void psycho_check_stc_error(struct pci_pbm_info *pbm)
{
	unsigned long err_base, tag_base, line_base;
	struct strbuf *strbuf = &pbm->stc;
	u64 control;
	int i;
	/* No streaming cache on this PBM: nothing to report. */
	if (!strbuf->strbuf_control)
		return;
	err_base = strbuf->strbuf_err_stat;
	tag_base = strbuf->strbuf_tag_diag;
	line_base = strbuf->strbuf_line_diag;
	spin_lock(&stc_buf_lock);
	/* This is __REALLY__ dangerous. When we put the streaming
	 * buffer into diagnostic mode to probe it's tags and error
	 * status, we _must_ clear all of the line tag valid bits
	 * before re-enabling the streaming buffer. If any dirty data
	 * lives in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach main
	 * memory.
	 */
	control = upa_readq(strbuf->strbuf_control);
	upa_writeq(control | PSYCHO_STRBUF_CTRL_DENAB, strbuf->strbuf_control);
	/* Snapshot and clear the 128 per-entry error words... */
	for (i = 0; i < 128; i++) {
		u64 val;
		val = upa_readq(err_base + (i * 8UL));
		upa_writeq(0UL, err_base + (i * 8UL));
		stc_error_buf[i] = val;
	}
	/* ...and the 16 tag/line diagnostic pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
		stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
		upa_writeq(0UL, tag_base + (i * 8UL));
		upa_writeq(0UL, line_base + (i * 8UL));
	}
	/* OK, state is logged, exit diagnostic mode. */
	upa_writeq(control, strbuf->strbuf_control);
	/* Decode: 8 error words per tag/line entry; print an entry's
	 * tag and line state only if one of its error words is set. */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;
		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			u64 errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk(KERN_ERR "%s: STC_ERR(%d)[wr(%d)"
				       "rd(%d)]\n",
				       pbm->name,
				       j,
				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			u64 tagval = stc_tag_buf[i];
			u64 lineval = stc_line_buf[i];
			printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016llx)VA(%08llx)"
			       "V(%d)W(%d)]\n",
			       pbm->name,
			       i,
			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
			       (tagval & PSYCHO_STCTAG_VPN),
			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
			printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%llx)SP(%llx)"
			       "LADDR(%llx)EP(%llx)V(%d)FOFN(%d)]\n",
			       pbm->name,
			       i,
			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
		}
	}
	spin_unlock(&stc_buf_lock);
}
#define PSYCHO_IOMMU_TAG 0xa580UL
#define PSYCHO_IOMMU_DATA 0xa600UL
/* Snapshot all 16 IOMMU TLB diagnostic entries of @pbm into @tag and
 * @data, zeroing each hardware entry after it has been captured.
 */
static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm,
					      u64 *tag, u64 *data)
{
	unsigned long base = pbm->controller_regs;
	int slot;

	for (slot = 0; slot < 16; slot++) {
		unsigned long off = slot * 8UL;

		tag[slot] = upa_readq(base + PSYCHO_IOMMU_TAG + off);
		data[slot] = upa_readq(base + PSYCHO_IOMMU_DATA + off);

		/* Now clear out the entry. */
		upa_writeq(0, base + PSYCHO_IOMMU_TAG + off);
		upa_writeq(0, base + PSYCHO_IOMMU_DATA + off);
	}
}
#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffULL
#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffULL
/* Print every previously captured IOMMU TLB entry that has its error
 * bit set.  Entries without PSYCHO_IOMMU_TAG_ERR are skipped silently.
 */
static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
					    u64 *tag, u64 *data)
{
	/* ERRSTS is a 2-bit field, so every value 0..3 is covered. */
	static const char * const errtype_names[4] = {
		"Protection Error",
		"Invalid Error",
		"TimeOut Error",
		"ECC Error",
	};
	int slot;

	for (slot = 0; slot < 16; slot++) {
		u64 tval = tag[slot];
		u64 dval;

		if (!(tval & PSYCHO_IOMMU_TAG_ERR))
			continue;

		dval = data[slot];
		printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) "
		       "str(%d) sz(%dK) vpg(%08llx)]\n",
		       pbm->name, slot,
		       errtype_names[(tval & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL],
		       ((tval & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
		       ((tval & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
		       ((tval & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
		       (tval & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
		printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) "
		       "ppg(%016llx)]\n",
		       pbm->name, slot,
		       ((dval & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
		       ((dval & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
		       (dval & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
	}
}
#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL
#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL
/* Check for and report a latched IOMMU translation error on @pbm,
 * then dump streaming-cache state via psycho_check_stc_error().
 * Runs under the IOMMU lock with interrupts disabled.
 *
 * NOTE(review): @afsr, @afar and @type are not referenced in this
 * body -- presumably kept for call-site symmetry with other psycho
 * error handlers; confirm against callers.
 */
void psycho_check_iommu_error(struct pci_pbm_info *pbm,
			      unsigned long afsr,
			      unsigned long afar,
			      enum psycho_error_type type)
{
	u64 control, iommu_tag[16], iommu_data[16];
	struct iommu *iommu = pbm->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	control = upa_readq(iommu->iommu_control);
	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
		const char *type_str;

		/* Clear the translate-error latch before probing. */
		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
		upa_writeq(control, iommu->iommu_control);

		/* XLTESTAT (mask 0x06000000) occupies bits 25-26, so the
		 * shifted value is always in 0..3.
		 */
		switch ((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_str = "Protection Error";
			break;
		case 1:
			type_str = "Invalid Error";
			break;
		case 2:
			type_str = "TimeOut Error";
			break;
		case 3:
		default:
			type_str = "ECC Error";
			break;
		}
		printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
		       pbm->name, type_str);

		/* It is very possible for another DVMA to occur while
		 * we do this probe, and corrupt the system further.
		 * But we are so screwed at this point that we are
		 * likely to crash hard anyways, so get as much
		 * diagnostic information to the console as we can.
		 */
		psycho_record_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
		psycho_dump_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
	}
	psycho_check_stc_error(pbm);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL
#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL
/* Handle PCI error interrupt sources that do not latch AFSR bits:
 * streaming-byte-hole and SERR conditions in the PCI control/status
 * register, plus error bits in the host bridge's PCI_STATUS config
 * register.  Returns IRQ_HANDLED only if something was found.
 */
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	irqreturn_t ret = IRQ_NONE;
	u64 csr, csr_error_bits;
	u16 stat, *addr;

	csr = upa_readq(pbm->pci_csr);
	csr_error_bits = csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors. */
		upa_writeq(csr, pbm->pci_csr);

		/* Log 'em. */
		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
			printk(KERN_ERR "%s: PCI streaming byte hole "
			       "error asserted.\n", pbm->name);
		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
			printk(KERN_ERR "%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}

	/* Probe PCI_STATUS of device 0 on the primary bus.  The 0xffff
	 * write clears whatever status bits were reported (PCI status
	 * error bits are write-one-to-clear per the PCI spec).
	 */
	addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
					0, PCI_STATUS);
	pci_config_read16(addr, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk(KERN_ERR "%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_config_write16(addr, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
#define PSYCHO_PCIAFSR_PMA 0x8000000000000000ULL
#define PSYCHO_PCIAFSR_PTA 0x4000000000000000ULL
#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000ULL
#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000ULL
#define PSYCHO_PCIAFSR_SMA 0x0800000000000000ULL
#define PSYCHO_PCIAFSR_STA 0x0400000000000000ULL
#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000ULL
#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000ULL
#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000ULL
#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000ULL
#define PSYCHO_PCIAFSR_BLK 0x0000000080000000ULL
#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000ULL
#define PSYCHO_PCIAFSR_MID 0x000000003e000000ULL
#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffULL
/* Top-level PCI error interrupt handler for a Psycho PBM.  Reads the
 * latched AFSR/AFAR pair, reports primary and secondary error causes,
 * and triggers per-device bus scans for the error classes seen.
 */
irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	u64 afsr, afar, error_bits;
	int reported;

	afsr = upa_readq(pbm->pci_afsr);
	afar = upa_readq(pbm->pci_afar);

	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	/* Nothing latched in the AFSR: fall back to the "other" error
	 * sources (CSR bits and device PCI_STATUS).
	 */
	if (!error_bits)
		return psycho_pcierr_intr_other(pbm);
	/* Write the latched bits back -- presumably clears them, matching
	 * the CSR handling in psycho_pcierr_intr_other().
	 */
	upa_writeq(error_bits, pbm->pci_afsr);

	/* Report the highest-priority primary error present. */
	printk(KERN_ERR "%s: PCI Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk(KERN_ERR "%s: bytemask[%04llx] UPA_MID[%02llx] was_block(%d)\n",
	       pbm->name,
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk(KERN_ERR "%s: PCI AFAR [%016llx]\n", pbm->name, afar);

	/* List every secondary error, or "(none)". */
	printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name);
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Scan the bus for the offending device(s), per error class.
	 * Target aborts also warrant an IOMMU consistency check.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(pbm, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(pbm, pbm->pci_bus);
	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
/* Zero all 16 IOMMU diagnostic TAG/DATA entry pairs of @pbm. */
static void psycho_iommu_flush(struct pci_pbm_info *pbm)
{
	unsigned long base = pbm->controller_regs;
	int slot;

	for (slot = 0; slot < 16; slot++) {
		unsigned long off = slot * 8;

		upa_writeq(0, base + PSYCHO_IOMMU_TAG + off);
		upa_writeq(0, base + PSYCHO_IOMMU_DATA + off);
	}
}
#define PSYCHO_IOMMU_CONTROL 0x0200UL
#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL
#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL
#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL
#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL
#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL
#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL
#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL
#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL
#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL
#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL
#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL
#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL
#define PSYCHO_IOMMU_FLUSH 0x0210UL
#define PSYCHO_IOMMU_TSBBASE 0x0208UL
/* Initialize the Psycho IOMMU for @pbm.
 *
 * @tsbsize is the TSB size selector; only 64 and 128 (-> 64K/128K TSB)
 * are supported by this chip.  Returns 0 on success, -EINVAL for an
 * unsupported @tsbsize, or the error from iommu_table_init().
 *
 * Fix vs. original: @tsbsize used to be checked only after
 * iommu_table_init() had already allocated the page table, so an
 * invalid size leaked that allocation (and left the hardware half
 * configured).  Validate up front, before any side effects.
 */
int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
		      u32 dvma_offset, u32 dma_mask,
		      unsigned long write_complete_offset)
{
	struct iommu *iommu = pbm->iommu;
	u64 control;
	int err;

	if (tsbsize != 64 && tsbsize != 128)
		return -EINVAL;

	iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL;
	iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE;
	iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH;
	iommu->iommu_tags = pbm->controller_regs + PSYCHO_IOMMU_TAG;
	iommu->write_complete_reg = (pbm->controller_regs +
				     write_complete_offset);

	/* Left 0 -- presumably Psycho has no context-flush register. */
	iommu->iommu_ctxflush = 0;

	/* Enter diagnostic mode and clear the TLB entries. */
	control = upa_readq(iommu->iommu_control);
	control |= PSYCHO_IOMMU_CTRL_DENAB;
	upa_writeq(control, iommu->iommu_control);

	psycho_iommu_flush(pbm);

	/* Leave diag mode enabled for full-flushing done in pci_iommu.c */
	err = iommu_table_init(iommu, tsbsize * 1024 * 8,
			       dvma_offset, dma_mask, pbm->numa_node);
	if (err)
		return err;

	upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);

	/* Program the TSB size and enable translation. */
	control = upa_readq(iommu->iommu_control);
	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
	control |= PSYCHO_IOMMU_CTRL_ENAB;
	control |= (tsbsize == 64) ?
		PSYCHO_IOMMU_TSBSZ_64K : PSYCHO_IOMMU_TSBSZ_128K;
	upa_writeq(control, iommu->iommu_control);

	return 0;
}
/* Fill in the chip-independent fields of a Psycho/Sabre PBM and
 * perform the probe-time setup shared by both chip drivers.
 */
void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op,
			    const char *chip_name, int chip_type)
{
	struct device_node *dp = op->dev.of_node;

	pbm->name = dp->full_name;
	pbm->numa_node = -1;	/* no NUMA node association */
	pbm->chip_type = chip_type;
	/* Version/revision come from device-tree properties, 0 if absent. */
	pbm->chip_version = of_getintprop_default(dp, "version#", 0);
	pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
	pbm->op = op;
	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 8;
	pbm->index = pci_num_pbms++;	/* global PBM counter */
	pci_get_pbm_props(pbm);
	pci_determine_mem_io_space(pbm);
	printk(KERN_INFO "%s: %s PCI Bus Module ver[%x:%x]\n",
	       pbm->name, chip_name,
	       pbm->chip_version, pbm->chip_revision);
}
| gpl-2.0 |
shardul-seth/PAC-man_semc-kernel-msm7x30 | drivers/pnp/isapnp/compat.c | 14711 | 2196 | /*
* compat.c - A series of functions to make it easier to convert drivers that use
* the old isapnp APIs. If possible use the new APIs instead.
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
*/
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/string.h>
/*
 * Render a 16-bit EISA-encoded vendor id plus a 16-bit device id into
 * the 7-character PnP id string ("ABC1234" style) used by the PnP core.
 * @buf must have room for at least 8 bytes (7 characters + NUL).
 */
static void pnp_convert_id(char *buf, unsigned short vendor,
			   unsigned short device)
{
	/* Three compressed 5/6-bit letters, stored 'A' == 1. */
	buf[0] = 'A' + ((vendor >> 2) & 0x3f) - 1;
	buf[1] = 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1;
	buf[2] = 'A' + ((vendor >> 8) & 0x1f) - 1;
	/* Four hex digits of the device id, byte-swapped nibble order. */
	sprintf(buf + 3, "%x%x%x%x",
		(device >> 4) & 0x0f, device & 0x0f,
		(device >> 12) & 0x0f, (device >> 8) & 0x0f);
}
/*
 * pnp_find_card - old-isapnp-API lookup of a card by vendor/device id.
 * @vendor: EISA-encoded vendor id.
 * @device: device id.
 * @from:   resume the search after this card, or NULL to start over.
 *
 * Returns the first matching card on the global card list, or NULL.
 * A wildcard request (ISAPNP_ANY_ID/ISAPNP_ANY_ID) matches any card.
 *
 * Fix vs. original: the wildcard memcmp() was re-evaluated on every
 * loop iteration although it is loop-invariant; hoist it out.  The
 * compare_pnp_id() call stays first so evaluation order is unchanged.
 */
struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device,
			       struct pnp_card *from)
{
	char id[8];
	char any[8];
	struct list_head *list;
	int match_any;

	pnp_convert_id(id, vendor, device);
	pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
	match_any = (memcmp(id, any, 7) == 0);

	list = from ? from->global_list.next : pnp_cards.next;
	while (list != &pnp_cards) {
		struct pnp_card *card = global_to_pnp_card(list);

		if (compare_pnp_id(card->id, id) || match_any)
			return card;
		list = list->next;
	}
	return NULL;
}
/*
 * pnp_find_dev - old-isapnp-API lookup of a logical device.
 * @card:     restrict the search to this card, or NULL to walk the
 *            global device list across all cards.
 * @vendor:   EISA-encoded vendor id.
 * @function: logical device number (converted like a device id).
 * @from:     resume the search after this device, or NULL to restart.
 *
 * Returns the first matching device, or NULL.  Note the wildcard
 * (ISAPNP_ANY_ID) short-circuit is honoured only in the all-cards
 * branch, not in the per-card branch -- matching the original isapnp
 * semantics this compatibility layer reproduces.
 */
struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
			     unsigned short function, struct pnp_dev *from)
{
	char id[8];
	char any[8];

	pnp_convert_id(id, vendor, function);
	pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
	if (card == NULL) {	/* look for a logical device from all cards */
		struct list_head *list;

		list = pnp_global.next;
		if (from)
			list = from->global_list.next;

		while (list != &pnp_global) {
			struct pnp_dev *dev = global_to_pnp_dev(list);

			if (compare_pnp_id(dev->id, id) ||
			    (memcmp(id, any, 7) == 0))
				return dev;
			list = list->next;
		}
	} else {
		struct list_head *list;

		list = card->devices.next;
		if (from) {
			list = from->card_list.next;
			/* @from must belong to @card to resume from it. */
			if (from->card != card)	/* something is wrong */
				return NULL;
		}
		while (list != &card->devices) {
			struct pnp_dev *dev = card_to_pnp_dev(list);

			if (compare_pnp_id(dev->id, id))
				return dev;
			list = list->next;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pnp_find_card);
EXPORT_SYMBOL(pnp_find_dev);
| gpl-2.0 |
zombah/ubuntu-ac100 | drivers/macintosh/apm_emu.c | 14967 | 3225 | /*
* APM emulation for PMU-based machines
*
* Copyright 2001 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/apm-emulation.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#define APM_CRITICAL 10
#define APM_LOW 30
/*
 * Translate PMU battery/AC state into the generic APM power-status
 * structure.  Aggregates every battery flagged PMU_BATT_PRESENT.
 */
static void pmu_apm_get_power_status(struct apm_power_info *info)
{
	int percentage = -1;	/* summed (then averaged) charge %; -1 = unknown */
	int batteries = 0;	/* batteries physically present */
	int time_units = -1;	/* minutes remaining; -1 = unknown */
	int real_count = 0;	/* batteries that contributed data */
	int i;
	char charging = 0;	/* count of batteries currently charging */
	long charge = -1;	/* summed charge; -1 = unknown */
	long amperage = 0;	/* summed current (negative = discharging) */
	unsigned long btype = 0;	/* type of the first battery seen */

	info->battery_status = APM_BATTERY_STATUS_UNKNOWN;
	info->battery_flag = APM_BATTERY_FLAG_UNKNOWN;
	info->units = APM_UNITS_MINS;

	if (pmu_power_flags & PMU_PWR_AC_PRESENT)
		info->ac_line_status = APM_AC_ONLINE;
	else
		info->ac_line_status = APM_AC_OFFLINE;

	for (i=0; i<pmu_battery_count; i++) {
		if (pmu_batteries[i].flags & PMU_BATT_PRESENT) {
			batteries++;
			/* First present battery switches the unknown (-1)
			 * sentinels to real accumulators.
			 */
			if (percentage < 0)
				percentage = 0;
			if (charge < 0)
				charge = 0;
			percentage += (pmu_batteries[i].charge * 100) /
				pmu_batteries[i].max_charge;
			charge += pmu_batteries[i].charge;
			amperage += pmu_batteries[i].amperage;
			if (btype == 0)
				btype = (pmu_batteries[i].flags & PMU_BATT_TYPE_MASK);
			real_count++;
			if ((pmu_batteries[i].flags & PMU_BATT_CHARGING))
				charging++;
		}
	}
	/* No battery at all: must be running on mains. */
	if (batteries == 0)
		info->ac_line_status = APM_AC_ONLINE;

	if (real_count) {
		/* Only estimate time remaining while discharging.  The
		 * 59 / 16440 factors scale charge/current to minutes per
		 * battery type -- units are not obvious from here; see
		 * the PMU battery driver for the raw units.
		 */
		if (amperage < 0) {
			if (btype == PMU_BATT_TYPE_SMART)
				time_units = (charge * 59) / (amperage * -1);
			else
				time_units = (charge * 16440) / (amperage * -60);
		}
		percentage /= real_count;	/* average across batteries */
		if (charging > 0) {
			info->battery_status = APM_BATTERY_STATUS_CHARGING;
			info->battery_flag = APM_BATTERY_FLAG_CHARGING;
		} else if (percentage <= APM_CRITICAL) {
			info->battery_status = APM_BATTERY_STATUS_CRITICAL;
			info->battery_flag = APM_BATTERY_FLAG_CRITICAL;
		} else if (percentage <= APM_LOW) {
			info->battery_status = APM_BATTERY_STATUS_LOW;
			info->battery_flag = APM_BATTERY_FLAG_LOW;
		} else {
			info->battery_status = APM_BATTERY_STATUS_HIGH;
			info->battery_flag = APM_BATTERY_FLAG_HIGH;
		}
	}

	info->battery_life = percentage;
	info->time = time_units;
}
/* Register the PMU backend with the APM emulation layer. */
static int __init apm_emu_init(void)
{
	apm_get_power_status = pmu_apm_get_power_status;
	printk(KERN_INFO "apm_emu: PMU APM Emulation initialized.\n");
	return 0;
}
/* Unregister our hook, but only if it is still the one installed. */
static void __exit apm_emu_exit(void)
{
	if (apm_get_power_status == pmu_apm_get_power_status)
		apm_get_power_status = NULL;
	printk(KERN_INFO "apm_emu: PMU APM Emulation removed.\n");
}
module_init(apm_emu_init);
module_exit(apm_emu_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("APM emulation for PowerMac");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tommypacker/android_kernel_lge_msm8974 | drivers/macintosh/apm_emu.c | 14967 | 3225 | /*
* APM emulation for PMU-based machines
*
* Copyright 2001 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/apm-emulation.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#define APM_CRITICAL 10
#define APM_LOW 30
static void pmu_apm_get_power_status(struct apm_power_info *info)
{
int percentage = -1;
int batteries = 0;
int time_units = -1;
int real_count = 0;
int i;
char charging = 0;
long charge = -1;
long amperage = 0;
unsigned long btype = 0;
info->battery_status = APM_BATTERY_STATUS_UNKNOWN;
info->battery_flag = APM_BATTERY_FLAG_UNKNOWN;
info->units = APM_UNITS_MINS;
if (pmu_power_flags & PMU_PWR_AC_PRESENT)
info->ac_line_status = APM_AC_ONLINE;
else
info->ac_line_status = APM_AC_OFFLINE;
for (i=0; i<pmu_battery_count; i++) {
if (pmu_batteries[i].flags & PMU_BATT_PRESENT) {
batteries++;
if (percentage < 0)
percentage = 0;
if (charge < 0)
charge = 0;
percentage += (pmu_batteries[i].charge * 100) /
pmu_batteries[i].max_charge;
charge += pmu_batteries[i].charge;
amperage += pmu_batteries[i].amperage;
if (btype == 0)
btype = (pmu_batteries[i].flags & PMU_BATT_TYPE_MASK);
real_count++;
if ((pmu_batteries[i].flags & PMU_BATT_CHARGING))
charging++;
}
}
if (batteries == 0)
info->ac_line_status = APM_AC_ONLINE;
if (real_count) {
if (amperage < 0) {
if (btype == PMU_BATT_TYPE_SMART)
time_units = (charge * 59) / (amperage * -1);
else
time_units = (charge * 16440) / (amperage * -60);
}
percentage /= real_count;
if (charging > 0) {
info->battery_status = APM_BATTERY_STATUS_CHARGING;
info->battery_flag = APM_BATTERY_FLAG_CHARGING;
} else if (percentage <= APM_CRITICAL) {
info->battery_status = APM_BATTERY_STATUS_CRITICAL;
info->battery_flag = APM_BATTERY_FLAG_CRITICAL;
} else if (percentage <= APM_LOW) {
info->battery_status = APM_BATTERY_STATUS_LOW;
info->battery_flag = APM_BATTERY_FLAG_LOW;
} else {
info->battery_status = APM_BATTERY_STATUS_HIGH;
info->battery_flag = APM_BATTERY_FLAG_HIGH;
}
}
info->battery_life = percentage;
info->time = time_units;
}
/* Register the PMU backend with the APM emulation layer. */
static int __init apm_emu_init(void)
{
	apm_get_power_status = pmu_apm_get_power_status;
	printk(KERN_INFO "apm_emu: PMU APM Emulation initialized.\n");
	return 0;
}
/* Unregister our hook, but only if it is still the one installed. */
static void __exit apm_emu_exit(void)
{
	if (apm_get_power_status == pmu_apm_get_power_status)
		apm_get_power_status = NULL;
	printk(KERN_INFO "apm_emu: PMU APM Emulation removed.\n");
}
module_init(apm_emu_init);
module_exit(apm_emu_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("APM emulation for PowerMac");
MODULE_LICENSE("GPL");
| gpl-2.0 |
RealVNC/android-kernel-msm | drivers/macintosh/apm_emu.c | 14967 | 3225 | /*
* APM emulation for PMU-based machines
*
* Copyright 2001 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/apm-emulation.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#define APM_CRITICAL 10
#define APM_LOW 30
/*
 * Translate PMU battery/AC state into the generic APM power-status
 * structure.  Aggregates every battery flagged PMU_BATT_PRESENT.
 */
static void pmu_apm_get_power_status(struct apm_power_info *info)
{
	int percentage = -1;	/* summed (then averaged) charge %; -1 = unknown */
	int batteries = 0;	/* batteries physically present */
	int time_units = -1;	/* minutes remaining; -1 = unknown */
	int real_count = 0;	/* batteries that contributed data */
	int i;
	char charging = 0;	/* count of batteries currently charging */
	long charge = -1;	/* summed charge; -1 = unknown */
	long amperage = 0;	/* summed current (negative = discharging) */
	unsigned long btype = 0;	/* type of the first battery seen */

	info->battery_status = APM_BATTERY_STATUS_UNKNOWN;
	info->battery_flag = APM_BATTERY_FLAG_UNKNOWN;
	info->units = APM_UNITS_MINS;

	if (pmu_power_flags & PMU_PWR_AC_PRESENT)
		info->ac_line_status = APM_AC_ONLINE;
	else
		info->ac_line_status = APM_AC_OFFLINE;

	for (i=0; i<pmu_battery_count; i++) {
		if (pmu_batteries[i].flags & PMU_BATT_PRESENT) {
			batteries++;
			/* First present battery switches the unknown (-1)
			 * sentinels to real accumulators.
			 */
			if (percentage < 0)
				percentage = 0;
			if (charge < 0)
				charge = 0;
			percentage += (pmu_batteries[i].charge * 100) /
				pmu_batteries[i].max_charge;
			charge += pmu_batteries[i].charge;
			amperage += pmu_batteries[i].amperage;
			if (btype == 0)
				btype = (pmu_batteries[i].flags & PMU_BATT_TYPE_MASK);
			real_count++;
			if ((pmu_batteries[i].flags & PMU_BATT_CHARGING))
				charging++;
		}
	}
	/* No battery at all: must be running on mains. */
	if (batteries == 0)
		info->ac_line_status = APM_AC_ONLINE;

	if (real_count) {
		/* Only estimate time remaining while discharging.  The
		 * 59 / 16440 factors scale charge/current to minutes per
		 * battery type -- units are not obvious from here; see
		 * the PMU battery driver for the raw units.
		 */
		if (amperage < 0) {
			if (btype == PMU_BATT_TYPE_SMART)
				time_units = (charge * 59) / (amperage * -1);
			else
				time_units = (charge * 16440) / (amperage * -60);
		}
		percentage /= real_count;	/* average across batteries */
		if (charging > 0) {
			info->battery_status = APM_BATTERY_STATUS_CHARGING;
			info->battery_flag = APM_BATTERY_FLAG_CHARGING;
		} else if (percentage <= APM_CRITICAL) {
			info->battery_status = APM_BATTERY_STATUS_CRITICAL;
			info->battery_flag = APM_BATTERY_FLAG_CRITICAL;
		} else if (percentage <= APM_LOW) {
			info->battery_status = APM_BATTERY_STATUS_LOW;
			info->battery_flag = APM_BATTERY_FLAG_LOW;
		} else {
			info->battery_status = APM_BATTERY_STATUS_HIGH;
			info->battery_flag = APM_BATTERY_FLAG_HIGH;
		}
	}

	info->battery_life = percentage;
	info->time = time_units;
}
/* Register the PMU backend with the APM emulation layer. */
static int __init apm_emu_init(void)
{
	apm_get_power_status = pmu_apm_get_power_status;
	printk(KERN_INFO "apm_emu: PMU APM Emulation initialized.\n");
	return 0;
}
/* Unregister our hook, but only if it is still the one installed. */
static void __exit apm_emu_exit(void)
{
	if (apm_get_power_status == pmu_apm_get_power_status)
		apm_get_power_status = NULL;
	printk(KERN_INFO "apm_emu: PMU APM Emulation removed.\n");
}
module_init(apm_emu_init);
module_exit(apm_emu_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("APM emulation for PowerMac");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Quintin-Z/rt-thread | bsp/imx6sx/iMX6_Platform_SDK/board/mx6sdl/smart_device/rev_a_iomux/i2c1_iomux_config.c | 120 | 9726 | /*
* Copyright (c) 2012, Freescale Semiconductor, Inc.
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// File: i2c1_iomux_config.c
/* ------------------------------------------------------------------------------
* <auto-generated>
* This code was generated by a tool.
* Runtime Version:3.4.0.0
*
* Changes to this file may cause incorrect behavior and will be lost if
* the code is regenerated.
* </auto-generated>
* ------------------------------------------------------------------------------
*/
#include "iomux_config.h"
#include "registers/regsiomuxc.h"
// Function to configure IOMUXC for i2c1 module.
// Configure the IOMUXC so that the i2c1 module's SCL/SDA signals are
// routed to pads CSI0_DATA09 (ball N5) and CSI0_DATA08 (ball N6).
//
// Both pads use the same electrical profile: Schmitt-trigger input,
// 100K pull-up with the pull (not keeper) path selected, CMOS output
// (open-drain disabled here), 100 MHz speed, 40 Ohm drive, slow slew.
// SION is forced on for both pads -- presumably so the I2C block can
// always observe the pad level; confirm against the reference design.
void i2c1_iomux_config(void)
{
    // I2C1_SCL <- pad CSI0_DATA09: mux mode ALT4 selects instance
    // i2c1, signal I2C1_SCL (register IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA09,
    // 0x020E0088).
    HW_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA09_WR(
            BF_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA09_SION_V(ENABLED) |
            BF_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA09_MUX_MODE_V(ALT4));
    // Pad control for CSI0_DATA09 (IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09,
    // 0x020E039C): electrical profile described above.
    HW_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_WR(
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_HYS_V(ENABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_PUS_V(100K_OHM_PU) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_PUE_V(PULL) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_PKE_V(ENABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_ODE_V(DISABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_SPEED_V(100MHZ) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_DSE_V(40_OHM) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA09_SRE_V(SLOW));
    // Daisy chain: the I2C1_SCL input can come from either CSI0_DATA09
    // (ALT4) or EIM_DATA21 (ALT6); select CSI0_DATA09
    // (IOMUXC_I2C1_SCL_IN_SELECT_INPUT, 0x020E0868).
    HW_IOMUXC_I2C1_SCL_IN_SELECT_INPUT_WR(
            BF_IOMUXC_I2C1_SCL_IN_SELECT_INPUT_DAISY_V(CSI0_DATA09_ALT4));

    // I2C1_SDA <- pad CSI0_DATA08: mux mode ALT4 selects instance
    // i2c1, signal I2C1_SDA (register IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA08,
    // 0x020E0084).
    HW_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA08_WR(
            BF_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA08_SION_V(ENABLED) |
            BF_IOMUXC_SW_MUX_CTL_PAD_CSI0_DATA08_MUX_MODE_V(ALT4));
    // Pad control for CSI0_DATA08 (IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08,
    // 0x020E0398): same electrical profile as SCL.
    HW_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_WR(
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_HYS_V(ENABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_PUS_V(100K_OHM_PU) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_PUE_V(PULL) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_PKE_V(ENABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_ODE_V(DISABLED) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_SPEED_V(100MHZ) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_DSE_V(40_OHM) |
            BF_IOMUXC_SW_PAD_CTL_PAD_CSI0_DATA08_SRE_V(SLOW));
    // Daisy chain: the I2C1_SDA input can come from either CSI0_DATA08
    // (ALT4) or EIM_DATA28 (ALT1); select CSI0_DATA08
    // (IOMUXC_I2C1_SDA_IN_SELECT_INPUT, 0x020E086C).
    HW_IOMUXC_I2C1_SDA_IN_SELECT_INPUT_WR(
            BF_IOMUXC_I2C1_SDA_IN_SELECT_INPUT_DAISY_V(CSI0_DATA08_ALT4));
}
| gpl-2.0 |
alehkot/tsp | node_modules/node-ffi/deps/pthreads-win32/tests/exit5.c | 120 | 4803 | /*
* File: exit5.c
*
*
* --------------------------------------------------------------------------
*
* Pthreads-win32 - POSIX Threads Library for Win32
* Copyright(C) 1998 John E. Bossom
* Copyright(C) 1999,2005 Pthreads-win32 contributors
*
* Contact Email: rpj@callisto.canberra.edu.au
*
* The current list of contributors is contained
* in the file CONTRIBUTORS included with the source
* code distribution. The list can also be seen at the
* following World Wide Web location:
* http://sources.redhat.com/pthreads-win32/contributors.html
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library in the file COPYING.LIB;
* if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* --------------------------------------------------------------------------
*
* Test Synopsis: Test calling pthread_exit from a Win32 thread
* having created an implicit POSIX handle for it.
*
* Test Method (Validation or Falsification):
* - Validate return value and that POSIX handle is created and destroyed.
*
* Requirements Tested:
* -
*
* Features Tested:
* -
*
* Cases Tested:
* -
*
* Description:
* -
*
* Environment:
* -
*
* Input:
* - None.
*
* Output:
* - File name, Line number, and failed expression on failure.
* - No output on success.
*
* Assumptions:
* - have working pthread_create, pthread_self, pthread_mutex_lock/unlock
* pthread_testcancel, pthread_cancel
*
* Pass Criteria:
* - Process returns zero exit status.
*
* Fail Criteria:
* - Process returns non-zero exit status.
*/
#include "test.h"
#ifndef _UWIN
#include <process.h>
#endif
/*
* Create NUMTHREADS threads in addition to the Main thread.
*/
/* Number of worker threads created in addition to main (thread 0). */
enum {
	NUMTHREADS = 4
};
typedef struct bag_t_ bag_t;

/* Per-thread state shared between main() and each Win32 worker thread. */
struct bag_t_ {
	int threadnum;		/* index of this thread's slot in threadbag[] */
	int started;		/* set to 1 by the thread once it is running */
	/* Add more per-thread state variables here */
	int count;		/* never written by the thread; printed on failure */
	pthread_t self;		/* implicit POSIX handle captured via pthread_self() */
};
/* Slot 0 is unused; threads are numbered 1..NUMTHREADS. */
static bag_t threadbag[NUMTHREADS + 1];
/*
 * Native Win32 thread body.  Records its startup in the bag, acquires an
 * implicit POSIX handle for itself, then terminates via pthread_exit()
 * rather than returning — the exit code is checked later by main().
 */
#if ! defined (__MINGW32__) || defined (__MSVCRT__)
unsigned __stdcall
#else
void
#endif
Win32thread(void * arg)
{
	int result = 1;
	bag_t * bag = (bag_t *) arg;

	assert(bag == &threadbag[bag->threadnum]);
	assert(bag->started == 0);
	bag->started = 1;

	/* pthread_self() creates an implicit POSIX handle for this Win32 thread. */
	assert((bag->self = pthread_self()).p != NULL);
	assert(pthread_kill(bag->self, 0) == 0);

	/*
	 * Doesn't return.
	 */
	pthread_exit((void *)(size_t)result);

	return 0;	/* not reached; satisfies the non-void signature */
}
/*
 * Spawn NUMTHREADS native Win32 threads, then verify that each one
 * started, exited with code 1 via pthread_exit(), and that its implicit
 * POSIX handle was destroyed (pthread_kill reports ESRCH).
 */
int
main()
{
	int failed = 0;
	int i;
	HANDLE h[NUMTHREADS + 1];
	unsigned thrAddr; /* Dummy variable to pass a valid location to _beginthreadex (Win98). */

	for (i = 1; i <= NUMTHREADS; i++)
	{
		threadbag[i].started = 0;
		threadbag[i].threadnum = i;
#if ! defined (__MINGW32__) || defined (__MSVCRT__)
		h[i] = (HANDLE) _beginthreadex(NULL, 0, Win32thread, (void *) &threadbag[i], 0, &thrAddr);
#else
		h[i] = (HANDLE) _beginthread(Win32thread, 0, (void *) &threadbag[i]);
#endif
	}

	/*
	 * Code to control or manipulate child threads should probably go here.
	 */
	Sleep(500);

	/*
	 * Give threads time to run.
	 */
	Sleep(NUMTHREADS * 100);

	/*
	 * Standard check that all threads started.
	 */
	for (i = 1; i <= NUMTHREADS; i++)
	{
		if (!threadbag[i].started)
		{
			failed |= !threadbag[i].started;
			fprintf(stderr, "Thread %d: started %d\n", i, threadbag[i].started);
		}
	}

	assert(!failed);

	/*
	 * Check any results here. Set "failed" and only print output on failure.
	 */
	failed = 0;
	for (i = 1; i <= NUMTHREADS; i++)
	{
		int fail = 0;
		int result = 0;

#if ! defined (__MINGW32__) || defined (__MSVCRT__)
		/* The thread exit code should be the 1 passed to pthread_exit(). */
		assert(GetExitCodeThread(h[i], (LPDWORD) &result) == TRUE);
#else
		/*
		 * Can't get a result code.
		 */
		result = 1;
#endif
		/* The implicit POSIX handle must be gone after the thread exited. */
		assert(threadbag[i].self.p != NULL && pthread_kill(threadbag[i].self, 0) == ESRCH);

		fail = (result != 1);

		if (fail)
		{
			fprintf(stderr, "Thread %d: started %d: count %d\n",
				i,
				threadbag[i].started,
				threadbag[i].count);
		}
		failed = (failed || fail);
	}

	assert(!failed);

	/*
	 * Success.
	 */
	return 0;
}
| gpl-2.0 |
wyldstallyns/Roughneck_kernel_m8-GPE-5.1 | drivers/misc/vib-triggers.c | 120 | 5607 | /*
* Vibrator Triggers Core
*
* Copyright (C) 2013 HTC Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/vibtrig.h>
/*
* Singleton of vib_trigger, and a lock to protect this object
*/
static DECLARE_RWSEM(triggers_container_lock);
static struct vib_trigger* trigger_singleton = NULL;
/*
 * Singleton of vib_trigger_enabler, and a lock to protect this object
 *
 * NOTE(review): code below takes triggers_container_lock before
 * enabler->trigger_lock; keep that order when adding new paths.
 */
static DECLARE_RWSEM(enabler_container_lock);
static struct vib_trigger_enabler* enabler_singleton = NULL;
/* Simple Vibrator Trigger Interface */
void vib_trigger_enabler_register(struct vib_trigger_enabler *enabler)
{
int err;
init_rwsem(&enabler->trigger_lock);
/* the default place to assign enabler_singleton */
down_write(&enabler_container_lock);
if (enabler_singleton != NULL) {
err = -EEXIST;
} else {
enabler_singleton = enabler;
vib_trigger_set_default(enabler);
err = 0;
}
up_write(&enabler_container_lock);
printk(KERN_DEBUG
"Registered vibrator trigger enabler: %s\n",
enabler->name);
}
EXPORT_SYMBOL_GPL(vib_trigger_enabler_register);
/*
 * Counterpart of vib_trigger_enabler_register(): detach any bound
 * trigger, stop the motor, and vacate the singleton slot.
 */
void vib_trigger_enabler_unregister(struct vib_trigger_enabler *enabler)
{
	/* Unbind under trigger_lock, as vib_trigger_set() requires. */
	down_write(&enabler->trigger_lock);
	if (enabler->trigger)
		vib_trigger_set(enabler, NULL);
	up_write(&enabler->trigger_lock);

	/* Stop any motion */
	enabler->enable(enabler, 0);

	/* NOTE(review): the slot is cleared unconditionally, even if this
	 * enabler was never the registered singleton — confirm callers
	 * only unregister what they registered. */
	down_write(&enabler_container_lock);
	enabler_singleton = NULL;
	up_write(&enabler_container_lock);
}
EXPORT_SYMBOL_GPL(vib_trigger_enabler_unregister);
/* Caller must ensure enabler->trigger_lock held */
/*
 * Bind 'trigger' to 'enabler' (or unbind with trigger == NULL).  Both
 * sides of the link are updated: trigger->enabler under the trigger's
 * own trig_container_lock, and enabler->trigger under the caller-held
 * trigger_lock.  Unbinding also forces the motor off.
 */
void vib_trigger_set(struct vib_trigger_enabler *enabler, struct vib_trigger *trigger)
{
	unsigned long flags;

	/* Remove any existing trigger */
	if (enabler->trigger) {
		write_lock_irqsave(&enabler->trigger->trig_container_lock, flags);
		enabler->trigger->enabler = NULL;
		write_unlock_irqrestore(&enabler->trigger->trig_container_lock, flags);
		enabler->trigger = NULL;
		enabler->enable(enabler, 0);
	}

	/* Bind trigger/enabler */
	if (trigger) {
		write_lock_irqsave(&trigger->trig_container_lock, flags);
		trigger->enabler = enabler;
		write_unlock_irqrestore(&trigger->trig_container_lock, flags);
		enabler->trigger = trigger;
	}
}
EXPORT_SYMBOL_GPL(vib_trigger_set);
/*
 * Bind the enabler to the registered trigger singleton, but only when
 * the enabler names a default trigger and that name matches the
 * singleton's.  No-op otherwise.
 */
void vib_trigger_set_default(struct vib_trigger_enabler *enabler)
{
	const char *wanted = enabler->default_trigger;

	if (!wanted)
		return;

	down_read(&triggers_container_lock);
	down_write(&enabler->trigger_lock);

	if (trigger_singleton != NULL &&
	    strcmp(wanted, trigger_singleton->name) == 0)
		vib_trigger_set(enabler, trigger_singleton);

	up_write(&enabler->trigger_lock);
	up_read(&triggers_container_lock);
}
EXPORT_SYMBOL_GPL(vib_trigger_set_default);
void vib_trigger_event(struct vib_trigger *trigger, int value)
{
if (!trigger)
return;
read_lock(&trigger->trig_container_lock);
if (trigger->enabler)
trigger->enabler->enable(trigger->enabler, value);
read_unlock(&trigger->trig_container_lock);
}
EXPORT_SYMBOL_GPL(vib_trigger_event);
/*
 * Allocate and register a named trigger as the trigger singleton, then
 * hook it up to the current enabler if that enabler's default_trigger
 * matches by name.  *tp receives the trigger on success, NULL on any
 * failure (slot occupied or allocation failure).
 */
void vib_trigger_register_simple(const char *name, struct vib_trigger **tp)
{
	struct vib_trigger *trigger;
	int err;

	trigger = kzalloc(sizeof(struct vib_trigger), GFP_KERNEL);
	if (trigger) {
		trigger->name = name;
		rwlock_init(&trigger->trig_container_lock);
		trigger->enabler = NULL;

		/* the only place to assign trigger_singleton */
		down_write(&triggers_container_lock);
		if (trigger_singleton != NULL) {
			err = -EEXIST;
		} else {
			trigger_singleton = trigger;
			err = 0;
		}
		up_write(&triggers_container_lock);

		if (err == 0) {
			/* Register with any vibrator trigger enabler that has this as a default trigger */
			down_read(&enabler_container_lock);
			if (enabler_singleton != NULL) {
				down_write(&enabler_singleton->trigger_lock);
				if (!enabler_singleton->trigger && enabler_singleton->default_trigger &&
				    !strcmp(enabler_singleton->default_trigger, trigger->name))
					vib_trigger_set(enabler_singleton, trigger);
				up_write(&enabler_singleton->trigger_lock);
			}
			up_read(&enabler_container_lock);
		}

		/* On failure the trigger is freed and *tp ends up NULL. */
		if (err < 0) {
			kfree(trigger);
			trigger = NULL;
			printk(KERN_WARNING
			       "Vibrator trigger %s failed to register"
			       " (%d)\n", name, err);
		}
	} else
		printk(KERN_WARNING
		       "Vibrator trigger %s failed to register"
		       " (no memory)\n", name);

	*tp = trigger;
}
EXPORT_SYMBOL_GPL(vib_trigger_register_simple);
/*
 * Undo vib_trigger_register_simple(): vacate the singleton slot, detach
 * any enabler still pointing at this trigger, and free it.  NULL is
 * accepted and ignored.
 */
void vib_trigger_unregister_simple(struct vib_trigger *trigger)
{
	if (trigger)
	{
		struct vib_trigger_enabler* enabler;

		/* Remove from the Singleton of vib triggers */
		down_write(&triggers_container_lock);
		trigger_singleton = NULL;
		up_write(&triggers_container_lock);

		/* Remove anyone actively using this trigger */
		/* NOTE(review): trigger->enabler is read here without taking
		 * trigger->trig_container_lock — confirm no concurrent
		 * vib_trigger_set() can run at unregister time. */
		enabler = trigger->enabler;
		if (enabler) {
			down_write(&enabler->trigger_lock);
			if (enabler->trigger == trigger)
				enabler->trigger = NULL;
			up_write(&enabler->trigger_lock);
		}
	}
	/* kfree(NULL) is a no-op, so the unconditional free is safe. */
	kfree(trigger);
}
EXPORT_SYMBOL_GPL(vib_trigger_unregister_simple);
MODULE_AUTHOR("HTC");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Vibrator Triggers Core");
| gpl-2.0 |
mayli/wrapfs-latest | net/sched/cls_u32.c | 120 | 17817 | /*
* net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* The filters are packed to hash tables of key nodes
* with a set of 32bit key/mask pairs at every node.
* Nodes reference next level hash tables etc.
*
* This scheme is the best universal classifier I managed to
* invent; it is not super-fast, but it is not slow (provided you
* program it correctly), and general enough. And its relative
* speed grows as the number of rules becomes larger.
*
* It seems that it represents the best middle point between
* speed and manageability both by human and by machine.
*
* It is especially useful for link sharing combined with QoS;
* pure RSVP doesn't need such a general approach and can use
* much simpler (and faster) schemes, sort of cls_rsvp.c.
*
* JHS: We should remove the CONFIG_NET_CLS_IND from here
* eventually when the meta match extension is made available
*
* nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/* One filter node: a set of key/mask matches plus result/actions. */
struct tc_u_knode {
	struct tc_u_knode *next;	/* next node in the same hash bucket */
	u32 handle;			/* htid (upper bits) | node id */
	struct tc_u_hnode *ht_up;	/* hash table this node lives in */
	struct tcf_exts exts;		/* attached actions/policers */
#ifdef CONFIG_NET_CLS_IND
	int ifindex;			/* required input device, 0 = any */
#endif
	u8 fshift;			/* shift derived from sel.hmask (see u32_change) */
	struct tcf_result res;		/* classification result on match */
	struct tc_u_hnode *ht_down;	/* linked child hash table, if any */
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt *pf;		/* per-key hit counters */
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark mark;	/* skb->mark match, checked before keys */
#endif
	struct tc_u32_sel sel;		/* selector; keys[] allocated inline after it */
};

/* A hash table of knodes.  divisor is the highest bucket index
 * (buckets 0..divisor); the root table has divisor 0. */
struct tc_u_hnode {
	struct tc_u_hnode *next;	/* list of all tables in tc_u_common */
	u32 handle;			/* htid of this table */
	u32 prio;
	struct tc_u_common *tp_c;
	int refcnt;
	unsigned int divisor;
	struct tc_u_knode *ht[1];	/* variable-length bucket array */
};

/* State shared by all u32 instances attached to one qdisc. */
struct tc_u_common {
	struct tc_u_hnode *hlist;	/* all hash tables */
	struct Qdisc *q;
	int refcnt;
	u32 hgenerator;			/* rolling generator for new htids */
};
/*
 * Fold a 32-bit key into a bucket index: mask with the selector's hash
 * mask, convert to host order, and shift down by fshift.
 */
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	return ntohl(key & sel->hmask) >> fshift;
}
/*
 * Classify a packet by walking the hierarchy of key-node hash tables.
 * An explicit stack bounded by TC_U32_MAXDEPTH replaces recursion so a
 * misconfigured loop of linked tables cannot run away.  Returns the
 * action result on a terminal match, -1 when nothing matches.
 */
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		/* skb mark must match before any key comparison happens */
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		/* every key of the selector must match, else try next node */
		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			/* guard against integer overflow of the offset sum */
			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH: descend into the linked hash table */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		/* compute the (possibly packet-derived) next-header offset */
		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP: resume at the parent node, restoring its base offset */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
/* Find the hash table with the given handle, or NULL if none exists. */
static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *cur;

	for (cur = tp_c->hlist; cur != NULL; cur = cur->next) {
		if (cur->handle == handle)
			return cur;
	}

	return NULL;
}
/*
 * Find the key node with the given handle inside hash table 'ht'.
 * Returns NULL when the bucket index is out of range or no node in the
 * bucket carries that handle.
 */
static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int bucket = TC_U32_HASH(handle);
	struct tc_u_knode *node;

	if (bucket > ht->divisor)
		return NULL;

	for (node = ht->ht[bucket]; node != NULL; node = node->next) {
		if (node->handle == handle)
			return node;
	}

	return NULL;
}
/*
 * Resolve a handle to an internal object: a hash table when the key part
 * is zero, otherwise the key node inside that table.  Returns 0 when the
 * table (or node) does not exist.
 */
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;

	ht = (TC_U32_HTID(handle) == TC_U32_ROOT)
		? tp->root
		: u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
/* u32 keeps no per-lookup references, so put is deliberately a no-op. */
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
/*
 * Allocate a fresh hash-table id.  The generator cycles through the
 * 12-bit space (with the 0x800 bit forced on), trying up to 0x800
 * candidates before giving up.  Returns the htid positioned in the
 * upper bits of a handle, or 0 when the space is exhausted.
 */
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
/*
 * Instantiate one u32 classifier: allocate its root hash table and hook
 * it into the qdisc-wide tc_u_common, creating that shared state on
 * first use.  Returns 0 or -ENOBUFS on allocation failure.
 */
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;	/* root table has a single bucket */
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}
/*
 * Free one key node: unbind its result, destroy its actions, drop the
 * reference it held on any linked child table, then free the node.
 */
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}
/*
 * Unlink 'key' from its bucket chain (under the tree lock) and destroy
 * it.  Warns if the node cannot be found in its own table, which would
 * indicate corrupted internal state.
 */
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
/* Destroy every key node in every bucket of 'ht', leaving it empty. */
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	unsigned int bucket;
	struct tc_u_knode *node;

	/* divisor is the highest valid bucket index, hence <= */
	for (bucket = 0; bucket <= ht->divisor; bucket++) {
		while ((node = ht->ht[bucket]) != NULL) {
			ht->ht[bucket] = node->next;
			u32_destroy_key(tp, node);
		}
	}
}
/*
 * Empty a hash table, unlink it from the common list and free it.
 * The caller must have dropped all references (refcnt expected 0).
 */
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	/* table not on the list — internal inconsistency */
	WARN_ON(1);
	return -ENOENT;
}
/*
 * Tear down one tcf_proto instance.  The root table goes away when its
 * refcount drops to zero; the shared tc_u_common (and every remaining
 * table) is freed only when the last instance detaches.
 */
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		/* first drop every knode, then free the now-empty tables */
		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
/*
 * Delete one filter element: a key node (handle has a key part) or a
 * whole hash table.  The root table cannot be deleted directly, and a
 * table that is still linked from key nodes returns -EBUSY.
 */
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
#define NR_U32_NODE (1<<12)

/*
 * Pick an unused node id within the bucket that 'handle' hashes to.
 * Ids from 0x800 upward are preferred; lower ids (from 1) are used once
 * those are exhausted.  On bitmap allocation failure or a completely
 * full bucket, falls back to handle | 0xFFF.
 */
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	/* mark every id already taken in this bucket */
	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
/* Netlink attribute validation policy for the TCA_U32_* options. */
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
/*
 * Apply netlink-supplied parameters (link target, classid, input device,
 * actions) to key node 'n'.  Actions are validated up front so that any
 * later failure leaves the node unmodified.
 */
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		/* a link target must be a hash table, not a key node */
		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		/* swap the link under lock, then drop the old reference */
		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
/*
 * Create or modify a filter element from a netlink request.  Three
 * cases: (1) *arg non-NULL — update the parameters of an existing key
 * node; (2) TCA_U32_DIVISOR present — create a new hash table; (3)
 * otherwise — allocate and insert a new key node into the table chosen
 * by TCA_U32_HASH (or the root table).
 */
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	/* Case 1: modify an existing key node in place. */
	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(net, tp, base, n->ht_up, n, tb,
				     tca[TCA_RATE], ovr);
	}

	/* Case 2: create a new hash table. */
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		/* stored divisor is (buckets - 1) and is capped at 0x100 */
		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	/* Case 3: pick the target table for a new key node. */
	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	/* A user-supplied handle must agree with the chosen table's htid. */
	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	/* keys[] is allocated inline after the node's embedded selector */
	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode **ins;

		/* insert keeping the bucket chain sorted by node id */
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
/*
 * Enumerate all filter elements of this instance for the walker:
 * each hash table (matching our prio) first, then every key node in
 * every bucket.  Honors the walker's skip count and stop flag.
 */
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
/*
 * Dump one filter element to a netlink message: just the divisor for a
 * hash table, or the full selector/classid/link/mark/actions set for a
 * key node.  Returns the new skb length, or -1 on overflow (with the
 * partially written nest cancelled).
 */
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	/* a zero key part means fh is actually a hash table */
	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* Classifier operations table registered with the traffic-control core. */
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
/* Module init: report the compiled-in options, then register the ops. */
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}
/* Module exit: deregister the classifier from the TC core. */
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}
module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
| gpl-2.0 |
AD5GB/android_kernel_googlesource-common | drivers/usb/host/xhci-plat.c | 632 | 4776 | /*
* xhci-plat.c - xHCI host controller driver platform Bus Glue.
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A lot of code borrowed from the Linux xHCI driver.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "xhci.h"
/* Quirk hook handed to xhci_gen_setup() during controller setup. */
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
	/*
	 * As of now platform drivers don't provide MSI support so we ensure
	 * here that the generic code does not try to make a pci_dev from our
	 * dev struct in order to setup MSI
	 */
	xhci->quirks |= XHCI_BROKEN_MSI;
}
/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
	/* Delegate to the generic xHCI setup, injecting our quirk hook. */
	return xhci_gen_setup(hcd, xhci_plat_quirks);
}
/* hc_driver for the platform xHCI glue; all ops forward to xhci core. */
static const struct hc_driver xhci_plat_xhci_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd *),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset =		xhci_plat_setup,
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/* Root hub support */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
};
/*
 * Probe: claim IRQ and MMIO resources, create and register the USB 2.0
 * primary HCD, then create and register the shared USB 3.0 HCD on top
 * of it.  Errors unwind in strict reverse order via the labels below.
 */
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = dev_get_drvdata(&pdev->dev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
/* Remove: tear down both HCDs in reverse order of probe(). */
static int xhci_plat_remove(struct platform_device *dev)
{
	struct usb_hcd	*hcd = platform_get_drvdata(dev);
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);

	/* shared USB 3.0 HCD goes first, then the primary USB 2.0 HCD */
	usb_remove_hcd(xhci->shared_hcd);
	usb_put_hcd(xhci->shared_hcd);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	usb_put_hcd(hcd);
	kfree(xhci);

	return 0;
}
/* Platform driver bound by the "xhci-hcd" device name. */
static struct platform_driver usb_xhci_driver = {
	.probe	= xhci_plat_probe,
	.remove	= xhci_plat_remove,
	.driver	= {
		.name = "xhci-hcd",
	},
};
MODULE_ALIAS("platform:xhci-hcd");
/* Register the platform glue with the driver core (called by xhci core). */
int xhci_register_plat(void)
{
	return platform_driver_register(&usb_xhci_driver);
}

/* Counterpart of xhci_register_plat(). */
void xhci_unregister_plat(void)
{
	platform_driver_unregister(&usb_xhci_driver);
}
| gpl-2.0 |
Krylon360/SGS4G_Kernel_GB | drivers/net/irda/ali-ircc.c | 888 | 57667 | /*********************************************************************
*
* Filename: ali-ircc.h
* Version: 0.5
* Description: Driver for the ALI M1535D and M1543C FIR Controller
* Status: Experimental.
* Author: Benjamin Kong <benjamin_kong@ali.com.tw>
* Created at: 2000/10/16 03:46PM
* Modified at: 2001/1/3 02:55PM
* Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
* Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563
* Modified by: Clear Zhang <clear_zhang@ali.com.tw>
*
* Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include "ali-ircc.h"
#define CHIP_IO_EXTENT 8
#define BROKEN_DONGLE_ID
#define ALI_IRCC_DRIVER_NAME "ali-ircc"
/* Power Management */
static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
static int ali_ircc_resume(struct platform_device *dev);
/* Platform driver: only suspend/resume hooks; devices are set up manually. */
static struct platform_driver ali_ircc_driver = {
	.suspend	= ali_ircc_suspend,
	.resume		= ali_ircc_resume,
	.driver		= {
		.name	= ALI_IRCC_DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Module parameters */
static int qos_mtt_bits = 0x07; /* 1 ms or more */

/* Use BIOS settings by default, but user may supply module parameters */
static unsigned int io[] = { ~0, ~0, ~0, ~0 };	/* ~0 = take value from BIOS */
static unsigned int irq[] = { 0, 0, 0, 0 };
static unsigned int dma[] = { 0, 0, 0, 0 };
static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
/* These are the currently known ALi south-bridge chipsets, the only one difference
 * is that M1543C doesn't support HP HDSL-3600
 */
static ali_chip_t chips[] =
{
	/* Fields per entry: name, candidate config ports, then chip-specific
	 * constants and the probe/init hooks — see ali_chip_t in ali-ircc.h
	 * for their exact meaning. */
	{ "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
	{ "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
	{ "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
	{ NULL }
};
/* Max 4 instances for now */
static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };

/* Dongle Types */
/* NOTE(review): presumably indexed by the id returned from
 * ali_ircc_read_dongle_id() — confirm against that function. */
static char *dongle_types[] = {
	"TFDS6000",
	"HP HSDL-3600",
	"HP HSDL-1100",
	"No dongle connected",
};
/* Some prototypes */
static int ali_ircc_open(int i, chipio_t *info);
static int ali_ircc_close(struct ali_ircc_cb *self);
static int ali_ircc_setup(chipio_t *info);
static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
static int ali_ircc_net_open(struct net_device *dev);
static int ali_ircc_net_close(struct net_device *dev);
static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
/* SIR function */
static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
/* FIR function */
static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
struct net_device *dev);
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
/* My Function */
static int ali_ircc_read_dongle_id (int i, chipio_t *info);
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
/* ALi chip function */
static void SIR2FIR(int iobase);
static void FIR2SIR(int iobase);
static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
/*
 * Function ali_ircc_init ()
 *
 * Initialize chip. Find out what kinds of chips we are dealing with
 * and their configuration register addresses. For every chip that
 * answers with its expected ID value, open a driver instance.
 *
 * Returns 0 if at least one instance was opened, otherwise a negative
 * errno (-ENODEV when no chip was found).
 */
static int __init ali_ircc_init(void)
{
ali_chip_t *chip;
chipio_t info;
int ret;
int cfg, cfg_base;
int reg, revision;
int i = 0;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
ret = platform_driver_register(&ali_ircc_driver);
if (ret) {
IRDA_ERROR("%s, Can't register driver!\n",
ALI_IRCC_DRIVER_NAME);
return ret;
}
ret = -ENODEV;
/* Probe for all the ALi chipsets we know about */
/* NOTE(review): i is incremented both in this loop header (per chip
 * type) and again below on a successful ID match, so dev_self[] slots
 * can be skipped; preserved as-is since module parameters io/irq/dma
 * are indexed the same way. */
for (chip= chips; chip->name; chip++, i++)
{
IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
/* Try all config registers for this chip */
for (cfg=0; cfg<2; cfg++)
{
cfg_base = chip->cfg[cfg];
if (!cfg_base)
continue;
memset(&info, 0, sizeof(chipio_t));
info.cfg_base = cfg_base;
info.fir_base = io[i];
info.dma = dma[i];
info.irq = irq[i];
/* Enter Configuration mode by writing the two magic bytes */
outb(chip->entr1, cfg_base);
outb(chip->entr2, cfg_base);
/* Select Logical Device 5 Registers (UART2) */
outb(0x07, cfg_base);
outb(0x05, cfg_base+1);
/* Read Chip Identification Register */
outb(chip->cid_index, cfg_base);
reg = inb(cfg_base+1);
if (reg == chip->cid_value)
{
IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
outb(0x1F, cfg_base);
revision = inb(cfg_base+1);
IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
chip->name, revision);
/*
 * If the user supplies the base address, then
 * we init the chip, if not we probe the values
 * set by the BIOS
 */
if (io[i] < 2000)
{
chip->init(chip, &info);
}
else
{
chip->probe(chip, &info);
}
if (ali_ircc_open(i, &info) == 0)
ret = 0;
i++;
}
else
{
IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
}
/* Exit configuration mode */
outb(0xbb, cfg_base);
}
}
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
/* Nothing opened: undo the platform driver registration */
if (ret)
platform_driver_unregister(&ali_ircc_driver);
return ret;
}
/*
 * Function ali_ircc_cleanup ()
 *
 * Module exit: close every driver instance that ali_ircc_init() managed
 * to open, then drop the platform driver registration.
 */
static void __exit ali_ircc_cleanup(void)
{
	int idx;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);

	/* Tear down all configured instances (unused slots are NULL) */
	for (idx = 0; idx < ARRAY_SIZE(dev_self); idx++) {
		if (dev_self[idx] != NULL)
			ali_ircc_close(dev_self[idx]);
	}

	platform_driver_unregister(&ali_ircc_driver);

	IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
}
/* netdev ops for SIR (<= 115200 baud) operation. The SIR and FIR tables
 * differ only in ndo_start_xmit; ali_ircc_change_speed() swaps between
 * them when crossing the 115200 baud boundary. */
static const struct net_device_ops ali_ircc_sir_ops = {
.ndo_open = ali_ircc_net_open,
.ndo_stop = ali_ircc_net_close,
.ndo_start_xmit = ali_ircc_sir_hard_xmit,
.ndo_do_ioctl = ali_ircc_net_ioctl,
};
/* netdev ops for MIR/FIR (> 115200 baud) operation */
static const struct net_device_ops ali_ircc_fir_ops = {
.ndo_open = ali_ircc_net_open,
.ndo_stop = ali_ircc_net_close,
.ndo_start_xmit = ali_ircc_fir_hard_xmit,
.ndo_do_ioctl = ali_ircc_net_ioctl,
};
/*
 * Function ali_ircc_open (int i, chipio_t *inf)
 *
 * Open driver instance: allocate the control block and net_device,
 * reserve the I/O region, allocate coherent DMA rx/tx buffers, register
 * the netdev, and record the attached dongle type.
 *
 * Returns 0 on success; -ENOMEM / -ENODEV / -1 on failure, unwinding
 * partially-acquired resources via the err_out* labels.
 */
static int ali_ircc_open(int i, chipio_t *info)
{
struct net_device *dev;
struct ali_ircc_cb *self;
int dongle_id;
int err;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
if (i >= ARRAY_SIZE(dev_self)) {
IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
__func__);
return -ENOMEM;
}
/* Set FIR FIFO and DMA Threshold; bails out before any allocation */
if ((ali_ircc_setup(info)) == -1)
return -1;
dev = alloc_irdadev(sizeof(*self));
if (dev == NULL) {
IRDA_ERROR("%s(), can't allocate memory for control block!\n",
__func__);
return -ENOMEM;
}
self = netdev_priv(dev);
self->netdev = dev;
spin_lock_init(&self->lock);
/* Need to store self somewhere */
dev_self[i] = self;
self->index = i;
/* Initialize IO */
self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */
self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
self->io.irq = info->irq;
self->io.fir_ext = CHIP_IO_EXTENT;
self->io.dma = info->dma;
self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext,
ALI_IRCC_DRIVER_NAME)) {
IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
/* The only value we must override is the baudrate */
self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
self->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&self->qos);
/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
self->rx_buff.truesize = 14384;
self->tx_buff.truesize = 14384;
/* Allocate memory if needed */
/* NOTE(review): dma_alloc_coherent() is called with a NULL device
 * pointer here (legacy ISA-style usage) — kept as-is. */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
}
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
self->tx_buff.data = self->tx_buff.head;
self->rx_buff.data = self->rx_buff.head;
/* Reset Tx queue info */
self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
self->tx_fifo.tail = self->tx_buff.head;
/* Override the network functions we need to use; start in SIR mode */
dev->netdev_ops = &ali_ircc_sir_ops;
err = register_netdev(dev);
if (err) {
IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto err_out4;
}
IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
/* Check dongle id; result is 0..3 and indexes dongle_types[] */
dongle_id = ali_ircc_read_dongle_id(i, info);
IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
self->io.dongle_id = dongle_id;
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
/* Unwind in reverse order of acquisition */
err_out4:
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
dev_self[i] = NULL;
free_netdev(dev);
return err;
}
/*
 * Function ali_ircc_close (self)
 *
 * Close driver instance: unregister the netdev first (stops new traffic),
 * then release the I/O region and DMA buffers, clear the dev_self[] slot
 * and free the net_device. Always returns 0 (or -1 on a NULL self).
 */
static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
int iobase;
IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
/* Remove netdevice */
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
dev_self[self->index] = NULL;
free_netdev(self->netdev);
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
/*
 * Function ali_ircc_init_43 (chip, info)
 *
 * Initialize the ALi M1543 chip. Intentionally a no-op: the BIOS has
 * already programmed the I/O address, DMA channel and IRQ.
 */
static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
{
	/* Nothing to do — all controller settings come from the BIOS */
	return 0;
}
/*
 * Function ali_ircc_init_53 (chip, info)
 *
 * Initialize the ALi M1535 chip. Intentionally a no-op: the BIOS has
 * already programmed the I/O address, DMA channel and IRQ.
 */
static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
{
	/* Nothing to do — all controller settings come from the BIOS */
	return 0;
}
/*
 * Function ali_ircc_probe_53 (chip, info)
 *
 * Probes for the ALi M1535D or M1535: enters config mode, selects
 * logical device 5 (UART2) and reads back the BIOS-programmed base
 * address (regs 0x60/0x61), IRQ (0x70), DMA channel (0x74), enable
 * status (0x30) and power status (0x22) into *info. Always returns 0.
 */
static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
{
int cfg_base = info->cfg_base;
int hi, low, reg;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chip->entr1, cfg_base);
outb(chip->entr2, cfg_base);
/* Select Logical Device 5 Registers (UART2) */
outb(0x07, cfg_base);
outb(0x05, cfg_base+1);
/* Read address control register (high byte at 0x60, low at 0x61) */
outb(0x60, cfg_base);
hi = inb(cfg_base+1);
outb(0x61, cfg_base);
low = inb(cfg_base+1);
info->fir_base = (hi<<8) + low;
/* SIR and FIR share the same base address on this chip */
info->sir_base = info->fir_base;
IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
/* Read IRQ control register */
outb(0x70, cfg_base);
reg = inb(cfg_base+1);
info->irq = reg & 0x0f;
IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
/* Read DMA channel (value 4 means "no DMA assigned") */
outb(0x74, cfg_base);
reg = inb(cfg_base+1);
info->dma = reg & 0x07;
if(info->dma == 0x04)
IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
else
IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
/* Read Enabled Status: both bit7 and bit0 must be set */
outb(0x30, cfg_base);
reg = inb(cfg_base+1);
info->enabled = (reg & 0x80) && (reg & 0x01);
IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
/* Read Power Status */
outb(0x22, cfg_base);
reg = inb(cfg_base+1);
info->suspended = (reg & 0x20);
IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
/* Exit configuration */
outb(0xbb, cfg_base);
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
/*
 * Function ali_ircc_setup (info)
 *
 * Set FIR FIFO and DMA Threshold, verify the chip's FIR ID version,
 * enable CRC, put the receiver in RX mode and mask all interrupts,
 * leaving the chip in SIR mode with interrupts off.
 * Returns 0 on success, -1 if the FIR version register does not read
 * back as 0x00 (wrong/unsupported chip).
 */
static int ali_ircc_setup(chipio_t *info)
{
unsigned char tmp;
int version;
int iobase = info->fir_base;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Locking comments :
 * Most operations here need to be protected. We are called before
 * the device instance is created in ali_ircc_open(), therefore
 * nobody can bother us - Jean II */
/* Switch to FIR space */
SIR2FIR(iobase);
/* Master Reset */
outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
/* Read FIR ID Version Register */
switch_bank(iobase, BANK3);
version = inb(iobase+FIR_ID_VR);
/* Should be 0x00 in the M1535/M1535D */
if(version != 0x00)
{
IRDA_ERROR("%s, Wrong chip version %02x\n",
ALI_IRCC_DRIVER_NAME, version);
return -1;
}
/* Set FIR FIFO Threshold Register */
switch_bank(iobase, BANK1);
outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
/* Set FIR DMA Threshold Register */
outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
/* CRC enable */
switch_bank(iobase, BANK2);
outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
/* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/
/* Switch to Bank 0 */
switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
tmp &=~0x20; // disable SIP
tmp |= 0x80; // these two steps make RX mode
tmp &= 0xbf;
outb(tmp, iobase+FIR_LCR_B);
/* Disable Interrupt */
outb(0x00, iobase+FIR_IER);
/* Switch to SIR space */
FIR2SIR(iobase);
IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n",
ALI_IRCC_DRIVER_NAME);
/* Enable receive interrupts */
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
// Turn on the interrupts in ali_ircc_net_open
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return 0;
}
/*
 * Function ali_ircc_read_dongle_id (int index, info)
 *
 * Try to read dongle identification. This procedure needs to be executed
 * once after power-on/reset. It also needs to be used whenever you suspect
 * that the user may have plugged/unplugged the IrDA Dongle.
 *
 * Returns a 2-bit id (0..3) built from bits 7 and 5 of config register
 * 0xf0; the value indexes the dongle_types[] name table.
 */
static int ali_ircc_read_dongle_id (int i, chipio_t *info)
{
int dongle_id, reg;
int cfg_base = info->cfg_base;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chips[i].entr1, cfg_base);
outb(chips[i].entr2, cfg_base);
/* Select Logical Device 5 Registers (UART2) */
outb(0x07, cfg_base);
outb(0x05, cfg_base+1);
/* Read Dongle ID */
outb(0xf0, cfg_base);
reg = inb(cfg_base+1);
/* bit7 -> id bit1, bit5 -> id bit0 */
dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
dongle_id, dongle_types[dongle_id]);
/* Exit configuration */
outb(0xbb, cfg_base);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return dongle_id;
}
/*
 * Function ali_ircc_interrupt (irq, dev_id)
 *
 * Top-level interrupt handler: takes the instance lock and dispatches to
 * the FIR or SIR sub-handler depending on the currently configured speed.
 */
static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ali_ircc_cb *self = netdev_priv(dev);
	int handled;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);

	spin_lock(&self->lock);

	/* Dispatch interrupt handler for the current speed */
	if (self->io.speed > 115200)
		handled = ali_ircc_fir_interrupt(self);
	else
		handled = ali_ircc_sir_interrupt(self);

	spin_unlock(&self->lock);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);

	return handled;
}
/*
 * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
 *
 * Handle MIR/FIR interrupt: latch the interrupt-ID, bus-status and
 * line-status registers, then service end-of-message (IIR_EOM) and
 * timer (IIR_TIMER) events, updating self->ier with the interrupt mask
 * to re-enable on exit. Caller (ali_ircc_interrupt) holds self->lock.
 */
static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
{
__u8 eir, OldMessageCount;
int iobase, tmp;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.fir_base;
switch_bank(iobase, BANK0);
self->InterruptID = inb(iobase+FIR_IIR);
self->BusStatus = inb(iobase+FIR_BSR);
/* Message count from the PREVIOUS LineStatus, before re-reading it;
 * compared below to detect receive-frame overflow. */
OldMessageCount = (self->LineStatus + 1) & 0x07;
self->LineStatus = inb(iobase+FIR_LSR);
//self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
/* Disable interrupts while servicing */
SetCOMInterrupts(self, FALSE);
/* Tx or Rx Interrupt */
if (eir & IIR_EOM)
{
if (self->io.direction == IO_XMIT) /* TX */
{
IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
if(ali_ircc_dma_xmit_complete(self))
{
if (irda_device_txqueue_empty(self->netdev))
{
/* Nothing left to send: prepare for receive */
ali_ircc_dma_receive(self);
self->ier = IER_EOM;
}
}
else
{
self->ier = IER_EOM;
}
}
else /* RX */
{
IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
/* Message counter wrapped -> frames were dropped */
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
self->ier = IER_EOM;
}
else
{
IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
/* Keep the timer armed to poll for completion */
self->ier = IER_EOM | IER_TIMER;
}
}
}
/* Timer Interrupt */
else if (eir & IIR_TIMER)
{
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
tmp = inb(iobase+FIR_CR);
outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
/* Check if this is a Tx timer interrupt (mtt delay expired) */
if (self->io.direction == IO_XMIT)
{
ali_ircc_dma_xmit(self);
/* Interrupt on EOM */
self->ier = IER_EOM;
}
else /* Rx */
{
if(ali_ircc_dma_receive_complete(self))
{
self->ier = IER_EOM;
}
else
{
self->ier = IER_EOM | IER_TIMER;
}
}
}
/* Restore Interrupt mask (self->ier chosen above) */
SetCOMInterrupts(self, TRUE);
IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
return IRQ_RETVAL(eir);
}
/*
 * Function ali_ircc_sir_interrupt (irq, self, eir)
 *
 * Handle SIR interrupt: read the UART interrupt-ID and line-status
 * registers and dispatch to receive / transmit-wakeup handling.
 * Caller (ali_ircc_interrupt) holds self->lock.
 */
static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
{
int iobase;
int iir, lsr;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.sir_base;
iir = inb(iobase+UART_IIR) & UART_IIR_ID;
if (iir) {
/* Clear interrupt by reading LSR */
lsr = inb(iobase+UART_LSR);
IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
iir, lsr, iobase);
switch (iir)
{
case UART_IIR_RLSI:
/* Receiver line status change: logged only */
IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
ali_ircc_sir_receive(self);
break;
case UART_IIR_THRI:
if (lsr & UART_LSR_THRE)
{
/* Transmitter ready for data */
ali_ircc_sir_write_wakeup(self);
}
break;
default:
IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
break;
}
}
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return IRQ_RETVAL(iir);
}
/*
 * Function ali_ircc_sir_receive (self)
 *
 * Receive one frame from the infrared port: drain the UART Rx FIFO one
 * byte at a time through the IrDA async unwrapper, which delivers any
 * complete frames it finds.
 */
static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
{
int boguscount = 0;
int iobase;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
/*
 * Receive all characters in Rx FIFO, unwrap and unstuff them.
 * async_unwrap_char will deliver all found frames
 */
do {
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
inb(iobase+UART_RX));
/* Make sure we don't stay here too long (cap at 32 bytes/IRQ) */
if (boguscount++ > 32) {
IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Function ali_ircc_sir_write_wakeup (tty)
 *
 * Called by the driver when there's room for more data. If we have
 * more packets to send, we send them here. When the frame is done we
 * either perform a pending speed change or wake the Tx queue.
 */
static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
{
int actual = 0;
int iobase;
IRDA_ASSERT(self != NULL, return;);
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.sir_base;
/* Finished with frame? */
if (self->tx_buff.len > 0)
{
/* Write data left in transmit buffer */
actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
self->tx_buff.data, self->tx_buff.len);
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
}
else
{
if (self->new_speed)
{
/* We must wait until all data are gone (busy-wait on TEMT) */
while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
ali_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
/* Switched to FIR: FIR handler takes over; skip the
 * SIR receive-interrupt enable below */
IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
return;
}
}
else
{
netif_wake_queue(self->netdev);
}
self->netdev->stats.tx_packets++;
/* Turn on receive interrupts */
outb(UART_IER_RDI, iobase+UART_IER);
}
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Change port speed: dispatches to the FIR path (> 115200 baud) or the
 * SIR path, and installs the matching netdev_ops xmit handler.
 * Must be called with interrupts off and self->lock held (see below).
 */
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
{
struct net_device *dev = self->netdev;
int iobase;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
 * - Jean II */
iobase = self->io.fir_base;
SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
/* Go to MIR, FIR Speed */
if (baud > 115200)
{
ali_ircc_fir_change_speed(self, baud);
/* Install FIR xmit handler*/
dev->netdev_ops = &ali_ircc_fir_ops;
/* Enable Interrupt */
self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
/* Be ready for incoming frames */
ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
}
/* Go to SIR Speed */
else
{
ali_ircc_sir_change_speed(self, baud);
/* Install SIR xmit handler*/
dev->netdev_ops = &ali_ircc_sir_ops;
}
SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
netif_wake_queue(self->netdev);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Switch the chip to a MIR/FIR rate: move from SIR to FIR register
 * space if we were below 115200, record the new speed, and program the
 * attached dongle accordingly.
 */
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
{
int iobase;
struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
struct net_device *dev;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
iobase = self->io.fir_base;
IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
/* Come from SIR speed */
if(self->io.speed <=115200)
{
SIR2FIR(iobase);
}
/* Update accounting for new speed */
self->io.speed = baud;
// Set Dongle Speed mode
ali_ircc_change_dongle_speed(self, baud);
IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Function ali_sir_change_speed (self, speed)
 *
 * Set speed of IrDA port to specified baudrate: switch back from FIR
 * register space if needed, then program the UART divisor, FIFO
 * trigger level and 8N1 line settings.
 *
 * NOTE(review): divisor = 115200/speed assumes speed is a nonzero
 * standard SIR rate (9600..115200) — confirm all callers guarantee this.
 */
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
int lcr; /* Line control reg */
int divisor;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
/* Come from MIR or FIR speed */
if(self->io.speed >115200)
{
// Set Dongle Speed mode first
ali_ircc_change_dongle_speed(self, speed);
FIR2SIR(iobase);
}
// Clear Line and Auxiliary status registers 2000/11/24 11:47AM
inb(iobase+UART_LSR);
inb(iobase+UART_SCR);
/* Update accounting for new speed */
self->io.speed = speed;
spin_lock_irqsave(&self->lock, flags);
divisor = 115200/speed;
fcr = UART_FCR_ENABLE_FIFO;
/*
 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and
 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget
 * about this timeout since it will always be fast enough.
 */
if (self->io.speed < 38400)
fcr |= UART_FCR_TRIGGER_1;
else
fcr |= UART_FCR_TRIGGER_14;
/* IrDA ports use 8N1 */
lcr = UART_LCR_WLEN8;
outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
outb(divisor >> 8, iobase+UART_DLM);
outb(lcr, iobase+UART_LCR); /* Set 8N1 */
outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
/* without this, the connection will be broken after coming back from
   FIR speed, but with this, the SIR connection is harder to establish */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
spin_unlock_irqrestore(&self->lock, flags);
IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Program the attached dongle for a new speed. The per-dongle branches
 * toggle the SD/MODE and IRTX pins through FIR_IRDA_CR (bank 2) with
 * udelay()-paced waveforms — the timing diagrams in the comments show
 * the intended pin sequence, so statement order here is significant.
 * Caller already holds the lock (see comment below).
 */
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
int iobase,dongle_id;
int tmp = 0;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
dongle_id = self->io.dongle_id;
/* We are already locked, no need to do it again */
IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
switch_bank(iobase, BANK2);
tmp = inb(iobase+FIR_IRDA_CR);
/* IBM type dongle (TFDS6000) */
if(dongle_id == 0)
{
if(speed == 4000000)
{
// __ __
// SD/MODE __| |__ __
// __ __
// IRTX __ __| |__
// T1 T2 T3 T4 T5
tmp &= ~IRDA_CR_HDLC; // HDLC=0
tmp |= IRDA_CR_CRC; // CRC=1
switch_bank(iobase, BANK2);
outb(tmp, iobase+FIR_IRDA_CR);
// T1 -> SD/MODE:0 IRTX:0
tmp &= ~0x09;
tmp |= 0x02;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// T2 -> SD/MODE:1 IRTX:0
tmp &= ~0x01;
tmp |= 0x0a;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// T3 -> SD/MODE:1 IRTX:1
tmp |= 0x0b;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// T4 -> SD/MODE:0 IRTX:1
tmp &= ~0x08;
tmp |= 0x03;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// T5 -> SD/MODE:0 IRTX:0
tmp &= ~0x09;
tmp |= 0x02;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// reset -> Normal TX output Signal
outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
}
else /* speed <=1152000 */
{
// __
// SD/MODE __| |__
//
// IRTX ________
// T1 T2 T3
/* MIR 115200, 57600 */
if (speed==1152000)
{
tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
}
else
{
tmp &=~0x80; //HDLC 0.576Mbps
tmp |= 0x20; //HDLC=1,
}
tmp |= IRDA_CR_CRC; // CRC=1
switch_bank(iobase, BANK2);
outb(tmp, iobase+FIR_IRDA_CR);
/* MIR 115200, 57600 */
//switch_bank(iobase, BANK2);
// T1 -> SD/MODE:0 IRTX:0
tmp &= ~0x09;
tmp |= 0x02;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// T2 -> SD/MODE:1 IRTX:0
tmp &= ~0x01;
tmp |= 0x0a;
outb(tmp, iobase+FIR_IRDA_CR);
// T3 -> SD/MODE:0 IRTX:0
tmp &= ~0x09;
tmp |= 0x02;
outb(tmp, iobase+FIR_IRDA_CR);
udelay(2);
// reset -> Normal TX output Signal
outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
}
}
else if (dongle_id == 1) /* HP HDSL-3600 */
{
switch(speed)
{
case 4000000:
tmp &= ~IRDA_CR_HDLC; // HDLC=0
break;
case 1152000:
tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
break;
case 576000:
tmp &=~0x80; // HDLC 0.576Mbps
tmp |= 0x20; // HDLC=1,
break;
}
tmp |= IRDA_CR_CRC; // CRC=1
switch_bank(iobase, BANK2);
outb(tmp, iobase+FIR_IRDA_CR);
}
else /* HP HDSL-1100 */
{
if(speed <= 115200) /* SIR */
{
tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
switch_bank(iobase, BANK2);
outb(tmp, iobase+FIR_IRDA_CR);
}
else /* MIR FIR */
{
switch(speed)
{
case 4000000:
tmp &= ~IRDA_CR_HDLC; // HDLC=0
break;
case 1152000:
tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
break;
case 576000:
tmp &=~0x80; // HDLC 0.576Mbps
tmp |= 0x20; // HDLC=1,
break;
}
tmp |= IRDA_CR_CRC; // CRC=1
tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
switch_bank(iobase, BANK2);
outb(tmp, iobase+FIR_IRDA_CR);
}
}
/* Leave the chip back on bank 0 */
switch_bank(iobase, BANK0);
IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Function ali_ircc_sir_write (driver)
 *
 * Fill the UART Tx FIFO with up to fifo_size bytes from buf and return
 * how many bytes were actually written (0 if the FIFO was not empty).
 */
static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
	int written = 0;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	/* Tx FIFO should be empty! */
	if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
		IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
		return 0;
	}

	/* Fill FIFO with current frame, one byte per iteration */
	for (; fifo_size > 0 && written < len; fifo_size--) {
		outb(buf[written], iobase+UART_TX);
		written++;
	}

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return written;
}
/*
 * Function ali_ircc_net_open (dev)
 *
 * Start the device: acquire the IRQ and DMA channel, enable receive
 * interrupts, start the Tx queue and open an IrLAP instance on top of
 * this port.
 *
 * Returns 0 on success, -EAGAIN if the IRQ or DMA channel cannot be
 * allocated (the IRQ is released again on the DMA failure path).
 */
static int ali_ircc_net_open(struct net_device *dev)
{
	struct ali_ircc_cb *self;
	int iobase;
	char hwname[32];

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Request IRQ and install Interrupt Handler */
	if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
	{
		IRDA_WARNING("%s, unable to allocate irq=%d\n",
			     ALI_IRCC_DRIVER_NAME,
			     self->io.irq);
		return -EAGAIN;
	}

	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		IRDA_WARNING("%s, unable to allocate dma=%d\n",
			     ALI_IRCC_DRIVER_NAME,
			     self->io.dma);
		/* Fix: must pass the same dev_id ('dev') that was given to
		 * request_irq() above; free_irq() matches on dev_id, so
		 * passing 'self' here would fail to release the handler. */
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Turn on interrupts */
	outb(UART_IER_RDI , iobase+UART_IER);

	/* Ready to play! */
	netif_start_queue(dev); //benjamin by irport

	/* Give self a hardware name */
	sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return 0;
}
/*
 * Function ali_ircc_net_close (dev)
 *
 * Stop the device: halt the Tx queue, close the IrLAP instance, stop
 * the DMA channel, mask chip interrupts, then release the IRQ (same
 * dev_id 'dev' that was passed to request_irq) and the DMA channel.
 */
static int ali_ircc_net_close(struct net_device *dev)
{
struct ali_ircc_cb *self;
//int iobase;
IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
/* Stop device */
netif_stop_queue(dev);
/* Stop and remove instance of IrLAP */
if (self->irlap)
irlap_close(self->irlap);
self->irlap = NULL;
disable_dma(self->io.dma);
/* Disable interrupts */
SetCOMInterrupts(self, FALSE);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
/*
 * Function ali_ircc_fir_hard_xmit (skb, dev)
 *
 * Transmit the frame in MIR/FIR mode: handle pending speed changes,
 * copy the skb into the next tx_fifo slot in the coherent DMA buffer,
 * and either kick the DMA immediately or arm the chip timer so the
 * minimum-turnaround-time (mtt) elapses first. Always consumes the skb
 * and returns NETDEV_TX_OK.
 */
static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ali_ircc_cb *self;
unsigned long flags;
int iobase;
__u32 speed;
int mtt, diff;
IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
self = netdev_priv(dev);
iobase = self->io.fir_base;
netif_stop_queue(dev);
/* Make sure tests *& speed change are atomic */
spin_lock_irqsave(&self->lock, flags);
/* Note : you should make sure that speed changes are not going
 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
 * details - Jean II */
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame: pure speed-change request, no data */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
dev->trans_start = jiffies;
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
/* Data present: change speed after this frame is sent */
self->new_speed = speed;
}
/* Register and copy this frame to DMA memory */
self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
self->tx_fifo.tail += skb->len;
dev->stats.tx_bytes += skb->len;
skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
skb->len);
self->tx_fifo.len++;
self->tx_fifo.free++;
/* Start transmit only if there is currently no transmit going on */
if (self->tx_fifo.len == 1)
{
/* Check if we must wait the min turn time or not */
mtt = irda_get_mtt(skb);
if (mtt)
{
/* Check how much time we have used already */
do_gettimeofday(&self->now);
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
/* Wrapped past a second boundary */
if (diff < 0)
diff += 1000000;
/* Check if the mtt is larger than the time we have
 * already used by all the protocol processing
 */
if (mtt > diff)
{
mtt -= diff;
/*
 * Use timer if delay larger than 1000 us, and
 * use udelay for smaller values which should
 * be acceptable
 */
if (mtt > 500)
{
/* Adjust for timer resolution (500 us units) */
mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
/* Setup timer */
if (mtt == 1) /* 500 us */
{
switch_bank(iobase, BANK1);
outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
}
else if (mtt == 2) /* 1 ms */
{
switch_bank(iobase, BANK1);
outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
}
else /* > 2ms -> 4ms */
{
switch_bank(iobase, BANK1);
outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
}
/* Start timer */
outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
self->io.direction = IO_XMIT;
/* Enable timer interrupt; the timer IRQ handler
 * (ali_ircc_fir_interrupt) starts the DMA xmit */
self->ier = IER_TIMER;
SetCOMInterrupts(self, TRUE);
/* Timer will take care of the rest */
goto out;
}
else
udelay(mtt);
} // if (if (mtt > diff)
}// if (mtt)
/* Enable EOM interrupt */
self->ier = IER_EOM;
SetCOMInterrupts(self, TRUE);
/* Transmit frame */
ali_ircc_dma_xmit(self);
} // if (self->tx_fifo.len == 1)
out:
/* Not busy transmitting anymore if window is not full */
if (self->tx_fifo.free < MAX_TX_WINDOW)
netif_wake_queue(self->netdev);
/* Restore bank register */
switch_bank(iobase, BANK0);
dev->trans_start = jiffies;
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return NETDEV_TX_OK;
}
/*
 * Program the chip and the ISA DMA engine to transmit the frame at the
 * current tx_fifo slot.  Caller must hold self->lock; the routine only
 * touches banked FIR registers and leaves the chip selected on BANK0.
 */
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
{
	int iobase, tmp;
	unsigned char FIFO_OPTI, Hi, Lo;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	iobase = self->io.fir_base;

	/* FIFO threshold , this method comes from NDIS5 code */
	if (self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
		FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len - 1;
	else
		FIFO_OPTI = TX_FIFO_Threshold;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	self->io.direction = IO_XMIT;

	/* DMA source = offset of this queue entry within tx_buff, rebased
	 * onto the bus address of the buffer */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len,
		       DMA_TX_MODE);

	/* Reset Tx FIFO */
	switch_bank(iobase, BANK0);
	outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);

	/* Set Tx FIFO threshold (cached to avoid a redundant register write) */
	if (self->fifo_opti_buf != FIFO_OPTI)
	{
		switch_bank(iobase, BANK1);
		outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
		self->fifo_opti_buf = FIFO_OPTI;
	}

	/* Set Tx DMA threshold */
	switch_bank(iobase, BANK1);
	outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);

	/* Set max Tx frame size (12-bit length split over two registers) */
	Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
	Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
	switch_bank(iobase, BANK2);
	outb(Hi, iobase+FIR_TX_DSR_HI);
	outb(Lo, iobase+FIR_TX_DSR_LO);

	/* Disable SIP , Disable Brick Wall (we don't support in TX mode), Change to TX mode */
	switch_bank(iobase, BANK0);
	tmp = inb(iobase+FIR_LCR_B);
	tmp &= ~0x20;		// Disable SIP
	outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
	IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));

	/* Clear line status before starting */
	outb(0, iobase+FIR_LSR);

	/* Enable DMA and Burst Mode */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Finish up after a DMA transmit: stop the DMA engine, account the
 * frame (underrun -> tx_errors/tx_fifo_errors, else tx_packets), apply
 * a queued speed change, then either start the next queued frame or
 * reset the tx window.  Returns TRUE when the whole window has been
 * sent, FALSE when another back-to-back DMA transmit was started.
 */
static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	iobase = self->io.fir_base;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	/* Check for underrun! */
	switch_bank(iobase, BANK0);
	if ((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
	{
		IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
	}
	else
	{
		self->netdev->stats.tx_packets++;
	}

	/* Check if we need to change the speed */
	if (self->new_speed)
	{
		ali_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	self->tx_fifo.ptr++;
	self->tx_fifo.len--;

	/* Any frames to be sent back-to-back? */
	if (self->tx_fifo.len)
	{
		ali_ircc_dma_xmit(self);

		/* Not finished yet! */
		ret = FALSE;
	}
	else
	{	/* Reset Tx FIFO info */
		self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
		self->tx_fifo.tail = self->tx_buff.head;
	}

	/* Make sure we have room for more frames */
	if (self->tx_fifo.free < MAX_TX_WINDOW) {
		/* Not busy transmitting anymore */
		/* Tell the network layer, that we can accept more frames */
		netif_wake_queue(self->netdev);
	}

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return ret;
}
/*
 * Function ali_ircc_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.  Also resets the software tx and
 *    status-FIFO bookkeeping.  Always returns 0.
 */
static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
{
	int iobase, tmp;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	/* Reset Tx FIFO info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	iobase = self->io.fir_base;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	/* Reset Message Count */
	switch_bank(iobase, BANK0);
	outb(0x07, iobase+FIR_LSR);

	self->rcvFramesOverflow = FALSE;

	self->LineStatus = inb(iobase+FIR_LSR) ;

	/* Reset Rx FIFO info */
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Rx FIFO */
	// switch_bank(iobase, BANK0);
	outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);

	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_RX_MODE);

	/* Set Receive Mode,Brick Wall */
	//switch_bank(iobase, BANK0);
	tmp = inb(iobase+FIR_LCR_B);
	outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
	IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));

	/* Set Rx Threshold */
	switch_bank(iobase, BANK1);
	outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
	outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);

	/* Enable DMA and Burst Mode */
	// switch_bank(iobase, BANK1);
	outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return 0;
}
/*
 * Drain the chip's per-frame status into the software status FIFO,
 * then hand each good frame to the network stack.  Returns TRUE when
 * all frames were consumed, FALSE when the last frame's DMA had not
 * finished (a 500 us timer is armed to retry) or an skb could not be
 * allocated.
 */
static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	__u8 status, MessageCount;
	int len, i, iobase, val;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	st_fifo = &self->st_fifo;
	iobase = self->io.fir_base;

	switch_bank(iobase, BANK0);
	MessageCount = inb(iobase+ FIR_LSR)&0x07;	/* frames pending, 0-7 */

	if (MessageCount > 0)
		IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __func__ , MessageCount);

	/* Pass 1: capture status and length of each received frame */
	for (i=0; i<=MessageCount; i++)
	{
		/* Bank 0 */
		switch_bank(iobase, BANK0);
		status = inb(iobase+FIR_LSR);

		switch_bank(iobase, BANK2);
		len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;	/* 12-bit length */
		len = len << 8;
		len |= inb(iobase+FIR_RX_DSR_LO);

		IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
		IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);

		if (st_fifo->tail >= MAX_RX_WINDOW) {
			IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
			continue;
		}

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
	}

	/* Pass 2: deliver or discard each captured frame.
	 * NOTE(review): this loop also runs MessageCount+1 times even if
	 * pass 1 skipped entries because the window was full, so it may
	 * read stale/unwritten entries in that case -- verify upstream. */
	for (i=0; i<=MessageCount; i++)
	{
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->pending_bytes -= len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors (0xd8 = abort/CRC/frame/underrun bits) */
		if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
		{
			IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );

			/* Skip frame */
			self->netdev->stats.rx_errors++;

			self->rx_buff.data += len;

			if (status & LSR_FIFO_UR)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
			}
			if (status & LSR_FRAME_ERROR)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
			}

			if (status & LSR_CRC_ERROR)
			{
				self->netdev->stats.rx_crc_errors++;
				IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
			}

			if (self->rcvFramesOverflow)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
			}
			if (len == 0)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
			}
		}
		else
		{
			/* A few bytes still pending: the hardware FIFO may not
			 * have drained into the DMA buffer yet */
			if (st_fifo->pending_bytes < 32)
			{
				switch_bank(iobase, BANK0);
				val = inb(iobase+FIR_BSR);
				if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
				{
					IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );

					/* Put this entry back in fifo */
					st_fifo->head--;
					st_fifo->len++;
					st_fifo->pending_bytes += len;
					st_fifo->entries[st_fifo->head].status = status;
					st_fifo->entries[st_fifo->head].len = len;

					/*
					 * DMA not finished yet, so try again
					 * later, set timer value, resolution
					 * 500 us
					 */
					switch_bank(iobase, BANK1);
					outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);	// 2001/1/2 05:07PM

					/* Enable Timer */
					outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);

					return FALSE; /* I'll be back! */
				}
			}

			/*
			 * Remember the time we received this frame, so we can
			 * reduce the min turn time a bit since we will know
			 * how much time we have used for protocol processing
			 */
			do_gettimeofday(&self->stamp);

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)
			{
				IRDA_WARNING("%s(), memory squeeze, "
					     "dropping frame.\n",
					     __func__);
				self->netdev->stats.rx_dropped++;

				return FALSE;
			}

			/* Make sure IP header gets aligned */
			skb_reserve(skb, 1);

			/* Copy frame without CRC, CRC is removed by hardware*/
			skb_put(skb, len);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return TRUE;
}
/*
* Function ali_ircc_sir_hard_xmit (skb, dev)
*
* Transmit the frame!
*
*/
/*
 * SIR-mode .ndo_start_xmit: wrap the skb (start/stop flags, byte
 * stuffing, CRC) into tx_buff and arm the UART THR-empty interrupt,
 * which drives the actual byte-by-byte transmission.  An empty skb
 * carrying only a speed-change request switches speed immediately.
 * Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ali_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__u32 speed;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);

	iobase = self->io.sir_base;

	netif_stop_queue(dev);

	/* Make sure tests *& speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Note : you should make sure that speed changes are not going
	 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
	 * details - Jean II */

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			ali_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			/* Deferred until the current frame has gone out */
			self->new_speed = speed;
	}

	/* Init tx buffer */
	self->tx_buff.data = self->tx_buff.head;

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
					   self->tx_buff.truesize);

	self->netdev->stats.tx_bytes += self->tx_buff.len;

	/* Turn on transmit finished interrupt. Will fire immediately!  */
	outb(UART_IER_THRI, iobase+UART_IER);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);

	dev_kfree_skb(skb);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return NETDEV_TX_OK;
}
/*
* Function ali_ircc_net_ioctl (dev, rq, cmd)
*
* Process IOCTL commands for this device
*
*/
/*
 * Function ali_ircc_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device: set speed, mark the media
 *    busy, or report whether a frame is currently being received.
 *    Privileged commands require CAP_NET_ADMIN (speed changes are also
 *    allowed from interrupt context, where IrLAP drives them).
 */
static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct ali_ircc_cb *self;
	unsigned long flags;
	int ret = 0;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
		/*
		 * This function will also be used by IrLAP to change the
		 * speed, so we still must allow for speed change within
		 * interrupt context.
		 */
		if (!in_interrupt() && !capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irqsave(&self->lock, flags);
		ali_ircc_change_speed(self, irq->ifr_baudrate);
		spin_unlock_irqrestore(&self->lock, flags);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
		/* This is protected */
		irq->ifr_receiving = ali_ircc_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return ret;
}
/*
* Function ali_ircc_is_receiving (self)
*
* Return TRUE is we are currently receiving a frame
*
*/
/*
 * Function ali_ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame.  In FIR/MIR
 *    mode this checks the hardware Rx FIFO fill level; in SIR mode it
 *    checks the software unwrap state machine.
 */
static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
{
	unsigned long flags;
	int status = FALSE;
	int iobase;

	IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );

	IRDA_ASSERT(self != NULL, return FALSE;);

	spin_lock_irqsave(&self->lock, flags);

	if (self->io.speed > 115200)	/* FIR/MIR: ask the chip */
	{
		iobase = self->io.fir_base;

		switch_bank(iobase, BANK1);
		if ((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
		{
			/* We are receiving something */
			IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
			status = TRUE;
		}
		switch_bank(iobase, BANK0);
	}
	else	/* SIR: ask the async unwrapper */
	{
		status = (self->rx_buff.state != OUTSIDE_FRAME);
	}

	spin_unlock_irqrestore(&self->lock, flags);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return status;
}
/*
 * Platform suspend hook: shut the interface down once, remembering the
 * suspended state so a repeated suspend is a no-op.
 */
static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct ali_ircc_cb *self = platform_get_drvdata(dev);

	IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);

	if (!self->io.suspended) {
		ali_ircc_net_close(self->netdev);
		self->io.suspended = 1;
	}

	return 0;
}
/*
 * Platform resume hook: reopen the interface, but only if we actually
 * closed it in ali_ircc_suspend().
 */
static int ali_ircc_resume(struct platform_device *dev)
{
	struct ali_ircc_cb *self = platform_get_drvdata(dev);

	if (self->io.suspended) {
		ali_ircc_net_open(self->netdev);

		IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);

		self->io.suspended = 0;
	}

	return 0;
}
/* ALi Chip Function */
/*
 * Enable or disable the chip's interrupt sources.  In FIR/MIR mode the
 * mask comes from self->ier; in SIR mode transmit wants THR-empty and
 * receive interrupts, receive wants receive interrupts only.  Writes
 * FIR_IER (FIR/MIR) or UART_IER (SIR).
 */
static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
{
	unsigned char newMask = 0x00;	/* default: everything masked */
	int iobase = self->io.fir_base; /* or sir_base */

	IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);

	if (enable) {
		if (self->io.speed > 115200)			/* FIR, MIR */
			newMask = self->ier;
		else if (self->io.direction == IO_XMIT)		/* SIR transmit */
			newMask = UART_IER_THRI | UART_IER_RDI;
		else						/* SIR receive */
			newMask = UART_IER_RDI;
	}

	/* SIR and FIR use different interrupt-enable registers */
	if (self->io.speed > 115200) {
		switch_bank(iobase, BANK0);
		outb(newMask, iobase+FIR_IER);
	} else {
		outb(newMask, iobase+UART_IER);
	}

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Switch the chip from SIR (16550-compatible) to FIR operation.  The
 * three MCR writes are the vendor's unlock sequence for the FIR
 * register set; the exact values/order come from the reference driver.
 */
static void SIR2FIR(int iobase)
{
	//unsigned char tmp;

	IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );

	/* Already protected (change_speed() or setup()), no need to lock.
	 * Jean II */

	/* Magic unlock sequence on the UART MCR */
	outb(0x28, iobase+UART_MCR);
	outb(0x68, iobase+UART_MCR);
	outb(0x88, iobase+UART_MCR);

	outb(0x60, iobase+FIR_MCR); 	/*  Master Reset */
	outb(0x20, iobase+FIR_MCR); 	/*  Master Interrupt Enable */

	//tmp = inb(iobase+FIR_LCR_B);	/* SIP enable */
	//tmp |= 0x20;
	//outb(tmp, iobase+FIR_LCR_B);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
 * Switch the chip back from FIR to SIR (16550-compatible) operation,
 * then drain the UART's RX/LSR/MSR registers to clear any stale state.
 */
static void FIR2SIR(int iobase)
{
	unsigned char val;

	IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );

	/* Already protected (change_speed() or setup()), no need to lock.
	 * Jean II */

	outb(0x20, iobase+FIR_MCR);	/* IRQ to low */
	outb(0x00, iobase+UART_IER);

	outb(0xA0, iobase+FIR_MCR);	/* Don't set master reset */
	outb(0x00, iobase+UART_FCR);
	outb(0x07, iobase+UART_FCR);

	/* Drain stale receive data and status */
	val = inb(iobase+UART_RX);
	val = inb(iobase+UART_LSR);
	val = inb(iobase+UART_MSR);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
MODULE_DESCRIPTION("ALi FIR Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
module_param_array(dma, int, NULL, 0);
MODULE_PARM_DESC(dma, "DMA channels");
module_init(ali_ircc_init);
module_exit(ali_ircc_cleanup);
| gpl-2.0 |
CM7lu3000/meteor | drivers/rtc/rtc-x1205.c | 1144 | 16061 | /*
* An i2c driver for the Xicor/Intersil X1205 RTC
* Copyright 2004 Karen Spearel
* Copyright 2005 Alessandro Zummo
*
* please send all reports to:
* Karen Spearel <kas111 at gmail dot com>
* Alessandro Zummo <a.zummo@towertech.it>
*
* based on a lot of other RTC drivers.
*
* Information and datasheet:
* http://www.intersil.com/cda/deviceinfo/0,1477,X1205,00.html
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#define DRV_VERSION "1.0.8"
/* offsets into CCR area */
#define CCR_SEC 0
#define CCR_MIN 1
#define CCR_HOUR 2
#define CCR_MDAY 3
#define CCR_MONTH 4
#define CCR_YEAR 5
#define CCR_WDAY 6
#define CCR_Y2K 7
#define X1205_REG_SR 0x3F /* status register */
#define X1205_REG_Y2K 0x37
#define X1205_REG_DW 0x36
#define X1205_REG_YR 0x35
#define X1205_REG_MO 0x34
#define X1205_REG_DT 0x33
#define X1205_REG_HR 0x32
#define X1205_REG_MN 0x31
#define X1205_REG_SC 0x30
#define X1205_REG_DTR 0x13
#define X1205_REG_ATR 0x12
#define X1205_REG_INT 0x11
#define X1205_REG_0 0x10
#define X1205_REG_Y2K1 0x0F
#define X1205_REG_DWA1 0x0E
#define X1205_REG_YRA1 0x0D
#define X1205_REG_MOA1 0x0C
#define X1205_REG_DTA1 0x0B
#define X1205_REG_HRA1 0x0A
#define X1205_REG_MNA1 0x09
#define X1205_REG_SCA1 0x08
#define X1205_REG_Y2K0 0x07
#define X1205_REG_DWA0 0x06
#define X1205_REG_YRA0 0x05
#define X1205_REG_MOA0 0x04
#define X1205_REG_DTA0 0x03
#define X1205_REG_HRA0 0x02
#define X1205_REG_MNA0 0x01
#define X1205_REG_SCA0 0x00
#define X1205_CCR_BASE 0x30 /* Base address of CCR */
#define X1205_ALM0_BASE 0x00 /* Base address of ALARM0 */
#define X1205_SR_RTCF 0x01 /* Clock failure */
#define X1205_SR_WEL 0x02 /* Write Enable Latch */
#define X1205_SR_RWEL 0x04 /* Register Write Enable */
#define X1205_SR_AL0 0x20 /* Alarm 0 match */
#define X1205_DTR_DTR0 0x01
#define X1205_DTR_DTR1 0x02
#define X1205_DTR_DTR2 0x04
#define X1205_HR_MIL 0x80 /* Set in ccr.hour for 24 hr mode */
#define X1205_INT_AL0E 0x20 /* Alarm 0 enable */
static struct i2c_driver x1205_driver;
/*
* In the routines that deal directly with the x1205 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch
* Epoch is initialized as 2000. Time is set to UTC.
*/
/*
 * Read 8 BCD time registers starting at reg_base (CCR for the clock,
 * ALM0 for the alarm) and decode them into *tm.  For alarm registers
 * the per-field enable bits (bit 7) are masked off first.
 * Returns 0 on success, -EIO on i2c failure.
 */
static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
			      unsigned char reg_base)
{
	unsigned char dt_addr[2] = { 0, reg_base };
	unsigned char buf[8];
	int i;

	struct i2c_msg msgs[] = {
		{ client->addr, 0, 2, dt_addr },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 8, buf },	/* read date */
	};

	/* read date registers */
	if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}

	dev_dbg(&client->dev,
		"%s: raw read data - sec=%02x, min=%02x, hr=%02x, "
		"mday=%02x, mon=%02x, year=%02x, wday=%02x, y2k=%02x\n",
		__func__,
		buf[0], buf[1], buf[2], buf[3],
		buf[4], buf[5], buf[6], buf[7]);

	/* Mask out the enable bits if these are alarm registers */
	if (reg_base < X1205_CCR_BASE)
		for (i = 0; i <= 4; i++)
			buf[i] &= 0x7F;

	tm->tm_sec = bcd2bin(buf[CCR_SEC]);
	tm->tm_min = bcd2bin(buf[CCR_MIN]);
	tm->tm_hour = bcd2bin(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */
	tm->tm_mday = bcd2bin(buf[CCR_MDAY]);
	tm->tm_mon = bcd2bin(buf[CCR_MONTH]) - 1; /* mon is 0-11 */
	tm->tm_year = bcd2bin(buf[CCR_YEAR])
			+ (bcd2bin(buf[CCR_Y2K]) * 100) - 1900;
	tm->tm_wday = buf[CCR_WDAY];

	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	return 0;
}
/*
 * Read the chip's status register into *sr.
 * Returns 0 on success, -EIO on i2c failure.
 */
static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
{
	static unsigned char sr_addr[2] = { 0, X1205_REG_SR };

	struct i2c_msg msgs[] = {
		{ client->addr, 0, 2, sr_addr },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 1, sr },	/* read status */
	};

	/* read status register */
	if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}

	return 0;
}
/*
 * Encode *tm into BCD and write it to the 8 registers at reg_base
 * (CCR = clock, ALM0 = alarm).  The chip requires a WEL then
 * WEL|RWEL unlock sequence before any register write, and writes are
 * re-locked (diswe) at the end.  For alarm registers the per-field
 * compare bits are set, the AL0E interrupt enable is updated from
 * alm_enable, and each nonvolatile write is given 10 ms to complete.
 * Returns 0 on success, -EIO on any i2c failure.
 */
static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
			      u8 reg_base, unsigned char alm_enable)
{
	int i, xfer;

	unsigned char rdata[10] = { 0, reg_base };
	unsigned char *buf = rdata + 2;

	static const unsigned char wel[3] = { 0, X1205_REG_SR,
						X1205_SR_WEL };

	static const unsigned char rwel[3] = { 0, X1205_REG_SR,
						X1205_SR_WEL | X1205_SR_RWEL };

	static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };

	dev_dbg(&client->dev,
		"%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
		__func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
		tm->tm_mon, tm->tm_year, tm->tm_wday);

	buf[CCR_SEC] = bin2bcd(tm->tm_sec);
	buf[CCR_MIN] = bin2bcd(tm->tm_min);

	/* set hour and 24hr bit */
	buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;

	buf[CCR_MDAY] = bin2bcd(tm->tm_mday);

	/* month, 1 - 12 */
	buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);

	/* year, since the rtc epoch*/
	buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
	buf[CCR_WDAY] = tm->tm_wday & 0x07;
	buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);

	/* If writing alarm registers, set compare bits on registers 0-4 */
	if (reg_base < X1205_CCR_BASE)
		for (i = 0; i <= 4; i++)
			buf[i] |= 0x80;

	/* this sequence is required to unlock the chip */
	if ((xfer = i2c_master_send(client, wel, 3)) != 3) {
		dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer);
		return -EIO;
	}

	if ((xfer = i2c_master_send(client, rwel, 3)) != 3) {
		dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer);
		return -EIO;
	}

	xfer = i2c_master_send(client, rdata, sizeof(rdata));
	if (xfer != sizeof(rdata)) {
		dev_err(&client->dev,
			"%s: result=%d addr=%02x, data=%02x\n",
			__func__,
			xfer, rdata[1], rdata[2]);
		return -EIO;
	}

	/* If we wrote to the nonvolatile region, wait 10msec for write cycle*/
	if (reg_base < X1205_CCR_BASE) {
		unsigned char al0e[3] = { 0, X1205_REG_INT, 0 };

		msleep(10);

		/* ...and set or clear the AL0E bit in the INT register */

		/* Need to set RWEL again as the write has cleared it */
		xfer = i2c_master_send(client, rwel, 3);
		if (xfer != 3) {
			dev_err(&client->dev,
				"%s: aloe rwel - %d\n",
				__func__,
				xfer);
			return -EIO;
		}

		if (alm_enable)
			al0e[2] = X1205_INT_AL0E;

		xfer = i2c_master_send(client, al0e, 3);
		if (xfer != 3) {
			dev_err(&client->dev,
				"%s: al0e - %d\n",
				__func__,
				xfer);
			return -EIO;
		}

		/* and wait 10msec again for this write to complete */
		msleep(10);
	}

	/* disable further writes */
	if ((xfer = i2c_master_send(client, diswe, 3)) != 3) {
		dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer);
		return -EIO;
	}

	return 0;
}
static int x1205_fix_osc(struct i2c_client *client)
{
int err;
struct rtc_time tm;
memset(&tm, 0, sizeof(tm));
err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
if (err < 0)
dev_err(&client->dev, "unable to restart the oscillator\n");
return err;
}
/*
 * Read the digital trim register and decode it into parts-per-million:
 * DTR0 = 20 ppm, DTR1 = 10 ppm, DTR2 = negative sign.
 * Returns 0 on success, -EIO on i2c failure.
 */
static int x1205_get_dtrim(struct i2c_client *client, int *trim)
{
	unsigned char dtr;
	static unsigned char dtr_addr[2] = { 0, X1205_REG_DTR };

	struct i2c_msg msgs[] = {
		{ client->addr, 0, 2, dtr_addr },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 1, &dtr }, 	/* read dtr */
	};

	/* read dtr register */
	if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}

	dev_dbg(&client->dev, "%s: raw dtr=%x\n", __func__, dtr);

	*trim = 0;

	if (dtr & X1205_DTR_DTR0)
		*trim += 20;

	if (dtr & X1205_DTR_DTR1)
		*trim += 10;

	if (dtr & X1205_DTR_DTR2)
		*trim = -*trim;

	return 0;
}
/*
 * Read the analog trim register and convert it to thousandths of pF:
 * Catr = (atr * 0.25 pF) + 11.00 pF, returned as *trim = atr*250 + 11000.
 * Returns 0 on success, -EIO on i2c failure.
 *
 * NOTE(review): `&atr' is an s8* passed where struct i2c_msg holds a
 * u8 buffer pointer -- works, but triggers a signedness warning; verify
 * against the upstream driver.
 */
static int x1205_get_atrim(struct i2c_client *client, int *trim)
{
	s8 atr;
	static unsigned char atr_addr[2] = { 0, X1205_REG_ATR };

	struct i2c_msg msgs[] = {
		{ client->addr, 0, 2, atr_addr },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 1, &atr },	/* read atr */
	};

	/* read atr register */
	if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}

	dev_dbg(&client->dev, "%s: raw atr=%x\n", __func__, atr);

	/* atr is a two's complement value on 6 bits,
	 * perform sign extension. The formula is
	 * Catr = (atr * 0.25pF) + 11.00pF.
	 */
	if (atr & 0x20)
		atr |= 0xC0;

	dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);

	*trim = (atr * 250) + 11000;

	dev_dbg(&client->dev, "%s: real=%d\n", __func__, *trim);

	return 0;
}
/* Per-register probe limit: the value at `reg', masked by `mask',
 * must decode (BCD) to a value in [min, max]. */
struct x1205_limit
{
	unsigned char reg, mask, min, max;
};

/*
 * Sanity-check that the device at client->addr really behaves like an
 * X1205 before registering an RTC: selected register bits must read
 * back as zero and the BCD clock registers must decode to in-range
 * values.
 *
 * Returns 0 when the chip looks valid, -EIO on a bus error, -ENODEV
 * when a probe pattern does not match.
 *
 * Fix: the second i2c_msg buffer pointer had been mangled to the
 * mojibake character "(R)" by an HTML-entity round-trip of "&reg";
 * restored to a proper address-of expression so the file compiles.
 */
static int x1205_validate_client(struct i2c_client *client)
{
	int i, xfer;

	/* Probe array. We will read the register at the specified
	 * address and check if the given bits are zero.
	 */
	static const unsigned char probe_zero_pattern[] = {
		/* register, mask */
		X1205_REG_SR,	0x18,
		X1205_REG_DTR,	0xF8,
		X1205_REG_ATR,	0xC0,
		X1205_REG_INT,	0x18,
		X1205_REG_0,	0xFF,
	};

	static const struct x1205_limit probe_limits_pattern[] = {
		/* register, mask, min, max */
		{ X1205_REG_Y2K,	0xFF,	19,	20	},
		{ X1205_REG_DW,		0xFF,	0,	6	},
		{ X1205_REG_YR,		0xFF,	0,	99	},
		{ X1205_REG_MO,		0xFF,	0,	12	},
		{ X1205_REG_DT,		0xFF,	0,	31	},
		{ X1205_REG_HR,		0x7F,	0,	23	},
		{ X1205_REG_MN,		0xFF,	0,	59	},
		{ X1205_REG_SC,		0xFF,	0,	59	},
		{ X1205_REG_Y2K1,	0xFF,	19,	20	},
		{ X1205_REG_Y2K0,	0xFF,	19,	20	},
	};

	/* check that registers have bits a 0 where expected */
	for (i = 0; i < ARRAY_SIZE(probe_zero_pattern); i += 2) {
		unsigned char buf;
		unsigned char addr[2] = { 0, probe_zero_pattern[i] };

		struct i2c_msg msgs[2] = {
			{ client->addr, 0, 2, addr },
			{ client->addr, I2C_M_RD, 1, &buf },
		};

		if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
			dev_err(&client->dev,
				"%s: could not read register %x\n",
				__func__, probe_zero_pattern[i]);
			return -EIO;
		}

		if ((buf & probe_zero_pattern[i+1]) != 0) {
			dev_err(&client->dev,
				"%s: register=%02x, zero pattern=%d, value=%x\n",
				__func__, probe_zero_pattern[i], i, buf);
			return -ENODEV;
		}
	}

	/* check limits (only registers with bcd values) */
	for (i = 0; i < ARRAY_SIZE(probe_limits_pattern); i++) {
		unsigned char reg, value;
		unsigned char addr[2] = { 0, probe_limits_pattern[i].reg };

		struct i2c_msg msgs[2] = {
			{ client->addr, 0, 2, addr },
			{ client->addr, I2C_M_RD, 1, &reg },
		};

		if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
			dev_err(&client->dev,
				"%s: could not read register %x\n",
				__func__, probe_limits_pattern[i].reg);
			return -EIO;
		}

		value = bcd2bin(reg & probe_limits_pattern[i].mask);

		if (value > probe_limits_pattern[i].max ||
		    value < probe_limits_pattern[i].min) {
			dev_dbg(&client->dev,
				"%s: register=%x, lim pattern=%d, value=%d\n",
				__func__, probe_limits_pattern[i].reg,
				i, value);
			return -ENODEV;
		}
	}

	return 0;
}
/*
 * rtc_class_ops .read_alarm: fill *alrm from the ALM0 registers, with
 * `pending' taken from the status register's AL0 match bit and
 * `enabled' from the INT register's AL0E bit.
 * Returns 0 on success, -EIO on i2c failure.
 */
static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	int err;
	unsigned char intreg, status;
	static unsigned char int_addr[2] = { 0, X1205_REG_INT };
	struct i2c_client *client = to_i2c_client(dev);
	struct i2c_msg msgs[] = {
		{ client->addr, 0, 2, int_addr },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 1, &intreg },	/* read INT register */
	};

	/* read interrupt register and status register */
	if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
		dev_err(&client->dev, "%s: read error\n", __func__);
		return -EIO;
	}
	err = x1205_get_status(client, &status);
	if (err == 0) {
		alrm->pending = (status & X1205_SR_AL0) ? 1 : 0;
		alrm->enabled = (intreg & X1205_INT_AL0E) ? 1 : 0;
		err = x1205_get_datetime(client, &alrm->time, X1205_ALM0_BASE);
	}
	return err;
}
/* rtc_class_ops hooks: thin adapters from struct device to the
 * x1205_{get,set}_datetime helpers. */

static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return x1205_set_datetime(client, &alrm->time, X1205_ALM0_BASE,
				  alrm->enabled);
}

static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return x1205_get_datetime(to_i2c_client(dev), tm, X1205_CCR_BASE);
}

static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return x1205_set_datetime(to_i2c_client(dev), tm, X1205_CCR_BASE, 0);
}
/*
 * rtc_class_ops .proc: report the digital and analog trim values in
 * /proc.  A failed read simply omits that line; always returns 0.
 */
static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
{
	int err, dtrim, atrim;

	if ((err = x1205_get_dtrim(to_i2c_client(dev), &dtrim)) == 0)
		seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim);

	if ((err = x1205_get_atrim(to_i2c_client(dev), &atrim)) == 0)
		seq_printf(seq, "analog_trim\t: %d.%02d pF\n",
			atrim / 1000, atrim % 1000);
	return 0;
}

/* RTC class operations exported to the RTC core */
static const struct rtc_class_ops x1205_rtc_ops = {
	.proc		= x1205_rtc_proc,
	.read_time	= x1205_rtc_read_time,
	.set_time	= x1205_rtc_set_time,
	.read_alarm	= x1205_rtc_read_alarm,
	.set_alarm	= x1205_rtc_set_alarm,
};
/* sysfs `atrim': analog trim in pF (thousandths after the dot) */
static ssize_t x1205_sysfs_show_atrim(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int err, atrim;

	err = x1205_get_atrim(to_i2c_client(dev), &atrim);
	if (err)
		return err;

	return sprintf(buf, "%d.%02d pF\n", atrim / 1000, atrim % 1000);
}
static DEVICE_ATTR(atrim, S_IRUGO, x1205_sysfs_show_atrim, NULL);

/* sysfs `dtrim': digital trim in ppm */
static ssize_t x1205_sysfs_show_dtrim(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int err, dtrim;

	err = x1205_get_dtrim(to_i2c_client(dev), &dtrim);
	if (err)
		return err;

	return sprintf(buf, "%d ppm\n", dtrim);
}
static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL);
/*
 * Create the atrim/dtrim attribute files; on failure of the second,
 * the first is removed again so registration is all-or-nothing.
 */
static int x1205_sysfs_register(struct device *dev)
{
	int err;

	err = device_create_file(dev, &dev_attr_atrim);
	if (err)
		return err;

	err = device_create_file(dev, &dev_attr_dtrim);
	if (err)
		device_remove_file(dev, &dev_attr_atrim);

	return err;
}

/* Remove both attribute files created by x1205_sysfs_register() */
static void x1205_sysfs_unregister(struct device *dev)
{
	device_remove_file(dev, &dev_attr_atrim);
	device_remove_file(dev, &dev_attr_dtrim);
}
/*
 * i2c probe: validate the chip, register the RTC class device, restart
 * the oscillator if a power failure is flagged, and create the sysfs
 * trim attributes.  Returns 0 on success or a negative errno.
 */
static int x1205_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int err = 0;
	unsigned char sr;
	struct rtc_device *rtc;

	dev_dbg(&client->dev, "%s\n", __func__);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	if (x1205_validate_client(client) < 0)
		return -ENODEV;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = rtc_device_register(x1205_driver.driver.name, &client->dev,
				&x1205_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	/* Check for power failures and eventualy enable the osc */
	if ((err = x1205_get_status(client, &sr)) == 0) {
		if (sr & X1205_SR_RTCF) {
			dev_err(&client->dev,
				"power failure detected, "
				"please set the clock\n");
			udelay(50);
			x1205_fix_osc(client);
		}
	}
	else
		dev_err(&client->dev, "couldn't read status\n");

	err = x1205_sysfs_register(&client->dev);
	if (err)
		goto exit_devreg;

	return 0;

exit_devreg:
	rtc_device_unregister(rtc);

	return err;
}
/* i2c remove: tear down the RTC device and the sysfs attributes */
static int x1205_remove(struct i2c_client *client)
{
	struct rtc_device *rtc = i2c_get_clientdata(client);

	rtc_device_unregister(rtc);
	x1205_sysfs_unregister(&client->dev);
	return 0;
}
static const struct i2c_device_id x1205_id[] = {
{ "x1205", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, x1205_id);
static struct i2c_driver x1205_driver = {
.driver = {
.name = "rtc-x1205",
},
.probe = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
static int __init x1205_init(void)
{
return i2c_add_driver(&x1205_driver);
}
/* Module exit: unregister the I2C driver. */
static void __exit x1205_exit(void)
{
	i2c_del_driver(&x1205_driver);
}
/* Module metadata and entry points. */
MODULE_AUTHOR(
	"Karen Spearel <kas111 at gmail dot com>, "
	"Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Xicor/Intersil X1205 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(x1205_init);
module_exit(x1205_exit);
| gpl-2.0 |
adityaxavier/MSM8612-Kitkat | drivers/staging/prima/CORE/BAP/src/bapRsnSsmReplayCtr.c | 1400 | 7918 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Woodside Networks, Inc proprietary. All rights reserved.
* File: $Header: //depot/software/projects/feature_branches/gen5_phase1/os/linux/classic/ap/apps/ssm/lib/aniSsmReplayCtr.c#2 $
*
* Contains definitions of various utilities for EAPoL frame
* parsing and creation.
*
* Author: Mayank D. Upadhyay
* Date: 19-June-2002
* History:-
* Date Modified by Modification Information
* ------------------------------------------------------
*
*/
#include "vos_types.h"
#include "vos_trace.h"
#include <bapRsnErrors.h>
#include <bapRsnSsmReplayCtr.h>
#include "vos_status.h"
#include "vos_memory.h"
#include "vos_utils.h"
#include "vos_packet.h"
//#include "aniSsmUtils.h"
/*
* Opaque replay counter type. Does TX and RX side replay counter
* tracking. On the TX side, it returns monotonically increasing values
* of the counter and checks that the peer returned a value matching
* the one we sent. On the RX side, it makes sure that the peer sent a
* replay counter greater than the last one seen (excepting for the
* first time a check is made which the application has to special case.)
*/
/* Opaque replay-counter state (see file-header comment for semantics). */
struct sAniSsmReplayCtr {
    v_U8_t size;          /* width of the counter in octets */
    v_U8_t *buf;          /* big-endian octet form of the counter (size bytes) */
    v_U32_t currentValue; /* low 32 bits of the TX-side counter value */
    v_U8_t init;          /* NOTE(review): not referenced anywhere in this file */
};
static int
updateCtrBuf(tAniSsmReplayCtr *ctr);
/**
* aniSsmReplayCtrCreate
*
* Creates a replay counter and initializes it for first time
* use. The initialization can be done randomly or with a passed in
* value like 0. In case this is going to be used on the RX side, it
* doesn't matter what the initialization is and can be optimized to
* a fixed value so as to avoid the overhead of obtaining a random
* value.
*
* @param ctrPtr a pointer that will be set to the newly allocated
* counter if the operation succeeds
* @param size the number of bytes that are desired in the counter
* @param initValue if this is negative and size is greater than 4,
* the initialization is done randomly. Otherwise, these bytes are
* copied into the least significant four or less octets of the
* counter, depending on the size of the counter. i.e., if the counter
* is only 2B, then the least significant 2B of initValue will be
* copied over.
*
* @return ANI_OK if the operation succeeds
*/
/*
 * Allocate and initialize a replay counter (see the block comment above
 * for parameter semantics).
 *
 * Fix: the original leaked both the counter structure and its buffer when
 * vos_rand_get_bytes() failed; both allocations are now released on that
 * error path.
 *
 * NOTE(review): on the random-init path currentValue is left unset and
 * buf holds the random bytes directly; callers presumably only use such
 * counters on the RX side — confirm against callers.
 *
 * Returns ANI_OK, ANI_E_MALLOC_FAILED or ANI_ERROR.
 */
int
aniSsmReplayCtrCreate(v_U32_t cryptHandle, tAniSsmReplayCtr **ctrPtr,
                      v_U8_t size,
                      int initValue)
{
    tAniSsmReplayCtr *ctr;

    ctr = vos_mem_malloc( sizeof(tAniSsmReplayCtr) );
    if( NULL == ctr )
    {
        return ANI_E_MALLOC_FAILED;
    }

    ctr->buf = vos_mem_malloc( size );
    if (ctr->buf == NULL)
    {
        VOS_ASSERT( 0 );
        vos_mem_free(ctr);
        return ANI_E_MALLOC_FAILED;
    }
    ctr->size = size;

    // We cannot randomly generate the most significant bytes if the
    // total number of bytes is not greater than 4 (sizeof ANI_U32).
    if (initValue < 0 && ctr->size <= 4)
        initValue = 0;

    // If initValue is negative, initialize the ctr randomly, else
    // initialize it to what the user specified.
    if (initValue < 0)
    {
        if( !VOS_IS_STATUS_SUCCESS( vos_rand_get_bytes(cryptHandle, ctr->buf, ctr->size) ) )
        {
            /* free both allocations — the original code leaked them here */
            vos_mem_free(ctr->buf);
            vos_mem_free(ctr);
            return ANI_ERROR;
        }
    }
    else {
        ctr->currentValue = initValue - 1;
    }

    *ctrPtr = ctr;
    return ANI_OK;
}
/*
 * Serialize the low 32 bits of ctr->currentValue, big-endian, into the
 * tail of ctr->buf. Counters wider than 4 octets keep their upper octets
 * untouched; narrower counters receive only the corresponding low-order
 * octets. Always returns ANI_OK.
 */
static int
updateCtrBuf(tAniSsmReplayCtr *ctr)
{
    v_U32_t beValue = vos_cpu_to_be32( ctr->currentValue );
    v_U32_t copyLen = (ctr->size < 4) ? ctr->size : 4;
    v_U32_t srcSkip = 4 - copyLen;

    vos_mem_copy(ctr->buf + ctr->size - copyLen,
                 ((v_U8_t *) &beValue) + srcSkip, copyLen);
    return ANI_OK;
}
/**
* aniSsmReplayCtrCmp
*
* Used to check if the passed in value is greater
* than, less than, or the same as the previous value.
*
* Can be used on the TX side to determine if the response to a
* request contains the same counter as the one in the request.
*
* Can be used on the RX side to determine if the request has a
* counter greater than the previous request, or if this is a
* retransmission of the previous request. The application should
* special-case the first time this is called on the RX side.
*
* @param ctr the current replay counter
* @param value the value to check against
*
* @return a negative value if current ctr is less than the
* given value, zero if they are the same, and a positive value if the
* current counter is greater than that of the given value.
*/
int
aniSsmReplayCtrCmp(tAniSsmReplayCtr *ctr, v_U8_t *value)
{
    /* Octet-wise (big-endian lexicographic) comparison over the full
     * counter width; result sign follows vos_mem_compare2 semantics. */
    return vos_mem_compare2(ctr->buf, value, ctr->size);
}
/**
* aniSsmReplayCtrUpdate
*
* Used on the RX side to update the value of the current replay
* counter to that received in the next request. Typically this is
* called after it is determined that this is not a retransmission,
* and some sort of integrity checking is done on it.
*
* @param ctr the current replay counter
* @param value the value that it should be set to
*
* @return ANI_OK if the operation succeeds
*/
int
aniSsmReplayCtrUpdate(tAniSsmReplayCtr *ctr,
                      v_U8_t *value)
{
    /* RX-side update: only the raw octet buffer is overwritten;
     * currentValue is deliberately untouched (it is TX-side state). */
    vos_mem_copy(ctr->buf, value, ctr->size);
    return ANI_OK;
}
/**
* aniSsmReplayCtrNext
*
 * Used on the TX side to obtain the next value that should be sent
* with a request. After this call, the current value is incremented
* by one.
*
* @param ctr the current replay counter
* @param value where the next counter value should be copied
* into. The caller must allocated enough storage for this.
*
* @return ANI_OK if the operation succeeds
*/
int
aniSsmReplayCtrNext(tAniSsmReplayCtr *ctr,
                    v_U8_t *value)
{
    /* bump the counter, refresh its big-endian buffer form, and hand a
     * copy back to the caller (who must have allocated ctr->size bytes) */
    ctr->currentValue++;
    updateCtrBuf(ctr);
    vos_mem_copy(value, ctr->buf, ctr->size);
    return ANI_OK;
}
/**
* aniSsmReplayCtrFree
*
* Frees the replay counter context.
*
* @param ctr the replay counter to free.
*
* @return ANI_OK if the operation succeeds
*/
/*
 * Free the replay counter context and its buffer.
 *
 * Fix: tolerate a NULL counter (no-op, like free(NULL)); the original
 * dereferenced ctr->buf unconditionally and would crash on NULL input.
 *
 * Always returns ANI_OK.
 */
int
aniSsmReplayCtrFree(tAniSsmReplayCtr *ctr)
{
    if (ctr == NULL)
        return ANI_OK;

    /* keep the guard: vos_mem_free() NULL behavior is not documented here */
    if (ctr->buf != NULL)
        vos_mem_free(ctr->buf);

    vos_mem_free(ctr);
    return ANI_OK;
}
| gpl-2.0 |
gingo21/kernel_wiko_cink_slim | sound/usb/urb.c | 2168 | 25757 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "urb.h"
#include "pcm.h"
/*
* convert a sampling rate into our full speed format (fs/1000 in Q16.16)
* this will overflow at approx 524 kHz
*/
/*
 * Convert a sampling rate into the USB full-speed feedback format
 * (fs/1000 in Q16.16); overflows at approx 524 kHz. The +62 term
 * rounds the division by 125 to nearest.
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	unsigned int scaled = (rate << 13) + 62;

	return scaled / 125;
}
/*
* convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
* this will overflow at approx 4 MHz
*/
/*
 * Convert a sampling rate into the USB high-speed feedback format
 * (fs/8000 in Q16.16); overflows at approx 4 MHz. The +62 term
 * rounds the division by 125 to nearest.
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	unsigned int scaled = (rate << 10) + 62;

	return scaled / 125;
}
/*
* unlink active urbs.
*/
/*
 * Unlink or kill all active data and sync urbs of the substream.
 * @force: proceed even if the chip is flagged as shut down
 * @can_sleep: caller may block, so synchronous usb_kill_urb() is allowed
 * Returns 0, or -EBADFD when the device is gone and @force is not set.
 */
static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
{
	struct snd_usb_audio *chip = subs->stream->chip;
	unsigned int i;
	int async;

	subs->running = 0;

	if (!force && subs->stream->chip->shutdown) /* to be sure... */
		return -EBADFD;

	/* use asynchronous unlink only when we must not sleep and the
	 * driver was configured for it */
	async = !can_sleep && chip->async_unlink;

	/* synchronous kill is impossible in interrupt context; in that
	 * case just let the in-flight urbs run out by themselves */
	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < subs->nurbs; i++) {
		if (test_bit(i, &subs->active_mask)) {
			/* unlink_mask prevents unlinking the same urb twice */
			if (!test_and_set_bit(i, &subs->unlink_mask)) {
				struct urb *u = subs->dataurb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}
	if (subs->syncpipe) {
		/* sync urbs live at bit offset 16 of the active/unlink masks */
		for (i = 0; i < SYNC_URBS; i++) {
			if (test_bit(i+16, &subs->active_mask)) {
				if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
					struct urb *u = subs->syncurb[i].urb;
					if (async)
						usb_unlink_urb(u);
					else
						usb_kill_urb(u);
				}
			}
		}
	}
	return 0;
}
/*
* release a urb data
*/
/*
 * Free one urb context: release its coherent transfer buffer (when one
 * was allocated) and the urb itself, then clear the pointer so the
 * context can be released again safely.
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (!u->urb)
		return;

	if (u->buffer_size)
		usb_free_coherent(u->subs->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}
/*
* wait until all urbs are processed.
*/
/*
 * Busy-wait (up to one second) until all data and sync urbs have
 * completed. Always returns 0; a timeout is only logged.
 */
static int wait_clear_urbs(struct snd_usb_substream *subs)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < subs->nurbs; i++) {
			if (test_bit(i, &subs->active_mask))
				alive++;
		}
		if (subs->syncpipe) {
			/* sync urbs occupy bits 16.. of active_mask */
			for (i = 0; i < SYNC_URBS; i++) {
				if (test_bit(i + 16, &subs->active_mask))
					alive++;
			}
		}
		if (! alive)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
	return 0;
}
/*
* release a substream
*/
/*
 * Stop all urbs of a substream, wait for their completion, then free
 * every data/sync urb context and the shared coherent sync buffer.
 * @force is passed through to deactivate_urbs().
 */
void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
{
	int i;

	/* stop urbs (to be sure) */
	deactivate_urbs(subs, force, 1);
	wait_clear_urbs(subs);

	for (i = 0; i < MAX_URBS; i++)
		release_urb_ctx(&subs->dataurb[i]);
	for (i = 0; i < SYNC_URBS; i++)
		release_urb_ctx(&subs->syncurb[i]);
	usb_free_coherent(subs->dev, SYNC_URBS * 4,
			  subs->syncbuf, subs->sync_dma);
	subs->syncbuf = NULL;
	subs->nurbs = 0;
}
/*
* complete callback from data urb
*/
/*
 * Completion callback for a data urb: retire the finished transfer,
 * prepare the next one and resubmit it. If any step fails, or the
 * stream was stopped meanwhile, the urb is marked inactive; an actual
 * error additionally stops the pcm stream with an XRUN.
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}
/*
* complete callback from sync urb
*/
/*
 * Completion callback for a sync urb; same retire/prepare/resubmit cycle
 * as snd_complete_urb(), but sync urbs use bits 16.. of active_mask.
 */
static void snd_complete_sync_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index + 16, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}
/*
 * initialize a substream for playback/capture
*/
/*
 * Set up the urbs of a substream for the given period size, rate and
 * sample format width: compute the nominal/maximum Q16.16 frequencies,
 * decide packet and urb counts, and allocate all data (and, if needed,
 * sync) urbs with their coherent buffers.
 * Returns 0 or -ENOMEM (all partial allocations are released on failure).
 */
int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
				unsigned int period_bytes,
				unsigned int rate,
				unsigned int frame_bits)
{
	unsigned int maxsize, i;
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	unsigned int urb_packs, total_packs, packs_per_ms;
	struct snd_usb_audio *chip = subs->stream->chip;

	/* calculate the frequency in 16.16 format */
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
		subs->freqn = get_usb_full_speed_rate(rate);
	else
		subs->freqn = get_usb_high_speed_rate(rate);
	subs->freqm = subs->freqn;
	subs->freqshift = INT_MIN;	/* feedback format autodetected later */
	/* calculate max. frequency */
	if (subs->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = subs->maxpacksize;
		subs->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - subs->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		subs->freqmax = subs->freqn + (subs->freqn >> 2);
		maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - subs->datainterval);
	}
	subs->phase = 0;

	if (subs->fill_max)
		subs->curpacksize = subs->maxpacksize;
	else
		subs->curpacksize = maxsize;

	/* high/super speed buses carry up to 8 (micro)frames per ms */
	if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
		packs_per_ms = 8 >> subs->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback) {
		urb_packs = max(chip->nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
	} else
		urb_packs = 1;
	urb_packs *= packs_per_ms;
	if (subs->syncpipe)
		urb_packs = min(urb_packs, 1U << subs->syncinterval);

	/* decide how many packets to be used */
	if (is_playback) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (subs->freqn >> (16 - subs->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (subs->syncpipe)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		/* capture: shrink the urb until it no longer spans a period */
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}
	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (subs->nurbs > MAX_URBS) {
		/* too much... */
		subs->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (subs->nurbs < 2) {
		/* too little - we need at least two packets
		 * to ensure contiguous playback/capture
		 */
		subs->nurbs = 2;
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < subs->nurbs; i++) {
		struct snd_urb_ctx *u = &subs->dataurb[i];
		u->index = i;
		u->subs = subs;
		/* distribute total_packs evenly over the urbs */
		u->packets = (i + 1) * total_packs / subs->nurbs
			- i * total_packs / subs->nurbs;
		u->buffer_size = maxsize * u->packets;
		if (subs->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer =
			usb_alloc_coherent(subs->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = subs->datapipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << subs->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	if (subs->syncpipe) {
		/* allocate and initialize sync urbs */
		subs->syncbuf = usb_alloc_coherent(subs->dev, SYNC_URBS * 4,
						   GFP_KERNEL, &subs->sync_dma);
		if (!subs->syncbuf)
			goto out_of_memory;
		for (i = 0; i < SYNC_URBS; i++) {
			struct snd_urb_ctx *u = &subs->syncurb[i];
			u->index = i;
			u->subs = subs;
			u->packets = 1;
			u->urb = usb_alloc_urb(1, GFP_KERNEL);
			if (!u->urb)
				goto out_of_memory;
			/* each sync urb gets a 4-byte slice of syncbuf */
			u->urb->transfer_buffer = subs->syncbuf + i * 4;
			u->urb->transfer_dma = subs->sync_dma + i * 4;
			u->urb->transfer_buffer_length = 4;
			u->urb->pipe = subs->syncpipe;
			u->urb->transfer_flags = URB_ISO_ASAP |
						 URB_NO_TRANSFER_DMA_MAP;
			u->urb->number_of_packets = 1;
			u->urb->interval = 1 << subs->syncinterval;
			u->urb->context = u;
			u->urb->complete = snd_complete_sync_urb;
		}
	}
	return 0;

out_of_memory:
	snd_usb_release_substream_urbs(subs, 0);
	return -ENOMEM;
}
/*
* prepare urb for full speed capture sync pipe
*
* fill the length and offset of each urb descriptor.
* the fixed 10.14 frequency is passed through the pipe.
*/
/*
 * Prepare a full-speed capture sync urb: a single 3-byte packet carrying
 * the nominal frequency in 10.14 format (freqn is stored as 16.16, hence
 * the extra 2-bit shift). Always returns 0.
 */
static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->iso_frame_desc[0].length = 3;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn >> 2;
	cp[1] = subs->freqn >> 10;
	cp[2] = subs->freqn >> 18;
	return 0;
}
/*
* prepare urb for high speed capture sync pipe
*
* fill the length and offset of each urb descriptor.
* the fixed 12.13 frequency is passed as 16.16 through the pipe.
*/
/*
 * Prepare a high-speed capture sync urb: a single 4-byte packet carrying
 * the nominal frequency (freqn) little-endian in 16.16 format.
 * Always returns 0.
 */
static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->iso_frame_desc[0].length = 4;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn;
	cp[1] = subs->freqn >> 8;
	cp[2] = subs->freqn >> 16;
	cp[3] = subs->freqn >> 24;
	return 0;
}
/*
* process after capture sync complete
* - nothing to do
*/
/* Capture sync completion hook: intentionally a no-op, always returns 0. */
static int retire_capture_sync_urb(struct snd_usb_substream *subs,
				   struct snd_pcm_runtime *runtime,
				   struct urb *urb)
{
	return 0;
}
/*
* prepare urb for capture data pipe
*
* fill the offset and length of each descriptor.
*
* we use a temporary buffer to write the captured data.
* since the length of written data is determined by host, we cannot
* write onto the pcm buffer directly... the data is thus copied
* later at complete callback to the global buffer.
*/
/*
 * Prepare a capture data urb: lay out ctx->packets equal-sized slots of
 * curpacksize bytes in the temporary transfer buffer. The host decides
 * how much actually arrives; the data is copied to the pcm buffer later
 * in the retire callback. Always returns 0.
 */
static int prepare_capture_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	int offset = 0;
	int i;

	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	for (i = 0; i < ctx->packets; i++) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = subs->curpacksize;
		offset += subs->curpacksize;
	}
	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offset;
	return 0;
}
/*
* process after capture complete
*
 * copy the data from each descriptor to the pcm buffer, and
* update the current position.
*/
/*
 * Capture data completion: copy each iso descriptor's payload from the
 * temporary urb buffer into the (circular) pcm buffer, advance
 * hwptr_done/transfer_done under the substream lock, and signal a period
 * elapsed when one completed. Always returns 0.
 */
static int retire_capture_urb(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime,
			      struct urb *urb)
{
	unsigned long flags;
	unsigned char *cp;
	int i;
	unsigned int stride, frames, bytes, oldptr;
	int period_elapsed = 0;

	stride = runtime->frame_bits >> 3;

	for (i = 0; i < urb->number_of_packets; i++) {
		cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
		if (urb->iso_frame_desc[i].status) {
			/* errored packets are logged but still processed */
			snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
			// continue;
		}
		bytes = urb->iso_frame_desc[i].actual_length;
		frames = bytes / stride;
		if (!subs->txfr_quirk)
			bytes = frames * stride;
		if (bytes % (runtime->sample_bits >> 3) != 0) {
			/* round a partial sample down to a whole frame count */
#ifdef CONFIG_SND_DEBUG_VERBOSE
			int oldbytes = bytes;
#endif
			bytes = frames * stride;
			snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
							oldbytes, bytes);
		}
		/* update the current pointer */
		spin_lock_irqsave(&subs->lock, flags);
		oldptr = subs->hwptr_done;
		subs->hwptr_done += bytes;
		if (subs->hwptr_done >= runtime->buffer_size * stride)
			subs->hwptr_done -= runtime->buffer_size * stride;
		frames = (bytes + (oldptr % stride)) / stride;
		subs->transfer_done += frames;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
		}
		spin_unlock_irqrestore(&subs->lock, flags);
		/* copy a data chunk */
		if (oldptr + bytes > runtime->buffer_size * stride) {
			/* chunk wraps around the end of the circular buffer */
			unsigned int bytes1 =
				runtime->buffer_size * stride - oldptr;
			memcpy(runtime->dma_area + oldptr, cp, bytes1);
			memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
		} else {
			memcpy(runtime->dma_area + oldptr, cp, bytes);
		}
	}
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}
/*
* Process after capture complete when paused. Nothing to do.
*/
/* Capture completion hook while paused: discard the data, always return 0. */
static int retire_paused_capture_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	return 0;
}
/*
* prepare urb for playback sync pipe
*
* set up the offset and length to receive the current frequency.
*/
/*
 * Prepare a playback sync urb: set up a single packet large enough to
 * receive the device's current frequency feedback (at most 4 bytes,
 * limited by the endpoint's syncmaxsize). Always returns 0.
 */
static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
	urb->iso_frame_desc[0].offset = 0;
	return 0;
}
/*
* process after playback sync complete
*
* Full speed devices report feedback values in 10.14 format as samples per
* frame, high speed devices in 16.16 format as samples per microframe.
* Because the Audio Class 1 spec was written before USB 2.0, many high speed
* devices use a wrong interpretation, some others use an entirely different
* format. Therefore, we cannot predict what format any particular device uses
* and must detect it automatically.
*/
/*
 * Playback sync completion: read the device's feedback frequency,
 * autodetect its fixed-point format on first use (see block comment
 * above), and store a plausible value in subs->freqm for the packet-size
 * calculation in prepare_playback_urb(). Always returns 0.
 */
static int retire_playback_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned int f;
	int shift;
	unsigned long flags;

	/* ignore errored or too-short feedback packets */
	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return 0;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;	/* 3-byte feedback value */
	else
		f &= 0x0fffffff;	/* 4-byte feedback value */
	if (f == 0)
		return 0;

	if (unlikely(subs->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
		shift = 0;
		while (f < subs->freqn - subs->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > subs->freqn + subs->freqn / 2) {
			f >>= 1;
			shift--;
		}
		subs->freqshift = shift;
	}
	else if (subs->freqshift >= 0)
		f <<= subs->freqshift;
	else
		f >>= -subs->freqshift;

	if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&subs->lock, flags);
		subs->freqm = f;
		spin_unlock_irqrestore(&subs->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		subs->freqshift = INT_MIN;
	}

	return 0;
}
/* determine the number of frames in the next packet */
/*
 * Determine the number of frames for the next playback packet. In
 * fill_max mode every packet is maximum-sized; otherwise a Q16.16 phase
 * accumulator driven by the measured frequency (freqm) yields the frame
 * count in its integer part, capped at maxframesize.
 */
static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
{
	unsigned int phase;

	if (subs->fill_max)
		return subs->maxframesize;

	/* carry the fractional part, add one packet interval's worth */
	phase = (subs->phase & 0xffff) + (subs->freqm << subs->datainterval);
	subs->phase = phase;
	return min(phase >> 16, subs->maxframesize);
}
/*
* Prepare urb for streaming before playback starts or when paused.
*
* We don't have any data, so we send silence.
*/
/*
 * Prepare a playback urb filled with silence, used before the stream
 * starts and while paused. Silence is 0x80 for unsigned 8-bit samples,
 * zero otherwise. Always returns 0.
 */
static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned int i, offs, counts;
	struct snd_urb_ctx *ctx = urb->context;
	int stride = runtime->frame_bits >> 3;

	offs = 0;
	urb->dev = ctx->subs->dev;
	for (i = 0; i < ctx->packets; ++i) {
		counts = snd_usb_audio_next_packet_size(subs);
		urb->iso_frame_desc[i].offset = offs * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		offs += counts;
	}
	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * stride;
	memset(urb->transfer_buffer,
	       runtime->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
	       offs * stride);
	return 0;
}
/*
* prepare urb for playback data pipe
*
* Since a URB can handle only a single linear buffer, we must use double
* buffering when the data to be transferred overflows the buffer boundary.
* To avoid inconsistencies when updating hwptr_done, we use double buffering
* for all URBs.
*/
/*
 * Prepare a playback data urb: size each iso packet via the phase
 * accumulator, stop at the period boundary (inserting a zero-length
 * transfer delimiter for FORMAT_TYPE_II streams), then copy the audio
 * data from the circular pcm buffer into the urb's own buffer (double
 * buffering, see block comment above). Always returns 0.
 */
static int prepare_playback_urb(struct snd_usb_substream *subs,
				struct snd_pcm_runtime *runtime,
				struct urb *urb)
{
	int i, stride;
	unsigned int counts, frames, bytes;
	unsigned long flags;
	int period_elapsed = 0;
	struct snd_urb_ctx *ctx = urb->context;

	stride = runtime->frame_bits >> 3;

	frames = 0;
	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->number_of_packets = 0;
	spin_lock_irqsave(&subs->lock, flags);
	for (i = 0; i < ctx->packets; i++) {
		counts = snd_usb_audio_next_packet_size(subs);
		/* set up descriptor */
		urb->iso_frame_desc[i].offset = frames * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		frames += counts;
		urb->number_of_packets++;
		subs->transfer_done += counts;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
			if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
				if (subs->transfer_done > 0) {
					/* FIXME: fill-max mode is not
					 * supported yet */
					/* shorten the last packet so the urb
					 * ends exactly on the period boundary */
					frames -= subs->transfer_done;
					counts -= subs->transfer_done;
					urb->iso_frame_desc[i].length =
						counts * stride;
					subs->transfer_done = 0;
				}
				i++;
				if (i < ctx->packets) {
					/* add a transfer delimiter */
					urb->iso_frame_desc[i].offset =
						frames * stride;
					urb->iso_frame_desc[i].length = 0;
					urb->number_of_packets++;
				}
				break;
			}
		}
		if (period_elapsed) /* finish at the period boundary */
			break;
	}
	bytes = frames * stride;
	if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
		/* err, the transferred area goes over buffer boundary. */
		unsigned int bytes1 =
			runtime->buffer_size * stride - subs->hwptr_done;
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes1);
		memcpy(urb->transfer_buffer + bytes1,
		       runtime->dma_area, bytes - bytes1);
	} else {
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes);
	}
	subs->hwptr_done += bytes;
	if (subs->hwptr_done >= runtime->buffer_size * stride)
		subs->hwptr_done -= runtime->buffer_size * stride;
	/* queued frames count towards the delay until their urb retires */
	runtime->delay += frames;
	spin_unlock_irqrestore(&subs->lock, flags);
	urb->transfer_buffer_length = bytes;
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}
/*
* process after playback data complete
* - decrease the delay count again
*/
/*
 * Playback data completion: subtract the frames just transferred from
 * the runtime delay estimate (added in prepare_playback_urb()),
 * clamping at zero. Always returns 0.
 */
static int retire_playback_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	int frame_bytes = runtime->frame_bits >> 3;
	int done_frames = urb->transfer_buffer_length / frame_bytes;
	unsigned long flags;

	spin_lock_irqsave(&subs->lock, flags);
	if (runtime->delay < done_frames)
		runtime->delay = 0;
	else
		runtime->delay -= done_frames;
	spin_unlock_irqrestore(&subs->lock, flags);
	return 0;
}
/*
 * Map a negative USB submission errno to a short human-readable string
 * for error logging.
 */
static const char *usb_error_string(int err)
{
	static const struct {
		int code;
		const char *msg;
	} messages[] = {
		{ -ENODEV,	 "no device" },
		{ -ENOENT,	 "endpoint not enabled" },
		{ -EPIPE,	 "endpoint stalled" },
		{ -ENOSPC,	 "not enough bandwidth" },
		{ -ESHUTDOWN,	 "device disabled" },
		{ -EHOSTUNREACH, "device suspended" },
		{ -EINVAL,	 "internal error" },
		{ -EAGAIN,	 "internal error" },
		{ -EFBIG,	 "internal error" },
		{ -EMSGSIZE,	 "internal error" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(messages) / sizeof(messages[0]); i++) {
		if (messages[i].code == err)
			return messages[i].msg;
	}
	return "unknown error";
}
/*
* set up and start data/sync urbs
*/
/*
 * Prepare and submit all data (and sync) urbs of a substream, marking
 * each as active. On any failure everything is deactivated again.
 * Returns 0, -EBADFD when shut down, -EINVAL on a missing urb, or
 * -EPIPE on any prepare/submit failure.
 */
static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
{
	unsigned int i;
	int err;

	if (subs->stream->chip->shutdown)
		return -EBADFD;

	/* first prepare every urb... */
	for (i = 0; i < subs->nurbs; i++) {
		if (snd_BUG_ON(!subs->dataurb[i].urb))
			return -EINVAL;
		if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
			snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
			goto __error;
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (snd_BUG_ON(!subs->syncurb[i].urb))
				return -EINVAL;
			if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
				snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
				goto __error;
			}
		}
	}

	/* ...then submit them all, flagging running beforehand so the
	 * completion handlers keep the chain going */
	subs->active_mask = 0;
	subs->unlink_mask = 0;
	subs->running = 1;
	for (i = 0; i < subs->nurbs; i++) {
		err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit datapipe "
				   "for urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &subs->active_mask);
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
			if (err < 0) {
				snd_printk(KERN_ERR "cannot submit syncpipe "
					   "for urb %d, error %d: %s\n",
					   i, err, usb_error_string(err));
				goto __error;
			}
			set_bit(i + 16, &subs->active_mask);
		}
	}
	return 0;

 __error:
	// snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
	deactivate_urbs(subs, 0, 0);
	return -EPIPE;
}
/*
*/
/*
 * Default urb callback sets per direction:
 * [SNDRV_PCM_STREAM_PLAYBACK] and [SNDRV_PCM_STREAM_CAPTURE].
 * The prepare/retire hooks are swapped at runtime by the trigger
 * callbacks below.
 */
static struct snd_urb_ops audio_urb_ops[2] = {
	{
		.prepare = prepare_nodata_playback_urb,
		.retire = retire_playback_urb,
		.prepare_sync = prepare_playback_sync_urb,
		.retire_sync = retire_playback_sync_urb,
	},
	{
		.prepare = prepare_capture_urb,
		.retire = retire_capture_urb,
		.prepare_sync = prepare_capture_sync_urb,
		.retire_sync = retire_capture_sync_urb,
	},
};
/*
* initialize the substream instance.
*/
/*
 * Initialize one substream instance of a stream: set up its lists,
 * lock, urb callback set and pcm ops, and record the first audio
 * format descriptor.
 */
void snd_usb_init_substream(struct snd_usb_stream *as,
			    int stream, struct audioformat *fp)
{
	struct snd_usb_substream *subs = &as->substream[stream];

	INIT_LIST_HEAD(&subs->fmt_list);
	spin_lock_init(&subs->lock);

	subs->stream = as;
	subs->direction = stream;
	subs->dev = as->chip->dev;
	subs->txfr_quirk = as->chip->txfr_quirk;
	subs->ops = audio_urb_ops[stream];
	/* high-speed devices report capture sync feedback in 16.16 format */
	if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
		subs->ops.prepare_sync = prepare_capture_sync_urb_hs;

	snd_usb_set_pcm_ops(as->pcm, stream);

	list_add_tail(&fp->list, &subs->fmt_list);
	subs->formats |= fp->formats;
	subs->endpoint = fp->endpoint;
	subs->num_formats++;
	subs->fmt_type = fp->fmt_type;
}
/*
 * Playback trigger callback: the urbs are already running (submitted in
 * snd_usb_substream_prepare()), so start/pause merely swap the prepare
 * hook between real data and silence, while stop deactivates the urbs.
 * Returns 0, a deactivate_urbs() result, or -EINVAL for unknown commands.
 */
int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	if (cmd == SNDRV_PCM_TRIGGER_START ||
	    cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) {
		subs->ops.prepare = prepare_playback_urb;
		return 0;
	}
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		return deactivate_urbs(subs, 0, 0);
	if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH) {
		subs->ops.prepare = prepare_nodata_playback_urb;
		return 0;
	}
	return -EINVAL;
}
/*
 * Capture trigger callback: start submits the urbs with the real retire
 * hook installed, pause/release swap the retire hook between keeping and
 * discarding the data, and stop deactivates the urbs.
 * Returns 0, a start/deactivate result, or -EINVAL for unknown commands.
 */
int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	if (cmd == SNDRV_PCM_TRIGGER_START) {
		subs->ops.retire = retire_capture_urb;
		return start_urbs(subs, substream->runtime);
	}
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		return deactivate_urbs(subs, 0, 0);
	if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH) {
		subs->ops.retire = retire_paused_capture_urb;
		return 0;
	}
	if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) {
		subs->ops.retire = retire_capture_urb;
		return 0;
	}
	return -EINVAL;
}
/*
 * PCM prepare callback helper: make sure no urbs are in flight, then,
 * for playback only, start the urb chain immediately (sending silence
 * until the trigger switches to real data).
 * Returns 0 or a start_urbs() error.
 */
int snd_usb_substream_prepare(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime)
{
	/* clear urbs (to be sure) */
	deactivate_urbs(subs, 0, 1);
	wait_clear_urbs(subs);

	/* for playback, submit the URBs now; otherwise, the first hwptr_done
	 * updates for all URBs would happen at the same time when starting */
	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		subs->ops.prepare = prepare_nodata_playback_urb;
		return start_urbs(subs, runtime);
	}
	return 0;
}
| gpl-2.0 |
nimon/m8_kernel | fs/ext2/dir.c | 2168 | 18239 | /*
* linux/fs/ext2/dir.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/dir.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext2 directory handling functions
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* All code that works with directory layout had been switched to pagecache
* and moved here. AV
*/
#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
typedef struct ext2_dir_entry_2 ext2_dirent;
/*
* Tests against MAX_REC_LEN etc were put in place for 64k block
* sizes; if that is not possible on this arch, we can skip
* those tests and speed things up.
*/
/* Decode an on-disk rec_len. EXT2_MAX_REC_LEN is a sentinel for a
 * 65536-byte record, which only exists with 64KiB blocks. */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_CACHE_SIZE >= 65536)
	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;		/* sentinel: record spans 64KiB */
#endif
	return len;
}
/* Encode a rec_len for disk; 65536 is stored as the EXT2_MAX_REC_LEN
 * sentinel, anything larger is a caller bug. */
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
#if (PAGE_CACHE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}
/*
* ext2 uses block-sized chunks. Arguably, sector-sized ones would be
* more robust, but we have what we have
*/
/* Directory "chunk" size: directory entries never cross a chunk
 * (= filesystem block) boundary. */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}
/* Undo ext2_get_page(): drop the kmap and the page reference. */
static inline void ext2_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
/* Number of pagecache pages covering the directory (rounded up). */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
/*
* Return the offset into page `page_nr' of the last valid
* byte in that page, plus one.
*/
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	/* all pages but the final one are completely valid */
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}
/* Finish a directory-page modification started by ext2_prepare_chunk():
 * mark the range dirty, grow i_size if needed, and for DIRSYNC
 * directories write the page and inode metadata synchronously.
 * The page is unlocked on return. Returns 0 or a negative errno. */
static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	dir->i_version++;	/* invalidates cached readdir positions */
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir)) {
		/* write_one_page() unlocks the page itself */
		err = write_one_page(page, 1);
		if (!err)
			err = sync_inode_metadata(dir, 1);
	} else {
		unlock_page(page);
	}
	return err;
}
/* Validate every directory entry in a freshly-read page: record lengths
 * must be sane, aligned, within one chunk, and inode numbers in range.
 * Sets PageChecked on success; PageChecked+PageError on failure (with a
 * diagnostic unless @quiet). */
static void ext2_check_page(struct page *page, int quiet)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = ext2_chunk_size(dir);
	char *kaddr = page_address(page);
	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	ext2_dirent *p;
	char *error;

	/* the last page may be only partially valid; i_size bounds it */
	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	/* walk entries; loop bound leaves room for a minimal entry */
	for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
		p = (ext2_dirent *)(kaddr + offs);
		rec_len = ext2_rec_len_from_disk(p->rec_len);
		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
			goto Eshort;
		if (unlikely(rec_len & 3))
			goto Ealign;	/* rec_len must be 4-byte aligned */
		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
			goto Enamelen;
		/* entry must not cross a chunk (block) boundary */
		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
			goto Espan;
		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;	/* last entry didn't land exactly on limit */
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	if (!quiet)
		ext2_error(sb, __func__,
			"size of directory #%lu is not a multiple "
			"of chunk size", dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	if (!quiet)
		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode),
			rec_len, p->name_len);
	goto fail;
Eend:
	if (!quiet) {
		p = (ext2_dirent *)(kaddr + offs);
		ext2_error(sb, "ext2_check_page",
			"entry in directory #%lu spans the page boundary"
			"offset=%lu, inode=%lu",
			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode));
	}
fail:
	/* PageChecked + PageError marks the page as known-bad */
	SetPageChecked(page);
	SetPageError(page);
}
/* Read directory page @n into the pagecache, kmap it, and validate it
 * (once per page, cached via PageChecked). Returns the mapped page or
 * ERR_PTR; on success the caller must release it with ext2_put_page(). */
static struct page * ext2_get_page(struct inode *dir, unsigned long n,
				   int quiet)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageChecked(page))
			ext2_check_page(page, quiet);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	ext2_put_page(page);
	return ERR_PTR(-EIO);
}
/*
* NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
*
* len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
*/
/* Name comparison for lookup. Returns 1 on match, 0 otherwise — note
 * this is the inverse of strncmp's convention. Caller guarantees
 * len <= EXT2_NAME_LEN and de != NULL. */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	/* deleted entries (inode == 0) never match */
	if (len != de->name_len || !de->inode)
		return 0;
	return memcmp(name, de->name, len) == 0;
}
/*
* p is at least 6 bytes before the end of page
*/
/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	/* advance by the on-disk record length */
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}
/* Re-validate a readdir offset after i_version changed: walk entries
 * from the start of the containing chunk and return the offset of the
 * first entry at or after the cached position. */
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	/* start from the beginning of the chunk the offset falls in */
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;	/* corrupt entry: stop rather than loop */
		p = ext2_next_entry(p);
	}
	return (char *)p - base;
}
/* Map on-disk EXT2_FT_* file types to the DT_* values readdir reports. */
static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
	[EXT2_FT_UNKNOWN]	= DT_UNKNOWN,
	[EXT2_FT_REG_FILE]	= DT_REG,
	[EXT2_FT_DIR]		= DT_DIR,
	[EXT2_FT_CHRDEV]	= DT_CHR,
	[EXT2_FT_BLKDEV]	= DT_BLK,
	[EXT2_FT_FIFO]		= DT_FIFO,
	[EXT2_FT_SOCK]		= DT_SOCK,
	[EXT2_FT_SYMLINK]	= DT_LNK,
};
/* Map i_mode's S_IF* type bits (shifted down) to on-disk EXT2_FT_*. */
#define S_SHIFT 12
static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT2_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT2_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT2_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT2_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT2_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT2_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT2_FT_SYMLINK,
};
/* Stamp the directory entry's file_type from the inode mode; 0 when the
 * filesystem lacks the FILETYPE incompat feature. */
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	umode_t mode = inode->i_mode;
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
	else
		de->file_type = 0;
}
/* readdir implementation: walk directory pages from f_pos, emitting one
 * filldir callback per live entry. f_pos encodes (page << shift) | offset.
 * Re-validates the cached offset when i_version changed under us. */
static int
ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	unsigned char *types = NULL;
	/* directory changed since our f_pos was computed? */
	int need_revalidate = filp->f_version != inode->i_version;

	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;	/* past the last possible entry */

	if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		types = ext2_filetype_table;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		ext2_dirent *de;
		struct page *page = ext2_get_page(inode, n, 0);

		if (IS_ERR(page)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			/* skip the bad page so a retry makes progress */
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			return PTR_ERR(page);
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				/* snap offset to a real entry boundary */
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
			}
			filp->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page);
				return -EIO;
			}
			if (de->inode) {	/* skip deleted entries */
				int over;
				unsigned char d_type = DT_UNKNOWN;

				if (types && de->file_type < EXT2_FT_MAX)
					d_type = types[de->file_type];

				offset = (char *)de - kaddr;
				over = filldir(dirent, de->name, de->name_len,
						(n<<PAGE_CACHE_SHIFT) | offset,
						le32_to_cpu(de->inode), d_type);
				if (over) {	/* user buffer full */
					ext2_put_page(page);
					return 0;
				}
			}
			filp->f_pos += ext2_rec_len_from_disk(de->rec_len);
		}
		ext2_put_page(page);
	}
	return 0;
}
/*
* ext2_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the page in which the entry was found (as a parameter - res_page),
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
/* Find @child's entry in @dir. Scans pages circularly starting from the
 * cached i_dir_start_lookup hint. On success returns the entry and its
 * mapped page via @res_page (caller releases with ext2_put_page());
 * returns NULL if not found or on error. */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
			struct qstr *child, struct page ** res_page)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;
	int dir_has_error = 0;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	/* resume where the last successful lookup left off */
	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ext2_get_page(dir, n, dir_has_error);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (ext2_dirent *) kaddr;
			/* last position a full entry of our size can start */
			kaddr += ext2_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->rec_len == 0) {
					ext2_error(dir->i_sb, __func__,
						"zero-length directory entry");
					ext2_put_page(page);
					goto out;
				}
				if (ext2_match (namelen, name, de))
					goto found;
				de = ext2_next_entry(de);
			}
			ext2_put_page(page);
		} else
			dir_has_error = 1;	/* quiet further page errors */

		if (++n >= npages)
			n = 0;	/* wrap around to keep scanning */
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ei->i_dir_start_lookup = n;	/* remember hint for next lookup */
	return de;
}
/* Return the ".." entry of @dir — always the second entry of the first
 * directory page. On success *p holds the mapped page (caller releases
 * via ext2_put_page()); returns NULL on page read error. */
struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = ext2_get_page(dir, 0, 0);

	if (IS_ERR(page))
		return NULL;
	*p = page;
	/* skip "." — ".." immediately follows it */
	return ext2_next_entry((ext2_dirent *) page_address(page));
}
ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child)
{
ino_t res = 0;
struct ext2_dir_entry_2 *de;
struct page *page;
de = ext2_find_entry (dir, child, &page);
if (de) {
res = le32_to_cpu(de->inode);
ext2_put_page(page);
}
return res;
}
/* Prepare a range of a locked directory page for writing (maps/allocates
 * the underlying blocks). Paired with ext2_commit_chunk(). */
static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ext2_get_block);
}
/* Repoint an existing directory entry @de at @inode (used by rename).
 * Releases the page. BUG()s if the in-place prepare fails, which cannot
 * happen for already-allocated directory blocks. */
void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		   struct page *page, struct inode *inode, int update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	lock_page(page);
	err = ext2_prepare_chunk(page, pos, len);
	BUG_ON(err);	/* blocks exist, prepare cannot fail */
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	err = ext2_commit_chunk(page, pos, len);
	ext2_put_page(page);
	if (update_times)
		dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
}
/*
* Parent is locked.
*/
/* Add a directory entry for @dentry pointing at @inode. Finds free
 * space by scanning existing entries (reusing slack in oversized
 * records), extending the directory by one chunk if needed. Caller
 * holds the parent's i_mutex. Returns 0 or -errno (-EEXIST if the
 * name is already present). */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	/* n == npages probes one page past i_size for expansion */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ext2_get_page(dir, n, 0);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				/* append a fresh chunk-sized empty entry */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			/* deleted entry big enough? */
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			/* live entry with enough trailing slack? */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ext2_put_page(page);
	}
	BUG();	/* the loop above must always find or create space */
	return -EINVAL;

got_it:
	pos = page_offset(page) +
		(char*)de - (char*)page_address(page);
	err = ext2_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->inode) {
		/* split the slack off the live entry into a new record */
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	err = ext2_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ext2_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
/*
* ext2_delete_entry deletes a directory entry by merging it with the
* previous entry. Page is up-to-date. Releases the page.
*/
/* Delete directory entry @dir by absorbing its record length into the
 * preceding entry in the same chunk (or just zeroing its inode when it
 * is the first entry). Page is up-to-date; released on return. */
int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
{
	struct inode *inode = page->mapping->host;
	char *kaddr = page_address(page);
	/* start of the chunk containing the doomed entry */
	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
	unsigned to = ((char *)dir - kaddr) +
				ext2_rec_len_from_disk(dir->rec_len);
	loff_t pos;
	ext2_dirent * pde = NULL;	/* entry preceding @dir, if any */
	ext2_dirent * de = (ext2_dirent *) (kaddr + from);
	int err;

	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);
	pos = page_offset(page) + from;
	lock_page(page);
	err = ext2_prepare_chunk(page, pos, to - from);
	BUG_ON(err);	/* blocks already allocated */
	if (pde)
		/* stretch the previous record over the deleted one */
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	err = ext2_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
out:
	ext2_put_page(page);
	return err;
}
/*
* Set the first fragment of directory.
*/
/* Initialize a new directory's first block with the "." and ".."
 * entries ("." pointing at @inode, ".." at @parent). Returns 0 or
 * a negative errno. */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = ext2_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page);
	memset(kaddr, 0, chunk_size);
	/* "." entry at offset 0 */
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	/* ".." entry consumes the rest of the chunk */
	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_atomic(kaddr);
	err = ext2_commit_chunk(page, 0, chunk_size);
fail:
	page_cache_release(page);
	return err;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*/
/*
 * routine to check that the specified directory is empty (for rmdir)
 * Returns 1 when only "." and ".." remain (treating corruption as
 * non-empty so rmdir refuses), 0 otherwise.
 */
int ext2_empty_dir (struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	int dir_has_error = 0;

	for (i = 0; i < npages; i++) {
		char *kaddr;
		ext2_dirent * de;
		page = ext2_get_page(inode, i, dir_has_error);

		if (IS_ERR(page)) {
			dir_has_error = 1;	/* quiet subsequent errors */
			continue;
		}

		kaddr = page_address(page);
		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					/* "." must reference ourselves */
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		ext2_put_page(page);
	}
	return 1;

not_empty:
	ext2_put_page(page);
	return 0;
}
/* file_operations for ext2 directories: directories are read via
 * readdir only; generic_read_dir rejects plain read(2) with -EISDIR. */
const struct file_operations ext2_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
};
| gpl-2.0 |
denggww123/IMX6_DB_Kernel_3.0.35 | drivers/media/video/videobuf2-dma-sg.c | 2424 | 7268 | /*
* videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
*
* Copyright (C) 2010 Samsung Electronics
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
/* Per-buffer state for the scatter/gather videobuf2 allocator. */
struct vb2_dma_sg_buf {
	void				*vaddr;		/* kernel mapping (lazy, vm_map_ram) */
	struct page			**pages;	/* page array backing sg_desc */
	int				write;		/* userptr mapped for write? */
	int				offset;		/* userptr offset within first page */
	struct vb2_dma_sg_desc		sg_desc;	/* sg list handed out via cookie() */
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;	/* mmap refcount plumbing */
};
static void vb2_dma_sg_put(void *buf_priv);

/* MMAP-mode allocation: back @size bytes with individually allocated
 * (non-contiguous) pages, build the sg list and a kernel mapping.
 * Returns the buffer handle or NULL; unwinds fully on any failure. */
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
	       sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	/* one page at a time — pages need not be contiguous */
	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
		if (NULL == buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);	/* allocator's own reference */
	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
		__func__, buf->sg_desc.num_pages);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);
	return buf;

fail_pages_alloc:
	while (--i >= 0)	/* free pages allocated so far */
		__free_page(buf->pages[i]);
	kfree(buf->pages);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
/* Drop one reference to an MMAP-mode buffer; the last put releases the
 * kernel mapping, all pages, the sg list and the handle. */
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
			buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}
/* USERPTR-mode setup: pin the user pages covering [vaddr, vaddr+size)
 * with get_user_pages() and build an sg list over them, honouring the
 * sub-page offset of the first page. Returns the handle or NULL. */
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;	/* offset within first page */
	buf->sg_desc.size = size;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vmalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	memset(buf->sg_desc.sglist, 0,
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	down_read(&current->mm->mmap_sem);
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);
	/* a partial pin (or negative error) is treated as failure */
	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	/* first entry starts at buf->offset within its page */
	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
	       num_pages_from_user, buf->sg_desc.num_pages);
	while (--num_pages_from_user >= 0)	/* unpin what we got */
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 * Drops the kernel mapping, marks written-to pages dirty, unpins every
 * page, and frees all bookkeeping.
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);	/* device wrote here */
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}
/* Return a kernel virtual address for the buffer, creating the
 * vm_map_ram mapping lazily on first use. */
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}
/* Report how many users currently hold a reference to this buffer. */
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return (unsigned int) atomic_read(&buf->refcount);
}
/* Map the buffer's pages into a userspace VMA one page at a time and
 * hook up the common refcounting vm_ops. Returns 0 or -errno.
 * NOTE(review): the do/while assumes the VMA is non-empty and no larger
 * than the buffer — out-of-range sizes would index past buf->pages;
 * presumably the vb2 core validates this before calling — confirm. */
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);	/* take the mmap reference */

	return 0;
}
/* The allocator "cookie" handed to drivers is the buffer's
 * scatter/gather descriptor. */
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	return &((struct vb2_dma_sg_buf *) buf_priv)->sg_desc;
}
/* vb2 memory-ops vtable exported for drivers using scatter/gather DMA. */
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
| gpl-2.0 |
olear/R104 | drivers/isdn/hardware/mISDN/hfcsusb.c | 2936 | 56148 | /* hfcsusb.c
* mISDN driver for Colognechip HFC-S USB chip
*
* Copyright 2001 by Peter Sprenger (sprenger@moving-bytes.de)
* Copyright 2008 by Martin Bachem (info@bachem-it.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* module params
* debug=<n>, default=0, with n=0xHHHHGGGG
* H - l1 driver flags described in hfcsusb.h
* G - common mISDN debug flags described at mISDNhw.h
*
* poll=<n>, default 128
* n : burst size of PH_DATA_IND at transparent rx data
*
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "hfcsusb.h"
static const char *hfcsusb_rev = "Revision: 0.3.3 (socket), 2008-11-05";

/* module parameters (see file header for semantics) */
static unsigned int debug;			/* l1/mISDN debug flag mask */
static int poll = DEFAULT_TRANSP_BURST_SZ;	/* transparent rx burst size */

static LIST_HEAD(HFClist);		/* all registered hfcsusb devices */
static DEFINE_RWLOCK(HFClock);		/* protects HFClist */

MODULE_AUTHOR("Martin Bachem");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, int, 0);

static int hfcsusb_cnt;			/* running device counter */

/* some function prototypes */
static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command);
static void release_hw(struct hfcsusb *hw);
static void reset_hfcsusb(struct hfcsusb *hw);
static void setPortMode(struct hfcsusb *hw);
static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int  hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
static void hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel: submit the
 * register write at ctrl_out_idx if the queue is non-empty.
 * Caller holds ctrl_lock (or is the completion handler). */
static void
ctrl_start_transfer(struct hfcsusb *hw)
{
	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	if (hw->ctrl_cnt) {
		hw->ctrl_urb->pipe = hw->ctrl_out_pipe;
		hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write;
		hw->ctrl_urb->transfer_buffer = NULL;
		hw->ctrl_urb->transfer_buffer_length = 0;
		/* register address and value travel in the setup packet */
		hw->ctrl_write.wIndex =
			cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg);
		hw->ctrl_write.wValue =
			cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val);

		usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC);
	}
}
/*
 * queue a control transfer request to write HFC-S USB
 * chip register using CTRL resuest queue
 * Returns 0 on success, 1 if the ring buffer is full.
 */
static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
{
	struct ctrl_buf *buf;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n",
		       hw->name, __func__, reg, val);

	spin_lock(&hw->ctrl_lock);
	if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
		spin_unlock(&hw->ctrl_lock);
		return 1;	/* queue full — request dropped */
	}
	buf = &hw->ctrl_buff[hw->ctrl_in_idx];
	buf->hfcs_reg = reg;
	buf->reg_val = val;
	if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
		hw->ctrl_in_idx = 0;	/* ring buffer wrap */
	if (++hw->ctrl_cnt == 1)
		/* queue was idle — kick off the first transfer */
		ctrl_start_transfer(hw);
	spin_unlock(&hw->ctrl_lock);
	return 0;
}
/* control completion routine handling background control cmds:
 * retire the finished register write and submit the next queued one. */
static void
ctrl_complete(struct urb *urb)
{
	struct hfcsusb *hw = (struct hfcsusb *) urb->context;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	urb->dev = hw->dev;
	if (hw->ctrl_cnt) {
		hw->ctrl_cnt--;	/* decrement actual count */
		if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
			hw->ctrl_out_idx = 0;	/* pointer wrap */

		ctrl_start_transfer(hw); /* start next transfer */
	}
}
/* handle LED bits: update hw->led_state for one LED. A negative
 * @led_bits means the LED is active-low, i.e. the requested state is
 * applied inverted with the magnitude as the bit mask. */
static void
set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on)
{
	int invert = led_bits < 0;
	unsigned short mask = invert ? abs(led_bits) : led_bits;

	if ((!!set_on) ^ invert)
		hw->led_state |= mask;
	else
		hw->led_state &= ~mask;
}
/* handle LED requests: translate an LED_* event into led_state bits per
 * the board's led_bits table and write the new state to the chip only
 * when it actually changed. */
static void
handle_led(struct hfcsusb *hw, int event)
{
	struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *)
		hfcsusb_idtab[hw->vend_idx].driver_info;
	__u8 tmpled;

	if (driver_info->led_scheme == LED_OFF)
		return;		/* board has no driver-controlled LEDs */
	tmpled = hw->led_state;	/* remember to detect changes below */

	switch (event) {
	case LED_POWER_ON:
		set_led_bit(hw, driver_info->led_bits[0], 1);
		set_led_bit(hw, driver_info->led_bits[1], 0);
		set_led_bit(hw, driver_info->led_bits[2], 0);
		set_led_bit(hw, driver_info->led_bits[3], 0);
		break;
	case LED_POWER_OFF:
		set_led_bit(hw, driver_info->led_bits[0], 0);
		set_led_bit(hw, driver_info->led_bits[1], 0);
		set_led_bit(hw, driver_info->led_bits[2], 0);
		set_led_bit(hw, driver_info->led_bits[3], 0);
		break;
	case LED_S0_ON:
		set_led_bit(hw, driver_info->led_bits[1], 1);
		break;
	case LED_S0_OFF:
		set_led_bit(hw, driver_info->led_bits[1], 0);
		break;
	case LED_B1_ON:
		set_led_bit(hw, driver_info->led_bits[2], 1);
		break;
	case LED_B1_OFF:
		set_led_bit(hw, driver_info->led_bits[2], 0);
		break;
	case LED_B2_ON:
		set_led_bit(hw, driver_info->led_bits[3], 1);
		break;
	case LED_B2_OFF:
		set_led_bit(hw, driver_info->led_bits[3], 0);
		break;
	}

	if (hw->led_state != tmpled) {
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s reg(0x%02x) val(x%02x)\n",
			       hw->name, __func__,
			       HFCUSB_P_DATA, hw->led_state);

		/* push the new LED state out via the control queue */
		write_reg(hw, HFCUSB_P_DATA, hw->led_state);
	}
}
/*
 * Layer2 -> Layer 1 Bchannel data
 * Handles PH_DATA_REQ (queue tx data, early-confirm for transparent
 * mode), PH_ACTIVATE_REQ and PH_DEACTIVATE_REQ for a B-channel.
 * Consumes @skb when the handler succeeds.
 */
static int
hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct bchannel		*bch = container_of(ch, struct bchannel, ch);
	struct hfcsusb		*hw = bch->hw;
	int			ret = -EINVAL;
	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
	u_long			flags;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&hw->lock, flags);
		ret = bchannel_senddata(bch, skb);
		spin_unlock_irqrestore(&hw->lock, flags);
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
			       hw->name, __func__, ret);
		if (ret > 0) {
			/*
			 * other l1 drivers don't send early confirms on
			 * transp data, but hfcsusb does because tx_next
			 * skb is needed in tx_iso_complete()
			 */
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
			ret = 0;
		}
		return ret;
	case PH_ACTIVATE_REQ:
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
			/* first activation: bring up iso endpoints */
			hfcsusb_start_endpoint(hw, bch->nr);
			ret = hfcsusb_setup_bch(bch, ch->protocol);
		} else
			ret = 0;	/* already active */
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
				    0, NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		deactivate_bchannel(bch);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);	/* message fully handled */
	return ret;
}
/*
 * send full D/B channel status information
 * as MPH_INFORMATION_IND
 *
 * Allocates a ph_info message (header + one ph_info_ch per B-channel),
 * fills it from the current D/B channel state and queues it upstream.
 * May be called from atomic context, hence GFP_ATOMIC.
 */
static void
hfcsusb_ph_info(struct hfcsusb *hw)
{
	struct ph_info *phi;
	struct dchannel *dch = &hw->dch;
	int i;

	phi = kzalloc(sizeof(struct ph_info) +
		      dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
	/* fix: GFP_ATOMIC allocation can fail; dereferencing a NULL phi
	 * below would oops — silently drop the indication instead */
	if (!phi)
		return;
	phi->dch.ch.protocol = hw->protocol;
	phi->dch.ch.Flags = dch->Flags;
	phi->dch.state = dch->state;
	phi->dch.num_bch = dch->dev.nrbchan;
	for (i = 0; i < dch->dev.nrbchan; i++) {
		phi->bch[i].protocol = hw->bch[i].ch.protocol;
		phi->bch[i].Flags = hw->bch[i].Flags;
	}
	_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
		    sizeof(struct ph_info_dch) + dch->dev.nrbchan *
		    sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
	kfree(phi);
}
/*
 * Layer2 -> Layer 1 Dchannel data
 * Handles D-channel PH_DATA_REQ, PH_ACTIVATE_REQ / PH_DEACTIVATE_REQ
 * (with separate NT and TE paths) and MPH_INFORMATION_REQ.
 */
static int
hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
	struct hfcsusb		*hw = dch->hw;
	int			ret = -EINVAL;
	u_long			flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n",
			       hw->name, __func__);

		spin_lock_irqsave(&hw->lock, flags);
		ret = dchannel_senddata(dch, skb);
		spin_unlock_irqrestore(&hw->lock, flags);
		if (ret > 0) {
			/* data queued — confirm to layer 2 immediately */
			ret = 0;
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
		}
		break;

	case PH_ACTIVATE_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n",
			       hw->name, __func__,
			       (hw->protocol == ISDN_P_NT_S0) ? "NT" : "TE");

		if (hw->protocol == ISDN_P_NT_S0) {
			ret = 0;
			if (test_bit(FLG_ACTIVE, &dch->Flags)) {
				/* layer 1 already up — just indicate it */
				_queue_data(&dch->dev.D,
					    PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
					    NULL, GFP_ATOMIC);
			} else {
				hfcsusb_ph_command(hw,
						   HFC_L1_ACTIVATE_NT);
				test_and_set_bit(FLG_L2_ACTIVATED,
						 &dch->Flags);
			}
		} else {
			/* TE mode: activation runs through the l1 FSM */
			hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE);
			ret = l1_event(dch->l1, hh->prim);
		}
		break;

	case PH_DEACTIVATE_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n",
			       hw->name, __func__);
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);

		if (hw->protocol == ISDN_P_NT_S0) {
			hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
			/* flush all pending tx/rx state under the lock */
			spin_lock_irqsave(&hw->lock, flags);
			skb_queue_purge(&dch->squeue);
			if (dch->tx_skb) {
				dev_kfree_skb(dch->tx_skb);
				dch->tx_skb = NULL;
			}
			dch->tx_idx = 0;
			if (dch->rx_skb) {
				dev_kfree_skb(dch->rx_skb);
				dch->rx_skb = NULL;
			}
			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
			spin_unlock_irqrestore(&hw->lock, flags);
#ifdef FIXME
			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
#endif
			ret = 0;
		} else
			ret = l1_event(dch->l1, hh->prim);
		break;
	case MPH_INFORMATION_REQ:
		hfcsusb_ph_info(hw);
		ret = 0;
		break;
	}

	return ret;
}
/*
 * Layer 1 callback function
 *
 * Invoked by the common mISDN layer1 state machine (TE mode) to apply
 * hardware/queue side effects of FSM commands. Returns 0 on success,
 * -1 for unknown commands.
 */
static int
hfc_l1callback(struct dchannel *dch, u_int cmd)
{
	struct hfcsusb *hw = dch->hw;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s cmd 0x%x\n",
		       hw->name, __func__, cmd);

	switch (cmd) {
	case INFO3_P8:
	case INFO3_P10:
	case HW_RESET_REQ:
	case HW_POWERUP_REQ:
		/* no hardware action needed; still report status below */
		break;
	case HW_DEACT_REQ:
		/* deactivation: flush all queued and partial D-channel data */
		skb_queue_purge(&dch->squeue);
		if (dch->tx_skb) {
			dev_kfree_skb(dch->tx_skb);
			dch->tx_skb = NULL;
		}
		dch->tx_idx = 0;
		if (dch->rx_skb) {
			dev_kfree_skb(dch->rx_skb);
			dch->rx_skb = NULL;
		}
		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
		break;
	case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
			    GFP_ATOMIC);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
			    GFP_ATOMIC);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: unknown cmd %x\n",
			       hw->name, __func__, cmd);
		return -1;
	}
	/* push updated channel status to layer2 */
	hfcsusb_ph_info(hw);
	return 0;
}
/*
 * Open the D-channel (or the E-channel logger when rq->adr.channel == 1).
 * First open configures the port mode (NT/TE) and, for TE, creates the
 * layer1 state machine; later opens must request the same protocol.
 */
static int
open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch,
	      struct channel_req *rq)
{
	int err = 0;

	if (debug & DEBUG_HW_OPEN)
		printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n",
		       hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
		       __builtin_return_address(0));
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;

	test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags);
	test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags);
	hfcsusb_start_endpoint(hw, HFC_CHAN_D);

	/* E-Channel logging */
	if (rq->adr.channel == 1) {
		/* only possible if the device exposes the PCM RX endpoint */
		if (hw->fifos[HFCUSB_PCM_RX].pipe) {
			hfcsusb_start_endpoint(hw, HFC_CHAN_E);
			set_bit(FLG_ACTIVE, &hw->ech.Flags);
			_queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND,
				    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		} else
			return -EINVAL;
	}

	if (!hw->initdone) {
		/* first open: latch protocol and program the port mode */
		hw->protocol = rq->protocol;
		if (rq->protocol == ISDN_P_TE_S0) {
			err = create_l1(&hw->dch, hfc_l1callback);
			if (err)
				return err;
		}
		setPortMode(hw);
		ch->protocol = rq->protocol;
		hw->initdone = 1;
	} else {
		if (rq->protocol != ch->protocol)
			return -EPROTONOSUPPORT;
	}

	/* if layer1 is already up (NT G3 / TE F7), tell the opener now */
	if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) ||
	    ((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7)))
		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
	rq->ch = ch;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: %s: cannot get module\n",
		       hw->name, __func__);
	return 0;
}
/*
 * Open one B-channel. Valid channel numbers are 1 and 2; each channel
 * may only be open once (FLG_OPEN guards against double-open).
 */
static int
open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
{
	struct bchannel *bch;

	/*
	 * Reject channel 0 as well: the original check only tested "> 2",
	 * so channel 0 would index hw->bch[-1] (out-of-bounds access).
	 */
	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s B%i\n",
		       hw->name, __func__, rq->adr.channel);

	bch = &hw->bch[rq->adr.channel - 1];
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can be only open once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;

	/* start USB endpoint for bchannel */
	if (rq->adr.channel == 1)
		hfcsusb_start_endpoint(hw, HFC_CHAN_B1);
	else
		hfcsusb_start_endpoint(hw, HFC_CHAN_B2);

	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: %s:cannot get module\n",
		       hw->name, __func__);
	return 0;
}
/*
 * Handle CONTROL_CHANNEL requests for the D-channel device.
 * Only MISDN_CTRL_GETOP is implemented; it reports the supported
 * operations back to the caller. Anything else is rejected.
 */
static int
channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq)
{
	int err = 0;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n",
		       hw->name, __func__, (cq->op), (cq->channel));

	if (cq->op == MISDN_CTRL_GETOP) {
		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
			MISDN_CTRL_DISCONNECT;
	} else {
		printk(KERN_WARNING "%s: %s: unknown Op %x\n",
		       hw->name, __func__, cq->op);
		err = -EINVAL;
	}
	return err;
}
/*
 * device control function
 *
 * mISDN device control entry point: open/close D- and B-channels and
 * dispatch CONTROL_CHANNEL operations. Keeps hw->open as the reference
 * count of users; endpoints are stopped when the last user closes.
 */
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct hfcsusb *hw = dch->hw;
	struct channel_req *rq;
	int err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: cmd:%x %p\n",
		       hw->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		/* D-channel protocols go to open_dchannel, rest are B */
		if ((rq->protocol == ISDN_P_TE_S0) ||
		    (rq->protocol == ISDN_P_NT_S0))
			err = open_dchannel(hw, ch, rq);
		else
			err = open_bchannel(hw, rq);
		if (!err)
			hw->open++;
		break;
	case CLOSE_CHANNEL:
		hw->open--;
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG
			       "%s: %s: dev(%d) close from %p (open %d)\n",
			       hw->name, __func__, hw->dch.dev.id,
			       __builtin_return_address(0), hw->open);
		if (!hw->open) {
			/* last user gone: stop D (and E, if present) */
			hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
			if (hw->fifos[HFCUSB_PCM_RX].pipe)
				hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
			handle_led(hw, LED_POWER_ON);
		}
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(hw, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: unknown command %x\n",
			       hw->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
/*
 * S0 TE state change event handler
 *
 * Translate an S0 TE layer1 state (F0..F8) into the matching event for
 * the common l1 state machine and update the S0 LED accordingly.
 */
static void
ph_state_te(struct dchannel *dch)
{
	struct hfcsusb *hw = dch->hw;

	if (debug & DEBUG_HW) {
		if (dch->state <= HFC_MAX_TE_LAYER1_STATE)
			printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__,
			       HFC_TE_LAYER1_STATES[dch->state]);
		else
			printk(KERN_DEBUG "%s: %s: TE F%d\n",
			       hw->name, __func__, dch->state);
	}

	if (dch->state == 0)
		l1_event(dch->l1, HW_RESET_IND);
	else if (dch->state == 3)
		l1_event(dch->l1, HW_DEACT_IND);
	else if (dch->state == 5 || dch->state == 8)
		l1_event(dch->l1, ANYSIGNAL);
	else if (dch->state == 6)
		l1_event(dch->l1, INFO2);
	else if (dch->state == 7)
		l1_event(dch->l1, INFO4_P8);

	/* F7 is the only fully-activated TE state */
	handle_led(hw, (dch->state == 7) ? LED_S0_ON : LED_S0_OFF);
}
/*
 * S0 NT state change event handler
 *
 * React to NT-side layer1 state changes (G1..G4): manage the NT
 * activation timer, FLG_ACTIVE/FLG_L2_ACTIVATED flags, the S0 LED,
 * and finally report the new status via MPH_INFORMATION_IND.
 */
static void
ph_state_nt(struct dchannel *dch)
{
	struct hfcsusb *hw = dch->hw;

	if (debug & DEBUG_HW) {
		if (dch->state <= HFC_MAX_NT_LAYER1_STATE)
			printk(KERN_DEBUG "%s: %s: %s\n",
			       hw->name, __func__,
			       HFC_NT_LAYER1_STATES[dch->state]);
		else
			printk(KERN_INFO DRIVER_NAME "%s: %s: NT G%d\n",
			       hw->name, __func__, dch->state);
	}

	switch (dch->state) {
	case (1):
		/* G1 deactivated: clear flags and stop activation timer */
		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		handle_led(hw, LED_S0_OFF);
		break;
	case (2):
		/* G2 pending activation */
		if (hw->nt_timer < 0) {
			/* T1 expired: give up and deactivate */
			hw->nt_timer = 0;
			hw->timers &= ~NT_ACTIVATION_TIMER;
			hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT);
		} else {
			/* (re)arm T1 (decremented in tx_iso_complete) */
			hw->timers |= NT_ACTIVATION_TIMER;
			hw->nt_timer = NT_T1_COUNT;
			/* allow G2 -> G3 transition */
			write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3);
		}
		break;
	case (3):
		/* G3 activated: signal layer2 */
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
			    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		handle_led(hw, LED_S0_ON);
		break;
	case (4):
		/* G4 pending deactivation: just stop the timer */
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		break;
	default:
		break;
	}
	hfcsusb_ph_info(hw);
}
/*
 * Dispatch an S0 layer1 state change to the handler matching the
 * configured port mode (NT or TE).
 */
static void
ph_state(struct dchannel *dch)
{
	struct hfcsusb *hw = dch->hw;

	switch (hw->protocol) {
	case ISDN_P_NT_S0:
		ph_state_nt(dch);
		break;
	case ISDN_P_TE_S0:
		ph_state_te(dch);
		break;
	}
}
/*
 * disable/enable BChannel for desired protocoll
 *
 * Program the chip's per-channel FIFOs (CON_HDLC), S/T transmit and
 * receive enables (SCTRL/SCTRL_R) and the LEDs for the requested
 * B-channel protocol (-1 = init, ISDN_P_NONE = idle, raw or HDLC).
 */
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
{
	struct hfcsusb *hw = bch->hw;
	__u8 conhdlc, sctrl, sctrl_r;

	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
		       hw->name, __func__, bch->state, protocol,
		       bch->nr);

	/* setup val for CON_HDLC */
	conhdlc = 0;
	if (protocol > ISDN_P_NONE)
		conhdlc = 8;	/* enable FIFO */

	switch (protocol) {
	case (-1):		/* used for init */
		bch->state = -1;
		/* fall through */
	case (ISDN_P_NONE):
		if (bch->state == ISDN_P_NONE)
			return 0; /* already in idle state */
		bch->state = ISDN_P_NONE;
		clear_bit(FLG_HDLC, &bch->Flags);
		clear_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_RAW):
		conhdlc |= 2;	/* transparent mode */
		bch->state = protocol;
		set_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_HDLC):
		bch->state = protocol;
		set_bit(FLG_HDLC, &bch->Flags);
		break;
	default:
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: prot not known %x\n",
			       hw->name, __func__, protocol);
		return -ENOPROTOOPT;
	}

	if (protocol >= ISDN_P_NONE) {
		/* configure both FIFO directions of this B-channel:
		 * B1 uses fifos 0/1, B2 uses fifos 2/3 */
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);

		/* enable the S/T TX/RX paths of all currently active
		 * B-channels (register values per HFC-S USB data sheet) */
		sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
		sctrl_r = 0x0;
		if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
			sctrl |= 1;
			sctrl_r |= 1;
		}
		if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
			sctrl |= 2;
			sctrl_r |= 2;
		}
		write_reg(hw, HFCUSB_SCTRL, sctrl);
		write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);

		if (protocol > ISDN_P_NONE)
			handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
		else
			handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
				   LED_B2_OFF);
	}
	hfcsusb_ph_info(hw);
	return 0;
}
/*
 * Issue a layer1 activation/deactivation command by writing the
 * HFCUSB_STATES register (values per HFC-S USB state machine).
 */
static void
hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
{
	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: %x\n",
		       hw->name, __func__, command);

	switch (command) {
	case HFC_L1_ACTIVATE_TE:
		/* force sending INFO1 */
		write_reg(hw, HFCUSB_STATES, 0x14);
		/* start l1 activation */
		write_reg(hw, HFCUSB_STATES, 0x04);
		break;

	case HFC_L1_FORCE_DEACTIVATE_TE:
		/* force F3 (deactivated) state */
		write_reg(hw, HFCUSB_STATES, 0x10);
		write_reg(hw, HFCUSB_STATES, 0x03);
		break;

	case HFC_L1_ACTIVATE_NT:
		/* already in G3: just confirm to layer2 */
		if (hw->dch.state == 3)
			_queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND,
				    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		else
			write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE |
				  HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3);
		break;

	case HFC_L1_DEACTIVATE_NT:
		write_reg(hw, HFCUSB_STATES,
			  HFCUSB_DO_ACTION);
		break;
	}
}
/*
 * Layer 1 B-channel hardware access
 *
 * CONTROL_CHANNEL handler for a B-channel: report supported ops, or
 * arm the fill-empty-fifo behavior.
 */
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	int err = 0;

	if (cq->op == MISDN_CTRL_GETOP) {
		/* report the operations we support */
		cq->op = MISDN_CTRL_FILL_EMPTY;
	} else if (cq->op == MISDN_CTRL_FILL_EMPTY) {
		/* fill fifo, if empty */
		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
			       "off=%d)\n", __func__, bch->nr, !!cq->p1);
	} else {
		printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
		err = -EINVAL;
	}
	return err;
}
/* collect data from incoming interrupt or isochron USB data
 *
 * Append @len bytes to the rx_skb of the single channel (D, B or E)
 * owning @fifo, allocating the skb on demand. For HDLC channels a
 * frame is delivered upward when @finish is set and CRC/status are OK;
 * transparent B data is delivered once at least 'poll' bytes arrived.
 * Runs under hw->lock (URB completion context).
 */
static void
hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
		 int finish)
{
	struct hfcsusb *hw = fifo->hw;
	struct sk_buff *rx_skb = NULL;
	int maxlen = 0;
	int fifon = fifo->fifonum;
	int i;
	int hdlc = 0;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
		       "dch(%p) bch(%p) ech(%p)\n",
		       hw->name, __func__, fifon, len,
		       fifo->dch, fifo->bch, fifo->ech);

	if (!len)
		return;

	/* exactly one of dch/bch/ech must own this fifo */
	if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) {
		printk(KERN_DEBUG "%s: %s: undefined channel\n",
		       hw->name, __func__);
		return;
	}

	spin_lock(&hw->lock);
	/* pick the owning channel's skb; D and E are always HDLC */
	if (fifo->dch) {
		rx_skb = fifo->dch->rx_skb;
		maxlen = fifo->dch->maxlen;
		hdlc = 1;
	}
	if (fifo->bch) {
		rx_skb = fifo->bch->rx_skb;
		maxlen = fifo->bch->maxlen;
		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
	}
	if (fifo->ech) {
		rx_skb = fifo->ech->rx_skb;
		maxlen = fifo->ech->maxlen;
		hdlc = 1;
	}

	if (!rx_skb) {
		/* first data for this frame: allocate a fresh skb */
		rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
		if (rx_skb) {
			if (fifo->dch)
				fifo->dch->rx_skb = rx_skb;
			if (fifo->bch)
				fifo->bch->rx_skb = rx_skb;
			if (fifo->ech)
				fifo->ech->rx_skb = rx_skb;
			skb_trim(rx_skb, 0);
		} else {
			printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
			       hw->name, __func__);
			spin_unlock(&hw->lock);
			return;
		}
	}

	if (fifo->dch || fifo->ech) {
		/* D/E-Channel SKB range check */
		if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
			       "for fifo(%d) HFCUSB_D_RX\n",
			       hw->name, __func__, fifon);
			skb_trim(rx_skb, 0);
			spin_unlock(&hw->lock);
			return;
		}
	} else if (fifo->bch) {
		/* B-Channel SKB range check */
		if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
			       "for fifo(%d) HFCUSB_B_RX\n",
			       hw->name, __func__, fifon);
			skb_trim(rx_skb, 0);
			spin_unlock(&hw->lock);
			return;
		}
	}

	memcpy(skb_put(rx_skb, len), data, len);

	if (hdlc) {
		/* we have a complete hdlc packet */
		if (finish) {
			/* last byte is the chip's STAT byte: 0 = CRC OK;
			 * frame must exceed 2-byte CRC + STAT */
			if ((rx_skb->len > 3) &&
			    (!(rx_skb->data[rx_skb->len - 1]))) {
				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG "%s: %s: fifon(%i)"
					       " new RX len(%i): ",
					       hw->name, __func__, fifon,
					       rx_skb->len);
					i = 0;
					while (i < rx_skb->len)
						printk("%02x ",
						       rx_skb->data[i++]);
					printk("\n");
				}

				/* remove CRC & status */
				skb_trim(rx_skb, rx_skb->len - 3);

				if (fifo->dch)
					recv_Dchannel(fifo->dch);
				if (fifo->bch)
					recv_Bchannel(fifo->bch, MISDN_ID_ANY);
				if (fifo->ech)
					recv_Echannel(fifo->ech,
						      &hw->dch);
			} else {
				/* bad CRC or short frame: drop it */
				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG
					       "%s: CRC or minlen ERROR fifon(%i) "
					       "RX len(%i): ",
					       hw->name, fifon, rx_skb->len);
					i = 0;
					while (i < rx_skb->len)
						printk("%02x ",
						       rx_skb->data[i++]);
					printk("\n");
				}
				skb_trim(rx_skb, 0);
			}
		}
	} else {
		/* deliver transparent data to layer2 */
		if (rx_skb->len >= poll)
			recv_Bchannel(fifo->bch, MISDN_ID_ANY);
	}
	spin_unlock(&hw->lock);
}
/*
 * Initialise @urb as an isochronous transfer: fill the common fields
 * via the bulk helper, then partition the buffer into @num_packets
 * fixed-size ISO frame descriptors.
 */
static void
fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
	      void *buf, int num_packets, int packet_size, int interval,
	      usb_complete_t complete, void *context)
{
	int i;

	usb_fill_bulk_urb(urb, dev, pipe, buf, num_packets * packet_size,
			  complete, context);

	urb->number_of_packets = num_packets;
	urb->transfer_flags = URB_ISO_ASAP;
	urb->actual_length = 0;
	urb->interval = interval;

	for (i = 0; i < num_packets; i++) {
		urb->iso_frame_desc[i].offset = i * packet_size;
		urb->iso_frame_desc[i].length = packet_size;
		urb->iso_frame_desc[i].actual_length = 0;
	}
}
/* receive completion routine for all ISO tx fifos
 *
 * URB completion for ISO IN endpoints: for each ISO frame, strip the
 * two leading status bytes (threshold mask + S0 state/EOF bits) when
 * present, feed payload to hfcsusb_rx_frame(), propagate D-channel
 * layer1 state changes, then resubmit the URB.
 */
static void
rx_iso_complete(struct urb *urb)
{
	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
	struct hfcsusb *hw = fifo->hw;
	int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
		status, iso_status, i;
	__u8 *buf;
	static __u8 eof[8];	/* per-fifo end-of-frame flag carried
				 * across URB completions */
	__u8 s0_state;

	fifon = fifo->fifonum;
	status = urb->status;

	spin_lock(&hw->lock);
	if (fifo->stop_gracefull) {
		/* shutdown requested: ack and do not resubmit */
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}
	spin_unlock(&hw->lock);

	/*
	 * ISO transfer only partially completed,
	 * look at individual frame status for details
	 */
	if (status == -EXDEV) {
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: with -EXDEV "
			       "urb->status %d, fifonum %d\n",
			       hw->name, __func__, status, fifon);

		/* clear status, so go on with ISO transfers */
		status = 0;
	}

	s0_state = 0;
	if (fifo->active && !status) {
		num_isoc_packets = iso_packets[fifon];
		maxlen = fifo->usb_packet_maxlen;

		for (k = 0; k < num_isoc_packets; ++k) {
			len = urb->iso_frame_desc[k].actual_length;
			offset = urb->iso_frame_desc[k].offset;
			buf = context_iso_urb->buffer + offset;
			iso_status = urb->iso_frame_desc[k].status;

			if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) {
				printk(KERN_DEBUG "%s: %s: "
				       "ISO packet %i, status: %i\n",
				       hw->name, __func__, k, iso_status);
			}

			/* USB data log for every D ISO in */
			if ((fifon == HFCUSB_D_RX) &&
			    (debug & DBG_HFC_USB_VERBOSE)) {
				printk(KERN_DEBUG
				       "%s: %s: %d (%d/%d) len(%d) ",
				       hw->name, __func__, urb->start_frame,
				       k, num_isoc_packets-1,
				       len);
				for (i = 0; i < len; i++)
					printk("%x ", buf[i]);
				printk("\n");
			}

			if (!iso_status) {
				/* a previous short packet means this one
				 * starts with the 2 chip status bytes */
				if (fifo->last_urblen != maxlen) {
					/*
					 * save fifo fill-level threshold bits
					 * to use them later in TX ISO URB
					 * completions
					 */
					hw->threshold_mask = buf[1];

					if (fifon == HFCUSB_D_RX)
						s0_state = (buf[0] >> 4);

					eof[fifon] = buf[0] & 1;
					if (len > 2)
						hfcsusb_rx_frame(fifo, buf + 2,
								 len - 2, (len < maxlen)
								 ? eof[fifon] : 0);
				} else
					hfcsusb_rx_frame(fifo, buf, len,
							 (len < maxlen) ?
							 eof[fifon] : 0);
				fifo->last_urblen = len;
			}
		}

		/* signal S0 layer1 state change */
		if ((s0_state) && (hw->initdone) &&
		    (s0_state != hw->dch.state)) {
			hw->dch.state = s0_state;
			schedule_event(&hw->dch, FLG_PHCHANGE);
		}

		/* reinitialise and resubmit the URB to keep the chain going */
		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      (usb_complete_t)rx_iso_complete, urb->context);
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			if (debug & DEBUG_HW)
				printk(KERN_DEBUG "%s: %s: error submitting "
				       "ISO URB: %d\n",
				       hw->name, __func__, errcode);
		}
	} else {
		if (status && (debug & DBG_HFC_URB_INFO))
			printk(KERN_DEBUG "%s: %s: rx_iso_complete : "
			       "urb->status %d, fifonum %d\n",
			       hw->name, __func__, status, fifon);
	}
}
/* receive completion routine for all interrupt rx fifos
 *
 * URB completion for INT IN endpoints: same payload handling as the
 * ISO variant (2 status bytes when the previous URB was short), then
 * the URB is resubmitted.
 */
static void
rx_int_complete(struct urb *urb)
{
	int len, status, i;
	__u8 *buf, maxlen, fifon;
	struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
	struct hfcsusb *hw = fifo->hw;
	static __u8 eof[8];	/* per-fifo end-of-frame flag carried
				 * across URB completions */

	spin_lock(&hw->lock);
	if (fifo->stop_gracefull) {
		/* shutdown requested: ack and do not resubmit */
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}
	spin_unlock(&hw->lock);

	fifon = fifo->fifonum;
	if ((!fifo->active) || (urb->status)) {
		if (debug & DBG_HFC_URB_ERROR)
			printk(KERN_DEBUG
			       "%s: %s: RX-Fifo %i is going down (%i)\n",
			       hw->name, __func__, fifon, urb->status);

		fifo->urb->interval = 0; /* cancel automatic rescheduling */
		return;
	}
	len = urb->actual_length;
	buf = fifo->buffer;
	maxlen = fifo->usb_packet_maxlen;

	/* USB data log for every D INT in */
	if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) {
		printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ",
		       hw->name, __func__, len);
		for (i = 0; i < len; i++)
			printk("%02x ", buf[i]);
		printk("\n");
	}

	/* a previous short URB means this one starts with status bytes */
	if (fifo->last_urblen != fifo->usb_packet_maxlen) {
		/* the threshold mask is in the 2nd status byte */
		hw->threshold_mask = buf[1];

		/* signal S0 layer1 state change */
		if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) {
			hw->dch.state = (buf[0] >> 4);
			schedule_event(&hw->dch, FLG_PHCHANGE);
		}

		eof[fifon] = buf[0] & 1;

		/* if we have more than the 2 status bytes -> collect data */
		if (len > 2)
			hfcsusb_rx_frame(fifo, buf + 2,
					 urb->actual_length - 2,
					 (len < maxlen) ? eof[fifon] : 0);
	} else {
		hfcsusb_rx_frame(fifo, buf, urb->actual_length,
				 (len < maxlen) ? eof[fifon] : 0);
	}
	fifo->last_urblen = urb->actual_length;

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: error resubmitting USB\n",
			       hw->name, __func__);
	}
}
/* transmit completion routine for all ISO tx fifos
 *
 * URB completion for ISO OUT endpoints: refill the URB with the next
 * chunk of the current tx_skb (flow-controlled via fifo->bit_line and
 * the chip's fill-level threshold bits), flag HDLC frame completion,
 * fetch the next frame when done, and resubmit. Also abused as a 1ms
 * tick for the NT activation timer on the D TX fifo. Runs entirely
 * under hw->lock.
 */
static void
tx_iso_complete(struct urb *urb)
{
	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
	struct hfcsusb *hw = fifo->hw;
	struct sk_buff *tx_skb;
	int k, tx_offset, num_isoc_packets, sink, remain, current_len,
		errcode, hdlc, i;
	int *tx_idx;
	int frame_complete, fifon, status;
	__u8 threshbit;

	spin_lock(&hw->lock);
	if (fifo->stop_gracefull) {
		/* shutdown requested: ack and do not resubmit */
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}

	/* this fifo must belong to either the D- or a B-channel */
	if (fifo->dch) {
		tx_skb = fifo->dch->tx_skb;
		tx_idx = &fifo->dch->tx_idx;
		hdlc = 1;
	} else if (fifo->bch) {
		tx_skb = fifo->bch->tx_skb;
		tx_idx = &fifo->bch->tx_idx;
		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
	} else {
		printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
		       hw->name, __func__);
		spin_unlock(&hw->lock);
		return;
	}

	fifon = fifo->fifonum;
	status = urb->status;

	tx_offset = 0;

	/*
	 * ISO transfer only partially completed,
	 * look at individual frame status for details
	 */
	if (status == -EXDEV) {
		if (debug & DBG_HFC_URB_ERROR)
			printk(KERN_DEBUG "%s: %s: "
			       "-EXDEV (%i) fifon (%d)\n",
			       hw->name, __func__, status, fifon);

		/* clear status, so go on with ISO transfers */
		status = 0;
	}

	if (fifo->active && !status) {
		/* is FifoFull-threshold set for our channel? */
		threshbit = (hw->threshold_mask & (1 << fifon));
		num_isoc_packets = iso_packets[fifon];

		/* predict dataflow to avoid fifo overflow */
		if (fifon >= HFCUSB_D_TX)
			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
		else
			sink = (threshbit) ? SINK_MIN : SINK_MAX;
		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      (usb_complete_t)tx_iso_complete, urb->context);
		memset(context_iso_urb->buffer, 0,
		       sizeof(context_iso_urb->buffer));
		frame_complete = 0;

		for (k = 0; k < num_isoc_packets; ++k) {
			/* analyze tx success of previous ISO packets */
			if (debug & DBG_HFC_URB_ERROR) {
				errcode = urb->iso_frame_desc[k].status;
				if (errcode) {
					printk(KERN_DEBUG "%s: %s: "
					       "ISO packet %i, status: %i\n",
					       hw->name, __func__, k, errcode);
				}
			}

			/* Generate next ISO Packets */
			if (tx_skb)
				remain = tx_skb->len - *tx_idx;
			else
				remain = 0;

			if (remain > 0) {
				/* bit_line budget limits the chunk size to
				 * at most 14 payload bytes per ISO packet */
				fifo->bit_line -= sink;
				current_len = (0 - fifo->bit_line) / 8;
				if (current_len > 14)
					current_len = 14;
				if (current_len < 0)
					current_len = 0;
				if (remain < current_len)
					current_len = remain;

				/* how much bit do we put on the line? */
				fifo->bit_line += current_len * 8;

				/* first byte of each packet flags whether
				 * the HDLC frame ends in this packet */
				context_iso_urb->buffer[tx_offset] = 0;
				if (current_len == remain) {
					if (hdlc) {
						/* signal frame completion */
						context_iso_urb->
							buffer[tx_offset] = 1;
						/* add 2 byte flags and 16bit
						 * CRC at end of ISDN frame */
						fifo->bit_line += 32;
					}
					frame_complete = 1;
				}

				/* copy tx data to iso-urb buffer */
				memcpy(context_iso_urb->buffer + tx_offset + 1,
				       (tx_skb->data + *tx_idx), current_len);
				*tx_idx += current_len;

				urb->iso_frame_desc[k].offset = tx_offset;
				urb->iso_frame_desc[k].length = current_len + 1;

				/* USB data log for every D ISO out */
				if ((fifon == HFCUSB_D_RX) &&
				    (debug & DBG_HFC_USB_VERBOSE)) {
					printk(KERN_DEBUG
					       "%s: %s (%d/%d) offs(%d) len(%d) ",
					       hw->name, __func__,
					       k, num_isoc_packets-1,
					       urb->iso_frame_desc[k].offset,
					       urb->iso_frame_desc[k].length);

					for (i = urb->iso_frame_desc[k].offset;
					     i < (urb->iso_frame_desc[k].offset
						  + urb->iso_frame_desc[k].length);
					     i++)
						printk("%x ",
						       context_iso_urb->buffer[i]);

					printk(" skb->len(%i) tx-idx(%d)\n",
					       tx_skb->len, *tx_idx);
				}

				tx_offset += (current_len + 1);
			} else {
				/* no payload: send a lone flag byte */
				urb->iso_frame_desc[k].offset = tx_offset++;
				urb->iso_frame_desc[k].length = 1;
				/* we lower data margin every msec */
				fifo->bit_line -= sink;
				if (fifo->bit_line < BITLINE_INF)
					fifo->bit_line = BITLINE_INF;
			}

			if (frame_complete) {
				/* current skb fully sent: free it and pull
				 * the next frame from the channel queue */
				frame_complete = 0;

				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG "%s: %s: "
					       "fifon(%i) new TX len(%i): ",
					       hw->name, __func__,
					       fifon, tx_skb->len);
					i = 0;
					while (i < tx_skb->len)
						printk("%02x ",
						       tx_skb->data[i++]);
					printk("\n");
				}

				dev_kfree_skb(tx_skb);
				tx_skb = NULL;
				if (fifo->dch && get_next_dframe(fifo->dch))
					tx_skb = fifo->dch->tx_skb;
				else if (fifo->bch &&
					 get_next_bframe(fifo->bch)) {
					if (test_bit(FLG_TRANSPARENT,
						     &fifo->bch->Flags))
						confirm_Bsend(fifo->bch);
					tx_skb = fifo->bch->tx_skb;
				}
			}
		}
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			if (debug & DEBUG_HW)
				printk(KERN_DEBUG
				       "%s: %s: error submitting ISO URB: %d \n",
				       hw->name, __func__, errcode);
		}

		/*
		 * abuse DChannel tx iso completion to trigger NT mode state
		 * changes tx_iso_complete is assumed to be called every
		 * fifo->intervall (ms)
		 */
		if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0)
		    && (hw->timers & NT_ACTIVATION_TIMER)) {
			if ((--hw->nt_timer) < 0)
				schedule_event(&hw->dch, FLG_PHCHANGE);
		}

	} else {
		if (status && (debug & DBG_HFC_URB_ERROR))
			printk(KERN_DEBUG "%s: %s: urb->status %s (%i)"
			       "fifonum=%d\n",
			       hw->name, __func__,
			       symbolic(urb_errlist, status), status, fifon);
	}
	spin_unlock(&hw->lock);
}
/*
 * allocs urbs and start isoc transfer with two pending urbs to avoid
 * gaps in the transfer chain
 *
 * Returns the resulting fifo->active flag (1 when the last submit
 * succeeded, 0 otherwise).
 */
static int
start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
		 usb_complete_t complete, int packet_size)
{
	struct hfcsusb *hw = fifo->hw;
	int i, k, errcode;

	if (debug)
		printk(KERN_DEBUG "%s: %s: fifo %i\n",
		       hw->name, __func__, fifo->fifonum);

	/* allocate Memory for Iso out Urbs */
	for (i = 0; i < 2; i++) {
		if (!(fifo->iso[i].urb)) {
			fifo->iso[i].urb =
				usb_alloc_urb(num_packets_per_urb, GFP_KERNEL);
			if (!(fifo->iso[i].urb)) {
				printk(KERN_DEBUG
				       "%s: %s: alloc urb for fifo %i failed",
				       hw->name, __func__, fifo->fifonum);
				/*
				 * Skip this slot entirely: the original code
				 * fell through and dereferenced the NULL urb
				 * in fill_isoc_urb()/usb_submit_urb().
				 */
				continue;
			}
			fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
			fifo->iso[i].indx = i;

			/* Init the first iso */
			if (ISO_BUFFER_SIZE >=
			    (fifo->usb_packet_maxlen *
			     num_packets_per_urb)) {
				fill_isoc_urb(fifo->iso[i].urb,
					      fifo->hw->dev, fifo->pipe,
					      fifo->iso[i].buffer,
					      num_packets_per_urb,
					      fifo->usb_packet_maxlen,
					      fifo->intervall, complete,
					      &fifo->iso[i]);
				memset(fifo->iso[i].buffer, 0,
				       sizeof(fifo->iso[i].buffer));

				for (k = 0; k < num_packets_per_urb; k++) {
					fifo->iso[i].urb->
						iso_frame_desc[k].offset =
						k * packet_size;
					fifo->iso[i].urb->
						iso_frame_desc[k].length =
						packet_size;
				}
			} else {
				printk(KERN_DEBUG
				       "%s: %s: ISO Buffer size to small!\n",
				       hw->name, __func__);
			}
		}
		fifo->bit_line = BITLINE_INF;

		errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL);
		fifo->active = (errcode >= 0) ? 1 : 0;
		fifo->stop_gracefull = 0;
		if (errcode < 0) {
			printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n",
			       hw->name, __func__,
			       symbolic(urb_errlist, errcode), i);
		}
	}
	return fifo->active;
}
/*
 * Gracefully stop both pending ISO URBs of @fifo: set the stop flag
 * (consumed and cleared by the URB completion handlers), then poll
 * until the completions have acknowledged, with a bounded timeout.
 */
static void
stop_iso_gracefull(struct usb_fifo *fifo)
{
	struct hfcsusb *hw = fifo->hw;
	int idx, tries;
	u_long flags;

	/* flag both in-flight URBs to terminate themselves */
	for (idx = 0; idx < 2; idx++) {
		spin_lock_irqsave(&hw->lock, flags);
		if (debug)
			printk(KERN_DEBUG "%s: %s for fifo %i.%i\n",
			       hw->name, __func__, fifo->fifonum, idx);
		fifo->stop_gracefull = 1;
		spin_unlock_irqrestore(&hw->lock, flags);
	}

	/* wait (up to 3 * 16 ms per slot) for the completions to ack */
	for (idx = 0; idx < 2; idx++) {
		tries = 3;
		while (fifo->stop_gracefull && tries--)
			schedule_timeout_interruptible((HZ/1000)*16);
		if (debug && fifo->stop_gracefull)
			printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n",
			       hw->name, __func__, fifo->fifonum, idx);
	}
}
/*
 * Gracefully stop the interrupt URB of @fifo: set the stop flag
 * (consumed by rx_int_complete), then poll briefly until it has been
 * acknowledged.
 */
static void
stop_int_gracefull(struct usb_fifo *fifo)
{
	struct hfcsusb *hw = fifo->hw;
	int tries = 3;
	u_long flags;

	spin_lock_irqsave(&hw->lock, flags);
	if (debug)
		printk(KERN_DEBUG "%s: %s for fifo %i\n",
		       hw->name, __func__, fifo->fifonum);
	fifo->stop_gracefull = 1;
	spin_unlock_irqrestore(&hw->lock, flags);

	while (fifo->stop_gracefull && tries--)
		schedule_timeout_interruptible((HZ/1000)*3);
	if (debug && fifo->stop_gracefull)
		printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n",
		       hw->name, __func__, fifo->fifonum);
}
/* start the interrupt transfer for the given fifo
 *
 * Allocates the fifo's URB on first use, fills it as an INT IN
 * transfer with rx_int_complete as handler, and submits it.
 * fifo->active reflects submit success.
 */
static void
start_int_fifo(struct usb_fifo *fifo)
{
	struct hfcsusb *hw = fifo->hw;
	int errcode;

	if (debug)
		printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n",
		       hw->name, __func__, fifo->fifonum);

	if (!fifo->urb) {
		fifo->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!fifo->urb)
			return;
	}
	usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe,
			 fifo->buffer, fifo->usb_packet_maxlen,
			 (usb_complete_t)rx_int_complete, fifo, fifo->intervall);
	fifo->active = 1;
	fifo->stop_gracefull = 0;
	errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
	if (errcode) {
		printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n",
		       hw->name, __func__, errcode);
		fifo->active = 0;
	}
}
/*
 * Program the S/T interface for TE or NT operation: SCTRL/SCTRL_E,
 * the clock delay and the initial layer1 state (F3 for TE, G1 for NT).
 * Register values per the HFC-S USB data sheet.
 */
static void
setPortMode(struct hfcsusb *hw)
{
	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__,
		       (hw->protocol == ISDN_P_TE_S0) ? "TE" : "NT");

	if (hw->protocol == ISDN_P_TE_S0) {
		write_reg(hw, HFCUSB_SCTRL, 0x40);
		write_reg(hw, HFCUSB_SCTRL_E, 0x00);
		write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE);
		/* load and release state F3 */
		write_reg(hw, HFCUSB_STATES, 3 | 0x10);
		write_reg(hw, HFCUSB_STATES, 3);
	} else {
		write_reg(hw, HFCUSB_SCTRL, 0x44);
		write_reg(hw, HFCUSB_SCTRL_E, 0x09);
		write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT);
		/* load and release state G1 */
		write_reg(hw, HFCUSB_STATES, 1 | 0x10);
		write_reg(hw, HFCUSB_STATES, 1);
	}
}
/*
 * Full chip reset and basic initialisation: reset the chip, program
 * USB packet sizes, enable PCM/GCI master mode, initialise all FIFOs
 * (HDLC parameters, enables, resets) and switch the power LED on.
 */
static void
reset_hfcsusb(struct hfcsusb *hw)
{
	struct usb_fifo *fifo;
	int i;

	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	/* do Chip reset */
	write_reg(hw, HFCUSB_CIRM, 8);

	/* aux = output, reset off */
	write_reg(hw, HFCUSB_CIRM, 0x10);

	/* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
	write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
		  ((hw->packet_size / 8) << 4));

	/* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */
	write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);

	/* enable PCM/GCI master mode */
	write_reg(hw, HFCUSB_MST_MODE1, 0);	/* set default values */
	write_reg(hw, HFCUSB_MST_MODE0, 1);	/* enable master mode */

	/* init the fifos */
	write_reg(hw, HFCUSB_F_THRES,
		  (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));

	fifo = hw->fifos;
	for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
		write_reg(hw, HFCUSB_FIFO, i);	/* select the desired fifo */
		fifo[i].max_size =
			(i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
		fifo[i].last_urblen = 0;

		/* set 2 bit for D- & E-channel */
		write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2));

		/* enable all fifos */
		if (i == HFCUSB_D_TX)
			write_reg(hw, HFCUSB_CON_HDLC,
				  (hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09);
		else
			write_reg(hw, HFCUSB_CON_HDLC, 0x08);
		write_reg(hw, HFCUSB_INC_RES_F, 2);	/* reset the fifo */
	}

	write_reg(hw, HFCUSB_SCTRL_R, 0);	/* disable both B receivers */
	handle_led(hw, LED_POWER_ON);
}
/* start USB data pipes dependand on device's endpoint configuration
 *
 * Starts the RX side (INT or ISO depending on the active USB config)
 * and the ISO OUT TX side for the requested channel. No-op if the
 * channel's RX fifo is already active.
 */
static void
hfcsusb_start_endpoint(struct hfcsusb *hw, int channel)
{
	/* quick check if endpoint already running */
	if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active))
		return;
	if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active))
		return;
	if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active))
		return;
	if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active))
		return;

	/* start rx endpoints using USB INT IN method */
	if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
		start_int_fifo(hw->fifos + channel*2 + 1);

	/* start rx endpoints using USB ISO IN method */
	if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) {
		switch (channel) {
		case HFC_CHAN_D:
			start_isoc_chain(hw->fifos + HFCUSB_D_RX,
					 ISOC_PACKETS_D,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_E:
			start_isoc_chain(hw->fifos + HFCUSB_PCM_RX,
					 ISOC_PACKETS_D,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_B1:
			start_isoc_chain(hw->fifos + HFCUSB_B1_RX,
					 ISOC_PACKETS_B,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_B2:
			start_isoc_chain(hw->fifos + HFCUSB_B2_RX,
					 ISOC_PACKETS_B,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		}
	}

	/* start tx endpoints using USB ISO OUT method
	 * (D uses ISOC_PACKETS_D, B uses ISOC_PACKETS_B — the original
	 * had these two swapped, inconsistent with the RX side above) */
	switch (channel) {
	case HFC_CHAN_D:
		start_isoc_chain(hw->fifos + HFCUSB_D_TX,
				 ISOC_PACKETS_D,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	case HFC_CHAN_B1:
		start_isoc_chain(hw->fifos + HFCUSB_B1_TX,
				 ISOC_PACKETS_B,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	case HFC_CHAN_B2:
		start_isoc_chain(hw->fifos + HFCUSB_B2_TX,
				 ISOC_PACKETS_B,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	}
}
/* stop USB data pipes dependand on device's endpoint configuration
 *
 * Counterpart of hfcsusb_start_endpoint(): gracefully stop the RX
 * (INT or ISO) and TX (ISO, except for the RX-only E-channel) pipes
 * of the requested channel. No-op if the channel is not active.
 */
static void
hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
{
	/* quick check if endpoint currently running */
	if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active))
		return;
	if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active))
		return;
	if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active))
		return;
	if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active))
		return;

	/* rx endpoints using USB INT IN method */
	if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
		stop_int_gracefull(hw->fifos + channel*2 + 1);

	/* rx endpoints using USB ISO IN method */
	if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO)
		stop_iso_gracefull(hw->fifos + channel*2 + 1);

	/* tx endpoints using USB ISO OUT method */
	if (channel != HFC_CHAN_E)
		stop_iso_gracefull(hw->fifos + channel*2);
}
/*
 * Hardware Initialization: verify the chip id, select the USB alternate
 * setting, prepare the vendor control-request machinery and reset the chip.
 *
 * Returns 0 on success, 1 on failure (note: not a negative errno -- the
 * caller only tests for non-zero).
 */
static int
setup_hfcsusb(struct hfcsusb *hw)
{
	u_char b;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	/* check the chip id */
	if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
		printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
		       hw->name, __func__);
		return 1;
	}
	if (b != HFCUSB_CHIPID) {
		printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n",
		       hw->name, __func__, b);
		return 1;
	}

	/* first set the needed config, interface and alternate */
	(void) usb_set_interface(hw->dev, hw->if_used, hw->alt_used);

	hw->led_state = 0;

	/*
	 * init the background machinery for control requests:
	 * 0xc0 = device-to-host vendor request, 0x40 = host-to-device
	 */
	hw->ctrl_read.bRequestType = 0xc0;
	hw->ctrl_read.bRequest = 1;
	hw->ctrl_read.wLength = cpu_to_le16(1);
	hw->ctrl_write.bRequestType = 0x40;
	hw->ctrl_write.bRequest = 0;
	hw->ctrl_write.wLength = 0;
	usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe,
			     (u_char *)&hw->ctrl_write, NULL, 0,
			     (usb_complete_t)ctrl_complete, hw);

	reset_hfcsusb(hw);
	return 0;
}
/*
 * Tear down a hfcsusb instance: stop all endpoints, unregister the mISDN
 * device, free the channels, kill the control URB and release the device
 * structure. The caller must not touch *hw afterwards.
 */
static void
release_hw(struct hfcsusb *hw)
{
	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	/*
	 * stop all endpoints gracefully
	 * TODO: mISDN_core should generate CLOSE_CHANNEL
	 * signals after calling mISDN_unregister_device()
	 */
	hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
	hfcsusb_stop_endpoint(hw, HFC_CHAN_B1);
	hfcsusb_stop_endpoint(hw, HFC_CHAN_B2);
	/* the E (PCM) endpoint exists only in some configurations */
	if (hw->fifos[HFCUSB_PCM_RX].pipe)
		hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
	if (hw->protocol == ISDN_P_TE_S0)
		l1_event(hw->dch.l1, CLOSE_CHANNEL);

	mISDN_unregister_device(&hw->dch.dev);
	mISDN_freebchannel(&hw->bch[1]);
	mISDN_freebchannel(&hw->bch[0]);
	mISDN_freedchannel(&hw->dch);

	if (hw->ctrl_urb) {
		usb_kill_urb(hw->ctrl_urb);
		usb_free_urb(hw->ctrl_urb);
		hw->ctrl_urb = NULL;
	}

	if (hw->intf)
		usb_set_intfdata(hw->intf, NULL);
	list_del(&hw->list);
	kfree(hw);
	/*
	 * Fix: removed the dead "hw = NULL;" store that followed kfree().
	 * hw is a by-value parameter, so nulling the local copy had no
	 * effect for any caller.
	 */
}
/*
 * Deactivate a B-channel: clear the mISDN bchannel state under the device
 * lock, then disable the hardware channel and stop its USB endpoints.
 */
static void
deactivate_bchannel(struct bchannel *bch)
{
	struct hfcsusb *hw = bch->hw;
	u_long flags;

	if (bch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n",
		       hw->name, __func__, bch->nr);

	spin_lock_irqsave(&hw->lock, flags);
	mISDN_clear_bchannel(bch);
	spin_unlock_irqrestore(&hw->lock, flags);
	/* switch the hardware channel off before stopping its pipes */
	hfcsusb_setup_bch(bch, ISDN_P_NONE);
	hfcsusb_stop_endpoint(hw, bch->nr);
}
/*
 * Layer 1 B-channel hardware access
 *
 * mISDN control entry point for B-channels. Returns 0 on success,
 * -EINVAL for unsupported commands.
 */
static int
hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	int ret = -EINVAL;

	if (bch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);

	switch (cmd) {
	case HW_TESTRX_RAW:
	case HW_TESTRX_HDLC:
	case HW_TESTRX_OFF:
		/* RX test modes are not supported by this driver */
		ret = -EINVAL;
		break;
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags))
			deactivate_bchannel(bch);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		/* drop the reference taken when the channel was opened */
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bch, arg);
		break;
	default:
		printk(KERN_WARNING "%s: unknown prim(%x)\n",
		       __func__, cmd);
	}
	return ret;
}
/*
 * Initialize one driver instance: set up locks, D/B/E channels, fifo
 * back-pointers, probe the hardware and register with the mISDN core.
 * On success the instance is appended to the global HFClist.
 *
 * Returns 0 on success or a negative error code; on failure hw (and its
 * channels) are freed here, so the caller must not free it again.
 *
 * NOTE(review): the error path kfree()s hw but does not free hw->ctrl_urb,
 * which the probe routine allocated beforehand -- looks like a URB leak;
 * verify against the probe/disconnect flow.
 */
static int
setup_instance(struct hfcsusb *hw, struct device *parent)
{
	u_long flags;
	int err, i;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	spin_lock_init(&hw->ctrl_lock);
	spin_lock_init(&hw->lock);

	mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state);
	hw->dch.debug = debug & 0xFFFF;
	hw->dch.hw = hw;
	hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	hw->dch.dev.D.send = hfcusb_l2l1D;
	hw->dch.dev.D.ctrl = hfc_dctrl;

	/* enable E-Channel logging */
	if (hw->fifos[HFCUSB_PCM_RX].pipe)
		mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL);

	hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	hw->dch.dev.nrbchan = 2;
	for (i = 0; i < 2; i++) {
		hw->bch[i].nr = i + 1;
		set_channelmap(i + 1, hw->dch.dev.channelmap);
		hw->bch[i].debug = debug;
		mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM);
		hw->bch[i].hw = hw;
		hw->bch[i].ch.send = hfcusb_l2l1B;
		hw->bch[i].ch.ctrl = hfc_bctrl;
		hw->bch[i].ch.nr = i + 1;
		list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels);
	}

	/* wire each fifo back to the channel it serves */
	hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0];
	hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0];
	hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1];
	hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1];
	hw->fifos[HFCUSB_D_TX].dch = &hw->dch;
	hw->fifos[HFCUSB_D_RX].dch = &hw->dch;
	hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech;
	hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech;

	err = setup_hfcsusb(hw);
	if (err)
		goto out;

	snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME,
		 hfcsusb_cnt + 1);
	printk(KERN_INFO "%s: registered as '%s'\n",
	       DRIVER_NAME, hw->name);

	err = mISDN_register_device(&hw->dch.dev, parent, hw->name);
	if (err)
		goto out;

	hfcsusb_cnt++;
	write_lock_irqsave(&HFClock, flags);
	list_add_tail(&hw->list, &HFClist);
	write_unlock_irqrestore(&HFClock, flags);
	return 0;

out:
	mISDN_freebchannel(&hw->bch[1]);
	mISDN_freebchannel(&hw->bch[0]);
	mISDN_freedchannel(&hw->dch);
	kfree(hw);
	return err;
}
/*
 * USB probe: match the device against our vendor table, scan all alternate
 * settings for the best-matching endpoint configuration (highest index in
 * validconf[] wins), allocate and populate the hfcsusb instance, then hand
 * it to setup_instance() for registration.
 *
 * Returns 0 on success, -EIO / -ENOMEM on failure.
 */
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct hfcsusb			*hw;
	struct usb_device		*dev = interface_to_usbdev(intf);
	struct usb_host_interface	*iface = intf->cur_altsetting;
	struct usb_host_interface	*iface_used = NULL;
	struct usb_host_endpoint	*ep;
	struct hfcsusb_vdata		*driver_info;
	int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx,
	    probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found,
	    ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size,
	    alt_used = 0;

	vend_idx = 0xffff;
	/*
	 * NOTE(review): "continue" here keeps scanning after a match, so the
	 * LAST matching table entry wins; a "break" (first match) would be
	 * the usual idiom -- confirm whether duplicates are intended.
	 */
	for (i = 0; hfcsusb_idtab[i].idVendor; i++) {
		if ((le16_to_cpu(dev->descriptor.idVendor)
		     == hfcsusb_idtab[i].idVendor) &&
		    (le16_to_cpu(dev->descriptor.idProduct)
		     == hfcsusb_idtab[i].idProduct)) {
			vend_idx = i;
			continue;
		}
	}

	printk(KERN_DEBUG
	       "%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n",
	       __func__, ifnum, iface->desc.bAlternateSetting,
	       intf->minor, vend_idx);

	if (vend_idx == 0xffff) {
		printk(KERN_WARNING
		       "%s: no valid vendor found in USB descriptor\n",
		       __func__);
		return -EIO;
	}

	/* if vendor and product ID is OK, start probing alternate settings */
	alt_idx = 0;
	small_match = -1;

	/* default settings */
	iso_packet_size = 16;
	packet_size = 64;

	while (alt_idx < intf->num_altsetting) {
		iface = intf->altsetting + alt_idx;
		probe_alt_setting = iface->desc.bAlternateSetting;
		cfg_used = 0;

		while (validconf[cfg_used][0]) {
			cfg_found = 1;
			vcf = validconf[cfg_used];
			ep = iface->endpoint;
			/* working copy: entries are knocked down to EP_NUL
			 * as matching endpoints are found */
			memcpy(cmptbl, vcf, 16 * sizeof(int));

			/* check for all endpoints in this alternate setting */
			for (i = 0; i < iface->desc.bNumEndpoints; i++) {
				ep_addr = ep->desc.bEndpointAddress;

				/* get endpoint base */
				idx = ((ep_addr & 0x7f) - 1) * 2;
				if (ep_addr & 0x80)
					idx++;
				attr = ep->desc.bmAttributes;

				if (cmptbl[idx] != EP_NOP) {
					if (cmptbl[idx] == EP_NUL)
						cfg_found = 0;
					if (attr == USB_ENDPOINT_XFER_INT
					    && cmptbl[idx] == EP_INT)
						cmptbl[idx] = EP_NUL;
					if (attr == USB_ENDPOINT_XFER_BULK
					    && cmptbl[idx] == EP_BLK)
						cmptbl[idx] = EP_NUL;
					if (attr == USB_ENDPOINT_XFER_ISOC
					    && cmptbl[idx] == EP_ISO)
						cmptbl[idx] = EP_NUL;

					/* vcf[17] holds the minimum interval
					 * required for INT endpoints */
					if (attr == USB_ENDPOINT_XFER_INT &&
					    ep->desc.bInterval < vcf[17]) {
						cfg_found = 0;
					}
				}
				ep++;
			}

			/* all required endpoints must have been consumed */
			for (i = 0; i < 16; i++)
				if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL)
					cfg_found = 0;

			if (cfg_found) {
				if (small_match < cfg_used) {
					small_match = cfg_used;
					alt_used = probe_alt_setting;
					iface_used = iface;
				}
			}
			cfg_used++;
		}
		alt_idx++;
	} /* (alt_idx < intf->num_altsetting) */

	/* not found a valid USB Ta Endpoint config */
	if (small_match == -1)
		return -EIO;

	iface = iface_used;
	hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;	/* got no mem */
	snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME);

	ep = iface->endpoint;
	vcf = validconf[small_match];

	/* second pass over the chosen setting: fill in the fifo pipes */
	for (i = 0; i < iface->desc.bNumEndpoints; i++) {
		struct usb_fifo *f;

		ep_addr = ep->desc.bEndpointAddress;
		/* get endpoint base */
		idx = ((ep_addr & 0x7f) - 1) * 2;
		if (ep_addr & 0x80)
			idx++;
		f = &hw->fifos[idx & 7];

		/* init Endpoints */
		if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) {
			ep++;
			continue;
		}
		switch (ep->desc.bmAttributes) {
		case USB_ENDPOINT_XFER_INT:
			f->pipe = usb_rcvintpipe(dev,
						 ep->desc.bEndpointAddress);
			f->usb_transfer_mode = USB_INT;
			packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (ep_addr & 0x80)
				f->pipe = usb_rcvbulkpipe(dev,
							  ep->desc.bEndpointAddress);
			else
				f->pipe = usb_sndbulkpipe(dev,
							  ep->desc.bEndpointAddress);
			f->usb_transfer_mode = USB_BULK;
			packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
			break;
		case USB_ENDPOINT_XFER_ISOC:
			if (ep_addr & 0x80)
				f->pipe = usb_rcvisocpipe(dev,
							  ep->desc.bEndpointAddress);
			else
				f->pipe = usb_sndisocpipe(dev,
							  ep->desc.bEndpointAddress);
			f->usb_transfer_mode = USB_ISOC;
			iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
			break;
		default:
			f->pipe = 0;
		}

		if (f->pipe) {
			f->fifonum = idx & 7;
			f->hw = hw;
			f->usb_packet_maxlen =
				le16_to_cpu(ep->desc.wMaxPacketSize);
			f->intervall = ep->desc.bInterval;
		}
		ep++;
	}
	hw->dev = dev; /* save device */
	hw->if_used = ifnum; /* save used interface */
	hw->alt_used = alt_used; /* and alternate config */
	hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */
	hw->cfg_used = vcf[16]; /* store used config */
	hw->vend_idx = vend_idx; /* store found vendor */
	hw->packet_size = packet_size;
	hw->iso_packet_size = iso_packet_size;

	/* create the control pipes needed for register access */
	hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
	hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
	/* NOTE(review): allocation result is not checked here; a NULL
	 * ctrl_urb would only surface later -- verify */
	hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);

	driver_info =
		(struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info;
	printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
	       hw->name, __func__, driver_info->vend_name,
	       conf_str[small_match], ifnum, alt_used);

	if (setup_instance(hw, dev->dev.parent))
		return -EIO;

	hw->intf = intf;
	usb_set_intfdata(hw->intf, hw);
	return 0;
}
/* function called when an active device is removed */
static void
hfcsusb_disconnect(struct usb_interface *intf)
{
	struct hfcsusb *hw = usb_get_intfdata(intf);
	struct hfcsusb *next;
	int cnt = 0;

	printk(KERN_INFO "%s: device disconnected\n", hw->name);

	handle_led(hw, LED_POWER_OFF);
	release_hw(hw);		/* frees hw; reused below only as list cursor */

	/* count the remaining registered devices */
	list_for_each_entry_safe(hw, next, &HFClist, list)
		cnt++;
	/* reset the instance counter once the last device is gone */
	if (!cnt)
		hfcsusb_cnt = 0;

	usb_set_intfdata(intf, NULL);
}
/* USB driver registration: ties the id table to probe/disconnect */
static struct usb_driver hfcsusb_drv = {
	.name = DRIVER_NAME,
	.id_table = hfcsusb_idtab,
	.probe = hfcsusb_probe,
	.disconnect = hfcsusb_disconnect,
};
/*
 * Module init: announce the driver and register it with the USB core.
 * Returns 0 on success, -ENODEV if registration fails.
 */
static int __init
hfcsusb_init(void)
{
	printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
	       hfcsusb_rev, debug, poll);

	if (usb_register(&hfcsusb_drv)) {
		printk(KERN_INFO DRIVER_NAME
		       ": Unable to register hfcsusb module at usb stack\n");
		return -ENODEV;
	}

	return 0;
}
/* Module exit: deregister from the USB core (probe/disconnect handle
 * per-device teardown). */
static void __exit
hfcsusb_cleanup(void)
{
	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);

	/* unregister Hardware */
	usb_deregister(&hfcsusb_drv);	/* release our driver */
}

module_init(hfcsusb_init);
module_exit(hfcsusb_cleanup);
| gpl-2.0 |
KFire-Android/kernel_omap_otter-common | arch/arm/mach-omap1/leds.c | 2936 | 1508 | /*
* linux/arch/arm/mach-omap1/leds.c
*
* OMAP LEDs dispatcher
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <mach/gpio.h>
#include <plat/mux.h>
#include "leds.h"
/*
 * Select the board-specific LED event handler on OMAP1 machines and set up
 * the GPIO pins that drive the debug LEDs on H2/H3 (and OSK Mistral).
 *
 * Returns 0 on success, -ENODEV on non-OMAP1 class CPUs.
 * NOTE(review): unknown machines return a bare -1 rather than a -Exxx
 * errno -- probably intended as -ENODEV; confirm before changing.
 */
static int __init
omap_leds_init(void)
{
	if (!cpu_class_is_omap1())
		return -ENODEV;

	if (machine_is_omap_innovator())
		leds_event = innovator_leds_event;
	else if (machine_is_omap_h2()
			|| machine_is_omap_h3()
			|| machine_is_omap_perseus2())
		leds_event = h2p2_dbg_leds_event;
	else if (machine_is_omap_osk())
		leds_event = osk_leds_event;
	else
		return -1;

	if (machine_is_omap_h2()
			|| machine_is_omap_h3()
#ifdef	CONFIG_OMAP_OSK_MISTRAL
			|| machine_is_omap_osk()
#endif
			) {

		/* LED1/LED2 pins can be used as GPIO (as done here), or by
		 * the LPG (works even in deep sleep!), to drive a bicolor
		 * LED on the H2 sample board, and another on the H2/P2
		 * "surfer" expansion board.
		 *
		 * The same pins drive a LED on the OSK Mistral board, but
		 * that's a different kind of LED (just one color at a time).
		 */
		omap_cfg_reg(P18_1610_GPIO3);
		if (gpio_request(3, "LED red") == 0)
			gpio_direction_output(3, 1);
		else
			printk(KERN_WARNING "LED: can't get GPIO3/red?\n");

		omap_cfg_reg(MPUIO4);
		if (gpio_request(OMAP_MPUIO(4), "LED green") == 0)
			gpio_direction_output(OMAP_MPUIO(4), 1);
		else
			printk(KERN_WARNING "LED: can't get MPUIO4/green?\n");
	}

	leds_event(led_start);
	return 0;
}

__initcall(omap_leds_init);
| gpl-2.0 |
haodongdong9999/vyos_kernel | arch/m68k/mm/memory.c | 4472 | 7769 | /*
* linux/arch/m68k/mm/memory.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

/* descriptor list node, overlaid on struct page->lru of the backing page */
typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
/* per-page bitmap (in page->index): one bit per pointer table, 1 == free */
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
/*
 * Register a boot-time pointer table with the allocator: mark its slot
 * used in the backing page's bitmap and add the page to ptable_list on
 * first use.
 */
void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		/* first table on this page: all 8 slots start out free */
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;

#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	init_page_count(PD_PAGE(dp));

	return;
}
/*
 * Allocate one pointer table. Takes a free slot from the first page on
 * ptable_list, or allocates (and uncaches) a fresh page when none is
 * free. Returns NULL on allocation failure.
 */
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		/* 0xfe: slot 0 handed out now, the other 7 remain free */
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* find the lowest set (free) bit and claim it */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
/*
 * Release one pointer table back to its page's free bitmap. Frees the
 * whole page once all 8 tables on it are free.
 *
 * Returns 1 if the backing page was freed, 0 otherwise. Panics on a
 * double free.
 */
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		/* undo the nocache_page() done at allocation time */
		cache_page((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}
/* invalidate page in both caches (68040 CINVP %bc: no writeback of
 * dirty lines -- data is discarded) */
static inline void clear040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* invalidate page in i-cache only (68040 CINVP %ic) */
static inline void cleari040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* push (write back) page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	/* on the '060 CPUSH does not invalidate, so do it explicitly */
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}
/*
* 040: Hit every page containing an address in the range paddr..paddr+len-1.
* (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
* Hit every page until there is a page or less to go. Hit the next page,
* and the one after that if the range hits it.
*/
/* ++roman: A little bit more care is required here: The CINVP instruction
* invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
* and the end of the region must be treated differently if they are not
* exactly at the beginning or end of a page boundary. Else, maybe too much
* data becomes invalidated and thus lost forever. CPUSHP does what we need:
* it invalidates the page after pushing dirty data to memory. (Thanks to Jes
* for discovering the problem!)
*/
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
* the DPI bit in the CACR; would it cause problems with temporarily changing
* this?). So we have to push first and then additionally to invalidate.
*/
/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		clear_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			/* partial first page: must push+invalidate, not just
			 * invalidate, to avoid losing unrelated dirty data */
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(0);
#endif
}
EXPORT_SYMBOL(cache_clear);
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(1);
#endif
}
EXPORT_SYMBOL(cache_push);
| gpl-2.0 |
antaril/AGK | arch/arm/mach-omap2/board-zoom-display.c | 4728 | 3660 | /*
* Copyright (C) 2010 Texas Instruments Inc.
*
* Modified from mach-omap2/board-zoom-peripherals.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/twl.h>
#include <linux/spi/spi.h>
#include <plat/mcspi.h>
#include <video/omapdss.h>
#define LCD_PANEL_RESET_GPIO_PROD 96
#define LCD_PANEL_RESET_GPIO_PILOT 55
#define LCD_PANEL_QVGA_GPIO 56
/* LCD control GPIOs; the reset line number is filled in at runtime since
 * it differs between pilot and production boards */
static struct gpio zoom_lcd_gpios[] __initdata = {
	{ -EINVAL,		GPIOF_OUT_INIT_HIGH, "lcd reset" },
	{ LCD_PANEL_QVGA_GPIO,	GPIOF_OUT_INIT_HIGH, "lcd qvga"  },
};

/* Claim the LCD reset/QVGA GPIOs, picking the reset line by board rev. */
static void __init zoom_lcd_panel_init(void)
{
	zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
			LCD_PANEL_RESET_GPIO_PROD :
			LCD_PANEL_RESET_GPIO_PILOT;

	if (gpio_request_array(zoom_lcd_gpios, ARRAY_SIZE(zoom_lcd_gpios)))
		pr_err("%s: Failed to get LCD GPIOs.\n", __func__);
}
/* DSS panel enable hook: nothing to do on this board, always succeeds */
static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev)
{
	return 0;
}

/* DSS panel disable hook: nothing to do on this board */
static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev)
{
}
/*
 * PWMA/B register offsets (TWL4030_MODULE_PWMA)
 */
#define TWL_INTBR_PMBR1	0xD
#define TWL_INTBR_GPBR1	0xC
#define TWL_LED_PWMON	0x0
#define TWL_LED_PWMOFF	0x1

/*
 * Set LCD backlight intensity (0..100) via the TWL4030 PWM1 output.
 * level 0 switches PWM1 off and returns the pin to GPIO mode.
 *
 * Returns 0 on success. NOTE(review): levels > 100 return a bare -1
 * rather than -EINVAL -- confirm callers only test non-zero.
 */
static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level)
{
#ifdef CONFIG_TWL4030_CORE
	unsigned char c;
	u8 mux_pwm, enb_pwm;

	if (level > 100)
		return -1;

	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &mux_pwm, TWL_INTBR_PMBR1);
	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &enb_pwm, TWL_INTBR_GPBR1);

	if (level == 0) {
		/* disable pwm1 output and clock */
		enb_pwm = enb_pwm & 0xF5;
		/* change pwm1 pin to gpio pin */
		mux_pwm = mux_pwm & 0xCF;
		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
				 enb_pwm, TWL_INTBR_GPBR1);
		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
				 mux_pwm, TWL_INTBR_PMBR1);
		return 0;
	}

	if (!((enb_pwm & 0xA) && (mux_pwm & 0x30))) {
		/* change gpio pin to pwm1 pin */
		mux_pwm = mux_pwm | 0x30;
		/* enable pwm1 output and clock*/
		enb_pwm = enb_pwm | 0x0A;
		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
				 mux_pwm, TWL_INTBR_PMBR1);
		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
				 enb_pwm, TWL_INTBR_GPBR1);
	}

	/* map 0..100% onto the PWM on-time (inverted duty cycle) */
	c = ((50 * (100 - level)) / 100) + 1;
	twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF);
	twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON);
#else
	pr_warn("Backlight not enabled\n");
#endif

	return 0;
}
/* The NEC 8048 LCD panel, driven over 24-bit parallel DPI */
static struct omap_dss_device zoom_lcd_device = {
	.name			= "lcd",
	.driver_name		= "NEC_8048_panel",
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.phy.dpi.data_lines	= 24,
	.platform_enable	= zoom_panel_enable_lcd,
	.platform_disable	= zoom_panel_disable_lcd,
	.max_backlight_level	= 100,
	.set_backlight		= zoom_set_bl_intensity,
};

static struct omap_dss_device *zoom_dss_devices[] = {
	&zoom_lcd_device,
};

/* DSS board info: the LCD is the only (and default) display */
static struct omap_dss_board_info zoom_dss_data = {
	.num_devices		= ARRAY_SIZE(zoom_dss_devices),
	.devices		= zoom_dss_devices,
	.default_device		= &zoom_lcd_device,
};

static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
	.turbo_mode		= 1,
};

/* SPI device for the panel's control interface (McSPI1, CS2) */
static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
	[0] = {
		.modalias		= "nec_8048_spi",
		.bus_num		= 1,
		.chip_select		= 2,
		.max_speed_hz		= 375000,
		.controller_data	= &dss_lcd_mcspi_config,
	},
};
/* Board display setup: register DSS data, the panel SPI device and the
 * LCD control GPIOs. Called from the board init code. */
void __init zoom_display_init(void)
{
	omap_display_init(&zoom_dss_data);
	spi_register_board_info(nec_8048_spi_board_info,
				ARRAY_SIZE(nec_8048_spi_board_info));
	zoom_lcd_panel_init();
}
| gpl-2.0 |
christianeisendle/linux | arch/sh/boards/mach-sdk7786/setup.c | 4728 | 6320 | /*
* Renesas Technology Europe SDK7786 Support.
*
* Copyright (C) 2010 Matt Fleming
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <mach/fpga.h>
#include <mach/irq.h>
#include <asm/machvec.h>
#include <asm/heartbeat.h>
#include <asm/sizes.h>
#include <asm/clock.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
static struct resource heartbeat_resource = {
.start = 0x07fff8b0,
.end = 0x07fff8b0 + sizeof(u16) - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.num_resources = 1,
.resource = &heartbeat_resource,
};
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
REGULATOR_SUPPLY("vdd33a", "smsc911x"),
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0x07ffff00,
.end = 0x07ffff00 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x2c0),
.end = evt2irq(0x2c0),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_32BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct resource smbus_fpga_resource = {
.start = 0x07fff9e0,
.end = 0x07fff9e0 + SZ_32 - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device smbus_fpga_device = {
.name = "i2c-sdk7786",
.id = 0,
.num_resources = 1,
.resource = &smbus_fpga_resource,
};
static struct resource smbus_pcie_resource = {
.start = 0x07fffc30,
.end = 0x07fffc30 + SZ_32 - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device smbus_pcie_device = {
.name = "i2c-sdk7786",
.id = 1,
.num_resources = 1,
.resource = &smbus_pcie_resource,
};
static struct i2c_board_info __initdata sdk7786_i2c_devices[] = {
{
I2C_BOARD_INFO("max6900", 0x68),
},
};
static struct platform_device *sh7786_devices[] __initdata = {
&heartbeat_device,
&smsc911x_device,
&smbus_fpga_device,
&smbus_pcie_device,
};
/*
 * Hand I2C bus control from the CPU to the FPGA and register the on-board
 * I2C devices (RTC). Returns the i2c_register_board_info() result.
 */
static int sdk7786_i2c_setup(void)
{
	unsigned int tmp;

	/*
	 * Hand over I2C control to the FPGA.
	 */
	tmp = fpga_read_reg(SBCR);
	tmp &= ~SCBR_I2CCEN;	/* CPU I2C controller off */
	tmp |= SCBR_I2CMEN;	/* FPGA I2C master on */
	fpga_write_reg(tmp, SBCR);

	return i2c_register_board_info(0, sdk7786_i2c_devices,
				       ARRAY_SIZE(sdk7786_i2c_devices));
}
/* Register the board platform devices, then the I2C devices.
 * Returns 0 on success or the first failing call's error code. */
static int __init sdk7786_devices_setup(void)
{
	int ret;

	ret = platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices));
	if (unlikely(ret != 0))
		return ret;

	return sdk7786_i2c_setup();
}
device_initcall(sdk7786_devices_setup);
/* Mode pins are read back from the FPGA's mode switch register. */
static int sdk7786_mode_pins(void)
{
	return fpga_read_reg(MODSWR);
}
/*
 * FPGA-driven PCIe clocks
 *
 * Historically these include the oscillator, clock B (slots 2/3/4) and
 * clock A (slot 1 and the CPU clock). Newer revs of the PCB shove
 * everything under a single PCIe clocks enable bit that happens to map
 * to the same bit position as the oscillator bit for earlier FPGA
 * versions.
 *
 * Given that the legacy clocks have the side-effect of shutting the CPU
 * off through the FPGA along with the PCI slots, we simply leave them in
 * their initial state and don't bother registering them with the clock
 * framework.
 */
static int sdk7786_pcie_clk_enable(struct clk *clk)
{
	/* set the single PCIe clock-enable bit in the FPGA */
	fpga_write_reg(fpga_read_reg(PCIECR) | PCIECR_CLKEN, PCIECR);
	return 0;
}

static void sdk7786_pcie_clk_disable(struct clk *clk)
{
	fpga_write_reg(fpga_read_reg(PCIECR) & ~PCIECR_CLKEN, PCIECR);
}

static struct sh_clk_ops sdk7786_pcie_clk_ops = {
	.enable		= sdk7786_pcie_clk_enable,
	.disable	= sdk7786_pcie_clk_disable,
};

static struct clk sdk7786_pcie_clk = {
	.ops		= &sdk7786_pcie_clk_ops,
};

/* clkdev lookup so drivers can clk_get(NULL, "pcie_plat_clk") */
static struct clk_lookup sdk7786_pcie_cl = {
	.con_id		= "pcie_plat_clk",
	.clk		= &sdk7786_pcie_clk,
};
/*
 * Board clock setup: program the EXTAL input rate and register the
 * FPGA-driven PCIe platform clock.
 *
 * Returns 0 on success or a negative error code.
 */
static int sdk7786_clk_init(void)
{
	struct clk *clk;
	int ret;

	/*
	 * Only handle the EXTAL case, anyone interfacing a crystal
	 * resonator will need to provide their own input clock.
	 */
	if (test_mode_pin(MODE_PIN9))
		return -EINVAL;

	clk = clk_get(NULL, "extal");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_set_rate(clk, 33333333);
	clk_put(clk);

	/*
	 * Fix: the clk_set_rate() result was previously stored in "ret"
	 * and then immediately overwritten without ever being checked.
	 * Bail out if the EXTAL rate could not be established.
	 */
	if (unlikely(ret)) {
		pr_err("EXTAL rate setup failed\n");
		return ret;
	}

	/*
	 * Setup the FPGA clocks.
	 */
	ret = clk_register(&sdk7786_pcie_clk);
	if (unlikely(ret)) {
		pr_err("FPGA clock registration failed\n");
		return ret;
	}

	clkdev_add(&sdk7786_pcie_cl);

	return 0;
}
/* Board restart: writing the 0xa5a5 magic to the FPGA soft reset register
 * resets the system. */
static void sdk7786_restart(char *cmd)
{
	fpga_write_reg(0xa5a5, SRSTR);
}
/* Board power-off: request power-down from the FPGA/R8C and spin with the
 * CPU sleeping until the supply is actually cut. */
static void sdk7786_power_off(void)
{
	fpga_write_reg(fpga_read_reg(PWRCR) | PWRCR_PDWNREQ, PWRCR);

	/*
	 * It can take up to 20us for the R8C to do its job, back off and
	 * wait a bit until we've been shut off. Even though newer FPGA
	 * versions don't set the ACK bit, the latency issue remains.
	 */
	while ((fpga_read_reg(PWRCR) & PWRCR_PDWNACK) == 0)
		cpu_sleep();
}
/* Initialize the board: dummy regulators, FPGA and NMI setup, and
 * installation of the restart/power-off/SMP hooks. */
static void __init sdk7786_setup(char **cmdline_p)
{
	pr_info("Renesas Technology Europe SDK7786 support:\n");

	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));

	sdk7786_fpga_init();
	sdk7786_nmi_init();

	pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf);

	machine_ops.restart = sdk7786_restart;
	pm_power_off = sdk7786_power_off;

	register_smp_ops(&shx3_smp_ops);
}
/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_sdk7786 __initmv = {
	.mv_name		= "SDK7786",
	.mv_setup		= sdk7786_setup,
	.mv_mode_pins		= sdk7786_mode_pins,
	.mv_clk_init		= sdk7786_clk_init,
	.mv_init_irq		= sdk7786_init_irq,
};
MattCrystal/Haunted | drivers/scsi/ps3rom.c | 5240 | 11539 | /*
* PS3 BD/DVD/CD-ROM Storage Driver
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
* by the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/cdrom.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
#define DEVICE_NAME "ps3rom"
#define BOUNCE_SIZE (64*1024)
#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
struct ps3rom_private {
struct ps3_storage_device *dev;
struct scsi_cmnd *curr_cmd;
};
#define LV1_STORAGE_SEND_ATAPI_COMMAND (1)
struct lv1_atapi_cmnd_block {
u8 pkt[32]; /* packet command block */
u32 pktlen; /* should be 12 for ATAPI 8020 */
u32 blocks;
u32 block_size;
u32 proto; /* transfer mode */
u32 in_out; /* transfer direction */
u64 buffer; /* parameter except command block */
u32 arglen; /* length above */
};
/* Values for lv1_atapi_cmnd_block.proto (transfer mode). */
enum lv1_atapi_proto {
	NON_DATA_PROTO     = 0,
	PIO_DATA_IN_PROTO  = 1,
	PIO_DATA_OUT_PROTO = 2,
	DMA_PROTO          = 3
};
/* Values for lv1_atapi_cmnd_block.in_out (transfer direction). */
enum lv1_atapi_in_out {
	DIR_WRITE = 0,		/* memory -> device */
	DIR_READ = 1		/* device -> memory */
};
/*
 * Per-device setup hook: force 10-byte variants of MODE SENSE and
 * READ/WRITE for this device.
 */
static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
{
	struct ps3rom_private *host_priv = shost_priv(scsi_dev->host);
	struct ps3_storage_device *dev = host_priv->dev;

	dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %u, channel %u\n", __func__,
		__LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel);

	/* READ_6/WRITE_6 are not supported; use the 10-byte CDBs */
	scsi_dev->use_10_for_rw = 1;

	/*
	 * ATAPI SFF8020 devices use MODE_SENSE_10,
	 * so we can prohibit MODE_SENSE_6
	 */
	scsi_dev->use_10_for_ms = 1;

	return 0;
}
/*
 * Build an LV1 ATAPI command block from a SCSI command and submit it to
 * the hypervisor.  Completion is reported asynchronously and handled in
 * ps3rom_interrupt(), matched via dev->tag.
 *
 * Returns 0 if the command was accepted, or DID_ERROR << 16 on
 * submission failure (caller folds this into cmd->result).
 */
static int ps3rom_atapi_request(struct ps3_storage_device *dev,
				struct scsi_cmnd *cmd)
{
	struct lv1_atapi_cmnd_block atapi_cmnd;
	unsigned char opcode = cmd->cmnd[0];
	int res;
	u64 lpar;

	dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__,
		__LINE__, opcode);

	memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block));
	memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12);
	atapi_cmnd.pktlen = 12;
	atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */

	atapi_cmnd.blocks = atapi_cmnd.arglen = scsi_bufflen(cmd);
	atapi_cmnd.buffer = dev->bounce_lpar;

	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		/* DMA for transfers of at least one CD frame, else PIO */
		if (scsi_bufflen(cmd) >= CD_FRAMESIZE)
			atapi_cmnd.proto = DMA_PROTO;
		else
			atapi_cmnd.proto = PIO_DATA_IN_PROTO;
		atapi_cmnd.in_out = DIR_READ;
		break;

	case DMA_TO_DEVICE:
		if (scsi_bufflen(cmd) >= CD_FRAMESIZE)
			atapi_cmnd.proto = DMA_PROTO;
		else
			atapi_cmnd.proto = PIO_DATA_OUT_PROTO;
		atapi_cmnd.in_out = DIR_WRITE;
		/* stage the outgoing payload in the DMA bounce buffer */
		scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);
		break;

	default:
		atapi_cmnd.proto = NON_DATA_PROTO;
		break;
	}

	/* the hypervisor needs the lpar address of the command block */
	lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd));
	res = lv1_storage_send_device_command(dev->sbd.dev_id,
					      LV1_STORAGE_SEND_ATAPI_COMMAND,
					      lpar, sizeof(atapi_cmnd),
					      atapi_cmnd.buffer,
					      atapi_cmnd.arglen, &dev->tag);

	if (res == LV1_DENIED_BY_POLICY) {
		/* policy denial is expected for some opcodes: debug only */
		dev_dbg(&dev->sbd.core,
			"%s:%u: ATAPI command 0x%02x denied by policy\n",
			__func__, __LINE__, opcode);
		return DID_ERROR << 16;
	}

	if (res) {
		dev_err(&dev->sbd.core,
			"%s:%u: ATAPI command 0x%02x failed %d\n", __func__,
			__LINE__, opcode, res);
		return DID_ERROR << 16;
	}

	return 0;
}
/*
 * Extract the 32-bit big-endian LBA from bytes 2-5 of a 10-byte CDB.
 *
 * The first byte is cast to unsigned int before shifting: cmd->cmnd[]
 * elements promote to (signed) int, and left-shifting a value >= 0x80
 * by 24 would shift into the sign bit, which is undefined behaviour.
 */
static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd)
{
	return (unsigned int)cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 |
	       cmd->cmnd[4] << 8 | cmd->cmnd[5];
}
/* Transfer length: big-endian 16-bit value in bytes 7-8 of a 10-byte CDB. */
static inline unsigned int srb10_len(const struct scsi_cmnd *cmd)
{
	return (cmd->cmnd[7] << 8) | cmd->cmnd[8];
}
/*
 * Submit an asynchronous lv1 storage read into the bounce buffer.
 * Returns 0 on successful submission, DID_ERROR << 16 otherwise.
 */
static int ps3rom_read_request(struct ps3_storage_device *dev,
			       struct scsi_cmnd *cmd, u32 start_sector,
			       u32 sectors)
{
	int error;

	dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n",
		__func__, __LINE__, sectors, start_sector);

	error = lv1_storage_read(dev->sbd.dev_id,
				 dev->regions[dev->region_idx].id,
				 start_sector, sectors, 0, dev->bounce_lpar,
				 &dev->tag);
	if (!error)
		return 0;

	dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__,
		__LINE__, error);
	return DID_ERROR << 16;
}
/*
 * Copy the command's payload into the bounce buffer and submit an
 * asynchronous lv1 storage write.  Returns 0 on successful submission,
 * DID_ERROR << 16 otherwise.
 */
static int ps3rom_write_request(struct ps3_storage_device *dev,
				struct scsi_cmnd *cmd, u32 start_sector,
				u32 sectors)
{
	int error;

	dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n",
		__func__, __LINE__, sectors, start_sector);

	/* stage the outgoing data before handing it to the hypervisor */
	scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);

	error = lv1_storage_write(dev->sbd.dev_id,
				  dev->regions[dev->region_idx].id,
				  start_sector, sectors, 0, dev->bounce_lpar,
				  &dev->tag);
	if (!error)
		return 0;

	dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__,
		__LINE__, error);
	return DID_ERROR << 16;
}
/*
 * Queue a SCSI command (invoked with the host lock held, via the
 * DEF_SCSI_QCMD wrapper below).  READ_10/WRITE_10 go through the
 * dedicated lv1 read/write calls; everything else is sent as a raw
 * ATAPI packet.  On submission failure the command is completed right
 * here with an ILLEGAL_REQUEST sense key; otherwise completion happens
 * later in ps3rom_interrupt().
 */
static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
{
	struct ps3rom_private *priv = shost_priv(cmd->device->host);
	struct ps3_storage_device *dev = priv->dev;
	unsigned char opcode;
	int res;

#ifdef DEBUG
	scsi_print_command(cmd);
#endif

	/* remember the in-flight command for the completion interrupt */
	priv->curr_cmd = cmd;
	cmd->scsi_done = done;

	opcode = cmd->cmnd[0];
	/*
	 * While we can submit READ/WRITE SCSI commands as ATAPI commands,
	 * it's recommended for various reasons (performance, error handling,
	 * ...) to use lv1_storage_{read,write}() instead
	 */
	switch (opcode) {
	case READ_10:
		res = ps3rom_read_request(dev, cmd, srb10_lba(cmd),
					  srb10_len(cmd));
		break;

	case WRITE_10:
		res = ps3rom_write_request(dev, cmd, srb10_lba(cmd),
					   srb10_len(cmd));
		break;

	default:
		res = ps3rom_atapi_request(dev, cmd);
		break;
	}

	if (res) {
		/* submission failed: complete synchronously with sense data */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		cmd->result = res;
		cmd->sense_buffer[0] = 0x70;	/* fixed-format sense */
		cmd->sense_buffer[2] = ILLEGAL_REQUEST;
		priv->curr_cmd = NULL;
		cmd->scsi_done(cmd);
	}

	return 0;
}
/* Generate ps3rom_queuecommand(): host-lock wrapper around the _lck variant */
static DEF_SCSI_QCMD(ps3rom_queuecommand)
/*
 * Unpack the SCSI sense triple (key/asc/ascq) from an lv1 status word.
 * Returns 0 on success; -1 if the top status byte is not
 * CHECK CONDITION, in which case the out parameters are left untouched.
 */
static int decode_lv1_status(u64 status, unsigned char *sense_key,
			     unsigned char *asc, unsigned char *ascq)
{
	unsigned char scsi_status = (status >> 24) & 0xff;

	if (scsi_status != SAM_STAT_CHECK_CONDITION)
		return -1;

	*sense_key = (status >> 16) & 0xff;
	*asc = (status >> 8) & 0xff;
	*ascq = status & 0xff;
	return 0;
}
/*
 * Completion interrupt for hypervisor storage requests: fetch the async
 * status, translate it into a SCSI result (building sense data for
 * CHECK CONDITION), and complete priv->curr_cmd.
 */
static irqreturn_t ps3rom_interrupt(int irq, void *data)
{
	struct ps3_storage_device *dev = data;
	struct Scsi_Host *host;
	struct ps3rom_private *priv;
	struct scsi_cmnd *cmd;
	int res;
	u64 tag, status;
	unsigned char sense_key, asc, ascq;

	res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
	/*
	 * status = -1 may mean that ATAPI transport completed OK, but
	 * ATAPI command itself resulted CHECK CONDITION
	 * so, upper layer should issue REQUEST_SENSE to check the sense data
	 */

	if (tag != dev->tag)
		dev_err(&dev->sbd.core,
			"%s:%u: tag mismatch, got %llx, expected %llx\n",
			__func__, __LINE__, tag, dev->tag);

	if (res) {
		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
			__func__, __LINE__, res, status);
		return IRQ_HANDLED;
	}

	host = ps3_system_bus_get_drvdata(&dev->sbd);
	priv = shost_priv(host);
	/*
	 * NOTE(review): curr_cmd is assumed non-NULL here; a spurious
	 * interrupt with no command in flight would dereference NULL —
	 * confirm the hypervisor cannot deliver one.
	 */
	cmd = priv->curr_cmd;

	if (!status) {
		/* OK, completed */
		if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
			int len;

			/* copy the read data out of the DMA bounce buffer */
			len = scsi_sg_copy_from_buffer(cmd,
						       dev->bounce_buf,
						       dev->bounce_size);
			scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
		}
		cmd->result = DID_OK << 16;
		goto done;
	}

	if (cmd->cmnd[0] == REQUEST_SENSE) {
		/* SCSI spec says request sense should never get error */
		dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n",
			__func__, __LINE__);
		cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	if (decode_lv1_status(status, &sense_key, &asc, &ascq)) {
		/* status not decodable as sense data: report generic error */
		cmd->result = DID_ERROR << 16;
		goto done;
	}

	/* fill in the sense buffer so the midlayer sees the real cause */
	scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
	cmd->result = SAM_STAT_CHECK_CONDITION;

done:
	priv->curr_cmd = NULL;
	cmd->scsi_done(cmd);
	return IRQ_HANDLED;
}
/*
 * SCSI host template: one command in flight at a time (can_queue = 1,
 * cmd_per_lun = 1), transfers capped at the 64 KiB bounce buffer size.
 */
static struct scsi_host_template ps3rom_host_template = {
	.name =			DEVICE_NAME,
	.slave_configure =	ps3rom_slave_configure,
	.queuecommand =		ps3rom_queuecommand,
	.can_queue =		1,
	.this_id =		7,
	.sg_tablesize =		SG_ALL,
	.cmd_per_lun =		1,
	.emulated =		1,		/* only sg driver uses this */
	.max_sectors =		PS3ROM_MAX_SECTORS,
	.use_clustering =	ENABLE_CLUSTERING,
	.module =		THIS_MODULE,
};
/*
 * Probe a PS3 ROM storage device: allocate the DMA bounce buffer, set up
 * the hypervisor storage channel and register a single-target,
 * single-LUN SCSI host.
 *
 * Returns 0 on success or a negative errno, unwinding any partially
 * acquired resources on failure.
 */
static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
{
	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
	int error;
	struct Scsi_Host *host;
	struct ps3rom_private *priv;

	/* the driver only handles 2048-byte (CD frame) block devices */
	if (dev->blk_size != CD_FRAMESIZE) {
		dev_err(&dev->sbd.core,
			"%s:%u: cannot handle block size %llu\n", __func__,
			__LINE__, dev->blk_size);
		return -EINVAL;
	}

	dev->bounce_size = BOUNCE_SIZE;
	dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
	if (!dev->bounce_buf)
		return -ENOMEM;

	error = ps3stor_setup(dev, ps3rom_interrupt);
	if (error)
		goto fail_free_bounce;

	host = scsi_host_alloc(&ps3rom_host_template,
			       sizeof(struct ps3rom_private));
	if (!host) {
		dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
			__func__, __LINE__);
		/*
		 * Bug fix: 'error' still held 0 from ps3stor_setup() here,
		 * so probe used to report success without a SCSI host.
		 */
		error = -ENOMEM;
		goto fail_teardown;
	}

	priv = shost_priv(host);
	ps3_system_bus_set_drvdata(&dev->sbd, host);
	priv->dev = dev;

	/* One device/LUN per SCSI bus */
	host->max_id = 1;
	host->max_lun = 1;

	error = scsi_add_host(host, &dev->sbd.core);
	if (error) {
		/* message fixed: this path is scsi_add_host, not _alloc */
		dev_err(&dev->sbd.core, "%s:%u: scsi_add_host failed %d\n",
			__func__, __LINE__, error);
		error = -ENODEV;
		goto fail_host_put;
	}

	scsi_scan_host(host);
	return 0;

fail_host_put:
	scsi_host_put(host);
	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
fail_teardown:
	ps3stor_teardown(dev);
fail_free_bounce:
	kfree(dev->bounce_buf);
	return error;
}
/*
 * Tear down a PS3 ROM device in the reverse order of ps3rom_probe():
 * unregister the SCSI host, release the hypervisor storage channel,
 * drop the host reference and free the DMA bounce buffer.
 */
static int ps3rom_remove(struct ps3_system_bus_device *_dev)
{
	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
	struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);

	scsi_remove_host(host);
	ps3stor_teardown(dev);
	scsi_host_put(host);
	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
	kfree(dev->bounce_buf);
	return 0;
}
/* PS3 system-bus driver glue: binds to ROM-class storage devices. */
static struct ps3_system_bus_driver ps3rom = {
	.match_id	= PS3_MATCH_ID_STOR_ROM,
	.core.name	= DEVICE_NAME,
	.core.owner	= THIS_MODULE,
	.probe		= ps3rom_probe,
	.remove		= ps3rom_remove
};
/* Module load: register the driver with the PS3 system bus. */
static int __init ps3rom_init(void)
{
	return ps3_system_bus_driver_register(&ps3rom);
}
/* Module unload: unregister the driver from the PS3 system bus. */
static void __exit ps3rom_exit(void)
{
	ps3_system_bus_driver_unregister(&ps3rom);
}
module_init(ps3rom_init);
module_exit(ps3rom_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver");
MODULE_AUTHOR("Sony Corporation");
/* udev/modprobe alias so the module loads when the device is reported */
MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.