repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
PatrickPalm/alcatel-kernel-msm7x30 | fs/xfs/linux-2.6/xfs_ioctl.c | 2534 | 35469 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_dfrag.h"
#include "xfs_fsops.h"
#include "xfs_vnodeops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_inode_item.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/exportfs.h>
/*
* xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
* a file or fs handle.
*
* XFS_IOC_PATH_TO_FSHANDLE
* returns fs handle for a mount point or path within that mount point
* XFS_IOC_FD_TO_HANDLE
* returns full handle for a FD opened in user space
* XFS_IOC_PATH_TO_HANDLE
* returns full handle for a path
*/
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct file		*file = NULL;
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	/* Resolve the target inode from either an open fd or a pathname. */
	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		file = fget(hreq->fd);
		if (!file)
			return -EBADF;
		inode = file->f_path.dentry->d_inode;
	} else {
		/* user_lpath(): do not follow a trailing symlink */
		error = user_lpath((const char __user *)hreq->path, &path);
		if (error)
			return error;
		inode = path.dentry->d_inode;
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	/* The fsid part is common to all three handle flavours. */
	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		int		lock_mode;

		/* Take the ilock so fid_gen/fid_ino are read consistently. */
		lock_mode = xfs_ilock_map_shared(ip);
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = ip->i_d.di_gen;
		handle.ha_fid.fid_ino = ip->i_ino;
		xfs_iunlock_map_shared(ip, lock_mode);

		hsize = XFS_HSIZE(handle);
	}

	/* Copy the handle and its length back to userspace. */
	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	/* Drop whichever reference the lookup above took. */
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fput(file);
	else
		path_put(&path);
	return error;
}
/*
* No need to do permission checks on the various pathname components
* as the handle operations are privileged.
*/
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	/* Accept any dentry that exportfs_decode_fh() offers us. */
	return 1;
}
/*
* Convert userspace handle data into a dentry.
*/
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode))
		return ERR_PTR(-ENOTDIR);

	/* Must be a full file handle, not just an fsid-only handle. */
	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE(review): fid is a struct xfs_fid64 but only sizeof(struct fid)
	 * bytes are zeroed.  Only the three 32-bit words handed to
	 * exportfs_decode_fh() below are consumed, so this looks harmless --
	 * confirm before relying on the parent_* fields.
	 */
	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	/* fh_len of 3 words covers ino (2 words) + gen (1 word). */
	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}
/*
 * Thin wrapper: resolve the handle embedded in @hreq to a dentry.
 */
STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}
/*
 * Open a file by its XFS handle.  Requires CAP_SYS_ADMIN.  Returns the
 * newly installed fd on success, or a negative errno.
 */
int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = dentry->d_inode;

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -XFS_ERROR(EPERM);
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	/* Put open permission in namei format. */
	permflag = hreq->oflags;
	if ((permflag+1) & O_ACCMODE)
		permflag++;
	if (permflag & O_TRUNC)
		permflag |= 2;

	/* Append-only files may only be opened O_APPEND and not truncated. */
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -XFS_ERROR(EPERM);
		goto out_dput;
	}

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -XFS_ERROR(EACCES);
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		error = -XFS_ERROR(EISDIR);
		goto out_dput;
	}

	fd = get_unused_fd();
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	/*
	 * NOTE(review): no dput on this failure path because dentry_open()
	 * in this kernel consumes the dentry/vfsmount references even on
	 * error -- confirm against the VFS version in use.
	 */
	filp = dentry_open(dentry, mntget(parfilp->f_path.mnt),
			   hreq->oflags, cred);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	/*
	 * Was "inode->i_mode & S_IFREG": S_IFREG is a bit pattern within
	 * S_IFMT, not a flag bit, so the idiomatic (and robust) test is
	 * S_ISREG().  Only regular files and directories reach this point,
	 * so behaviour for reachable inodes is unchanged.
	 */
	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
/*
* This is a copy from fs/namei.c:vfs_readlink(), except for removing it's
* unused first argument.
*/
/*
 * Copy the resolved link target @link into the user buffer @buffer of
 * size @buflen.  Returns the number of bytes copied (possibly truncated
 * to @buflen) or a negative errno.
 */
STATIC int
do_readlink(
	char __user	*buffer,
	int		buflen,
	const char	*link)
{
	int		len = PTR_ERR(link);

	if (IS_ERR(link))
		return len;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		return -EFAULT;
	return len;
}
/*
 * Read the target of the symlink identified by the handle in @hreq into
 * the user buffer hreq->ohandle.  Note: like the other *_by_handle
 * ioctls this returns a positive byte count on success, or a negative
 * errno on failure.
 */
int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	void			*link;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!S_ISLNK(dentry->d_inode->i_mode)) {
		error = -XFS_ERROR(EINVAL);
		goto out_dput;
	}

	/* olen is the size of the user-supplied output buffer. */
	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -XFS_ERROR(EFAULT);
		goto out_dput;
	}

	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
	if (!link) {
		error = -XFS_ERROR(ENOMEM);
		goto out_dput;
	}

	error = -xfs_readlink(XFS_I(dentry->d_inode), link);
	if (error)
		goto out_kfree;
	/*
	 * do_readlink() returns the positive byte count that becomes this
	 * ioctl's success return value.  The redundant "if (error) goto
	 * out_kfree" that used to follow jumped to the very next statement
	 * and has been removed.
	 */
	error = do_readlink(hreq->ohandle, olen, link);

 out_kfree:
	kfree(link);
 out_dput:
	dput(dentry);
	return error;
}
/*
 * Set the DMAPI event mask and state on the inode identified by the
 * handle inside @arg (a xfs_fsop_setdm_handlereq_t).  Returns a
 * negative errno.
 */
STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	/* DMAPI attributes are guarded by CAP_MKNOD, not CAP_SYS_ADMIN. */
	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
		error = -XFS_ERROR(EPERM);
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);
		goto out;
	}

	/* xfs_set_dmattrs() returns a positive errno; negate for the VFS. */
	error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

 out:
	dput(dentry);
	return error;
}
/*
 * List extended attributes of the inode identified by the handle in
 * @arg (a xfs_fsop_attrlist_handlereq_t).  Returns a negative errno.
 */
STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	/*
	 * The buffer must at least hold the attrlist header that
	 * xfs_attr_list() unconditionally writes; previously a zero/tiny
	 * buflen reached kzalloc(0) and let xfs_attr_list() write past the
	 * allocation.
	 */
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
	if (!kbuf)
		goto out_dput;

	/* The user-visible cursor doubles as the kernel iteration cursor. */
	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

 out_kfree:
	kfree(kbuf);
 out_dput:
	dput(dentry);
	return error;
}
/*
 * Read extended attribute @name of @inode into a kernel buffer and copy
 * it to @ubuf.  On entry *len caps the value size; xfs_attr_get()
 * updates it to the actual size.  Returns 0 or a positive errno
 * (callers negate).
 */
int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	__uint32_t		*len,
	__uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = EFAULT;

	if (*len > XATTR_SIZE_MAX)
		return EINVAL;
	kbuf = kmalloc(*len, GFP_KERNEL);
	if (!kbuf)
		return ENOMEM;

	/* xfs_attr_get() rewrites *len to the attribute's actual length. */
	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = EFAULT;

 out_kfree:
	kfree(kbuf);
	return error;
}
/*
 * Set extended attribute @name on @inode to the @len bytes at @ubuf.
 * Returns 0 or a positive errno (callers negate), except for the
 * negative value propagated from memdup_user() failure.
 */
int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	__uint32_t		len,
	__uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return EPERM;
	if (len > XATTR_SIZE_MAX)
		return EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	/*
	 * memdup_user() allocated kbuf and xfs_attr_set() does not take
	 * ownership of it; the original code leaked it on every call.
	 */
	kfree(kbuf);
	return error;
}
/*
 * Remove extended attribute @name from @inode.  Returns 0 or a positive
 * errno (callers negate).
 */
int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	__uint32_t		flags)
{
	/* Immutable/append-only inodes may not lose attributes. */
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return EPERM;
	return xfs_attr_remove(XFS_I(inode), name, flags);
}
/*
 * Perform a batch of attr get/set/remove operations on the inode
 * identified by am_hreq.hreq.  Per-op results are stored positively in
 * ops[i].am_error; the overall result is returned as a negative errno
 * via the trailing "return -error".
 */
STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* error holds POSITIVE errnos from here on; negated on return */
	error = E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		/*
		 * PTR_ERR() is negative; it must be flipped to the positive
		 * convention used here, otherwise the final "return -error"
		 * hands a positive value (i.e. success) back to the VFS.
		 */
		error = -PTR_ERR(ops);
		goto out_dput;
	}

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					dentry->d_inode, attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					dentry->d_inode, attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					dentry->d_inode, attr_name,
					ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	/* Copy per-op error codes back even when some operations failed. */
	if (copy_to_user(am_hreq.ops, ops, size))
		error = XFS_ERROR(EFAULT);

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return -error;
}
/*
 * Handle the space preallocation/free ioctls (XFS_IOC_ALLOCSP and
 * friends).  @bf has already been copied in from userspace by the
 * caller.  Returns a negative errno.
 */
int
xfs_ioc_space(
	struct xfs_inode	*ip,
	struct inode		*inode,
	struct file		*filp,
	int			ioflags,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	int			attr_flags = 0;
	int			error;

	/*
	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	 */
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -XFS_ERROR(EPERM);

	if (!(filp->f_mode & FMODE_WRITE))
		return -XFS_ERROR(EBADF);

	if (!S_ISREG(inode->i_mode))
		return -XFS_ERROR(EINVAL);

	/* Map the open file's state onto xfs_change_file_space() flags. */
	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		attr_flags |= XFS_ATTR_NONBLOCK;

	if (filp->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	if (ioflags & IO_INVIS)
		attr_flags |= XFS_ATTR_DMI;

	/* Positive XFS errno; negate for the VFS. */
	error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
	return -error;
}
/*
 * Service XFS_IOC_FSINUMBERS, XFS_IOC_FSBULKSTAT_SINGLE and
 * XFS_IOC_FSBULKSTAT: bulk inode number/stat enumeration starting from
 * the user-supplied cursor (lastip).  Returns a negative errno.
 */
STATIC int
xfs_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void __user		*arg)
{
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -XFS_ERROR(EFAULT);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_single(mp, &inlast,
						bulkreq.ubuffer, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
				     sizeof(xfs_bstat_t), bulkreq.ubuffer,
				     &done);

	if (error)
		return -error;

	/*
	 * NOTE(review): the updated cursor (lastip) is only copied back when
	 * the caller supplied an ocount pointer -- confirm this asymmetry is
	 * intentional before changing it.
	 */
	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
	}

	return 0;
}
/*
 * XFS_IOC_FSGEOMETRY_V1: report the filesystem geometry using the old
 * v1 structure layout.  Returns a negative errno.
 */
STATIC int
xfs_ioc_fsgeometry_v1(
	xfs_mount_t		*mp,
	void __user		*arg)
{
	xfs_fsop_geom_t		geo;
	int			err = xfs_fs_geometry(mp, &geo, 3);

	if (err)
		return -err;

	/*
	 * The caller handed us a xfs_fsop_geom_v1_t, which is a proper
	 * prefix of the xfs_fsop_geom_t filled in above, so copy back only
	 * that much.
	 */
	if (copy_to_user(arg, &geo, sizeof(xfs_fsop_geom_v1_t)))
		return -XFS_ERROR(EFAULT);
	return 0;
}
/*
 * XFS_IOC_FSGEOMETRY: report the full (version 4) filesystem geometry.
 * Returns a negative errno.
 */
STATIC int
xfs_ioc_fsgeometry(
	xfs_mount_t		*mp,
	void __user		*arg)
{
	xfs_fsop_geom_t		geo;
	int			err = xfs_fs_geometry(mp, &geo, 4);

	if (err)
		return -err;
	if (copy_to_user(arg, &geo, sizeof(geo)))
		return -XFS_ERROR(EFAULT);
	return 0;
}
/*
* Linux extended inode flags interface.
*/
/*
 * Merge the Linux FS_*_FL bits in @flags into the XFS xflags word
 * @start: each supported flag is set or cleared to mirror @flags, while
 * every other bit of @start passes through untouched.
 */
STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int		flags,
	unsigned int		start)
{
	static const struct {
		unsigned int	lxflag;
		unsigned int	xflag;
	} tab[] = {
		{ FS_IMMUTABLE_FL,	XFS_XFLAG_IMMUTABLE },
		{ FS_APPEND_FL,		XFS_XFLAG_APPEND },
		{ FS_SYNC_FL,		XFS_XFLAG_SYNC },
		{ FS_NOATIME_FL,	XFS_XFLAG_NOATIME },
		{ FS_NODUMP_FL,		XFS_XFLAG_NODUMP },
	};
	unsigned int		xflags = start;
	unsigned int		i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		if (flags & tab[i].lxflag)
			xflags |= tab[i].xflag;
		else
			xflags &= ~tab[i].xflag;
	}
	return xflags;
}
/*
 * Translate on-disk XFS_DIFLAG_* bits into their Linux FS_*_FL
 * equivalents.  Flags with no Linux counterpart are dropped.
 */
STATIC unsigned int
xfs_di2lxflags(
	__uint16_t		di_flags)
{
	static const struct {
		__uint16_t	diflag;
		unsigned int	lxflag;
	} tab[] = {
		{ XFS_DIFLAG_IMMUTABLE,	FS_IMMUTABLE_FL },
		{ XFS_DIFLAG_APPEND,	FS_APPEND_FL },
		{ XFS_DIFLAG_SYNC,	FS_SYNC_FL },
		{ XFS_DIFLAG_NOATIME,	FS_NOATIME_FL },
		{ XFS_DIFLAG_NODUMP,	FS_NODUMP_FL },
	};
	unsigned int		flags = 0;
	unsigned int		i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		if (di_flags & tab[i].diflag)
			flags |= tab[i].lxflag;
	}
	return flags;
}
/*
 * XFS_IOC_FSGETXATTR[A]: fill a struct fsxattr with the inode's xflags,
 * extent size hint, project id and extent count.  @attr selects the
 * attribute fork's extent count instead of the data fork's.  Returns a
 * negative errno.
 */
STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void __user		*arg)
{
	struct fsxattr		fa;

	memset(&fa, 0, sizeof(struct fsxattr));

	/* Shared ilock gives a consistent snapshot of the fields below. */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				/* in-core extent list: count its records */
				fa.fsx_nextents = ip->i_afp->if_bytes /
							sizeof(xfs_bmbt_rec_t);
			else
				fa.fsx_nextents = ip->i_d.di_anextents;
		} else
			fa.fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_df.if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
/*
 * Convert FS_XFLAG-style @xflags into on-disk XFS_DIFLAG_* bits and
 * store them in ip->i_d.di_flags.  Caller holds the ilock exclusively
 * and logs the inode afterwards.
 */
STATIC void
xfs_set_diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	unsigned int		di_flags;

	/* can't set PREALLOC this way, just preserve it */
	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
	if (xflags & XFS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & XFS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & XFS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & XFS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & XFS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & XFS_XFLAG_PROJINHERIT)
		di_flags |= XFS_DIFLAG_PROJINHERIT;
	if (xflags & XFS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & XFS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	/* some flags are only honoured on directories ... */
	if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
		if (xflags & XFS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & XFS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & XFS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
	/* ... others only on regular files */
	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
		if (xflags & XFS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & XFS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}
	ip->i_d.di_flags = di_flags;
}
/*
 * Propagate the inode's XFS xflags into the generic VFS i_flags so the
 * rest of the kernel observes IMMUTABLE/APPEND/SYNC/NOATIME behaviour.
 */
STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	static const struct {
		unsigned int	xflag;
		unsigned int	vfsflag;
	} tab[] = {
		{ XFS_XFLAG_IMMUTABLE,	S_IMMUTABLE },
		{ XFS_XFLAG_APPEND,	S_APPEND },
		{ XFS_XFLAG_SYNC,	S_SYNC },
		{ XFS_XFLAG_NOATIME,	S_NOATIME },
	};
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);
	unsigned int		i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		if (xflags & tab[i].xflag)
			inode->i_flags |= tab[i].vfsflag;
		else
			inode->i_flags &= ~tab[i].vfsflag;
	}
}
/* Mask bits selecting which fsxattr fields xfs_ioctl_setattr() applies. */
#define FSX_PROJID	1
#define FSX_EXTSIZE	2
#define FSX_XFLAGS	4
#define FSX_NONBLOCK	8

/*
 * Apply the fields of @fa selected by @mask to inode @ip within a
 * single transaction.  Returns 0 or a positive XFS errno (callers
 * negate before returning to the VFS).
 */
STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa,
	int			mask)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	unsigned int		lock_flags = 0;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;

	trace_xfs_ioctl_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Disallow 32bit project ids when projid32bit feature is not enabled.
	 */
	if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
			!xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return XFS_ERROR(EINVAL);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, &gdqp);
		if (code)
			return code;
	}

	/*
	 * For the other attributes, we acquire the inode lock and
	 * first do an error checking pass.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (code)
		goto error_return;

	/* ilock taken only after a successful reservation; see unwind path */
	lock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal
	 * to the file owner ID, except in cases where the
	 * CAP_FSETID capability is applicable.
	 */
	if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
		code = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Do a quota reservation only if projid is actually going to change.
	 */
	if (mask & FSX_PROJID) {
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    XFS_IS_PQUOTA_ON(mp) &&
		    xfs_get_projid(ip) != fa->fsx_projid) {
			ASSERT(tp);
			code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (code)	/* out of quota */
				goto error_return;
		}
	}

	if (mask & FSX_EXTSIZE) {
		/*
		 * Can't change extent size if any extents are allocated.
		 */
		if (ip->i_d.di_nextents &&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
		     fa->fsx_extsize)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}

		/*
		 * Extent size must be a multiple of the appropriate block
		 * size, if set at all. It must also be smaller than the
		 * maximum extent size supported by the filesystem.
		 *
		 * Also, for non-realtime files, limit the extent size hint to
		 * half the size of the AGs in the filesystem so alignment
		 * doesn't result in extents larger than an AG.
		 */
		if (fa->fsx_extsize != 0) {
			xfs_extlen_t	size;
			xfs_fsblock_t	extsize_fsb;

			extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
			if (extsize_fsb > MAXEXTLEN) {
				code = XFS_ERROR(EINVAL);
				goto error_return;
			}

			if (XFS_IS_REALTIME_INODE(ip) ||
			    ((mask & FSX_XFLAGS) &&
			    (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
				size = mp->m_sb.sb_rextsize <<
				       mp->m_sb.sb_blocklog;
			} else {
				size = mp->m_sb.sb_blocksize;
				if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
					code = XFS_ERROR(EINVAL);
					goto error_return;
				}
			}

			if (fa->fsx_extsize % size) {
				code = XFS_ERROR(EINVAL);
				goto error_return;
			}
		}
	}

	if (mask & FSX_XFLAGS) {
		/*
		 * Can't change realtime flag if any extents are allocated.
		 */
		if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
		    (XFS_IS_REALTIME_INODE(ip)) !=
		    (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */
			goto error_return;
		}

		/*
		 * If realtime flag is set then must have realtime data.
		 */
		if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
			if ((mp->m_sb.sb_rblocks == 0) ||
			    (mp->m_sb.sb_rextsize == 0) ||
			    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
				code = XFS_ERROR(EINVAL);
				goto error_return;
			}
		}

		/*
		 * Can't modify an immutable/append-only file unless
		 * we have appropriate permission.
		 */
		if ((ip->i_d.di_flags &
				(XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
		     (fa->fsx_xflags &
				(XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
		    !capable(CAP_LINUX_IMMUTABLE)) {
			code = XFS_ERROR(EPERM);
			goto error_return;
		}
	}

	/* All checks passed: join the inode and start modifying it. */
	xfs_trans_ijoin(tp, ip);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & FSX_PROJID) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (xfs_get_projid(ip) != fa->fsx_projid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
				olddquot = xfs_qm_vop_chown(tp, ip,
							&ip->i_gdquot, gdqp);
			}
			xfs_set_projid(ip, fa->fsx_projid);

			/*
			 * We may have to rev the inode as well as
			 * the superblock version number since projids didn't
			 * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
			 */
			if (ip->i_d.di_version == 1)
				xfs_bump_ino_vers2(tp, ip);
		}
	}

	if (mask & FSX_EXTSIZE)
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	if (mask & FSX_XFLAGS) {
		xfs_set_diflags(ip, fa->fsx_xflags);
		xfs_diflags_to_linux(ip);
	}

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 * This is slightly sub-optimal in that truncates require
	 * two sync transactions instead of one for wsync filesystems.
	 * One for the truncate and one for the timestamps since we
	 * don't want to change the timestamps unless we're sure the
	 * truncate worked.  Truncates are less than 1% of the laddis
	 * mix so this probably isn't worth the trouble to optimize.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	code = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, lock_flags);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	return code;

 error_return:
	/* Unwind: dquots, the transaction, and the ilock (if taken). */
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_trans_cancel(tp, 0);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return code;
}
/*
 * XFS_IOC_FSSETXATTR: copy a struct fsxattr from userspace and apply
 * all three of its fields (xflags, extsize, projid) via
 * xfs_ioctl_setattr().  Returns a negative errno.
 */
STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void __user		*arg)
{
	struct fsxattr		fa;
	unsigned int		mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	/* Non-blocking opens propagate into the setattr path. */
	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		mask |= FSX_NONBLOCK;

	return -xfs_ioctl_setattr(ip, &fa, mask);
}
/*
 * XFS_IOC_GETXFLAGS: report the inode's flags in Linux FS_*_FL form.
 * Returns a negative errno.
 */
STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void __user		*arg)
{
	unsigned int		lxflags = xfs_di2lxflags(ip->i_d.di_flags);

	if (copy_to_user(arg, &lxflags, sizeof(lxflags)))
		return -EFAULT;
	return 0;
}
/*
 * XFS_IOC_SETXFLAGS: set the Linux FS_*_FL inode flags.  Flags outside
 * the supported set are rejected; supported ones are merged with the
 * inode's current xflags and applied via xfs_ioctl_setattr().
 * Returns a negative errno.
 */
STATIC int
xfs_ioc_setxflags(
	xfs_inode_t		*ip,
	struct file		*filp,
	void __user		*arg)
{
	struct fsxattr		fa;
	unsigned int		flags;
	unsigned int		mask;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL))
		return -EOPNOTSUPP;

	mask = FSX_XFLAGS;
	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		mask |= FSX_NONBLOCK;
	/* only fa.fsx_xflags is initialized; the mask keeps setattr from
	 * reading the other (uninitialized) fields */
	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	return -xfs_ioctl_setattr(ip, &fa, mask);
}
/*
 * Format callback for xfs_ioc_getbmap(): emit one struct getbmap (the
 * prefix of struct getbmapx) per extent into the user buffer at *ap,
 * then advance *ap.  Returns 0 or a positive errno.
 */
STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
{
	struct getbmap __user	*base = *ap;

	/* copy only getbmap portion (not getbmapx) */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return XFS_ERROR(EFAULT);

	/* void-pointer arithmetic: GNU C extension, used kernel-wide */
	*ap += sizeof(struct getbmap);
	return 0;
}
/*
 * XFS_IOC_GETBMAP[A]: return the file's block mapping as struct getbmap
 * records.  The header lives at @arg and the records follow it.
 * Returns a negative errno.
 */
STATIC int
xfs_ioc_getbmap(
	struct xfs_inode	*ip,
	int			ioflags,
	unsigned int		cmd,
	void __user		*arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
		return -XFS_ERROR(EFAULT);

	/* one slot of the caller's array is reserved for the header */
	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (ioflags & IO_INVIS)
		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
			    (struct getbmap *)arg+1);
	if (error)
		return -error;

	/* copy back header - only size of getbmap */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
		return -XFS_ERROR(EFAULT);

	return 0;
}
/*
 * Format callback for xfs_ioc_getbmapx(): emit one full struct getbmapx
 * per extent into the user buffer at *ap, then advance *ap.
 * Returns 0 or a positive errno.
 */
STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full)
{
	struct getbmapx __user	*base = *ap;

	if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
		return XFS_ERROR(EFAULT);

	/* void-pointer arithmetic: GNU C extension, used kernel-wide */
	*ap += sizeof(struct getbmapx);
	return 0;
}
/*
 * XFS_IOC_GETBMAPX: extended block-mapping query.  Validates the
 * user-supplied iflags, then writes full struct getbmapx records after
 * the header at @arg.  Returns a negative errno.
 */
STATIC int
xfs_ioc_getbmapx(
	struct xfs_inode	*ip,
	void __user		*arg)
{
	struct getbmapx		bmx;
	int			error;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);

	/* one slot of the caller's array is reserved for the header */
	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	if (bmx.bmv_iflags & (~BMV_IF_VALID))
		return -XFS_ERROR(EINVAL);

	error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
			    (struct getbmapx *)arg+1);
	if (error)
		return -error;

	/* copy back header */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
		return -XFS_ERROR(EFAULT);

	return 0;
}
/*
* Note: some of the ioctl's return positive numbers as a
* byte count indicating success, such as readlink_by_handle.
* So we don't "sign flip" like most other routines. This means
* true errors need to be returned as a negative value.
*/
long
xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
int ioflags = 0;
int error;
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
trace_xfs_file_ioctl(ip);
switch (cmd) {
case FITRIM:
return xfs_ioc_trim(mp, arg);
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_RESVSP:
case XFS_IOC_UNRESVSP:
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP64:
case XFS_IOC_ZERO_RANGE: {
xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf)))
return -XFS_ERROR(EFAULT);
return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
}
case XFS_IOC_DIOINFO: {
struct dioattr da;
xfs_buftarg_t *target =
XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da)))
return -XFS_ERROR(EFAULT);
return 0;
}
case XFS_IOC_FSBULKSTAT_SINGLE:
case XFS_IOC_FSBULKSTAT:
case XFS_IOC_FSINUMBERS:
return xfs_ioc_bulkstat(mp, cmd, arg);
case XFS_IOC_FSGEOMETRY_V1:
return xfs_ioc_fsgeometry_v1(mp, arg);
case XFS_IOC_FSGEOMETRY:
return xfs_ioc_fsgeometry(mp, arg);
case XFS_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *)arg);
case XFS_IOC_FSGETXATTR:
return xfs_ioc_fsgetxattr(ip, 0, arg);
case XFS_IOC_FSGETXATTRA:
return xfs_ioc_fsgetxattr(ip, 1, arg);
case XFS_IOC_FSSETXATTR:
return xfs_ioc_fssetxattr(ip, filp, arg);
case XFS_IOC_GETXFLAGS:
return xfs_ioc_getxflags(ip, arg);
case XFS_IOC_SETXFLAGS:
return xfs_ioc_setxflags(ip, filp, arg);
case XFS_IOC_FSSETDM: {
struct fsdmidata dmi;
if (copy_from_user(&dmi, arg, sizeof(dmi)))
return -XFS_ERROR(EFAULT);
error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
dmi.fsd_dmstate);
return -error;
}
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
case XFS_IOC_GETBMAPX:
return xfs_ioc_getbmapx(ip, arg);
case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(hreq)))
return -XFS_ERROR(EFAULT);
return xfs_find_handle(cmd, &hreq);
}
case XFS_IOC_OPEN_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
return xfs_open_by_handle(filp, &hreq);
}
case XFS_IOC_FSSETDM_BY_HANDLE:
return xfs_fssetdm_by_handle(filp, arg);
case XFS_IOC_READLINK_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
return xfs_readlink_by_handle(filp, &hreq);
}
case XFS_IOC_ATTRLIST_BY_HANDLE:
return xfs_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE:
return xfs_attrmulti_by_handle(filp, arg);
case XFS_IOC_SWAPEXT: {
struct xfs_swapext sxp;
if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
return -XFS_ERROR(EFAULT);
error = xfs_swapext(&sxp);
return -error;
}
case XFS_IOC_FSCOUNTS: {
xfs_fsop_counts_t out;
error = xfs_fs_counts(mp, &out);
if (error)
return -error;
if (copy_to_user(arg, &out, sizeof(out)))
return -XFS_ERROR(EFAULT);
return 0;
}
case XFS_IOC_SET_RESBLKS: {
xfs_fsop_resblks_t inout;
__uint64_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (mp->m_flags & XFS_MOUNT_RDONLY)
return -XFS_ERROR(EROFS);
if (copy_from_user(&inout, arg, sizeof(inout)))
return -XFS_ERROR(EFAULT);
/* input parameter is passed in resblks field of structure */
in = inout.resblks;
error = xfs_reserve_blocks(mp, &in, &inout);
if (error)
return -error;
if (copy_to_user(arg, &inout, sizeof(inout)))
return -XFS_ERROR(EFAULT);
return 0;
}
case XFS_IOC_GET_RESBLKS: {
xfs_fsop_resblks_t out;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
error = xfs_reserve_blocks(mp, NULL, &out);
if (error)
return -error;
if (copy_to_user(arg, &out, sizeof(out)))
return -XFS_ERROR(EFAULT);
return 0;
}
case XFS_IOC_FSGROWFSDATA: {
xfs_growfs_data_t in;
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
error = xfs_growfs_data(mp, &in);
return -error;
}
case XFS_IOC_FSGROWFSLOG: {
xfs_growfs_log_t in;
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
error = xfs_growfs_log(mp, &in);
return -error;
}
case XFS_IOC_FSGROWFSRT: {
xfs_growfs_rt_t in;
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
error = xfs_growfs_rt(mp, &in);
return -error;
}
case XFS_IOC_GOINGDOWN: {
__uint32_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(in, (__uint32_t __user *)arg))
return -XFS_ERROR(EFAULT);
error = xfs_fs_goingdown(mp, in);
return -error;
}
case XFS_IOC_ERROR_INJECTION: {
xfs_error_injection_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
error = xfs_errortag_add(in.errtag, mp);
return -error;
}
case XFS_IOC_ERROR_CLEARALL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
error = xfs_errortag_clearall(mp, 1);
return -error;
default:
return -ENOTTY;
}
}
| gpl-2.0 |
juston-li/flo | drivers/gpu/drm/nouveau/nouveau_vm.c | 5350 | 9930 | /*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
/*
 * nouveau_vm_map_at - map a nouveau_mem node's physical regions into a VMA
 * @vma:   virtual memory area to map into
 * @delta: byte offset into the VMA at which the mapping starts
 * @node:  backing memory whose region list supplies the physical pages
 *
 * Walks each physical region and fills page-table entries one page table
 * at a time (a single vm->map() call never crosses a page-table
 * boundary), then flushes the VM TLB once at the end.
 */
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	/* non-zero when the VMA uses the large rather than small page size */
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;	/* log2(page size / 4KiB) */
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	/* reused below as the running byte offset within the mapping */
	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys  = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* clamp the run so it stays inside this page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			/*
			 * Only when the run hit the end of the page table can
			 * the loop iterate again, so phys need only advance
			 * here; otherwise len == num and the loop terminates.
			 */
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}
/*
 * nouveau_vm_map - map an entire nouveau_mem node at the start of a VMA
 */
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
/*
 * nouveau_vm_map_sg - map a scatter list of DMA pages into a VMA
 * @vma:    virtual memory area to map into
 * @delta:  byte offset into the VMA at which the mapping starts
 * @length: number of bytes to map
 * @mem:    memory node whose mem->pages array holds the DMA addresses
 *
 * Same page-table walk as nouveau_vm_map_at(), but sourced from a flat
 * dma_addr_t array instead of contiguous physical regions.
 */
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;	/* PTE count to fill */
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		/* clamp the run so it stays inside this page table */
		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;	/* consume the DMA addresses just mapped */
		if (unlikely(end >= max)) {
			/* crossed into the next page table */
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
/*
 * nouveau_vm_unmap_at - clear PTEs for part of a VMA
 * @vma:    virtual memory area to unmap from
 * @delta:  byte offset into the VMA at which to start
 * @length: number of bytes to unmap
 *
 * Mirror of the map paths: walks the covered PTEs one page table at a
 * time, clearing them via vm->unmap(), then flushes the VM TLB.
 */
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;	/* PTE count to clear */
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		/* clamp the run so it stays inside this page table */
		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			/* crossed into the next page table */
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
/*
 * nouveau_vm_unmap - unmap an entire VMA (node->length is in 4KiB units)
 */
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
/*
 * nouveau_vm_unmap_pgt - drop page-table references over a PDE range
 * @vm:   the virtual memory space
 * @big:  page-size index (0 = small, 1 = large) of the tables to drop
 * @fpde: first page directory entry (inclusive)
 * @lpde: last page directory entry (inclusive)
 *
 * Decrements the refcount of each covered page table; tables reaching
 * zero are cleared from every linked page directory and released.
 * Caller holds vm->mm.mutex, which is dropped around the final
 * nouveau_gpuobj_ref() — presumably because the unref may sleep;
 * NOTE(review): confirm against nouveau_gpuobj_ref() semantics.
 */
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;	/* still in use by another VMA */

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		/* wipe the (now NULL) entry from every page directory */
		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}
/*
 * nouveau_vm_map_pgt - allocate and install a page table for one PDE
 * @vm:   the virtual memory space
 * @pde:  page directory entry to populate
 * @type: page shift; selects the small/large table slot
 *
 * Caller holds vm->mm.mutex.  The mutex is dropped around the gpuobj
 * allocation, so another thread may populate the PDE meanwhile; that
 * race is detected via the refcount and the spare table is released.
 * Returns 0 on success or the nouveau_gpuobj_new() error.
 */
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	/* number of PTEs covered by one PDE, 8 bytes per entry */
	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	/* publish the new table into every linked page directory */
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}
/*
 * nouveau_vm_get - allocate virtual address space and back it with
 * page tables
 * @vm:         the virtual memory space
 * @size:       allocation size in bytes
 * @page_shift: log2 of the page size to use
 * @access:     access flags stored in the resulting vma
 * @vma:        caller-provided vma to fill in
 *
 * On success vma->node/offset/vm/access are valid.  On page-table
 * allocation failure, references taken on earlier PDEs are dropped,
 * the address range is returned to the allocator and vma->node is
 * reset to NULL.  Returns 0 or a negative error.
 */
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;	/* alignment in 4KiB units */
	u32 msize = size >> 12;			/* size in 4KiB units */
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	/* take a reference on (and if needed allocate) each covered PT */
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			/* unwind references taken on the earlier PDEs */
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm     = vm;
	vma->offset = (u64)vma->node->offset << 12;	/* back to bytes */
	vma->access = access;
	return 0;
}
/*
 * nouveau_vm_put - release address space obtained with nouveau_vm_get()
 *
 * Drops the page-table references covering the vma and returns the
 * range to the allocator.  Safe to call on a vma whose node is already
 * NULL (no-op).
 */
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}
/*
 * nouveau_vm_new - create a virtual memory space
 * @dev:       drm device
 * @offset:    start of the VM address range
 * @length:    size of the VM address range in bytes
 * @mm_offset: start of the sub-range handed to the allocator
 * @pvm:       filled with the new vm on success
 *
 * Selects the NV50 or NVC0 backend from the card type, sizes the
 * page-directory shadow array and initialises the address allocator.
 * Returns 0, -ENOMEM, -ENOSYS (unsupported card) or the
 * nouveau_mm_init() error.
 */
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;	/* 4KiB small pages */
		vm->lpg_shift = 16;	/* 64KiB large pages */
		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;	/* 4KiB small pages */
		vm->lpg_shift = 17;	/* 128KiB large pages */
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	/* shadow array covering every PDE in [offset, offset+length) */
	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;	/* stored relative to 4KiB units */

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}
/*
 * nouveau_vm_link - attach a page directory object to a VM
 *
 * Adds pgd to the VM's directory list (taking a reference on it) and
 * writes every existing page table into it.  A NULL pgd is a no-op.
 * Returns 0 or -ENOMEM.
 */
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	/* replay all current page tables into the new directory */
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}
/*
 * nouveau_vm_unlink - detach a page directory object from a VM
 *
 * Removes the list entry matching mpgd (if any) under the VM mutex,
 * then drops the reference on the directory outside the lock.
 * A NULL mpgd is a no-op.
 */
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_gpuobj *found = NULL;
	struct nouveau_vm_pgd *entry, *next;

	if (mpgd == NULL)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(entry, next, &vm->pgd_list, head) {
		if (entry->obj != mpgd)
			continue;
		found = entry->obj;
		list_del(&entry->head);
		kfree(entry);
		break;
	}
	mutex_unlock(&vm->mm.mutex);

	/* drop the pgd reference outside the lock */
	nouveau_gpuobj_ref(NULL, &found);
}
/*
 * nouveau_vm_del - destroy a VM whose refcount reached zero
 *
 * Unlinks any remaining page directories, tears down the allocator and
 * frees the shadow array and the vm itself.
 */
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}
/*
 * nouveau_vm_ref - move *ptr from its current VM (if any) to "ref"
 *
 * Links pgd into and takes a reference on the new VM first, then
 * releases the old one, destroying it when its refcount drops to zero.
 * Returns 0 or the nouveau_vm_link() error (in which case *ptr is
 * untouched).
 */
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *old;
	int ret;

	if (ref) {
		ret = nouveau_vm_link(ref, pgd);
		if (ret)
			return ret;

		ref->refcount++;
	}

	old  = *ptr;
	*ptr = ref;

	if (old) {
		nouveau_vm_unlink(old, pgd);
		if (--old->refcount == 0)
			nouveau_vm_del(old);
	}

	return 0;
}
| gpl-2.0 |
flar2/ville-bulletproof | drivers/s390/scsi/zfcp_sysfs.c | 5606 | 17160 | /*
* zfcp device driver
*
* sysfs attributes.
*
* Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
_show, _store)
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct ccw_device *cdev = to_ccwdev(dev); \
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
int i; \
\
if (!adapter) \
return -ENODEV; \
\
i = sprintf(buf, _format, _value); \
zfcp_ccw_adapter_put(adapter); \
return i; \
} \
static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_LUN_SHARED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_LUN_READONLY) != 0);
/*
 * Show "1\n" when the port is in ERP failed state, "0\n" otherwise.
 */
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	int failed;

	failed = (atomic_read(&port->status) &
		  ZFCP_STATUS_COMMON_ERP_FAILED) ? 1 : 0;
	return sprintf(buf, "%d\n", failed);
}
/*
 * Writing "0" clears the port's failed state and triggers a reopen,
 * waiting for error recovery to finish.  Any other value is rejected.
 */
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
	unsigned long val;

	/* only the literal value 0 is accepted */
	if (strict_strtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
	zfcp_erp_wait(port->adapter);

	return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_port_failed_show,
zfcp_sysfs_port_failed_store);
/*
 * Show "1\n" when the unit's SCSI device is in ERP failed state (or no
 * SCSI device is registered at all), "0\n" otherwise.
 */
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	struct scsi_device *sdev;
	unsigned int status, failed = 1;	/* no sdev counts as failed */

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		status = atomic_read(&sdev_to_zfcp(sdev)->status);
		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
		scsi_device_put(sdev);
	}

	return sprintf(buf, "%d\n", failed);
}
/*
 * Writing "0" recovers the unit: if a SCSI device exists, clear its
 * failed state and reopen the LUN; otherwise trigger a SCSI scan to
 * (re)create the device.  Any other value is rejected.
 */
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	unsigned long val;
	struct scsi_device *sdev;

	/* only the literal value 0 is accepted */
	if (strict_strtoul(buf, 0, &val) || val != 0)
		return -EINVAL;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "syufai2");
		zfcp_erp_wait(unit->port->adapter);
	} else
		zfcp_unit_scsi_scan(unit);

	return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_unit_failed_show,
zfcp_sysfs_unit_failed_store);
/*
 * Show "1\n" when the adapter is in ERP failed state, "0\n" otherwise.
 * Takes/drops an adapter reference around the status read.
 */
static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	int len;

	if (!adapter)
		return -ENODEV;

	len = sprintf(buf, "%d\n",
		      (atomic_read(&adapter->status) &
		       ZFCP_STATUS_COMMON_ERP_FAILED) ? 1 : 0);

	zfcp_ccw_adapter_put(adapter);
	return len;
}
/*
 * Writing "0" clears the adapter's failed state and triggers a reopen,
 * waiting for error recovery to finish.  Any other value is rejected.
 */
static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	unsigned long val;
	int retval = 0;

	if (!adapter)
		return -ENODEV;

	/* only the literal value 0 is accepted */
	if (strict_strtoul(buf, 0, &val) || val != 0) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"syafai2");
	zfcp_erp_wait(adapter);
out:
	/* drop the reference taken by zfcp_ccw_adapter_by_cdev() */
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_adapter_failed_show,
zfcp_sysfs_adapter_failed_store);
/*
 * Any write queues the adapter's port scan work and waits for it, so
 * the rescan has completed by the time the write returns.
 */
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return -ENODEV;

	/* sync the user-space- with the kernel-invocation of scan_work */
	queue_work(adapter->work_queue, &adapter->scan_work);
	flush_work(&adapter->scan_work);
	zfcp_ccw_adapter_put(adapter);

	return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
/*
 * Remove a port by WWPN: the written value is parsed as a 64-bit WWPN,
 * the matching port is unlinked from the adapter's port list, shut
 * down and unregistered.  Returns -EINVAL for an unparsable WWPN or an
 * unknown port.
 */
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	struct zfcp_port *port;
	u64 wwpn;
	int retval = -EINVAL;

	if (!adapter)
		return -ENODEV;

	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
		goto out;

	/* zfcp_get_port_by_wwpn() takes a reference, dropped below */
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		goto out;
	else
		retval = 0;

	write_lock_irq(&adapter->port_list_lock);
	list_del(&port->list);
	write_unlock_irq(&adapter->port_list_lock);

	put_device(&port->dev);

	zfcp_erp_port_shutdown(port, 0, "syprs_1");
	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 out:
	zfcp_ccw_adapter_put(adapter);
	return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
&dev_attr_adapter_port_remove.attr,
&dev_attr_adapter_port_rescan.attr,
&dev_attr_adapter_peer_wwnn.attr,
&dev_attr_adapter_peer_wwpn.attr,
&dev_attr_adapter_peer_d_id.attr,
&dev_attr_adapter_card_version.attr,
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
NULL
};
struct attribute_group zfcp_sysfs_adapter_attrs = {
.attrs = zfcp_adapter_attrs,
};
static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_add(port, fcp_lun))
return -EINVAL;
return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_remove(port, fcp_lun))
return -EINVAL;
return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
NULL
};
/**
* zfcp_sysfs_port_attrs - sysfs attributes for all other ports
*/
struct attribute_group zfcp_sysfs_port_attrs = {
.attrs = zfcp_port_attrs,
};
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_failed.attr,
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
&dev_attr_unit_access_shared.attr,
&dev_attr_unit_access_readonly.attr,
NULL
};
struct attribute_group zfcp_sysfs_unit_attrs = {
.attrs = zfcp_unit_attrs,
};
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_bh(&lat->lock); \
fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
fmin = lat->_name.fabric.min * adapter->timer_ticks; \
fmax = lat->_name.fabric.max * adapter->timer_ticks; \
csum = lat->_name.channel.sum * adapter->timer_ticks; \
cmin = lat->_name.channel.min * adapter->timer_ticks; \
cmax = lat->_name.channel.max * adapter->timer_ticks; \
cc = lat->_name.counter; \
spin_unlock_bh(&lat->lock); \
\
do_div(fsum, 1000); \
do_div(fmin, 1000); \
do_div(fmax, 1000); \
do_div(csum, 1000); \
do_div(cmin, 1000); \
do_div(cmax, 1000); \
\
return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
lat->_name.fabric.sum = 0; \
lat->_name.fabric.min = 0xFFFFFFFF; \
lat->_name.fabric.max = 0; \
lat->_name.channel.sum = 0; \
lat->_name.channel.min = 0xFFFFFFFF; \
lat->_name.channel.max = 0; \
lat->_name.counter = 0; \
spin_unlock_irqrestore(&lat->lock, flags); \
\
return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
zfcp_sysfs_unit_##_name##_latency_show, \
zfcp_sysfs_unit_##_name##_latency_store);
ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_port *port = zfcp_sdev->port; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
dev_name(&port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
(unsigned long long) port->wwpn);
/*
 * Show the FCP LUN of a SCSI device in 0x%016llx format.
 */
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
&dev_attr_wwpn,
&dev_attr_hba_id,
&dev_attr_read_latency,
&dev_attr_write_latency,
&dev_attr_cmd_latency,
NULL
};
/*
 * Show CP/CB/adapter utilization, fetched synchronously from the
 * adapter via an exchange-port-data request.  Only available when the
 * adapter advertises the measurement-data feature.
 */
static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_port *qtcb_port;
	struct zfcp_adapter *adapter;
	int retval;

	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;

	qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
	if (!qtcb_port)
		return -ENOMEM;

	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
	if (!retval)
		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
				 qtcb_port->cb_util, qtcb_port->a_util);
	kfree(qtcb_port);
	return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
/*
 * Fetch the adapter's statistics block via a synchronous
 * exchange-config-data request.  Helper shared by the ZFCP_SHOST_ATTR
 * show functions below.  Returns 0 and fills *stat_inf on success.
 */
static int zfcp_sysfs_adapter_ex_config(struct device *dev,
					struct fsf_statistics_info *stat_inf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_config *qtcb_config;
	struct zfcp_adapter *adapter;
	int retval;

	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;

	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
			      GFP_KERNEL);
	if (!qtcb_config)
		return -ENOMEM;

	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
	if (!retval)
		*stat_inf = qtcb_config->stat_info;

	kfree(qtcb_config);
	return retval;
}
#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct fsf_statistics_info stat_info; \
int retval; \
\
retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
if (retval) \
return retval; \
\
return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
(unsigned long long) stat_info.input_req,
(unsigned long long) stat_info.output_req,
(unsigned long long) stat_info.control_req);
ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
(unsigned long long) stat_info.input_mb,
(unsigned long long) stat_info.output_mb);
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
/*
 * Show the request-queue-full counter and the accumulated queue
 * utilization, sampled consistently under the qdio stat lock.
 */
static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct Scsi_Host *scsi_host = class_to_shost(dev);
	struct zfcp_qdio *qdio =
		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
	u64 util;

	spin_lock_bh(&qdio->stat_lock);
	util = qdio->req_q_util;
	spin_unlock_bh(&qdio->stat_lock);

	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
		       (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
&dev_attr_queue_full,
NULL
};
| gpl-2.0 |
Nihhaar/android_kernel_xiaomi_mocha | sound/synth/util_mem.c | 9958 | 4663 | /*
* Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
*
* Generic memory management routines for soundcard memory allocation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/util_mem.h>
MODULE_AUTHOR("Takashi Iwai");
MODULE_DESCRIPTION("Generic memory management routines for soundcard memory allocation");
MODULE_LICENSE("GPL");
#define get_memblk(p) list_entry(p, struct snd_util_memblk, list)
/*
* create a new memory manager
*/
/*
 * Create a new memory manager covering "memsize" units.
 * Returns NULL on allocation failure.
 */
struct snd_util_memhdr *
snd_util_memhdr_new(int memsize)
{
	struct snd_util_memhdr *hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);

	if (!hdr)
		return NULL;

	hdr->size = memsize;
	mutex_init(&hdr->block_mutex);
	INIT_LIST_HEAD(&hdr->block);

	return hdr;
}
/*
* free a memory manager
*/
void snd_util_memhdr_free(struct snd_util_memhdr *hdr)
{
struct list_head *p;
if (!hdr)
return;
/* release all blocks */
while ((p = hdr->block.next) != &hdr->block) {
list_del(p);
kfree(get_memblk(p));
}
kfree(hdr);
}
/*
* allocate a memory block (without mutex)
*/
/*
 * Allocate a memory block of at least "size" units (lock-free variant;
 * caller holds hdr->block_mutex).
 *
 * First-fit search over the gaps between existing blocks.  If the loop
 * completes without a hit, "p" is left at the list head, so the final
 * __snd_util_memblk_new(..., p->prev) appends at the tail — provided
 * the trailing gap is big enough.  Returns NULL when no gap fits.
 */
struct snd_util_memblk *
__snd_util_mem_alloc(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	unsigned int units, prev_offset;
	struct list_head *p;

	if (snd_BUG_ON(!hdr || size <= 0))
		return NULL;

	/* word alignment */
	units = size;
	if (units & 1)
		units++;
	if (units > hdr->size)
		return NULL;

	/* look for empty block */
	prev_offset = 0;
	list_for_each(p, &hdr->block) {
		blk = get_memblk(p);
		if (blk->offset - prev_offset >= units)
			goto __found;
		prev_offset = blk->offset + blk->size;
	}

	/* no gap between blocks — check the space after the last one */
	if (hdr->size - prev_offset < units)
		return NULL;
__found:
	/* insert before p, i.e. into the gap (or at the tail) just found */
	return __snd_util_memblk_new(hdr, units, p->prev);
}
/*
* create a new memory block with the given size
* the block is linked next to prev
*/
/*
 * Create a new memory block of "units" and link it after "prev".
 * The block's offset starts where the previous block ends (or at 0
 * when linked directly after the list head).  Updates the manager's
 * block count and usage.  Returns NULL on allocation failure.
 */
struct snd_util_memblk *
__snd_util_memblk_new(struct snd_util_memhdr *hdr, unsigned int units,
		      struct list_head *prev)
{
	struct snd_util_memblk *blk;

	/* block_extra_size allows users to append private data */
	blk = kmalloc(sizeof(struct snd_util_memblk) + hdr->block_extra_size,
		      GFP_KERNEL);
	if (blk == NULL)
		return NULL;

	if (prev == &hdr->block)
		blk->offset = 0;
	else {
		struct snd_util_memblk *p = get_memblk(prev);
		blk->offset = p->offset + p->size;
	}
	blk->size = units;
	list_add(&blk->list, prev);
	hdr->nblocks++;
	hdr->used += units;
	return blk;
}
/*
* allocate a memory block (with mutex)
*/
/*
 * Allocate a memory block of at least "size" units (locked variant).
 * Returns the new block or NULL when no space is available.
 */
struct snd_util_memblk *
snd_util_mem_alloc(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	mutex_lock(&hdr->block_mutex);
	blk = __snd_util_mem_alloc(hdr, size);
	mutex_unlock(&hdr->block_mutex);
	return blk;
}
/*
* remove the block from linked-list and free resource
* (without mutex)
*/
/*
 * Unlink a block and free it, updating the manager's accounting
 * (lock-free variant; caller holds hdr->block_mutex).
 */
void
__snd_util_mem_free(struct snd_util_memhdr *hdr, struct snd_util_memblk *blk)
{
	list_del(&blk->list);
	hdr->nblocks--;
	hdr->used -= blk->size;
	kfree(blk);
}
/*
* free a memory block (with mutex)
*/
/*
 * Free a memory block (locked variant).
 * Returns 0, or -EINVAL when hdr or blk is NULL.
 */
int snd_util_mem_free(struct snd_util_memhdr *hdr, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!hdr || !blk))
		return -EINVAL;

	mutex_lock(&hdr->block_mutex);
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
/*
* return available memory size
*/
/*
 * Return the number of units still available in the manager,
 * sampled under the block mutex.
 */
int snd_util_mem_avail(struct snd_util_memhdr *hdr)
{
	unsigned int avail;

	mutex_lock(&hdr->block_mutex);
	avail = hdr->size - hdr->used;
	mutex_unlock(&hdr->block_mutex);

	return avail;
}
EXPORT_SYMBOL(snd_util_memhdr_new);
EXPORT_SYMBOL(snd_util_memhdr_free);
EXPORT_SYMBOL(snd_util_mem_alloc);
EXPORT_SYMBOL(snd_util_mem_free);
EXPORT_SYMBOL(snd_util_mem_avail);
EXPORT_SYMBOL(__snd_util_mem_alloc);
EXPORT_SYMBOL(__snd_util_mem_free);
EXPORT_SYMBOL(__snd_util_memblk_new);
/*
* INIT part
*/
static int __init alsa_util_mem_init(void)
{
return 0;
}
static void __exit alsa_util_mem_exit(void)
{
}
module_init(alsa_util_mem_init)
module_exit(alsa_util_mem_exit)
| gpl-2.0 |
windyyuan/linux | arch/mn10300/unit-asb2364/leds.c | 12262 | 2692 | /* leds.c: ASB2364 peripheral 7seg LEDs x4 support
*
* Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
#include <asm/rtc-regs.h>
#include <unit/leds.h>
#if MN10300_USE_7SEGLEDS
static const u8 asb2364_led_hex_tbl[16] = {
0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
};
static const u32 asb2364_led_chase_tbl[6] = {
~0x02020202, /* top - segA */
~0x04040404, /* right top - segB */
~0x08080808, /* right bottom - segC */
~0x10101010, /* bottom - segD */
~0x20202020, /* left bottom - segE */
~0x40404040, /* left top - segF */
};
static unsigned asb2364_led_chase;
/*
 * Show @val as four decimal digits on the 7-segment bank; @points is
 * XORed against the decimal-point bits (active low).
 */
void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
{
	static const unsigned int divisors[4] = { 1000, 100, 10, 1 };
	u32 leds = 0;
	int digit;

	/* Pack one segment pattern per byte, most significant digit first */
	for (digit = 0; digit < 4; digit++) {
		leds <<= 8;
		leds |= asb2364_led_hex_tbl[(val / divisors[digit]) % 10];
	}

	leds |= points ^ 0x01010101;
	ASB2364_7SEGLEDS = leds;
}
/*
 * Show @val as four hexadecimal digits on the 7-segment bank; @points
 * is XORed against the decimal-point bits (active low).
 *
 * Bug fix: this routine previously divided by powers of ten, i.e. it
 * was a byte-for-byte copy of the _dec variant and displayed decimal
 * digits instead of hex nibbles.  Extract the four nibbles instead.
 */
void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
{
	u32 leds;

	leds = asb2364_led_hex_tbl[(val >> 12) & 0xf];
	leds <<= 8;
	leds |= asb2364_led_hex_tbl[(val >> 8) & 0xf];
	leds <<= 8;
	leds |= asb2364_led_hex_tbl[(val >> 4) & 0xf];
	leds <<= 8;
	leds |= asb2364_led_hex_tbl[val & 0xf];
	leds |= points ^ 0x01010101;
	ASB2364_7SEGLEDS = leds;
}
/* display triple horizontal bar and exception code */
void peripheral_leds_display_exception(enum exception_code code)
{
	u32 leds;

	/* Low three hex digits of the exception code, MSB first */
	leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
	leds <<= 8;
	leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
	leds <<= 8;
	leds |= asb2364_led_hex_tbl[code % 0x10];
	/* 0x6d in the top byte draws the triple-bar pattern; the low bits
	 * presumably set the point segments - confirm against the panel */
	leds |= 0x6d010101;
	ASB2364_7SEGLEDS = leds;
}
/*
 * Advance the rotating single-segment "chase" animation by one step,
 * cycling through the six outer segments.
 */
void peripheral_leds_led_chase(void)
{
	ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
	asb2364_led_chase = (asb2364_led_chase + 1) % 6;
}
#else /* MN10300_USE_7SEGLEDS */
/* Stub implementations when the 7-segment LED bank is not configured */
void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
void peripheral_leds_display_exception(enum exception_code code) { }
void peripheral_leds_led_chase(void) { }
#endif /* MN10300_USE_7SEGLEDS */
| gpl-2.0 |
zephiK/android_kernel_moto_shamu_fk | drivers/net/wireless/prism54/isl_38xx.c | 12518 | 8116 | /*
* Copyright (C) 2002 Intersil Americas Inc.
* Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_dev.h"
#include "islpci_mgt.h"
/******************************************************************************
Device Interface & Control functions
******************************************************************************/
/**
 * isl38xx_disable_interrupts - disable all interrupts
 * @device: pci memory base address
 *
 * Instructs the device to disable all interrupt reporting by writing
 * zero to the interrupt enable register (%ISL38XX_INT_EN_REG).  New
 * events may still show up in the interrupt identification register
 * located at offset %ISL38XX_INT_IDENT_REG.
 */
void
isl38xx_disable_interrupts(void __iomem *device)
{
	isl38xx_w32_flush(device, 0x00000000, ISL38XX_INT_EN_REG);
	udelay(ISL38XX_WRITEIO_DELAY);
}
/*
 * Grant or refuse a device sleep request.  The device may only enter
 * power-save when every tx and rx queue (data and management) is
 * empty; otherwise the request is silently ignored.
 */
void
isl38xx_handle_sleep_request(isl38xx_control_block *control_block,
			     int *powerstate, void __iomem *device_base)
{
	/* Refuse while any queue still holds frames (checked in the same
	 * order as before: tx data, tx mgmt, rx data, rx mgmt) */
	if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ) ||
	    isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ) ||
	    isl38xx_in_queue(control_block, ISL38XX_CB_RX_DATA_LQ) ||
	    isl38xx_in_queue(control_block, ISL38XX_CB_RX_MGMTQ))
		return;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Device going to sleep mode\n");
#endif

	/* all queues are empty, allow the device to go into sleep mode */
	*powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* assert the Sleep interrupt in the Device Interrupt Register */
	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_SLEEP,
			  ISL38XX_DEV_INT_REG);
	udelay(ISL38XX_WRITEIO_DELAY);
}
/*
 * The device has woken up: record the active power state, then kick it
 * if either transmit queue has frames waiting.
 */
void
isl38xx_handle_wakeup(isl38xx_control_block *control_block,
		      int *powerstate, void __iomem *device_base)
{
	int tx_pending;

	/* device is in active state, update the powerstate flag */
	*powerstate = ISL38XX_PSM_ACTIVE_STATE;

	tx_pending = isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ) ||
		     isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ);
	if (!tx_pending)
		return;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_ANYTHING, "Wake up handler trigger the device\n");
#endif

	/* a frame is pending: set the Update bit in the Device Int reg */
	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
			  ISL38XX_DEV_INT_REG);
	udelay(ISL38XX_WRITEIO_DELAY);
}
/*
 * Kick the device so it processes newly queued frames.  If @asleep the
 * device is first woken: when the ident register reads the 0xabadface
 * sentinel, poll the status register until the Sleepmode bit appears,
 * then raise the Wakeup interrupt.  If awake, just raise Update.
 */
void
isl38xx_trigger_device(int asleep, void __iomem *device_base)
{
	u32 reg;

#if VERBOSE > SHOW_ERROR_MESSAGES
	u32 counter = 0;
	struct timeval current_time;
	DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
#endif

	/* check whether the device is in power save mode */
	if (asleep) {
		/* device is in powersave, trigger the device for wakeup */
#if VERBOSE > SHOW_ERROR_MESSAGES
		do_gettimeofday(&current_time);
		DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
		      current_time.tv_sec, (long)current_time.tv_usec);
		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
		      current_time.tv_sec, (long)current_time.tv_usec,
		      readl(device_base + ISL38XX_CTRL_STAT_REG));
#endif

		reg = readl(device_base + ISL38XX_INT_IDENT_REG);
		if (reg == 0xabadface) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			do_gettimeofday(&current_time);
			DEBUG(SHOW_TRACING,
			      "%08li.%08li Device register abadface\n",
			      current_time.tv_sec, (long)current_time.tv_usec);
#endif
			/* read the Device Status Register until Sleepmode bit is set */
			/* NOTE(review): unbounded busy-wait; hangs if the bit
			 * never appears - confirm the hardware guarantees it */
			while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
			       (reg & ISL38XX_CTRL_STAT_SLEEPMODE) == 0) {
				udelay(ISL38XX_WRITEIO_DELAY);
#if VERBOSE > SHOW_ERROR_MESSAGES
				counter++;
#endif
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "%08li.%08li Device register read %08x\n",
			      current_time.tv_sec, (long)current_time.tv_usec,
			      readl(device_base + ISL38XX_CTRL_STAT_REG));
			do_gettimeofday(&current_time);
			DEBUG(SHOW_TRACING,
			      "%08li.%08li Device asleep counter %i\n",
			      current_time.tv_sec, (long)current_time.tv_usec,
			      counter);
#endif
		}
		/* assert the Wakeup interrupt in the Device Interrupt Register */
		isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_WAKEUP,
				  ISL38XX_DEV_INT_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		udelay(ISL38XX_WRITEIO_DELAY);

		/* perform another read on the Device Status Register */
		reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
		do_gettimeofday(&current_time);
		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
		      current_time.tv_sec, (long)current_time.tv_usec, reg);
#endif
	} else {
		/* device is (still) awake */
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Device is in active state\n");
#endif
		/* trigger the device by setting the Update bit in the Device Int reg */
		isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
	}
}
/*
 * Reset the host interface: point the device at the DMA control block
 * at @host_address, pulse the reset bit, then enable only the "init
 * done" interrupt so nothing else fires before the device is up.
 */
void
isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address)
{
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "isl38xx_interface_reset\n");
#endif

	/* load the address of the control block in the device */
	isl38xx_w32_flush(device_base, host_address, ISL38XX_CTRL_BLK_BASE_REG);
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the reset bit in the Device Interrupt Register */
	isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_RESET, ISL38XX_DEV_INT_REG);
	udelay(ISL38XX_WRITEIO_DELAY);

	/* enable the interrupt for detecting initialization */

	/* Note: Do not enable other interrupts here. We want the
	 * device to have come up first 100% before allowing any other
	 * interrupts. */
	isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG);
	udelay(ISL38XX_WRITEIO_DELAY);  /* allow complete full reset */
}
/*
 * Unmask the interrupt sources used during normal operation:
 * update, sleep and wakeup.
 */
void
isl38xx_enable_common_interrupts(void __iomem *device_base)
{
	isl38xx_w32_flush(device_base,
			  ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP |
			  ISL38XX_INT_IDENT_WAKEUP,
			  ISL38XX_INT_EN_REG);
	udelay(ISL38XX_WRITEIO_DELAY);
}
/*
 * Return the occupancy of @queue: number of queued fragments for
 * transmit queues, number of free slots for receive queues, computed
 * from the driver/device fragment counters in the control block @cb.
 */
int
isl38xx_in_queue(isl38xx_control_block *cb, int queue)
{
	const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) -
			   le32_to_cpu(cb->device_curr_frag[queue]));

	/* determine the amount of fragments in the queue depending on the type
	 * of the queue, either transmit or receive */

	BUG_ON(delta < 0);	/* driver ptr must be ahead of device ptr */

	switch (queue) {
		/* send queues */
	case ISL38XX_CB_TX_MGMTQ:
		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
		/* fall through - tx queues share the occupancy calculation;
		 * the mgmt queue additionally gets the smaller bound above */

	case ISL38XX_CB_TX_DATA_LQ:
	case ISL38XX_CB_TX_DATA_HQ:
		BUG_ON(delta > ISL38XX_CB_TX_QSIZE);
		return delta;

		/* receive queues */
	case ISL38XX_CB_RX_MGMTQ:
		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
		return ISL38XX_CB_MGMT_QSIZE - delta;

	case ISL38XX_CB_RX_DATA_LQ:
	case ISL38XX_CB_RX_DATA_HQ:
		BUG_ON(delta > ISL38XX_CB_RX_QSIZE);
		return ISL38XX_CB_RX_QSIZE - delta;
	}

	BUG();		/* unknown queue id */
	return 0;
}
| gpl-2.0 |
seltaeb/n7100-jb-samsung | sound/pci/echoaudio/layla24_dsp.c | 12518 | 10447 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);
/*
 * One-time hardware init for Layla24: set up the DSP communication
 * page, record device ids and capability masks, then load the firmware
 * and the initial line levels.  Returns 0 on success, negative errno
 * on failure.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Layla24\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA24))
		return -ENODEV;

	if ((err = init_dsp_comm_page(chip))) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;		/* cleared once firmware loads */
	chip->has_midi = TRUE;
	chip->dsp_code_to_load = FW_LAYLA24_DSP;
	/* clock sources the card can sync to */
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	/* supported digital I/O modes */
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;

	if ((err = load_firmware(chip)) < 0)
		return err;
	chip->bad_board = FALSE;

	if ((err = init_line_levels(chip)) < 0)
		return err;

	DE_INIT(("init_hw done\n"));
	return err;
}
/*
 * Restore the power-on mixer state: S/PDIF on RCA in consumer format,
 * automute enabled on the digital input, then reset line levels.
 */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->digital_in_automute = TRUE;
	chip->professional_spdif = FALSE;
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;

	return init_line_levels(chip);
}
/*
 * Translate the DSP's clock-detect status word into the generic
 * driver's clock bitmask.  Internal clock is always reported.
 */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 dsp_clocks = le32_to_cpu(chip->comm_page->status_clocks);
	u32 bits = ECHO_CLOCK_BIT_INTERNAL;

	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_SPDIF) ?
		ECHO_CLOCK_BIT_SPDIF : 0;
	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_ADAT) ?
		ECHO_CLOCK_BIT_ADAT : 0;
	bits |= (dsp_clocks & GML_CLOCK_DETECT_BIT_WORD) ?
		ECHO_CLOCK_BIT_WORD : 0;

	return bits;
}
/* Layla24 has an ASIC on the PCI card and another ASIC in the external box;
   both need to be loaded.  Returns 1 if already loaded, 0 on success,
   negative errno on failure. */
static int load_asic(struct echoaudio *chip)
{
	int err;

	if (chip->asic_loaded)
		return 1;

	DE_INIT(("load_asic\n"));

	/* Give the DSP a few milliseconds to settle down */
	mdelay(10);

	/* Load the ASIC for the PCI card */
	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_PCI_CARD_ASIC,
				FW_LAYLA24_1_ASIC);
	if (err < 0)
		return err;

	chip->asic_code = FW_LAYLA24_2S_ASIC;

	/* Now give the new ASIC a little time to set up */
	mdelay(10);
	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
				FW_LAYLA24_2S_ASIC);
	/* Bug fix: this path used to "return FALSE" (i.e. 0), which told
	 * the caller the load succeeded even though it failed.  Propagate
	 * the error code instead, matching the PCI-ASIC path above. */
	if (err < 0)
		return err;

	/* Now give the external ASIC a little time to set up */
	mdelay(10);

	/* See if it worked */
	err = check_asic_status(chip);

	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err)
		err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
					TRUE);

	DE_INIT(("load_asic() done\n"));
	return err;
}
/*
 * Program the sample rate.  Standard rates map directly to control
 * register clock bits; non-standard rates use Layla24's "continuous
 * frequency" mode via a DSP vector command.  When an external clock is
 * selected the rate is only recorded, not programmed.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock, base_rate;

	/* double-speed rates are not allowed in ADAT mode */
	if (snd_BUG_ON(rate >= 50000 &&
		       chip->digital_mode == DIGITAL_MODE_ADAT))
		return -EINVAL;

	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}

	/* Get the control register & clear the appropriate bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK & GML_SPDIF_RATE_CLEAR_MASK;

	clock = 0;

	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		/* If this is a non-standard rate, then the driver needs to
		   use Layla24's special "continuous frequency" mode */
		clock = LAYLA24_CONTINUOUS_CLOCK;
		if (rate > 50000) {
			/* above 50 kHz the hardware runs at half rate in
			 * double-speed mode */
			base_rate = rate >> 1;
			control_reg |= GML_DOUBLE_SPEED_MODE;
		} else {
			base_rate = rate;
		}

		if (base_rate < 25000)
			base_rate = 25000;	/* hardware lower bound */

		if (wait_handshake(chip))
			return -EIO;

		chip->comm_page->sample_rate =
			cpu_to_le32(LAYLA24_MAGIC_NUMBER / base_rate - 2);

		clear_handshake(chip);
		send_vector(chip, DSP_VC_SET_LAYLA24_FREQUENCY_REG);
	}

	control_reg |= clock;

	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP ? */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, control_reg));

	return write_control_reg(chip, control_reg, FALSE);
}
/*
 * Select the clock source the hardware syncs to.  Internal clock
 * re-programs the current sample rate; external sources set the
 * matching clock-select bits in the control register.  Returns
 * -EAGAIN if the clock conflicts with the current digital mode.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;

	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	/* Pick the new clock */
	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Layla24 clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_SPDIF_CLOCK;
		/* Layla24 doesn't support 96KHz S/PDIF */
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to SPDIF\n"));
		break;
	case ECHO_CLOCK_WORD:
		control_reg |= GML_WORD_CLOCK;
		/* follow double-speed word clock when the DSP detects one */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to WORD\n"));
		break;
	case ECHO_CLOCK_ADAT:
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to ADAT\n"));
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Layla24\n", clock));
		return -EINVAL;
	}

	chip->input_clock = clock;
	return write_control_reg(chip, control_reg, TRUE);
}
/* Depending on what digital mode you want, Layla24 needs different ASICs
   loaded.  This function checks the ASIC needed for the new mode and sees
   if it matches the one already loaded. */
static int switch_asic(struct echoaudio *chip, short asic)
{
	s8 *monitors;

	/* Check to see if this is already loaded */
	if (asic != chip->asic_code) {
		/* Save the monitor gains: they are muted during the ASIC
		 * swap and restored afterwards */
		monitors = kmemdup(chip->comm_page->monitors,
					MONITOR_ARRAY_SIZE, GFP_KERNEL);
		if (! monitors)
			return -ENOMEM;

		memset(chip->comm_page->monitors, ECHOGAIN_MUTED,
		       MONITOR_ARRAY_SIZE);

		/* Load the desired ASIC */
		if (load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
				      asic) < 0) {
			/* load failed: restore gains, keep the old ASIC code */
			memcpy(chip->comm_page->monitors, monitors,
			       MONITOR_ARRAY_SIZE);
			kfree(monitors);
			return -EIO;
		}
		chip->asic_code = asic;
		memcpy(chip->comm_page->monitors, monitors, MONITOR_ARRAY_SIZE);
		kfree(monitors);
	}

	return 0;
}
/*
 * Switch the digital I/O mode (S/PDIF optical/RCA or ADAT).  Loads the
 * matching external ASIC if needed, falls back to internal 48 kHz clock
 * when the current clock source is incompatible with the new mode, and
 * updates the control register.  Returns 1 if the clock was forced back
 * to internal, 0 if not, negative errno on failure.
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;
	short asic;

	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		asic = FW_LAYLA24_2S_ASIC;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		asic = FW_LAYLA24_2A_ASIC;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}

	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		spin_lock_irq(&chip->lock);
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
		spin_unlock_irq(&chip->lock);
	}

	/* switch_asic() can sleep */
	/* ...hence it is called outside the spinlock below */
	if (switch_asic(chip, asic) < 0)
		return -EIO;

	spin_lock_irq(&chip->lock);

	/* Tweak the control register */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;

	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		control_reg |= GML_ADAT_MODE;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}

	err = write_control_reg(chip, control_reg, TRUE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;

	DE_ACT(("set_digital_mode to %d\n", mode));
	return incompatible_clock;
}
| gpl-2.0 |
jiangchao87/m8uhl | arch/powerpc/boot/virtex.c | 14054 | 2919 | /*
* The platform specific code for virtex devices since a boot loader is not
* always used.
*
* (C) Copyright 2008 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "io.h"
#include "stdio.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
#define UART_LCR 3 /* Out: Line Control Register */
#define UART_MCR 4 /* Out: Modem Control Register */
#define UART_MCR_RTS 0x02 /* RTS complement */
#define UART_MCR_DTR 0x01 /* DTR complement */
#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
/*
 * Program an ns16550-compatible UART described by device-tree node
 * @devp: set the baud divisor from clock-frequency/current-speed,
 * 8N1 framing, RTS/DTR asserted, FIFOs cleared.  Returns 0 on
 * success, -1 if the registers or clock cannot be determined.
 */
static int virtex_ns16550_console_init(void *devp)
{
	unsigned char *reg_base;
	u32 reg_shift, reg_offset, clk, spd;
	u16 divisor;
	int n;

	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
		return -1;

	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
	if (n == sizeof(reg_offset))
		reg_base += reg_offset;

	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
	if (n != sizeof(reg_shift))
		reg_shift = 0;

	n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
	if (n != sizeof(spd))
		spd = 9600;	/* fall back to a conventional default */

	/* should there be a default clock rate?*/
	n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
	if (n != sizeof(clk))
		return -1;

	divisor = clk / (16 * spd);

	/* Access baud rate */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB);

	/* Baud rate based on input clock */
	out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF);
	out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8);

	/* 8 data, 1 stop, no parity */
	out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8);

	/* RTS/DTR */
	out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR);

	/* Clear transmitter and receiver */
	out_8(reg_base + (UART_FCR << reg_shift),
	      UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	return 0;
}
/* For virtex, the kernel may be loaded without using a bootloader and if so
some UARTs need more setup than is provided in the normal console init
*/
/*
 * Boot-time platform hook: when /chosen names a serial stdout that is
 * ns16550-compatible, give it the extra UART setup a bootloader would
 * normally have done.  Returns 0 on success, -1 on lookup failure.
 */
int platform_specific_init(void)
{
	void *devp;
	char devtype[MAX_PROP_LEN];
	char path[MAX_PATH_LEN];

	devp = finddevice("/chosen");
	if (devp == NULL)
		return -1;

	/* no stdout path recorded: nothing to initialize */
	if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) <= 0)
		return 0;

	devp = finddevice(path);
	if (devp == NULL)
		return -1;

	if (getprop(devp, "device_type", devtype, sizeof(devtype)) > 0 &&
	    strcmp(devtype, "serial") == 0 &&
	    dt_is_compatible(devp, "ns16550"))
		virtex_ns16550_console_init(devp);

	return 0;
}
| gpl-2.0 |
hiikezoe/android_kernel_fujitsu_f11d | drivers/parisc/lasi.c | 14566 | 6308 | /*
* LASI Device Driver
*
* (c) Copyright 1999 Red Hat Software
* Portions (c) Copyright 1999 The Puffin Group Inc.
* Portions (c) Copyright 1999 Hewlett-Packard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* by Alan Cox <alan@redhat.com> and
* Alex deVries <alex@onefishtwo.ca>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/led.h>
#include "gsc.h"
#define LASI_VER 0xC008 /* LASI Version */
#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */
#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */
/*
 * Assign the fixed LASI interrupt line for a child device, keyed on
 * its sversion id.  Unknown devices are left untouched.
 */
static void lasi_choose_irq(struct parisc_device *dev, void *ctrl)
{
	int irq;

	switch (dev->id.sversion) {
	case 0x74:	/* Centronics */
		irq = 7;
		break;
	case 0x7B:	/* Audio */
		irq = 13;
		break;
	case 0x81:	/* Lasi itself */
		irq = 14;
		break;
	case 0x82:	/* SCSI */
		irq = 9;
		break;
	case 0x83:	/* Floppy */
		irq = 20;
		break;
	case 0x84:	/* PS/2 Keyboard */
		irq = 26;
		break;
	case 0x87:	/* ISDN */
		irq = 18;
		break;
	case 0x8A:	/* LAN */
		irq = 8;
		break;
	case 0x8C:	/* RS232 */
		irq = 5;
		break;
	case 0x8D:	/* Telephone */
		irq = (dev->hw_path == 13) ? 16 : 17;
		break;
	default:	/* unknown device */
		return;
	}

	gsc_asic_assign_irq(ctrl, irq, &dev->irq);
}
/*
 * Quiesce the LASI interrupt controller: mask everything, drop any
 * pending interrupts, and reset the on-board peripherals firmware
 * doesn't handle for us.
 */
static void __init
lasi_init_irq(struct gsc_asic *this_lasi)
{
	unsigned long lasi_base = this_lasi->hpa;

	/* Stop LASI barking for a bit */
	gsc_writel(0x00000000, lasi_base+OFFSET_IMR);

	/* clear pending interrupts */
	gsc_readl(lasi_base+OFFSET_IRR);

	/* We're not really convinced we want to reset the onboard
         * devices. Firmware does it for us...
	 */

	/* Resets */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/	/* Parallel */
	if(pdc_add_valid(lasi_base+0x4004) == PDC_OK)
		gsc_writel(0xFFFFFFFF, lasi_base+0x4004);	/* Audio */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/	/* Serial */
	/* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/	/* SCSI */
	gsc_writel(0xFFFFFFFF, lasi_base+0x7000);	/* LAN */
	gsc_writel(0xFFFFFFFF, lasi_base+0x8000);	/* Keyboard */
	gsc_writel(0xFFFFFFFF, lasi_base+0xA000);	/* FDC */

	/* Ok we hit it on the head with a hammer, our Dog is now
	** comatose and muzzled.  Devices will now unmask LASI
	** interrupts as they are registered as irq's in the LASI range.
	*/
	/* XXX: I thought it was `awks that got `it on the `ead with an
	 * `ammer.  -- willy
	 */
}
/*
** lasi_led_init()
**
** lasi_led_init() initializes the LED controller on the LASI.
**
** Since Mirage and Electra machines use a different LED
** address register, we need to check for these machines
** explicitly.
*/
#ifndef CONFIG_CHASSIS_LCD_LED
#define lasi_led_init(x) /* nothing */
#else
/*
 * Register the chassis LED data register with the LED driver.  The
 * register offset depends on the machine family: Gecko-class boxes
 * have a single always-on LED, Mirage/Electra use a different base.
 */
static void __init lasi_led_init(unsigned long lasi_hpa)
{
	unsigned long datareg;

	switch (CPU_HVERSION) {
	/* Gecko machines have only one single LED, which can be permanently
	   turned on by writing a zero into the power control register. */
	case 0x600:		/* Gecko (712/60) */
	case 0x601:		/* Gecko (712/80) */
	case 0x602:		/* Gecko (712/100) */
	case 0x603:		/* Anole 64 (743/64) */
	case 0x604:		/* Anole 100 (743/100) */
	case 0x605:		/* Gecko (712/120) */
		datareg = lasi_hpa + 0x0000C000;
		gsc_writeb(0, datareg);
		return; /* no need to register the LED interrupt-function */

	/* Mirage and Electra machines need special offsets */
	case 0x60A:		/* Mirage Jr (715/64) */
	case 0x60B:		/* Mirage 100 */
	case 0x60C:		/* Mirage 100+ */
	case 0x60D:		/* Electra 100 */
	case 0x60E:		/* Electra 120 */
		datareg = lasi_hpa - 0x00020000;
		break;

	default:
		datareg = lasi_hpa + 0x0000C000;
		break;
	}

	register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, datareg);
}
#endif
/*
* lasi_power_off
*
* Function for lasi to turn off the power. This is accomplished by setting a
* 1 to PWR_ON_L in the Power Control Register
*
*/
static unsigned long lasi_power_off_hpa __read_mostly;
/*
 * Power the machine down by writing PWR_ON_L (bit 1) into the Power
 * Control Register, which sits at HPA + 0xC000 of the recorded LASI.
 */
static void lasi_power_off(void)
{
	gsc_writel(0x02, lasi_power_off_hpa + 0x0000C000);
}
/*
 * Probe routine for a LASI bus adapter: allocate the gsc_asic state,
 * read the version register, set up LEDs and the interrupt controller,
 * claim a GSC irq, register the common GSC machinery and install the
 * power-off hook.  Returns 0 on success, negative errno on failure.
 */
static int __init lasi_init_chip(struct parisc_device *dev)
{
	extern void (*chassis_power_off)(void);
	struct gsc_asic *lasi;
	struct gsc_irq gsc_irq;
	int ret;

	lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
	if (!lasi)
		return -ENOMEM;

	lasi->name = "Lasi";
	lasi->hpa = dev->hpa.start;

	/* Check the 4-bit (yes, only 4) version register */
	lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf;
	printk(KERN_INFO "%s version %d at 0x%lx found.\n",
		lasi->name, lasi->version, lasi->hpa);

	/* initialize the chassis LEDs really early */
	lasi_led_init(lasi->hpa);

	/* Stop LASI barking for a bit */
	lasi_init_irq(lasi);

	/* the IRQ lasi should use */
	dev->irq = gsc_alloc_irq(&gsc_irq);
	if (dev->irq < 0) {
		printk(KERN_ERR "%s(): cannot get GSC irq\n",
				__func__);
		kfree(lasi);
		return -EBUSY;
	}

	lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;

	ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
	if (ret < 0) {
		/* NOTE(review): the GSC irq allocated above is not released
		 * on this or the later failure path - confirm whether a
		 * matching release call is needed. */
		kfree(lasi);
		return ret;
	}

	/* enable IRQ's for devices below LASI */
	gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR);

	/* Done init'ing, register this driver */
	ret = gsc_common_setup(dev, lasi);
	if (ret) {
		kfree(lasi);
		return ret;
	}

	gsc_fixup_irqs(dev, lasi, lasi_choose_irq);

	/* initialize the power off function */
	/* FIXME: Record the LASI HPA for the power off function.  This should
	 * ensure that only the first LASI (the one controlling the power off)
	 * should set the HPA here */
	lasi_power_off_hpa = lasi->hpa;
	chassis_power_off = lasi_power_off;

	return ret;
}
/* ID table: match the LASI bus adapter, any hardware revision */
static struct parisc_device_id lasi_tbl[] = {
	{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 },
	{ 0, }
};

/* Driver registration record; devices are probed via lasi_init_chip() */
struct parisc_driver lasi_driver = {
	.name =		"lasi",
	.id_table =	lasi_tbl,
	.probe =	lasi_init_chip,
};
| gpl-2.0 |
ghbhaha/android_kernel_oneplus_msm8974 | drivers/staging/prima/CORE/SME/src/pmc/pmcApi.c | 231 | 125704 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/******************************************************************************
*
* Name: pmcApi.c
*
* Description: Routines that make up the Power Management Control (PMC) API.
*
******************************************************************************/
#include "palTypes.h"
#include "aniGlobal.h"
#include "palTimer.h"
#include "csrLinkList.h"
#include "smsDebug.h"
#include "pmcApi.h"
#include "pmc.h"
#include "cfgApi.h"
#include "smeInside.h"
#include "csrInsideApi.h"
#include "wlan_ps_wow_diag.h"
#include "wlan_qct_wda.h"
#include "limSessionUtils.h"
#include "csrInsideApi.h"
extern void pmcReleaseCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand );
void pmcCloseDeferredMsgList(tpAniSirGlobal pMac);
void pmcCloseDeviceStateUpdateList(tpAniSirGlobal pMac);
void pmcCloseRequestStartUapsdList(tpAniSirGlobal pMac);
void pmcCloseRequestBmpsList(tpAniSirGlobal pMac);
void pmcCloseRequestFullPowerList(tpAniSirGlobal pMac);
void pmcClosePowerSaveCheckList(tpAniSirGlobal pMac);
/******************************************************************************
*
* Name:  pmcOpen
*
* Description:
*    Does a PMC open operation on the device: initializes PMC bookkeeping,
*    allocates the PMC timers, and opens the callback/request lists.
*    On any mid-sequence failure, every timer and list allocated so far is
*    released again so a failed open does not leak resources (the original
*    code returned without unwinding).
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - open successful
*    eHAL_STATUS_FAILURE - open not successful
*
******************************************************************************/
eHalStatus pmcOpen (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcOpen"));

    /* Initialize basic PMC information about device. */
    pMac->pmc.powerSource = BATTERY_POWER;
    pMac->pmc.pmcState = STOPPED;
    pMac->pmc.pmcReady = FALSE;

    /* Initialize Power Save Modes */
    pMac->pmc.impsEnabled = FALSE;
    pMac->pmc.autoBmpsEntryEnabled = FALSE;
    pMac->pmc.smpsEnabled = FALSE;
    pMac->pmc.uapsdEnabled = TRUE;
    pMac->pmc.bmpsEnabled = TRUE;
    pMac->pmc.standbyEnabled = TRUE;
    pMac->pmc.wowlEnabled = TRUE;
    pMac->pmc.rfSuppliesVotedOff= FALSE;

    palZeroMemory(pMac->hHdd, &(pMac->pmc.bmpsConfig), sizeof(tPmcBmpsConfigParams));
    palZeroMemory(pMac->hHdd, &(pMac->pmc.impsConfig), sizeof(tPmcImpsConfigParams));
    palZeroMemory(pMac->hHdd, &(pMac->pmc.smpsConfig), sizeof(tPmcSmpsConfigParams));

    /* Allocate a timer to use with IMPS. */
    if (vos_timer_init(&pMac->pmc.hImpsTimer, VOS_TIMER_TYPE_SW, pmcImpsTimerExpired, hHal) != VOS_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for IMPS"));
        return eHAL_STATUS_FAILURE;
    }

    /* Allocate a timer used in Full Power State to measure traffic
       levels and determine when to enter BMPS. */
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_init(&pMac->pmc.hTrafficTimer,
                               VOS_TIMER_TYPE_SW, pmcTrafficTimerExpired, hHal)))
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for traffic measurement"));
        goto err_imps_timer;
    }

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    /* Allocate a timer used to report current PMC state through periodic DIAG event */
    if (vos_timer_init(&pMac->pmc.hDiagEvtTimer, VOS_TIMER_TYPE_SW, pmcDiagEvtTimerExpired, hHal) != VOS_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for diag event reporting"));
        goto err_traffic_timer;
    }
#endif

    //Initialize the default value for Bmps related config.
    pMac->pmc.bmpsConfig.trafficMeasurePeriod = BMPS_TRAFFIC_TIMER_DEFAULT;
    pMac->pmc.bmpsConfig.bmpsPeriod = WNI_CFG_LISTEN_INTERVAL_STADEF;

    /* Allocate a timer used to schedule a deferred power save mode exit. */
    if (vos_timer_init(&pMac->pmc.hExitPowerSaveTimer, VOS_TIMER_TYPE_SW,
                       pmcExitPowerSaveTimerExpired, hHal) != VOS_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate exit power save mode timer"));
        PMC_ABORT;
        goto err_diag_timer;
    }

    /* Initialize lists for power save check routines and request full power callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.powerSaveCheckList) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize power save check routine list"));
        PMC_ABORT;
        goto err_exit_ps_timer;
    }
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestFullPowerList) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize request full power callback routine list"));
        PMC_ABORT;
        goto err_ps_check_list;
    }

    /* Initialize lists for request BMPS callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestBmpsList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, "PMC: cannot initialize request BMPS callback routine list");
        goto err_full_power_list;
    }

    /* Initialize lists for request start UAPSD callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestStartUapsdList) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, "PMC: cannot initialize request start UAPSD callback routine list");
        goto err_bmps_list;
    }

    /* Initialize lists for device state update indication callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.deviceStateUpdateIndList) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, "PMC: cannot initialize device state update indication callback list");
        goto err_uapsd_list;
    }

    if (csrLLOpen(pMac->hHdd, &pMac->pmc.deferredMsgList) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize deferred msg list"));
        PMC_ABORT;
        goto err_device_list;
    }

    return eHAL_STATUS_SUCCESS;

    /* Unwind ladder: release everything allocated before the failure point,
       in reverse order of acquisition. */
err_device_list:
    pmcCloseDeviceStateUpdateList(pMac);
err_uapsd_list:
    pmcCloseRequestStartUapsdList(pMac);
err_bmps_list:
    pmcCloseRequestBmpsList(pMac);
err_full_power_list:
    pmcCloseRequestFullPowerList(pMac);
err_ps_check_list:
    pmcClosePowerSaveCheckList(pMac);
err_exit_ps_timer:
    vos_timer_destroy(&pMac->pmc.hExitPowerSaveTimer);
err_diag_timer:
#ifdef FEATURE_WLAN_DIAG_SUPPORT
    vos_timer_destroy(&pMac->pmc.hDiagEvtTimer);
#endif
#ifdef FEATURE_WLAN_DIAG_SUPPORT
err_traffic_timer:
#endif
    vos_timer_destroy(&pMac->pmc.hTrafficTimer);
err_imps_timer:
    vos_timer_destroy(&pMac->pmc.hImpsTimer);
    return eHAL_STATUS_FAILURE;
}
/******************************************************************************
*
* Name:  pmcStart
*
* Description:
*    Does a PMC start operation on the device: resets the runtime PMC state
*    to FULL_POWER, clears pending-request bookkeeping, and pushes the
*    initial SMPS (spatial multiplexing power save) state down to PE.
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - start successful
*    eHAL_STATUS_FAILURE - start not successful
*
******************************************************************************/
eHalStatus pmcStart (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    /* Default to "no limit": the original code left this uninitialized when
       SMPS was enabled but smpsConfig.mode was neither dynamic nor static,
       sending stack garbage to PE (undefined behavior). */
    tSirMacHTMIMOPowerSaveState htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT;

    pmcLog(pMac, LOG2, FL("Entering pmcStart"));

    /* Initialize basic PMC information about device. */
    pMac->pmc.pmcState = FULL_POWER;
    pMac->pmc.requestFullPowerPending = FALSE;
    pMac->pmc.uapsdSessionRequired = FALSE;
    pMac->pmc.wowlModeRequired = FALSE;
    pMac->pmc.bmpsRequestedByHdd = FALSE;
    pMac->pmc.remainInPowerActiveTillDHCP = FALSE;
    pMac->pmc.remainInPowerActiveThreshold = 0;

    /* WLAN Switch initial states. */
    pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_ON;
    pMac->pmc.swWlanSwitchState = ePMC_SWITCH_ON;

    /* No IMPS callback routine yet. */
    pMac->pmc.impsCallbackRoutine = NULL;

    /* No STANDBY callback routine yet. */
    pMac->pmc.standbyCallbackRoutine = NULL;

    /* No WOWL callback routine yet. */
    pMac->pmc.enterWowlCallbackRoutine = NULL;

    /* Initialize BMPS traffic counts. */
    pMac->pmc.cLastTxUnicastFrames = 0;
    pMac->pmc.cLastRxUnicastFrames = 0;
    pMac->pmc.ImpsReqFailed = VOS_FALSE;
    pMac->pmc.ImpsReqFailCnt = 0;
    pMac->pmc.ImpsReqTimerFailed = 0;
    pMac->pmc.ImpsReqTimerfailCnt = 0;

    /* Configure SMPS: only restrict MIMO if SMPS is enabled and allowed on
       the current power source; otherwise keep the NO_LIMIT default. */
    if (pMac->pmc.smpsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.smpsConfig.enterOnAc))
    {
        if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS)
        {
            htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC;
        }
        else if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS)
        {
            htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC;
        }
    }

    if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState,
                       sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    if (pmcStartDiagEvtTimer(hHal) != eHAL_STATUS_SUCCESS)
    {
        return eHAL_STATUS_FAILURE;
    }
#endif

#if defined(ANI_LOGDUMP)
    pmcDumpInit(hHal);
#endif

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcStop
*
* Description:
*    Does a PMC stop operation on the device: cancels all PMC timers,
*    fails out every outstanding power-save callback, drains the deferred
*    message list, and marks PMC as stopped / not ready.
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - stop successful
*    eHAL_STATUS_FAILURE - stop not successful
*
******************************************************************************/
eHalStatus pmcStop (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tListElem *pListEntry;
    tPmcDeferredMsg *pDeferred;

    pmcLog(pMac, LOG2, FL("Entering pmcStop"));

    /* Cancel any running timers. */
    if (vos_timer_stop(&pMac->pmc.hImpsTimer) != VOS_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot cancel IMPS timer"));
    }
    pmcStopTrafficTimer(hHal);
#ifdef FEATURE_WLAN_DIAG_SUPPORT
    pmcStopDiagEvtTimer(hHal);
#endif
    if (vos_timer_stop(&pMac->pmc.hExitPowerSaveTimer) != VOS_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot cancel exit power save mode timer"));
    }

    /* Complete every outstanding callback with a failure status so no
       caller is left waiting after PMC shuts down. */
    pmcDoCallbacks(hHal, eHAL_STATUS_FAILURE);
    pmcDoBmpsCallbacks(hHal, eHAL_STATUS_FAILURE);
    pMac->pmc.uapsdSessionRequired = FALSE;
    pmcDoStartUapsdCallbacks(hHal, eHAL_STATUS_FAILURE);
    pmcDoStandbyCallbacks(hHal, eHAL_STATUS_FAILURE);

    /* Drain and free every queued deferred message under the list lock. */
    csrLLLock( &pMac->pmc.deferredMsgList );
    for (pListEntry = csrLLRemoveHead( &pMac->pmc.deferredMsgList, eANI_BOOLEAN_FALSE );
         NULL != pListEntry;
         pListEntry = csrLLRemoveHead( &pMac->pmc.deferredMsgList, eANI_BOOLEAN_FALSE ))
    {
        pDeferred = GET_BASE_ADDR( pListEntry, tPmcDeferredMsg, link );
        palFreeMemory( pMac->hHdd, pDeferred );
    }
    csrLLUnlock( &pMac->pmc.deferredMsgList );

    /* PMC is stopped. */
    pMac->pmc.pmcState = STOPPED;
    pMac->pmc.pmcReady = FALSE;

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcClose
*
* Description:
*    Does a PMC close operation on the device: destroys every PMC timer and
*    tears down the PMC lists (whose entries are dynamically allocated and
*    therefore need their dedicated cleanup routines).
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - close successful
*    eHAL_STATUS_FAILURE - close not successful
*
******************************************************************************/
eHalStatus pmcClose (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcClose"));

    /* Destroy the timers; a failure here is only logged since there is
       nothing more to do at close time. */
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_destroy(&pMac->pmc.hImpsTimer)))
    {
        pmcLog(pMac, LOGE, FL("Cannot deallocate IMPS timer"));
    }
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_destroy(&pMac->pmc.hTrafficTimer)))
    {
        pmcLog(pMac, LOGE, FL("Cannot deallocate traffic timer"));
    }
#ifdef FEATURE_WLAN_DIAG_SUPPORT
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_destroy(&pMac->pmc.hDiagEvtTimer)))
    {
        pmcLog(pMac, LOGE, FL("Cannot deallocate timer for diag event reporting"));
    }
#endif
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_destroy(&pMac->pmc.hExitPowerSaveTimer)))
    {
        pmcLog(pMac, LOGE, FL("Cannot deallocate exit power save mode timer"));
    }

    /* Each list below owns heap-allocated entries, so each has its own
       cleanup routine that frees the entries before closing the list. */
    pmcClosePowerSaveCheckList(pMac);
    pmcCloseRequestFullPowerList(pMac);
    pmcCloseRequestBmpsList(pMac);
    pmcCloseRequestStartUapsdList(pMac);
    pmcCloseDeviceStateUpdateList(pMac);
    pmcCloseDeferredMsgList(pMac);

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcSignalPowerEvent
*
* Description:
*    Signals to PMC that a power event has occurred (system suspend/resume,
*    WLAN switch changes, or power-source changes on pre-GEN6 builds).
*
* Parameters:
*    hHal - HAL handle for device
*    event - the event that has occurred
*
* Returns:
*    eHAL_STATUS_SUCCESS - signaling successful
*    eHAL_STATUS_FAILURE - signaling not successful
*
******************************************************************************/
eHalStatus pmcSignalPowerEvent (tHalHandle hHal, tPmcPowerEvent event)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
#ifndef GEN6_ONWARDS
    /* Default to "no limit": the original code sent this uninitialized in
       the battery case when smpsConfig.mode was neither dynamic nor static
       (undefined behavior). */
    tSirMacHTMIMOPowerSaveState htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT;
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcSignalPowerEvent, event %d"), event);

    /* Take action based on the event being signaled. */
    switch (event)
    {
#ifndef GEN6_ONWARDS
    case ePMC_SYSTEM_HIBERNATE:
        return pmcEnterLowPowerState(hHal);

    case ePMC_SYSTEM_RESUME:
        return pmcExitLowPowerState(hHal);

    case ePMC_HW_WLAN_SWITCH_OFF:
        pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_OFF;
        return pmcEnterLowPowerState(hHal);

    case ePMC_HW_WLAN_SWITCH_ON:
        pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_ON;
        return pmcExitLowPowerState(hHal);

    case ePMC_SW_WLAN_SWITCH_OFF:
        pMac->pmc.swWlanSwitchState = ePMC_SWITCH_OFF;
        return pmcEnterLowPowerState(hHal);

    case ePMC_SW_WLAN_SWITCH_ON:
        pMac->pmc.swWlanSwitchState = ePMC_SWITCH_ON;
        return pmcExitLowPowerState(hHal);

    case ePMC_BATTERY_OPERATION:
        pMac->pmc.powerSource = BATTERY_POWER;

        /* Turn on SMPS. */
        if (pMac->pmc.smpsEnabled)
        {
            if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS)
            {
                htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC;
            }
            else if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS)
            {
                htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC;
            }
            if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState,
                               sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS)
                return eHAL_STATUS_FAILURE;
        }
        return eHAL_STATUS_SUCCESS;

    case ePMC_AC_OPERATION:
        pMac->pmc.powerSource = AC_POWER;

        /* Turn off SMPS unless it is explicitly allowed on AC power. */
        if (!pMac->pmc.smpsConfig.enterOnAc)
        {
            htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT;
            if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState,
                               sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS)
                return eHAL_STATUS_FAILURE;
        }
        return eHAL_STATUS_SUCCESS;
#endif //GEN6_ONWARDS
    default:
        pmcLog(pMac, LOGE, FL("Invalid event %d"), event);
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }
}
/******************************************************************************
*
* Name:  pmcSetConfigPowerSave
*
* Description:
*    Configures one of the power saving modes.
*
* Parameters:
*    hHal - HAL handle for device
*    psMode - the power saving mode to configure
*    pConfigParams - pointer to configuration parameters specific to the
*                    power saving mode
*
* Returns:
*    eHAL_STATUS_SUCCESS - configuration successful
*    eHAL_STATUS_FAILURE - configuration not successful
*
******************************************************************************/
eHalStatus pmcSetConfigPowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode, void *pConfigParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcSetConfigPowerSave, power save mode %d"), psMode);

    /* Configure the specified power saving mode.  In every case the caller's
       config struct is copied by value into pMac->pmc; pConfigParams is cast
       to the struct type matching psMode, so the caller must pass the right
       struct for the mode (no runtime type check is possible here). */
    switch (psMode)
    {
    case ePMC_IDLE_MODE_POWER_SAVE:
        /* Copy caller's IMPS config and log it. */
        pMac->pmc.impsConfig = *(tpPmcImpsConfigParams)pConfigParams;
        pmcLog(pMac, LOG3, FL("IMPS configuration"));
        pmcLog(pMac, LOG3, "          enter on AC: %d",
               pMac->pmc.impsConfig.enterOnAc);
        break;

    case ePMC_BEACON_MODE_POWER_SAVE:
        /* Copy caller's BMPS config and log each field. */
        pMac->pmc.bmpsConfig = *(tpPmcBmpsConfigParams)pConfigParams;
        pmcLog(pMac, LOG3, FL("BMPS configuration"));
        pmcLog(pMac, LOG3, "          enter on AC: %d",
               pMac->pmc.bmpsConfig.enterOnAc);
        pmcLog(pMac, LOG3, "          TX threshold: %d",
               pMac->pmc.bmpsConfig.txThreshold);
        pmcLog(pMac, LOG3, "          RX threshold: %d",
               pMac->pmc.bmpsConfig.rxThreshold);
        pmcLog(pMac, LOG3, "          traffic measurement period (ms): %d",
               pMac->pmc.bmpsConfig.trafficMeasurePeriod);
        pmcLog(pMac, LOG3, "          BMPS period: %d",
               pMac->pmc.bmpsConfig.bmpsPeriod);
        pmcLog(pMac, LOG3, "          beacons to forward code: %d",
               pMac->pmc.bmpsConfig.forwardBeacons);
        pmcLog(pMac, LOG3, "          value of N: %d",
               pMac->pmc.bmpsConfig.valueOfN);
        pmcLog(pMac, LOG3, "          use PS poll: %d",
               pMac->pmc.bmpsConfig.usePsPoll);
        pmcLog(pMac, LOG3, "          set PM on last frame: %d",
               pMac->pmc.bmpsConfig.setPmOnLastFrame);
        pmcLog(pMac, LOG3, "          value of enableBeaconEarlyTermination: %d",
               pMac->pmc.bmpsConfig.enableBeaconEarlyTermination);
        pmcLog(pMac, LOG3, "          value of bcnEarlyTermWakeInterval: %d",
               pMac->pmc.bmpsConfig.bcnEarlyTermWakeInterval);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
        /* Emit a DIAG event mirroring the new BMPS config. */
        vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
        psRequest.event_subtype = WLAN_BMPS_SET_CONFIG;
        /* possible loss of data due to mismatch but expectation is that
           values can reasonably be expected to fit in target widths */
        psRequest.bmps_auto_timer_duration = (v_U16_t)pMac->pmc.bmpsConfig.trafficMeasurePeriod;
        psRequest.bmps_period = (v_U16_t)pMac->pmc.bmpsConfig.bmpsPeriod;

        WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif
        break;

    case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE:
        /* Copy caller's SMPS config and log it. */
        pMac->pmc.smpsConfig = *(tpPmcSmpsConfigParams)pConfigParams;
        pmcLog(pMac, LOG3, FL("SMPS configuration"));
        pmcLog(pMac, LOG3, "          mode: %d", pMac->pmc.smpsConfig.mode);
        pmcLog(pMac, LOG3, "          enter on AC: %d",
               pMac->pmc.smpsConfig.enterOnAc);
        break;

    default:
        pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode);
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    //Send the power save config down to PE/HAL/FW if BMPS mode is being configured
    //and pmcReady has been invoked
    if(PMC_IS_READY(pMac) && psMode == ePMC_BEACON_MODE_POWER_SAVE)
    {
        if (pmcSendPowerSaveConfigMessage(hHal) != eHAL_STATUS_SUCCESS)
            return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcGetConfigPowerSave
*
* Description:
*    Get the config for the specified power save mode.  The stored config
*    struct for the requested mode is copied into the caller's buffer,
*    which must point at the struct type matching psMode.
*
* Parameters:
*    hHal - HAL handle for device
*    psMode - the power saving mode to configure
*    pConfigParams - pointer to configuration parameters specific to the
*                    power saving mode
*
* Returns:
*    eHAL_STATUS_SUCCESS - configuration successful
*    eHAL_STATUS_FAILURE - configuration not successful
*
******************************************************************************/
eHalStatus pmcGetConfigPowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode, void *pConfigParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcGetConfigPowerSave, power save mode %d"), psMode);

    /* Copy out the stored configuration for the requested mode. */
    if (psMode == ePMC_IDLE_MODE_POWER_SAVE)
    {
        *(tpPmcImpsConfigParams)pConfigParams = pMac->pmc.impsConfig;
    }
    else if (psMode == ePMC_BEACON_MODE_POWER_SAVE)
    {
        *(tpPmcBmpsConfigParams)pConfigParams = pMac->pmc.bmpsConfig;
    }
    else if (psMode == ePMC_SPATIAL_MULTIPLEX_POWER_SAVE)
    {
        *(tpPmcSmpsConfigParams)pConfigParams = pMac->pmc.smpsConfig;
    }
    else
    {
        pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode);
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcEnablePowerSave
*
* Description:
*    Enables one of the power saving modes.  For SMPS, if PMC has already
*    been started and the current power source permits it, the new MIMO
*    power-save state is also pushed down to PE immediately.
*
* Parameters:
*    hHal - HAL handle for device
*    psMode - the power saving mode to enable
*
* Returns:
*    eHAL_STATUS_SUCCESS - successfully enabled
*    eHAL_STATUS_FAILURE - not successfully enabled
*
******************************************************************************/
eHalStatus pmcEnablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    /* Default to "no limit": the original code sent this uninitialized when
       smpsConfig.mode was neither dynamic nor static (undefined behavior). */
    tSirMacHTMIMOPowerSaveState htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_PS_MODE_ENABLE_REQ;
    psRequest.enable_disable_powersave_mode = psMode;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcEnablePowerSave, power save mode %d"), psMode);

    /* Enable the specified power saving mode. */
    switch (psMode)
    {
    case ePMC_IDLE_MODE_POWER_SAVE:
        pMac->pmc.impsEnabled = TRUE;
        break;

    case ePMC_BEACON_MODE_POWER_SAVE:
        pMac->pmc.bmpsEnabled = TRUE;
        break;

    case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE:
        pMac->pmc.smpsEnabled = TRUE;

        /* If PMC already started, then turn on SMPS now (unless we are on
           AC power and SMPS-on-AC is not allowed). */
        if (pMac->pmc.pmcState != STOPPED)
        {
            if (pMac->pmc.powerSource != AC_POWER ||
                pMac->pmc.smpsConfig.enterOnAc)
            {
                if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS)
                {
                    htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC;
                }
                else if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS)
                {
                    htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC;
                }
                if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState,
                                   sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS)
                    return eHAL_STATUS_FAILURE;
            }
        }
        break;

    case ePMC_UAPSD_MODE_POWER_SAVE:
        pMac->pmc.uapsdEnabled = TRUE;
        break;

    case ePMC_STANDBY_MODE_POWER_SAVE:
        pMac->pmc.standbyEnabled = TRUE;
        break;

    case ePMC_WOWL_MODE_POWER_SAVE:
        pMac->pmc.wowlEnabled = TRUE;
        break;

    default:
        pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode);
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn pmcStartAutoBmpsTimer
    \brief  Starts a timer that periodically polls all the registered
            module for entry into Bmps mode. This timer is started only if BMPS is
            enabled and whenever the device is in full power.
    \param  hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus pmcStartAutoBmpsTimer (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_START_BMPS_AUTO_TIMER_REQ;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcStartAutoBmpsTimer"));

    /* Guard: BMPS must be enabled before the auto-entry timer may run. */
    if (!pMac->pmc.bmpsEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enable BMPS timer. BMPS is disabled");
        return eHAL_STATUS_FAILURE;
    }

    pMac->pmc.autoBmpsEntryEnabled = TRUE;

    /* Only arm the traffic timer when the BMPS-timer preconditions hold
       (e.g. an Infra session exists); otherwise it is armed later, when
       the STA associates to an AP. */
    if (pmcShouldBmpsTimerRun(pMac)
        && pmcStartTrafficTimer(hHal, pMac->pmc.bmpsConfig.trafficMeasurePeriod) != eHAL_STATUS_SUCCESS)
    {
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn pmcStopAutoBmpsTimer
    \brief  Stops the Auto BMPS Timer that was started using sme_startAutoBmpsTimer
            Stopping the timer does not cause a device state change. Only the timer
            is stopped. If "Full Power" is desired, use the pmcRequestFullPower API
    \param  hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus pmcStopAutoBmpsTimer (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_STOP_BMPS_AUTO_TIMER_REQ;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcStopAutoBmpsTimer"));

    pMac->pmc.autoBmpsEntryEnabled = FALSE;

    /* Keep the traffic timer running while a UAPSD session is still
       required or HDD has a BMPS request outstanding; otherwise stop it. */
    if (!pMac->pmc.uapsdSessionRequired && !pMac->pmc.bmpsRequestedByHdd)
    {
        pmcStopTrafficTimer(hHal);
    }

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcDisablePowerSave
*
* Description:
*    Disables one of the power saving modes.  Disabling SMPS additionally
*    pushes a "no limit" MIMO power-save state down to PE so the hardware
*    stops restricting spatial streams.
*
* Parameters:
*    hHal - HAL handle for device
*    psMode - the power saving mode to disable
*
* Returns:
*    eHAL_STATUS_SUCCESS - successfully disabled
*    eHAL_STATUS_FAILURE - not successfully disabled
*
******************************************************************************/
eHalStatus pmcDisablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tSirMacHTMIMOPowerSaveState htMimoPowerSaveState;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_PS_MODE_DISABLE_REQ;
    psRequest.enable_disable_powersave_mode = psMode;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcDisablePowerSave, power save mode %d"), psMode);

    /* Disable the specified power saving mode. */
    switch (psMode)
    {
    case ePMC_IDLE_MODE_POWER_SAVE:
        pMac->pmc.impsEnabled = FALSE;
        break;

    case ePMC_BEACON_MODE_POWER_SAVE:
        pMac->pmc.bmpsEnabled = FALSE;
        break;

    case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE:
        pMac->pmc.smpsEnabled = FALSE;

        /* Turn off SMPS: tell PE there is no MIMO restriction any more. */
        htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT;
        if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState,
                           sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS)
        {
            return eHAL_STATUS_FAILURE;
        }
        break;

    case ePMC_UAPSD_MODE_POWER_SAVE:
        pMac->pmc.uapsdEnabled = FALSE;
        break;

    case ePMC_STANDBY_MODE_POWER_SAVE:
        pMac->pmc.standbyEnabled = FALSE;
        break;

    case ePMC_WOWL_MODE_POWER_SAVE:
        pMac->pmc.wowlEnabled = FALSE;
        break;

    default:
        pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode);
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcQueryPowerState
*
* Description:
*    Returns the current power state of the device.  Any of the three
*    output pointers may be NULL; the corresponding value is then simply
*    not reported.
*
* Parameters:
*    hHal - HAL handle for device
*    pPowerState - pointer to location to return power state
*    pHwWlanSwitchState - pointer to location to return Hardware WLAN
*                         Switch state
*    pSwWlanSwitchState - pointer to location to return Software WLAN
*                         Switch state
*
* Returns:
*    eHAL_STATUS_SUCCESS - power state successfully returned
*    eHAL_STATUS_FAILURE - power state not successfully returned
*
******************************************************************************/
eHalStatus pmcQueryPowerState (tHalHandle hHal, tPmcPowerState *pPowerState,
                               tPmcSwitchState *pHwWlanSwitchState, tPmcSwitchState *pSwWlanSwitchState)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcQueryPowerState"));

    /* Map the internal PMC state onto the coarse power state: anything
       other than FULL_POWER is reported as low power. */
    if (pPowerState != NULL)
    {
        *pPowerState = (pMac->pmc.pmcState == FULL_POWER)
                           ? ePMC_FULL_POWER
                           : ePMC_LOW_POWER;
    }

    /* Return current switch settings. */
    if (pHwWlanSwitchState != NULL)
    {
        *pHwWlanSwitchState = pMac->pmc.hwWlanSwitchState;
    }
    if (pSwWlanSwitchState != NULL)
    {
        *pSwWlanSwitchState = pMac->pmc.swWlanSwitchState;
    }

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name:  pmcIsPowerSaveEnabled
*
* Description:
*    Checks if the device is able to enter one of the power save modes.
*    "Able to enter" means the power save mode is enabled for the device
*    and the host is using the correct power source for entry into the
*    power save mode.  This routine does not indicate whether the device
*    is actually in the power save mode at a particular point in time.
*
* Parameters:
*    hHal - HAL handle for device
*    psMode - the power saving mode
*
* Returns:
*    TRUE if device is able to enter the power save mode, FALSE otherwise
*
******************************************************************************/
tANI_BOOLEAN pmcIsPowerSaveEnabled (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcIsPowerSaveEnabled, power save mode %d"), psMode);

    /* Check ability to enter based on the specified power saving mode.
       IMPS and SMPS additionally require that the host is not on AC power
       unless the mode's enterOnAc flag allows it. */
    switch (psMode)
    {
    case ePMC_IDLE_MODE_POWER_SAVE:
        return pMac->pmc.impsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.impsConfig.enterOnAc);

    case ePMC_BEACON_MODE_POWER_SAVE:
        return pMac->pmc.bmpsEnabled;

    case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE:
        return pMac->pmc.smpsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.smpsConfig.enterOnAc);

    case ePMC_UAPSD_MODE_POWER_SAVE:
        return pMac->pmc.uapsdEnabled;

    case ePMC_STANDBY_MODE_POWER_SAVE:
        return pMac->pmc.standbyEnabled;

    case ePMC_WOWL_MODE_POWER_SAVE:
        /* (dead "break" that followed this return has been removed) */
        return pMac->pmc.wowlEnabled;

    default:
        pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode);
        PMC_ABORT;
        return FALSE;
    }
}
/******************************************************************************
*
* Name:  pmcRequestFullPower
*
* Description:
*    Request that the device be brought to full power state.
*
* Parameters:
*    hHal - HAL handle for device
*    callbackRoutine - routine to call when device actually achieves full
*                      power state if "eHAL_STATUS_PMC_PENDING" is returned
*    callbackContext - value to be passed as parameter to routine specified
*                      above
*    fullPowerReason -  Reason for requesting full power mode. This is used
*                       by PE to decide whether data null should be sent to
*                       AP when exiting BMPS mode. Caller should use the
*                       eSME_LINK_DISCONNECTED reason if link is disconnected
*                       and there is no need to tell the AP that we are going
*                       out of power save.
*
* Returns:
*    eHAL_STATUS_SUCCESS - device brought to full power state
*    eHAL_STATUS_FAILURE - device cannot be brought to full power state
*    eHAL_STATUS_PMC_PENDING - device is being brought to full power state,
*    callbackRoutine will be called when completed
*
******************************************************************************/
eHalStatus pmcRequestFullPower (tHalHandle hHal, void (*callbackRoutine) (void *callbackContext, eHalStatus status),
                                void *callbackContext, tRequestFullPowerReason fullPowerReason)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpRequestFullPowerEntry pEntry;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_ENTER_FULL_POWER_REQ;
    psRequest.full_power_request_reason = fullPowerReason;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcRequestFullPower"));

    /* Reject the request outright if PMC has not been started/readied. */
    if( !PMC_IS_READY(pMac) )
    {
        pmcLog(pMac, LOGE, FL("Requesting Full Power when PMC not ready"));
        pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
               pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    /* If HDD is requesting full power, clear any buffered requests for WOWL and BMPS that were
       requested by HDD previously */
    if(SIR_IS_FULL_POWER_NEEDED_BY_HDD(fullPowerReason))
    {
        pMac->pmc.bmpsRequestedByHdd = FALSE;
        pMac->pmc.wowlModeRequired = FALSE;
    }

    /* If already in full power, just return. */
    if (pMac->pmc.pmcState == FULL_POWER)
        return eHAL_STATUS_SUCCESS;

    /* If in IMPS State, then cancel the timer. */
    if (pMac->pmc.pmcState == IMPS)
        if (vos_timer_stop(&pMac->pmc.hImpsTimer) != VOS_STATUS_SUCCESS)
        {
            pmcLog(pMac, LOGE, FL("Cannot cancel IMPS timer"));
        }

    /* Enter Request Full Power State. */
    if (pmcEnterRequestFullPowerState(hHal, fullPowerReason) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    /* If able to enter Request Full Power State, then request is pending.
       Allocate entry for request full power callback routine list. */
    //If caller doesn't need a callback, simply waits up the chip.
    if( callbackRoutine )
    {
        if (palAllocateMemory(pMac->hHdd, (void **)&pEntry, sizeof(tRequestFullPowerEntry)) != eHAL_STATUS_SUCCESS)
        {
            pmcLog(pMac, LOGE, FL("Cannot allocate memory for request full power routine list entry"));
            PMC_ABORT;
            return eHAL_STATUS_FAILURE;
        }

        /* Store routine and context in entry. */
        pEntry->callbackRoutine = callbackRoutine;
        pEntry->callbackContext = callbackContext;

        /* Add entry to list; the callback fires when full power is reached. */
        csrLLInsertTail(&pMac->pmc.requestFullPowerList, &pEntry->link, TRUE);
    }

    return eHAL_STATUS_PMC_PENDING;
}
/******************************************************************************
*
* Name:  pmcRequestImps
*
* Description:
*    Request that the device be placed in Idle Mode Power Save (IMPS).
*    The Common Scan/Roam Module makes this request.  The device will be
*    placed into IMPS for the specified amount of time, and then returned
*    to full power.
*
* Parameters:
*    hHal - HAL handle for device
*    impsPeriod - amount of time to remain in IMPS (milliseconds)
*    callbackRoutine - routine to call when IMPS period has finished and
*                      the device has been brought to full power
*    callbackContext - value to be passed as parameter to routine specified
*                      above
*
* Returns:
*    eHAL_STATUS_SUCCESS - device will enter IMPS
*    eHAL_STATUS_PMC_DISABLED - IMPS is disabled
*    eHAL_STATUS_PMC_NOT_NOW - another module is prohibiting entering IMPS
*                              at this time
*    eHAL_STATUS_PMC_AC_POWER - IMPS is disabled when host operating from
*                               AC power
*    eHAL_STATUS_PMC_ALREADY_IN_IMPS - device is already in IMPS
*    eHAL_STATUS_PMC_SYS_ERROR - system error that prohibits entering IMPS
*
******************************************************************************/
eHalStatus pmcRequestImps (tHalHandle hHal, tANI_U32 impsPeriod,
                           void (*callbackRoutine) (void *callbackContext, eHalStatus status),
                           void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_IMPS_ENTER_REQ;
    psRequest.imps_period = impsPeriod;

    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, FL("Entering pmcRequestImps"));

    /* Guard: let pmcEnterImpsCheck report the specific refusal reason. */
    status = pmcEnterImpsCheck( pMac );
    if( !HAL_STATUS_SUCCESS( status ) )
    {
        return status;
    }

    /* Enter Request IMPS State; any failure here is a system error. */
    status = pmcEnterRequestImpsState( hHal );
    if( !HAL_STATUS_SUCCESS( status ) )
    {
        return eHAL_STATUS_PMC_SYS_ERROR;
    }

    /* Save the period and callback routine for when we need it. */
    pMac->pmc.impsPeriod = impsPeriod;
    pMac->pmc.impsCallbackRoutine = callbackRoutine;
    pMac->pmc.impsCallbackContext = callbackContext;

    return status;
}
/******************************************************************************
*
* Name:  pmcRegisterPowerSaveCheck
*
* Description:
*    Allows a routine to be registered so that the routine is called whenever
*    the device is about to enter one of the power save modes.  This routine
*    will say whether the device is allowed to enter the power save mode at
*    the time of the call.
*
* Parameters:
*    hHal - HAL handle for device
*    checkRoutine - routine to call before entering a power save mode, should
*                   return TRUE if the device is allowed to enter the power
*                   save mode, FALSE otherwise
*    checkContext - value to be passed as parameter to routine specified above
*
* Returns:
*    eHAL_STATUS_SUCCESS - successfully registered
*    eHAL_STATUS_FAILURE - not successfully registered
*
******************************************************************************/
eHalStatus pmcRegisterPowerSaveCheck (tHalHandle hHal, tANI_BOOLEAN (*checkRoutine) (void *checkContext),
                                      void *checkContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpPowerSaveCheckEntry pCheckEntry;

    pmcLog(pMac, LOG2, FL("Entering pmcRegisterPowerSaveCheck"));

    /* Each registration is a heap-allocated list entry owned by PMC until
       deregistration (see pmcDeregisterPowerSaveCheck). */
    if (palAllocateMemory(pMac->hHdd, (void **)&pCheckEntry, sizeof(tPowerSaveCheckEntry)) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate memory for power save check routine list entry"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    /* Record the caller's routine and context, then queue the entry. */
    pCheckEntry->checkRoutine = checkRoutine;
    pCheckEntry->checkContext = checkContext;
    csrLLInsertTail(&pMac->pmc.powerSaveCheckList, &pCheckEntry->link, FALSE);

    return eHAL_STATUS_SUCCESS;
}
/******************************************************************************
*
* Name: pmcDeregisterPowerSaveCheck
*
* Description:
* Deregisters a routine that was previously registered with
* pmcRegisterPowerSaveCheck.
*
* Parameters:
* hHal - HAL handle for device
* checkRoutine - routine to deregister
*
* Returns:
* eHAL_STATUS_SUCCESS - successfully deregistered
* eHAL_STATUS_FAILURE - not successfully deregistered
*
******************************************************************************/
/*
 * Deregister a power-save check routine previously registered with
 * pmcRegisterPowerSaveCheck.  Returns eHAL_STATUS_FAILURE when the
 * routine is not on the list or the entry cannot be removed/freed.
 */
eHalStatus pmcDeregisterPowerSaveCheck (tHalHandle hHal, tANI_BOOLEAN (*checkRoutine) (void *checkContext))
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tListElem *pCurr;
    tpPowerSaveCheckEntry pCheckEntry;

    pmcLog(pMac, LOG2, FL("Entering pmcDeregisterPowerSaveCheck"));

    /* Walk the power save check list looking for the matching routine. */
    for (pCurr = csrLLPeekHead(&pMac->pmc.powerSaveCheckList, FALSE);
         pCurr != NULL;
         pCurr = csrLLNext(&pMac->pmc.powerSaveCheckList, pCurr, FALSE))
    {
        pCheckEntry = GET_BASE_ADDR(pCurr, tPowerSaveCheckEntry, link);
        if (pCheckEntry->checkRoutine != checkRoutine)
            continue;

        /* Found it: unlink first, then release the storage. */
        if (!csrLLRemoveEntry(&pMac->pmc.powerSaveCheckList, pCurr, FALSE))
        {
            pmcLog(pMac, LOGE, FL("Cannot remove power save check routine list entry"));
            return eHAL_STATUS_FAILURE;
        }
        if (palFreeMemory(pMac->hHdd, pCheckEntry) != eHAL_STATUS_SUCCESS)
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory for power save check routine list entry"));
            PMC_ABORT;
            return eHAL_STATUS_FAILURE;
        }
        return eHAL_STATUS_SUCCESS;
    }

    /* Could not find matching entry. */
    return eHAL_STATUS_FAILURE;
}
/*
 * pmcProcessResponse
 *
 * Handle a PE response message (ENTER/EXIT IMPS/BMPS/UAPSD/WOWL _RSP)
 * against the command currently at the head of the SME active command
 * queue, and drive the PMC state machine accordingly.  When the
 * response matches the active command, the command is removed from the
 * active list and the pending queue is kicked; a response that does not
 * match the active command (or arrives with an empty active list) is
 * only logged.
 */
static void pmcProcessResponse( tpAniSirGlobal pMac, tSirSmeRsp *pMsg )
{
tListElem *pEntry = NULL;
tSmeCmd *pCommand = NULL;
/* Assume the response completes the active command; cleared below when
   the response does not correspond to that command. */
tANI_BOOLEAN fRemoveCommand = eANI_BOOLEAN_TRUE;
/* Responses are only meaningful against the head of the active queue. */
pEntry = csrLLPeekHead(&pMac->sme.smeCmdActiveList, LL_ACCESS_LOCK);
if(pEntry)
{
pCommand = GET_BASE_ADDR(pEntry, tSmeCmd, Link);
pmcLog(pMac, LOG2, FL("process message = %d"), pMsg->messageType);
/* Process each different type of message. */
switch (pMsg->messageType)
{
/* We got a response to our IMPS request. */
case eWNI_PMC_ENTER_IMPS_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_IMPS_RSP with status = %d"), pMsg->statusCode);
/* IMPS response is also valid for a standby request (both use the
   same PE message); anything else means no matching request. */
if( (eSmeCommandEnterImps != pCommand->command) && (eSmeCommandEnterStandby != pCommand->command) )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_IMPS_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
if(pMac->pmc.pmcState == REQUEST_IMPS)
{
/* Enter IMPS State if response indicates success. */
if (pMsg->statusCode == eSIR_SME_SUCCESS)
{
pMac->pmc.ImpsReqFailed = VOS_FALSE;
pmcEnterImpsState(pMac);
/* Report (once) how many failures preceded this success,
   then reset the failure counter. */
if (!(pMac->pmc.ImpsReqFailed || pMac->pmc.ImpsReqTimerFailed) && pMac->pmc.ImpsReqFailCnt)
{
pmcLog(pMac, LOGE,
FL("Response message to request to enter IMPS was failed %d times before success"),
pMac->pmc.ImpsReqFailCnt);
pMac->pmc.ImpsReqFailCnt = 0;
}
}
/* If response is failure, then we stay in Full Power State and tell everyone that we aren't going into IMPS. */
else
{
pMac->pmc.ImpsReqFailed = VOS_TRUE;
/* Rate-limit the error log to every 16th failure. */
if (!(pMac->pmc.ImpsReqFailCnt & 0xF))
{
pmcLog(pMac, LOGE,
FL("Response message to request to enter IMPS indicates failure, status %x, FailCnt - %d"),
pMsg->statusCode, ++pMac->pmc.ImpsReqFailCnt);
}
else
{
pMac->pmc.ImpsReqFailCnt++;
}
pmcEnterFullPowerState(pMac);
}
}
else if (pMac->pmc.pmcState == REQUEST_STANDBY)
{
/* Enter STANDBY State if response indicates success. */
if (pMsg->statusCode == eSIR_SME_SUCCESS)
{
pmcEnterStandbyState(pMac);
pmcDoStandbyCallbacks(pMac, eHAL_STATUS_SUCCESS);
}
/* If response is failure, then we stay in Full Power State
and tell everyone that we aren't going into STANDBY. */
else
{
pmcLog(pMac, LOGE, "PMC: response message to request to enter "
"standby indicates failure, status %x", pMsg->statusCode);
pmcEnterFullPowerState(pMac);
pmcDoStandbyCallbacks(pMac, eHAL_STATUS_FAILURE);
}
}
else
{
pmcLog(pMac, LOGE, "PMC: Enter IMPS rsp rcvd when device is "
"in %d state", pMac->pmc.pmcState);
}
break;
/* We got a response to our wake from IMPS request. */
case eWNI_PMC_EXIT_IMPS_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_IMPS_RSP with status = %d"), pMsg->statusCode);
if( eSmeCommandExitImps != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_IMPS_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_FULL_POWER)
{
pmcLog(pMac, LOGE, FL("Got Exit IMPS Response Message while "
"in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter Full Power State. */
/* Note: full power is entered even on a failure status; the
   failure is only logged. */
if (pMsg->statusCode != eSIR_SME_SUCCESS)
{
pmcLog(pMac, LOGE, FL("Response message to request to exit "
"IMPS indicates failure, status %x"), pMsg->statusCode);
}
pmcEnterFullPowerState(pMac);
break;
/* We got a response to our BMPS request. */
case eWNI_PMC_ENTER_BMPS_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_BMPS_RSP with status = %d"), pMsg->statusCode);
if( eSmeCommandEnterBmps != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_BMPS_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* The queued BMPS request has now been answered. */
pMac->pmc.bmpsRequestQueued = eANI_BOOLEAN_FALSE;
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_BMPS)
{
pmcLog(pMac, LOGE,
FL("Got Enter BMPS Response Message while in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter BMPS State if response indicates success. */
if (pMsg->statusCode == eSIR_SME_SUCCESS)
{
pmcEnterBmpsState(pMac);
/* Note: If BMPS was requested because of start UAPSD,
there will no entries for BMPS callback routines and
pmcDoBmpsCallbacks will be a No-Op*/
pmcDoBmpsCallbacks(pMac, eHAL_STATUS_SUCCESS);
}
/* If response is failure, then we stay in Full Power State and tell everyone that we aren't going into BMPS. */
else
{
pmcLog(pMac, LOGE,
FL("Response message to request to enter BMPS indicates failure, status %x"),
pMsg->statusCode);
pmcEnterFullPowerState(pMac);
//Do not call UAPSD callback here since it may be re-entered
pmcDoBmpsCallbacks(pMac, eHAL_STATUS_FAILURE);
}
break;
/* We got a response to our wake from BMPS request. */
case eWNI_PMC_EXIT_BMPS_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_BMPS_RSP with status = %d"), pMsg->statusCode);
if( eSmeCommandExitBmps != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_BMPS_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_FULL_POWER)
{
pmcLog(pMac, LOGE,
FL("Got Exit BMPS Response Message while in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter Full Power State. */
/* LOGP on failure: exit-BMPS failure is treated as fatal by the
   logging layer. */
if (pMsg->statusCode != eSIR_SME_SUCCESS)
{
pmcLog(pMac, LOGP,
FL("Response message to request to exit BMPS indicates failure, status %x"),
pMsg->statusCode);
}
pmcEnterFullPowerState(pMac);
break;
/* We got a response to our Start UAPSD request. */
case eWNI_PMC_ENTER_UAPSD_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_UAPSD_RSP with status = %d"), pMsg->statusCode);
if( eSmeCommandEnterUapsd != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_UAPSD_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_START_UAPSD)
{
pmcLog(pMac, LOGE,
FL("Got Enter Uapsd rsp Message while in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter UAPSD State if response indicates success. */
if (pMsg->statusCode == eSIR_SME_SUCCESS)
{
pmcEnterUapsdState(pMac);
pmcDoStartUapsdCallbacks(pMac, eHAL_STATUS_SUCCESS);
}
/* If response is failure, then we try to put the chip back in
BMPS mode*/
else {
pmcLog(pMac, LOGE, "PMC: response message to request to enter "
"UAPSD indicates failure, status %x", pMsg->statusCode);
//Need to reset the UAPSD flag so pmcEnterBmpsState won't try to enter UAPSD.
pMac->pmc.uapsdSessionRequired = FALSE;
pmcEnterBmpsState(pMac);
//UAPSD will not be retied in this case so tell requester we are done with failure
pmcDoStartUapsdCallbacks(pMac, eHAL_STATUS_FAILURE);
}
break;
/* We got a response to our Stop UAPSD request. */
case eWNI_PMC_EXIT_UAPSD_RSP:
pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_UAPSD_RSP with status = %d"), pMsg->statusCode);
if( eSmeCommandExitUapsd != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_UAPSD_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_STOP_UAPSD)
{
pmcLog(pMac, LOGE,
FL("Got Exit Uapsd rsp Message while in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter BMPS State */
/* BMPS is entered even on a failure status; the failure is only
   logged (at fatal level). */
if (pMsg->statusCode != eSIR_SME_SUCCESS) {
pmcLog(pMac, LOGP, "PMC: response message to request to exit "
"UAPSD indicates failure, status %x", pMsg->statusCode);
}
pmcEnterBmpsState(pMac);
break;
/* We got a response to our enter WOWL request. */
case eWNI_PMC_ENTER_WOWL_RSP:
if( eSmeCommandEnterWowl != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_WOWL_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_ENTER_WOWL)
{
pmcLog(pMac, LOGE, FL("Got eWNI_PMC_ENTER_WOWL_RSP while in state %s"),
pmcGetPmcStateStr(pMac->pmc.pmcState));
break;
}
/* Enter WOWL State if response indicates success. */
if (pMsg->statusCode == eSIR_SME_SUCCESS) {
pmcEnterWowlState(pMac);
pmcDoEnterWowlCallbacks(pMac, eHAL_STATUS_SUCCESS);
}
/* If response is failure, then we try to put the chip back in
BMPS mode*/
else {
pmcLog(pMac, LOGE, "PMC: response message to request to enter "
"WOWL indicates failure, status %x", pMsg->statusCode);
pmcEnterBmpsState(pMac);
pmcDoEnterWowlCallbacks(pMac, eHAL_STATUS_FAILURE);
}
break;
/* We got a response to our exit WOWL request. */
case eWNI_PMC_EXIT_WOWL_RSP:
if( eSmeCommandExitWowl != pCommand->command )
{
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_WOWL_RSP without request"));
fRemoveCommand = eANI_BOOLEAN_FALSE;
break;
}
/* Check that we are in the correct state for this message. */
if (pMac->pmc.pmcState != REQUEST_EXIT_WOWL)
{
pmcLog(pMac, LOGE, FL("Got Exit WOWL rsp Message while in state %d"), pMac->pmc.pmcState);
break;
}
/* Enter BMPS State */
if (pMsg->statusCode != eSIR_SME_SUCCESS) {
pmcLog(pMac, LOGP, "PMC: response message to request to exit "
"WOWL indicates failure, status %x", pMsg->statusCode);
}
pmcEnterBmpsState(pMac);
break;
default:
pmcLog(pMac, LOGE, FL("Invalid message type %d received"), pMsg->messageType);
PMC_ABORT;
break;
}//switch
/* Completed: retire the active command and let the next pending
   command run. */
if( fRemoveCommand )
{
if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList, pEntry, LL_ACCESS_LOCK ) )
{
pmcReleaseCommand( pMac, pCommand );
smeProcessPendingQueue( pMac );
}
}
}
else
{
pmcLog(pMac, LOGE, FL("message type %d received but no request is found"), pMsg->messageType);
}
}
/******************************************************************************
*
* Name: pmcMessageProcessor
*
* Description:
* Process a message received by PMC.
*
* Parameters:
* hHal - HAL handle for device
* pMsg - pointer to received message
*
* Returns:
* nothing
*
******************************************************************************/
void pmcMessageProcessor (tHalHandle hHal, tSirSmeRsp *pMsg)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
pmcLog(pMac, LOG2, FL("Entering pmcMessageProcessor, message type %d"), pMsg->messageType);
/* eWNI_PMC_EXIT_BMPS_IND is an unsolicited indication handled here;
   every other message is treated as a response to an outstanding PMC
   command and routed to pmcProcessResponse(). */
switch( pMsg->messageType )
{
case eWNI_PMC_EXIT_BMPS_IND:
//When PMC needs to handle more indication from PE, they need to be added here.
{
/* Device left BMPS on its own. */
pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_BMPS_IND with status = %d"), pMsg->statusCode);
/* Check that we are in the correct state for this message. */
switch(pMac->pmc.pmcState)
{
/* All states in which BMPS can legitimately be (or have been)
   active: just log the indication. */
case BMPS:
case REQUEST_START_UAPSD:
case UAPSD:
case REQUEST_STOP_UAPSD:
case REQUEST_ENTER_WOWL:
case WOWL:
case REQUEST_EXIT_WOWL:
case REQUEST_FULL_POWER:
pmcLog(pMac, LOGW, FL("Got eWNI_PMC_EXIT_BMPS_IND while in state %d"), pMac->pmc.pmcState);
break;
default:
/* Indication arrived in a state where BMPS cannot have been
   active: treated as a fatal inconsistency. */
pmcLog(pMac, LOGE, FL("Got eWNI_PMC_EXIT_BMPS_IND while in state %d"), pMac->pmc.pmcState);
PMC_ABORT;
break;
}
/* Enter Full Power State. */
if (pMsg->statusCode != eSIR_SME_SUCCESS)
{
pmcLog(pMac, LOGP, FL("Exit BMPS indication indicates failure, status %x"), pMsg->statusCode);
}
else
{
/* Propagate PE's exit reason into the full-power request. */
tpSirSmeExitBmpsInd pExitBmpsInd = (tpSirSmeExitBmpsInd)pMsg;
pmcEnterRequestFullPowerState(hHal, pExitBmpsInd->exitBmpsReason);
}
break;
}
default:
/* All other message types are responses to PMC requests. */
pmcProcessResponse( pMac, pMsg );
break;
}
}
/*
 * Decide whether the current connection topology permits BMPS: an
 * infrastructure association must exist, and no IBSS, BT-AMP, or
 * additional concurrent session (concurrent infra, SAP, or P2P-GO) may
 * be active.  Returns eANI_BOOLEAN_TRUE when BMPS may be attempted.
 */
tANI_BOOLEAN pmcValidateConnectState( tHalHandle hHal )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    /* Must be associated as an infrastructure STA. */
    if (!csrIsInfraConnected(pMac)) {
        pmcLog(pMac, LOGW, "PMC: STA not associated. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    /* Cannot have other session */
    if (csrIsIBSSStarted(pMac)) {
        pmcLog(pMac, LOGW, "PMC: IBSS started. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }
    if (csrIsBTAMPStarted(pMac)) {
        pmcLog(pMac, LOGW, "PMC: BT-AMP exists. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    /* Reject concurrency: a second infra link, SAP, or P2P-GO. */
    if (vos_concurrent_sessions_running() &&
        (csrIsConcurrentInfraConnected(pMac) ||
         (vos_get_concurrency_mode() & VOS_SAP) ||
         (vos_get_concurrency_mode() & VOS_P2P_GO))) {
        pmcLog(pMac, LOGW, "PMC: Multiple active sessions exists. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    return eANI_BOOLEAN_TRUE;
}
/*
 * Decide whether IMPS may be entered: no IBSS or BT-AMP session may be
 * running and every session must be disconnected.  Returns
 * eANI_BOOLEAN_TRUE when IMPS may be attempted.
 */
tANI_BOOLEAN pmcAllowImps( tHalHandle hHal )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    /* Cannot have other session like IBSS or BT AMP running */
    if (csrIsIBSSStarted(pMac)) {
        pmcLog(pMac, LOGW, "PMC: IBSS started. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }
    if (csrIsBTAMPStarted(pMac)) {
        pmcLog(pMac, LOGW, "PMC: BT-AMP exists. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    /* All sessions must be disconnected to allow IMPS */
    if (!csrIsAllSessionDisconnected(pMac)) {
        pmcLog(pMac, LOGW, "PMC: Atleast one connected session. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    return eANI_BOOLEAN_TRUE;
}
/******************************************************************************
*
* Name: pmcRequestBmps
*
* Description:
* Request that the device be put in BMPS state.
*
* Parameters:
* hHal - HAL handle for device
* callbackRoutine - Callback routine invoked in case of success/failure
* callbackContext - value to be passed as parameter to routine specified
* above
*
* Returns:
* eHAL_STATUS_SUCCESS - device is in BMPS state
* eHAL_STATUS_FAILURE - device cannot be brought to BMPS state
* eHAL_STATUS_PMC_PENDING - device is being brought to BMPS state,
*
******************************************************************************/
eHalStatus pmcRequestBmps (
tHalHandle hHal,
void (*callbackRoutine) (void *callbackContext, eHalStatus status),
void *callbackContext)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
tpRequestBmpsEntry pEntry;
eHalStatus status;
#ifdef FEATURE_WLAN_DIAG_SUPPORT
WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);
vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
psRequest.event_subtype = WLAN_BMPS_ENTER_REQ;
WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif
pmcLog(pMac, LOG2, "PMC: entering pmcRequestBmps");
/* If already in BMPS, just return. */
/* UAPSD (and transitions into it) count as "already in BMPS" since
   UAPSD is layered on top of BMPS. */
if (pMac->pmc.pmcState == BMPS || REQUEST_START_UAPSD == pMac->pmc.pmcState || UAPSD == pMac->pmc.pmcState)
{
pmcLog(pMac, LOG2, "PMC: Device already in BMPS pmcState %d", pMac->pmc.pmcState);
pMac->pmc.bmpsRequestedByHdd = TRUE;
return eHAL_STATUS_SUCCESS;
}
status = pmcEnterBmpsCheck( pMac );
if(HAL_STATUS_SUCCESS( status ))
{
status = pmcEnterRequestBmpsState(hHal);
/* Enter Request BMPS State. */
if ( HAL_STATUS_SUCCESS( status ) )
{
/* Remember that HDD requested BMPS. This flag will be used to put the
device back into BMPS if any module other than HDD (e.g. CSR, QoS, or BAP)
requests full power for any reason */
pMac->pmc.bmpsRequestedByHdd = TRUE;
/* If able to enter Request BMPS State, then request is pending.
Allocate entry for request BMPS callback routine list. */
if (palAllocateMemory(
pMac->hHdd, (void **)&pEntry,
sizeof(tRequestBmpsEntry)) != eHAL_STATUS_SUCCESS)
{
pmcLog(pMac, LOGE, "PMC: cannot allocate memory for request "
"BMPS routine list entry");
return eHAL_STATUS_FAILURE;
}
/* Store routine and context in entry. */
pEntry->callbackRoutine = callbackRoutine;
pEntry->callbackContext = callbackContext;
/* Add entry to list. */
csrLLInsertTail(&pMac->pmc.requestBmpsList, &pEntry->link, FALSE);
status = eHAL_STATUS_PMC_PENDING;
}
else
{
status = eHAL_STATUS_FAILURE;
}
}
/* Retry to enter the BMPS if the
status = eHAL_STATUS_PMC_NOT_NOW */
else if (status == eHAL_STATUS_PMC_NOT_NOW)
{
/* Restart the traffic timer so BMPS is retried later. */
pmcStopTrafficTimer(hHal);
pmcLog(pMac, LOG1, FL("Can't enter BMPS+++"));
if (pmcShouldBmpsTimerRun(pMac))
{
if (pmcStartTrafficTimer(pMac,
pMac->pmc.bmpsConfig.trafficMeasurePeriod)
!= eHAL_STATUS_SUCCESS)
{
pmcLog(pMac, LOG1, FL("Cannot start BMPS Retry timer"));
}
/* NOTE(review): this log fires even when the timer failed to
   start above — possibly intended for the success path only;
   verify against upstream. */
pmcLog(pMac, LOG1,
FL("BMPS Retry Timer already running or started"));
}
}
/* eHAL_STATUS_PMC_NOT_NOW (or other check failure) propagates to the
   caller unchanged. */
return status;
}
/******************************************************************************
*
* Name: pmcStartUapsd
*
* Description:
* Request that the device be put in UAPSD state.
*
* Parameters:
* hHal - HAL handle for device
* callbackRoutine - Callback routine invoked in case of success/failure
* callbackContext - value to be passed as parameter to routine specified
* above
*
* Returns:
* eHAL_STATUS_SUCCESS - device is in UAPSD state
* eHAL_STATUS_FAILURE - device cannot be brought to UAPSD state
* eHAL_STATUS_PMC_PENDING - device is being brought to UAPSD state
* eHAL_STATUS_PMC_DISABLED - UAPSD is disabled or BMPS mode is disabled
*
******************************************************************************/
eHalStatus pmcStartUapsd (
tHalHandle hHal,
void (*callbackRoutine) (void *callbackContext, eHalStatus status),
void *callbackContext)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
tpStartUapsdEntry pEntry;
#ifdef FEATURE_WLAN_DIAG_SUPPORT
WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);
vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
psRequest.event_subtype = WLAN_UAPSD_START_REQ;
WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif
pmcLog(pMac, LOG2, "PMC: entering pmcStartUapsd");
/* PMC must be started and ready before UAPSD can be requested. */
if( !PMC_IS_READY(pMac) )
{
pmcLog(pMac, LOGE, FL("Requesting UAPSD when PMC not ready"));
pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
return eHAL_STATUS_FAILURE;
}
/* Check if BMPS is enabled. */
/* UAPSD rides on top of BMPS, so BMPS must be enabled too. */
if (!pMac->pmc.bmpsEnabled)
{
pmcLog(pMac, LOGE, "PMC: Cannot enter UAPSD. BMPS is disabled");
return eHAL_STATUS_PMC_DISABLED;
}
/* Check if UAPSD is enabled. */
if (!pMac->pmc.uapsdEnabled)
{
pmcLog(pMac, LOGE, "PMC: Cannot enter UAPSD. UAPSD is disabled");
return eHAL_STATUS_PMC_DISABLED;
}
/* If already in UAPSD, just return. */
if (pMac->pmc.pmcState == UAPSD)
return eHAL_STATUS_SUCCESS;
/* Check that we are associated. */
if (!pmcValidateConnectState( pMac ))
{
pmcLog(pMac, LOGE, "PMC: STA not associated with an AP. UAPSD cannot be entered");
return eHAL_STATUS_FAILURE;
}
/* Enter REQUEST_START_UAPSD State. */
if (pmcEnterRequestStartUapsdState(hHal) != eHAL_STATUS_SUCCESS)
return eHAL_STATUS_FAILURE;
/* callbackRoutine may be NULL; in that case the request proceeds but
   no completion notification is queued. */
if( NULL != callbackRoutine )
{
/* If success then request is pending. Allocate entry for callback routine list. */
if (palAllocateMemory(pMac->hHdd, (void **)&pEntry,
sizeof(tStartUapsdEntry)) != eHAL_STATUS_SUCCESS)
{
pmcLog(pMac, LOGE, "PMC: cannot allocate memory for request "
"start UAPSD routine list entry");
return eHAL_STATUS_FAILURE;
}
/* Store routine and context in entry. */
pEntry->callbackRoutine = callbackRoutine;
pEntry->callbackContext = callbackContext;
/* Add entry to list. */
csrLLInsertTail(&pMac->pmc.requestStartUapsdList, &pEntry->link, FALSE);
}
/* Request accepted; completion is asynchronous. */
return eHAL_STATUS_PMC_PENDING;
}
/******************************************************************************
*
* Name: pmcStopUapsd
*
* Description:
* Request that the device be put out of UAPSD state.
*
* Parameters:
* hHal - HAL handle for device
*
* Returns:
* eHAL_STATUS_SUCCESS - device is put out of UAPSD and back in BMPS state
* eHAL_STATUS_FAILURE - device cannot be brought out of UAPSD state
*
******************************************************************************/
/*
 * Take the device out of UAPSD (back into BMPS).  Clears any buffered
 * enter-UAPSD request first; if the device is already outside UAPSD the
 * call succeeds without doing anything.
 */
eHalStatus pmcStopUapsd (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);
    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_UAPSD_STOP_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcStopUapsd");

    /* Clear any buffered command for entering UAPSD */
    pMac->pmc.uapsdSessionRequired = FALSE;

    /* Nothing to do if already outside UAPSD; some other module
       (HDD, BT-AMP) may have requested full power already. */
    if ((UAPSD != pMac->pmc.pmcState) && (REQUEST_STOP_UAPSD != pMac->pmc.pmcState))
    {
        pmcLog(pMac, LOGW, "PMC: Device is already out of UAPSD "
            "state. Current state is %d", pMac->pmc.pmcState);
        return eHAL_STATUS_SUCCESS;
    }

    /* Kick the state machine into REQUEST_STOP_UAPSD. */
    return (pmcEnterRequestStopUapsdState(hHal) == eHAL_STATUS_SUCCESS)
        ? eHAL_STATUS_SUCCESS : eHAL_STATUS_FAILURE;
}
/* ---------------------------------------------------------------------------
\fn pmcRequestStandby
\brief Request that the device be put in standby.
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Callback routine invoked in case of success/failure
\param callbackContext - value to be passed as parameter to callback
\return eHalStatus
eHAL_STATUS_SUCCESS - device is in Standby mode
eHAL_STATUS_FAILURE - device cannot be put in standby mode
eHAL_STATUS_PMC_PENDING - device is being put in standby mode
---------------------------------------------------------------------------*/
extern eHalStatus pmcRequestStandby (
tHalHandle hHal,
void (*callbackRoutine) (void *callbackContext, eHalStatus status),
void *callbackContext)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
#ifdef FEATURE_WLAN_DIAG_SUPPORT
WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);
vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
psRequest.event_subtype = WLAN_ENTER_STANDBY_REQ;
WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif
pmcLog(pMac, LOG2, "PMC: entering pmcRequestStandby");
/* Check if standby is enabled. */
if (!pMac->pmc.standbyEnabled)
{
pmcLog(pMac, LOGE, "PMC: Cannot enter standby. Standby is disabled");
return eHAL_STATUS_PMC_DISABLED;
}
/* PMC must be started and ready before standby can be requested. */
if( !PMC_IS_READY(pMac) )
{
pmcLog(pMac, LOGE, FL("Requesting standby when PMC not ready"));
pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
return eHAL_STATUS_FAILURE;
}
/* If already in STANDBY, just return. */
if (pMac->pmc.pmcState == STANDBY)
return eHAL_STATUS_SUCCESS;
/* IBSS / BT-AMP sessions preclude standby; caller may retry later. */
if (csrIsIBSSStarted(pMac) || csrIsBTAMPStarted(pMac))
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
"WLAN: IBSS or BT-AMP session present. Cannot honor standby request");
return eHAL_STATUS_PMC_NOT_NOW;
}
/* Enter Request Standby State. */
if (pmcEnterRequestStandbyState(hHal) != eHAL_STATUS_SUCCESS)
return eHAL_STATUS_FAILURE;
/* Save the callback routine for when we need it. */
pMac->pmc.standbyCallbackRoutine = callbackRoutine;
pMac->pmc.standbyCallbackContext = callbackContext;
/* Request accepted; completion is asynchronous. */
return eHAL_STATUS_PMC_PENDING;
}
/* ---------------------------------------------------------------------------
\fn pmcRegisterDeviceStateUpdateInd
\brief Register a callback routine that is called whenever
the device enters a new device state (Full Power, BMPS, UAPSD)
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Callback routine to be registered
\param callbackContext - Cookie to be passed back during callback
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully registered
eHAL_STATUS_FAILURE - not successfully registered
---------------------------------------------------------------------------*/
/*
 * Register a callback invoked whenever PMC transitions to a new device
 * power state (Full Power, BMPS, UAPSD, ...).  The routine/context pair
 * is kept on pMac->pmc.deviceStateUpdateIndList.
 */
extern eHalStatus pmcRegisterDeviceStateUpdateInd (tHalHandle hHal,
void (*callbackRoutine) (void *callbackContext, tPmcState pmcState),
void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpDeviceStateUpdateIndEntry pNewEntry = NULL;

    pmcLog(pMac, LOG2, FL("Entering pmcRegisterDeviceStateUpdateInd"));

    /* Allocate entry for device power state update indication. */
    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pNewEntry, sizeof(tDeviceStateUpdateIndEntry)))
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate memory for device power state update indication"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    /* Record the routine/context pair and append it to the list. */
    pNewEntry->callbackRoutine = callbackRoutine;
    pNewEntry->callbackContext = callbackContext;
    csrLLInsertTail(&pMac->pmc.deviceStateUpdateIndList, &pNewEntry->link, FALSE);

    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcDeregisterDeviceStateUpdateInd
\brief Deregister a routine that was registered for device state changes
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Callback routine to be deregistered
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully deregistered
eHAL_STATUS_FAILURE - not successfully deregistered
---------------------------------------------------------------------------*/
/*
 * Deregister a device-state-update callback previously registered via
 * pmcRegisterDeviceStateUpdateInd.  Returns eHAL_STATUS_FAILURE when the
 * routine is not on the list or the entry cannot be removed/freed.
 */
eHalStatus pmcDeregisterDeviceStateUpdateInd (tHalHandle hHal,
void (*callbackRoutine) (void *callbackContext, tPmcState pmcState))
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tListElem *pCurr;
    tpDeviceStateUpdateIndEntry pIndEntry;

    pmcLog(pMac, LOG2, FL("Entering pmcDeregisterDeviceStateUpdateInd"));

    /* Scan the update-indication list for the matching routine. */
    for (pCurr = csrLLPeekHead(&pMac->pmc.deviceStateUpdateIndList, FALSE);
         pCurr != NULL;
         pCurr = csrLLNext(&pMac->pmc.deviceStateUpdateIndList, pCurr, FALSE))
    {
        pIndEntry = GET_BASE_ADDR(pCurr, tDeviceStateUpdateIndEntry, link);
        if (pIndEntry->callbackRoutine != callbackRoutine)
            continue;

        /* Found it: unlink first, then release the storage. */
        if (!csrLLRemoveEntry(&pMac->pmc.deviceStateUpdateIndList, pCurr, FALSE))
        {
            pmcLog(pMac, LOGE, FL("Cannot remove device state update ind entry from list"));
            return eHAL_STATUS_FAILURE;
        }
        if (palFreeMemory(pMac->hHdd, pIndEntry) != eHAL_STATUS_SUCCESS)
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory for device state update ind routine list entry"));
            PMC_ABORT;
            return eHAL_STATUS_FAILURE;
        }
        return eHAL_STATUS_SUCCESS;
    }

    /* Could not find matching entry. */
    return eHAL_STATUS_FAILURE;
}
/* ---------------------------------------------------------------------------
\fn pmcReady
\brief fn to inform PMC that eWNI_SME_SYS_READY_IND has been sent to PE.
This acts as a trigger to send a message to PE to update the power
save related config to FW. Note that if HDD configures any power save
related stuff before this API is invoked, PMC will buffer all the
configuration.
\param hHal - The handle returned by macOpen.
\return eHalStatus
---------------------------------------------------------------------------*/
/*
 * Mark PMC ready once eWNI_SME_SYS_READY_IND has gone to PE, then push
 * the buffered power-save configuration down to PE/FW.
 */
eHalStatus pmcReady(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcReady"));

    /* pmcStart must have run first; STOPPED here is a fatal ordering bug. */
    if (STOPPED == pMac->pmc.pmcState)
    {
        pmcLog(pMac, LOGP, FL("pmcReady is invoked even before pmcStart"));
        return eHAL_STATUS_FAILURE;
    }

    pMac->pmc.pmcReady = TRUE;

    /* Flush the buffered power-save configuration to PE. */
    return (pmcSendPowerSaveConfigMessage(hHal) == eHAL_STATUS_SUCCESS)
        ? eHAL_STATUS_SUCCESS : eHAL_STATUS_FAILURE;
}
/* ---------------------------------------------------------------------------
\fn pmcWowlAddBcastPattern
\brief Add a pattern for Pattern Byte Matching in Wowl mode. Firmware will
do a pattern match on these patterns when Wowl is enabled during BMPS
mode. Note that Firmware performs the pattern matching only on
broadcast frames and while Libra is in BMPS mode.
\param hHal - The handle returned by macOpen.
\param pattern - Pointer to the pattern to be added
\return eHalStatus
eHAL_STATUS_FAILURE Cannot add pattern
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
eHalStatus pmcWowlAddBcastPattern (
tHalHandle hHal,
tpSirWowlAddBcastPtrn pattern,
tANI_U8 sessionId)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
#ifdef FEATURE_WLAN_DIAG_SUPPORT
vos_log_powersave_wow_add_ptrn_pkt_type *log_ptr = NULL;
WLAN_VOS_DIAG_LOG_ALLOC(log_ptr, vos_log_powersave_wow_add_ptrn_pkt_type, LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C);
#endif //#ifdef FEATURE_WLAN_DIAG_SUPPORT
pmcLog(pMac, LOG2, "PMC: entering pmcWowlAddBcastPattern");
/* Validate the pointer arguments before anything dereferences them. */
if(pattern == NULL)
{
pmcLog(pMac, LOGE, FL("Null broadcast pattern being passed"));
return eHAL_STATUS_FAILURE;
}
if( pSession == NULL)
{
pmcLog(pMac, LOGE, FL("Session not found "));
return eHAL_STATUS_FAILURE;
}
#ifdef FEATURE_WLAN_DIAG_SUPPORT
/* Log the pattern contents to the diag channel (best effort: log_ptr
   may be NULL when allocation failed). */
if( log_ptr )
{
log_ptr->pattern_id = pattern->ucPatternId;
log_ptr->pattern_byte_offset = pattern->ucPatternByteOffset;
log_ptr->pattern_size = pattern->ucPatternSize;
log_ptr->pattern_mask_size = pattern->ucPatternMaskSize;
vos_mem_copy(log_ptr->pattern, pattern->ucPattern, SIR_WOWL_BCAST_PATTERN_MAX_SIZE);
/* 1 bit in the pattern mask denotes 1 byte of pattern hence pattern mask size is 1/8 */
vos_mem_copy(log_ptr->pattern_mask, pattern->ucPatternMask, SIR_WOWL_BCAST_PATTERN_MAX_SIZE >> 3);
}
WLAN_VOS_DIAG_LOG_REPORT(log_ptr);
WLAN_VOS_DIAG_LOG_FREE(log_ptr);
#endif
/* Pattern id must index into the supported pattern table. */
if(pattern->ucPatternId >= SIR_WOWL_BCAST_MAX_NUM_PATTERNS )
{
pmcLog(pMac, LOGE, FL("Pattern Id must range from 0 to %d"), SIR_WOWL_BCAST_MAX_NUM_PATTERNS-1);
return eHAL_STATUS_FAILURE;
}
/* Chip cannot accept the request while in (or entering) standby/IMPS. */
if( pMac->pmc.pmcState == STANDBY || pMac->pmc.pmcState == REQUEST_STANDBY )
{
pmcLog(pMac, LOGE, FL("Cannot add WoWL Pattern as chip is in %s state"),
pmcGetPmcStateStr(pMac->pmc.pmcState));
return eHAL_STATUS_FAILURE;
}
if( pMac->pmc.pmcState == IMPS || pMac->pmc.pmcState == REQUEST_IMPS )
{
pmcLog(pMac, LOGE, FL("Cannot add WoWL Pattern as chip is in %s state"),
pmcGetPmcStateStr(pMac->pmc.pmcState));
return eHAL_STATUS_FAILURE;
}
if( !csrIsConnStateConnected(pMac, sessionId) )
{
pmcLog(pMac, LOGE, FL("Cannot add WoWL Pattern session in %d state"),
pSession->connectState);
return eHAL_STATUS_FAILURE;
}
/* Address the request to the currently-connected BSS. */
vos_mem_copy(pattern->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
if (pmcSendMessage(hHal, eWNI_PMC_WOWL_ADD_BCAST_PTRN, pattern, sizeof(tSirWowlAddBcastPtrn))
!= eHAL_STATUS_SUCCESS)
{
pmcLog(pMac, LOGE, FL("Send of eWNI_PMC_WOWL_ADD_BCAST_PTRN to PE failed"));
return eHAL_STATUS_FAILURE;
}
return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcWowlDelBcastPattern
\brief Delete a pattern that was added for Pattern Byte Matching.
\param hHal - The handle returned by macOpen.
\param pattern - Pattern to be deleted
\return eHalStatus
eHAL_STATUS_FAILURE Cannot delete pattern
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/*
 * pmcWowlDelBcastPattern
 *
 * Delete a broadcast pattern previously added for WoWL pattern-byte
 * matching.  Validates the pattern pointer/id and the current PMC
 * state; when the chip is in IMPS the message is deferred until the
 * chip is back at full power.
 *
 * Fix: `pattern` was dereferenced (diag event field, pattern-id range
 * check) with no NULL check anywhere, while the parallel
 * pmcWowlAddBcastPattern does validate it.  Guard the diag dereference
 * and reject a NULL pattern explicitly.
 *
 * Returns eHAL_STATUS_SUCCESS when the request was sent or deferred,
 * eHAL_STATUS_FAILURE (or the defer status) otherwise.
 */
eHalStatus pmcWowlDelBcastPattern (
tHalHandle hHal,
tpSirWowlDelBcastPtrn pattern,
tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);
    vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
    wowRequest.event_subtype = WLAN_WOW_DEL_PTRN_REQ;
    /* pattern is only validated below; guard the dereference here. */
    wowRequest.wow_del_ptrn_id = (pattern != NULL) ? pattern->ucPatternId : 0;
    WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif
    pmcLog(pMac, LOG2, "PMC: entering pmcWowlDelBcastPattern");
    /* Reject a missing pattern before any further dereference, matching
       the validation done in pmcWowlAddBcastPattern. */
    if (NULL == pattern)
    {
        pmcLog(pMac, LOGE, FL("Null broadcast pattern being passed"));
        return eHAL_STATUS_FAILURE;
    }
    if( NULL == pSession )
    {
        pmcLog(pMac, LOGE, FL("Session not found "));
        return eHAL_STATUS_FAILURE;
    }
    /* Pattern id must index into the supported pattern table. */
    if(pattern->ucPatternId >= SIR_WOWL_BCAST_MAX_NUM_PATTERNS )
    {
        pmcLog(pMac, LOGE, FL("Pattern Id must range from 0 to %d"),
            SIR_WOWL_BCAST_MAX_NUM_PATTERNS-1);
        return eHAL_STATUS_FAILURE;
    }
    /* Chip cannot accept the request while in (or entering) standby. */
    if(pMac->pmc.pmcState == STANDBY || pMac->pmc.pmcState == REQUEST_STANDBY)
    {
        pmcLog(pMac, LOGE, FL("Cannot delete WoWL Pattern as chip is in %s state"),
            pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }
    /* Address the request to the currently-connected BSS. */
    vos_mem_copy(pattern->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
    if( pMac->pmc.pmcState == IMPS || pMac->pmc.pmcState == REQUEST_IMPS )
    {
        eHalStatus status;
        //Wake up the chip first
        status = pmcDeferMsg( pMac, eWNI_PMC_WOWL_DEL_BCAST_PTRN,
                        pattern, sizeof(tSirWowlDelBcastPtrn) );
        if( eHAL_STATUS_PMC_PENDING == status )
        {
            /* Message buffered; it is sent once full power resumes. */
            return eHAL_STATUS_SUCCESS;
        }
        else
        {
            //either fail or already in full power
            if( !HAL_STATUS_SUCCESS( status ) )
            {
                return ( status );
            }
            //else let it through because it is in full power state
        }
    }
    if (pmcSendMessage(hHal, eWNI_PMC_WOWL_DEL_BCAST_PTRN, pattern, sizeof(tSirWowlDelBcastPtrn))
        != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Send of eWNI_PMC_WOWL_DEL_BCAST_PTRN to PE failed"));
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcEnterWowl
\brief Request that the device be put into WoWLAN (Wake on Wireless LAN) mode.
Note 1: If "fullPowerReason" specified in this API is set to
eSME_FULL_PWR_NEEDED_BY_HDD, PMC will clear any "buffered wowl" requests
and also clear any "buffered BMPS requests by HDD". Assumption is that since
HDD is requesting full power, we need to undo any previous HDD requests for
BMPS (using sme_RequestBmps) or WoWL (using sme_EnterWoWL). If the reason is
specified anything other than above, the buffered requests for BMPS and WoWL
will not be cleared.
Note 2: Requesting full power (no matter what the fullPowerReason is) doesn't
disable the "auto bmps timer" (if it is enabled) or clear any "buffered uapsd
request".
Note 3: When the device finally enters Full Power PMC will start a timer
if any of the following holds true:
- Auto BMPS mode is enabled
- Uapsd request is pending
- HDD's request for BMPS is pending
- HDD's request for WoWL is pending
On timer expiry PMC will attempt to put the device in BMPS mode if following
(in addition to those listed above) holds true:
- Polling of all modules through the Power Save Check routine passes
- STA is associated to an access point
\param hHal - The handle returned by macOpen.
\param - enterWowlCallbackRoutine Callback routine invoked in case of success/failure
\param - enterWowlCallbackContext - Cookie to be passed back during callback
\param - wakeReasonIndCB Callback routine invoked for Wake Reason Indication
\param - wakeReasonIndCBContext - Cookie to be passed back during callback
\param - fullPowerReason - Reason why this API is being invoked. SME needs to
distinguish between BAP and HDD requests
\return eHalStatus - status
eHAL_STATUS_SUCCESS - device brought to full power state
eHAL_STATUS_FAILURE - device cannot be brought to full power state
eHAL_STATUS_PMC_PENDING - device is being brought to full power state,
---------------------------------------------------------------------------*/
/* Request entry into WoWLAN mode for the given session.
 * Validates PMC readiness, BMPS/WoWL enablement, association state and
 * pending-UAPSD state; caches the caller's callbacks and the enter
 * parameters in pMac->pmc, then drives the state machine through
 * pmcRequestEnterWowlState().  On success returns eHAL_STATUS_PMC_PENDING;
 * completion is reported asynchronously via enterWowlCallbackRoutine. */
eHalStatus pmcEnterWowl (
tHalHandle hHal,
void (*enterWowlCallbackRoutine) (void *callbackContext, eHalStatus status),
void *enterWowlCallbackContext,
#ifdef WLAN_WAKEUP_EVENTS
void (*wakeReasonIndCB) (void *callbackContext, tpSirWakeReasonInd pWakeReasonInd),
void *wakeReasonIndCBContext,
#endif // WLAN_WAKEUP_EVENTS
tpSirSmeWowlEnterParams wowlEnterParams, tANI_U8 sessionId)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
#ifdef FEATURE_WLAN_DIAG_SUPPORT
/* Emit a diag event describing the WoW entry request: bit 0 of wow_type =
   magic-packet wake, bit 1 = pattern-filtering wake. */
WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);
vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
wowRequest.event_subtype = WLAN_WOW_ENTER_REQ;
wowRequest.wow_type = 0;
if(wowlEnterParams->ucMagicPktEnable)
{
wowRequest.wow_type |= 1;
/* Copy the 6-byte magic pattern (MAC-address sized) for the diag log. */
vos_mem_copy(wowRequest.wow_magic_pattern, (tANI_U8 *)wowlEnterParams->magicPtrn, 6);
}
if(wowlEnterParams->ucPatternFilteringEnable)
{
wowRequest.wow_type |= 2;
}
WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif
pmcLog(pMac, LOG2, FL("PMC: entering pmcEnterWowl"));
if( NULL == pSession )
{
pmcLog(pMac, LOGE, FL("Session not found "));
return eHAL_STATUS_FAILURE;
}
/* PMC must have completed initialization before WoWL can be requested. */
if( !PMC_IS_READY(pMac) )
{
pmcLog(pMac, LOGE, FL("Requesting WoWL when PMC not ready"));
pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
return eHAL_STATUS_FAILURE;
}
/* Check if BMPS is enabled; WoWL rides on top of BMPS. */
if (!pMac->pmc.bmpsEnabled)
{
pmcLog(pMac, LOGE, "PMC: Cannot enter WoWL. BMPS is disabled");
return eHAL_STATUS_PMC_DISABLED;
}
/* Check if WoWL is enabled. */
if (!pMac->pmc.wowlEnabled)
{
pmcLog(pMac, LOGE, "PMC: Cannot enter WoWL. WoWL is disabled");
return eHAL_STATUS_PMC_DISABLED;
}
/* Check that we are associated with a single session in infra mode. */
if (!pmcValidateConnectState( pMac ))
{
pmcLog(pMac, LOGE, "PMC: Cannot enable WOWL. STA not associated "
"with an Access Point in Infra Mode with single active session");
return eHAL_STATUS_FAILURE;
}
/* Is there a pending UAPSD request? HDD should have triggered QoS
module to do the necessary cleanup before triggering WOWL */
if(pMac->pmc.uapsdSessionRequired)
{
pmcLog(pMac, LOGE, "PMC: Cannot request WOWL. Pending UAPSD request");
return eHAL_STATUS_FAILURE;
}
/* Check that entry into a power save mode is allowed at this time
   (only polled while still in FULL_POWER). */
if (pMac->pmc.pmcState == FULL_POWER && !pmcPowerSaveCheck(hHal))
{
pmcLog(pMac, LOGE, "PMC: Power save check failed. WOWL request "
"will not be accepted");
return eHAL_STATUS_FAILURE;
}
/* Stamp the request with the connected BSSID. */
vos_mem_copy(wowlEnterParams->bssId, pSession->connectedProfile.bssid,
sizeof(tSirMacAddr));
// To avoid race condition, set callback routines before sending message.
/* cache the WOWL information */
pMac->pmc.wowlEnterParams = *wowlEnterParams;
pMac->pmc.enterWowlCallbackRoutine = enterWowlCallbackRoutine;
pMac->pmc.enterWowlCallbackContext = enterWowlCallbackContext;
#ifdef WLAN_WAKEUP_EVENTS
/* Cache the Wake Reason Indication callback information */
pMac->pmc.wakeReasonIndCB = wakeReasonIndCB;
pMac->pmc.wakeReasonIndCBContext = wakeReasonIndCBContext;
#endif // WLAN_WAKEUP_EVENTS
/* Enter Request WOWL State. */
if (pmcRequestEnterWowlState(hHal, wowlEnterParams) != eHAL_STATUS_SUCCESS)
return eHAL_STATUS_FAILURE;
/* Remember that WoWL mode is wanted so it can be re-applied after a
   transition through full power. */
pMac->pmc.wowlModeRequired = TRUE;
return eHAL_STATUS_PMC_PENDING;
}
/* ---------------------------------------------------------------------------
\fn pmcExitWowl
\brief This is the SME API exposed to HDD to request exit from WoWLAN mode.
SME will initiate exit from WoWLAN mode and device will be put in BMPS
mode.
\param hHal - The handle returned by macOpen.
\return eHalStatus
eHAL_STATUS_FAILURE Device cannot exit WoWLAN mode.
eHAL_STATUS_SUCCESS Request accepted to exit WoWLAN mode.
---------------------------------------------------------------------------*/
/* Leave WoWLAN mode.  Clears any buffered enter-WoWL request, kicks the
 * state machine into REQUEST_EXIT_WOWL (the device then transitions to
 * BMPS) and drops the cached enter-WoWL callback. */
eHalStatus pmcExitWowl (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);
    vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
    wowRequest.event_subtype = WLAN_WOW_EXIT_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif
    pmcLog(pMac, LOG2, "PMC: entering pmcExitWowl");

    /* Any buffered request to (re-)enter WoWL is now stale. */
    pMac->pmc.wowlModeRequired = FALSE;

    if (eHAL_STATUS_SUCCESS != pmcRequestExitWowlState(hHal))
    {
        return eHAL_STATUS_FAILURE;
    }

    /* The enter-WoWL callback must not fire after an explicit exit. */
    pMac->pmc.enterWowlCallbackRoutine = NULL;
    pMac->pmc.enterWowlCallbackContext = NULL;

    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcSetHostOffload
\brief Set the host offload feature.
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to the offload request.
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set the offload.
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/* Program a host offload (e.g. ARP reply offload) into firmware for the
 * given session.  Stamps the caller's request with the connected BSSID,
 * clones it into a WDA-owned buffer and posts WDA_SET_HOST_OFFLOAD. */
eHalStatus pmcSetHostOffload (tHalHandle hHal, tpSirHostOffloadReq pRequest,
    tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);
    tpSirHostOffloadReq pRequestBuf;
    vos_msg_t msg;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: IP address = %d.%d.%d.%d", __func__,
        pRequest->params.hostIpv4Addr[0], pRequest->params.hostIpv4Addr[1],
        pRequest->params.hostIpv4Addr[2], pRequest->params.hostIpv4Addr[3]);

    if (pSession == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: SESSION not Found\n", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirHostOffloadReq));
    if (pRequestBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for host offload request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Stamp the request with the connected BSSID, then hand a private copy
       to WDA (the message queue owns pRequestBuf from here on). */
    vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirHostOffloadReq));

    msg.type = WDA_SET_HOST_OFFLOAD;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (vos_mq_post_message(VOS_MODULE_ID_WDA, &msg) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_HOST_OFFLOAD message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcSetKeepAlive
\brief Set the Keep Alive feature.
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to the Keep Alive.
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set the keepalive.
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/* Configure the firmware keep-alive feature for the given session:
 * stamps the request with the connected BSSID, clones it into a
 * WDA-owned buffer and posts WDA_SET_KEEP_ALIVE. */
eHalStatus pmcSetKeepAlive (tHalHandle hHal, tpSirKeepAliveReq pRequest, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);
    tpSirKeepAliveReq pRequestBuf;
    vos_msg_t msg;

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_LOW,
              "%s: WDA_SET_KEEP_ALIVE message", __func__);

    if (NULL == pSession)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s:  Session not Found", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirKeepAliveReq));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for keep alive request",
                  __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Stamp the request with the connected BSSID and hand a private copy
       to WDA (the queue owns pRequestBuf after a successful post). */
    vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirKeepAliveReq));
    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_LOW,
              "buff TP %d input TP %d ", pRequestBuf->timePeriod, pRequest->timePeriod);

    msg.type = WDA_SET_KEEP_ALIVE;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (vos_mq_post_message(VOS_MODULE_ID_WDA, &msg) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to post WDA_SET_KEEP_ALIVE message to WDA",
                  __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#ifdef WLAN_NS_OFFLOAD
/* ---------------------------------------------------------------------------
\fn pmcSetNSOffload
\brief Set the host offload feature.
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to the offload request.
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set the offload.
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/* Program the IPv6 Neighbor Solicitation offload into firmware for the
 * given session: stamps the request with the connected BSSID, clones it
 * into a WDA-owned buffer and posts WDA_SET_NS_OFFLOAD. */
eHalStatus pmcSetNSOffload (tHalHandle hHal, tpSirHostOffloadReq pRequest,
    tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);
    tpSirHostOffloadReq pRequestBuf;
    vos_msg_t msg;

    if (pSession == NULL)
    {
        pmcLog(pMac, LOGE, FL("Session not found "));
        return eHAL_STATUS_FAILURE;
    }

    /* Stamp the caller's request with the connected BSSID. */
    vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));

    pRequestBuf = vos_mem_malloc(sizeof(tSirHostOffloadReq));
    if (pRequestBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for NS offload request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }
    vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirHostOffloadReq));

    msg.type = WDA_SET_NS_OFFLOAD;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (vos_mq_post_message(VOS_MODULE_ID_WDA, &msg) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post SIR_HAL_SET_HOST_OFFLOAD message to HAL", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif //WLAN_NS_OFFLOAD
/* Drain and free every registered power-save-check entry, then destroy
 * the list itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcClosePowerSaveCheckList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.powerSaveCheckList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.powerSaveCheckList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.powerSaveCheckList, FALSE))
    {
        tpPowerSaveCheckEntry pCheckEntry =
            GET_BASE_ADDR(pEntry, tPowerSaveCheckEntry, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pCheckEntry))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.powerSaveCheckList);
    csrLLClose(&pMac->pmc.powerSaveCheckList);
}
/* Drain and free every pending full-power request entry, then destroy
 * the list itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcCloseRequestFullPowerList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.requestFullPowerList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.requestFullPowerList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.requestFullPowerList, FALSE))
    {
        tpRequestFullPowerEntry pFullPowerEntry =
            GET_BASE_ADDR(pEntry, tRequestFullPowerEntry, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pFullPowerEntry))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.requestFullPowerList);
    csrLLClose(&pMac->pmc.requestFullPowerList);
}
/* Drain and free every pending BMPS request entry, then destroy the list
 * itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcCloseRequestBmpsList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.requestBmpsList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.requestBmpsList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.requestBmpsList, FALSE))
    {
        tpRequestBmpsEntry pBmpsEntry =
            GET_BASE_ADDR(pEntry, tRequestBmpsEntry, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pBmpsEntry))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.requestBmpsList);
    csrLLClose(&pMac->pmc.requestBmpsList);
}
/* Drain and free every pending start-UAPSD request entry, then destroy
 * the list itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcCloseRequestStartUapsdList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.requestStartUapsdList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.requestStartUapsdList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.requestStartUapsdList, FALSE))
    {
        tpStartUapsdEntry pUapsdEntry =
            GET_BASE_ADDR(pEntry, tStartUapsdEntry, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pUapsdEntry))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.requestStartUapsdList);
    csrLLClose(&pMac->pmc.requestStartUapsdList);
}
/* Drain and free every device-state-update indication entry, then destroy
 * the list itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcCloseDeviceStateUpdateList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.deviceStateUpdateIndList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.deviceStateUpdateIndList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.deviceStateUpdateIndList, FALSE))
    {
        tpDeviceStateUpdateIndEntry pUpdateEntry =
            GET_BASE_ADDR(pEntry, tDeviceStateUpdateIndEntry, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pUpdateEntry))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.deviceStateUpdateIndList);
    csrLLClose(&pMac->pmc.deviceStateUpdateIndList);
}
/* Drain and free every deferred PMC message, then destroy the list
 * itself.  Aborts (PMC_ABORT) if an entry cannot be freed. */
void pmcCloseDeferredMsgList(tpAniSirGlobal pMac)
{
    tListElem *pEntry;

    csrLLLock(&pMac->pmc.deferredMsgList);
    for (pEntry = csrLLRemoveHead(&pMac->pmc.deferredMsgList, FALSE);
         pEntry != NULL;
         pEntry = csrLLRemoveHead(&pMac->pmc.deferredMsgList, FALSE))
    {
        tPmcDeferredMsg *pMsg = GET_BASE_ADDR(pEntry, tPmcDeferredMsg, link);
        if (eHAL_STATUS_SUCCESS != palFreeMemory(pMac->hHdd, pMsg))
        {
            pmcLog(pMac, LOGE, FL("Cannot free memory "));
            PMC_ABORT;
            break;
        }
    }
    csrLLUnlock(&pMac->pmc.deferredMsgList);
    csrLLClose(&pMac->pmc.deferredMsgList);
}
#ifdef FEATURE_WLAN_SCAN_PNO
/* Fill in the 802.11 management-frame MAC header at the head of pBD:
 * frame control (protocol version / type / subtype), Address 1 (DA) and
 * Address 3 (BSSID) = peerAddr, Address 2 (SA) = selfMacAddr.
 * Always returns eSIR_SUCCESS. */
static tSirRetStatus
pmcPopulateMacHeader( tpAniSirGlobal pMac,
    tANI_U8* pBD,
    tANI_U8 type,
    tANI_U8 subType,
    tSirMacAddr peerAddr,
    tSirMacAddr selfMacAddr)
{
    tpSirMacMgmtHdr pMacHdr = (tpSirMacMgmtHdr)pBD;

    /* Frame-control field. */
    pMacHdr->fc.protVer = SIR_MAC_PROTOCOL_VERSION;
    pMacHdr->fc.type = type;
    pMacHdr->fc.subType = subType;

    /* Address 1 (DA) <- peer. */
    palCopyMemory(pMac->hHdd, (tANI_U8 *)pMacHdr->da,
                  (tANI_U8 *)peerAddr, sizeof(tSirMacAddr));
    /* Address 2 (SA) <- our own MAC. */
    sirCopyMacAddr(pMacHdr->sa, selfMacAddr);
    /* Address 3 (BSSID) <- peer. */
    palCopyMemory(pMac->hHdd, (tANI_U8 *)pMacHdr->bssId,
                  (tANI_U8 *)peerAddr, sizeof(tSirMacAddr));

    return eSIR_SUCCESS;
} /*** pmcPopulateMacHeader() ***/
/* Build a broadcast probe-request frame template for PNO scans.
 * Writes into pFrame: an 802.11 management header (DA/BSSID = broadcast,
 * SA = selfMacAddr) followed by a packed dot11f Probe Request carrying
 * supported rates (plus extended rates for non-11b modes and HT caps for
 * HT modes).  Total frame length is returned through *pusLen.
 * NOTE(review): pFrame is assumed large enough for header + worst-case
 * payload -- confirm callers pass SIR_PNO_MAX_PB_REQ_SIZE buffers. */
static tSirRetStatus
pmcPrepareProbeReqTemplate(tpAniSirGlobal pMac,
tANI_U8 nChannelNum,
tANI_U32 dot11mode,
tSirMacAddr selfMacAddr,
tANI_U8 *pFrame,
tANI_U16 *pusLen)
{
tDot11fProbeRequest pr;
tANI_U32 nStatus, nBytes, nPayload;
tSirRetStatus nSirStatus;
/*Bcast tx*/
tSirMacAddr bssId = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
// The scheme here is to fill out a 'tDot11fProbeRequest' structure
// and then hand it off to 'dot11fPackProbeRequest' (for
// serialization). We start by zero-initializing the structure:
palZeroMemory( pMac->hHdd, ( tANI_U8* )&pr, sizeof( pr ) );
PopulateDot11fSuppRates( pMac, nChannelNum, &pr.SuppRates,NULL);
/* Extended rates do not apply in pure 11b mode. */
if ( WNI_CFG_DOT11_MODE_11B != dot11mode )
{
PopulateDot11fExtSuppRates1( pMac, nChannelNum, &pr.ExtSuppRates );
}
/* Advertise HT capabilities when the configured mode supports HT. */
if (IS_DOT11_MODE_HT(dot11mode))
{
PopulateDot11fHTCaps( pMac, NULL, &pr.HTCaps );
}
// That's it-- now we pack it. First, how much space are we going to
// need?
nStatus = dot11fGetPackedProbeRequestSize( pMac, &pr, &nPayload );
if ( DOT11F_FAILED( nStatus ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"Failed to calculate the packed size f"
"or a Probe Request (0x%08x).", nStatus );
// We'll fall back on the worst case scenario:
nPayload = sizeof( tDot11fProbeRequest );
}
else if ( DOT11F_WARNED( nStatus ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"There were warnings while calculating"
"the packed size for a Probe Request ("
"0x%08x).", nStatus );
}
nBytes = nPayload + sizeof( tSirMacMgmtHdr );
/* Prepare outgoing frame: zero header + payload region first. */
palZeroMemory( pMac->hHdd, pFrame, nBytes );
// Next, we fill out the buffer descriptor:
nSirStatus = pmcPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME,
SIR_MAC_MGMT_PROBE_REQ, bssId,selfMacAddr);
if ( eSIR_SUCCESS != nSirStatus )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"Failed to populate the buffer descriptor for a Probe Request (%d).",
nSirStatus );
return nSirStatus; // allocated!
}
// That done, pack the Probe Request (payload starts right after the
// management header):
nStatus = dot11fPackProbeRequest( pMac, &pr, pFrame +
sizeof( tSirMacMgmtHdr ),
nPayload, &nPayload );
if ( DOT11F_FAILED( nStatus ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"Failed to pack a Probe Request (0x%08x).", nStatus );
return eSIR_FAILURE; // allocated!
}
else if ( DOT11F_WARNED( nStatus ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"There were warnings while packing a Probe Request" );
}
/* Report the final on-air length: header + actual packed payload. */
*pusLen = nPayload + sizeof(tSirMacMgmtHdr);
return eSIR_SUCCESS;
} // End pmcPrepareProbeReqTemplate.
/* Configure Preferred Network Offload (PNO) scanning in firmware.
 * Clones the caller's scan request, builds 2.4 GHz and 5 GHz probe-request
 * templates (appending any supplicant-supplied IEs when they fit), posts
 * WDA_SET_PNO_REQ, and caches the "preferred network found" callback.
 * NOTE(review): the SSID trace below reads 32 bytes of each ssId array
 * through v_U32_t casts -- assumes ssId is at least 32 bytes and suitably
 * aligned; confirm against the tSirMacSSid definition. */
eHalStatus pmcSetPreferredNetworkList
(
tHalHandle hHal,
tpSirPNOScanReq pRequest,
tANI_U8 sessionId,
preferredNetworkFoundIndCallback callbackRoutine,
void *callbackContext
)
{
tpSirPNOScanReq pRequestBuf;
vos_msg_t msg;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
tANI_U8 ucDot11Mode;
VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: SSID = 0x%08lx%08lx%08lx%08lx%08lx%08lx%08lx%08lx, "
"0x%08lx%08lx%08lx%08lx%08lx%08lx%08lx%08lx", __func__,
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[0]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[4]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[8]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[12]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[16]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[20]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[24]),
*((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[28]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[0]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[4]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[8]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[12]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[16]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[20]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[24]),
*((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[28]));
/* Deep-copy the request; WDA takes ownership of pRequestBuf on a
   successful post. */
pRequestBuf = vos_mem_malloc(sizeof(tSirPNOScanReq));
if (NULL == pRequestBuf)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for PNO request", __func__);
return eHAL_STATUS_FAILED_ALLOC;
}
vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirPNOScanReq));
/*Must translate the mode first*/
ucDot11Mode = (tANI_U8) csrTranslateToWNICfgDot11Mode(pMac,
csrFindBestPhyMode( pMac, pMac->roam.configParam.phyMode ));
/*Prepare a probe request for 2.4GHz band and one for 5GHz band*/
if (eSIR_SUCCESS == pmcPrepareProbeReqTemplate(pMac, SIR_PNO_24G_DEFAULT_CH,
ucDot11Mode, pSession->selfMacAddr,
pRequestBuf->p24GProbeTemplate,
&pRequestBuf->us24GProbeTemplateLen))
{
/* Append IE passed by supplicant (if any) to the probe request,
   but only when the combined frame still fits the template buffer. */
if ((0 < pRequest->us24GProbeTemplateLen) &&
((pRequestBuf->us24GProbeTemplateLen +
pRequest->us24GProbeTemplateLen) < SIR_PNO_MAX_PB_REQ_SIZE ))
{
vos_mem_copy((tANI_U8 *)&pRequestBuf->p24GProbeTemplate +
pRequestBuf->us24GProbeTemplateLen,
(tANI_U8 *)&pRequest->p24GProbeTemplate,
pRequest->us24GProbeTemplateLen);
pRequestBuf->us24GProbeTemplateLen +=
pRequest->us24GProbeTemplateLen;
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: pRequest->us24GProbeTemplateLen = %d", __func__,
pRequest->us24GProbeTemplateLen);
}
else
{
/* Oversized extra IEs are silently dropped (logged only). */
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: Extra ie discarded on 2.4G, IE length = %d", __func__,
pRequest->us24GProbeTemplateLen);
}
}
/* Same template construction for the 5 GHz band. */
if (eSIR_SUCCESS == pmcPrepareProbeReqTemplate(pMac, SIR_PNO_5G_DEFAULT_CH,
ucDot11Mode, pSession->selfMacAddr,
pRequestBuf->p5GProbeTemplate,
&pRequestBuf->us5GProbeTemplateLen))
{
/* Append IE passed by supplicant(if any) to probe request */
if ((0 < pRequest->us5GProbeTemplateLen ) &&
((pRequestBuf->us5GProbeTemplateLen +
pRequest->us5GProbeTemplateLen) < SIR_PNO_MAX_PB_REQ_SIZE ))
{
vos_mem_copy((tANI_U8 *)&pRequestBuf->p5GProbeTemplate +
pRequestBuf->us5GProbeTemplateLen,
(tANI_U8 *)&pRequest->p5GProbeTemplate,
pRequest->us5GProbeTemplateLen);
pRequestBuf->us5GProbeTemplateLen += pRequest->us5GProbeTemplateLen;
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: pRequestBuf->us5GProbeTemplateLen = %d", __func__,
pRequest->us5GProbeTemplateLen);
}
else
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: Extra IE discarded on 5G, IE length = %d", __func__,
pRequest->us5GProbeTemplateLen);
}
}
msg.type = WDA_SET_PNO_REQ;
msg.reserved = 0;
msg.bodyptr = pRequestBuf;
if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_PNO_REQ message to WDA", __func__);
vos_mem_free(pRequestBuf);
return eHAL_STATUS_FAILURE;
}
/* Cache the Preferred Network Found Indication callback information */
pMac->pmc.prefNetwFoundCB = callbackRoutine;
pMac->pmc.preferredNetworkFoundIndCallbackContext = callbackContext;
VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "-%s", __func__);
return eHAL_STATUS_SUCCESS;
}
/* Program the PNO RSSI filter threshold into firmware via
 * WDA_SET_RSSI_FILTER_REQ.
 * \param hHal          - The handle returned by macOpen (unused here).
 * \param rssiThreshold - RSSI threshold to apply.
 * \return eHAL_STATUS_SUCCESS       request posted to WDA
 *         eHAL_STATUS_FAILED_ALLOC  out of memory
 *         eHAL_STATUS_FAILURE       message post failed */
eHalStatus pmcSetRssiFilter(tHalHandle hHal, v_U8_t rssiThreshold)
{
    tpSirSetRSSIFilterReq pRequestBuf;
    vos_msg_t msg;

    /* BUGFIX: allocate the full request structure.  The original code did
       vos_mem_malloc(sizeof(tpSirSetRSSIFilterReq)) -- the size of a
       *pointer* typedef -- leaving the write below out of bounds. */
    pRequestBuf = vos_mem_malloc(sizeof(tSirSetRSSIFilterReq));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for RSSI filter request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }
    pRequestBuf->rssiThreshold = rssiThreshold;

    msg.type = WDA_SET_RSSI_FILTER_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        /* BUGFIX: the failure log previously named WDA_SET_PNO_REQ. */
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_RSSI_FILTER_REQ message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* Push the current scan parameters (11d flags, channel list clamped to
 * SIR_PNO_MAX_NETW_CHANNELS_EX, dwell times, CB state) to firmware via a
 * WDA_UPDATE_SCAN_PARAMS_REQ message.
 * \param hHal         - The handle returned by macOpen (unused here).
 * \param pRequest     - CSR configuration to source the parameters from.
 * \param pChannelList - Channel list to program.
 * \param b11dResolved - Whether the 11d country has been resolved.
 * \return eHAL_STATUS_SUCCESS / eHAL_STATUS_FAILED_ALLOC / eHAL_STATUS_FAILURE */
eHalStatus pmcUpdateScanParams(tHalHandle hHal, tCsrConfig *pRequest, tCsrChannel *pChannelList, tANI_U8 b11dResolved)
{
    tpSirUpdateScanParams pRequestBuf;
    vos_msg_t msg;
    int i;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s started", __func__);

    pRequestBuf = vos_mem_malloc(sizeof(tSirUpdateScanParams));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for UpdateScanParams request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }
    //
    // Fill pRequestBuf structure from pRequest
    //
    /* NOTE(review): b11dEnabled is sourced from Is11eSupportEnabled
       (802.11e/QoS), which looks like a typo for an 11d flag -- confirm
       against the tCsrConfig definition before changing. */
    pRequestBuf->b11dEnabled = pRequest->Is11eSupportEnabled;
    pRequestBuf->b11dResolved = b11dResolved;
    /* Clamp the channel count to what the firmware request can carry. */
    pRequestBuf->ucChannelCount =
        ( pChannelList->numChannels < SIR_PNO_MAX_NETW_CHANNELS_EX )?
        pChannelList->numChannels:SIR_PNO_MAX_NETW_CHANNELS_EX;
    for (i=0; i < pRequestBuf->ucChannelCount; i++)
    {
        /* Use the standard C99 __func__ (consistent with the rest of this
           file) instead of the non-standard __FUNCTION__. */
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
            "%s: Channel List %d: %d", __func__, i, pChannelList->channelList[i] );
        pRequestBuf->aChannels[i] = pChannelList->channelList[i];
    }
    pRequestBuf->usPassiveMinChTime = pRequest->nPassiveMinChnTime;
    pRequestBuf->usPassiveMaxChTime = pRequest->nPassiveMaxChnTime;
    pRequestBuf->usActiveMinChTime = pRequest->nActiveMinChnTime;
    pRequestBuf->usActiveMaxChTime = pRequest->nActiveMaxChnTime;
    pRequestBuf->ucCBState = PHY_SINGLE_CHANNEL_CENTERED;

    msg.type = WDA_UPDATE_SCAN_PARAMS_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_UPDATE_SCAN_PARAMS message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif // FEATURE_WLAN_SCAN_PNO
/* Send power-save tuning parameters to firmware via
 * WDA_SET_POWER_PARAMS_REQ.  Unless 'forced' is set, the request is only
 * issued when some PE session is eligible for power save. */
eHalStatus pmcSetPowerParams(tHalHandle hHal, tSirSetPowerParamsReq* pwParams, tANI_BOOLEAN forced)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tSirSetPowerParamsReq* pRequestBuf;
    vos_msg_t msg;
    tpPESession psessionEntry = peGetValidPowerSaveSession(pMac);

    if ((NULL == psessionEntry) && !forced)
    {
        /* No session can enter power save and the caller did not force it. */
        return eHAL_STATUS_NOT_INITIALIZED;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirSetPowerParamsReq));
    if (pRequestBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for Power Paramrequest", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }
    /* WDA takes ownership of this copy on a successful post. */
    vos_mem_copy(pRequestBuf, pwParams, sizeof(*pRequestBuf));

    msg.type = WDA_SET_POWER_PARAMS_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (vos_mq_post_message(VOS_MODULE_ID_WDA, &msg) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_POWER_PARAMS_REQ message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#ifdef WLAN_FEATURE_PACKET_FILTERING
/* Query the packet-coalescing filter match count from firmware.
 * Caches the caller's callback (invoked when the response arrives) and
 * posts WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ. */
eHalStatus pmcGetFilterMatchCount
(
    tHalHandle hHal,
    FilterMatchCountCallback callbackRoutine,
    void *callbackContext,
    tANI_U8 sessionId
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);
    tpSirRcvFltPktMatchRsp pRequestBuf;
    vos_msg_t msg;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
        "%s", __func__);

    if (pSession == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Session not found ", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirRcvFltPktMatchRsp));
    if (pRequestBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to allocate "
            "memory for Get PC Filter Match Count request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    vos_mem_copy(pRequestBuf->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
    msg.type = WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;

    /* It is safe to overwrite a non-NULL callback here: the same message
       is never re-sent while pending, so a leftover callback can only mean
       the previous request failed or timed out. */
    pMac->pmc.FilterMatchCountCB = callbackRoutine;
    pMac->pmc.FilterMatchCountCBContext = callbackContext;

    if (vos_mq_post_message(VOS_MODULE_ID_WDA, &msg) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to post WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ "
            "message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif // WLAN_FEATURE_PACKET_FILTERING
#ifdef WLAN_FEATURE_GTK_OFFLOAD
/* ---------------------------------------------------------------------------
\fn pmcSetGTKOffload
\brief Set GTK offload feature.
\param hHal - The handle returned by macOpen.
\param pGtkOffload - Pointer to the GTK offload request.
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set the offload.
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/* Program the GTK (group temporal key) rekey offload into firmware.
 * Stamps the request with the connected BSSID and posts a
 * WDA_GTK_OFFLOAD_REQ carrying a private copy of the parameters.
 * \param hHal        - The handle returned by macOpen.
 * \param pGtkOffload - GTK offload parameters; its bssId field is
 *                      overwritten with the session's BSSID.
 * \param sessionId   - Session the offload applies to.
 * \return eHAL_STATUS_SUCCESS / eHAL_STATUS_FAILED_ALLOC / eHAL_STATUS_FAILURE */
eHalStatus pmcSetGTKOffload (tHalHandle hHal, tpSirGtkOffloadParams pGtkOffload,
    tANI_U8 sessionId)
{
    tpSirGtkOffloadParams pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    /* BUGFIX: ullKeyReplayCounter is a 64-bit counter (the "ull" prefix);
       printing it with %d is a printf format mismatch (undefined behavior,
       truncated value).  Print it with %llu via an explicit cast. */
    VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: KeyReplayCounter: %llu",
        __func__, (unsigned long long)pGtkOffload->ullKeyReplayCounter);

    if(NULL == pSession )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Session not found ", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = (tpSirGtkOffloadParams)vos_mem_malloc(sizeof(tSirGtkOffloadParams));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate "
            "memory for GTK offload request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Stamp the request with the connected BSSID, then hand a private
       copy to WDA (the queue owns pRequestBuf after a successful post). */
    vos_mem_copy(pGtkOffload->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pGtkOffload, sizeof(tSirGtkOffloadParams));

    msg.type = WDA_GTK_OFFLOAD_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post "
            "SIR_HAL_SET_GTK_OFFLOAD message to HAL", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn pmcGetGTKOffload
\brief Get GTK offload information.
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Pointer to the GTK Offload Get Info response callback routine.
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set the offload.
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
/* Request the current GTK offload information from firmware.
 * Caches the caller's callback (invoked when the response arrives),
 * stamps the request with the connected BSSID and posts
 * WDA_GTK_OFFLOAD_GETINFO_REQ.
 * \param hHal            - The handle returned by macOpen.
 * \param callbackRoutine - GTK Offload Get Info response callback.
 * \param callbackContext - Cookie passed back in the callback.
 * \param sessionId       - Session to query.
 * \return eHAL_STATUS_SUCCESS / eHAL_STATUS_FAILED_ALLOC / eHAL_STATUS_FAILURE */
eHalStatus pmcGetGTKOffload(tHalHandle hHal, GTKOffloadGetInfoCallback callbackRoutine,
    void *callbackContext, tANI_U8 sessionId)
{
    tpSirGtkOffloadGetInfoRspParams pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    /* BUGFIX: the original trace used the format "%s: filterId = %d" but
       supplied only __func__ -- a missing variadic argument is undefined
       behavior, and there is no filter id in this function. */
    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s", __func__);

    if(NULL == pSession )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Session not found ", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = (tpSirGtkOffloadGetInfoRspParams)
        vos_mem_malloc(sizeof (tSirGtkOffloadGetInfoRspParams));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate "
            "memory for Get GTK offload request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }
    vos_mem_copy(pRequestBuf->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));

    msg.type = WDA_GTK_OFFLOAD_GETINFO_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;

    /* It is safe to overwrite a non-NULL callback here: the same message
       is never re-sent while pending, so a leftover callback can only mean
       the previous request failed or timed out. */
    pMac->pmc.GtkOffloadGetInfoCB = callbackRoutine;
    pMac->pmc.GtkOffloadGetInfoCBContext = callbackContext;

    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_GTK_OFFLOAD_GETINFO_REQ message to WDA",
            __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif // WLAN_FEATURE_GTK_OFFLOAD
/*
 * Report whether the most recent IMPS request failed, either because the
 * request itself failed or because its timer failed.
 */
v_BOOL_t IsPmcImpsReqFailed (tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return (mac->pmc.ImpsReqFailed || mac->pmc.ImpsReqTimerFailed);
}
/* Clear both IMPS failure flags so a fresh IMPS request can be attempted. */
void pmcResetImpsFailStatus (tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    mac->pmc.ImpsReqFailed = VOS_FALSE;
    mac->pmc.ImpsReqTimerFailed = VOS_FALSE;
}
#ifdef FEATURE_WLAN_BATCH_SCAN
/* -----------------------------------------------------------------------------
\fn pmcSetBatchScanReq
\brief setting batch scan request in FW
\param hHal - The handle returned by macOpen.
\param sessionId - session ID
\param callbackRoutine - Pointer to set batch scan request callback routine
\param callbackContext - Pointer to set batch scan request callback context
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set batch scan request
eHAL_STATUS_SUCCESS Request accepted.
-----------------------------------------------------------------------------*/
eHalStatus pmcSetBatchScanReq(tHalHandle hHal, tSirSetBatchScanReq *pRequest,
    tANI_U8 sessionId, hddSetBatchScanReqCallback callbackRoutine,
    void *callbackContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    tpSirSetBatchScanReq req;
    vos_msg_t wdaMsg;

    /* Build a private copy of the request; WDA owns it after a
     * successful post. */
    req = vos_mem_malloc(sizeof(tSirSetBatchScanReq));
    if (NULL == req)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to allocate memory for SET BATCH SCAN req", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Remember where HDD wants the response delivered. */
    mac->pmc.setBatchScanReqCallback = callbackRoutine;
    mac->pmc.setBatchScanReqCallbackContext = callbackContext;

    req->scanFrequency        = pRequest->scanFrequency;
    req->numberOfScansToBatch = pRequest->numberOfScansToBatch;
    req->bestNetwork          = pRequest->bestNetwork;
    req->rfBand               = pRequest->rfBand;
    req->rtt                  = pRequest->rtt;

    wdaMsg.type     = WDA_SET_BATCH_SCAN_REQ;
    wdaMsg.reserved = 0;
    wdaMsg.bodyptr  = req;

    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &wdaMsg)))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to post WDA_SET_BATCH_SCAN_REQ message to WDA",
            __func__);
        vos_mem_free(req);
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/* -----------------------------------------------------------------------------
\fn pmcTriggerBatchScanResultInd
\brief API to trigger batch scan results indications from FW
\param hHal - The handle returned by macOpen.
\param sessionId - session ID
\param callbackRoutine - Pointer to get batch scan request callback routine
\param callbackContext - Pointer to get batch scan request callback context
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set batch scan request
eHAL_STATUS_SUCCESS Request accepted.
-----------------------------------------------------------------------------*/
eHalStatus pmcTriggerBatchScanResultInd
(
    tHalHandle hHal, tSirTriggerBatchScanResultInd *pRequest, tANI_U8 sessionId,
    hddTriggerBatchScanResultIndCallback callbackRoutine, void *callbackContext
)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    tpSirTriggerBatchScanResultInd ind;
    vos_msg_t wdaMsg;

    /* Build a private copy of the indication; WDA owns it after a
     * successful post. */
    ind = vos_mem_malloc(sizeof(tSirTriggerBatchScanResultInd));
    if (NULL == ind)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to allocate memory for WDA_TRIGGER_BATCH_SCAN_RESULT_IND",
            __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* HDD callback to be called after getting batch scan result ind from FW */
    mac->pmc.batchScanResultCallback        = callbackRoutine;
    mac->pmc.batchScanResultCallbackContext = callbackContext;

    ind->param = pRequest->param;

    wdaMsg.type     = WDA_TRIGGER_BATCH_SCAN_RESULT_IND;
    wdaMsg.reserved = 0;
    wdaMsg.bodyptr  = ind;

    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &wdaMsg)))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to post WDA_TRIGGER_BATCH_SCAN_RESULT_IND message"
            " to WDA", __func__);
        vos_mem_free(ind);
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}
/* -----------------------------------------------------------------------------
\fn pmcStopBatchScanInd
\brief Stopping batch scan request in FW
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Pointer to stop batch scan request callback routine
\return eHalStatus
eHAL_STATUS_FAILURE Cannot set batch scan request
eHAL_STATUS_SUCCESS Request accepted.
-----------------------------------------------------------------------------*/
eHalStatus pmcStopBatchScanInd(tHalHandle hHal, tSirStopBatchScanInd *pRequest,
    tANI_U8 sessionId)
{
    tSirStopBatchScanInd *pRequestBuf;
    vos_msg_t msg;

    /* WDA owns the buffer after a successful post; we only free it on the
     * failure path. */
    pRequestBuf = vos_mem_malloc(sizeof(tSirStopBatchScanInd));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to allocate memory for STOP BATCH SCAN IND", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    pRequestBuf->param = pRequest->param;

    msg.type = WDA_STOP_BATCH_SCAN_IND;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        /* Fixed typo in the log text: the message posted is
         * WDA_STOP_BATCH_SCAN_IND, not "WDA_TOP_BATCH_SCAN_IND". */
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: Not able to post WDA_STOP_BATCH_SCAN_IND message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif
| gpl-2.0 |
iamroot11c/kernel_source | drivers/input/keyboard/jornada720_kbd.c | 231 | 5347 | /*
* drivers/input/keyboard/jornada720_kbd.c
*
* HP Jornada 720 keyboard platform driver
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@Gmail.com>
*
* Copyright (C) 2006 jornada 720 kbd driver by
Filip Zyzniewski <Filip.Zyzniewski@tefnet.pl>
* based on (C) 2004 jornada 720 kbd driver by
Alex Lange <chicken@handhelds.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <mach/jornada720.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
MODULE_LICENSE("GPL v2");
/*
 * Raw scancode (0-127) to Linux keycode translation for the standard
 * Jornada 710/720/728 keyboard.  Zero entries are positions with no key
 * (KEY_RESERVED); the trailing comments mark the physical keyboard rows.
 */
static unsigned short jornada_std_keymap[128] = { /* ROW */
	0, KEY_ESC, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, /* #1 */
	KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE, /* -> */
	0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, /* #2 */
	KEY_0, KEY_MINUS, KEY_EQUAL,0, 0, 0, /* -> */
	0, KEY_Q, KEY_W, KEY_E, KEY_R, KEY_T, KEY_Y, KEY_U, KEY_I, KEY_O, /* #3 */
	KEY_P, KEY_BACKSLASH, KEY_BACKSPACE, 0, 0, 0, /* -> */
	0, KEY_A, KEY_S, KEY_D, KEY_F, KEY_G, KEY_H, KEY_J, KEY_K, KEY_L, /* #4 */
	KEY_SEMICOLON, KEY_LEFTBRACE, KEY_RIGHTBRACE, 0, 0, 0, /* -> */
	0, KEY_Z, KEY_X, KEY_C, KEY_V, KEY_B, KEY_N, KEY_M, KEY_COMMA, /* #5 */
	KEY_DOT, KEY_KPMINUS, KEY_APOSTROPHE, KEY_ENTER, 0, 0,0, /* -> */
	0, KEY_TAB, 0, KEY_LEFTSHIFT, 0, KEY_APOSTROPHE, 0, 0, 0, 0, /* #6 */
	KEY_UP, 0, KEY_RIGHTSHIFT, 0, 0, 0,0, 0, 0, 0, 0, KEY_LEFTALT, KEY_GRAVE, /* -> */
	0, 0, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0,0, KEY_KPASTERISK, /* -> */
	KEY_LEFTCTRL, 0, KEY_SPACE, 0, 0, 0, KEY_SLASH, KEY_DELETE, 0, 0, /* -> */
	0, 0, 0, KEY_POWER, /* -> */
};
/*
 * Per-device state.  The keymap is a private, writable copy of
 * jornada_std_keymap; it is exposed to the input core via
 * input_dev->keycode in the probe routine.
 */
struct jornadakbd {
	unsigned short keymap[ARRAY_SIZE(jornada_std_keymap)]; /* live scancode->keycode table */
	struct input_dev *input;                                /* registered input device */
};
/*
 * Keyboard IRQ handler: ask the microcontroller (over SSP) how many
 * scancodes are pending, then drain and report them one at a time.
 * Bit 7 of each raw byte is the release flag; bits 0-6 are the scancode.
 */
static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
	struct input_dev *input = jornadakbd->input;
	u8 pending, raw, code;

	/* startup ssp with spinlock */
	jornada_ssp_start();

	if (jornada_ssp_inout(GETSCANKEYCODE) != TXDUMMY) {
		printk(KERN_DEBUG
			"jornada720_kbd: "
			"GetKeycode command failed with ETIMEDOUT, "
			"flushed bus\n");
		goto out;
	}

	/* First byte tells us how many keycodes are queued. */
	pending = jornada_ssp_byte(TXDUMMY);

	while (pending--) {
		/* Exchange TxDummy for the next raw keycode byte. */
		raw = jornada_ssp_byte(TXDUMMY);
		code = raw & 0x7f;

		input_event(input, EV_MSC, MSC_SCAN, code);
		input_report_key(input, jornadakbd->keymap[code],
				 !(raw & 0x80));
		input_sync(input);
	}

out:
	/* release spinlock and turn off ssp */
	jornada_ssp_end();
	return IRQ_HANDLED;
};
static int jornada720_kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_dev *input_dev;
int i, err;
jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL);
input_dev = input_allocate_device();
if (!jornadakbd || !input_dev) {
err = -ENOMEM;
goto fail1;
}
platform_set_drvdata(pdev, jornadakbd);
memcpy(jornadakbd->keymap, jornada_std_keymap,
sizeof(jornada_std_keymap));
jornadakbd->input = input_dev;
input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
input_dev->name = "HP Jornada 720 keyboard";
input_dev->phys = "jornadakbd/input0";
input_dev->keycode = jornadakbd->keymap;
input_dev->keycodesize = sizeof(unsigned short);
input_dev->keycodemax = ARRAY_SIZE(jornada_std_keymap);
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
for (i = 0; i < ARRAY_SIZE(jornadakbd->keymap); i++)
__set_bit(jornadakbd->keymap[i], input_dev->keybit);
__clear_bit(KEY_RESERVED, input_dev->keybit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
err = request_irq(IRQ_GPIO0,
jornada720_kbd_interrupt,
IRQF_TRIGGER_FALLING,
"jornadakbd", pdev);
if (err) {
printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n");
goto fail1;
}
err = input_register_device(jornadakbd->input);
if (err)
goto fail2;
return 0;
fail2: /* IRQ, DEVICE, MEMORY */
free_irq(IRQ_GPIO0, pdev);
fail1: /* DEVICE, MEMORY */
input_free_device(input_dev);
kfree(jornadakbd);
return err;
};
/*
 * Remove: undo probe in reverse order.  The IRQ is released first so the
 * interrupt handler can no longer run against the input device while it
 * is being unregistered.
 */
static int jornada720_kbd_remove(struct platform_device *pdev)
{
	struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);

	free_irq(IRQ_GPIO0, pdev);
	/* input_unregister_device() also drops the input core's reference,
	 * so no separate input_free_device() is needed here. */
	input_unregister_device(jornadakbd->input);
	kfree(jornadakbd);

	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:jornada720_kbd");

/* Platform driver glue: binds to the "jornada720_kbd" platform device
 * registered by the Jornada 720 machine code. */
static struct platform_driver jornada720_kbd_driver = {
	.driver	 = {
		.name	= "jornada720_kbd",
		.owner	= THIS_MODULE,
	 },
	.probe	 = jornada720_kbd_probe,
	.remove	 = jornada720_kbd_remove,
};
module_platform_driver(jornada720_kbd_driver);
| gpl-2.0 |
karandpr/Doppler_GB | drivers/scsi/aacraid/linit.c | 487 | 44988 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* linit.c
*
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*/
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include "aacraid.h"
#define AAC_DRIVER_VERSION "1.1-5"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid"
#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
#else
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE
#endif
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
"Adaptec Advanced Raid Products, "
"HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
static LIST_HEAD(aac_devices);
static int aac_cfg_major = -1;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
/*
* Because of the way Linux names scsi devices, the order in this table has
* become important. Check for on-board Raid first, add-in cards second.
*
* Note: The last field is used to index into aac_drivers below.
*/
#ifdef DECLARE_PCI_DEVICE_TABLE
static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
#elif defined(__devinitconst)
static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
#else
static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
#endif
	/* Columns: vendor, device, subvendor, subdevice, class, class_mask,
	 * driver_data.  driver_data is the index into aac_drivers[] below. */
	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
/*
* dmb - For now we add the number of channels to this structure.
* In the future we should add a fib that reports the number of channels
* for the card. At that time we can remove the channels from here
*/
static struct aac_driver_ident aac_drivers[] = {
	/* Columns: init routine, driver name, SCSI vendor string, SCSI model
	 * string, channel count, quirk flags.  Indexed by the driver_data
	 * field of aac_pci_tbl above. */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, /* Adaptec 3230S (Harrier) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, /* Adaptec 3240S (Tornado) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020ZCR     ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025ZCR     ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2820SA      ", 1 }, /* AAR-2820SA (Intruder) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2620SA      ", 1 }, /* AAR-2620SA (Intruder) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2420SA      ", 1 }, /* AAR-2420SA (Intruder) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9024RO       ", 2 }, /* ICP9024RO (Lancer) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9014RO       ", 1 }, /* ICP9014RO (Lancer) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9047MA       ", 1 }, /* ICP9047MA (Lancer) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9087MA       ", 1 }, /* ICP9087MA (Lancer) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP5445AU       ", 1 }, /* ICP5445AU (Hurricane44) */
	{ aac_rx_init, "aacraid",  "ICP     ", "ICP9085LI       ", 1 }, /* ICP9085LI (Marauder-X) */
	{ aac_rx_init, "aacraid",  "ICP     ", "ICP5085BR       ", 1 }, /* ICP5085BR (Marauder-E) */
	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9067MA       ", 1 }, /* ICP9067MA (Intruder-6) */
	{ NULL        , "aacraid",  "ADAPTEC ", "Themisto        ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "Callisto        ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020SA       ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025SA       ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ aac_rx_init, "aacraid",  "DELL    ", "CERC SR2        ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2026ZCR     ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2610SA      ", 1 }, /* SATA 6Ch (Bearcat) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2240S       ", 1 }, /* ASR-2240S (SabreExpress) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4005        ", 1 }, /* ASR-4005 */
	{ aac_rx_init, "ServeRAID","IBM     ", "ServeRAID 8i    ", 1 }, /* IBM 8i (AvonPark) */
	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4000        ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4800SAS     ", 1 }, /* ASR-4800SAS (Marauder-X) */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4805SAS     ", 1 }, /* ASR-4805SAS (Marauder-E) */
	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-3800        ", 1 }, /* ASR-3800 (Hurricane44) */
	{ aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
	{ aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
	{ aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
	{ aac_rx_init, "aacraid",  "DELL    ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 } /* Adaptec NEMER/ARK Catch All */
};
/**
 *	aac_queuecommand	-	queue a SCSI command
 *	@cmd:		SCSI command to queue
 *	@done:		Function to call on command completion
 *
 *	Queues a command for execution by the associated Host Adapter.
 *	Returns 0 if the command was accepted (or is already owned by the
 *	firmware), FAILED otherwise.
 *
 *	TODO: unify with aac_scsi_cmd().
 */
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	u32 idx;

	cmd->scsi_done = done;

	/* Scan the fib pool: if this command is already in flight and owned
	 * by the firmware, do not issue it a second time. */
	for (idx = 0; idx < (host->can_queue + AAC_NUM_MGT_FIB); idx++) {
		struct fib *fib = &dev->fibs[idx];

		if (!fib->hw_fib_va->header.XferState)
			continue;
		if ((struct scsi_cmnd *)fib->callback_data == cmd &&
		    cmd->SCp.phase == AAC_OWNER_FIRMWARE)
			return 0; /* Already owned by Adapter */
	}

	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
	return aac_scsi_cmd(cmd) ? FAILED : 0;
}
/**
 *	aac_info	-	Returns the host adapter name
 *	@shost:		Scsi host to report on
 *
 *	Returns a static string describing the device in question
 */
static const char *aac_info(struct Scsi_Host *shost)
{
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	return aac_drivers[aac->cardtype].name;
}
/**
 *	aac_get_driver_ident
 *	@devtype: index into lookup table
 *
 *	Returns a pointer to the entry in the driver lookup table.
 */
struct aac_driver_ident* aac_get_driver_ident(int devtype)
{
	return aac_drivers + devtype;
}
/**
 *	aac_biosparm	-	return BIOS parameters for disk
 *	@sdev: The scsi device corresponding to the disk
 *	@bdev: the block device corresponding to the disk
 *	@capacity: the sector capacity of the disk
 *	@geom: geometry block to fill in
 *
 *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
 *	The default disk geometry is 64 heads, 32 sectors, and the appropriate
 *	number of cylinders so as not to exceed drive capacity.  In order for
 *	disks equal to or larger than 1 GB to be addressable by the BIOS
 *	without exceeding the BIOS limitation of 1024 cylinders, Extended
 *	Translation should be enabled.   With Extended Translation enabled,
 *	drives between 1 GB inclusive and 2 GB exclusive are given a disk
 *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
 *	are given a disk geometry of 255 heads and 63 sectors.  However, if
 *	the BIOS detects that the Extended Translation setting does not match
 *	the geometry in the partition table, then the translation inferred
 *	from the partition table will be used by the BIOS, and a warning may
 *	be displayed.
 */
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int *geom)
{
	struct diskparm *param = (struct diskparm *)geom;
	unsigned char *buf;

	dprintk((KERN_DEBUG "aac_biosparm.\n"));

	/*
	 *	Assuming extended translation is enabled - #REVISIT#
	 */
	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
		if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
			param->heads = 255;
			param->sectors = 63;
		} else {
			param->heads = 128;
			param->sectors = 32;
		}
	} else {
		param->heads = 64;
		param->sectors = 32;
	}

	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

	/*
	 *	Read the first 1024 bytes from the disk device, if the boot
	 *	sector partition table is valid, search for a partition table
	 *	entry whose end_head matches one of the standard geometry
	 *	translations ( 64/32, 128/32, 255/63 ).
	 */
	buf = scsi_bios_ptable(bdev);
	if (!buf)
		return 0;
	/* The 0xaa55 boot signature sits at offset 0x40 of the buffer
	 * returned by scsi_bios_ptable() (offset 0x1fe of the sector). */
	if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
		struct partition *first = (struct partition * )buf;
		struct partition *entry = first;
		int saved_cylinders = param->cylinders;
		int num;
		unsigned char end_head, end_sec;

		/* Walk the four primary partition entries looking for a
		 * recognised BIOS translation in the CHS end values. */
		for(num = 0; num < 4; num++) {
			end_head = entry->end_head;
			end_sec = entry->end_sector & 0x3f; /* low 6 bits = sector */

			if(end_head == 63) {
				param->heads = 64;
				param->sectors = 32;
				break;
			} else if(end_head == 127) {
				param->heads = 128;
				param->sectors = 32;
				break;
			} else if(end_head == 254) {
				param->heads = 255;
				param->sectors = 63;
				break;
			}
			entry++;
		}

		if (num == 4) {
			/* No match: fall back to the first entry's values
			 * for the sanity checks below. */
			end_head = first->end_head;
			end_sec = first->end_sector & 0x3f;
		}

		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

		if (num < 4 && end_sec == param->sectors) {
			if (param->cylinders != saved_cylinders)
				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
					param->heads, param->sectors, num));
		} else if (end_head > 0 || end_sec > 0) {
			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
				end_head + 1, end_sec, num));
			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
					param->heads, param->sectors));
		}
	}
	kfree(buf);
	return 0;
}
/**
 *	aac_slave_configure		-	compute queue depths
 *	@sdev:	SCSI device we are considering
 *
 *	Selects queue depths for each target device based on the host adapter's
 *	total capacity and the queue depth supported by the target device.
 *	A queue depth of one automatically disables tagged queueing.
 */
static int aac_slave_configure(struct scsi_device *sdev)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

	if (aac->jbod && (sdev->type == TYPE_DISK))
		sdev->removable = 1;

	/* Physical (non-container) disks: honour the expose_physicals
	 * module policy - hide them (-ENXIO) or keep upper-layer drivers
	 * from attaching. */
	if ((sdev->type == TYPE_DISK) &&
			(sdev_channel(sdev) != CONTAINER_CHANNEL) &&
			(!aac->jbod || sdev->inq_periph_qual) &&
			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
		if (expose_physicals == 0)
			return -ENXIO;
		if (expose_physicals < 0)
			sdev->no_uld_attach = 1;
	}

	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
			!sdev->no_uld_attach) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;   /* logical storage units sharing the queue */
		unsigned num_one = 0;   /* devices pinned at depth 1 */
		unsigned depth;
		unsigned cid;

		/*
		 * Firmware has an individual device recovery time typically
		 * of 35 seconds, give us a margin.
		 */
		if (sdev->request_queue->rq_timeout < (45 * HZ))
			blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
			if (aac->fsa_dev[cid].valid)
				++num_lsu;
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
					(!aac->raid_scsi_mode ||
						(sdev_channel(sdev) != 2)) &&
					!dev->no_uld_attach) {
				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
				 || !aac->fsa_dev[sdev_id(dev)].valid)
					++num_lsu;
			} else
				++num_one;
		}
		if (num_lsu == 0)
			++num_lsu;
		/* Split the host's queue evenly among the LSUs, clamped
		 * to the range [2, 256]. */
		depth = (host->can_queue - num_one) / num_lsu;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	} else
		/* Untagged or hidden devices run with a queue depth of 1. */
		scsi_adjust_queue_depth(sdev, 0, 1);

	return 0;
}
/**
 * aac_change_queue_depth - alter queue depths
 * @sdev: SCSI device we are considering
 * @depth: desired queue depth
 *
 * Alters queue depths for target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 */
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
	/* only tagged container disks get an adjustable depth */
	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num = 0;
		/*
		 * Count every device once and container disks twice.
		 * NOTE(review): the unconditional ++num below looks odd
		 * but matches the upstream driver -- verify before change.
		 */
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
			    (sdev_channel(dev) == CONTAINER_CHANNEL))
				++num;
			++num;
		}
		if (num >= host->can_queue)
			num = host->can_queue - 1;
		/* never hand out more than what is left of the host queue */
		if (depth > (host->can_queue - num))
			depth = host->can_queue - num;
		/* clamp the requested depth to the supported range [2, 256] */
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	} else
		scsi_adjust_queue_depth(sdev, 0, 1);
	return sdev->queue_depth;
}
/* sysfs "level": container type for arrays, visibility for raw disks */
static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
	const char *label;

	if (sdev_channel(sdev) == CONTAINER_CHANNEL)
		return snprintf(buf, PAGE_SIZE, "%s\n",
				get_container_type(aac->fsa_dev[sdev_id(sdev)].type));

	if (sdev->no_uld_attach)
		label = "Hidden\n";
	else if (aac->jbod && (sdev->type == TYPE_DISK))
		label = "JBOD\n";
	else
		label = "";
	return snprintf(buf, PAGE_SIZE, "%s", label);
}
/* sysfs "level" attribute: RAID level / visibility of one SCSI device */
static struct device_attribute aac_raid_level_attr = {
	.attr = {
		.name = "level",
		.mode = S_IRUGO,
	},
	.show = aac_show_raid_level
};
/* per-device attribute list handed to the midlayer, NULL terminated */
static struct device_attribute *aac_dev_attrs[] = {
	&aac_raid_level_attr,
	NULL,
};
/* SCSI-device ioctl entry point: raw I/O capability gates everything */
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	return aac_do_ioctl((struct aac_dev *)sdev->host->hostdata, cmd, arg);
}
/*
 * aac_eh_abort - abort handling for a timed-out command
 * @cmd: SCSI command block being aborted
 *
 * Marks the FIB(s) associated with @cmd as timed out so normal
 * completion is suppressed and the error handler owns them. Only a
 * few command opcodes are abortable; everything else returns FAILED.
 */
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int count;
	int ret = FAILED;
	printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
		AAC_DRIVERNAME,
		host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
	switch (cmd->cmnd[0]) {
	case SERVICE_ACTION_IN:
		/* only READ CAPACITY(16) on a 64-bit raw-io adapter is
		 * treated like INQUIRY/READ CAPACITY below */
		if (!(aac->raw_io_interface) ||
		    !(aac->raw_io_64) ||
		    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
			break;
		/* fall through */
	case INQUIRY:
	case READ_CAPACITY:
		/* Mark associated FIB to not complete, eh handler does this */
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			struct fib * fib = &aac->fibs[count];
			/* active FIB whose callback data is this command */
			if (fib->hw_fib_va->header.XferState &&
			    (fib->flags & FIB_CONTEXT_FLAG) &&
			    (fib->callback_data == cmd)) {
				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
				ret = SUCCESS;
			}
		}
		break;
	case TEST_UNIT_READY:
		/* Mark associated FIB to not complete, eh handler does this */
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			struct scsi_cmnd * command;
			struct fib * fib = &aac->fibs[count];
			/* TUR claims every async FIB queued to the same device */
			if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
			    (fib->flags & FIB_CONTEXT_FLAG) &&
			    ((command = fib->callback_data)) &&
			    (command->device == cmd->device)) {
				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
				command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
				if (command == cmd)
					ret = SUCCESS;
			}
		}
	}
	return ret;
}
/*
 * aac_eh_reset - Reset command handling
 * @cmd: SCSI command block causing the reset
 *
 * Marks outstanding FIBs for @cmd as owned by the error handler, waits
 * up to 60 seconds for traffic to the adapter to drain, and if it does
 * not drain performs a register-based adapter reset where supported.
 */
static int aac_eh_reset(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct scsi_cmnd * command;
	int count;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	unsigned long flags;
	/* Mark the associated FIB to not complete, eh handler does this */
	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
		struct fib * fib = &aac->fibs[count];
		if (fib->hw_fib_va->header.XferState &&
		    (fib->flags & FIB_CONTEXT_FLAG) &&
		    (fib->callback_data == cmd)) {
			fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
			cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		}
	}
	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
					AAC_DRIVERNAME);
	/* a non-zero health code is returned directly as the eh verdict */
	if ((count = aac_check_health(aac)))
		return count;
	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds).
	 */
	for (count = 60; count; --count) {
		int active = aac->in_reset;
		/* scan every device for a command still owned by firmware */
		if (active == 0)
		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flags);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if ((command != cmd) &&
				    (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flags);
			if (active)
				break;
		}
		/*
		 * We can exit If all the commands are complete
		 */
		if (active == 0)
			return SUCCESS;
		ssleep(1);
	}
	printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
	/*
	 * This adapter needs a blind reset, only do so for Adapters that
	 * support a register, instead of a commanded, reset.
	 */
	if ((aac->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_MU_RESET) &&
	    aac_check_reset &&
	    ((aac_check_reset != 1) ||
	     !(aac->supplement_adapter_info.SupportedOptions2 &
	     AAC_OPTION_IGNORE_RESET)))
		aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
	return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
/**
 * aac_cfg_open - open a configuration file
 * @inode: inode being opened
 * @file: file handle attached
 *
 * Called when the configuration character device is opened: looks up
 * the adapter whose id matches the device minor number and stashes it
 * in the file's private data.
 *
 * Bugs: This needs extending to check a given adapter is present
 * so we can support hot plugging, and to ref count adapters.
 */
static int aac_cfg_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);
	struct aac_dev *aac;
	int err = -ENODEV;

	lock_kernel();	/* BKL pushdown: nothing else protects this list */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id != minor_number)
			continue;
		file->private_data = aac;
		err = 0;
		break;
	}
	unlock_kernel();

	return err;
}
/**
 * aac_cfg_ioctl - AAC configuration request
 * @inode: inode of device
 * @file: file handle
 * @cmd: ioctl command code
 * @arg: argument
 *
 * Handles a configuration ioctl. Currently this involves wrapping it
 * up and feeding it into the nasty windowsalike glue layer.
 *
 * Bugs: Needs locking against parallel ioctls lower down
 * Bugs: Needs to handle hot plugging
 */
static int aac_cfg_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	/* raw I/O capability gates every management command */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	/* private_data holds the aac_dev selected in aac_cfg_open() */
	return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
/*
 * aac_compat_do_ioctl - service an ioctl from 32-bit userland
 * @dev: adapter the request targets
 * @cmd: ioctl command code
 * @arg: userspace argument
 *
 * Most commands share a layout between 32- and 64-bit userland and are
 * passed straight through; FSACTL_GET_NEXT_ADAPTER_FIB needs its struct
 * repacked via a compat-space copy first.
 */
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
	long ret;
	lock_kernel();
	switch (cmd) {
	/* layout-compatible commands: pass through unchanged */
	case FSACTL_MINIPORT_REV_CHECK:
	case FSACTL_SENDFIB:
	case FSACTL_OPEN_GET_ADAPTER_FIB:
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
	case FSACTL_SEND_RAW_SRB:
	case FSACTL_GET_PCI_INFO:
	case FSACTL_QUERY_DISK:
	case FSACTL_DELETE_DISK:
	case FSACTL_FORCE_DELETE_DISK:
	case FSACTL_GET_CONTAINERS:
	case FSACTL_SEND_LARGE_FIB:
		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB: {
		struct fib_ioctl __user *f;
		/* build a zeroed native-layout copy in compat user space */
		f = compat_alloc_user_space(sizeof(*f));
		ret = 0;
		if (clear_user(f, sizeof(*f)))
			ret = -EFAULT;
		/* copy everything except the trailing 32-bit word */
		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
			ret = -EFAULT;
		if (!ret)
			ret = aac_do_ioctl(dev, cmd, f);
		break;
	}
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	unlock_kernel();
	return ret;
}
/* compat ioctl entry point reached through the SCSI device node */
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	return aac_compat_do_ioctl((struct aac_dev *)sdev->host->hostdata,
				   cmd, (unsigned long)arg);
}
/* compat ioctl entry point for the "aac" management character device */
static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	/* same raw I/O capability check as the native path */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
}
#endif
/* sysfs "model": adapter model string (firmware-supplied if present) */
static ssize_t aac_show_model(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct aac_dev *aac = (struct aac_dev *)class_to_shost(device)->hostdata;
	const char *model = aac_drivers[aac->cardtype].model;

	if (aac->supplement_adapter_info.AdapterTypeText[0]) {
		/* skip the leading vendor word and the blanks after it */
		char *cp = aac->supplement_adapter_info.AdapterTypeText;

		while (*cp != '\0' && *cp != ' ')
			++cp;
		while (*cp == ' ')
			++cp;
		model = cp;
	}
	return snprintf(buf, PAGE_SIZE, "%s\n", model);
}
/* sysfs "vendor": first word of the firmware adapter type string */
static ssize_t aac_show_vendor(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct aac_dev *aac = (struct aac_dev *)class_to_shost(device)->hostdata;
	char *text = aac->supplement_adapter_info.AdapterTypeText;

	if (text[0]) {
		/* the vendor is everything up to the first blank */
		int n = 0;

		while (text[n] != '\0' && text[n] != ' ')
			++n;
		return snprintf(buf, PAGE_SIZE, "%.*s\n", n, text);
	}
	return snprintf(buf, PAGE_SIZE, "%s\n",
			aac_drivers[aac->cardtype].vname);
}
/* sysfs "flags": newline-separated list of driver/adapter capabilities */
static ssize_t aac_show_flags(struct device *cdev,
			      struct device_attribute *attr, char *buf)
{
	int len = 0;
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
	/* nblank() tests whether dprintk() expands to real output */
	if (nblank(dprintk(x)))
		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
	len += snprintf(buf + len, PAGE_SIZE - len,
			"AAC_DETAILED_STATUS_INFO\n");
#endif
	/* 64-bit raw I/O implies READ CAPACITY(16) support */
	if (dev->raw_io_interface && dev->raw_io_64)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SAI_READ_CAPACITY_16\n");
	if (dev->jbod)
		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
	if (dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_POWER_MANAGEMENT)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SUPPORTED_POWER_MANAGEMENT\n");
	if (dev->msi)
		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
	return len;
}
/* sysfs "hba_kernel_version": firmware kernel rev as major.minor-dash[build] */
static ssize_t aac_show_kernel_version(struct device *device,
				       struct device_attribute *attr,
				       char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int rev = le32_to_cpu(dev->adapter_info.kernelrev);

	return snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
			rev >> 24, (rev >> 16) & 0xff, rev & 0xff,
			le32_to_cpu(dev->adapter_info.kernelbuild));
}
/* sysfs "hba_monitor_version": monitor firmware rev, same packing as kernelrev */
static ssize_t aac_show_monitor_version(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int rev = le32_to_cpu(dev->adapter_info.monitorrev);

	return snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
			rev >> 24, (rev >> 16) & 0xff, rev & 0xff,
			le32_to_cpu(dev->adapter_info.monitorbuild));
}
/* sysfs "hba_bios_version": adapter BIOS rev, same packing as kernelrev */
static ssize_t aac_show_bios_version(struct device *device,
				     struct device_attribute *attr,
				     char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int rev = le32_to_cpu(dev->adapter_info.biosrev);

	return snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
			rev >> 24, (rev >> 16) & 0xff, rev & 0xff,
			le32_to_cpu(dev->adapter_info.biosbuild));
}
/* sysfs "serial_number": hex serial, or the full PCBA string if it matches */
static ssize_t aac_show_serial_number(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len = 0;
	/* 0xBAD0 marks an invalid/unprogrammed serial word */
	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		len = snprintf(buf, PAGE_SIZE, "%06X\n",
		  le32_to_cpu(dev->adapter_info.serial[0]));
	/*
	 * If the hex serial matches the tail of the manufacturing PCBA
	 * serial number, report the full PCBA string instead.
	 */
	if (len &&
	  !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
	    sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
	  buf, len-1))
		len = snprintf(buf, PAGE_SIZE, "%.*s\n",
		  (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
		  dev->supplement_adapter_info.MfgPcbaSerialNo);
	return len;
}
static ssize_t aac_show_max_channel(struct device *device,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(device)->max_channel);
}
static ssize_t aac_show_max_id(struct device *device,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(device)->max_id);
}
/* sysfs "reset_host" write handler: triggers an adapter reset */
static ssize_t aac_store_reset_adapter(struct device *device,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int retval;

	/* resetting the adapter is a privileged operation */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	/* a leading '!' selects the forced reset path */
	retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata,
				   buf[0] == '!');
	if (retval >= 0)
		return count;
	return retval;
}
/* sysfs "reset_host" read handler: current adapter health status */
static ssize_t aac_show_reset_adapter(struct device *device,
				      struct device_attribute *attr,
				      char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int health = aac_adapter_check_health(dev);

	/* a healthy adapter that is mid-reset reports busy instead of 0 */
	if ((health == 0) && dev->in_reset)
		health = -EBUSY;
	return snprintf(buf, PAGE_SIZE, "0x%x\n", health);
}
/*
 * Host-level sysfs attributes: read-only adapter identity and firmware
 * version data, plus a root-writable "reset_host" control.
 */
static struct device_attribute aac_model = {
	.attr = {
		.name = "model",
		.mode = S_IRUGO,
	},
	.show = aac_show_model,
};
static struct device_attribute aac_vendor = {
	.attr = {
		.name = "vendor",
		.mode = S_IRUGO,
	},
	.show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
	.attr = {
		.name = "flags",
		.mode = S_IRUGO,
	},
	.show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
	.attr = {
		.name = "hba_kernel_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
	.attr = {
		.name = "hba_monitor_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
	.attr = {
		.name = "hba_bios_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_bios_version,
};
static struct device_attribute aac_serial_number = {
	.attr = {
		.name = "serial_number",
		.mode = S_IRUGO,
	},
	.show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};
/* writable by root only: a write triggers an adapter reset */
static struct device_attribute aac_reset = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR|S_IRUGO,
	},
	.store = aac_store_reset_adapter,
	.show = aac_show_reset_adapter,
};
/* NULL-terminated list handed to the midlayer via shost_attrs */
static struct device_attribute *aac_attrs[] = {
	&aac_model,
	&aac_vendor,
	&aac_flags,
	&aac_kernel_version,
	&aac_monitor_version,
	&aac_bios_version,
	&aac_serial_number,
	&aac_max_channel,
	&aac_max_id,
	&aac_reset,
	NULL
};
/* exported helper: format the adapter serial number into @buf */
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
	return aac_show_serial_number(device, &aac_serial_number, buf);
}
/* file operations for the "aac" management character device */
static const struct file_operations aac_cfg_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= aac_compat_cfg_ioctl,
#endif
	.open		= aac_cfg_open,
};
/* SCSI host template describing the aacraid driver to the midlayer */
static struct scsi_host_template aac_driver_template = {
	.module				= THIS_MODULE,
	.name				= "AAC",
	.proc_name			= AAC_DRIVERNAME,
	.info				= aac_info,
	.ioctl				= aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl			= aac_compat_ioctl,
#endif
	.queuecommand			= aac_queuecommand,
	.bios_param			= aac_biosparm,
	.shost_attrs			= aac_attrs,
	.slave_configure		= aac_slave_configure,
	.change_queue_depth		= aac_change_queue_depth,
	.sdev_attrs			= aac_dev_attrs,
	.eh_abort_handler		= aac_eh_abort,
	.eh_host_reset_handler		= aac_eh_reset,
	.can_queue			= AAC_NUM_IO_FIB,
	.this_id			= MAXIMUM_NUM_CONTAINERS,
	.sg_tablesize			= 16,
	.max_sectors			= 128,
/* cmd_per_lun is capped at 256 regardless of the FIB pool size */
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun			= 256,
#else
	.cmd_per_lun			= AAC_NUM_IO_FIB,
#endif
	.use_clustering			= ENABLE_CLUSTERING,
	.emulated			= 1,
};
/*
 * __aac_shutdown - quiesce a single adapter
 * @aac: adapter to shut down
 *
 * Stops the AIF kernel thread, tells the firmware to shut down, then
 * disables and releases the interrupt. The order matters: the thread
 * must stop before the interrupt path is torn down.
 */
static void __aac_shutdown(struct aac_dev * aac)
{
	if (aac->aif_thread)
		kthread_stop(aac->thread);
	aac_send_shutdown(aac);
	aac_adapter_disable_int(aac);
	free_irq(aac->pdev->irq, aac);
	if (aac->msi)
		pci_disable_msi(aac->pdev);
}
/*
 * aac_probe_one - bind the driver to one PCI adapter
 * @pdev: PCI device found
 * @id: matching entry of aac_pci_tbl; driver_data indexes aac_drivers[]
 *
 * Allocates the SCSI host, initialises the card-specific interface,
 * starts the command thread and registers the host with the SCSI
 * layer. Returns 0 on success or a negative errno, unwinding through
 * the out_* labels in reverse order of acquisition on failure.
 */
static int __devinit aac_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	unsigned index = id->driver_data;
	struct Scsi_Host *shost;
	struct aac_dev *aac;
	struct list_head *insert = &aac_devices;
	int error = -ENODEV;
	int unique_id = 0;
	/* find the first free id, keeping aac_devices sorted by id */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id > unique_id)
			break;
		insert = &aac->entry;
		unique_id++;
	}
	error = pci_enable_device(pdev);
	if (error)
		goto out;
	error = -ENODEV;
	/* the adapter DMAs to/from 32-bit addresses at most */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		goto out_disable_pdev;
	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(31)) ||
				pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)))
			goto out_disable_pdev;
	pci_set_master(pdev);
	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
	if (!shost)
		goto out_disable_pdev;
	shost->irq = pdev->irq;
	shost->base = pci_resource_start(pdev, 0);
	shost->unique_id = unique_id;
	shost->max_cmd_len = 16;
	aac = (struct aac_dev *)shost->hostdata;
	aac->scsi_host_ptr = shost;
	aac->pdev = pdev;
	aac->name = aac_driver_template.name;
	aac->id = shost->unique_id;
	aac->cardtype = index;
	INIT_LIST_HEAD(&aac->entry);
	/* one fib per host command plus the management fibs */
	aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
	if (!aac->fibs)
		goto out_free_host;
	spin_lock_init(&aac->fib_lock);
	/*
	 * Map in the registers from the adapter.
	 */
	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
	/* card-specific init: maps registers, sets up comm area and irq */
	if ((*aac_drivers[index].init)(aac))
		goto out_unmap;
	/*
	 * Start any kernel threads needed
	 */
	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
	if (IS_ERR(aac->thread)) {
		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
		error = PTR_ERR(aac->thread);
		goto out_deinit;
	}
	/*
	 * If we had set a smaller DMA mask earlier, set it to 4gig
	 * now since the adapter can dma data to at least a 4gig
	 * address space.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
			goto out_deinit;
	aac->maximum_num_channels = aac_drivers[index].channels;
	error = aac_get_adapter_info(aac);
	if (error < 0)
		goto out_deinit;
	/*
	 * Lets override negotiations and drop the maximum SG limit to 34
	 */
	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
			(shost->sg_tablesize > 34)) {
		shost->sg_tablesize = 34;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}
	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
			(shost->sg_tablesize > 17)) {
		shost->sg_tablesize = 17;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}
	error = pci_set_dma_max_seg_size(pdev,
		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
			(shost->max_sectors << 9) : 65536);
	if (error)
		goto out_deinit;
	/*
	 * Firmware printf works only with older firmware.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
		aac->printf_enabled = 1;
	else
		aac->printf_enabled = 0;
	/*
	 * max channel will be the physical channels plus 1 virtual channel
	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support || expose_physicals || aac->jbod)
		shost->max_channel = aac->maximum_num_channels;
	else
		shost->max_channel = 0;
	aac_get_config_status(aac, 0);
	aac_get_containers(aac);
	/* visible on the global list from this point on */
	list_add(&aac->entry, insert);
	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < aac->maximum_num_physicals)
		shost->max_id = aac->maximum_num_physicals;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;
	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;
	pci_set_drvdata(pdev, shost);
	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;
	scsi_scan_host(shost);
	return 0;
 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
		  aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
/* PCI shutdown hook: stop new requests, then quiesce the adapter */
static void aac_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);

	scsi_block_requests(host);
	__aac_shutdown((struct aac_dev *)host->hostdata);
}
/*
 * aac_remove_one - unbind the driver from one adapter
 * @pdev: PCI device being removed
 *
 * Mirrors aac_probe_one: unregisters the SCSI host, quiesces the
 * adapter and frees resources in reverse order of allocation.
 */
static void __devexit aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
	scsi_remove_host(shost);
	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
			aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	/* drop the control device when the last adapter goes away */
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		aac_cfg_major = -1;
	}
}
/* PCI glue: probe/remove/shutdown hooks for all supported boards */
static struct pci_driver aac_pci_driver = {
	.name		= AAC_DRIVERNAME,
	.id_table	= aac_pci_tbl,
	.probe		= aac_probe_one,
	.remove		= __devexit_p(aac_remove_one),
	.shutdown	= aac_shutdown,
};
/* module init: register the PCI driver, then the management device */
static int __init aac_init(void)
{
	int ret;

	printk(KERN_INFO "Adaptec %s driver %s\n",
	       AAC_DRIVERNAME, aac_driver_version);

	ret = pci_register_driver(&aac_pci_driver);
	if (ret < 0)
		return ret;

	/* the control device is optional: warn but keep the driver loaded */
	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
	if (aac_cfg_major < 0)
		printk(KERN_WARNING
		       "aacraid: unable to register \"aac\" device.\n");

	return 0;
}
/* module exit: tear down in the reverse order of aac_init() */
static void __exit aac_exit(void)
{
	/* only unregister the control device if it was ever registered */
	if (aac_cfg_major >= 0)
		unregister_chrdev(aac_cfg_major, "aac");
	pci_unregister_driver(&aac_pci_driver);
}
/* module entry and exit points */
module_init(aac_init);
module_exit(aac_exit);
| gpl-2.0 |
jabez1314/linux | drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c | 743 | 10706 | /*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include <asm/div64.h>
#include <linux/gcd.h>
/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for numerator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both numerator and
 * denominator, but make sure numerator and denominator are at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned divisor = gcd(*nom, *den);
	unsigned scale;

	/* divide out the greatest common divisor */
	*nom /= divisor;
	*den /= divisor;

	/* scale both back up if the numerator fell below its minimum */
	if (*nom < nom_min) {
		scale = DIV_ROUND_UP(nom_min, *nom);
		*nom *= scale;
		*den *= scale;
	}

	/* likewise for the denominator */
	if (*den < den_min) {
		scale = DIV_ROUND_UP(den_min, *den);
		*nom *= scale;
		*den *= scale;
	}
}
/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider,
 * keeping both within their hardware limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	unsigned ref, fb;

	/* keep reference * post divider below the limit of 128 */
	if (ref_div_max > 128 / post_div)
		ref_div_max = 128 / post_div;

	/* reference divider closest to den / post_div, clamped to range */
	ref = DIV_ROUND_CLOSEST(den, post_div);
	if (ref < 1u)
		ref = 1u;
	if (ref > ref_div_max)
		ref = ref_div_max;

	/* derive the matching feedback divider */
	fb = DIV_ROUND_CLOSEST(nom * ref * post_div, den);

	/* if the feedback divider overflows, rescale the reference divider */
	if (fb > fb_div_max) {
		ref = DIV_ROUND_CLOSEST(ref * fb_div_max, fb);
		fb = fb_div_max;
	}

	*fb_div = fb;
	*ref_div = ref;
}
/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	/* with a fractional fb divider everything is scaled by 10 */
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;
	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;
	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}
	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;
	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;
		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}
		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}
		/* post dividers that keep the VCO inside [vco_min, vco_max] */
		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;
		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}
	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;
	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;
	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));
		/* on a tie, MINM_OVER_MAXP keeps the first (smallest) hit */
		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {
			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;
	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);
	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}
	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}
	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;
	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *other;
	u32 pll_in_use = 0;

	list_for_each_entry(other, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(other);

		/* skip the crtc we are asking for */
		if (other == crtc)
			continue;
		if (amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= 1 << amdgpu_crtc->pll_id;
	}
	return pll_in_use;
}
/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode. For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *other;

	list_for_each_entry(other, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc;

		if (other == crtc)
			continue;
		amdgpu_crtc = to_amdgpu_crtc(other);
		if (!amdgpu_crtc->encoder)
			continue;
		if (!ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
			continue;
		/* all DP outputs can share a single PPLL */
		if (amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			return amdgpu_crtc->pll_id;
	}
	return ATOM_PPLL_INVALID;
}
/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;
	adjusted_clock = amdgpu_crtc->adjusted_clock;
	/* without a computed clock there is nothing to compare against */
	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;
	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			/* share only when mode clock, adjusted clock and
			 * spread-spectrum setting all match */
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
| gpl-2.0 |
telf/error_state_capture_improvement | drivers/spi/spi-fsl-cpm.c | 743 | 10348 | /*
* Freescale SPI controller driver cpm functions.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/cpm.h>
#include <asm/qe.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif
#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
/* SPCOM register values */
#define SPCOM_STR (1 << 23) /* Start transmit */
#define SPI_PRAM_SIZE 0x100
#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
/*
 * Reinitialise the CPM/QE SPI TX and RX state machines so that the
 * controller is ready for a fresh transfer.
 */
void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
	if (mspi->flags & SPI_QE) {
		/* QE: let the microcode reinit both directions */
		qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	} else if (mspi->flags & SPI_CPM1) {
		/* CPM1: reset the parameter RAM state machines by hand */
		out_be32(&mspi->pram->rstate, 0);
		out_be16(&mspi->pram->rbptr, in_be16(&mspi->pram->rbase));
		out_be32(&mspi->pram->tstate, 0);
		out_be16(&mspi->pram->tbptr, in_be16(&mspi->pram->tbase));
	} else {
		/* CPM2: a single INIT_TRX command does the job */
		cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
	}
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
unsigned int xfer_ofs;
struct fsl_spi_reg *reg_base = mspi->reg_base;
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
if (mspi->rx_dma == mspi->dma_dummy_rx)
out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
else
out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
out_be16(&rx_bd->cbd_datlen, 0);
out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
if (mspi->tx_dma == mspi->dma_dummy_tx)
out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
else
out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
out_be16(&tx_bd->cbd_datlen, xfer_len);
out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
BD_SC_LAST);
/* start transfer */
mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR);
}
/*
 * Set up DMA mappings for a transfer and kick off the CPM engine.
 *
 * Directions without a buffer fall back to the shared dummy buffers.
 * Returns 0 on success or -ENOMEM if a DMA mapping failed.
 */
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
		     struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	/* Map ourselves only if the SPI core hasn't already done it */
	mspi->map_tx_dma = !is_dma_mapped;
	mspi->map_rx_dma = !is_dma_mapped;

	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}
	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	fsl_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
/* Tear down the DMA mappings set up by fsl_spi_cpm_bufs(). */
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct spi_transfer *xfer = mspi->xfer_in_progress;

	/* Only unmap what we mapped ourselves in fsl_spi_cpm_bufs() */
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, xfer->len, DMA_TO_DEVICE);
	if (mspi->map_rx_dma)
		dma_unmap_single(dev, mspi->rx_dma, xfer->len, DMA_FROM_DEVICE);

	mspi->xfer_in_progress = NULL;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
/*
 * Interrupt handler: account for the bytes received in the RX buffer
 * descriptor, then either restart the engine for the next chunk or
 * complete the transfer.
 */
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	struct fsl_spi_reg *reg_base = mspi->reg_base;
	u16 rx_len = in_be16(&mspi->rx_bd->cbd_datlen);

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		rx_len, mspi->count);

	/* The BD should never report more data than is outstanding */
	if (rx_len > mspi->count) {
		WARN_ON(1);
		rx_len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= rx_len;
	if (mspi->count)
		fsl_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
/*
 * Get a reference on the shared dummy RX buffer, allocating it on
 * first use. Returns the buffer, or NULL if allocation failed.
 */
static void *fsl_spi_alloc_dummy_rx(void)
{
	void *buf;

	mutex_lock(&fsl_dummy_rx_lock);

	if (!fsl_dummy_rx)
		fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (fsl_dummy_rx)
		fsl_dummy_rx_refcnt++;
	buf = fsl_dummy_rx;

	mutex_unlock(&fsl_dummy_rx_lock);

	return buf;
}
/*
 * Drop a reference on the shared dummy RX buffer, freeing it when the
 * last user goes away. Warns on an unbalanced free.
 */
static void fsl_spi_free_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	if (fsl_dummy_rx_refcnt == 0) {
		WARN_ON(1);	/* free without matching alloc */
	} else {
		if (fsl_dummy_rx_refcnt == 1) {
			kfree(fsl_dummy_rx);
			fsl_dummy_rx = NULL;
		}
		fsl_dummy_rx_refcnt--;
	}

	mutex_unlock(&fsl_dummy_rx_lock);
}
/*
 * Locate or allocate the SPI parameter RAM for this controller.
 *
 * Returns the muram offset of the parameter RAM on success, or a
 * negative errno cast to unsigned long (check with IS_ERR_VALUE()).
 *
 * Fix: the result of cpm_muram_alloc() was previously used unchecked,
 * so on allocation failure an error cookie was handed to the QE
 * firmware (qe_issue_cmd) or written to hardware (out_be16).
 */
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	void __iomem *spi_base;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		/* don't hand an allocation-error cookie to the QE firmware */
		if (IS_ERR_VALUE(pram_ofs))
			return pram_ofs;
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	spi_base = of_iomap(np, 1);
	if (spi_base == NULL)
		return -EINVAL;

	if (mspi->flags & SPI_CPM2) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		/* only program the base register with a valid offset */
		if (!IS_ERR_VALUE(pram_ofs))
			out_be16(spi_base, pram_ofs);
	}

	iounmap(spi_base);
	return pram_ofs;
}
/*
 * fsl_spi_cpm_init - one-time setup of CPM/QE resources for SPI DMA
 *
 * Allocates the shared dummy RX buffer, locates/allocates the SPI
 * parameter RAM, allocates a pair of buffer descriptors from muram,
 * maps the dummy TX/RX buffers for DMA and programs the parameter RAM.
 *
 * Returns 0 on success or -ENOMEM on any failure (resources acquired
 * so far are unwound via the goto labels at the bottom).
 */
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long bds_ofs;

	/* nothing to do unless this controller uses CPM/QE mode */
	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!fsl_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		/* map the DT cell-index onto the QE sub-block id */
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	if (mspi->flags & SPI_CPM1) {
		/* CPM1: parameter RAM is a plain MMIO resource, not muram */
		struct resource *res;
		void *pram;

		res = platform_get_resource(to_platform_device(dev),
					    IORESOURCE_MEM, 1);
		pram = devm_ioremap_resource(dev, res);
		if (IS_ERR(pram))
			mspi->pram = NULL;
		else
			mspi->pram = pram;
	} else {
		/* CPM2/QE: parameter RAM lives in muram */
		unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);

		if (IS_ERR_VALUE(pram_ofs))
			mspi->pram = NULL;
		else
			mspi->pram = cpm_muram_addr(pram_ofs);
	}
	if (mspi->pram == NULL) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	/* one TX and one RX buffer descriptor, allocated together */
	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	/* zero page serves as the dummy TX source */
	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	/* CPM1 pram came from devm_ioremap_resource(), not muram */
	if (!(mspi->flags & SPI_CPM1))
		cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
	fsl_spi_free_dummy_rx();
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
if (!(mspi->flags & SPI_CPM_MODE))
return;
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
MODULE_LICENSE("GPL");
| gpl-2.0 |
holyangel/M9 | drivers/platform/msm/msm_bus/msm_bus_board_8974.c | 999 | 50433 | /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include "msm_bus_core.h"
#include "msm_bus_noc.h"
#include "msm_bus_bimc.h"
#define NMASTERS 120
#define NSLAVES 150
#define NFAB_8974 7
/*
 * Physical master port indices on each 8974 fabric. Each fabric (SNOC,
 * MNOC, BIMC, ONOC, PNOC, CNOC) has its own port address space, which
 * is why the numbering restarts at 0 within each section.
 */
enum msm_bus_8974_master_ports_type {
	/* System NOC Masters */
	MASTER_PORT_LPASS_AHB = 0,
	MASTER_PORT_QDSS_BAM,
	MASTER_PORT_SNOC_CFG,
	MASTER_PORT_GW_BIMC_SNOC,
	MASTER_PORT_GW_CNOC_SNOC,
	MASTER_PORT_CRYPTO_CORE0,
	MASTER_PORT_CRYPTO_CORE1,
	MASTER_PORT_LPASS_PROC,
	MASTER_PORT_MSS,
	MASTER_PORT_MSS_NAV,
	MASTER_PORT_OCMEM_DMA,
	MASTER_PORT_GW_PNOC_SNOC,
	MASTER_PORT_WCSS,
	MASTER_PORT_QDSS_ETR,
	MASTER_PORT_USB3,

	/* MMSS NOC Masters */
	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0,
	MASTER_PORT_GW_CNOC_MNOC_CFG,
	MASTER_PORT_GFX3D_PORT0,
	MASTER_PORT_GFX3D_PORT1,
	MASTER_PORT_JPEG,
	MASTER_PORT_MDP,
	/* Venus video core */
	MASTER_PORT_VIDEO_PORT0,
	MASTER_PORT_VIDEO_PORT1,
	MASTER_PORT_VFE = 16,

	/* BIMC Masters */
	MASTER_PORT_KMPSS_M0 = 0,
	MASTER_PORT_KMPSS_M1,
	MASTER_PORT_MSS_PROC,
	MASTER_PORT_GW_MNOC_BIMC_0,
	MASTER_PORT_GW_MNOC_BIMC_1,
	MASTER_PORT_GW_SNOC_BIMC_0,
	MASTER_PORT_GW_SNOC_BIMC_1,

	/* OCMEM NOC Masters */
	MASTER_PORT_CNOC_ONOC_CFG = 0,
	MASTER_PORT_JPEG_OCMEM,
	MASTER_PORT_MDP_OCMEM,
	MASTER_PORT_VIDEO_P0_OCMEM,
	MASTER_PORT_VIDEO_P1_OCMEM,
	MASTER_PORT_VFE_OCMEM,

	/* Peripheral NOC Masters */
	MASTER_PORT_SDCC_1 = 0,
	MASTER_PORT_SDCC_3,
	MASTER_PORT_SDCC_2,
	MASTER_PORT_SDCC_4,
	MASTER_PORT_TSIF,
	MASTER_PORT_BAM_DMA,
	MASTER_PORT_BLSP_2,
	MASTER_PORT_USB_HSIC,
	MASTER_PORT_BLSP_1,
	MASTER_PORT_USB_HS,
	MASTER_PORT_PNOC_CFG,
	MASTER_PORT_GW_SNOC_PNOC,

	/* Config NOC Masters */
	MASTER_PORT_RPM_INST = 0,
	MASTER_PORT_RPM_DATA,
	MASTER_PORT_RPM_SYS,
	MASTER_PORT_DEHR,
	MASTER_PORT_QDSS_DAP,
	MASTER_PORT_SPDM,
	MASTER_PORT_TIC,
	MASTER_PORT_GW_SNOC_CNOC,
};
/*
 * Physical slave port indices on each 8974 fabric. As with the master
 * ports, numbering restarts per fabric; gaps (explicit initializers)
 * reflect unused/reserved port slots in the hardware.
 */
enum msm_bus_8974_slave_ports_type {
	/* System NOC Slaves */
	SLAVE_PORT_KMPSS = 1,
	SLAVE_PORT_LPASS,
	SLAVE_PORT_USB3 = 4,
	SLAVE_PORT_WCSS = 6,
	SLAVE_PORT_GW_SNOC_BIMC_P0,
	SLAVE_PORT_GW_SNOC_BIMC_P1,
	SLAVE_PORT_GW_SNOC_CNOC,
	SLAVE_PORT_OCIMEM,
	SLAVE_PORT_SNOC_OCMEM,
	SLAVE_PORT_GW_SNOC_PNOC,
	SLAVE_PORT_SERVICE_SNOC,
	SLAVE_PORT_QDSS_STM,

	/* MMSS NOC Slaves */
	SLAVE_PORT_CAMERA_CFG = 0,
	SLAVE_PORT_DISPLAY_CFG,
	SLAVE_PORT_OCMEM_CFG,
	SLAVE_PORT_CPR_CFG,
	SLAVE_PORT_CPR_XPU_CFG,
	SLAVE_PORT_MISC_CFG = 6,
	SLAVE_PORT_MISC_XPU_CFG,
	SLAVE_PORT_VENUS_CFG,
	SLAVE_PORT_GFX3D_CFG,
	SLAVE_PORT_MMSS_CLK_CFG = 11,
	SLAVE_PORT_MMSS_CLK_XPU_CFG,
	SLAVE_PORT_MNOC_MPU_CFG,
	SLAVE_PORT_ONOC_MPU_CFG,
	SLAVE_PORT_GW_MMSS_BIMC_P0 = 16,
	SLAVE_PORT_GW_MMSS_BIMC_P1,
	SLAVE_PORT_SERVICE_MNOC,

	/* BIMC Slaves */
	SLAVE_PORT_EBI1_CH0 = 0,
	SLAVE_PORT_EBI1_CH1,
	SLAVE_PORT_KMPSS_L2,
	SLAVE_PORT_GW_BIMC_SNOC,

	/* OCMEM NOC Slaves */
	SLAVE_PORT_OCMEM_P0 = 0,
	SLAVE_PORT_OCMEM_P1,
	SLAVE_PORT_SERVICE_ONOC,

	/*Peripheral NOC Slaves */
	SLAVE_PORT_SDCC_1 = 0,
	SLAVE_PORT_SDCC_3,
	SLAVE_PORT_SDCC_2,
	SLAVE_PORT_SDCC_4,
	SLAVE_PORT_TSIF,
	SLAVE_PORT_BAM_DMA,
	SLAVE_PORT_BLSP_2,
	SLAVE_PORT_USB_HSIC,
	SLAVE_PORT_BLSP_1,
	SLAVE_PORT_USB_HS,
	SLAVE_PORT_PDM,
	SLAVE_PORT_PERIPH_APU_CFG,
	SLAVE_PORT_PNOC_MPU_CFG,
	SLAVE_PORT_PRNG,
	SLAVE_PORT_GW_PNOC_SNOC,
	SLAVE_PORT_SERVICE_PNOC,

	/* Config NOC slaves */
	SLAVE_PORT_CLK_CTL = 1,
	SLAVE_PORT_CNOC_MSS,
	SLAVE_PORT_SECURITY,
	SLAVE_PORT_TCSR,
	SLAVE_PORT_TLMM,
	SLAVE_PORT_CRYPTO_0_CFG,
	SLAVE_PORT_CRYPTO_1_CFG,
	SLAVE_PORT_IMEM_CFG,
	SLAVE_PORT_MESSAGE_RAM,
	SLAVE_PORT_BIMC_CFG,
	SLAVE_PORT_BOOT_ROM,
	SLAVE_PORT_CNOC_MNOC_MMSS_CFG,
	SLAVE_PORT_PMIC_ARB,
	SLAVE_PORT_SPDM_WRAPPER,
	SLAVE_PORT_DEHR_CFG,
	SLAVE_PORT_MPM,
	SLAVE_PORT_QDSS_CFG,
	SLAVE_PORT_RBCPR_CFG,
	SLAVE_PORT_RBCPR_QDSS_APU_CFG,
	SLAVE_PORT_CNOC_MNOC_CFG,
	SLAVE_PORT_SNOC_MPU_CFG,
	SLAVE_PORT_CNOC_ONOC_CFG,
	SLAVE_PORT_PNOC_CFG,
	SLAVE_PORT_SNOC_CFG,
	SLAVE_PORT_EBI1_DLL_CFG,
	SLAVE_PORT_PHY_APU_CFG,
	SLAVE_PORT_EBI1_PHY_CFG,
	SLAVE_PORT_RPM,
	SLAVE_PORT_GW_CNOC_SNOC,
	SLAVE_PORT_SERVICE_CNOC,
};
/* Hardware IDs for RPM */
/* Master hardware IDs as understood by the RPM firmware (single flat
 * numbering across all fabrics, unlike the per-fabric port enums). */
enum msm_bus_8974_mas_hw_id {
	MAS_APPSS_PROC = 0,
	MAS_AMSS_PROC,
	MAS_MNOC_BIMC,
	MAS_SNOC_BIMC,
	MAS_CNOC_MNOC_MMSS_CFG,
	MAS_CNOC_MNOC_CFG,
	MAS_GFX3D,
	MAS_JPEG,
	MAS_MDP,
	MAS_VIDEO_P0,
	MAS_VIDEO_P1,
	MAS_VFE,
	MAS_CNOC_ONOC_CFG,
	MAS_JPEG_OCMEM,
	MAS_MDP_OCMEM,
	MAS_VIDEO_P0_OCMEM,
	MAS_VIDEO_P1_OCMEM,
	MAS_VFE_OCMEM,
	MAS_LPASS_AHB,
	MAS_QDSS_BAM,
	MAS_SNOC_CFG,
	MAS_BIMC_SNOC,
	MAS_CNOC_SNOC,
	MAS_CRYPTO_CORE0,
	MAS_CRYPTO_CORE1,
	MAS_LPASS_PROC,
	MAS_MSS,
	MAS_MSS_NAV,
	MAS_OCMEM_DMA,
	MAS_PNOC_SNOC,
	MAS_WCSS,
	MAS_QDSS_ETR,
	MAS_USB3,
	MAS_SDCC_1,
	MAS_SDCC_3,
	MAS_SDCC_2,
	MAS_SDCC_4,
	MAS_TSIF,
	MAS_BAM_DMA,
	MAS_BLSP_2,
	MAS_USB_HSIC,
	MAS_BLSP_1,
	MAS_USB_HS,
	MAS_PNOC_CFG,
	MAS_SNOC_PNOC,
	MAS_RPM_INST,
	MAS_RPM_DATA,
	MAS_RPM_SYS,
	MAS_DEHR,
	MAS_QDSS_DAP,
	MAS_SPDM,
	MAS_TIC,
	MAS_SNOC_CNOC,
	MAS_OVNOC_SNOC,
	MAS_OVNOC_ONOC,
	MAS_V_OCMEM_GFX3D,
	MAS_ONOC_OVNOC,
	MAS_SNOC_OVNOC,
};
/* Slave hardware IDs as understood by the RPM firmware (single flat
 * numbering across all fabrics). */
enum msm_bus_8974_slv_hw_id {
	SLV_EBI = 0,
	SLV_APSS_L2,
	SLV_BIMC_SNOC,
	SLV_CAMERA_CFG,
	SLV_DISPLAY_CFG,
	SLV_OCMEM_CFG,
	SLV_CPR_CFG,
	SLV_CPR_XPU_CFG,
	SLV_MISC_CFG,
	SLV_MISC_XPU_CFG,
	SLV_VENUS_CFG,
	SLV_GFX3D_CFG,
	SLV_MMSS_CLK_CFG,
	SLV_MMSS_CLK_XPU_CFG,
	SLV_MNOC_MPU_CFG,
	SLV_ONOC_MPU_CFG,
	SLV_MMSS_BIMC,
	SLV_SERVICE_MNOC,
	SLV_OCMEM,
	SLV_SERVICE_ONOC,
	SLV_APPSS,
	SLV_LPASS,
	SLV_USB3,
	SLV_WCSS,
	SLV_SNOC_BIMC,
	SLV_SNOC_CNOC,
	SLV_OCIMEM,
	SLV_SNOC_OCMEM,
	SLV_SNOC_PNOC,
	SLV_SERVICE_SNOC,
	SLV_QDSS_STM,
	SLV_SDCC_1,
	SLV_SDCC_3,
	SLV_SDCC_2,
	SLV_SDCC_4,
	SLV_TSIF,
	SLV_BAM_DMA,
	SLV_BLSP_2,
	SLV_USB_HSIC,
	SLV_BLSP_1,
	SLV_USB_HS,
	SLV_PDM,
	SLV_PERIPH_APU_CFG,
	SLV_MPU_CFG,
	SLV_PRNG,
	SLV_PNOC_SNOC,
	SLV_SERVICE_PNOC,
	SLV_CLK_CTL,
	SLV_CNOC_MSS,
	SLV_SECURITY,
	SLV_TCSR,
	SLV_TLMM,
	SLV_CRYPTO_0_CFG,
	SLV_CRYPTO_1_CFG,
	SLV_IMEM_CFG,
	SLV_MESSAGE_RAM,
	SLV_BIMC_CFG,
	SLV_BOOT_ROM,
	SLV_CNOC_MNOC_MMSS_CFG,
	SLV_PMIC_ARB,
	SLV_SPDM_WRAPPER,
	SLV_DEHR_CFG,
	SLV_MPM,
	SLV_QDSS_CFG,
	SLV_RBCPR_CFG,
	SLV_RBCPR_QDSS_APU_CFG,
	SLV_CNOC_MNOC_CFG,
	SLV_SNOC_MPU_CFG,
	SLV_CNOC_ONOC_CFG,
	SLV_PNOC_CFG,
	SLV_SNOC_CFG,
	SLV_EBI1_DLL_CFG,
	SLV_PHY_APU_CFG,
	SLV_EBI1_PHY_CFG,
	SLV_RPM,
	SLV_CNOC_SNOC,
	SLV_SERVICE_CNOC,
	SLV_SNOC_OVNOC,
	SLV_ONOC_OVNOC,
	SLV_OVNOC_ONOC,
	SLV_OVNOC_SNOC,
};
/* Runtime-assigned instance IDs, filled in during fabric registration */
static uint32_t master_iids[NMASTERS];
static uint32_t slave_iids[NSLAVES];

/* Per-node physical port lists. Most nodes use a single port; the
 * multi-entry arrays describe dual-ported gateways/memories. */

/* System NOC nodes */
static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,};
static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,};
static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,};
static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,};
static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,};
static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,};
static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1};
static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC};
static int mport_mss[] = {MASTER_PORT_MSS};
static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV};
static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA};
static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC};
static int mport_wcss[] = {MASTER_PORT_WCSS};
static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR};
static int mport_usb3[] = {MASTER_PORT_USB3};

static int sport_kmpss[] = {SLAVE_PORT_KMPSS};
static int sport_lpass[] = {SLAVE_PORT_LPASS};
static int sport_usb3[] = {SLAVE_PORT_USB3};
static int sport_wcss[] = {SLAVE_PORT_WCSS};
static int sport_gw_snoc_bimc[] = {
	SLAVE_PORT_GW_SNOC_BIMC_P0,
	SLAVE_PORT_GW_SNOC_BIMC_P1,
};
static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC};
static int sport_ocimem[] = {SLAVE_PORT_OCIMEM};
static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM};
static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC};
static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC};
static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM};

/* MMSS NOC nodes */
static int mport_gw_cnoc_mnoc_cfg[] = {
	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG,
	MASTER_PORT_GW_CNOC_MNOC_CFG,
};
static int mport_gfx3d[] = {
	MASTER_PORT_GFX3D_PORT0,
	MASTER_PORT_GFX3D_PORT1,
};
static int mport_jpeg[] = {MASTER_PORT_JPEG};
static int mport_mdp[] = {MASTER_PORT_MDP};
static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0};
static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1};
static int mport_vfe[] = {MASTER_PORT_VFE};

static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG};
static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG};
static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG};
static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG};
static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,};
static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG};
static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG};
static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG};
static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG};
static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG};
static int sport_mmss_clk_xpu_cfg[] = {
	SLAVE_PORT_MMSS_CLK_XPU_CFG
};
static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG};
static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG};
static int sport_gw_mmss_bimc[] = {
	SLAVE_PORT_GW_MMSS_BIMC_P0,
	SLAVE_PORT_GW_MMSS_BIMC_P1,
};
static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC};

/* BIMC Nodes */
static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,};
static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1};
static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC};
static int mport_gw_mnoc_bimc[] = {
	MASTER_PORT_GW_MNOC_BIMC_0,
	MASTER_PORT_GW_MNOC_BIMC_1,
};
static int mport_gw_snoc_bimc[] = {
	MASTER_PORT_GW_SNOC_BIMC_0,
	MASTER_PORT_GW_SNOC_BIMC_1,
};

static int sport_ebi1[] = {
	SLAVE_PORT_EBI1_CH0,
	SLAVE_PORT_EBI1_CH1,
};
static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,};
static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,};

/* OCMEM NOC Nodes */
static int mport_cnoc_onoc_cfg[] = {
	MASTER_PORT_CNOC_ONOC_CFG,
};
static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,};
static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,};
static int mport_video_p0_ocmem[] = {
	MASTER_PORT_VIDEO_P0_OCMEM,
};
static int mport_video_p1_ocmem[] = {
	MASTER_PORT_VIDEO_P1_OCMEM,
};
static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,};

static int sport_ocmem[] = {
	SLAVE_PORT_OCMEM_P0,
	SLAVE_PORT_OCMEM_P1,
};
static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,};

/* Peripheral NOC Nodes */
static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,};
static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,};
static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,};
static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,};
static int mport_tsif[] = {MASTER_PORT_TSIF,};
static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,};
static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,};
static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,};
static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,};
static int mport_usb_hs[] = {MASTER_PORT_USB_HS,};
static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,};
static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,};

static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,};
static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,};
static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,};
static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,};
static int sport_tsif[] = {SLAVE_PORT_TSIF,};
static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,};
static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,};
static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,};
static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,};
static int sport_usb_hs[] = {SLAVE_PORT_USB_HS,};
static int sport_pdm[] = {SLAVE_PORT_PDM,};
static int sport_periph_apu_cfg[] = {
	SLAVE_PORT_PERIPH_APU_CFG,
};
static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,};
static int sport_prng[] = {SLAVE_PORT_PRNG,};
static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,};
static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,};

/* Config NOC Nodes */
static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,};
static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,};
static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,};
static int mport_dehr[] = {MASTER_PORT_DEHR,};
static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,};
static int mport_spdm[] = {MASTER_PORT_SPDM,};
static int mport_tic[] = {MASTER_PORT_TIC,};
static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,};

static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,};
static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,};
static int sport_security[] = {SLAVE_PORT_SECURITY,};
static int sport_tcsr[] = {SLAVE_PORT_TCSR,};
static int sport_tlmm[] = {SLAVE_PORT_TLMM,};
static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,};
static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,};
static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,};
static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,};
static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,};
static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,};
static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,};
static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,};
static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,};
static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,};
static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,};
static int sport_mpm[] = {SLAVE_PORT_MPM,};
static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,};
static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,};
static int sport_rbcpr_qdss_apu_cfg[] = {SLAVE_PORT_RBCPR_QDSS_APU_CFG,};
static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,};
static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,};
static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,};
static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,};
static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,};
static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,};
static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,};
static int sport_rpm[] = {SLAVE_PORT_RPM,};
static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,};
static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,};

static int tier2[] = {MSM_BUS_BW_TIER2,};

/*
 * QOS Ports defined only when qos ports are different than
 * master ports
 **/
static int qports_gemini[] = {0};
static int qports_mdp[] = {1};
static int qports_venus_p0[] = {4};
static int qports_venus_p1[] = {5};
static int qports_vfe[] = {6};
static int qports_gemini_ocmem[] = {0};
static int qports_venus_p0_ocmem[] = {2};
static int qports_venus_p1_ocmem[] = {3};
static int qports_vfe_ocmem[] = {4};
static int qports_crypto_c0[] = {2};
static int qports_crypto_c1[] = {3};
static int qports_lpass_proc[] = {4};
static int qports_ocmem_dma[] = {7};
static int qports_gw_snoc_bimc[] = {5, 6};
static int qports_kmpss[] = {0, 1};
static int qports_lpass_ahb[] = {0};
static int qports_qdss_bam[] = {1};
static int qports_gw_pnoc_snoc[] = {8};
static int qports_qdss_etr[] = {10};
static int qports_usb3[] = {11};
static int qports_oxili[] = {2, 3};
static int qports_gw_mnoc_bimc[] = {3, 4};
/*
 * System NOC topology table: one entry per master, slave or gateway
 * node attached to the SNOC. Gateway entries (.gateway = 1) bridge to
 * other fabrics and carry both master and slave ports.
 */
static struct msm_bus_node_info sys_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_LPASS_AHB,
		.masterp = mport_lpass_ahb,
		.num_mports = ARRAY_SIZE(mport_lpass_ahb),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_lpass_ahb,
		.mas_hw_id = MAS_LPASS_AHB,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	{
		.id = MSM_BUS_MASTER_QDSS_BAM,
		.masterp = mport_qdss_bam,
		.num_mports = ARRAY_SIZE(mport_qdss_bam),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_qdss_bam,
		.mas_hw_id = MAS_QDSS_BAM,
	},
	{
		.id = MSM_BUS_MASTER_SNOC_CFG,
		.masterp = mport_snoc_cfg,
		.num_mports = ARRAY_SIZE(mport_snoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_SNOC_CFG,
	},
	{
		.id = MSM_BUS_FAB_BIMC,
		.gateway = 1,
		.slavep = sport_gw_snoc_bimc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_bimc),
		.masterp = mport_gw_bimc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_bimc_snoc),
		.buswidth = 8,
		.mas_hw_id = MAS_BIMC_SNOC,
		.slv_hw_id = SLV_SNOC_BIMC,
	},
	{
		.id = MSM_BUS_FAB_CONFIG_NOC,
		.gateway = 1,
		.slavep = sport_gw_snoc_cnoc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc),
		.masterp = mport_gw_cnoc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc),
		.buswidth = 8,
		.mas_hw_id = MAS_CNOC_SNOC,
		.slv_hw_id = SLV_SNOC_CNOC,
	},
	{
		.id = MSM_BUS_FAB_PERIPH_NOC,
		.gateway = 1,
		.slavep = sport_gw_snoc_pnoc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc),
		.masterp = mport_gw_pnoc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc),
		.buswidth = 8,
		.qport = qports_gw_pnoc_snoc,
		.mas_hw_id = MAS_PNOC_SNOC,
		.slv_hw_id = SLV_SNOC_PNOC,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	{
		.id = MSM_BUS_FAB_OCMEM_VNOC,
		.gateway = 1,
		.buswidth = 8,
		.mas_hw_id = MAS_OVNOC_SNOC,
		.slv_hw_id = SLV_SNOC_OVNOC,
	},
	{
		.id = MSM_BUS_MASTER_CRYPTO_CORE0,
		.masterp = mport_crypto_core0,
		.num_mports = ARRAY_SIZE(mport_crypto_core0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_crypto_c0,
		.mas_hw_id = MAS_CRYPTO_CORE0,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_CRYPTO_CORE1,
		.masterp = mport_crypto_core1,
		.num_mports = ARRAY_SIZE(mport_crypto_core1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_crypto_c1,
		.mas_hw_id = MAS_CRYPTO_CORE1,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_LPASS_PROC,
		.masterp = mport_lpass_proc,
		.num_mports = ARRAY_SIZE(mport_lpass_proc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_lpass_proc,
		.mas_hw_id = MAS_LPASS_PROC,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	{
		.id = MSM_BUS_MASTER_MSS,
		.masterp = mport_mss,
		.num_mports = ARRAY_SIZE(mport_mss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_MSS,
	},
	{
		.id = MSM_BUS_MASTER_MSS_NAV,
		.masterp = mport_mss_nav,
		.num_mports = ARRAY_SIZE(mport_mss_nav),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_MSS_NAV,
	},
	{
		.id = MSM_BUS_MASTER_OCMEM_DMA,
		.masterp = mport_ocmem_dma,
		.num_mports = ARRAY_SIZE(mport_ocmem_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_ocmem_dma,
		.mas_hw_id = MAS_OCMEM_DMA,
	},
	{
		.id = MSM_BUS_MASTER_WCSS,
		.masterp = mport_wcss,
		.num_mports = ARRAY_SIZE(mport_wcss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_WCSS,
	},
	{
		.id = MSM_BUS_MASTER_QDSS_ETR,
		.masterp = mport_qdss_etr,
		.num_mports = ARRAY_SIZE(mport_qdss_etr),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_qdss_etr,
		.mode = NOC_QOS_MODE_FIXED,
		.mas_hw_id = MAS_QDSS_ETR,
	},
	{
		.id = MSM_BUS_MASTER_USB3,
		.masterp = mport_usb3,
		.num_mports = ARRAY_SIZE(mport_usb3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_usb3,
		.mas_hw_id = MAS_USB3,
		.prio_rd = 2,
		.prio_wr = 2,
		.hw_sel = MSM_BUS_NOC,
		.iface_clk_node = "msm_usb3",
	},
	{
		.id = MSM_BUS_SLAVE_AMPSS,
		.slavep = sport_kmpss,
		.num_sports = ARRAY_SIZE(sport_kmpss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_APPSS,
	},
	{
		.id = MSM_BUS_SLAVE_LPASS,
		.slavep = sport_lpass,
		.num_sports = ARRAY_SIZE(sport_lpass),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_LPASS,
	},
	{
		.id = MSM_BUS_SLAVE_USB3,
		.slavep = sport_usb3,
		.num_sports = ARRAY_SIZE(sport_usb3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB3,
	},
	{
		.id = MSM_BUS_SLAVE_WCSS,
		.slavep = sport_wcss,
		.num_sports = ARRAY_SIZE(sport_wcss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_WCSS,
	},
	{
		.id = MSM_BUS_SLAVE_OCIMEM,
		.slavep = sport_ocimem,
		.num_sports = ARRAY_SIZE(sport_ocimem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_OCIMEM,
	},
	{
		.id = MSM_BUS_SLAVE_SNOC_OCMEM,
		.slavep = sport_snoc_ocmem,
		.num_sports = ARRAY_SIZE(sport_snoc_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SNOC_OCMEM,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_SNOC,
		.slavep = sport_service_snoc,
		.num_sports = ARRAY_SIZE(sport_service_snoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SERVICE_SNOC,
	},
	{
		.id = MSM_BUS_SLAVE_QDSS_STM,
		.slavep = sport_qdss_stm,
		.num_sports = ARRAY_SIZE(sport_qdss_stm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_QDSS_STM,
	},
};
/*
 * Node table for the MMSS (multimedia subsystem) NoC fabric; registered
 * via msm_bus_8974_mmss_noc_pdata below.  The multimedia masters (GPU,
 * JPEG, MDP, Venus video ports, VFE) all use NoC QoS bypass mode with a
 * window size of 10000; two gateway entries connect this fabric to the
 * config NoC and to BIMC.
 */
static struct msm_bus_node_info mmss_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_GRAPHICS_3D,
		.masterp = mport_gfx3d,
		.num_mports = ARRAY_SIZE(mport_gfx3d),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_oxili,
		.mas_hw_id = MAS_GFX3D,
	},
	{
		.id = MSM_BUS_MASTER_JPEG,
		.masterp = mport_jpeg,
		.num_mports = ARRAY_SIZE(mport_jpeg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.qport = qports_gemini,
		.ws = 10000,
		.mas_hw_id = MAS_JPEG,
	},
	{
		.id = MSM_BUS_MASTER_MDP_PORT0,
		.masterp = mport_mdp,
		.num_mports = ARRAY_SIZE(mport_mdp),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.qport = qports_mdp,
		.ws = 10000,
		.mas_hw_id = MAS_MDP,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P0,
		.masterp = mport_video_port0,
		.num_mports = ARRAY_SIZE(mport_video_port0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_venus_p0,
		.mas_hw_id = MAS_VIDEO_P0,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P1,
		.masterp = mport_video_port1,
		.num_mports = ARRAY_SIZE(mport_video_port1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_venus_p1,
		.mas_hw_id = MAS_VIDEO_P1,
	},
	{
		.id = MSM_BUS_MASTER_VFE,
		.masterp = mport_vfe,
		.num_mports = ARRAY_SIZE(mport_vfe),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_vfe,
		.mas_hw_id = MAS_VFE,
	},
	{
		/* Gateway from the config NoC (per the cnoc->mnoc port name). */
		.id = MSM_BUS_FAB_CONFIG_NOC,
		.gateway = 1,
		.masterp = mport_gw_cnoc_mnoc_cfg,
		.num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg),
		.buswidth = 16,
		.hw_sel = MSM_BUS_RPM,
		.mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG,
	},
	{
		/* Gateway towards BIMC (MMSS -> BIMC slave port). */
		.id = MSM_BUS_FAB_BIMC,
		.gateway = 1,
		.slavep = sport_gw_mmss_bimc,
		.num_sports = ARRAY_SIZE(sport_gw_mmss_bimc),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_BIMC,
	},
	{
		.id = MSM_BUS_SLAVE_CAMERA_CFG,
		.slavep = sport_camera_cfg,
		.num_sports = ARRAY_SIZE(sport_camera_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CAMERA_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_DISPLAY_CFG,
		.slavep = sport_display_cfg,
		.num_sports = ARRAY_SIZE(sport_display_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_DISPLAY_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_OCMEM_CFG,
		.slavep = sport_ocmem_cfg,
		.num_sports = ARRAY_SIZE(sport_ocmem_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_OCMEM_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CPR_CFG,
		.slavep = sport_cpr_cfg,
		.num_sports = ARRAY_SIZE(sport_cpr_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CPR_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CPR_XPU_CFG,
		.slavep = sport_cpr_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CPR_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MISC_CFG,
		.slavep = sport_misc_cfg,
		.num_sports = ARRAY_SIZE(sport_misc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MISC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MISC_XPU_CFG,
		.slavep = sport_misc_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_misc_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MISC_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_VENUS_CFG,
		.slavep = sport_venus_cfg,
		.num_sports = ARRAY_SIZE(sport_venus_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_VENUS_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
		.slavep = sport_gfx3d_cfg,
		.num_sports = ARRAY_SIZE(sport_gfx3d_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_GFX3D_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MMSS_CLK_CFG,
		.slavep = sport_mmss_clk_cfg,
		.num_sports = ARRAY_SIZE(sport_mmss_clk_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_CLK_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
		.slavep = sport_mmss_clk_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_CLK_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MNOC_MPU_CFG,
		.slavep = sport_mnoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MNOC_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_ONOC_MPU_CFG,
		.slavep = sport_onoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_ONOC_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_MNOC,
		.slavep = sport_service_mnoc,
		.num_sports = ARRAY_SIZE(sport_service_mnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_SERVICE_MNOC,
	},
};
/*
 * Node table for the BIMC (memory controller) fabric; registered via
 * msm_bus_8974_bimc_pdata below.  Both Krait master ports share the
 * same QoS port list (qports_kmpss) and read/write priority 1.
 */
static struct msm_bus_node_info bimc_info[] = {
	{
		.id = MSM_BUS_MASTER_AMPSS_M0,
		.masterp = mport_kmpss_m0,
		.num_mports = ARRAY_SIZE(mport_kmpss_m0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_kmpss,
		.ws = 10000,
		.mas_hw_id = MAS_APPSS_PROC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_AMPSS_M1,
		.masterp = mport_kmpss_m1,
		.num_mports = ARRAY_SIZE(mport_kmpss_m1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_kmpss,
		.ws = 10000,
		.mas_hw_id = MAS_APPSS_PROC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_MSS_PROC,
		.masterp = mport_mss_proc,
		.num_mports = ARRAY_SIZE(mport_mss_proc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_RPM,
		.mas_hw_id = MAS_AMSS_PROC,
	},
	{
		/* Gateway: MMSS NoC masters into BIMC. */
		.id = MSM_BUS_FAB_MMSS_NOC,
		.gateway = 1,
		.masterp = mport_gw_mnoc_bimc,
		.num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc),
		.qport = qports_gw_mnoc_bimc,
		.buswidth = 8,
		.ws = 10000,
		.mas_hw_id = MAS_MNOC_BIMC,
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_BYPASS,
	},
	{
		/* Bidirectional gateway between BIMC and the system NoC. */
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.slavep = sport_gw_bimc_snoc,
		.num_sports = ARRAY_SIZE(sport_gw_bimc_snoc),
		.masterp = mport_gw_snoc_bimc,
		.num_mports = ARRAY_SIZE(mport_gw_snoc_bimc),
		.qport = qports_gw_snoc_bimc,
		.buswidth = 8,
		.ws = 10000,
		.mas_hw_id = MAS_SNOC_BIMC,
		.slv_hw_id = SLV_BIMC_SNOC,
	},
	{
		.id = MSM_BUS_SLAVE_EBI_CH0,
		.slavep = sport_ebi1,
		.num_sports = ARRAY_SIZE(sport_ebi1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_EBI,
		.mode = NOC_QOS_MODE_BYPASS,
	},
	{
		.id = MSM_BUS_SLAVE_AMPSS_L2,
		.slavep = sport_kmpss_l2,
		.num_sports = ARRAY_SIZE(sport_kmpss_l2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_APSS_L2,
	},
};
/*
 * Node table for the OCMEM NoC fabric; registered via
 * msm_bus_8974_ocmem_noc_pdata below.  Multimedia OCMEM masters use
 * fixed-mode NoC QoS here.
 */
static struct msm_bus_node_info ocmem_noc_info[] = {
	{
		.id = MSM_BUS_FAB_OCMEM_VNOC,
		.gateway = 1,
		.buswidth = 16,
		.mas_hw_id = MAS_OVNOC_ONOC,
		.slv_hw_id = SLV_ONOC_OVNOC,
	},
	{
		.id = MSM_BUS_MASTER_JPEG_OCMEM,
		.masterp = mport_jpeg_ocmem,
		.num_mports = ARRAY_SIZE(mport_jpeg_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_gemini_ocmem,
		.mas_hw_id = MAS_JPEG_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		/*
		 * NOTE(review): unlike the other FIXED-mode masters in this
		 * table, no .qport is set here -- confirm this is intentional.
		 */
		.id = MSM_BUS_MASTER_MDP_OCMEM,
		.masterp = mport_mdp_ocmem,
		.num_mports = ARRAY_SIZE(mport_mdp_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.mas_hw_id = MAS_MDP_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
		.masterp = mport_video_p0_ocmem,
		.num_mports = ARRAY_SIZE(mport_video_p0_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_venus_p0_ocmem,
		.mas_hw_id = MAS_VIDEO_P0_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P1_OCMEM,
		.masterp = mport_video_p1_ocmem,
		.num_mports = ARRAY_SIZE(mport_video_p1_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_venus_p1_ocmem,
		.mas_hw_id = MAS_VIDEO_P1_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VFE_OCMEM,
		.masterp = mport_vfe_ocmem,
		.num_mports = ARRAY_SIZE(mport_vfe_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_vfe_ocmem,
		.mas_hw_id = MAS_VFE_OCMEM,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_CNOC_ONOC_CFG,
		.masterp = mport_cnoc_onoc_cfg,
		.num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.mas_hw_id = MAS_CNOC_ONOC_CFG,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_ONOC,
		.slavep = sport_service_onoc,
		.num_sports = ARRAY_SIZE(sport_service_onoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.slv_hw_id = SLV_SERVICE_ONOC,
	},
};
/*
 * Node table for the peripheral NoC fabric; registered via
 * msm_bus_8974_periph_noc_pdata below.  Covers SDCC/TSIF/BLSP/USB
 * masters and their configuration slaves, plus the gateway to the
 * system NoC.
 */
static struct msm_bus_node_info periph_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_PNOC_CFG,
		.masterp = mport_pnoc_cfg,
		.num_mports = ARRAY_SIZE(mport_pnoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_PNOC_CFG,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_1,
		.masterp = mport_sdcc_1,
		.num_mports = ARRAY_SIZE(mport_sdcc_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_1,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_3,
		.masterp = mport_sdcc_3,
		.num_mports = ARRAY_SIZE(mport_sdcc_3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_3,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_4,
		.masterp = mport_sdcc_4,
		.num_mports = ARRAY_SIZE(mport_sdcc_4),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_4,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_2,
		.masterp = mport_sdcc_2,
		.num_mports = ARRAY_SIZE(mport_sdcc_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_2,
	},
	{
		.id = MSM_BUS_MASTER_TSIF,
		.masterp = mport_tsif,
		.num_mports = ARRAY_SIZE(mport_tsif),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_TSIF,
	},
	{
		.id = MSM_BUS_MASTER_BAM_DMA,
		.masterp = mport_bam_dma,
		.num_mports = ARRAY_SIZE(mport_bam_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BAM_DMA,
	},
	{
		.id = MSM_BUS_MASTER_BLSP_2,
		.masterp = mport_blsp_2,
		.num_mports = ARRAY_SIZE(mport_blsp_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BLSP_2,
	},
	{
		.id = MSM_BUS_MASTER_USB_HSIC,
		.masterp = mport_usb_hsic,
		.num_mports = ARRAY_SIZE(mport_usb_hsic),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_USB_HSIC,
	},
	{
		.id = MSM_BUS_MASTER_BLSP_1,
		.masterp = mport_blsp_1,
		.num_mports = ARRAY_SIZE(mport_blsp_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BLSP_1,
	},
	{
		.id = MSM_BUS_MASTER_USB_HS,
		.masterp = mport_usb_hs,
		.num_mports = ARRAY_SIZE(mport_usb_hs),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_USB_HS,
	},
	{
		/* Bidirectional gateway between PNOC and the system NoC. */
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.slavep = sport_gw_pnoc_snoc,
		.num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc),
		.masterp = mport_gw_snoc_pnoc,
		.num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PNOC_SNOC,
		.mas_hw_id = MAS_SNOC_PNOC,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_1,
		.slavep = sport_sdcc_1,
		.num_sports = ARRAY_SIZE(sport_sdcc_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_1,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_3,
		.slavep = sport_sdcc_3,
		.num_sports = ARRAY_SIZE(sport_sdcc_3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_3,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_2,
		.slavep = sport_sdcc_2,
		.num_sports = ARRAY_SIZE(sport_sdcc_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_2,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_4,
		.slavep = sport_sdcc_4,
		.num_sports = ARRAY_SIZE(sport_sdcc_4),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_4,
	},
	{
		.id = MSM_BUS_SLAVE_TSIF,
		.slavep = sport_tsif,
		.num_sports = ARRAY_SIZE(sport_tsif),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_TSIF,
	},
	{
		.id = MSM_BUS_SLAVE_BAM_DMA,
		.slavep = sport_bam_dma,
		.num_sports = ARRAY_SIZE(sport_bam_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BAM_DMA,
	},
	{
		.id = MSM_BUS_SLAVE_BLSP_2,
		.slavep = sport_blsp_2,
		.num_sports = ARRAY_SIZE(sport_blsp_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BLSP_2,
	},
	{
		.id = MSM_BUS_SLAVE_USB_HSIC,
		.slavep = sport_usb_hsic,
		.num_sports = ARRAY_SIZE(sport_usb_hsic),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB_HSIC,
	},
	{
		.id = MSM_BUS_SLAVE_BLSP_1,
		.slavep = sport_blsp_1,
		.num_sports = ARRAY_SIZE(sport_blsp_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BLSP_1,
	},
	{
		.id = MSM_BUS_SLAVE_USB_HS,
		.slavep = sport_usb_hs,
		.num_sports = ARRAY_SIZE(sport_usb_hs),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB_HS,
	},
	{
		.id = MSM_BUS_SLAVE_PDM,
		.slavep = sport_pdm,
		.num_sports = ARRAY_SIZE(sport_pdm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PDM,
	},
	{
		.id = MSM_BUS_SLAVE_PERIPH_APU_CFG,
		.slavep = sport_periph_apu_cfg,
		.num_sports = ARRAY_SIZE(sport_periph_apu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PERIPH_APU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PNOC_MPU_CFG,
		.slavep = sport_pnoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PRNG,
		.slavep = sport_prng,
		.num_sports = ARRAY_SIZE(sport_prng),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PRNG,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_PNOC,
		.slavep = sport_service_pnoc,
		.num_sports = ARRAY_SIZE(sport_service_pnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SERVICE_PNOC,
	},
};
/*
 * Node table for the config NoC fabric; registered via
 * msm_bus_8974_config_noc_pdata below.  Masters are RPM/debug agents;
 * slaves are register-configuration targets, plus the gateway to the
 * system NoC.
 */
static struct msm_bus_node_info config_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_RPM_INST,
		.masterp = mport_rpm_inst,
		.num_mports = ARRAY_SIZE(mport_rpm_inst),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_RPM_INST,
	},
	{
		.id = MSM_BUS_MASTER_RPM_DATA,
		.masterp = mport_rpm_data,
		.num_mports = ARRAY_SIZE(mport_rpm_data),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_RPM_DATA,
	},
	{
		.id = MSM_BUS_MASTER_RPM_SYS,
		.masterp = mport_rpm_sys,
		.num_mports = ARRAY_SIZE(mport_rpm_sys),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_RPM_SYS,
	},
	{
		.id = MSM_BUS_MASTER_DEHR,
		.masterp = mport_dehr,
		.num_mports = ARRAY_SIZE(mport_dehr),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_DEHR,
	},
	{
		.id = MSM_BUS_MASTER_QDSS_DAP,
		.masterp = mport_qdss_dap,
		.num_mports = ARRAY_SIZE(mport_qdss_dap),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_QDSS_DAP,
	},
	{
		.id = MSM_BUS_MASTER_SPDM,
		.masterp = mport_spdm,
		.num_mports = ARRAY_SIZE(mport_spdm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SPDM,
	},
	{
		.id = MSM_BUS_MASTER_TIC,
		.masterp = mport_tic,
		.num_mports = ARRAY_SIZE(mport_tic),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_TIC,
	},
	{
		.id = MSM_BUS_SLAVE_CLK_CTL,
		.slavep = sport_clk_ctl,
		.num_sports = ARRAY_SIZE(sport_clk_ctl),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CLK_CTL,
	},
	{
		.id = MSM_BUS_SLAVE_CNOC_MSS,
		.slavep = sport_cnoc_mss,
		.num_sports = ARRAY_SIZE(sport_cnoc_mss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CNOC_MSS,
	},
	{
		.id = MSM_BUS_SLAVE_SECURITY,
		.slavep = sport_security,
		.num_sports = ARRAY_SIZE(sport_security),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SECURITY,
	},
	{
		.id = MSM_BUS_SLAVE_TCSR,
		.slavep = sport_tcsr,
		.num_sports = ARRAY_SIZE(sport_tcsr),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_TCSR,
	},
	{
		.id = MSM_BUS_SLAVE_TLMM,
		.slavep = sport_tlmm,
		.num_sports = ARRAY_SIZE(sport_tlmm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_TLMM,
	},
	{
		.id = MSM_BUS_SLAVE_CRYPTO_0_CFG,
		.slavep = sport_crypto_0_cfg,
		.num_sports = ARRAY_SIZE(sport_crypto_0_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CRYPTO_0_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CRYPTO_1_CFG,
		.slavep = sport_crypto_1_cfg,
		.num_sports = ARRAY_SIZE(sport_crypto_1_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CRYPTO_1_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_IMEM_CFG,
		.slavep = sport_imem_cfg,
		.num_sports = ARRAY_SIZE(sport_imem_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_IMEM_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MESSAGE_RAM,
		.slavep = sport_message_ram,
		.num_sports = ARRAY_SIZE(sport_message_ram),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_MESSAGE_RAM,
	},
	{
		.id = MSM_BUS_SLAVE_BIMC_CFG,
		.slavep = sport_bimc_cfg,
		.num_sports = ARRAY_SIZE(sport_bimc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BIMC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_BOOT_ROM,
		.slavep = sport_boot_rom,
		.num_sports = ARRAY_SIZE(sport_boot_rom),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BOOT_ROM,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC_ARB,
		.slavep = sport_pmic_arb,
		.num_sports = ARRAY_SIZE(sport_pmic_arb),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PMIC_ARB,
	},
	{
		.id = MSM_BUS_SLAVE_SPDM_WRAPPER,
		.slavep = sport_spdm_wrapper,
		.num_sports = ARRAY_SIZE(sport_spdm_wrapper),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SPDM_WRAPPER,
	},
	{
		.id = MSM_BUS_SLAVE_DEHR_CFG,
		.slavep = sport_dehr_cfg,
		.num_sports = ARRAY_SIZE(sport_dehr_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_DEHR_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MPM,
		.slavep = sport_mpm,
		.num_sports = ARRAY_SIZE(sport_mpm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_MPM,
	},
	{
		.id = MSM_BUS_SLAVE_QDSS_CFG,
		.slavep = sport_qdss_cfg,
		.num_sports = ARRAY_SIZE(sport_qdss_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_QDSS_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_RBCPR_CFG,
		.slavep = sport_rbcpr_cfg,
		.num_sports = ARRAY_SIZE(sport_rbcpr_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_RBCPR_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
		.slavep = sport_rbcpr_qdss_apu_cfg,
		.num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_RBCPR_QDSS_APU_CFG,
	},
	{
		/* Bidirectional gateway between CNOC and the system NoC. */
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.slavep = sport_gw_cnoc_snoc,
		.num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc),
		.masterp = mport_gw_snoc_cnoc,
		.num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SNOC_CNOC,
		.slv_hw_id = SLV_CNOC_SNOC,
	},
	{
		.id = MSM_BUS_SLAVE_CNOC_ONOC_CFG,
		.slavep = sport_cnoc_onoc_cfg,
		.num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CNOC_ONOC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
		.slavep = sport_cnoc_mnoc_mmss_cfg,
		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CNOC_MNOC_CFG,
		.slavep = sport_cnoc_mnoc_cfg,
		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_CNOC_MNOC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PNOC_CFG,
		.slavep = sport_pnoc_cfg,
		.num_sports = ARRAY_SIZE(sport_pnoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PNOC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_SNOC_MPU_CFG,
		.slavep = sport_snoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SNOC_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_SNOC_CFG,
		.slavep = sport_snoc_cfg,
		.num_sports = ARRAY_SIZE(sport_snoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SNOC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_EBI1_DLL_CFG,
		.slavep = sport_ebi1_dll_cfg,
		.num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_EBI1_DLL_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PHY_APU_CFG,
		.slavep = sport_phy_apu_cfg,
		.num_sports = ARRAY_SIZE(sport_phy_apu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PHY_APU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_EBI1_PHY_CFG,
		.slavep = sport_ebi1_phy_cfg,
		.num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_EBI1_PHY_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_RPM,
		.slavep = sport_rpm,
		.num_sports = ARRAY_SIZE(sport_rpm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_RPM,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_CNOC,
		.slavep = sport_service_cnoc,
		.num_sports = ARRAY_SIZE(sport_service_cnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SERVICE_CNOC,
	},
};
/* A virtual NoC is needed for connection to OCMEM */
static struct msm_bus_node_info ocmem_vnoc_info[] = {
	{
		.id = MSM_BUS_MASTER_V_OCMEM_GFX3D,
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_V_OCMEM_GFX3D,
	},
	{
		/*
		 * Fix: .tier was initialized twice in this element; a
		 * duplicate designated initializer is redundant (the last
		 * one wins per C11 6.7.9) and triggers -Woverride-init, so
		 * the second occurrence is dropped.
		 */
		.id = MSM_BUS_SLAVE_OCMEM,
		.slavep = sport_ocmem,
		.num_sports = ARRAY_SIZE(sport_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.slv_hw_id = SLV_OCMEM,
		.slaveclk[DUAL_CTX] = "ocmem_clk",
		.slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
	},
	{
		/* Gateway to the system NoC. */
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.buswidth = 8,
		.ws = 10000,
		.mas_hw_id = MAS_SNOC_OVNOC,
		.slv_hw_id = SLV_OVNOC_SNOC,
	},
	{
		/* Gateway to the OCMEM NoC. */
		.id = MSM_BUS_FAB_OCMEM_NOC,
		.gateway = 1,
		.buswidth = 16,
		.ws = 10000,
		.mas_hw_id = MAS_ONOC_OVNOC,
		.slv_hw_id = SLV_OVNOC_ONOC,
	},
};
static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
*fabreg, int fabid)
{
int i;
for (i = 0; i < fabreg->len; i++) {
if (!fabreg->info[i].gateway) {
fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
if (fabreg->info[i].id < SLAVE_ID_KEY) {
WARN(fabreg->info[i].id >= NMASTERS,
"id %d exceeds array size!\n",
fabreg->info[i].id);
master_iids[fabreg->info[i].id] =
fabreg->info[i].priv_id;
} else {
WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
NSLAVES, "id %d exceeds array size!\n",
fabreg->info[i].id);
slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
= fabreg->info[i].priv_id;
}
} else {
fabreg->info[i].priv_id = fabreg->info[i].id;
}
}
}
/*
 * Translate a global master/slave id into the private id assigned by
 * msm_bus_board_assign_iids().  Ids below SLAVE_ID_KEY are masters,
 * the rest are slaves (offset by SLAVE_ID_KEY).  Returns -EINVAL for
 * ids outside both ranges; otherwise the looked-up value filtered
 * through the CHECK_ID() macro.
 */
static int msm_bus_board_8974_get_iid(int id)
{
	if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
		id >= (SLAVE_ID_KEY + NSLAVES)) {
		MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
		return -EINVAL;
	}
	return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
		slave_iids[id - SLAVE_ID_KEY]), id);
}
/*
 * Interleaved-memory RPM id lookup: not supported on this board, so
 * always fail with -ENXIO.  The output parameter is never written.
 */
int msm_bus_board_rpm_get_il_ids(uint16_t *id)
{
	return -ENXIO;
}
/*
 * Board algorithm hooks shared by every fabric registration below.
 * board_nfab can be overridden at runtime via msm_bus_board_set_nfab().
 */
static struct msm_bus_board_algorithm msm_bus_board_algo = {
	.board_nfab = NFAB_8974,
	.get_iid = msm_bus_board_8974_get_iid,
	.assign_iids = msm_bus_board_assign_iids,
};
/* Fabric registration for the system NoC (sys_noc_info table above). */
struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata = {
	.id = MSM_BUS_FAB_SYS_NOC,
	.name = "msm_sys_noc",
	.info = sys_noc_info,
	.len = ARRAY_SIZE(sys_noc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.nmasters = 15,
	.nslaves = 12,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.qos_freq = 4800,
	.hw_sel = MSM_BUS_NOC,
	.rpm_enabled = 1,
};
/* Fabric registration for the MMSS NoC (mmss_noc_info table above). */
struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata = {
	.id = MSM_BUS_FAB_MMSS_NOC,
	.name = "msm_mmss_noc",
	.info = mmss_noc_info,
	.len = ARRAY_SIZE(mmss_noc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.nmasters = 9,
	.nslaves = 16,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.qos_freq = 4800,
	.hw_sel = MSM_BUS_NOC,
	.rpm_enabled = 1,
};
/* Fabric registration for BIMC (bimc_info table above); uses mem clocks. */
struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata = {
	.id = MSM_BUS_FAB_BIMC,
	.name = "msm_bimc",
	.info = bimc_info,
	.len = ARRAY_SIZE(bimc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "mem_clk",
	.fabclk[ACTIVE_CTX] = "mem_a_clk",
	.nmasters = 7,
	.nslaves = 4,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.qos_freq = 4800,
	.hw_sel = MSM_BUS_BIMC,
	.rpm_enabled = 1,
};
/* Fabric registration for the OCMEM NoC (ocmem_noc_info table above). */
struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata = {
	.id = MSM_BUS_FAB_OCMEM_NOC,
	.name = "msm_ocmem_noc",
	.info = ocmem_noc_info,
	.len = ARRAY_SIZE(ocmem_noc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.nmasters = 6,
	.nslaves = 3,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.qos_freq = 4800,
	.hw_sel = MSM_BUS_NOC,
	.rpm_enabled = 1,
};
/*
 * Fabric registration for the peripheral NoC (periph_noc_info above).
 * No .qos_freq is set here, unlike the sys/mmss/bimc/ocmem fabrics.
 */
struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata = {
	.id = MSM_BUS_FAB_PERIPH_NOC,
	.name = "msm_periph_noc",
	.info = periph_noc_info,
	.len = ARRAY_SIZE(periph_noc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.nmasters = 12,
	.nslaves = 16,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.hw_sel = MSM_BUS_NOC,
	.rpm_enabled = 1,
};
/* Fabric registration for the config NoC (config_noc_info table above). */
struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata = {
	.id = MSM_BUS_FAB_CONFIG_NOC,
	.name = "msm_config_noc",
	.info = config_noc_info,
	.len = ARRAY_SIZE(config_noc_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.nmasters = 8,
	.nslaves = 30,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.hw_sel = MSM_BUS_NOC,
	.rpm_enabled = 1,
};
/*
 * Fabric registration for the virtual OCMEM NoC (ocmem_vnoc_info above).
 * Marked .virt = 1 and carries no fabric clocks of its own.
 */
struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata = {
	.id = MSM_BUS_FAB_OCMEM_VNOC,
	.name = "msm_ocmem_vnoc",
	.info = ocmem_vnoc_info,
	.len = ARRAY_SIZE(ocmem_vnoc_info),
	.ahb = 0,
	.nmasters = 5,
	.nslaves = 4,
	.ntieredslaves = 0,
	.board_algo = &msm_bus_board_algo,
	.hw_sel = MSM_BUS_NOC,
	.virt = 1,
	.rpm_enabled = 1,
};
/* Hook the shared board algorithm into an externally supplied pdata. */
void msm_bus_board_init(struct msm_bus_fabric_registration *pdata)
{
	pdata->board_algo = &msm_bus_board_algo;
}
/*
 * Override the board-wide fabric count.  Non-positive values are
 * ignored.  Note the pdata argument is unused here; the count lives in
 * the shared msm_bus_board_algo, not per-fabric.
 */
void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
	int nfab)
{
	if (nfab <= 0)
		return;

	msm_bus_board_algo.board_nfab = nfab;
}
| gpl-2.0 |
1N4148/android_kernel_samsung_msm8974 | crypto/aead.c | 1511 | 14332 | /*
* AEAD: Authenticated Encryption with Associated Data
*
* This file provides API support for AEAD algorithms.
*
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
/*
 * Set the AEAD key when the caller's key buffer does not satisfy the
 * algorithm's alignment mask.  The key is copied into a freshly
 * allocated buffer, rounded up to the required alignment, handed to the
 * algorithm's ->setkey(), and the copy is wiped before being freed.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer cannot be
 * allocated, or the error from ->setkey().
 *
 * NOTE(review): the memset() wipe runs immediately before kfree() on a
 * dying buffer, which a compiler may in principle elide; later kernels
 * use kzfree()/memzero_explicit() here -- confirm against this tree's
 * available APIs before changing.
 */
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct aead_alg *aead = crypto_aead_alg(tfm);
	unsigned long alignmask = crypto_aead_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	/* Extra alignmask bytes leave room to round the pointer up. */
	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = aead->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct aead_alg *aead = crypto_aead_alg(tfm);
unsigned long alignmask = crypto_aead_alignmask(tfm);
if ((unsigned long)key & alignmask)
return setkey_unaligned(tfm, key, keylen);
return aead->setkey(tfm, key, keylen);
}
/*
 * Set the authentication tag size for an AEAD transform.  Rejects sizes
 * above the algorithm's maxauthsize; invokes the algorithm's optional
 * ->setauthsize() hook on the base transform.  On success the new size
 * is stored both on the base transform's crt and on this one.
 *
 * Returns 0 on success, -EINVAL for an oversized request, or the error
 * from ->setauthsize().
 */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct aead_tfm *crt = crypto_aead_crt(tfm);
	int err;

	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	if (crypto_aead_alg(tfm)->setauthsize) {
		err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize);
		if (err)
			return err;
	}

	crypto_aead_crt(crt->base)->authsize = authsize;
	crt->authsize = authsize;
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
/*
 * Context size for an AEAD tfm: just the algorithm's declared
 * cra_ctxsize; type/mask are ignored.
 */
static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type,
					u32 mask)
{
	return alg->cra_ctxsize;
}
/* Stub used when an algorithm provides no IV-generating operation. */
static int no_givcrypt(struct aead_givcrypt_request *req)
{
	return -ENOSYS;
}
/*
 * Initialize the aead_tfm operation table from the algorithm.
 * Algorithms flagged CRYPTO_ALG_GENIV get their raw ->setkey();
 * everything else goes through the alignment-handling setkey() wrapper.
 * Missing givencrypt/givdecrypt hooks fall back to no_givcrypt
 * (-ENOSYS).  Rejects algorithms whose max auth tag or IV exceeds
 * PAGE_SIZE / 8.
 */
static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
	struct aead_tfm *crt = &tfm->crt_aead;

	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt ?: no_givcrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
	crt->base = __crypto_aead_cast(tfm);
	crt->ivsize = alg->ivsize;
	crt->authsize = alg->maxauthsize;

	return 0;
}
#ifdef CONFIG_NET
/*
 * Report AEAD algorithm parameters over the crypto netlink interface.
 * NLA_PUT presumably jumps to nla_put_failure when the attribute does
 * not fit (the label is otherwise unreachable), yielding -EMSGSIZE.
 */
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_aead raead;
	struct aead_alg *aead = &alg->cra_aead;

	strncpy(raead.type, "aead", sizeof(raead.type));
	strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));

	raead.blocksize = alg->cra_blocksize;
	raead.maxauthsize = aead->maxauthsize;
	raead.ivsize = aead->ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
		sizeof(struct crypto_report_aead), &raead);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
/* Netlink reporting unavailable without CONFIG_NET. */
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
/*
 * /proc crypto seq_file formatter for AEAD algorithms.  The forward
 * declaration is marked unused because the function is only referenced
 * under CONFIG_PROC_FS.
 */
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct aead_alg *aead = &alg->cra_aead;

	seq_printf(m, "type         : aead\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
	seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
}
/* crypto_type vtable for plain AEAD transforms. */
const struct crypto_type crypto_aead_type = {
	.ctxsize = crypto_aead_ctxsize,
	.init = crypto_init_aead_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_aead_show,
#endif
	.report = crypto_aead_report,
};
EXPORT_SYMBOL_GPL(crypto_aead_type);
/* Givencrypt for zero-IV algorithms: plain encrypt, no IV generation. */
static int aead_null_givencrypt(struct aead_givcrypt_request *req)
{
	return crypto_aead_encrypt(&req->areq);
}
/* Givdecrypt for zero-IV algorithms: plain decrypt, no IV handling. */
static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
{
	return crypto_aead_decrypt(&req->areq);
}
/*
 * Initialize the aead_tfm for an algorithm that needs an IV generator
 * ("niv" AEAD).  Zero-IV algorithms get the passthrough null
 * giv{en,de}crypt helpers; for others, givencrypt/givdecrypt are left
 * unset here (a geniv template is expected to provide them).
 * Rejects algorithms whose max auth tag or IV exceeds PAGE_SIZE / 8.
 */
static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
	struct aead_tfm *crt = &tfm->crt_aead;

	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = aead_null_givencrypt;
		crt->givdecrypt = aead_null_givdecrypt;
	}
	crt->base = __crypto_aead_cast(tfm);
	crt->ivsize = alg->ivsize;
	crt->authsize = alg->maxauthsize;

	return 0;
}
#ifdef CONFIG_NET
/*
 * Fill a netlink CRYPTOCFGA_REPORT_AEAD attribute describing a nivaead
 * algorithm.  Returns 0 or -EMSGSIZE when the skb has no room.
 */
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_aead raead;
	struct aead_alg *aead = &alg->cra_aead;

	/*
	 * Zero the whole report first: the structure is copied verbatim to
	 * userspace via netlink, so any padding bytes or fields left unset
	 * would leak kernel stack contents.
	 */
	memset(&raead, 0, sizeof(raead));

	strncpy(raead.type, "nivaead", sizeof(raead.type));
	strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));

	raead.blocksize = alg->cra_blocksize;
	raead.maxauthsize = aead->maxauthsize;
	raead.ivsize = aead->ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
		sizeof(struct crypto_report_aead), &raead);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
/* CONFIG_NET disabled: netlink reporting is unsupported. */
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
{
struct aead_alg *aead = &alg->cra_aead;
seq_printf(m, "type : nivaead\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
seq_printf(m, "geniv : %s\n", aead->geniv);
}
/* Type glue for AEAD algorithms without their own IV generator. */
const struct crypto_type crypto_nivaead_type = {
	.ctxsize = crypto_aead_ctxsize,
	.init = crypto_init_nivaead_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_nivaead_show,
#endif
	.report = crypto_nivaead_report,
};
EXPORT_SYMBOL_GPL(crypto_nivaead_type);
/*
 * Take a spawn reference on a nivaead algorithm for use by a geniv
 * template instance.  Setting CRYPTO_ALG_GENIV in the mask restricts
 * the lookup to algorithms without a built-in IV generator.
 */
static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
			       const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	/* The spawn holds its own reference; drop the lookup's. */
	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
/*
 * aead_geniv_alloc - build a geniv template instance that wraps an AEAD
 * algorithm lacking its own IV generator.
 *
 * Returns the new crypto_instance or an ERR_PTR.  On success the
 * instance keeps a spawn reference on the underlying algorithm.
 */
struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
					 struct rtattr **tb, u32 type,
					 u32 mask)
{
	const char *name;
	struct crypto_aead_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	/* The caller must be asking for an AEAD (GENIV bit may differ). */
	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	/* Instance and its spawn live in a single allocation. */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_nivaead(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	/* Only algorithms that actually need an IV can be wrapped. */
	err = -EINVAL;
	if (!alg->cra_aead.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (strcmp(tmpl->name, alg->cra_aead.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		/* Visible wrapper: name becomes "template(algorithm)". */
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	/* The wrapper inherits the underlying algorithm's properties. */
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_aead_type;

	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;

	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt;
	inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_aead(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
/* Release a geniv instance: drop the inner-algorithm spawn, free memory. */
void aead_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_aead(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(aead_geniv_free);
/*
 * Per-transform init for geniv wrappers: instantiate the inner AEAD
 * from the spawn and grow the request size to cover its needs.
 */
int aead_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_aead *aead;

	aead = crypto_spawn_aead(crypto_instance_ctx(inst));
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	tfm->crt_aead.base = aead;
	/* += : keep any reqsize the caller's own init already added */
	tfm->crt_aead.reqsize += crypto_aead_reqsize(aead);

	return 0;
}
EXPORT_SYMBOL_GPL(aead_geniv_init);
/* Per-transform teardown: free the inner AEAD allocated in aead_geniv_init. */
void aead_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_aead(tfm->crt_aead.base);
}
EXPORT_SYMBOL_GPL(aead_geniv_exit);
/*
 * Instantiate the algorithm's default IV generator template
 * (alg->cra_aead.geniv) around @alg and register the result.
 *
 * On success this still returns -EAGAIN so the caller redoes the
 * lookup and picks up the instance just registered.  Consumes the
 * caller's reference on @alg.
 */
static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	/* Register a larval so concurrent lookups of the same name wait. */
	larval = crypto_larval_lookup(alg->cra_driver_name,
				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	/* Somebody else already built it; just retry the lookup. */
	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	/* Build the template parameter list: type attribute ... */
	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	/* ... and the algorithm-name attribute. */
	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	geniv = alg->cra_aead.geniv;

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	if ((err = crypto_register_instance(tmpl, inst))) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}
/*
 * Look up an AEAD algorithm by name.  If the match is a nivaead (one
 * that needs an external IV generator), kick off construction of its
 * default geniv wrapper and return -EAGAIN (as an ERR_PTR) so the
 * caller retries the lookup.
 */
struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	/* Already a full AEAD — nothing more to do. */
	if (alg->cra_type == &crypto_aead_type)
		return alg;

	/* No IV needed, so no generator wrapper is required either. */
	if (!alg->cra_aead.ivsize)
		return alg;

	crypto_mod_put(alg);
	/* Retry, this time also matching not-yet-tested algorithms. */
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if (alg->cra_type == &crypto_aead_type) {
		/* Reject if the TESTED state doesn't match what was asked. */
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!alg->cra_aead.ivsize);

	/* nivaead: build its default geniv instance (consumes alg ref). */
	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_aead);
/*
 * Take a spawn reference on a full AEAD algorithm, constructing its
 * default IV generator wrapper first if needed (via crypto_lookup_aead).
 */
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
		     u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	/* Force the AEAD type; drop any caller-supplied GENIV bits. */
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_MASK;

	alg = crypto_lookup_aead(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_aead);
/*
 * crypto_alloc_aead - allocate an AEAD transform by algorithm name.
 *
 * Loops while lookups return -EAGAIN (e.g. while a default IV
 * generator instance is still being constructed or tested), bailing
 * out early on a pending signal.  Returns the transform or ERR_PTR.
 */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_MASK;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_aead(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_aead_cast(tfm);

		/* tfm allocation failed; drop the lookup reference. */
		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
| gpl-2.0 |
cometzero/e210s_jb | drivers/usb/gadget/f_serial.c | 2279 | 8258 | /*
* f_serial.c - generic USB serial function driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
* either version 2 of that License or (at your option) any later version.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include "u_serial.h"
#include "gadget_chips.h"
/*
* This function packages a simple "generic serial" port with no real
* control mechanisms, just raw data transfer over two bulk endpoints.
*
* Because it's not standardized, this isn't as interoperable as the
* CDC ACM driver. However, for many purposes it's just as functional
* if you can arrange appropriate host side drivers.
*/
/* Bulk IN/OUT endpoint descriptor pair for one bus speed. */
struct gser_descs {
	struct usb_endpoint_descriptor *in;
	struct usb_endpoint_descriptor *out;
};
/*
 * Per-function state: the generic serial port plus the interface id,
 * tty port number, and the descriptor sets recorded at bind time.
 */
struct f_gser {
	struct gserial port;
	u8 data_id;	/* interface number from usb_interface_id() */
	u8 port_num;	/* index of the ttyGS* port this function drives */

	struct gser_descs fs;	/* full-speed endpoint descriptors */
	struct gser_descs hs;	/* high-speed endpoint descriptors */
};
/* Map a generic usb_function back to its containing f_gser. */
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
	return container_of(f, struct f_gser, port.func);
}
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
static struct usb_interface_descriptor gser_interface_desc __initdata = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints = 2,	/* one bulk IN + one bulk OUT */
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = 0,
	.bInterfaceProtocol = 0,
	/* .iInterface = DYNAMIC */
};
/* full speed support: */
/* Endpoint addresses are filled in by usb_ep_autoconfig() at bind time. */
static struct usb_endpoint_descriptor gser_fs_in_desc __initdata = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor gser_fs_out_desc __initdata = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

/* Full-speed descriptor list (interface + both bulk endpoints). */
static struct usb_descriptor_header *gser_fs_function[] __initdata = {
	(struct usb_descriptor_header *) &gser_interface_desc,
	(struct usb_descriptor_header *) &gser_fs_in_desc,
	(struct usb_descriptor_header *) &gser_fs_out_desc,
	NULL,
};
/* high speed support: */
/* High-speed variants; addresses are copied from the FS descriptors in bind. */
static struct usb_endpoint_descriptor gser_hs_in_desc __initdata = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),	/* HS bulk max packet */
};

static struct usb_endpoint_descriptor gser_hs_out_desc __initdata = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

/* High-speed descriptor list. */
static struct usb_descriptor_header *gser_hs_function[] __initdata = {
	(struct usb_descriptor_header *) &gser_interface_desc,
	(struct usb_descriptor_header *) &gser_hs_in_desc,
	(struct usb_descriptor_header *) &gser_hs_out_desc,
	NULL,
};
/* string descriptors: */
/* String table; [0].id is assigned lazily in gser_bind_config(). */
static struct usb_string gser_string_defs[] = {
	[0].s = "Generic Serial",
	{  } /* end of list */
};

static struct usb_gadget_strings gser_string_table = {
	.language = 0x0409,	/* en-us */
	.strings = gser_string_defs,
};

static struct usb_gadget_strings *gser_strings[] = {
	&gser_string_table,
	NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * SET_INTERFACE handler.  The function only has alt setting 0, so this
 * is either the first activation or a host-initiated reset.
 */
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_gser *gser = func_to_gser(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* we know alt == 0, so this is an activation or a reset */
	if (gser->port.in->driver_data) {
		/* already connected: tear the port down before reconnecting */
		DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
		gserial_disconnect(&gser->port);
	} else {
		/* first activation: pick descriptors for the negotiated speed */
		DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
		gser->port.in_desc = ep_choose(cdev->gadget,
				gser->hs.in, gser->fs.in);
		gser->port.out_desc = ep_choose(cdev->gadget,
				gser->hs.out, gser->fs.out);
	}
	gserial_connect(&gser->port, gser->port_num);
	return 0;
}
/* Host dropped the configuration: detach the tty port from its endpoints. */
static void gser_disable(struct usb_function *f)
{
	struct f_gser *gser = func_to_gser(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num);
	gserial_disconnect(&gser->port);
}
/*-------------------------------------------------------------------------*/
/* serial function driver setup/binding */
/*
 * Bind one generic serial function: allocate its interface id and bulk
 * endpoints, then build the full-speed (and, on dual-speed hardware,
 * high-speed) descriptor copies.  Returns 0 or a negative errno.
 */
static int __init
gser_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_gser *gser = func_to_gser(f);
	int status;
	struct usb_ep *ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	gser->data_id = status;
	gser_interface_desc.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc);
	if (!ep)
		goto fail;
	gser->port.in = ep;
	ep->driver_data = cdev; /* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc);
	if (!ep)
		goto fail;
	gser->port.out = ep;
	ep->driver_data = cdev; /* claim */

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(gser_fs_function);

	gser->fs.in = usb_find_endpoint(gser_fs_function,
			f->descriptors, &gser_fs_in_desc);
	gser->fs.out = usb_find_endpoint(gser_fs_function,
			f->descriptors, &gser_fs_out_desc);

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		/* HS descriptors reuse the addresses autoconfig assigned */
		gser_hs_in_desc.bEndpointAddress =
				gser_fs_in_desc.bEndpointAddress;
		gser_hs_out_desc.bEndpointAddress =
				gser_fs_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(gser_hs_function);

		gser->hs.in = usb_find_endpoint(gser_hs_function,
				f->hs_descriptors, &gser_hs_in_desc);
		gser->hs.out = usb_find_endpoint(gser_hs_function,
				f->hs_descriptors, &gser_hs_out_desc);
	}

	DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
			gser->port_num,
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			gser->port.in->name, gser->port.out->name);
	return 0;

fail:
	/* we might as well release our claims on endpoints */
	if (gser->port.out)
		gser->port.out->driver_data = NULL;
	if (gser->port.in)
		gser->port.in->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}
/* Unbind: free descriptor copies and the per-function state. */
static void
gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);
	kfree(func_to_gser(f));
}
/**
* gser_bind_config - add a generic serial function to a configuration
* @c: the configuration to support the serial instance
* @port_num: /dev/ttyGS* port this interface will use
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
*
* Caller must have called @gserial_setup() with enough ports to
* handle all the ones it binds. Caller is also responsible
* for calling @gserial_cleanup() before module unload.
*/
int __init gser_bind_config(struct usb_configuration *c, u8 port_num)
{
	struct f_gser *gser;
	int status;

	/* REVISIT might want instance-specific strings to help
	 * distinguish instances ...
	 */

	/* maybe allocate device-global string ID */
	if (gser_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		gser_string_defs[0].id = status;
	}

	/* allocate and initialize one new instance */
	gser = kzalloc(sizeof *gser, GFP_KERNEL);
	if (!gser)
		return -ENOMEM;

	gser->port_num = port_num;

	gser->port.func.name = "gser";
	gser->port.func.strings = gser_strings;
	gser->port.func.bind = gser_bind;
	gser->port.func.unbind = gser_unbind;
	gser->port.func.set_alt = gser_set_alt;
	gser->port.func.disable = gser_disable;

	status = usb_add_function(c, &gser->port.func);
	if (status)
		kfree(gser);	/* not added to the config: we still own it */
	return status;
}
| gpl-2.0 |
HridayHS/Lightning | drivers/xen/xen-pciback/vpci.c | 2279 | 6108 | /*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
/* Per-pdev virtual bus: one device list per virtual slot. */
struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};
/* First entry of a non-empty list (head itself when the list is empty). */
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}
/*
 * Translate a virtual (domain, bus, devfn) seen by the frontend into
 * the real pci_dev exported at that virtual slot/function, or NULL.
 * The virtual bus is always domain 0, bus 0.
 */
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		/* match on function number within the requested slot */
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}
/* Nonzero when both devices share a domain, bus and physical slot. */
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	return pci_domain_nr(l->bus) == pci_domain_nr(r->bus) &&
	       l->bus == r->bus &&
	       PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn);
}
/*
 * Export @dev on the virtual bus.  Functions of the same physical
 * multi-function device share one virtual slot, except SR-IOV virtual
 * functions which each get their own slot as function 0.  On success
 * the chosen (slot, func) is published to the frontend via @publish_cb.
 */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * virtual functions.
	 */
	if (!dev->is_virtfn) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			/* compare against the slot's first occupant */
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info(DRV_NAME ": vpci: %s: "
					"assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			printk(KERN_INFO DRV_NAME
			       ": vpci: %s: assign to virtual slot %d\n",
			       pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			/* VFs always appear as function 0 of their slot */
			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);	/* bus full: entry was never linked */

out:
	return err;
}
/*
 * Remove @dev from the virtual bus, if present, then return it to
 * pcistub.  pcistub_put_pci_dev() is called outside the lock.
 */
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}
/* Allocate and initialise the per-pdev virtual-bus bookkeeping. */
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	/* every virtual slot starts empty */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}
/* Publish the single root (domain 0, bus 0) of the virtual PCI bus. */
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
/* Tear down the virtual bus: return every device to pcistub and free. */
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			list_del(&e->list);
			pcistub_put_pci_dev(e->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
/*
 * Reverse lookup: find the virtual (domain, bus, devfn) under which a
 * real pci_dev was exported.  Returns 1 and fills the out parameters
 * when found, 0 otherwise.
 */
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
			    &vpci_dev->dev_list[slot],
			    list) {
			dev = entry->dev;
			/* match on real domain/bus/devfn of the entry */
			if (dev && dev->bus->number == pcidev->bus->number
				&& pci_domain_nr(dev->bus) ==
					pci_domain_nr(pcidev->bus)
				&& dev->devfn == pcidev->devfn) {
				found = 1;
				/* virtual bus is always 0000:00 */
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}
/* Virtual-PCI-bus flavour of the xen-pciback backend operations. */
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};
| gpl-2.0 |
AndroPlus-org/kernel | drivers/xen/xen-pciback/vpci.c | 2279 | 6108 | /*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
/* Per-pdev virtual bus: one device list per virtual slot. */
struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};
/* First entry of a non-empty list (head itself when the list is empty). */
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}
/*
 * Map a frontend-visible virtual (domain, bus, devfn) to the real
 * pci_dev exported there, or NULL.  Virtual bus is always 0000:00.
 */
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		/* match on function number within the requested slot */
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}
/* Nonzero when both devices share a domain, bus and physical slot. */
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	return pci_domain_nr(l->bus) == pci_domain_nr(r->bus) &&
	       l->bus == r->bus &&
	       PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn);
}
/*
 * Export @dev on the virtual bus; functions of one physical device
 * share a virtual slot (SR-IOV VFs get their own slot as function 0).
 * The chosen (slot, func) is published via @publish_cb on success.
 */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * virtual functions.
	 */
	if (!dev->is_virtfn) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			/* compare against the slot's first occupant */
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info(DRV_NAME ": vpci: %s: "
					"assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			printk(KERN_INFO DRV_NAME
			       ": vpci: %s: assign to virtual slot %d\n",
			       pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			/* VFs always appear as function 0 of their slot */
			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);	/* bus full: entry was never linked */

out:
	return err;
}
/* Unlink @dev from its virtual slot and hand it back to pcistub. */
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	/* drop the pcistub reference outside the lock */
	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}
/* Allocate and initialise the per-pdev virtual-bus bookkeeping. */
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}
/* Publish the single root (domain 0, bus 0) of the virtual PCI bus. */
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
/* Tear down the virtual bus: return every device to pcistub and free. */
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			list_del(&e->list);
			pcistub_put_pci_dev(e->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
/*
 * Reverse lookup: report the virtual (domain, bus, devfn) under which
 * @pcidev was exported.  Returns 1 on success, 0 when not exported.
 */
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
			    &vpci_dev->dev_list[slot],
			    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
				&& pci_domain_nr(dev->bus) ==
					pci_domain_nr(pcidev->bus)
				&& dev->devfn == pcidev->devfn) {
				found = 1;
				/* virtual bus is always 0000:00 */
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}
/* Virtual-PCI-bus flavour of the xen-pciback backend operations. */
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};
| gpl-2.0 |
Howpathetic/ShooterU_kernel | arch/arm/mach-pxa/lpd270.c | 2279 | 11929 | /*
* linux/arch/arm/mach-pxa/lpd270.c
*
* Support for the LogicPD PXA270 Card Engine.
* Derived from the mainstone code, which carries these notices:
*
* Author: Nicolas Pitre
* Created: Nov 05, 2002
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/pwm_backlight.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <mach/pxa27x.h>
#include <mach/gpio.h>
#include <mach/lpd270.h>
#include <mach/audio.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/irda.h>
#include <mach/ohci.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
/* MFP/GPIO pin configuration applied during machine init. */
static unsigned long lpd270_pin_config[] __initdata = {
	/* Chip Selects */
	GPIO15_nCS_1,	/* Mainboard Flash */
	GPIO78_nCS_2,	/* CPLD + Ethernet */

	/* LCD - 16bpp Active TFT */
	GPIO58_LCD_LDD_0,
	GPIO59_LCD_LDD_1,
	GPIO60_LCD_LDD_2,
	GPIO61_LCD_LDD_3,
	GPIO62_LCD_LDD_4,
	GPIO63_LCD_LDD_5,
	GPIO64_LCD_LDD_6,
	GPIO65_LCD_LDD_7,
	GPIO66_LCD_LDD_8,
	GPIO67_LCD_LDD_9,
	GPIO68_LCD_LDD_10,
	GPIO69_LCD_LDD_11,
	GPIO70_LCD_LDD_12,
	GPIO71_LCD_LDD_13,
	GPIO72_LCD_LDD_14,
	GPIO73_LCD_LDD_15,
	GPIO74_LCD_FCLK,
	GPIO75_LCD_LCLK,
	GPIO76_LCD_PCLK,
	GPIO77_LCD_BIAS,
	GPIO16_PWM0_OUT,	/* Backlight */

	/* USB Host */
	GPIO88_USBH1_PWR,
	GPIO89_USBH1_PEN,

	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO45_AC97_SYSCLK,

	/* wake-up capable GPIO (CPLD cascade input) */
	GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
};
/* Bitmask of currently enabled CPLD interrupt sources. */
static unsigned int lpd270_irq_enabled;

/* Mask (and ack) one CPLD interrupt source. */
static void lpd270_mask_irq(struct irq_data *d)
{
	int lpd270_irq = d->irq - LPD270_IRQ(0);

	/* NOTE(review): writing the inverted bit appears to clear this
	 * source's latched status — confirm against the LogicPD CPLD docs. */
	__raw_writew(~(1 << lpd270_irq), LPD270_INT_STATUS);

	lpd270_irq_enabled &= ~(1 << lpd270_irq);
	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
/* Re-enable a CPLD interrupt source in the mask register. */
static void lpd270_unmask_irq(struct irq_data *d)
{
	int lpd270_irq = d->irq - LPD270_IRQ(0);

	lpd270_irq_enabled |= 1 << lpd270_irq;
	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
/* irq_chip for the board CPLD; mask doubles as ack (status write). */
static struct irq_chip lpd270_irq_chip = {
	.name		= "CPLD",
	.irq_ack	= lpd270_mask_irq,
	.irq_mask	= lpd270_mask_irq,
	.irq_unmask	= lpd270_unmask_irq,
};
/*
 * Chained handler for the CPLD cascade on GPIO0: dispatch every
 * pending, enabled CPLD source to its LPD270_IRQ(n) handler, re-reading
 * the status register until nothing is pending.
 */
static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending;

	pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
	do {
		/* clear useless edge notification */
		desc->irq_data.chip->irq_ack(&desc->irq_data);
		if (likely(pending)) {
			irq = LPD270_IRQ(0) + __ffs(pending);
			generic_handle_irq(irq);

			pending = __raw_readw(LPD270_INT_STATUS) &
					lpd270_irq_enabled;
		}
	} while (pending);
}
/* Register the CPLD irq_chip and hook the cascade on falling-edge GPIO0. */
static void __init lpd270_init_irq(void)
{
	int irq;

	pxa27x_init_irq();

	/* start with every CPLD source masked and latched status cleared */
	__raw_writew(0, LPD270_INT_MASK);
	__raw_writew(0, LPD270_INT_STATUS);

	/* setup extra LogicPD PXA270 irqs */
	for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) {
		irq_set_chip_and_handler(irq, &lpd270_irq_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
	irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
/* Restore the CPLD interrupt mask that is lost across suspend. */
static void lpd270_irq_resume(void)
{
	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}

static struct syscore_ops lpd270_irq_syscore_ops = {
	.resume = lpd270_irq_resume,
};

/* Only register the resume hook when actually running on this board. */
static int __init lpd270_irq_device_init(void)
{
	if (machine_is_logicpd_pxa270()) {
		register_syscore_ops(&lpd270_irq_syscore_ops);
		return 0;
	}
	return -ENODEV;
}

device_initcall(lpd270_irq_device_init);
#endif
/* SMC91x Ethernet controller: 1MB of chip-select space plus its IRQ. */
static struct resource smc91x_resources[] = {
	[0] = {
		.start	= LPD270_ETH_PHYS,
		.end	= (LPD270_ETH_PHYS + 0xfffff),
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= LPD270_ETHERNET_IRQ,
		.end	= LPD270_ETHERNET_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};
/*
 * NOR flash: two banks, one on each of the first two static chip
 * selects (64MB windows).  Bank 0 is the processor-card flash holding
 * the boot image; bank 1 is the (optional, unpartitioned) mainboard
 * flash.
 */
static struct resource lpd270_flash_resources[] = {
	[0] = {
		.start	= PXA_CS0_PHYS,
		.end	= PXA_CS0_PHYS + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= PXA_CS1_PHYS,
		.end	= PXA_CS1_PHYS + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/* Fixed layout of the processor-card flash. */
static struct mtd_partition lpd270_flash0_partitions[] = {
	{
		.name =		"Bootloader",
		.size =		0x00040000,
		.offset =	0,
		.mask_flags =	MTD_WRITEABLE  /* force read-only */
	}, {
		.name =		"Kernel",
		.size =		0x00400000,
		.offset =	0x00040000,
	}, {
		.name =		"Filesystem",
		.size =		MTDPART_SIZ_FULL,
		.offset =	0x00440000
	},
};

/* .width is filled in at runtime by lpd270_init() from BOOT_DEF. */
static struct flash_platform_data lpd270_flash_data[2] = {
	{
		.name		= "processor-flash",
		.map_name	= "cfi_probe",
		.parts		= lpd270_flash0_partitions,
		.nr_parts	= ARRAY_SIZE(lpd270_flash0_partitions),
	}, {
		.name		= "mainboard-flash",
		.map_name	= "cfi_probe",
		.parts		= NULL,
		.nr_parts	= 0,
	}
};

static struct platform_device lpd270_flash_device[2] = {
	{
		.name		= "pxa2xx-flash",
		.id		= 0,
		.dev = {
			.platform_data	= &lpd270_flash_data[0],
		},
		.resource	= &lpd270_flash_resources[0],
		.num_resources	= 1,
	}, {
		.name		= "pxa2xx-flash",
		.id		= 1,
		.dev = {
			.platform_data	= &lpd270_flash_data[1],
		},
		.resource	= &lpd270_flash_resources[1],
		.num_resources	= 1,
	},
};
/*
 * LCD backlight driven by PWM0; effectively on/off only
 * (max_brightness = 1).
 */
static struct platform_pwm_backlight_data lpd270_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 1,
	.dft_brightness	= 1,
	.pwm_period_ns	= 78770,
};

static struct platform_device lpd270_backlight_device = {
	.name		= "pwm-backlight",
	.dev		= {
		.parent	= &pxa27x_device_pwm0.dev,
		.platform_data = &lpd270_backlight_data,
	},
};
/*
 * Timings for the Sharp LCD panels that can be fitted to the LogicPD
 * display kit ("LoLo").  One pxafb_mode_info / pxafb_mach_info pair per
 * panel; the panel in use is selected on the kernel command line via
 * lcd= (see lpd270_set_lcd()).
 */

/* 5.7" TFT QVGA (LoLo display number 1) */
static struct pxafb_mode_info sharp_lq057q3dc02_mode = {
	.pixclock		= 150000,
	.xres			= 320,
	.yres			= 240,
	.bpp			= 16,
	.hsync_len		= 0x14,
	.left_margin		= 0x28,
	.right_margin		= 0x0a,
	.vsync_len		= 0x02,
	.upper_margin		= 0x08,
	.lower_margin		= 0x14,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq057q3dc02 = {
	.modes			= &sharp_lq057q3dc02_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};

/* 12.1" TFT SVGA (LoLo display number 2) */
static struct pxafb_mode_info sharp_lq121s1dg31_mode = {
	.pixclock		= 50000,
	.xres			= 800,
	.yres			= 600,
	.bpp			= 16,
	.hsync_len		= 0x05,
	.left_margin		= 0x52,
	.right_margin		= 0x05,
	.vsync_len		= 0x04,
	.upper_margin		= 0x14,
	.lower_margin		= 0x0a,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq121s1dg31 = {
	.modes			= &sharp_lq121s1dg31_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};

/* 3.6" TFT QVGA (LoLo display number 3) */
static struct pxafb_mode_info sharp_lq036q1da01_mode = {
	.pixclock		= 150000,
	.xres			= 320,
	.yres			= 240,
	.bpp			= 16,
	.hsync_len		= 0x0e,
	.left_margin		= 0x04,
	.right_margin		= 0x0a,
	.vsync_len		= 0x03,
	.upper_margin		= 0x03,
	.lower_margin		= 0x03,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq036q1da01 = {
	.modes			= &sharp_lq036q1da01_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};

/* 6.4" TFT VGA (LoLo display number 5) */
static struct pxafb_mode_info sharp_lq64d343_mode = {
	.pixclock		= 25000,
	.xres			= 640,
	.yres			= 480,
	.bpp			= 16,
	.hsync_len		= 0x31,
	.left_margin		= 0x89,
	.right_margin		= 0x19,
	.vsync_len		= 0x12,
	.upper_margin		= 0x22,
	.lower_margin		= 0x00,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq64d343 = {
	.modes			= &sharp_lq64d343_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};

/* 10.4" TFT VGA (LoLo display number 7) */
static struct pxafb_mode_info sharp_lq10d368_mode = {
	.pixclock		= 25000,
	.xres			= 640,
	.yres			= 480,
	.bpp			= 16,
	.hsync_len		= 0x31,
	.left_margin		= 0x89,
	.right_margin		= 0x19,
	.vsync_len		= 0x12,
	.upper_margin		= 0x22,
	.lower_margin		= 0x00,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq10d368 = {
	.modes			= &sharp_lq10d368_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};

/* 3.5" TFT QVGA (LoLo display number 8) — portrait (240x320) panel */
static struct pxafb_mode_info sharp_lq035q7db02_20_mode = {
	.pixclock		= 150000,
	.xres			= 240,
	.yres			= 320,
	.bpp			= 16,
	.hsync_len		= 0x0e,
	.left_margin		= 0x0a,
	.right_margin		= 0x0a,
	.vsync_len		= 0x03,
	.upper_margin		= 0x05,
	.lower_margin		= 0x14,
	.sync			= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info sharp_lq035q7db02_20 = {
	.modes			= &sharp_lq035q7db02_20_mode,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
				  LCD_ALTERNATE_MAPPING,
};
/* Panel selected via the "lcd=" command-line option; NULL if none/unknown. */
static struct pxafb_mach_info *lpd270_lcd_to_use;

/*
 * "lcd=" early-param parser.  Case-insensitively matches the option
 * against the known panel names (prefix match, like the original
 * strnicmp chain) and records the corresponding pxafb_mach_info.
 * Unknown names are logged and ignored.  Always returns 1 so the
 * option is consumed.
 */
static int __init lpd270_set_lcd(char *str)
{
	static const struct {
		const char *name;
		int len;
		struct pxafb_mach_info *info;
	} panels[] = {
		{ "lq057q3dc02",	11, &sharp_lq057q3dc02 },
		{ "lq121s1dg31",	11, &sharp_lq121s1dg31 },
		{ "lq036q1da01",	11, &sharp_lq036q1da01 },
		{ "lq64d343",		 8, &sharp_lq64d343 },
		{ "lq10d368",		 8, &sharp_lq10d368 },
		{ "lq035q7db02-20",	14, &sharp_lq035q7db02_20 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(panels); i++) {
		if (!strnicmp(str, panels[i].name, panels[i].len)) {
			lpd270_lcd_to_use = panels[i].info;
			return 1;
		}
	}

	printk(KERN_INFO "lpd270: unknown lcd panel [%s]\n", str);
	return 1;
}
__setup("lcd=", lpd270_set_lcd);
/* Devices registered in one go by lpd270_init(). */
static struct platform_device *platform_devices[] __initdata = {
	&smc91x_device,
	&lpd270_backlight_device,
	&lpd270_flash_device[0],
	&lpd270_flash_device[1],
};

/* USB host (OHCI): all ports enabled, active-low power/overcurrent. */
static struct pxaohci_platform_data lpd270_ohci_platform_data = {
	.port_mode	= PMM_PERPORT_MODE,
	.flags		= ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
};
/*
 * Board init: pin mux, UART/flash/arbiter setup, then device
 * registration (Ethernet, backlight, flash, AC97, optional framebuffer,
 * USB host).
 */
static void __init lpd270_init(void)
{
	pxa2xx_mfp_config(ARRAY_AND_SIZE(lpd270_pin_config));

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	/* bus width of the boot flash is latched in BOOT_DEF bit 0 */
	lpd270_flash_data[0].width = (__raw_readl(BOOT_DEF) & 1) ? 2 : 4;
	lpd270_flash_data[1].width = 4;

	/*
	 * System bus arbiter setting:
	 * - Core_Park
	 * - LCD_wt:DMA_wt:CORE_Wt = 2:3:4
	 */
	ARB_CNTRL = ARB_CORE_PARK | 0x234;

	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));

	pxa_set_ac97_info(NULL);

	/* only register the framebuffer if an lcd= panel was selected */
	if (lpd270_lcd_to_use != NULL)
		pxa_set_fb_info(NULL, lpd270_lcd_to_use);

	pxa_set_ohci_info(&lpd270_ohci_platform_data);
}
/* Static mapping for the board CPLD registers. */
static struct map_desc lpd270_io_desc[] __initdata = {
	{
		.virtual	= LPD270_CPLD_VIRT,
		.pfn		= __phys_to_pfn(LPD270_CPLD_PHYS),
		.length		= LPD270_CPLD_SIZE,
		.type		= MT_DEVICE,
	},
};

/*
 * Map board-specific I/O and tweak power-manager registers so internal
 * SRAM can be used as a framebuffer across sleep.
 */
static void __init lpd270_map_io(void)
{
	pxa27x_map_io();
	iotable_init(lpd270_io_desc, ARRAY_SIZE(lpd270_io_desc));

	/* for use I SRAM as framebuffer.  */
	PSLR |= 0x00000F04;
	PCFR  = 0x00000066;
}
/* Machine descriptor tying the board callbacks above together. */
MACHINE_START(LOGICPD_PXA270, "LogicPD PXA270 Card Engine")
	/* Maintainer: Peter Barada */
	.boot_params	= 0xa0000100,
	.map_io		= lpd270_map_io,
	.nr_irqs	= LPD270_NR_IRQS,
	.init_irq	= lpd270_init_irq,
	.timer		= &pxa_timer,
	.init_machine	= lpd270_init,
MACHINE_END
/*
* omap_vout.c
*
* Copyright (C) 2005-2010 Texas Instruments.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
* Leveraged code from the OMAP2 camera driver
* Video-for-Linux (Version 2) camera capture driver for
* the OMAP24xx camera controller.
*
* Author: Andy Lowe (source@mvista.com)
*
* Copyright (C) 2004 MontaVista Software, Inc.
* Copyright (C) 2010 Texas Instruments.
*
* History:
* 20-APR-2006 Khasim Modified VRFB based Rotation,
* The image data is always read from 0 degree
* view and written
* to the virtual space of desired rotation angle
* 4-DEC-2006 Jian Changed to support better memory management
*
* 17-Nov-2008 Hardik Changed driver to use video_ioctl2
*
* 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <plat/dma.h>
#include <plat/vram.h>
#include <plat/vrfb.h>
#include <video/omapdss.h>
#include "omap_voutlib.h"
#include "omap_voutdef.h"
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
MODULE_LICENSE("GPL");
/* Driver Configuration macros */
#define VOUT_NAME		"omap_vout"

/* Index of the video output device (/dev/videoN) being served. */
enum omap_vout_channels {
	OMAP_VIDEO1,
	OMAP_VIDEO2,
};

/* Whether a system-DMA channel has been obtained for VRFB transfers. */
enum dma_channel_state {
	DMA_CHAN_NOT_ALLOTED,
	DMA_CHAN_ALLOTED,
};

#define QQVGA_WIDTH		160
#define QQVGA_HEIGHT		120

/* Max Resolution supported by the driver */
#define VID_MAX_WIDTH		1280	/* Largest width */
#define VID_MAX_HEIGHT		720	/* Largest height */

/* Mimimum requirement is 2x2 for DSS */
#define VID_MIN_WIDTH		2
#define VID_MIN_HEIGHT		2

/* 2048 x 2048 is max res supported by OMAP display controller */
#define MAX_PIXELS_PER_LINE     2048

#define VRFB_TX_TIMEOUT         1000
#define VRFB_NUM_BUFS		4

/* Max buffer size tobe allocated during init */
#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)

static struct videobuf_queue_ops video_vbq_ops;

/* Variables configurable through module params*/
static u32 video1_numbuffers = 3;
static u32 video2_numbuffers = 3;
static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
static u32 vid1_static_vrfb_alloc;
static u32 vid2_static_vrfb_alloc;
static int debug;

/* Module parameters */
module_param(video1_numbuffers, uint, S_IRUGO);
MODULE_PARM_DESC(video1_numbuffers,
	"Number of buffers to be allocated at init time for Video1 device.");

module_param(video2_numbuffers, uint, S_IRUGO);
MODULE_PARM_DESC(video2_numbuffers,
	"Number of buffers to be allocated at init time for Video2 device.");

module_param(video1_bufsize, uint, S_IRUGO);
MODULE_PARM_DESC(video1_bufsize,
	"Size of the buffer to be allocated for video1 device");

module_param(video2_bufsize, uint, S_IRUGO);
MODULE_PARM_DESC(video2_bufsize,
	"Size of the buffer to be allocated for video2 device");

module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
MODULE_PARM_DESC(vid1_static_vrfb_alloc,
	"Static allocation of the VRFB buffer for video1 device");

module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
MODULE_PARM_DESC(vid2_static_vrfb_alloc,
	"Static allocation of the VRFB buffer for video2 device");

module_param(debug, bool, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* list of image formats supported by OMAP2 video pipelines */
/* Note: "static const" (not "const static") per kernel coding style. */
static const struct v4l2_fmtdesc omap_formats[] = {
	{
		/* Note:  V4L2 defines RGB565 as:
		 *
		 *      Byte 0                    Byte 1
		 *      g2 g1 g0 r4 r3 r2 r1 r0   b4 b3 b2 b1 b0 g5 g4 g3
		 *
		 * We interpret RGB565 as:
		 *
		 *      Byte 0                    Byte 1
		 *      g2 g1 g0 b4 b3 b2 b1 b0   r4 r3 r2 r1 r0 g5 g4 g3
		 */
		.description = "RGB565, le",
		.pixelformat = V4L2_PIX_FMT_RGB565,
	},
	{
		/* Note: V4L2 defines RGB32 as: RGB-8-8-8-8  we use
		 *  this for RGB24 unpack mode, the last 8 bits are ignored
		 * */
		.description = "RGB32, le",
		.pixelformat = V4L2_PIX_FMT_RGB32,
	},
	{
		/* Note: V4L2 defines RGB24 as: RGB-8-8-8  we use
		 *        this for RGB24 packed mode
		 *
		 */
		.description = "RGB24, le",
		.pixelformat = V4L2_PIX_FMT_RGB24,
	},
	{
		.description = "YUYV (YUV 4:2:2), packed",
		.pixelformat = V4L2_PIX_FMT_YUYV,
	},
	{
		.description = "UYVY, packed",
		.pixelformat = V4L2_PIX_FMT_UYVY,
	},
};

#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
/*
 * Allocate buffers
 *
 * Allocates a physically contiguous, page-aligned buffer of at least
 * buf_size bytes with __get_free_pages() and marks every page reserved
 * (so the pages can later be remapped to userspace).  Returns the
 * kernel virtual address (0 on failure) and stores the physical address
 * through *phys_addr.
 *
 * NOTE(review): on allocation failure *phys_addr is still written with
 * virt_to_phys(0); callers appear to test only the returned virtual
 * address — confirm before relying on *phys_addr.
 */
static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
{
	u32 order, size;
	unsigned long virt_addr, addr;

	size = PAGE_ALIGN(buf_size);
	order = get_order(size);
	virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
	addr = virt_addr;

	if (virt_addr) {
		/* reserve each page so remap_pfn_range to userspace works */
		while (size > 0) {
			SetPageReserved(virt_to_page(addr));
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	*phys_addr = (u32) virt_to_phys((void *) virt_addr);
	return virt_addr;
}
/*
 * Free buffers
 *
 * Counterpart of omap_vout_alloc_buffer(): clears the reserved flag on
 * every page of the buffer, then returns the pages to the allocator.
 */
static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
{
	u32 aligned = PAGE_ALIGN(buf_size);
	u32 off;

	for (off = 0; off < aligned; off += PAGE_SIZE)
		ClearPageReserved(virt_to_page(virtaddr + off));

	free_pages(virtaddr, get_order(aligned));
}
/*
 * Function for allocating video buffers
 *
 * Allocates *count VRFB shadow buffers of vout->smsshado_size bytes
 * each.  Buffers that already exist are kept.  If any allocation fails,
 * everything allocated so far is rolled back, *count is zeroed, and
 * -ENOMEM is returned — except that with MMAP memory and a valid
 * startindex, a shortfall at or beyond startindex is tolerated (the
 * loop just stops there).  Successful buffers are zero-filled.
 */
static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
		unsigned int *count, int startindex)
{
	int i, j;

	for (i = 0; i < *count; i++) {
		if (!vout->smsshado_virt_addr[i]) {
			vout->smsshado_virt_addr[i] =
				omap_vout_alloc_buffer(vout->smsshado_size,
						&vout->smsshado_phy_addr[i]);
		}
		/* MMAP + startindex: accept a partial allocation */
		if (!vout->smsshado_virt_addr[i] && startindex != -1) {
			if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
				break;
		}
		if (!vout->smsshado_virt_addr[i]) {
			/* roll back every buffer allocated so far */
			for (j = 0; j < i; j++) {
				omap_vout_free_buffer(
						vout->smsshado_virt_addr[j],
						vout->smsshado_size);
				vout->smsshado_virt_addr[j] = 0;
				vout->smsshado_phy_addr[j] = 0;
			}
			*count = 0;
			return -ENOMEM;
		}
		memset((void *) vout->smsshado_virt_addr[i], 0,
				vout->smsshado_size);
	}
	return 0;
}
/*
 * Try format
 *
 * Clamp the requested width/height to the driver limits and coerce the
 * pixel format to a supported one (falling back to omap_formats[0]).
 * Fills in colorspace, bytesperline and sizeimage.  Returns the bytes
 * per pixel of the resulting format.
 */
static int omap_vout_try_format(struct v4l2_pix_format *pix)
{
	int ifmt, bpp = 0;

	pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
						(u32)VID_MAX_HEIGHT);
	pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);

	for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
		if (pix->pixelformat == omap_formats[ifmt].pixelformat)
			break;
	}

	if (ifmt == NUM_OUTPUT_FORMATS)
		ifmt = 0;

	pix->pixelformat = omap_formats[ifmt].pixelformat;
	pix->field = V4L2_FIELD_ANY;
	pix->priv = 0;

	/* note: default is deliberately fused with the YUV cases — any
	 * format not listed below is treated as YUV 4:2:2 */
	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
	default:
		pix->colorspace = V4L2_COLORSPACE_JPEG;
		bpp = YUYV_BPP;
		break;
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_RGB565X:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB565_BPP;
		break;
	case V4L2_PIX_FMT_RGB24:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB24_BPP;
		break;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB32_BPP;
		break;
	}
	pix->bytesperline = pix->width * bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	return bpp;
}
/*
 * omap_vout_uservirt_to_phys: This inline function is used to convert user
 * space virtual address to physical address.
 *
 * Three cases:
 *  - kernel direct-mapped addresses: plain virt_to_phys();
 *  - VM_IO mappings (kernel memory mmapped to userspace): derived from
 *    the vma's pgoff;
 *  - ordinary userland pages: pinned via get_user_pages().
 *
 * Fix vs. original: find_vma() must only be called with mmap_sem held,
 * so the lookup is now done under down_read() instead of before it.
 *
 * NOTE(review): the get_user_pages() reference is never dropped here;
 * presumably the page stays pinned for the DMA's lifetime — confirm
 * who releases it.
 */
static u32 omap_vout_uservirt_to_phys(u32 virtp)
{
	unsigned long physp = 0;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;

	/* For kernel direct-mapped memory, take the easy way */
	if (virtp >= PAGE_OFFSET)
		return virt_to_phys((void *) virtp);

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, virtp);
	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
		/* this will catch, kernel-allocated, mmaped-to-usermode
		   addresses */
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
		up_read(&mm->mmap_sem);
	} else {
		/* otherwise, use get_user_pages() for general userland pages */
		int res, nr_pages = 1;
		struct page *pages;

		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
				0, &pages, NULL);
		up_read(&mm->mmap_sem);

		if (res == nr_pages) {
			physp =  __pa(page_address(&pages[0]) +
					(virtp & ~PAGE_MASK));
		} else {
			printk(KERN_WARNING VOUT_NAME
					"get_user_pages failed\n");
			return 0;
		}
	}

	return physp;
}
/*
 * DMA completion callback for the V4L2 -> VRFB copy: flag the transfer
 * as done and wake any task sleeping on the wait queue.
 */
static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct vid_vrfb_dma *dma = data;

	dma->tx_status = 1;
	wake_up_interruptible(&dma->wait);
}
/*
 * Release the VRFB context once the module exits
 *
 * Frees every VRFB rotation context and, if one was obtained, the
 * system-DMA channel used for VRFB transfers.
 */
static void omap_vout_release_vrfb(struct omap_vout_device *vout)
{
	int i;

	for (i = 0; i < VRFB_NUM_BUFS; i++)
		omap_vrfb_release_ctx(&vout->vrfb_context[i]);

	if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
		omap_free_dma(vout->vrfb_dma_tx.dma_ch);
	}
}
/*
 * Return true if rotation is 90 or 270
 */
static inline int rotate_90_or_270(const struct omap_vout_device *vout)
{
	switch (vout->rotation) {
	case dss_rotation_90_degree:
	case dss_rotation_270_degree:
		return 1;
	default:
		return 0;
	}
}
/*
* Return true if rotation is enabled
*/
static inline int rotation_enabled(const struct omap_vout_device *vout)
{
return vout->rotation || vout->mirror;
}
/*
 * Reverse the rotation degree if mirroring is enabled
 *
 * Without mirroring the configured rotation is used as-is.  With
 * mirroring, 90<->270 swap, 180 maps to 0, and anything else (i.e. 0)
 * maps to 180.
 */
static inline int calc_rotation(const struct omap_vout_device *vout)
{
	if (!vout->mirror)
		return vout->rotation;

	if (vout->rotation == dss_rotation_90_degree)
		return dss_rotation_270_degree;
	if (vout->rotation == dss_rotation_270_degree)
		return dss_rotation_90_degree;
	if (vout->rotation == dss_rotation_180_degree)
		return dss_rotation_0_degree;

	return dss_rotation_180_degree;
}
/*
 * Free the V4L2 buffers
 *
 * Releases the default (init-time) set of V4L2 buffers for this video
 * device and clears the recorded addresses.
 */
static void omap_vout_free_buffers(struct omap_vout_device *vout)
{
	int i, numbuffers;

	/* Pick the per-device buffer count and size (video1 vs video2) */
	numbuffers = (vout->vid) ?  video2_numbuffers : video1_numbuffers;
	vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;

	for (i = 0; i < numbuffers; i++) {
		omap_vout_free_buffer(vout->buf_virt_addr[i],
				vout->buffer_size);
		vout->buf_phy_addr[i] = 0;
		vout->buf_virt_addr[i] = 0;
	}
}
/*
 * Free VRFB buffers
 *
 * Releases every VRFB shadow buffer and clears the stored addresses.
 */
static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
{
	int i;

	for (i = 0; i < VRFB_NUM_BUFS; i++) {
		omap_vout_free_buffer(vout->smsshado_virt_addr[i],
				vout->smsshado_size);
		vout->smsshado_virt_addr[i] = 0;
		vout->smsshado_phy_addr[i] = 0;
	}
}
/*
 * Allocate the buffers for the VRFB space.  Data is copied from V4L2
 * buffers to the VRFB buffers using the DMA engine.
 *
 * VRFB buffers are only allocated here when rotation is in use and they
 * were not already allocated statically at init time; each context is
 * then configured for the current pixel size and YUV/RGB mode.
 */
static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
			  unsigned int *count, unsigned int startindex)
{
	int i;
	bool yuv_mode;

	if (rotation_enabled(vout) && !vout->vrfb_static_allocation &&
	    omap_vout_allocate_vrfb_buffers(vout, count, startindex))
		return -ENOMEM;

	yuv_mode = (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
		    vout->dss_mode == OMAP_DSS_COLOR_UYVY);

	for (i = 0; i < *count; i++)
		omap_vrfb_setup(&vout->vrfb_context[i],
				vout->smsshado_phy_addr[i], vout->pix.width,
				vout->pix.height, vout->bpp, yuv_mode);

	return 0;
}
/*
 * Convert V4L2 rotation to DSS rotation
 *	V4L2 understand 0, 90, 180, 270.
 *	Convert to 0, 1, 2 and 3 respectively for DSS
 *
 * On an unsupported angle, *rotation is left untouched and -EINVAL is
 * returned.  (The mirror parameter is currently unused.)
 */
static int v4l2_rot_to_dss_rot(int v4l2_rotation,
			enum dss_rotation *rotation, bool mirror)
{
	switch (v4l2_rotation) {
	case 0:
		*rotation = dss_rotation_0_degree;
		return 0;
	case 90:
		*rotation = dss_rotation_90_degree;
		return 0;
	case 180:
		*rotation = dss_rotation_180_degree;
		return 0;
	case 270:
		*rotation = dss_rotation_270_degree;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Calculate the buffer offsets from which the streaming should
 * start. This offset calculation is mainly required because of
 * the VRFB 32 pixels alignment with rotation.
 *
 * Computes vout->cropped_offset (the byte offset into the buffer/VRFB
 * view where scanout starts) from the crop rectangle, pixel format,
 * mirroring and the effective rotation.  Also caches the pixel sizes
 * (ps, vr_ps) and the line length used by the DMA setup.  Returns 0 on
 * success, -1 when no display is attached to the overlay.
 */
static int omap_vout_calculate_offset(struct omap_vout_device *vout)
{
	struct omap_overlay *ovl;
	enum dss_rotation rotation;
	struct omapvideo_info *ovid;
	bool mirroring = vout->mirror;
	struct omap_dss_device *cur_display;
	struct v4l2_rect *crop = &vout->crop;
	struct v4l2_pix_format *pix = &vout->pix;
	int *cropped_offset = &vout->cropped_offset;
	int vr_ps = 1, ps = 2, temp_ps = 2;
	int offset = 0, ctop = 0, cleft = 0, line_length = 0;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	if (!ovl->manager || !ovl->manager->device)
		return -1;

	cur_display = ovl->manager->device;
	rotation = calc_rotation(vout);

	if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
			V4L2_PIX_FMT_UYVY == pix->pixelformat) {
		if (rotation_enabled(vout)) {
			/*
			 * ps    - Actual pixel size for YUYV/UYVY for
			 *         VRFB/Mirroring is 4 bytes
			 * vr_ps - Virtually pixel size for YUYV/UYVY is
			 *         2 bytes
			 */
			ps = 4;
			vr_ps = 2;
		} else {
			ps = 2;	/* otherwise the pixel size is 2 byte */
		}
	} else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
		ps = 4;
	} else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
		ps = 3;
	}
	vout->ps = ps;
	vout->vr_ps = vr_ps;

	if (rotation_enabled(vout)) {
		/* VRFB view: fixed 2048-pixel lines; crop measured from the
		 * opposite corner */
		line_length = MAX_PIXELS_PER_LINE;
		ctop = (pix->height - crop->height) - crop->top;
		cleft = (pix->width - crop->width) - crop->left;
	} else {
		line_length = pix->width;
	}
	vout->line_length = line_length;
	switch (rotation) {
	case dss_rotation_90_degree:
		offset = vout->vrfb_context[0].yoffset *
			vout->vrfb_context[0].bytespp;
		temp_ps = ps / vr_ps;
		if (mirroring == 0) {
			*cropped_offset = offset + line_length *
				temp_ps * cleft + crop->top * temp_ps;
		} else {
			/* mirrored: start from the last cropped column */
			*cropped_offset = offset + line_length * temp_ps *
				cleft + crop->top * temp_ps + (line_length *
				((crop->width / (vr_ps)) - 1) * ps);
		}
		break;
	case dss_rotation_180_degree:
		offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
			vout->vrfb_context[0].bytespp) +
			(vout->vrfb_context[0].xoffset *
			vout->vrfb_context[0].bytespp));
		if (mirroring == 0) {
			*cropped_offset = offset + (line_length * ps * ctop) +
				(cleft / vr_ps) * ps;

		} else {
			/* mirrored: start from the last cropped row */
			*cropped_offset = offset + (line_length * ps * ctop) +
				(cleft / vr_ps) * ps + (line_length *
				(crop->height - 1) * ps);
		}
		break;
	case dss_rotation_270_degree:
		offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
			vout->vrfb_context[0].bytespp;
		temp_ps = ps / vr_ps;
		if (mirroring == 0) {
			*cropped_offset = offset + line_length *
			    temp_ps * crop->left + ctop * ps;
		} else {
			*cropped_offset = offset + line_length *
				temp_ps * crop->left + ctop * ps +
				(line_length * ((crop->width / vr_ps) - 1) *
				 ps);
		}
		break;
	case dss_rotation_0_degree:
		if (mirroring == 0) {
			*cropped_offset = (line_length * ps) *
				crop->top + (crop->left / vr_ps) * ps;
		} else {
			*cropped_offset = (line_length * ps) *
				crop->top + (crop->left / vr_ps) * ps +
				(line_length * (crop->height - 1) * ps);
		}
		break;
	default:
		*cropped_offset = (line_length * ps * crop->top) /
			vr_ps + (crop->left * ps) / vr_ps +
			((crop->width / vr_ps) - 1) * ps;
		break;
	}
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
			__func__, *cropped_offset);
	return 0;
}
/*
 * Convert V4L2 pixel format to DSS pixel format
 *
 * Returns the omap_color_mode for the current pixel format, or -EINVAL
 * for an unset (0) or unsupported format.
 *
 * Fix vs. original: `mode` was left uninitialized on the `case 0:`
 * path, so an unset pixelformat returned an indeterminate value.  It
 * now defaults to -EINVAL, which callers already treat as an error.
 */
static int video_mode_to_dss_mode(struct omap_vout_device *vout)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct v4l2_pix_format *pix = &vout->pix;
	int mode = -EINVAL;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUYV:
		mode = OMAP_DSS_COLOR_YUV2;
		break;
	case V4L2_PIX_FMT_UYVY:
		mode = OMAP_DSS_COLOR_UYVY;
		break;
	case V4L2_PIX_FMT_RGB565:
		mode = OMAP_DSS_COLOR_RGB16;
		break;
	case V4L2_PIX_FMT_RGB24:
		mode = OMAP_DSS_COLOR_RGB24P;
		break;
	case V4L2_PIX_FMT_RGB32:
		/* VIDEO1 has no alpha support; treat RGB32 as unpacked RGB24 */
		mode = (ovl->id == OMAP_DSS_VIDEO1) ?
			OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
		break;
	case V4L2_PIX_FMT_BGR32:
		mode = OMAP_DSS_COLOR_RGBX32;
		break;
	default:
		break;	/* mode stays -EINVAL */
	}
	return mode;
}
/*
 * Setup the overlay
 *
 * Programs the DSS overlay with the current source buffer (addr),
 * crop/output geometry, color mode and rotation settings.  Fails with
 * -EINVAL when scaling is requested on an overlay without scaling
 * capability, or when the pixel format cannot be mapped to a DSS mode.
 */
int omapvid_setup_overlay(struct omap_vout_device *vout,
		struct omap_overlay *ovl, int posx, int posy, int outw,
		int outh, u32 addr)
{
	int ret = 0;
	struct omap_overlay_info info;
	int cropheight, cropwidth, pixheight, pixwidth;

	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
			(outw != vout->pix.width || outh != vout->pix.height)) {
		ret = -EINVAL;
		goto setup_ovl_err;
	}

	vout->dss_mode = video_mode_to_dss_mode(vout);
	if (vout->dss_mode == -EINVAL) {
		ret = -EINVAL;
		goto setup_ovl_err;
	}

	/* Setup the input plane parameters according to
	 * rotation value selected.
	 */
	if (rotate_90_or_270(vout)) {
		/* width/height swap for 90/270 degrees */
		cropheight = vout->crop.width;
		cropwidth = vout->crop.height;
		pixheight = vout->pix.width;
		pixwidth = vout->pix.height;
	} else {
		cropheight = vout->crop.height;
		cropwidth = vout->crop.width;
		pixheight = vout->pix.height;
		pixwidth = vout->pix.width;
	}

	ovl->get_overlay_info(ovl, &info);
	info.paddr = addr;
	info.vaddr = NULL;
	info.width = cropwidth;
	info.height = cropheight;
	info.color_mode = vout->dss_mode;
	info.mirror = vout->mirror;
	info.pos_x = posx;
	info.pos_y = posy;
	info.out_width = outw;
	info.out_height = outh;
	info.global_alpha = vout->win.global_alpha;
	if (!rotation_enabled(vout)) {
		info.rotation = 0;
		info.rotation_type = OMAP_DSS_ROT_DMA;
		info.screen_width = pixwidth;
	} else {
		/* VRFB rotation path: fixed 2048-pixel virtual line */
		info.rotation = vout->rotation;
		info.rotation_type = OMAP_DSS_ROT_VRFB;
		info.screen_width = 2048;
	}

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
		"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
		"out_height=%d rotation_type=%d screen_width=%d\n",
		__func__, info.enabled, info.paddr, info.width, info.height,
		info.color_mode, info.rotation, info.mirror, info.pos_x,
		info.pos_y, info.out_width, info.out_height, info.rotation_type,
		info.screen_width);

	ret = ovl->set_overlay_info(ovl, &info);
	if (ret)
		goto setup_ovl_err;

	return 0;

setup_ovl_err:
	v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
	return ret;
}
/*
 * Initialize the overlay structure
 *
 * For every overlay attached to this device, translates the V4L2
 * window position into panel coordinates for the current rotation
 * (swapping output width/height for 90/270) and programs the overlay
 * via omapvid_setup_overlay().  Returns -EINVAL when an overlay has no
 * manager/display attached.
 */
int omapvid_init(struct omap_vout_device *vout, u32 addr)
{
	int ret = 0, i;
	struct v4l2_window *win;
	struct omap_overlay *ovl;
	int posx, posy, outw, outh, temp;
	struct omap_video_timings *timing;
	struct omapvideo_info *ovid = &vout->vid_info;

	win = &vout->win;
	for (i = 0; i < ovid->num_overlays; i++) {
		ovl = ovid->overlays[i];
		if (!ovl->manager || !ovl->manager->device)
			return -EINVAL;

		timing = &ovl->manager->device->panel.timings;

		outw = win->w.width;
		outh = win->w.height;
		switch (vout->rotation) {
		case dss_rotation_90_degree:
			/* Invert the height and width for 90
			 * and 270 degree rotation
			 */
			temp = outw;
			outw = outh;
			outh = temp;
			posy = (timing->y_res - win->w.width) - win->w.left;
			posx = win->w.top;
			break;

		case dss_rotation_180_degree:
			posx = (timing->x_res - win->w.width) - win->w.left;
			posy = (timing->y_res - win->w.height) - win->w.top;
			break;

		case dss_rotation_270_degree:
			temp = outw;
			outw = outh;
			outh = temp;
			posy = win->w.left;
			posx = (timing->x_res - win->w.height) - win->w.top;
			break;

		default:
			posx = win->w.left;
			posy = win->w.top;
			break;
		}

		ret = omapvid_setup_overlay(vout, ovl, posx, posy,
				outw, outh, addr);
		if (ret)
			goto omapvid_init_err;
	}
	return 0;

omapvid_init_err:
	v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
	return ret;
}
/*
 * Apply the changes set the go bit of DSS
 *
 * Walks every overlay of this device and asks its manager to latch the
 * pending configuration.  Fails with -EINVAL if any overlay has no
 * manager or display attached.
 */
int omapvid_apply_changes(struct omap_vout_device *vout)
{
	struct omapvideo_info *ovid = &vout->vid_info;
	int idx;

	for (idx = 0; idx < ovid->num_overlays; idx++) {
		struct omap_overlay *ovl = ovid->overlays[idx];

		if (!ovl->manager || !ovl->manager->device)
			return -EINVAL;
		ovl->manager->apply(ovl->manager);
	}

	return 0;
}
/*
 * DSS interrupt handler: completes the currently displayed buffer and
 * flips to the next queued one.
 *
 * For DPI panels this runs on VSYNC; for interlaced (SDTV) output it
 * tracks odd/even fields and only flips on a full frame.  Runs with
 * vbq_lock held; all early exits go through vout_isr_err to drop it.
 */
void omap_vout_isr(void *arg, unsigned int irqstatus)
{
	int ret;
	u32 addr, fid;
	struct omap_overlay *ovl;
	struct timeval timevalue;
	struct omapvideo_info *ovid;
	struct omap_dss_device *cur_display;
	struct omap_vout_device *vout = (struct omap_vout_device *)arg;

	if (!vout->streaming)
		return;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	if (!ovl->manager || !ovl->manager->device)
		return;

	cur_display = ovl->manager->device;

	spin_lock(&vout->vbq_lock);
	do_gettimeofday(&timevalue);
	if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
		if (!(irqstatus & DISPC_IRQ_VSYNC))
			goto vout_isr_err;

		/* complete the buffer that just finished displaying */
		if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
			vout->cur_frm->ts = timevalue;
			vout->cur_frm->state = VIDEOBUF_DONE;
			wake_up_interruptible(&vout->cur_frm->done);
			vout->cur_frm = vout->next_frm;
		}
		vout->first_int = 0;
		if (list_empty(&vout->dma_queue))
			goto vout_isr_err;

		/* pull the next queued buffer and program it into DSS */
		vout->next_frm = list_entry(vout->dma_queue.next,
				struct videobuf_buffer, queue);
		list_del(&vout->next_frm->queue);

		vout->next_frm->state = VIDEOBUF_ACTIVE;

		addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
			+ vout->cropped_offset;

		/* First save the configuration in ovelray structure */
		ret = omapvid_init(vout, addr);
		if (ret)
			printk(KERN_ERR VOUT_NAME
				"failed to set overlay info\n");
		/* Enable the pipeline and set the Go bit */
		ret = omapvid_apply_changes(vout);
		if (ret)
			printk(KERN_ERR VOUT_NAME "failed to change mode\n");
	} else {
		/* interlaced display: synchronize to odd/even field irqs */
		if (vout->first_int) {
			vout->first_int = 0;
			goto vout_isr_err;
		}
		if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
			fid = 1;
		else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
			fid = 0;
		else
			goto vout_isr_err;

		vout->field_id ^= 1;
		if (fid != vout->field_id) {
			/* resynchronize on an even field */
			if (0 == fid)
				vout->field_id = fid;

			goto vout_isr_err;
		}
		if (0 == fid) {
			/* even field: current frame fully displayed */
			if (vout->cur_frm == vout->next_frm)
				goto vout_isr_err;

			vout->cur_frm->ts = timevalue;
			vout->cur_frm->state = VIDEOBUF_DONE;
			wake_up_interruptible(&vout->cur_frm->done);
			vout->cur_frm = vout->next_frm;
		} else if (1 == fid) {
			/* odd field: queue the next frame if one is ready */
			if (list_empty(&vout->dma_queue) ||
					(vout->cur_frm != vout->next_frm))
				goto vout_isr_err;

			vout->next_frm = list_entry(vout->dma_queue.next,
					struct videobuf_buffer, queue);
			list_del(&vout->next_frm->queue);

			vout->next_frm->state = VIDEOBUF_ACTIVE;
			addr = (unsigned long)
				vout->queued_buf_addr[vout->next_frm->i] +
				vout->cropped_offset;
			/* First save the configuration in ovelray structure */
			ret = omapvid_init(vout, addr);
			if (ret)
				printk(KERN_ERR VOUT_NAME
						"failed to set overlay info\n");
			/* Enable the pipeline and set the Go bit */
			ret = omapvid_apply_changes(vout);
			if (ret)
				printk(KERN_ERR VOUT_NAME
						"failed to change mode\n");
		}

	}

vout_isr_err:
	spin_unlock(&vout->vbq_lock);
}
/* Video buffer call backs */

/*
 * Buffer setup function is called by videobuf layer when REQBUF ioctl is
 * called. This is used to setup buffers and return size and count of
 * buffers allocated. After the call to this buffer, videobuf layer will
 * setup buffer queue depending on the size and count of buffers
 */
static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
			  unsigned int *size)
{
	int startindex = 0, i, j;
	u32 phy_addr = 0, virt_addr = 0;
	struct omap_vout_device *vout = q->priv_data;

	if (!vout)
		return -EINVAL;

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
		return -EINVAL;

	/* never hand out fewer than the init-time buffers for MMAP,
	 * and cap at VRFB_NUM_BUFS when rotating */
	startindex = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;
	if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
		*count = startindex;

	if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
		*count = VRFB_NUM_BUFS;

	/* If rotation is enabled, allocate memory for VRFB space also */
	if (rotation_enabled(vout))
		if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
			return -ENOMEM;

	if (V4L2_MEMORY_MMAP != vout->memory)
		return 0;

	/* Now allocated the V4L2 buffers */
	*size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
	startindex = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;

	/* Check the size of the buffer */
	if (*size > vout->buffer_size) {
		v4l2_err(&vout->vid_dev->v4l2_dev,
				"buffer allocation mismatch [%u] [%u]\n",
				*size, vout->buffer_size);
		return -ENOMEM;
	}

	for (i = startindex; i < *count; i++) {
		vout->buffer_size = *size;

		virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
				&phy_addr);
		if (!virt_addr) {
			if (!rotation_enabled(vout))
				break;
			/* Free the VRFB buffers if no space for V4L2 buffers */
			/* NOTE(review): this error path frees the VRFB
			 * shadow buffers but then falls through and keeps
			 * looping, recording a zero virt_addr — looks like
			 * a missing break; confirm intended behavior. */
			for (j = i; j < *count; j++) {
				omap_vout_free_buffer(
						vout->smsshado_virt_addr[j],
						vout->smsshado_size);
				vout->smsshado_virt_addr[j] = 0;
				vout->smsshado_phy_addr[j] = 0;
			}
		}
		vout->buf_virt_addr[i] = virt_addr;
		vout->buf_phy_addr[i] = phy_addr;
	}
	*count = vout->buffer_allocated = i;

	return 0;
}
/*
 * Free the V4L2 buffers additionally allocated than default
 * number of buffers and free all the VRFB buffers
 *
 * Keeps the init-time default buffers; releases everything allocated
 * beyond them during REQBUFS, plus any dynamically allocated VRFB
 * shadow buffers (static ones survive for reuse).
 */
static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
{
	int num_buffers = 0, i;

	num_buffers = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;

	for (i = num_buffers; i < vout->buffer_allocated; i++) {
		if (vout->buf_virt_addr[i])
			omap_vout_free_buffer(vout->buf_virt_addr[i],
					vout->buffer_size);

		vout->buf_virt_addr[i] = 0;
		vout->buf_phy_addr[i] = 0;
	}
	/* Free the VRFB buffers only if they are allocated
	 * during reqbufs.  Don't free if init time allocated
	 */
	if (!vout->vrfb_static_allocation) {
		for (i = 0; i < VRFB_NUM_BUFS; i++) {
			if (vout->smsshado_virt_addr[i]) {
				omap_vout_free_buffer(
						vout->smsshado_virt_addr[i],
						vout->smsshado_size);
				vout->smsshado_virt_addr[i] = 0;
				vout->smsshado_phy_addr[i] = 0;
			}
		}
	}
	vout->buffer_allocated = num_buffers;
}
/*
* This function will be called when VIDIOC_QBUF ioctl is called.
* It prepare buffers before give out for the display. This function
* converts user space virtual address into physical address if userptr memory
* exchange mechanism is used. If rotation is enabled, it copies entire
* buffer into VRFB memory space before giving it to the DSS.
*/
/*
 * videobuf buf_prepare callback (runs on VIDIOC_QBUF): resolves the
 * buffer's physical address (converting the user virtual address for
 * USERPTR buffers) and, when rotation is enabled, DMA-copies the frame
 * into the per-buffer VRFB context before it is handed to the DSS.
 */
static int omap_vout_buffer_prepare(struct videobuf_queue *q,
			struct videobuf_buffer *vb,
			enum v4l2_field field)
{
	dma_addr_t dmabuf;
	struct vid_vrfb_dma *tx;
	enum dss_rotation rotation;
	struct omap_vout_device *vout = q->priv_data;
	u32 dest_frame_index = 0, src_element_index = 0;
	u32 dest_element_index = 0, src_frame_index = 0;
	u32 elem_count = 0, frame_count = 0, pixsize = 2;

	/* First-time init: size the buffer from the current pixel format */
	if (VIDEOBUF_NEEDS_INIT == vb->state) {
		vb->width = vout->pix.width;
		vb->height = vout->pix.height;
		vb->size = vb->width * vb->height * vout->bpp;
		vb->field = field;
	}
	vb->state = VIDEOBUF_PREPARED;
	/* if user pointer memory mechanism is used, get the physical
	 * address of the buffer
	 */
	if (V4L2_MEMORY_USERPTR == vb->memory) {
		if (0 == vb->baddr)
			return -EINVAL;
		/* Physical address */
		vout->queued_buf_addr[vb->i] = (u8 *)
			omap_vout_uservirt_to_phys(vb->baddr);
	} else {
		/* MMAP buffers were allocated by the driver itself */
		vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
	}

	/* Without rotation the buffer goes to the DSS as-is; done */
	if (!rotation_enabled(vout))
		return 0;

	dmabuf = vout->buf_phy_addr[vb->i];
	/* If rotation is enabled, copy input buffer into VRFB
	 * memory space using DMA. We are copying input buffer
	 * into VRFB memory space of desired angle and DSS will
	 * read image VRFB memory for 0 degree angle
	 */
	pixsize = vout->bpp * vout->vrfb_bpp;
	/*
	 * DMA transfer in double index mode
	 */

	/* Frame index: byte gap to jump from the end of one copied line
	 * to the start of the next line inside the VRFB window */
	dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
			(vout->pix.width * vout->bpp)) + 1;

	/* Source and destination parameters */
	src_element_index = 0;
	src_frame_index = 0;
	dest_element_index = 1;
	/* Number of elements per frame */
	elem_count = vout->pix.width * vout->bpp;
	frame_count = vout->pix.height;
	tx = &vout->vrfb_dma_tx;
	tx->tx_status = 0;
	/* 32-bit elements, element-synchronized transfer */
	omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
			(elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
			tx->dev_id, 0x0);
	/* src_port required only for OMAP1 */
	omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
			dmabuf, src_element_index, src_frame_index);
	/*set dma source burst mode for VRFB */
	omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
	rotation = calc_rotation(vout);

	/* dest_port required only for OMAP1 */
	omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
			vout->vrfb_context[vb->i].paddr[0], dest_element_index,
			dest_frame_index);
	/*set dma dest burst mode for VRFB */
	omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);

	omap_start_dma(tx->dma_ch);
	/* NOTE(review): interruptible_sleep_on_timeout() is racy (the DMA
	 * completion callback can fire before we go to sleep) and deprecated;
	 * consider wait_event_interruptible_timeout() on tx->tx_status. */
	interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);

	/* tx_status is set by the DMA completion callback; zero here means
	 * the transfer timed out or was interrupted */
	if (tx->tx_status == 0) {
		omap_stop_dma(tx->dma_ch);
		return -EINVAL;
	}
	/* Store buffers physical address into an array. Addresses
	 * from this array will be used to configure DSS */
	vout->queued_buf_addr[vb->i] = (u8 *)
		vout->vrfb_context[vb->i].paddr[rotation];
	return 0;
}
/*
* Buffer queue function will be called from the videobuf layer when _QBUF
* ioctl is called. It is used to enqueue buffer, which is ready to be
* displayed.
*/
/*
 * videobuf buf_queue callback (QBUF): append the prepared buffer to the
 * driver's own display queue and mark it queued.
 */
static void omap_vout_buffer_queue(struct videobuf_queue *q,
			struct videobuf_buffer *vb)
{
	struct omap_vout_device *vout = q->priv_data;

	vb->state = VIDEOBUF_QUEUED;
	/* The driver keeps its own DMA queue alongside videobuf's */
	list_add_tail(&vb->queue, &vout->dma_queue);
}
/*
* Buffer release function is called from videobuf layer to release buffer
* which are already allocated
*/
/*
 * videobuf buf_release callback: reset the buffer state. MMAP buffer
 * memory itself is freed elsewhere (reqbufs/release paths).
 */
static void omap_vout_buffer_release(struct videobuf_queue *q,
			struct videobuf_buffer *vb)
{
	struct omap_vout_device *vout = q->priv_data;

	vb->state = VIDEOBUF_NEEDS_INIT;

	if (vout->memory != V4L2_MEMORY_MMAP)
		return;
}
/*
* File operations
*/
/* VMA open hook: count a new userspace mapping of the buffers */
static void omap_vout_vm_open(struct vm_area_struct *vma)
{
	struct omap_vout_device *vout = vma->vm_private_data;

	vout->mmap_count++;
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
}
/* VMA close hook: drop the mapping count when userspace unmaps */
static void omap_vout_vm_close(struct vm_area_struct *vma)
{
	struct omap_vout_device *vout = vma->vm_private_data;

	vout->mmap_count--;
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
}
/* VM ops installed by omap_vout_mmap() so mmap_count tracks mappings */
static struct vm_operations_struct omap_vout_vm_ops = {
	.open = omap_vout_vm_open,
	.close = omap_vout_vm_close,
};
/*
 * mmap file operation: locate the MMAP buffer whose videobuf offset
 * matches the requested vm_pgoff and remap its pages write-combined
 * into the caller's address space.
 */
static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
{
	int i;
	void *pos;
	unsigned long start = vma->vm_start;
	unsigned long size = (vma->vm_end - vma->vm_start);
	struct omap_vout_device *vout = file->private_data;
	struct videobuf_queue *q = &vout->vbq;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
			" %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
			vma->vm_pgoff, vma->vm_start, vma->vm_end);

	/* look for the buffer to map: the offset the app passes to mmap()
	 * is the boff that videobuf reported via QUERYBUF */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
			continue;
		if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}

	if (VIDEO_MAX_FRAME == i) {
		/* No buffer matched the requested offset */
		v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
				"offset invalid [offset=0x%lx]\n",
				(vma->vm_pgoff << PAGE_SHIFT));
		return -EINVAL;
	}
	/* Check the size of the buffer */
	if (size > vout->buffer_size) {
		v4l2_err(&vout->vid_dev->v4l2_dev,
				"insufficient memory [%lu] [%u]\n",
				size, vout->buffer_size);
		return -ENOMEM;
	}

	q->bufs[i]->baddr = vma->vm_start;

	/* Write-combined, non-swappable mapping for the frame buffer */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &omap_vout_vm_ops;
	vma->vm_private_data = (void *) vout;
	pos = (void *)vout->buf_virt_addr[i];
	vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
	/* Map page by page; assumes the vma size is page-aligned (the mmap
	 * path guarantees this), otherwise the unsigned counter would wrap */
	while (size > 0) {
		unsigned long pfn;
		pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vout->mmap_count++;
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);

	return 0;
}
/*
 * release file operation: disable every overlay driven by this device,
 * stop any active streaming, and free all allocated buffers.
 *
 * Returns the result of the final omapvid_apply_changes() call.
 */
static int omap_vout_release(struct file *file)
{
	unsigned int ret, i;
	struct videobuf_queue *q;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = file->private_data;

	/* FIX: check for NULL before any dereference — the original read
	 * vout->vid_dev (v4l2_dbg) and &vout->vid_info first, which made
	 * the NULL check useless. */
	if (!vout)
		return 0;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
	ovid = &vout->vid_info;
	q = &vout->vbq;

	/* Disable all the overlay managers connected with this interface */
	for (i = 0; i < ovid->num_overlays; i++) {
		struct omap_overlay *ovl = ovid->overlays[i];
		if (ovl->manager && ovl->manager->device) {
			struct omap_overlay_info info;
			ovl->get_overlay_info(ovl, &info);
			info.enabled = 0;
			ovl->set_overlay_info(ovl, &info);
		}
	}
	/* Turn off the pipeline */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_warn(&vout->vid_dev->v4l2_dev,
				"Unable to apply changes\n");

	/* Free all buffers */
	omap_vout_free_allbuffers(vout);
	videobuf_mmap_free(q);

	/* Even if apply changes fails we should continue
	   freeing allocated memory */
	if (vout->streaming) {
		u32 mask = 0;

		mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
			DISPC_IRQ_EVSYNC_ODD;
		omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
		vout->streaming = 0;

		videobuf_streamoff(q);
		videobuf_queue_cancel(q);
	}

	if (vout->mmap_count != 0)
		vout->mmap_count = 0;

	vout->opened -= 1;
	file->private_data = NULL;

	if (vout->buffer_allocated)
		videobuf_mmap_free(q);

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
	return ret;
}
/*
 * open file operation: enforce single-open, wire up the videobuf
 * callbacks, and initialize the DMA-contiguous videobuf queue.
 *
 * Returns 0, -ENODEV if no device data, or -EBUSY if already opened.
 */
static int omap_vout_open(struct file *file)
{
	struct videobuf_queue *q;
	struct omap_vout_device *vout = NULL;

	vout = video_drvdata(file);
	/* FIX: test for NULL before the first dereference — the original
	 * logged through vout->vid_dev before checking the pointer. */
	if (vout == NULL)
		return -ENODEV;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);

	/* for now, we only support single open */
	if (vout->opened)
		return -EBUSY;
	vout->opened += 1;

	file->private_data = vout;
	vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

	q = &vout->vbq;
	video_vbq_ops.buf_setup = omap_vout_buffer_setup;
	video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
	video_vbq_ops.buf_release = omap_vout_buffer_release;
	video_vbq_ops.buf_queue = omap_vout_buffer_queue;
	spin_lock_init(&vout->vbq_lock);

	videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
			&vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
			sizeof(struct videobuf_buffer), vout, NULL);

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
	return 0;
}
/*
* V4L2 ioctls
*/
/* VIDIOC_QUERYCAP: report driver identity and streaming-output support */
static int vidioc_querycap(struct file *file, void *fh,
		struct v4l2_capability *cap)
{
	struct omap_vout_device *vout = fh;

	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
	cap->bus_info[0] = '\0';
	strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
	strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));

	return 0;
}
/* VIDIOC_ENUM_FMT (output): describe the index-th supported format */
static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_fmtdesc *fmt)
{
	unsigned int idx = fmt->index;

	if (idx >= NUM_OUTPUT_FORMATS)
		return -EINVAL;

	/* Copy the entry from the static format table */
	fmt->flags = omap_formats[idx].flags;
	fmt->pixelformat = omap_formats[idx].pixelformat;
	strlcpy(fmt->description, omap_formats[idx].description,
			sizeof(fmt->description));

	return 0;
}
/* VIDIOC_G_FMT (output): return the currently configured pixel format */
static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = fh;

	f->fmt.pix = vout->pix;

	return 0;
}
/*
 * VIDIOC_TRY_FMT (output): adjust the requested pixel format to what the
 * hardware supports, using the attached display's panel timings as the
 * framebuffer size.
 */
static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid = &vout->vid_info;
	struct omap_overlay *ovl = ovid->overlays[0];
	struct omap_video_timings *timing;

	if (!ovl->manager || !ovl->manager->device)
		return -EINVAL;

	/* get the display device attached to the overlay */
	timing = &ovl->manager->device->panel.timings;
	vout->fbuf.fmt.width = timing->x_res;
	vout->fbuf.fmt.height = timing->y_res;

	omap_vout_try_format(&f->fmt.pix);

	return 0;
}
/*
 * VIDIOC_S_FMT (output): validate and apply a new output pixel format,
 * recompute default crop/window, and push the change into the overlay.
 * Rejected while streaming, and rejects RGB24 when VRFB rotation is on.
 */
static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret, bpp;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_video_timings *timing;
	struct omap_vout_device *vout = fh;

	/* Format cannot change while frames are being displayed */
	if (vout->streaming)
		return -EBUSY;

	mutex_lock(&vout->lock);

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* get the display device attached to the overlay */
	if (!ovl->manager || !ovl->manager->device) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}
	timing = &ovl->manager->device->panel.timings;

	/* We dont support RGB24-packed mode if vrfb rotation
	 * is enabled*/
	if ((rotation_enabled(vout)) &&
			f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}

	/* get the framebuffer parameters; a 90/270-degree rotation swaps
	 * the effective panel width and height */
	if (rotate_90_or_270(vout)) {
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	/* change to samller size is OK */
	bpp = omap_vout_try_format(&f->fmt.pix);
	f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;

	/* try & set the new output format */
	vout->bpp = bpp;
	vout->pix = f->fmt.pix;
	vout->vrfb_bpp = 1;

	/* If YUYV then vrfb bpp is 2, for others its 1 */
	if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
			V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
		vout->vrfb_bpp = 2;

	/* set default crop and win */
	omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);

	/* Save the changes in the overlay strcuture */
	ret = omapvid_init(vout, 0);
	if (ret) {
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
		goto s_fmt_vid_out_exit;
	}

	ret = 0;

s_fmt_vid_out_exit:
	mutex_unlock(&vout->lock);
	return ret;
}
/*
 * VIDIOC_TRY_FMT (overlay): clip the requested window against the
 * framebuffer. Video1 does not support global alpha, so it is forced
 * fully opaque there.
 */
static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = fh;
	struct v4l2_window *win = &f->fmt.win;
	int err;

	err = omap_vout_try_window(&vout->fbuf, win);
	if (err)
		return err;

	win->global_alpha = (vout->vid == OMAP_VIDEO1) ?
		255 : f->fmt.win.global_alpha;

	return 0;
}
/*
 * VIDIOC_S_FMT (overlay): apply a new overlay window (position, alpha,
 * chromakey) under the device lock.
 */
static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int err;
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid = &vout->vid_info;
	struct omap_overlay *ovl = ovid->overlays[0];
	struct v4l2_window *win = &f->fmt.win;

	mutex_lock(&vout->lock);

	err = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
	if (!err) {
		/* Video1 plane does not support global alpha */
		vout->win.global_alpha = (ovl->id == OMAP_DSS_VIDEO1) ?
			255 : f->fmt.win.global_alpha;
		vout->win.chromakey = f->fmt.win.chromakey;
	}

	mutex_unlock(&vout->lock);
	return err;
}
/* VIDIOC_ENUM_FMT (overlay): same format table as the output queue */
static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_fmtdesc *fmt)
{
	unsigned int idx = fmt->index;

	if (idx >= NUM_OUTPUT_FORMATS)
		return -EINVAL;

	fmt->flags = omap_formats[idx].flags;
	fmt->pixelformat = omap_formats[idx].pixelformat;
	strlcpy(fmt->description, omap_formats[idx].description,
			sizeof(fmt->description));

	return 0;
}
/*
 * VIDIOC_G_FMT (overlay): return the current window; the chromakey is
 * read back from the overlay manager when one is attached (0 otherwise).
 */
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = fh;
	struct omap_overlay *ovl = vout->vid_info.overlays[0];
	struct omap_overlay_manager_info info;
	struct v4l2_window *win = &f->fmt.win;
	u32 key = 0;

	win->w = vout->win.w;
	win->field = vout->win.field;
	win->global_alpha = vout->win.global_alpha;

	if (ovl->manager && ovl->manager->get_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		key = info.trans_key;
	}
	win->chromakey = key;

	return 0;
}
/*
 * VIDIOC_CROPCAP: report crop bounds (current image size rounded down
 * to even), the default crop rectangle, and a 1:1 pixel aspect ratio.
 */
static int vidioc_cropcap(struct file *file, void *fh,
		struct v4l2_cropcap *cropcap)
{
	struct omap_vout_device *vout = fh;
	struct v4l2_pix_format *pix = &vout->pix;

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != cropcap->type)
		return -EINVAL;

	/* Width and height are always even */
	cropcap->bounds.width = pix->width & ~1;
	cropcap->bounds.height = pix->height & ~1;

	omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);

	cropcap->pixelaspect.numerator = 1;
	cropcap->pixelaspect.denominator = 1;

	return 0;
}
/* VIDIOC_G_CROP: return the current crop rectangle */
static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct omap_vout_device *vout = fh;

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != crop->type)
		return -EINVAL;

	crop->c = vout->crop;

	return 0;
}
/*
 * VIDIOC_S_CROP: apply a new crop rectangle. Refused while streaming.
 * The effective framebuffer size is refreshed from the panel timings
 * (swapped for 90/270-degree rotation) before the crop is validated.
 */
static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	int ret = -EINVAL;
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid;
	struct omap_overlay *ovl;
	struct omap_video_timings *timing;

	/* Crop cannot change while frames are being displayed */
	if (vout->streaming)
		return -EBUSY;

	mutex_lock(&vout->lock);
	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	if (!ovl->manager || !ovl->manager->device) {
		ret = -EINVAL;
		goto s_crop_err;
	}
	/* get the display device attached to the overlay */
	timing = &ovl->manager->device->panel.timings;

	/* 90/270-degree rotation swaps the effective panel dimensions */
	if (rotate_90_or_270(vout)) {
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
				&vout->fbuf, &crop->c);

s_crop_err:
	mutex_unlock(&vout->lock);
	return ret;
}
/*
 * VIDIOC_QUERYCTRL: describe the supported controls — rotation
 * (0..270 in steps of 90), background color, and vertical flip.
 */
static int vidioc_queryctrl(struct file *file, void *fh,
		struct v4l2_queryctrl *ctrl)
{
	switch (ctrl->id) {
	case V4L2_CID_ROTATE:
		return v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
	case V4L2_CID_BG_COLOR:
		return v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
	case V4L2_CID_VFLIP:
		return v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
	default:
		ctrl->name[0] = '\0';
		return -EINVAL;
	}
}
/*
 * VIDIOC_G_CTRL: read back a control value. Rotation and mirror come
 * from the cached control array; the background color is queried live
 * from the overlay manager.
 */
static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
{
	struct omap_vout_device *vout = fh;

	switch (ctrl->id) {
	case V4L2_CID_ROTATE:
		ctrl->value = vout->control[0].value;
		return 0;
	case V4L2_CID_BG_COLOR:
	{
		struct omap_overlay_manager_info info;
		struct omap_overlay *ovl = vout->vid_info.overlays[0];

		if (!ovl->manager || !ovl->manager->get_manager_info)
			return -EINVAL;

		ovl->manager->get_manager_info(ovl->manager, &info);
		ctrl->value = info.default_color;
		return 0;
	}
	case V4L2_CID_VFLIP:
		ctrl->value = vout->control[2].value;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * VIDIOC_S_CTRL: set rotation, background color, or vertical flip.
 * Rotation and mirroring are rejected for RGB24-packed because VRFB
 * cannot handle that format. Each case takes and releases vout->lock
 * on every exit path.
 */
static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
{
	int ret = 0;
	struct omap_vout_device *vout = fh;

	switch (a->id) {
	case V4L2_CID_ROTATE:
	{
		int rotation = a->value;

		mutex_lock(&vout->lock);

		/* VRFB rotation does not support RGB24-packed */
		if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		/* Convert the V4L2 angle to a DSS rotation value */
		if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
							vout->mirror)) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		vout->control[0].value = rotation;
		mutex_unlock(&vout->lock);
		break;
	}
	case V4L2_CID_BG_COLOR:
	{
		struct omap_overlay *ovl;
		unsigned int color = a->value;
		struct omap_overlay_manager_info info;

		ovl = vout->vid_info.overlays[0];

		mutex_lock(&vout->lock);
		if (!ovl->manager || !ovl->manager->get_manager_info) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		/* Read-modify-write the manager's default color */
		ovl->manager->get_manager_info(ovl->manager, &info);
		info.default_color = color;
		if (ovl->manager->set_manager_info(ovl->manager, &info)) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		vout->control[1].value = color;
		mutex_unlock(&vout->lock);
		break;
	}
	case V4L2_CID_VFLIP:
	{
		struct omap_overlay *ovl;
		struct omapvideo_info *ovid;
		unsigned int mirror = a->value;

		ovid = &vout->vid_info;
		ovl = ovid->overlays[0];

		mutex_lock(&vout->lock);

		/* Mirroring via VRFB does not support RGB24-packed either */
		if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}
		vout->mirror = mirror;
		vout->control[2].value = mirror;
		mutex_unlock(&vout->lock);
		break;
	}
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*
 * VIDIOC_REQBUFS: (re)allocate the buffer set. Frees any previously
 * allocated MMAP or USERPTR buffers first, then delegates to
 * videobuf_reqbufs(). Refused while streaming or while buffers are
 * still mmap'ed.
 */
static int vidioc_reqbufs(struct file *file, void *fh,
			struct v4l2_requestbuffers *req)
{
	int ret = 0;
	unsigned int i, num_buffers = 0;
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;

	/* FIX: dropped the 'req->count < 0' test — count is an unsigned
	 * __u32, so the comparison was always false (dead code). */
	if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;
	/* if memory is not mmp or userptr
	   return error */
	if ((V4L2_MEMORY_MMAP != req->memory) &&
			(V4L2_MEMORY_USERPTR != req->memory))
		return -EINVAL;

	mutex_lock(&vout->lock);
	/* Cannot be requested when streaming is on */
	if (vout->streaming) {
		ret = -EBUSY;
		goto reqbuf_err;
	}

	/* If buffers are already allocated free them */
	if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
		/* Cannot free while userspace still has mappings */
		if (vout->mmap_count) {
			ret = -EBUSY;
			goto reqbuf_err;
		}
		/* Keep the statically preallocated default buffers */
		num_buffers = (vout->vid == OMAP_VIDEO1) ?
			video1_numbuffers : video2_numbuffers;
		for (i = num_buffers; i < vout->buffer_allocated; i++) {
			omap_vout_free_buffer(vout->buf_virt_addr[i],
					vout->buffer_size);
			vout->buf_virt_addr[i] = 0;
			vout->buf_phy_addr[i] = 0;
		}
		vout->buffer_allocated = num_buffers;
		videobuf_mmap_free(q);
	} else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
		if (vout->buffer_allocated) {
			videobuf_mmap_free(q);
			for (i = 0; i < vout->buffer_allocated; i++) {
				kfree(q->bufs[i]);
				q->bufs[i] = NULL;
			}
			vout->buffer_allocated = 0;
		}
	}

	/*store the memory type in data structure */
	vout->memory = req->memory;

	INIT_LIST_HEAD(&vout->dma_queue);

	/* call videobuf_reqbufs api */
	ret = videobuf_reqbufs(q, req);
	if (ret < 0)
		goto reqbuf_err;

	vout->buffer_allocated = req->count;

reqbuf_err:
	mutex_unlock(&vout->lock);
	return ret;
}
/* VIDIOC_QUERYBUF: delegate straight to the videobuf layer */
static int vidioc_querybuf(struct file *file, void *fh,
			struct v4l2_buffer *b)
{
	struct omap_vout_device *vout = fh;

	return videobuf_querybuf(&vout->vbq, b);
}
/*
 * VIDIOC_QBUF: validate the buffer (type, index range, memory kind,
 * USERPTR length/address, VRFB DMA availability for rotation) and hand
 * it to the videobuf layer.
 */
static int vidioc_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buffer)
{
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type)
		return -EINVAL;
	if (buffer->index >= vout->buffer_allocated)
		return -EINVAL;
	if (q->bufs[buffer->index]->memory != buffer->memory)
		return -EINVAL;

	/* USERPTR buffers must be non-NULL and large enough for a frame */
	if (V4L2_MEMORY_USERPTR == buffer->memory &&
			(buffer->length < vout->pix.sizeimage ||
			 0 == buffer->m.userptr))
		return -EINVAL;

	/* Rotation needs the VRFB DMA channel reserved at init time */
	if (rotation_enabled(vout) &&
			vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
		v4l2_warn(&vout->vid_dev->v4l2_dev,
				"DMA Channel not allocated for Rotation\n");
		return -EINVAL;
	}

	return videobuf_qbuf(q, buffer);
}
/*
 * VIDIOC_DQBUF: dequeue a displayed buffer; non-blocking when the file
 * was opened with O_NONBLOCK. Only valid while streaming.
 */
static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;
	int nonblocking = (file->f_flags & O_NONBLOCK) ? 1 : 0;

	if (!vout->streaming)
		return -EINVAL;

	return videobuf_dqbuf(q, b, nonblocking);
}
/*
 * VIDIOC_STREAMON: start the display pipeline — take the first queued
 * buffer, register the DISPC vsync ISR, program every attached overlay
 * with the buffer address, and apply the changes. On any failure after
 * videobuf_streamon(), the stream is rolled back via streamoff.
 */
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
	int ret = 0, j;
	u32 addr = 0, mask = 0;
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;
	struct omapvideo_info *ovid = &vout->vid_info;

	mutex_lock(&vout->lock);

	if (vout->streaming) {
		ret = -EBUSY;
		goto streamon_err;
	}

	ret = videobuf_streamon(q);
	if (ret)
		goto streamon_err;

	/* At least one buffer must have been queued before streamon */
	if (list_empty(&vout->dma_queue)) {
		ret = -EIO;
		goto streamon_err1;
	}

	/* Get the next frame from the buffer queue */
	vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
			struct videobuf_buffer, queue);
	/* Remove buffer from the buffer queue */
	list_del(&vout->cur_frm->queue);
	/* Mark state of the current frame to active */
	vout->cur_frm->state = VIDEOBUF_ACTIVE;
	/* Initialize field_id and started member */
	vout->field_id = 0;

	/* set flag here. Next QBUF will start DMA */
	vout->streaming = 1;

	vout->first_int = 1;

	if (omap_vout_calculate_offset(vout)) {
		ret = -EINVAL;
		goto streamon_err1;
	}
	/* DSS reads from the (possibly crop-adjusted) physical address */
	addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
		+ vout->cropped_offset;

	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;

	omap_dispc_register_isr(omap_vout_isr, vout, mask);

	/* Enable every overlay attached to this device with the frame addr */
	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];

		if (ovl->manager && ovl->manager->device) {
			struct omap_overlay_info info;
			ovl->get_overlay_info(ovl, &info);
			info.enabled = 1;
			info.paddr = addr;
			if (ovl->set_overlay_info(ovl, &info)) {
				ret = -EINVAL;
				goto streamon_err1;
			}
		}
	}

	/* First save the configuration in ovelray structure */
	ret = omapvid_init(vout, addr);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev,
				"failed to set overlay info\n");
	/* Enable the pipeline and set the Go bit */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");

	ret = 0;

streamon_err1:
	/* Undo videobuf_streamon() on any failure above */
	if (ret)
		ret = videobuf_streamoff(q);
streamon_err:
	mutex_unlock(&vout->lock);
	return ret;
}
/*
 * VIDIOC_STREAMOFF: stop the display pipeline — unregister the DISPC
 * ISR, disable every attached overlay, apply the changes, and tear
 * down the videobuf stream.
 */
static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
	u32 mask = 0;
	int ret = 0, j;
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid = &vout->vid_info;

	if (!vout->streaming)
		return -EINVAL;

	/* Clear the flag first so the ISR stops advancing frames */
	vout->streaming = 0;
	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;

	omap_dispc_unregister_isr(omap_vout_isr, vout, mask);

	/* Disable every overlay attached to this device */
	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];

		if (ovl->manager && ovl->manager->device) {
			struct omap_overlay_info info;

			ovl->get_overlay_info(ovl, &info);
			info.enabled = 0;
			ret = ovl->set_overlay_info(ovl, &info);
			if (ret)
				v4l2_err(&vout->vid_dev->v4l2_dev,
				"failed to update overlay info in streamoff\n");
		}
	}

	/* Turn of the pipeline */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
				" streamoff\n");

	INIT_LIST_HEAD(&vout->dma_queue);
	ret = videobuf_streamoff(&vout->vbq);

	return ret;
}
/*
 * VIDIOC_S_FBUF: configure chromakey (source or destination, mutually
 * exclusive) and local alpha blending on the overlay manager. The DSS
 * cannot combine destination chromakey with alpha blending either.
 */
static int vidioc_s_fbuf(struct file *file, void *fh,
		struct v4l2_framebuffer *a)
{
	int enable = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct omap_overlay_manager_info info;
	enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* OMAP DSS doesn't support Source and Destination color
	   key together */
	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
		return -EINVAL;
	/* OMAP DSS Doesn't support the Destination color key
	   and alpha blending together */
	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
		return -EINVAL;

	/* Mirror the requested chromakey flags into vout->fbuf and pick
	 * the matching DSS key type */
	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		key_type =  OMAP_DSS_COLOR_KEY_VID_SRC;
	} else
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;

	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		key_type =  OMAP_DSS_COLOR_KEY_GFX_DST;
	} else
		vout->fbuf.flags &=  ~V4L2_FBUF_FLAG_CHROMAKEY;

	/* Transparency is enabled iff either chromakey flag is set */
	if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
				V4L2_FBUF_FLAG_SRC_CHROMAKEY))
		enable = 1;
	else
		enable = 0;
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {

		ovl->manager->get_manager_info(ovl->manager, &info);
		info.trans_enabled = enable;
		info.trans_key_type = key_type;
		info.trans_key = vout->win.chromakey;

		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}
	/* Second pass: apply the local-alpha flag independently */
	if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 1;
	} else {
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 0;
	}
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {

		ovl->manager->get_manager_info(ovl->manager, &info);
		info.alpha_enabled = enable;
		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}

	return 0;
}
/*
 * VIDIOC_G_FBUF: report the framebuffer capabilities and the currently
 * active chromakey / local-alpha flags, read from the overlay manager.
 */
static int vidioc_g_fbuf(struct file *file, void *fh,
		struct v4l2_framebuffer *a)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct omap_overlay_manager_info info;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	a->flags = 0x0;
	a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
		| V4L2_FBUF_CAP_SRC_CHROMAKEY;

	if (ovl->manager && ovl->manager->get_manager_info) {
		/* Single query covers both chromakey and alpha state (the
		 * original fetched the same manager info twice). */
		ovl->manager->get_manager_info(ovl->manager, &info);
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
			a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
			a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		if (info.alpha_enabled)
			a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
	}

	return 0;
}
/* V4L2 ioctl dispatch table for the video-out device node */
static const struct v4l2_ioctl_ops vout_ioctl_ops = {
	.vidioc_querycap      			= vidioc_querycap,
	.vidioc_enum_fmt_vid_out 		= vidioc_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out			= vidioc_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out			= vidioc_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out			= vidioc_s_fmt_vid_out,
	.vidioc_queryctrl    			= vidioc_queryctrl,
	.vidioc_g_ctrl       			= vidioc_g_ctrl,
	.vidioc_s_fbuf				= vidioc_s_fbuf,
	.vidioc_g_fbuf				= vidioc_g_fbuf,
	.vidioc_s_ctrl       			= vidioc_s_ctrl,
	.vidioc_try_fmt_vid_overlay 		= vidioc_try_fmt_vid_overlay,
	.vidioc_s_fmt_vid_overlay		= vidioc_s_fmt_vid_overlay,
	.vidioc_enum_fmt_vid_overlay		= vidioc_enum_fmt_vid_overlay,
	.vidioc_g_fmt_vid_overlay		= vidioc_g_fmt_vid_overlay,
	.vidioc_cropcap				= vidioc_cropcap,
	.vidioc_g_crop				= vidioc_g_crop,
	.vidioc_s_crop				= vidioc_s_crop,
	.vidioc_reqbufs				= vidioc_reqbufs,
	.vidioc_querybuf			= vidioc_querybuf,
	.vidioc_qbuf				= vidioc_qbuf,
	.vidioc_dqbuf				= vidioc_dqbuf,
	.vidioc_streamon			= vidioc_streamon,
	.vidioc_streamoff			= vidioc_streamoff,
};
/* File operations for the /dev/videoX node */
static const struct v4l2_file_operations omap_vout_fops = {
	.owner 		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.mmap 		= omap_vout_mmap,
	.open 		= omap_vout_open,
	.release 	= omap_vout_release,
};
/* Init functions used during driver initialization */
/* Initial setup of video_data */
static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
{
struct video_device *vfd;
struct v4l2_pix_format *pix;
struct v4l2_control *control;
struct omap_dss_device *display =
vout->vid_info.overlays[0]->manager->device;
/* set the default pix */
pix = &vout->pix;
/* Set the default picture of QVGA */
pix->width = QQVGA_WIDTH;
pix->height = QQVGA_HEIGHT;
/* Default pixel format is RGB 5-6-5 */
pix->pixelformat = V4L2_PIX_FMT_RGB565;
pix->field = V4L2_FIELD_ANY;
pix->bytesperline = pix->width * 2;
pix->sizeimage = pix->bytesperline * pix->height;
pix->priv = 0;
pix->colorspace = V4L2_COLORSPACE_JPEG;
vout->bpp = RGB565_BPP;
vout->fbuf.fmt.width = display->panel.timings.x_res;
vout->fbuf.fmt.height = display->panel.timings.y_res;
/* Set the data structures for the overlay parameters*/
vout->win.global_alpha = 255;
vout->fbuf.flags = 0;
vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
vout->win.chromakey = 0;
omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
/*Initialize the control variables for
rotation, flipping and background color. */
control = vout->control;
control[0].id = V4L2_CID_ROTATE;
control[0].value = 0;
vout->rotation = 0;
vout->mirror = 0;
vout->control[2].id = V4L2_CID_HFLIP;
vout->control[2].value = 0;
vout->vrfb_bpp = 2;
control[1].id = V4L2_CID_BG_COLOR;
control[1].value = 0;
/* initialize the video_device struct */
vfd = vout->vfd = video_device_alloc();
if (!vfd) {
printk(KERN_ERR VOUT_NAME ": could not allocate"
" video device struct\n");
return -ENOMEM;
}
vfd->release = video_device_release;
vfd->ioctl_ops = &vout_ioctl_ops;
strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
vfd->fops = &omap_vout_fops;
vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
mutex_init(&vout->lock);
vfd->minor = -1;
return 0;
}
/* Setup video buffers */
static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
int vid_num)
{
u32 numbuffers;
int ret = 0, i, j;
int image_width, image_height;
struct video_device *vfd;
struct omap_vout_device *vout;
int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev =
container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
vout = vid_dev->vouts[vid_num];
vfd = vout->vfd;
numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
for (i = 0; i < numbuffers; i++) {
vout->buf_virt_addr[i] =
omap_vout_alloc_buffer(vout->buffer_size,
(u32 *) &vout->buf_phy_addr[i]);
if (!vout->buf_virt_addr[i]) {
numbuffers = i;
ret = -ENOMEM;
goto free_buffers;
}
}
for (i = 0; i < VRFB_NUM_BUFS; i++) {
if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
dev_info(&pdev->dev, ": VRFB allocation failed\n");
for (j = 0; j < i; j++)
omap_vrfb_release_ctx(&vout->vrfb_context[j]);
ret = -ENOMEM;
goto free_buffers;
}
}
vout->cropped_offset = 0;
/* Calculate VRFB memory size */
/* allocate for worst case size */
image_width = VID_MAX_WIDTH / TILE_SIZE;
if (VID_MAX_WIDTH % TILE_SIZE)
image_width++;
image_width = image_width * TILE_SIZE;
image_height = VID_MAX_HEIGHT / TILE_SIZE;
if (VID_MAX_HEIGHT % TILE_SIZE)
image_height++;
image_height = image_height * TILE_SIZE;
vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
/*
* Request and Initialize DMA, for DMA based VRFB transfer
*/
vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
vout->vrfb_dma_tx.dma_ch = -1;
vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
omap_vout_vrfb_dma_tx_callback,
(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
if (ret < 0) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
" video%d\n", vfd->minor);
}
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
/* Allocate VRFB buffers if selected through bootargs */
static_vrfb_allocation = (vid_num == 0) ?
vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
/* statically allocated the VRFB buffer is done through
commands line aruments */
if (static_vrfb_allocation) {
if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
ret = -ENOMEM;
goto release_vrfb_ctx;
}
vout->vrfb_static_allocation = 1;
}
return 0;
release_vrfb_ctx:
for (j = 0; j < VRFB_NUM_BUFS; j++)
omap_vrfb_release_ctx(&vout->vrfb_context[j]);
free_buffers:
for (i = 0; i < numbuffers; i++) {
omap_vout_free_buffer(vout->buf_virt_addr[i],
vout->buffer_size);
vout->buf_virt_addr[i] = 0;
vout->buf_phy_addr[i] = 0;
}
return ret;
}
/* Create video out devices */
/*
 * Create and register one V4L2 video device per platform resource.
 *
 * For each device: allocate the omap_vout_device, bind it to a DSS
 * overlay, set up default video data and streaming buffers, register
 * the /dev/videoN node, then configure the overlay via omapvid_init().
 *
 * Returns 0 once the last device is registered, or a negative errno.
 *
 * NOTE(review): if omapvid_init() fails after video_register_device()
 * succeeded, control falls through error2/error1 and ends up calling
 * video_device_release() on a still-registered device; it looks like
 * that path should go through video_unregister_device() instead --
 * confirm against later mainline versions of this driver.
 */
static int __init omap_vout_create_video_devices(struct platform_device *pdev)
{
	int ret = 0, k;
	struct omap_vout_device *vout;
	struct video_device *vfd = NULL;
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct omap2video_device *vid_dev = container_of(v4l2_dev,
			struct omap2video_device, v4l2_dev);

	for (k = 0; k < pdev->num_resources; k++) {
		vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
		if (!vout) {
			dev_err(&pdev->dev, ": could not allocate memory\n");
			return -ENOMEM;
		}
		vout->vid = k;
		vid_dev->vouts[k] = vout;
		vout->vid_dev = vid_dev;
		/* Select video2 if only 1 overlay is controlled by V4L2 */
		if (pdev->num_resources == 1)
			vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
		else
			/* Else select video1 and video2 one by one. */
			vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
		vout->vid_info.num_overlays = 1;
		vout->vid_info.id = k + 1;

		/* Setup the default configuration for the video devices */
		if (omap_vout_setup_video_data(vout) != 0) {
			ret = -ENOMEM;
			goto error;
		}

		/* Allocate default number of buffers for the video streaming
		 * and reserve the VRFB space for rotation
		 */
		if (omap_vout_setup_video_bufs(pdev, k) != 0) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Register the Video device with V4L2 */
		vfd = vout->vfd;
		if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
			dev_err(&pdev->dev, ": Could not register "
					"Video for Linux device\n");
			vfd->minor = -1;
			ret = -ENODEV;
			goto error2;
		}
		video_set_drvdata(vfd, vout);

		/* Configure the overlay structure */
		ret = omapvid_init(vid_dev->vouts[k], 0);
		if (!ret)
			goto success;

		/* Unwind in reverse order of the setup steps above. */
error2:
		omap_vout_release_vrfb(vout);
		omap_vout_free_buffers(vout);
error1:
		video_device_release(vfd);
error:
		kfree(vout);
		return ret;

success:
		dev_info(&pdev->dev, ": registered and initialized"
				" video device %d\n", vfd->minor);
		/* Last resource done: everything is up. */
		if (k == (pdev->num_resources - 1))
			return 0;
	}
	return -ENODEV;
}
/* Driver functions */
static void omap_vout_cleanup_device(struct omap_vout_device *vout)
{
struct video_device *vfd;
if (!vout)
return;
vfd = vout->vfd;
if (vfd) {
if (!video_is_registered(vfd)) {
/*
* The device was never registered, so release the
* video_device struct directly.
*/
video_device_release(vfd);
} else {
/*
* The unregister function will release the video_device
* struct as well as unregistering it.
*/
video_unregister_device(vfd);
}
}
omap_vout_release_vrfb(vout);
omap_vout_free_buffers(vout);
/* Free the VRFB buffer if allocated
* init time
*/
if (vout->vrfb_static_allocation)
omap_vout_free_vrfb_buffers(vout);
kfree(vout);
}
static int omap_vout_remove(struct platform_device *pdev)
{
int k;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
omap2video_device, v4l2_dev);
v4l2_device_unregister(v4l2_dev);
for (k = 0; k < pdev->num_resources; k++)
omap_vout_cleanup_device(vid_dev->vouts[k]);
for (k = 0; k < vid_dev->num_displays; k++) {
if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
vid_dev->displays[k]->driver->disable(vid_dev->displays[k]);
omap_dss_put_device(vid_dev->displays[k]);
}
kfree(vid_dev);
return 0;
}
/*
 * Platform-driver probe: gather DSS displays/overlays/managers, enable
 * each overlay's attached display, register the v4l2_device, create the
 * per-overlay video devices, then push one full-screen update per
 * display.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * state is unwound through the probe_err* labels.
 *
 * NOTE(review): on the probe_err0/probe_err1 paths the references taken
 * with omap_dss_get_device() in the for_each_dss_dev() loop are never
 * released (omap_dss_put_device() only happens in omap_vout_remove()) --
 * looks like a refcount leak on probe failure; confirm.
 */
static int __init omap_vout_probe(struct platform_device *pdev)
{
	int ret = 0, i;
	struct omap_overlay *ovl;
	struct omap_dss_device *dssdev = NULL;
	struct omap_dss_device *def_display;
	struct omap2video_device *vid_dev = NULL;

	if (pdev->num_resources == 0) {
		dev_err(&pdev->dev, "probed for an unknown device\n");
		return -ENODEV;
	}

	vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
	if (vid_dev == NULL)
		return -ENOMEM;

	/* Take a reference on every DSS display in the system. */
	vid_dev->num_displays = 0;
	for_each_dss_dev(dssdev) {
		omap_dss_get_device(dssdev);
		vid_dev->displays[vid_dev->num_displays++] = dssdev;
	}

	if (vid_dev->num_displays == 0) {
		dev_err(&pdev->dev, "no displays\n");
		ret = -EINVAL;
		goto probe_err0;
	}

	/* Cache the overlay and manager handles. */
	vid_dev->num_overlays = omap_dss_get_num_overlays();
	for (i = 0; i < vid_dev->num_overlays; i++)
		vid_dev->overlays[i] = omap_dss_get_overlay(i);

	vid_dev->num_managers = omap_dss_get_num_overlay_managers();
	for (i = 0; i < vid_dev->num_managers; i++)
		vid_dev->managers[i] = omap_dss_get_overlay_manager(i);

	/* Get the Video1 overlay and video2 overlay.
	 * Setup the Display attached to that overlays
	 * (overlay 0 is the GFX plane, hence the loop starts at 1).
	 */
	for (i = 1; i < vid_dev->num_overlays; i++) {
		ovl = omap_dss_get_overlay(i);
		if (ovl->manager && ovl->manager->device) {
			def_display = ovl->manager->device;
		} else {
			dev_warn(&pdev->dev, "cannot find display\n");
			def_display = NULL;
		}
		if (def_display) {
			struct omap_dss_driver *dssdrv = def_display->driver;

			ret = dssdrv->enable(def_display);
			if (ret) {
				/* Here we are not considering a error
				 * as display may be enabled by frame
				 * buffer driver
				 */
				dev_warn(&pdev->dev,
						"'%s' Display already enabled\n",
						def_display->name);
			}
			/* set the update mode */
			if (def_display->caps &
					OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
				if (dssdrv->enable_te)
					dssdrv->enable_te(def_display, 0);
				if (dssdrv->set_update_mode)
					dssdrv->set_update_mode(def_display,
							OMAP_DSS_UPDATE_MANUAL);
			} else {
				if (dssdrv->set_update_mode)
					dssdrv->set_update_mode(def_display,
							OMAP_DSS_UPDATE_AUTO);
			}
		}
	}

	if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
		dev_err(&pdev->dev, "v4l2_device_register failed\n");
		ret = -ENODEV;
		goto probe_err1;
	}

	ret = omap_vout_create_video_devices(pdev);
	if (ret)
		goto probe_err2;

	/* Kick a full-screen update on every display that supports it. */
	for (i = 0; i < vid_dev->num_displays; i++) {
		struct omap_dss_device *display = vid_dev->displays[i];

		if (display->driver->update)
			display->driver->update(display, 0, 0,
					display->panel.timings.x_res,
					display->panel.timings.y_res);
	}
	return 0;

probe_err2:
	v4l2_device_unregister(&vid_dev->v4l2_dev);
probe_err1:
	/* Disable every display we may have enabled above. */
	for (i = 1; i < vid_dev->num_overlays; i++) {
		def_display = NULL;
		ovl = omap_dss_get_overlay(i);
		if (ovl->manager && ovl->manager->device)
			def_display = ovl->manager->device;

		if (def_display && def_display->driver)
			def_display->driver->disable(def_display);
	}
probe_err0:
	kfree(vid_dev);
	return ret;
}
/*
 * Platform driver glue for the OMAP video-out devices.
 *
 * NOTE(review): .probe points at an __init function; if a matching
 * device appeared after init memory is discarded this would jump into
 * freed code.  Mainline handles __init probe routines by registering
 * with platform_driver_probe() instead -- confirm before changing.
 */
static struct platform_driver omap_vout_driver = {
	.driver = {
		.name = VOUT_NAME,
	},
	.probe = omap_vout_probe,
	.remove = omap_vout_remove,
};
/*
 * Module init: register the platform driver.
 *
 * Returns 0 on success or the error code reported by
 * platform_driver_register().  (The previous version collapsed every
 * failure into -EINVAL, hiding the real cause from the caller.)
 */
static int __init omap_vout_init(void)
{
	int ret;

	ret = platform_driver_register(&omap_vout_driver);
	if (ret) {
		printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
		return ret;
	}
	return 0;
}
/* Module exit: unregister the platform driver (undoes omap_vout_init). */
static void omap_vout_cleanup(void)
{
	platform_driver_unregister(&omap_vout_driver);
}
/* Registered via late_initcall -- presumably so the DSS core and
 * display drivers are initialized first; TODO confirm. */
late_initcall(omap_vout_init);
module_exit(omap_vout_cleanup);
| gpl-2.0 |
Snuzzo/vigor_aosp_kernel | drivers/staging/tty/ip2/i2lib.c | 2535 | 66114 | /*******************************************************************************
*
* (c) 1999 by Computone Corporation
*
********************************************************************************
*
*
* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
* serial I/O controllers.
*
* DESCRIPTION: High-level interface code for the device driver. Uses the
* Extremely Low Level Interface Support (i2ellis.c). Provides an
* interface to the standard loadware, to support drivers or
* application code. (This is included source code, not a separate
* compilation module.)
*
*******************************************************************************/
//------------------------------------------------------------------------------
// Note on Strategy:
// Once the board has been initialized, it will interrupt us when:
// 1) It has something in the fifo for us to read (incoming data, flow control
// packets, or whatever).
// 2) It has stripped whatever we have sent last time in the FIFO (and
// consequently is ready for more).
//
// Note also that the buffer sizes declared in i2lib.h are VERY SMALL. This
// worsens performance considerably, but is done so that a great many channels
// might use only a little memory.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Revision History:
//
// 0.00 - 4/16/91 --- First Draft
// 0.01 - 4/29/91 --- 1st beta release
// 0.02 - 6/14/91 --- Changes to allow small model compilation
// 0.03 - 6/17/91 MAG Break reporting protected from interrupts routines with
// in-line asm added for moving data to/from ring buffers,
// replacing a variety of methods used previously.
// 0.04 - 6/21/91 MAG Initial flow-control packets not queued until
// i2_enable_interrupts time. Former versions would enqueue
// them at i2_init_channel time, before we knew how many
// channels were supposed to exist!
// 0.05 - 10/12/91 MAG Major changes: works through the ellis.c routines now;
// supports new 16-bit protocol and expandable boards.
// - 10/24/91 MAG Most changes in place and stable.
// 0.06 - 2/20/92 MAG Format of CMD_HOTACK corrected: the command takes no
// argument.
// 0.07 -- 3/11/92 MAG Support added to store special packet types at interrupt
// level (mostly responses to specific commands.)
// 0.08 -- 3/30/92 MAG Support added for STAT_MODEM packet
// 0.09 -- 6/24/93 MAG i2Link... needed to update number of boards BEFORE
// turning on the interrupt.
// 0.10 -- 6/25/93 MAG To avoid gruesome death from a bad board, we sanity check
// some incoming.
//
// 1.1 - 12/25/96 AKM Linux version.
// - 10/09/98 DMC Revised Linux version.
//------------------------------------------------------------------------------
//************
//* Includes *
//************
#include <linux/sched.h>
#include "i2lib.h"
//***********************
//* Function Prototypes *
//***********************
static void i2QueueNeeds(i2eBordStrPtr, i2ChanStrPtr, int);
static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr, int );
static void i2StripFifo(i2eBordStrPtr);
static void i2StuffFifoBypass(i2eBordStrPtr);
static void i2StuffFifoFlow(i2eBordStrPtr);
static void i2StuffFifoInline(i2eBordStrPtr);
static int i2RetryFlushOutput(i2ChanStrPtr);
// Not a documented part of the library routines (careful...) but the Diagnostic
// i2diag.c finds them useful to help the throughput in certain limited
// single-threaded operations.
static void iiSendPendingMail(i2eBordStrPtr);
static void serviceOutgoingFifo(i2eBordStrPtr);
// Functions defined in ip2.c as part of interrupt handling
static void do_input(struct work_struct *);
static void do_status(struct work_struct *);
//***************
//* Debug Data *
//***************
#ifdef DEBUG_FIFO
/* Circular debug trace buffer and its global write index. */
unsigned char DBGBuf[0x4000];
unsigned short I = 0;

/*
 * Append a label string followed by n raw bytes to the circular debug
 * buffer DBGBuf, advancing the global index I with wrap at 16 KiB.
 *
 * Fixes: the original advanced the index with "I = I++ & 0x3fff;",
 * which modifies I twice with no intervening sequence point --
 * undefined behaviour in C (CERT EXP30-C) -- and walked the unsigned
 * data through a plain char pointer.
 */
static void
WriteDBGBuf(char *s, unsigned char *src, unsigned short n )
{
	unsigned char *p = src;

	// XXX: We need a spin lock here if we ever use this again

	while (*s) {	// copy label
		DBGBuf[I] = *s++;
		I = (I + 1) & 0x3fff;	/* wrap at sizeof(DBGBuf) */
	}
	while (n--) {	// copy data
		DBGBuf[I] = *p++;
		I = (I + 1) & 0x3fff;
	}
}
/* Dump the whole debug ring: first as hex, then as printable ASCII. */
static void
fatality(i2eBordStrPtr pB )
{
	int idx;

	/* Pass 1: hex bytes, 16 per row, each row prefixed by its offset. */
	for (idx = 0; idx < sizeof(DBGBuf); idx++) {
		if (idx % 16 == 0)
			printk("\n%4x:",idx);
		printk("%02x ",DBGBuf[idx]);
	}
	printk("\n");

	/* Pass 2: the same bytes as characters, '.' for non-printables. */
	for (idx = 0; idx < sizeof(DBGBuf); idx++) {
		unsigned char c = DBGBuf[idx];

		if (idx % 16 == 0)
			printk("\n%4x:",idx);
		if (c >= ' ' && c <= '~')
			printk(" %c ",c);
		else
			printk(" . ");
	}
	printk("\n");
	printk("Last index %x\n",I);
}
#endif /* DEBUG_FIFO */
//********
//* Code *
//********
/*
 * Sanity-check a channel structure: its validity word must carry both
 * the magic pattern and the "channel supported" flag.
 */
static inline int
i2Validate ( i2ChanStrPtr pCh )
{
	unsigned int wanted = CHANNEL_MAGIC | CHANNEL_SUPPORT;

	return (pCh->validity & (CHANNEL_MAGIC_BITS | CHANNEL_SUPPORT)) == wanted;
}
static void iiSendPendingMail_t(unsigned long data)
{
i2eBordStrPtr pB = (i2eBordStrPtr)data;
iiSendPendingMail(pB);
}
//******************************************************************************
// Function: iiSendPendingMail(pB)
// Parameters: Pointer to a board structure
// Returns: Nothing
//
// Description:
// If any outgoing mail bits are set and the outgoing mailbox is empty,
// send the mail and clear the bits.
//******************************************************************************
/*
 * Deliver any pending outbound mailbox bits to the board, unless we
 * are still waiting for the board to drain its FIFO.
 */
static void
iiSendPendingMail(i2eBordStrPtr pB)
{
	if (!pB->i2eOutMailWaiting || pB->i2eWaitingForEmptyFifo)
		return;

	if (iiTrySendMail(pB, pB->i2eOutMailWaiting)) {
		/* If we were already waiting for fifo to empty,
		 * or just sent MB_OUT_STUFFED, then we are
		 * still waiting for it to empty, until we should
		 * receive an MB_IN_STRIPPED from the board.
		 */
		pB->i2eWaitingForEmptyFifo |=
			(pB->i2eOutMailWaiting & MB_OUT_STUFFED);
		pB->i2eOutMailWaiting = 0;
		pB->SendPendingRetry = 0;
		return;
	}

	/* "iiTrySendMail" failed: the outbound mailbox is still busy
	   with the last message.  We take a short breather (one jiffy)
	   to let the board catch up with itself and then try again.
	   16 Retries is the limit - then we got a borked board.
	   /\/\|=mhw=|\/\/ */
	if (++pB->SendPendingRetry < 16) {
		setup_timer(&pB->SendPendingTimer,
			iiSendPendingMail_t, (unsigned long)pB);
		mod_timer(&pB->SendPendingTimer, jiffies + 1);
	} else {
		printk( KERN_ERR "IP2: iiSendPendingMail unable to queue outbound mail\n" );
	}
}
//******************************************************************************
// Function: i2InitChannels(pB, nChannels, pCh)
// Parameters: Pointer to Ellis Board structure
// Number of channels to initialize
// Pointer to first element in an array of channel structures
// Returns: Success or failure
//
// Description:
//
// This function patches pointers, back-pointers, and initializes all the
// elements in the channel structure array.
//
// This should be run after the board structure is initialized, through having
// loaded the standard loadware (otherwise it complains).
//
// In any case, it must be done before any serious work begins initializing the
// irq's or sending commands...
//
//******************************************************************************
/*
 * Initialize the array of channel structures for a board: wire up
 * back-pointers, ring-buffer indices, wait queues, default line
 * settings, and pre-queue one flow-control packet per supported
 * channel.  Must run after the standard loadware is loaded and before
 * IRQ setup.  Completes via I2_COMPLETE with I2EE_GOOD on success or a
 * bad-magic/bad-state error.
 */
static int
i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
{
	int index, stuffIndex;
	i2ChanStrPtr *ppCh;

	if (pB->i2eValid != I2E_MAGIC) {
		I2_COMPLETE(pB, I2EE_BADMAGIC);
	}
	if (pB->i2eState != II_STATE_STDLOADED) {
		I2_COMPLETE(pB, I2EE_BADSTATE);
	}

	rwlock_init(&pB->read_fifo_spinlock);
	rwlock_init(&pB->write_fifo_spinlock);
	rwlock_init(&pB->Dbuf_spinlock);
	rwlock_init(&pB->Bbuf_spinlock);
	rwlock_init(&pB->Fbuf_spinlock);

	// NO LOCK needed yet - this is init

	pB->i2eChannelPtr = pCh;
	pB->i2eChannelCnt = nChannels;

	/* Reset all three board-level service rings to empty. */
	pB->i2Fbuf_strip = pB->i2Fbuf_stuff = 0;
	pB->i2Dbuf_strip = pB->i2Dbuf_stuff = 0;
	pB->i2Bbuf_strip = pB->i2Bbuf_stuff = 0;

	pB->SendPendingRetry = 0;

	memset ( pCh, 0, sizeof (i2ChanStr) * nChannels );

	/* Walk the board's channel map; only bits set in i2eChannelMap
	 * correspond to physical channels. */
	for (index = stuffIndex = 0, ppCh = (i2ChanStrPtr *)(pB->i2Fbuf);
	      nChannels && index < ABS_MOST_PORTS;
	      index++)
	{
		if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
			continue;
		}
		rwlock_init(&pCh->Ibuf_spinlock);
		rwlock_init(&pCh->Obuf_spinlock);
		rwlock_init(&pCh->Cbuf_spinlock);
		rwlock_init(&pCh->Pbuf_spinlock);

		// NO LOCK needed yet - this is init
		// Set up validity flag according to support level
		if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
			pCh->validity = CHANNEL_MAGIC | CHANNEL_SUPPORT;
		} else {
			pCh->validity = CHANNEL_MAGIC;
		}
		pCh->pMyBord = pB;	/* Back-pointer */

		// Prepare an outgoing flow-control packet to send as soon as the chance
		// occurs.
		if ( pCh->validity & CHANNEL_SUPPORT ) {
			pCh->infl.hd.i2sChannel = index;
			pCh->infl.hd.i2sCount = 5;
			pCh->infl.hd.i2sType = PTYPE_BYPASS;
			pCh->infl.fcmd = 37;
			pCh->infl.asof = 0;
			pCh->infl.room = IBUF_SIZE - 1;

			pCh->whenSendFlow = (IBUF_SIZE/5)*4; // when 80% full

			// The following is similar to calling i2QueueNeeds, except that this
			// is done in longhand, since we are setting up initial conditions on
			// many channels at once.
			pCh->channelNeeds = NEED_FLOW;  // Since starting from scratch
			pCh->sinceLastFlow = 0;         // No bytes received since last flow
							// control packet was queued
			stuffIndex++;
			*ppCh++ = pCh;      // List this channel as needing
					    // initial flow control packet sent
		}

		// Don't allow anything to be sent until the status packets come in from
		// the board.
		pCh->outfl.asof = 0;
		pCh->outfl.room = 0;

		// Initialize all the ring buffers
		pCh->Ibuf_stuff = pCh->Ibuf_strip = 0;
		pCh->Obuf_stuff = pCh->Obuf_strip = 0;
		pCh->Cbuf_stuff = pCh->Cbuf_strip = 0;

		memset( &pCh->icount, 0, sizeof (struct async_icount) );
		pCh->hotKeyIn = HOT_CLEAR;
		pCh->channelOptions = 0;
		pCh->bookMarks = 0;
		init_waitqueue_head(&pCh->pBookmarkWait);

		init_waitqueue_head(&pCh->open_wait);
		init_waitqueue_head(&pCh->close_wait);
		init_waitqueue_head(&pCh->delta_msr_wait);

		// Set base and divisor so default custom rate is 9600
		pCh->BaudBase    = 921600;	// MAX for ST654, changed after we get
		pCh->BaudDivisor = 96;		// the boxids (UART types) later

		pCh->dataSetIn   = 0;
		pCh->dataSetOut  = 0;

		pCh->wopen       = 0;
		pCh->throttled   = 0;

		pCh->speed       = CBR_9600;

		pCh->flags    = 0;

		pCh->ClosingDelay     = 5*HZ/10;
		pCh->ClosingWaitTime  = 30*HZ;

		// Initialize task queue objects
		INIT_WORK(&pCh->tqueue_input, do_input);
		INIT_WORK(&pCh->tqueue_status, do_status);

#ifdef IP2DEBUG_TRACE
		pCh->trace = ip2trace;
#endif

		++pCh;
		--nChannels;
	}
	// No need to check for wrap here; this is initialization.
	pB->i2Fbuf_stuff = stuffIndex;
	I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: i2DeQueueNeeds(pB, type)
// Parameters: Pointer to a board structure
// type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
// Returns:
// Pointer to a channel structure
//
// Description: Returns pointer struct of next channel that needs service of
// the type specified. Otherwise returns a NULL reference.
//
//******************************************************************************
/*
 * Pop the next channel awaiting service of the given kind.
 *
 * @pB:   board whose service rings are polled
 * @type: exactly one of NEED_INLINE, NEED_BYPASS or NEED_FLOW
 *
 * Returns the channel at the head of the matching ring with its need
 * bit cleared, or NULL when the ring is empty (or type is unknown).
 * Each ring is protected by its own irq-safe write lock.
 */
static i2ChanStrPtr
i2DeQueueNeeds(i2eBordStrPtr pB, int type)
{
	unsigned short idx;
	unsigned long flags;
	i2ChanStrPtr pCh = NULL;

	switch (type) {
	case NEED_INLINE:
		write_lock_irqsave(&pB->Dbuf_spinlock, flags);
		if (pB->i2Dbuf_stuff != pB->i2Dbuf_strip) {
			idx = pB->i2Dbuf_strip;
			pCh = pB->i2Dbuf[idx];
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Dbuf_strip = idx;
			pCh->channelNeeds &= ~NEED_INLINE;
		}
		write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
		break;

	case NEED_BYPASS:
		write_lock_irqsave(&pB->Bbuf_spinlock, flags);
		if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) {
			idx = pB->i2Bbuf_strip;
			pCh = pB->i2Bbuf[idx];
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Bbuf_strip = idx;
			pCh->channelNeeds &= ~NEED_BYPASS;
		}
		write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
		break;

	case NEED_FLOW:
		write_lock_irqsave(&pB->Fbuf_spinlock, flags);
		if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) {
			idx = pB->i2Fbuf_strip;
			pCh = pB->i2Fbuf[idx];
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Fbuf_strip = idx;
			pCh->channelNeeds &= ~NEED_FLOW;
		}
		write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
		break;

	default:
		printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type);
		break;
	}
	return pCh;
}
//******************************************************************************
// Function: i2QueueNeeds(pB, pCh, type)
// Parameters: Pointer to a board structure
// Pointer to a channel structure
// type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
// Returns: Nothing
//
// Description:
// For each type of need selected, if the given channel is not already in the
// queue, adds it, and sets the flag indicating it is in the queue.
//******************************************************************************
/*
 * Flag a channel as needing service of the given kind.
 *
 * Appends the channel to the matching board-level ring unless its need
 * bit is already set, so each channel appears at most once per ring.
 * NEED_CREDIT only sets the bit; it has no ring.
 */
static void
i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
{
	unsigned short idx;
	unsigned long flags;

	/*
	 * Interrupts are locked out around each ring update because the
	 * interrupt-level code appends to these same rings.
	 */
	switch (type) {
	case NEED_INLINE:
		write_lock_irqsave(&pB->Dbuf_spinlock, flags);
		if (!(pCh->channelNeeds & NEED_INLINE)) {
			pCh->channelNeeds |= NEED_INLINE;
			idx = pB->i2Dbuf_stuff;
			pB->i2Dbuf[idx] = pCh;
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Dbuf_stuff = idx;
		}
		write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
		break;

	case NEED_BYPASS:
		write_lock_irqsave(&pB->Bbuf_spinlock, flags);
		if (!(pCh->channelNeeds & NEED_BYPASS)) {
			pCh->channelNeeds |= NEED_BYPASS;
			idx = pB->i2Bbuf_stuff;
			pB->i2Bbuf[idx] = pCh;
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Bbuf_stuff = idx;
		}
		write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
		break;

	case NEED_FLOW:
		write_lock_irqsave(&pB->Fbuf_spinlock, flags);
		if (!(pCh->channelNeeds & NEED_FLOW)) {
			pCh->channelNeeds |= NEED_FLOW;
			idx = pB->i2Fbuf_stuff;
			pB->i2Fbuf[idx] = pCh;
			if (++idx >= CH_QUEUE_SIZE)
				idx = 0;
			pB->i2Fbuf_stuff = idx;
		}
		write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
		break;

	case NEED_CREDIT:
		pCh->channelNeeds |= NEED_CREDIT;
		break;

	default:
		printk(KERN_ERR "i2QueueNeeds called with bad type:%x\n",type);
		break;
	}
}
//******************************************************************************
// Function: i2QueueCommands(type, pCh, timeout, nCommands, pCs,...)
// Parameters: type - PTYPE_BYPASS or PTYPE_INLINE
// pointer to the channel structure
// maximum period to wait
// number of commands (n)
// n commands
// Returns: Number of commands sent, or -1 for error
//
// get board lock before calling
//
// Description:
// Queues up some commands to be sent to a channel. To send possibly several
// bypass or inline commands to the given channel. The timeout parameter
// indicates how many HUNDREDTHS OF SECONDS to wait until there is room:
// 0 = return immediately if no room, -ive = wait forever, +ive = number of
// 1/100 seconds to wait. Return values:
// -1 Some kind of nasty error: bad channel structure or invalid arguments.
// 0 No room to send all the commands
// (+) Number of commands sent
//******************************************************************************
/*
 * Queue nCommands bypass or inline commands onto a channel's outgoing
 * ring, blocking up to timeout/100 seconds for room (0 = don't wait,
 * negative = wait forever).  Returns the number of commands queued,
 * 0 on timeout/signal, or a negative code for invalid channel/board/
 * type.  On the success path the buffer lock taken inside the wait
 * loop is held until the matching unlock in the final switch.
 *
 * NOTE(review): the variadic command arguments are walked via
 * "ppCs = &pCs0; ppCs++" rather than <stdarg.h>, which assumes the
 * ABI lays the variadic arguments out contiguously -- non-portable;
 * confirm before building for a new architecture.
 */
static int
i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
		cmdSyntaxPtr pCs0,...)
{
	int totalsize = 0;
	int blocksize;
	int lastended;
	cmdSyntaxPtr *ppCs;
	cmdSyntaxPtr pCs;
	int count;
	int flag;
	i2eBordStrPtr pB;

	unsigned short maxBlock;
	unsigned short maxBuff;
	short bufroom;
	unsigned short stuffIndex;
	unsigned char *pBuf;
	unsigned char *pInsert;
	unsigned char *pDest, *pSource;
	unsigned short channel;
	int cnt;
	unsigned long flags = 0;
	rwlock_t *lock_var_p = NULL;

	// Make sure the channel exists, otherwise do nothing
	if ( !i2Validate ( pCh ) ) {
		return -1;
	}

	ip2trace (CHANN, ITRC_QUEUE, ITRC_ENTER, 0 );

	pB = pCh->pMyBord;

	// Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT
	if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED)
		return -2;
	// If the board has gone fatal, return bad, and also hit the trap routine if
	// it exists.
	if (pB->i2eFatal) {
		if ( pB->i2eFatalTrap ) {
			(*(pB)->i2eFatalTrap)(pB);
		}
		return -3;
	}
	// Set up some variables, Which buffers are we using? How big are they?
	switch(type)
	{
	case PTYPE_INLINE:
		flag = INL;
		maxBlock = MAX_OBUF_BLOCK;
		maxBuff = OBUF_SIZE;
		pBuf = pCh->Obuf;
		break;
	case PTYPE_BYPASS:
		flag = BYP;
		maxBlock = MAX_CBUF_BLOCK;
		maxBuff = CBUF_SIZE;
		pBuf = pCh->Cbuf;
		break;
	default:
		return -4;
	}
	// Determine the total size required for all the commands
	totalsize = blocksize = sizeof(i2CmdHeader);
	lastended = 0;
	ppCs = &pCs0;
	for ( count = nCommands; count; count--, ppCs++)
	{
		pCs = *ppCs;
		cnt = pCs->length;
		// Will a new block be needed for this one?
		// Two possible reasons: too
		// big or previous command has to be at the end of a packet.
		if ((blocksize + cnt > maxBlock) || lastended) {
			blocksize = sizeof(i2CmdHeader);
			totalsize += sizeof(i2CmdHeader);
		}
		totalsize += cnt;
		blocksize += cnt;

		// If this command had to end a block, then we will make sure to
		// account for it should there be any more blocks.
		lastended = pCs->flags & END;
	}
	// Wait loop: on normal exit (break) the relevant buffer lock is HELD.
	for (;;) {
		// Make sure any pending flush commands go out before we add more data.
		if ( !( pCh->flush_flags && i2RetryFlushOutput( pCh ) ) ) {
			// How much room (this time through) ?
			switch(type) {
			case PTYPE_INLINE:
				lock_var_p = &pCh->Obuf_spinlock;
				write_lock_irqsave(lock_var_p, flags);
				stuffIndex = pCh->Obuf_stuff;
				bufroom = pCh->Obuf_strip - stuffIndex;
				break;
			case PTYPE_BYPASS:
				lock_var_p = &pCh->Cbuf_spinlock;
				write_lock_irqsave(lock_var_p, flags);
				stuffIndex = pCh->Cbuf_stuff;
				bufroom = pCh->Cbuf_strip - stuffIndex;
				break;
			default:
				return -5;
			}
			if (--bufroom < 0) {
				bufroom += maxBuff;
			}

			ip2trace (CHANN, ITRC_QUEUE, 2, 1, bufroom );

			// Check for overflow
			if (totalsize <= bufroom) {
				// Normal Expected path - We still hold LOCK
				break; /* from for()- Enough room: goto proceed */
			}
			ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
			write_unlock_irqrestore(lock_var_p, flags);
		} else
			ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);

		/* Prepare to wait for buffers to empty */
		serviceOutgoingFifo(pB);	// Dump what we got

		if (timeout == 0) {
			return 0;   // Tired of waiting
		}
		if (timeout > 0)
			timeout--;   // So negative values == forever

		if (!in_interrupt()) {
			schedule_timeout_interruptible(1);	// short nap
		} else {
			// we cannot sched/sleep in interrupt silly
			return 0;
		}
		if (signal_pending(current)) {
			return 0;   // Wake up! Time to die!!!
		}

		ip2trace (CHANN, ITRC_QUEUE, 4, 0 );

	}	// end of for(;;)

	// At this point we have room and the lock - stick them in.
	channel = pCh->infl.hd.i2sChannel;
	pInsert = &pBuf[stuffIndex];     // Pointer to start of packet
	pDest = CMD_OF(pInsert);         // Pointer to start of command

	// When we start counting, the block is the size of the header
	for (blocksize = sizeof(i2CmdHeader), count = nCommands,
			lastended = 0, ppCs = &pCs0;
		count;
		count--, ppCs++)
	{
		pCs = *ppCs;   // Points to command protocol structure

		// If this is a bookmark request command, post the fact that a bookmark
		// request is pending. NOTE THIS TRICK ONLY WORKS BECAUSE CMD_BMARK_REQ
		// has no parameters! The more general solution would be to reference
		// pCs->cmd[0].
		if (pCs == CMD_BMARK_REQ) {
			pCh->bookMarks++;

			ip2trace (CHANN, ITRC_DRAIN, 30, 1, pCh->bookMarks );

		}
		cnt = pCs->length;

		// If this command would put us over the maximum block size or
		// if the last command had to be at the end of a block, we end
		// the existing block here and start a new one.
		if ((blocksize + cnt > maxBlock) || lastended) {

			ip2trace (CHANN, ITRC_QUEUE, 5, 0 );

			PTYPE_OF(pInsert) = type;
			CHANNEL_OF(pInsert) = channel;
			// count here does not include the header
			CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
			stuffIndex += blocksize;
			if(stuffIndex >= maxBuff) {
				stuffIndex = 0;
				pInsert = pBuf;
			}
			pInsert = &pBuf[stuffIndex];	// Pointer to start of next pkt
			pDest = CMD_OF(pInsert);
			blocksize = sizeof(i2CmdHeader);
		}
		// Now we know there is room for this one in the current block
		blocksize += cnt;	// Total bytes in this command
		pSource = pCs->cmd;	// Copy the command into the buffer
		while (cnt--) {
			*pDest++ = *pSource++;
		}
		// If this command had to end a block, then we will make sure to account
		// for it should there be any more blocks.
		lastended = pCs->flags & END;
	}	// end for
	// Clean up the final block by writing header, etc
	PTYPE_OF(pInsert) = type;
	CHANNEL_OF(pInsert) = channel;
	// count here does not include the header
	CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
	stuffIndex += blocksize;
	if(stuffIndex >= maxBuff) {
		stuffIndex = 0;
		pInsert = pBuf;
	}
	// Updates the index, and post the need for service. When adding these to
	// the queue of channels, we turn off the interrupt while doing so,
	// because at interrupt level we might want to push a channel back to the
	// end of the queue.
	switch(type)
	{
	case PTYPE_INLINE:
		pCh->Obuf_stuff = stuffIndex;  // Store buffer pointer
		write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);

		pB->debugInlineQueued++;
		// Add the channel pointer to list of channels needing service (first
		// come...), if it's not already there.
		i2QueueNeeds(pB, pCh, NEED_INLINE);
		break;

	case PTYPE_BYPASS:
		pCh->Cbuf_stuff = stuffIndex;  // Store buffer pointer
		write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);

		pB->debugBypassQueued++;
		// Add the channel pointer to list of channels needing service (first
		// come...), if it's not already there.
		i2QueueNeeds(pB, pCh, NEED_BYPASS);
		break;
	}

	ip2trace (CHANN, ITRC_QUEUE, ITRC_RETURN, 1, nCommands );

	return nCommands; // Good status: number of commands sent
}
//******************************************************************************
// Function: i2GetStatus(pCh,resetBits)
// Parameters: Pointer to a channel structure
// Bit map of status bits to clear
// Returns: Bit map of current status bits
//
// Description:
// Returns the state of data set signals, and whether a break has been received,
// (see i2lib.h for bit-mapped result). resetBits is a bit-map of any status
// bits to be cleared: I2_BRK, I2_PAR, I2_FRA, I2_OVR,... These are cleared
// AFTER the condition is passed. If pCh does not point to a valid channel,
// returns -1 (which would be impossible otherwise).
//******************************************************************************
/*
 * Return the channel's current status word (data-set signals plus
 * latched error bits), optionally clearing selected error bits and
 * always consuming the one-shot delta bits.  Returns -1 for an
 * invalid channel.
 */
static int
i2GetStatus(i2ChanStrPtr pCh, int resetBits)
{
	unsigned short status;
	i2eBordStrPtr pB;

	ip2trace (CHANN, ITRC_STATUS, ITRC_ENTER, 2, pCh->dataSetIn, resetBits );

	// Make sure the channel exists, otherwise do nothing */
	if (!i2Validate(pCh))
		return -1;

	pB = pCh->pMyBord;
	status = pCh->dataSetIn;

	if (resetBits) {
		/* Only genuine error bits can be cleared by the caller,
		 * whatever mask was passed in. */
		pCh->dataSetIn &=
			~(resetBits & (I2_BRK | I2_PAR | I2_FRA | I2_OVR));
		/* The delta bits are one-shot: always consumed on read. */
		pCh->dataSetIn &= ~(I2_DDCD | I2_DCTS | I2_DDSR | I2_DRI);
	}

	ip2trace (CHANN, ITRC_STATUS, ITRC_RETURN, 1, pCh->dataSetIn );

	return status;
}
//******************************************************************************
// Function: i2Input(pCh)
// Parameters: Pointer to a channel structure
// Returns: Number of bytes passed to the line discipline, or -1 for error
//
// Description:
// Strips data from the input buffer and feeds it to the tty line discipline.
// If there is a colossal blunder (invalid structure pointers or the like),
// returns -1. Otherwise, returns the number of bytes stripped.
//******************************************************************************
/*
 * Strip received data from the channel's input ring and hand it to the
 * tty line discipline (at most pTTY->receive_room bytes, in up to two
 * chunks if the ring wraps).  Updates the inbound flow-control
 * counters and queues a flow packet once 80% of the buffer has been
 * consumed.  Returns the number of bytes delivered, or -1 for an
 * invalid channel.
 *
 * NOTE(review): the receive_buf() calls happen while Ibuf_spinlock is
 * held with interrupts off -- the ldisc path must therefore never
 * sleep here; confirm.
 */
static int
i2Input(i2ChanStrPtr pCh)
{
	int amountToMove;
	unsigned short stripIndex;
	int count;
	unsigned long flags = 0;

	ip2trace (CHANN, ITRC_INPUT, ITRC_ENTER, 0);

	// Ensure channel structure seems real
	if ( !i2Validate( pCh ) ) {
		count = -1;
		goto i2Input_exit;
	}

	write_lock_irqsave(&pCh->Ibuf_spinlock, flags);

	// initialize some accelerators and private copies
	stripIndex = pCh->Ibuf_strip;

	count = pCh->Ibuf_stuff - stripIndex;

	// If buffer is empty or requested data count was 0, (trivial case) return
	// without any further thought.
	if ( count == 0 ) {
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
		goto i2Input_exit;
	}
	// Adjust for buffer wrap
	if ( count < 0 ) {
		count += IBUF_SIZE;
	}
	// Don't give more than can be taken by the line discipline
	amountToMove = pCh->pTTY->receive_room;
	if (count > amountToMove) {
		count = amountToMove;
	}
	// How much could we copy without a wrap?
	amountToMove = IBUF_SIZE - stripIndex;

	if (amountToMove > count) {
		amountToMove = count;
	}
	// Move the first block
	pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
		 &(pCh->Ibuf[stripIndex]), NULL, amountToMove );
	// If we needed to wrap, do the second data move
	if (count > amountToMove) {
		pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
				 pCh->Ibuf, NULL, count - amountToMove );
	}
	// Bump and wrap the stripIndex all at once by the amount of data read. This
	// method is good regardless of whether the data was in one or two pieces.
	stripIndex += count;
	if (stripIndex >= IBUF_SIZE) {
		stripIndex -= IBUF_SIZE;
	}
	pCh->Ibuf_strip = stripIndex;

	// Update our flow control information and possibly queue ourselves to send
	// it, depending on how much data has been stripped since the last time a
	// packet was sent.
	pCh->infl.asof += count;
	if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) {
		pCh->sinceLastFlow -= pCh->whenSendFlow;
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
		i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
	} else {
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
	}

i2Input_exit:

	ip2trace (CHANN, ITRC_INPUT, ITRC_RETURN, 1, count);

	return count;
}
//******************************************************************************
// Function: i2InputFlush(pCh)
// Parameters: Pointer to a channel structure
// Returns: Number of bytes stripped, or -1 for error
//
// Description:
// Strips any data from the input buffer. If there is a colossal blunder,
// (invalid structure pointers or the like), returns -1. Otherwise, returns the
// number of bytes stripped.
//******************************************************************************
static int
i2InputFlush(i2ChanStrPtr pCh)
{
int count;
unsigned long flags;
// Ensure channel structure seems real
if ( !i2Validate ( pCh ) )
return -1;
ip2trace (CHANN, ITRC_INPUT, 10, 0);
write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
// Adjust for buffer wrap
if (count < 0) {
count += IBUF_SIZE;
}
// Expedient way to zero out the buffer: advancing the strip index to the
// stuff index discards everything without touching the data itself.
pCh->Ibuf_strip = pCh->Ibuf_stuff;
// Update our flow control information and possibly queue ourselves to send
// it, depending on how much data has been stripped since the last time a
// packet was sent. Discarded bytes still count as "consumed" for the
// board's credit accounting.
pCh->infl.asof += count;
if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow )
{
pCh->sinceLastFlow -= pCh->whenSendFlow;
// Drop the lock before queueing: i2QueueNeeds is never called with
// Ibuf_spinlock held.
write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
} else {
write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
}
ip2trace (CHANN, ITRC_INPUT, 19, 1, count);
return count;
}
//******************************************************************************
// Function: i2InputAvailable(pCh)
// Parameters: Pointer to a channel structure
// Returns: Number of bytes available, or -1 for error
//
// Description:
// If there is a colossal blunder, (invalid structure pointers or the like),
// returns -1. Otherwise, returns the number of bytes currently available
// in the input buffer.
//******************************************************************************
#if 0
// Reports how many bytes are currently buffered for the channel, or -1 if
// the channel structure fails validation. Currently compiled out; note the
// original version used 'flags' without declaring it, so it would not have
// compiled if the #if 0 were removed -- fixed here.
static int
i2InputAvailable(i2ChanStrPtr pCh)
{
	int count;
	unsigned long flags;	/* required by the irqsave lock operations */

	// Ensure channel structure seems real
	if ( !i2Validate ( pCh ) ) return -1;

	// Snapshot the ring indices atomically under the buffer lock.
	read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
	count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
	read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);

	// Adjust for buffer wrap
	if (count < 0)
	{
		count += IBUF_SIZE;
	}

	return count;
}
#endif
//******************************************************************************
// Function: i2Output(pCh, pSource, count)
// Parameters: Pointer to channel structure
// Pointer to source data
// Number of bytes to send
// Returns: Number of bytes sent, or -1 for error
//
// Description:
// Queues the data at pSource to be sent as data packets to the board. If there
// is a colossal blunder, (invalid structure pointers or the like), returns -1.
// Otherwise, returns the number of bytes written. What if there is not enough
// room for all the data? If pCh->channelOptions & CO_NBLOCK_WRITE is set, then
// we transfer as many characters as we can now, then return. If this bit is
// clear (default), routine will spin along until all the data is buffered.
// Should this occur, the 1-ms delay routine is called while waiting to avoid
// applications that one cannot break out of.
//******************************************************************************
static int
i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
{
i2eBordStrPtr pB;
unsigned char *pInsert;
int amountToMove;
int countOriginal = count;
unsigned short channel;
unsigned short stuffIndex;
unsigned long flags;
int bailout = 10;
ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, 0 );
// Ensure channel structure seems real
if ( !i2Validate ( pCh ) )
return -1;
// initialize some accelerators and private copies
pB = pCh->pMyBord;
channel = pCh->infl.hd.i2sChannel;
// If the board has gone fatal, return bad, and also hit the trap routine if
// it exists.
if (pB->i2eFatal) {
if (pB->i2eFatalTrap) {
(*(pB)->i2eFatalTrap)(pB);
}
return -1;
}
// Proceed as though we would do everything
while ( count > 0 ) {
// How much room in output buffer is there?
read_lock_irqsave(&pCh->Obuf_spinlock, flags);
amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
if (amountToMove < 0) {
amountToMove += OBUF_SIZE;
}
// Subtract off the headers size and see how much room there is for real
// data. If this is negative, we will discover later.
amountToMove -= sizeof (i2DataHeader);
// Don't move more (now) than can go in a single packet
if ( amountToMove > (int)(MAX_OBUF_BLOCK - sizeof(i2DataHeader)) ) {
amountToMove = MAX_OBUF_BLOCK - sizeof(i2DataHeader);
}
// Don't move more than the count we were given
if (amountToMove > count) {
amountToMove = count;
}
// Now we know how much we must move: NB because the ring buffers have
// an overflow area at the end, we needn't worry about wrapping in the
// middle of a packet.
// Small WINDOW here with no LOCK but I can't call Flush with LOCK
// We would be flushing (or ending flush) anyway
ip2trace (CHANN, ITRC_OUTPUT, 10, 1, amountToMove );
// Short-circuit: i2RetryFlushOutput() only runs when flush_flags is
// non-zero. The condition as a whole reads "no flush retry is still
// pending AND we actually have room to move data".
if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) )
&& amountToMove > 0 )
{
write_lock_irqsave(&pCh->Obuf_spinlock, flags);
stuffIndex = pCh->Obuf_stuff;
// Had room to move some data: don't know whether the block size,
// buffer space, or what was the limiting factor...
pInsert = &(pCh->Obuf[stuffIndex]);
// Set up the header
CHANNEL_OF(pInsert) = channel;
PTYPE_OF(pInsert) = PTYPE_DATA;
TAG_OF(pInsert) = 0;
ID_OF(pInsert) = ID_ORDINARY_DATA;
DATA_COUNT_OF(pInsert) = amountToMove;
// Move the data
memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
// Adjust pointers and indices
pSource += amountToMove;
pCh->Obuf_char_count += amountToMove;
stuffIndex += amountToMove + sizeof(i2DataHeader);
count -= amountToMove;
if (stuffIndex >= OBUF_SIZE) {
stuffIndex = 0;
}
pCh->Obuf_stuff = stuffIndex;
write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );
} else {
// Cannot move data
// becuz we need to stuff a flush
// or amount to move is <= 0
ip2trace(CHANN, ITRC_OUTPUT, 14, 3,
amountToMove, pB->i2eFifoRemains,
pB->i2eWaitingForEmptyFifo );
// Put this channel back on queue
// this ultimatly gets more data or wakes write output
i2QueueNeeds(pB, pCh, NEED_INLINE);
if ( pB->i2eWaitingForEmptyFifo ) {
ip2trace (CHANN, ITRC_OUTPUT, 16, 0 );
// or schedule
if (!in_interrupt()) {
ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );
// Blocking (default) path: nap briefly, then retry unless a
// signal arrived.
schedule_timeout_interruptible(2);
if (signal_pending(current)) {
break;
}
continue;
} else {
ip2trace (CHANN, ITRC_OUTPUT, 62, 0 );
// let interrupt in = WAS restore_flags()
// We hold no lock nor is irq off anymore???
break;
}
// NOTE(review): this break is unreachable -- both branches of the
// if/else above end with break or continue. Historical artifact.
break; // from while(count)
}
else if ( pB->i2eFifoRemains < 32 && !pB->i2eTxMailEmpty ( pB ) )
{
ip2trace (CHANN, ITRC_OUTPUT, 19, 2,
pB->i2eFifoRemains,
pB->i2eTxMailEmpty );
break; // from while(count)
} else if ( pCh->channelNeeds & NEED_CREDIT ) {
ip2trace (CHANN, ITRC_OUTPUT, 22, 0 );
break; // from while(count)
} else if ( --bailout) {
// Try to throw more things (maybe not us) in the fifo if we're
// not already waiting for it.
ip2trace (CHANN, ITRC_OUTPUT, 20, 0 );
serviceOutgoingFifo(pB);
//break; CONTINUE;
} else {
// bailout exhausted (10 service attempts): give up this write.
ip2trace (CHANN, ITRC_OUTPUT, 21, 3,
pB->i2eFifoRemains,
pB->i2eOutMailWaiting,
pB->i2eWaitingForEmptyFifo );
break; // from while(count)
}
}
} // End of while(count)
// Requeue ourselves for inline service so buffered data gets pushed out.
i2QueueNeeds(pB, pCh, NEED_INLINE);
// We drop through either when the count expires, or when there is some
// count left, but there was a non-blocking write.
if (countOriginal > count) {
ip2trace (CHANN, ITRC_OUTPUT, 17, 2, countOriginal, count );
serviceOutgoingFifo( pB );
}
ip2trace (CHANN, ITRC_OUTPUT, ITRC_RETURN, 2, countOriginal, count );
return countOriginal - count;
}
//******************************************************************************
// Function: i2FlushOutput(pCh)
// Parameters: Pointer to a channel structure
// Returns: Nothing
//
// Description:
// Sends bypass command to start flushing (waiting possibly forever until there
// is room), then sends inline command to stop flushing output, (again waiting
// possibly forever).
//******************************************************************************
static inline void
i2FlushOutput(i2ChanStrPtr pCh)
{
	ip2trace(CHANN, ITRC_FLUSH, 1, 1, pCh->flush_flags);

	/* A flush retry is already pending; don't queue another. */
	if (pCh->flush_flags)
		return;

	if (i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) != 1) {
		/* Could not queue start-flush: remember it for a later retry. */
		pCh->flush_flags = STARTFL_FLAG;
		ip2trace(CHANN, ITRC_FLUSH, 2, 0);
		return;
	}
	if (i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL) != 1) {
		/* Start-flush queued, but stop-flush failed: retry stop later. */
		pCh->flush_flags = STOPFL_FLAG;
		ip2trace(CHANN, ITRC_FLUSH, 3, 0);
	}
}
static int
i2RetryFlushOutput(i2ChanStrPtr pCh)
{
	int pending = pCh->flush_flags;

	ip2trace(CHANN, ITRC_FLUSH, 14, 1, pending);

	pCh->flush_flags = 0;	/* clear first so queueing cannot recurse */

	if (pending & STARTFL_FLAG) {
		if (i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) == 1)
			pending = STOPFL_FLAG;	/* started: stop-flush is next */
		else
			pending = STARTFL_FLAG;	/* still stuck: retry start later */
		ip2trace(CHANN, ITRC_FLUSH, 15, 1, pending);
	}
	if (pending & STOPFL_FLAG) {
		if (i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL) == 1)
			pending = 0;	/* both commands queued: all done */
		ip2trace(CHANN, ITRC_FLUSH, 16, 1, pending);
	}
	/* Whatever is still outstanding is stashed for the next retry. */
	pCh->flush_flags = pending;

	ip2trace(CHANN, ITRC_FLUSH, 17, 1, pending);

	return pending;
}
//******************************************************************************
// Function: i2DrainOutput(pCh,timeout)
// Parameters: Pointer to a channel structure
// Maximum period to wait
// Returns: ?
//
// Description:
// Uses the bookmark request command to ask the board to send a bookmark back as
// soon as all the data is completely sent.
//******************************************************************************
static void
i2DrainWakeup(unsigned long d)
{
// Timer callback armed by i2DrainOutput(); 'd' is the channel pointer
// cast into the timer-data word.
i2ChanStrPtr pCh = (i2ChanStrPtr)d;
ip2trace (CHANN, ITRC_DRAIN, 10, 1, pCh->BookmarkTimer.expires );
// Zeroing .expires tells i2DrainOutput the timer already fired, so it
// must not call del_timer on it.
pCh->BookmarkTimer.expires = 0;
wake_up_interruptible( &pCh->pBookmarkWait );
}
static void
i2DrainOutput(i2ChanStrPtr pCh, int timeout)
{
wait_queue_t wait;
i2eBordStrPtr pB;
ip2trace (CHANN, ITRC_DRAIN, ITRC_ENTER, 1, pCh->BookmarkTimer.expires);
pB = pCh->pMyBord;
// If the board has gone fatal, return bad,
// and also hit the trap routine if it exists.
if (pB->i2eFatal) {
if (pB->i2eFatalTrap) {
(*(pB)->i2eFatalTrap)(pB);
}
return;
}
// Arm the wakeup timer only when a timeout was requested and no drain
// timer is already pending (.expires == 0 is the "idle" marker, see
// i2DrainWakeup).
if ((timeout > 0) && (pCh->BookmarkTimer.expires == 0 )) {
// One per customer (channel)
setup_timer(&pCh->BookmarkTimer, i2DrainWakeup,
(unsigned long)pCh);
ip2trace (CHANN, ITRC_DRAIN, 1, 1, pCh->BookmarkTimer.expires );
mod_timer(&pCh->BookmarkTimer, jiffies + timeout);
}
// Ask the board to echo back a bookmark when all queued output has been
// sent; the STAT_BMARK handler wakes pBookmarkWait when it arrives.
i2QueueCommands( PTYPE_INLINE, pCh, -1, 1, CMD_BMARK_REQ );
// Standard manual sleep: enqueue, mark interruptible, kick the fifo so
// the request actually goes out, then sleep until bookmark or timer.
init_waitqueue_entry(&wait, current);
add_wait_queue(&(pCh->pBookmarkWait), &wait);
set_current_state( TASK_INTERRUPTIBLE );
serviceOutgoingFifo( pB );
schedule(); // Now we take our interruptible sleep on
// Clean up the queue
set_current_state( TASK_RUNNING );
remove_wait_queue(&(pCh->pBookmarkWait), &wait);
// if expires == 0 then timer poped, then do not need to del_timer
if ((timeout > 0) && pCh->BookmarkTimer.expires &&
time_before(jiffies, pCh->BookmarkTimer.expires)) {
del_timer( &(pCh->BookmarkTimer) );
pCh->BookmarkTimer.expires = 0;
ip2trace (CHANN, ITRC_DRAIN, 3, 1, pCh->BookmarkTimer.expires );
}
ip2trace (CHANN, ITRC_DRAIN, ITRC_RETURN, 1, pCh->BookmarkTimer.expires );
return;
}
//******************************************************************************
// Function: i2OutputFree(pCh)
// Parameters: Pointer to a channel structure
// Returns: Space in output buffer
//
// Description:
// Returns -1 if very gross error. Otherwise returns the amount of bytes still
// free in the output buffer.
//******************************************************************************
static int
i2OutputFree(i2ChanStrPtr pCh)
{
	int space;
	unsigned long flags;

	/* Bogus channel structure: report the gross error. */
	if (!i2Validate(pCh)) {
		return -1;
	}

	/* Snapshot the ring indices under the read lock. */
	read_lock_irqsave(&pCh->Obuf_spinlock, flags);
	space = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
	read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);

	/* Account for ring-buffer wrap. */
	if (space < 0) {
		space += OBUF_SIZE;
	}

	/* Reserve room for one packet header; clamp at zero if that leaves
	 * nothing (or less than nothing). */
	space -= sizeof(i2DataHeader);
	if (space < 0)
		space = 0;
	return space;
}
static void
ip2_owake(PTTY tp)
{
	i2ChanStrPtr pCh;

	/* Nothing to wake without a tty. */
	if (!tp)
		return;

	pCh = tp->driver_data;

	ip2trace(CHANN, ITRC_SICMD, 10, 2, tp->flags,
		(1 << TTY_DO_WRITE_WAKEUP));

	tty_wakeup(tp);
}
static inline void
set_baud_params(i2eBordStrPtr pB)
{
	int box, port;
	i2ChanStrPtr *pCh = (i2ChanStrPtr *) pB->i2eChannelPtr;

	for (box = 0; box < ABS_MAX_BOXES; box++) {
		int base, baud_base, divisor;

		/* Skip empty slots. */
		if (!pB->channelBtypes.bid_value[box])
			continue;

		/* ST654 UARTs reach 921600 baud; CD1400-based boxes top
		 * out at 115200. */
		if (BID_HAS_654(pB->channelBtypes.bid_value[box])) {
			baud_base = 921600;	// MAX for ST654
			divisor = 96;
		} else {			// has cirrus cd1400
			baud_base = 115200;	// MAX for CD1400
			divisor = 12;
		}

		base = box * 16;
		for (port = 0; port < ABS_BIGGEST_BOX; port++) {
			if (pCh[base + port] == NULL)
				break;
			pCh[base + port]->BaudBase = baud_base;
			pCh[base + port]->BaudDivisor = divisor;
		}
	}
}
//******************************************************************************
// Function: i2StripFifo(pB)
// Parameters: Pointer to a board structure
// Returns: ?
//
// Description:
// Strips all the available data from the incoming FIFO, identifies the type of
// packet, and either buffers the data or does what needs to be done.
//
// Note there is no overflow checking here: if the board sends more data than it
// ought to, we will not detect it here, but blindly overflow...
//******************************************************************************
// A buffer for reading in blocks for unknown channels
static unsigned char junkBuffer[IBUF_SIZE];
// A buffer to read in a status packet. Because of the size of the count field
// for these things, the maximum packet size must be less than MAX_CMD_PACK_SIZE
static unsigned char cmdBuffer[MAX_CMD_PACK_SIZE + 4];
// This table changes the bit order from MSR order given by STAT_MODEM packet to
// status bits used in our library.
// Indexed by the low nibble of the status byte (xlatDss[uc & 0xf] in
// i2StripFifo); each entry ORs the I2_DCD/I2_RI/I2_DSR/I2_CTS bits that the
// corresponding MSR nibble represents.
static char xlatDss[16] = {
0 | 0 | 0 | 0 ,
0 | 0 | 0 | I2_CTS ,
0 | 0 | I2_DSR | 0 ,
0 | 0 | I2_DSR | I2_CTS ,
0 | I2_RI | 0 | 0 ,
0 | I2_RI | 0 | I2_CTS ,
0 | I2_RI | I2_DSR | 0 ,
0 | I2_RI | I2_DSR | I2_CTS ,
I2_DCD | 0 | 0 | 0 ,
I2_DCD | 0 | 0 | I2_CTS ,
I2_DCD | 0 | I2_DSR | 0 ,
I2_DCD | 0 | I2_DSR | I2_CTS ,
I2_DCD | I2_RI | 0 | 0 ,
I2_DCD | I2_RI | 0 | I2_CTS ,
I2_DCD | I2_RI | I2_DSR | 0 ,
I2_DCD | I2_RI | I2_DSR | I2_CTS };
static inline void
i2StripFifo(i2eBordStrPtr pB)
{
i2ChanStrPtr pCh;
int channel;
int count;
unsigned short stuffIndex;
int amountToRead;
unsigned char *pc, *pcLimit;
unsigned char uc;
unsigned char dss_change;
unsigned long bflags,cflags;
// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 );
while (I2_HAS_INPUT(pB)) {
// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 );
// Process packet from fifo a one atomic unit
write_lock_irqsave(&pB->read_fifo_spinlock, bflags);
// The first word (or two bytes) will have channel number and type of
// packet, possibly other information
pB->i2eLeadoffWord[0] = iiReadWord(pB);
switch(PTYPE_OF(pB->i2eLeadoffWord))
{
case PTYPE_DATA:
pB->got_input = 1;
// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 3, 0 );
channel = CHANNEL_OF(pB->i2eLeadoffWord); /* Store channel */
count = iiReadWord(pB); /* Count is in the next word */
// NEW: Check the count for sanity! Should the hardware fail, our death
// is more pleasant. While an oversize channel is acceptable (just more
// than the driver supports), an over-length count clearly means we are
// sick!
if ( ((unsigned int)count) > IBUF_SIZE ) {
pB->i2eFatal = 2;
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
return; /* Bail out ASAP */
}
// Channel is illegally big ?
if ((channel >= pB->i2eChannelCnt) ||
(NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])))
{
// Unknown channel: drain its payload into the junk buffer to keep
// the fifo in sync.
iiReadBuf(pB, junkBuffer, count);
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
break; /* From switch: ready for next packet */
}
// Channel should be valid, then
// If this is a hot-key, merely post its receipt for now. These are
// always supposed to be 1-byte packets, so we won't even check the
// count. Also we will post an acknowledgement to the board so that
// more data can be forthcoming. Note that we are not trying to use
// these sequences in this driver, merely to robustly ignore them.
if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY)
{
pCh->hotKeyIn = iiReadWord(pB) & 0xff;
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK);
break; /* From the switch: ready for next packet */
}
// Normal data! We crudely assume there is room for the data in our
// buffer because the board wouldn't have exceeded his credit limit.
// Lock ordering: Ibuf_spinlock is acquired while read_fifo_spinlock
// is held, and released first (reverse order) below.
write_lock_irqsave(&pCh->Ibuf_spinlock, cflags);
// We have 2 locks now
stuffIndex = pCh->Ibuf_stuff;
amountToRead = IBUF_SIZE - stuffIndex;
if (amountToRead > count)
amountToRead = count;
// stuffIndex would have been already adjusted so there would
// always be room for at least one, and count is always at least
// one.
iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead);
pCh->icount.rx += amountToRead;
// Update the stuffIndex by the amount of data moved. Note we could
// never ask for more data than would just fit. However, we might
// have read in one more byte than we wanted because the read
// rounds up to even bytes. If this byte is on the end of the
// packet, and is padding, we ignore it. If the byte is part of
// the actual data, we need to move it.
stuffIndex += amountToRead;
if (stuffIndex >= IBUF_SIZE) {
if ((amountToRead & 1) && (count > amountToRead)) {
// Odd-length first chunk: the extra rounded-up byte landed in
// the overflow slot Ibuf[IBUF_SIZE]; relocate it to the start.
pCh->Ibuf[0] = pCh->Ibuf[IBUF_SIZE];
amountToRead++;
stuffIndex = 1;
} else {
stuffIndex = 0;
}
}
// If there is anything left over, read it as well
if (count > amountToRead) {
amountToRead = count - amountToRead;
iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead);
pCh->icount.rx += amountToRead;
stuffIndex += amountToRead;
}
// Update stuff index
pCh->Ibuf_stuff = stuffIndex;
write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags);
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
#ifdef USE_IQ
schedule_work(&pCh->tqueue_input);
#else
do_input(&pCh->tqueue_input);
#endif
// Note we do not need to maintain any flow-control credits at this
// time: if we were to increment .asof and decrement .room, there
// would be no net effect. Instead, when we strip data, we will
// increment .asof and leave .room unchanged.
break; // From switch: ready for next packet
case PTYPE_STATUS:
ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 4, 0 );
count = CMD_COUNT_OF(pB->i2eLeadoffWord);
iiReadBuf(pB, cmdBuffer, count);
// We can release early with buffer grab
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
pc = cmdBuffer;
pcLimit = &(cmdBuffer[count]);
// Walk the status packet: each entry is a channel byte followed by a
// status byte, optionally followed by a typed payload.
while (pc < pcLimit) {
channel = *pc++;
ip2trace (channel, ITRC_SFIFO, 7, 2, channel, *pc );
/* check for valid channel */
if (channel < pB->i2eChannelCnt
&&
(pCh = (((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])) != NULL
)
{
dss_change = 0;
switch (uc = *pc++)
{
/* Breaks and modem signals are easy: just update status */
case STAT_CTS_UP:
if ( !(pCh->dataSetIn & I2_CTS) )
{
pCh->dataSetIn |= I2_DCTS;
pCh->icount.cts++;
dss_change = 1;
}
pCh->dataSetIn |= I2_CTS;
break;
case STAT_CTS_DN:
if ( pCh->dataSetIn & I2_CTS )
{
pCh->dataSetIn |= I2_DCTS;
pCh->icount.cts++;
dss_change = 1;
}
pCh->dataSetIn &= ~I2_CTS;
break;
case STAT_DCD_UP:
ip2trace (channel, ITRC_MODEM, 1, 1, pCh->dataSetIn );
if ( !(pCh->dataSetIn & I2_DCD) )
{
ip2trace (CHANN, ITRC_MODEM, 2, 0 );
pCh->dataSetIn |= I2_DDCD;
pCh->icount.dcd++;
dss_change = 1;
}
pCh->dataSetIn |= I2_DCD;
ip2trace (channel, ITRC_MODEM, 3, 1, pCh->dataSetIn );
break;
case STAT_DCD_DN:
ip2trace (channel, ITRC_MODEM, 4, 1, pCh->dataSetIn );
if ( pCh->dataSetIn & I2_DCD )
{
ip2trace (channel, ITRC_MODEM, 5, 0 );
pCh->dataSetIn |= I2_DDCD;
pCh->icount.dcd++;
dss_change = 1;
}
pCh->dataSetIn &= ~I2_DCD;
ip2trace (channel, ITRC_MODEM, 6, 1, pCh->dataSetIn );
break;
case STAT_DSR_UP:
if ( !(pCh->dataSetIn & I2_DSR) )
{
pCh->dataSetIn |= I2_DDSR;
pCh->icount.dsr++;
dss_change = 1;
}
pCh->dataSetIn |= I2_DSR;
break;
case STAT_DSR_DN:
if ( pCh->dataSetIn & I2_DSR )
{
pCh->dataSetIn |= I2_DDSR;
pCh->icount.dsr++;
dss_change = 1;
}
pCh->dataSetIn &= ~I2_DSR;
break;
case STAT_RI_UP:
if ( !(pCh->dataSetIn & I2_RI) )
{
pCh->dataSetIn |= I2_DRI;
pCh->icount.rng++;
dss_change = 1;
}
pCh->dataSetIn |= I2_RI ;
break;
case STAT_RI_DN:
// to be compat with serial.c
//if ( pCh->dataSetIn & I2_RI )
//{
// pCh->dataSetIn |= I2_DRI;
// pCh->icount.rng++;
// dss_change = 1;
//}
pCh->dataSetIn &= ~I2_RI ;
break;
case STAT_BRK_DET:
pCh->dataSetIn |= I2_BRK;
pCh->icount.brk++;
dss_change = 1;
break;
// Bookmarks? one less request we're waiting for
case STAT_BMARK:
pCh->bookMarks--;
if (pCh->bookMarks <= 0 ) {
pCh->bookMarks = 0;
// Wake i2DrainOutput(), which sleeps on this queue.
wake_up_interruptible( &pCh->pBookmarkWait );
ip2trace (channel, ITRC_DRAIN, 20, 1, pCh->BookmarkTimer.expires );
}
break;
// Flow control packets? Update the new credits, and if
// someone was waiting for output, queue him up again.
case STAT_FLOW:
pCh->outfl.room =
((flowStatPtr)pc)->room -
(pCh->outfl.asof - ((flowStatPtr)pc)->asof);
ip2trace (channel, ITRC_STFLW, 1, 1, pCh->outfl.room );
if (pCh->channelNeeds & NEED_CREDIT)
{
ip2trace (channel, ITRC_STFLW, 2, 1, pCh->channelNeeds);
pCh->channelNeeds &= ~NEED_CREDIT;
i2QueueNeeds(pB, pCh, NEED_INLINE);
if ( pCh->pTTY )
ip2_owake(pCh->pTTY);
}
ip2trace (channel, ITRC_STFLW, 3, 1, pCh->channelNeeds);
pc += sizeof(flowStat);
break;
/* Special packets: */
/* Just copy the information into the channel structure */
case STAT_STATUS:
pCh->channelStatus = *((debugStatPtr)pc);
pc += sizeof(debugStat);
break;
case STAT_TXCNT:
pCh->channelTcount = *((cntStatPtr)pc);
pc += sizeof(cntStat);
break;
case STAT_RXCNT:
pCh->channelRcount = *((cntStatPtr)pc);
pc += sizeof(cntStat);
break;
case STAT_BOXIDS:
pB->channelBtypes = *((bidStatPtr)pc);
pc += sizeof(bidStat);
set_baud_params(pB);
break;
case STAT_HWFAIL:
i2QueueCommands (PTYPE_INLINE, pCh, 0, 1, CMD_HW_TEST);
pCh->channelFail = *((failStatPtr)pc);
pc += sizeof(failStat);
break;
/* No explicit match? then
* Might be an error packet...
*/
default:
switch (uc & STAT_MOD_ERROR)
{
case STAT_ERROR:
if (uc & STAT_E_PARITY) {
pCh->dataSetIn |= I2_PAR;
pCh->icount.parity++;
}
if (uc & STAT_E_FRAMING){
pCh->dataSetIn |= I2_FRA;
pCh->icount.frame++;
}
if (uc & STAT_E_OVERRUN){
pCh->dataSetIn |= I2_OVR;
pCh->icount.overrun++;
}
break;
case STAT_MODEM:
// the answer to DSS_NOW request (not change)
pCh->dataSetIn = (pCh->dataSetIn
& ~(I2_RI | I2_CTS | I2_DCD | I2_DSR) )
| xlatDss[uc & 0xf];
wake_up_interruptible ( &pCh->dss_now_wait );
// NOTE(review): no break -- STAT_MODEM falls through into
// default, which only breaks, so this is benign as written.
default:
break;
}
} /* End of switch on status type */
if (dss_change) {
#ifdef USE_IQ
schedule_work(&pCh->tqueue_status);
#else
do_status(&pCh->tqueue_status);
#endif
}
}
else /* Or else, channel is invalid */
{
// Even though the channel is invalid, we must test the
// status to see how much additional data it has (to be
// skipped)
switch (*pc++)
{
case STAT_FLOW:
// NOTE(review): skips a hard-coded 4 bytes, whereas the
// valid-channel path skips sizeof(flowStat) -- confirm they
// agree. Other payload-carrying statuses are not skipped
// here at all.
pc += 4; /* Skip the data */
break;
default:
break;
}
}
} // End of while (there is still some status packet left)
break;
default: // Neither packet? should be impossible
ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1,
PTYPE_OF(pB->i2eLeadoffWord) );
write_unlock_irqrestore(&pB->read_fifo_spinlock,
bflags);
break;
} // End of switch on type of packets
} /*while(board I2_HAS_INPUT)*/
ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 );
// Send acknowledgement to the board even if there was no data!
pB->i2eOutMailWaiting |= MB_IN_STRIPPED;
return;
}
//******************************************************************************
// Function: i2Write2Fifo(pB,address,count)
// Parameters: Pointer to a board structure, source address, byte count
// Returns: bytes written
//
// Description:
// Writes count bytes to board io address(implied) from source
// Adjusts count, leaves reserve for next time around bypass cmds
//******************************************************************************
static int
i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count, int reserve)
{
	int written = 0;
	unsigned long flags;

	write_lock_irqsave(&pB->write_fifo_spinlock, flags);
	/* Only stuff the fifo when we are not waiting for the board to drain
	 * it, and when writing still leaves 'reserve' bytes free for later
	 * bypass commands. */
	if (!pB->i2eWaitingForEmptyFifo &&
			pB->i2eFifoRemains > (count + reserve)) {
		pB->i2eFifoRemains -= count;
		iiWriteBuf(pB, source, count);
		pB->i2eOutMailWaiting |= MB_OUT_STUFFED;
		written = count;
	}
	write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);

	return written;
}
//******************************************************************************
// Function: i2StuffFifoBypass(pB)
// Parameters: Pointer to a board structure
// Returns: Nothing
//
// Description:
// Stuffs as many bypass commands into the fifo as possible. This is simpler
// than stuffing data or inline commands to fifo, since we do not have
// flow-control to deal with.
//******************************************************************************
static inline void
i2StuffFifoBypass(i2eBordStrPtr pB)
{
i2ChanStrPtr pCh;
unsigned char *pRemove;
unsigned short stripIndex;
unsigned short packetSize;
unsigned short paddedSize;
unsigned short notClogged = 1;
unsigned long flags;
int bailout = 1000; // guards against spinning forever on a busy queue
// Continue processing so long as there are entries, or there is room in the
// fifo. Each entry represents a channel with something to do.
while ( --bailout && notClogged &&
(NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS))))
{
write_lock_irqsave(&pCh->Cbuf_spinlock, flags);
stripIndex = pCh->Cbuf_strip;
// as long as there are packets for this channel...
while (stripIndex != pCh->Cbuf_stuff) {
pRemove = &(pCh->Cbuf[stripIndex]);
packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader);
// Fifo writes are padded to an even byte count.
paddedSize = roundup(packetSize, 2);
if (paddedSize > 0) {
if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) {
notClogged = 0; /* fifo full */
i2QueueNeeds(pB, pCh, NEED_BYPASS); // Put back on queue
break; // Break from the channel
}
}
#ifdef DEBUG_FIFO
WriteDBGBuf("BYPS", pRemove, paddedSize);
#endif /* DEBUG_FIFO */
pB->debugBypassCount++;
// NOTE(review): the ring advances by the unpadded packetSize; the pad
// byte only exists in the fifo write. Presumably this mirrors how the
// stuffing side packs Cbuf -- confirm against i2QueueCommands.
pRemove += packetSize;
stripIndex += packetSize;
if (stripIndex >= CBUF_SIZE) {
stripIndex = 0;
pRemove = pCh->Cbuf;
}
}
// Done with this channel. Move to next, removing this one from
// the queue of channels if we cleaned it out (i.e., didn't get clogged.
pCh->Cbuf_strip = stripIndex;
write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
} // Either clogged or finished all the work
#ifdef IP2DEBUG_TRACE
if ( !bailout ) {
ip2trace (ITRC_NO_PORT, ITRC_ERROR, 1, 0 );
}
#endif
}
//******************************************************************************
// Function: i2StuffFifoFlow(pB)
// Parameters: Pointer to a board structure
// Returns: Nothing
//
// Description:
// Stuffs as many flow control packets into the fifo as possible. This is easier
// even than doing normal bypass commands, because there is always at most one
// packet, already assembled, for each channel.
//******************************************************************************
static inline void
i2StuffFifoFlow(i2eBordStrPtr pB)
{
	i2ChanStrPtr pCh;
	unsigned short paddedSize = roundup(sizeof(flowIn), 2);

	ip2trace(ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2,
		pB->i2eFifoRemains, paddedSize);

	/* Drain the NEED_FLOW queue: each entry is a channel whose single,
	 * pre-assembled flow-control packet (pCh->infl) is ready to send. */
	while ((pCh = i2DeQueueNeeds(pB, NEED_FLOW)) != NULL) {
		pB->debugFlowCount++;

		// NO Chan LOCK needed ???
		if (i2Write2Fifo(pB, (unsigned char *)&(pCh->infl),
				paddedSize, 0) == 0) {
			break;	/* fifo is clogged: stop for now */
		}
#ifdef DEBUG_FIFO
		WriteDBGBuf("FLOW", (unsigned char *)&(pCh->infl), paddedSize);
#endif /* DEBUG_FIFO */
	} // Either clogged or finished all the work

	ip2trace(ITRC_NO_PORT, ITRC_SFLOW, ITRC_RETURN, 0);
}
//******************************************************************************
// Function: i2StuffFifoInline(pB)
// Parameters: Pointer to a board structure
// Returns: Nothing
//
// Description:
// Stuffs as much data and inline commands into the fifo as possible. This is
// the most complex fifo-stuffing operation, since there if now the channel
// flow-control issue to deal with.
//******************************************************************************
static inline void
i2StuffFifoInline(i2eBordStrPtr pB)
{
i2ChanStrPtr pCh;
unsigned char *pRemove;
unsigned short stripIndex;
unsigned short packetSize;
unsigned short paddedSize;
unsigned short notClogged = 1;
unsigned short flowsize;
unsigned long flags;
int bailout = 1000;
int bailout2;
ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_ENTER, 3, pB->i2eFifoRemains,
pB->i2Dbuf_strip, pB->i2Dbuf_stuff );
// Continue processing so long as there are entries, or there is room in the
// fifo. Each entry represents a channel with something to do.
while ( --bailout && notClogged &&
(NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) )
{
write_lock_irqsave(&pCh->Obuf_spinlock, flags);
stripIndex = pCh->Obuf_strip;
ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff );
// as long as there are packets for this channel...
bailout2 = 1000;
while ( --bailout2 && stripIndex != pCh->Obuf_stuff) {
pRemove = &(pCh->Obuf[stripIndex]);
// Must determine whether this be a data or command packet to
// calculate correctly the header size and the amount of
// flow-control credit this type of packet will use.
if (PTYPE_OF(pRemove) == PTYPE_DATA) {
flowsize = DATA_COUNT_OF(pRemove);
packetSize = flowsize + sizeof(i2DataHeader);
} else {
flowsize = CMD_COUNT_OF(pRemove);
packetSize = flowsize + sizeof(i2CmdHeader);
}
flowsize = CREDIT_USAGE(flowsize);
paddedSize = roundup(packetSize, 2);
ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize );
// If we don't have enough credits from the board to send the data,
// flag the channel that we are waiting for flow control credit, and
// break out. This will clean up this channel and remove us from the
// queue of hot things to do.
ip2trace (CHANN, ITRC_SICMD, 5, 2, pCh->outfl.room, flowsize );
if (pCh->outfl.room <= flowsize) {
// Do Not have the credits to send this packet.
i2QueueNeeds(pB, pCh, NEED_CREDIT);
notClogged = 0;
break; // So to do next channel
}
// The 128-byte reserve keeps fifo room free for bypass commands
// queued later (see i2Write2Fifo's reserve parameter).
if ( (paddedSize > 0)
&& ( 0 == i2Write2Fifo(pB, pRemove, paddedSize, 128))) {
// Do Not have room in fifo to send this packet.
notClogged = 0;
i2QueueNeeds(pB, pCh, NEED_INLINE);
break; // Break from the channel
}
#ifdef DEBUG_FIFO
WriteDBGBuf("DATA", pRemove, paddedSize);
#endif /* DEBUG_FIFO */
pB->debugInlineCount++;
pCh->icount.tx += flowsize;
// Update current credits
pCh->outfl.room -= flowsize;
pCh->outfl.asof += flowsize;
if (PTYPE_OF(pRemove) == PTYPE_DATA) {
pCh->Obuf_char_count -= DATA_COUNT_OF(pRemove);
}
pRemove += packetSize;
stripIndex += packetSize;
ip2trace (CHANN, ITRC_SICMD, 6, 2, stripIndex, pCh->Obuf_strip);
if (stripIndex >= OBUF_SIZE) {
stripIndex = 0;
pRemove = pCh->Obuf;
ip2trace (CHANN, ITRC_SICMD, 7, 1, stripIndex );
}
} /* while */
if ( !bailout2 ) {
ip2trace (CHANN, ITRC_ERROR, 3, 0 );
}
// Done with this channel. Move to next, removing this one from the
// queue of channels if we cleaned it out (i.e., didn't get clogged.
pCh->Obuf_strip = stripIndex;
write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
if ( notClogged )
{
ip2trace (CHANN, ITRC_SICMD, 8, 0 );
// Progress was made: wake any writer blocked on output space.
if ( pCh->pTTY ) {
ip2_owake(pCh->pTTY);
}
}
} // Either clogged or finished all the work
if ( !bailout ) {
ip2trace (ITRC_NO_PORT, ITRC_ERROR, 4, 0 );
}
ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_RETURN, 1,pB->i2Dbuf_strip);
}
//******************************************************************************
// Function:   serviceOutgoingFifo(pB)
// Parameters: Pointer to a board structure
// Returns:    Nothing
//
// Description:
// Pushes any pending data into the outgoing fifo and then kicks the board
// with a mailbox hit so it notices. Skipped entirely while we are waiting
// for the board to drain the fifo; the "fifo stripped" mailbox interrupt
// will re-drive service in that case. May be called from interrupt or
// foreground context.
//******************************************************************************
static void
serviceOutgoingFifo(i2eBordStrPtr pB)
{
	// Nothing can be stuffed until the board empties our fifo.
	if (pB->i2eWaitingForEmptyFifo)
		return;

	// Service pending work in priority order: flow control first,
	// then bypass (urgent) commands, then ordinary inline data.
	i2StuffFifoFlow(pB);
	i2StuffFifoBypass(pB);
	i2StuffFifoInline(pB);

	iiSendPendingMail(pB);
}
//******************************************************************************
// Function:   i2ServiceBoard(pB)
// Parameters: Pointer to a board structure
// Returns:    Nothing
//
// Description:
// Normally this is called from interrupt level, but there is deliberately
// nothing in here specific to being called from interrupt level. All the
// hardware-specific, interrupt-specific things happen at the outer levels.
//
// For example, a timer interrupt could drive this routine for some sort of
// polled operation. The only requirement is that the programmer deal with any
// atomiticity/concurrency issues that result.
//
// This routine responds to the board's having sent mailbox information to the
// host (which would normally cause an interrupt). This routine reads the
// incoming mailbox. If there is no data in it, this board did not create the
// interrupt and/or has nothing to be done to it. (Except, if we have been
// waiting to write mailbox data to it, we may do so.
//
// Based on the value in the mailbox, we may take various actions.
//
// No checking here of pB validity: after all, it shouldn't have been called by
// the handler unless pB were on the list.
//******************************************************************************
static inline int
i2ServiceBoard ( i2eBordStrPtr pB )
{
	unsigned inmail;
	unsigned long flags;
	/* This should be atomic because of the way we are called... */
	// Use mail pre-read during startup if present, else read it now.
	if (NO_MAIL_HERE == ( inmail = pB->i2eStartMail ) ) {
		inmail = iiGetMail(pB);
	}
	// Consume the stashed startup mail so it is only acted on once.
	pB->i2eStartMail = NO_MAIL_HERE;
	ip2trace (ITRC_NO_PORT, ITRC_INTR, 2, 1, inmail );
	if (inmail != NO_MAIL_HERE) {
		// If the board has gone fatal, nothing to do but hit a bit that will
		// alert foreground tasks to protest!
		if ( inmail & MB_FATAL_ERROR ) {
			pB->i2eFatal = 1;
			goto exit_i2ServiceBoard;
		}
		/* Assuming no fatal condition, we proceed to do work */
		if ( inmail & MB_IN_STUFFED ) {
			pB->i2eFifoInInts++;
			i2StripFifo(pB);     /* There might be incoming packets */
		}
		if (inmail & MB_OUT_STRIPPED) {
			// Board drained our outbound fifo: the full fifo size
			// is available again, so clear the wait flag under the
			// write-fifo lock before restuffing below.
			pB->i2eFifoOutInts++;
			write_lock_irqsave(&pB->write_fifo_spinlock, flags);
			pB->i2eFifoRemains = pB->i2eFifoSize;
			pB->i2eWaitingForEmptyFifo = 0;
			write_unlock_irqrestore(&pB->write_fifo_spinlock,
					flags);
			ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains );
		}
		serviceOutgoingFifo(pB);
	}
	ip2trace (ITRC_NO_PORT, ITRC_INTR, 8, 0 );
exit_i2ServiceBoard:
	return 0;
}
| gpl-2.0 |
sztena/DG08_android4.2 | drivers/media/video/omap3isp/ispstat.c | 2535 | 30892 | /*
* ispstat.c
*
* TI OMAP3 ISP - Statistics core
*
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2009 Texas Instruments, Inc
*
* Contacts: David Cohen <dacohen@gmail.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "isp.h"
#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0)
/*
* MAGIC_SIZE must always be the greatest common divisor of
* AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
*/
#define MAGIC_SIZE 16
#define MAGIC_NUM 0x55
/* HACK: AF module seems to be writing one more paxel data than it should. */
#define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE
/*
* HACK: H3A modules go to an invalid state after have a SBL overflow. It makes
* the next buffer to start to be written in the same point where the overflow
* occurred instead of the configured address. The only known way to make it to
* go back to a valid state is having a valid buffer processing. Of course it
* requires at least a doubled buffer size to avoid an access to invalid memory
* region. But it does not fix everything. It may happen more than one
* consecutive SBL overflows. In that case, it might be unpredictable how many
* buffers the allocated memory should fit. For that case, a recover
* configuration was created. It produces the minimum buffer size for each H3A
* module and decrease the change for more SBL overflows. This recover state
* will be enabled every time a SBL overflow occur. As the output buffer size
* isn't big, it's possible to have an extra size able to fit many recover
* buffers making it extreamily unlikely to have an access to invalid memory
* region.
*/
#define NUM_H3A_RECOVER_BUFS 10
/*
* HACK: Because of HW issues the generic layer sometimes need to have
* different behaviour for different statistic modules.
*/
#define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af)
#define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
#define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
/*
 * Sync only the MAGIC_SIZE regions at the beginning and end of a stat
 * buffer, using the supplied partial-range sync operation (for-cpu or
 * for-device). The buffer is vmalloc'ed, so each magic region's page is
 * translated individually to a DMA address.
 */
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
					dma_addr_t, unsigned long, size_t,
					enum dma_data_direction))
{
	struct device *dev = stat->isp->dev;
	struct page *pg;
	dma_addr_t dma_addr;
	u32 offset;
	/* Initial magic words */
	pg = vmalloc_to_page(buf->virt_addr);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
	/* Final magic words */
	pg = vmalloc_to_page(buf->virt_addr + buf_size);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	/* Offset of the trailing magic within its page. */
	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}
/* Sync the magic-word regions towards the device (non-coherent bufs only). */
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
					       struct ispstat_buffer *buf,
					       u32 buf_size,
					       enum dma_data_direction dir)
{
	/* Coherent buffers need no explicit cache maintenance. */
	if (!IS_COHERENT_BUF(stat))
		__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
					  dma_sync_single_range_for_device);
}
/* Sync the magic-word regions towards the CPU (non-coherent bufs only). */
static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
					    struct ispstat_buffer *buf,
					    u32 buf_size,
					    enum dma_data_direction dir)
{
	/* Coherent buffers need no explicit cache maintenance. */
	if (!IS_COHERENT_BUF(stat))
		__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
					  dma_sync_single_range_for_cpu);
}
/*
 * Verify a buffer's magic-word pattern to detect hardware over/under-runs.
 * The leading magic must have been overwritten by real data (any non-magic
 * byte is enough); the trailing magic must be fully intact. Returns 0 when
 * the buffer looks valid, -EINVAL otherwise.
 */
static int isp_stat_buf_check_magic(struct ispstat *stat,
				    struct ispstat_buffer *buf)
{
	/* AF hw writes one extra paxel past buf_size (see AF_EXTRA_DATA). */
	const u32 buf_size = IS_H3A_AF(stat) ?
			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
	u8 *w;
	u8 *end;
	int ret = -EINVAL;
	isp_stat_buf_sync_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
	/* Checking initial magic numbers. They shouldn't be here anymore. */
	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
		if (likely(*w != MAGIC_NUM))
			ret = 0;
	if (ret) {
		dev_dbg(stat->isp->dev, "%s: beginning magic check does "
			"not match.\n", stat->subdev.name);
		return ret;
	}
	/* Checking magic numbers at the end. They must be still here. */
	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
	     w < end; w++) {
		if (unlikely(*w != MAGIC_NUM)) {
			dev_dbg(stat->isp->dev, "%s: endding magic check does "
				"not match.\n", stat->subdev.name);
			return -EINVAL;
		}
	}
	/* Hand the magic regions back to the device after inspection. */
	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_FROM_DEVICE);
	return 0;
}
/*
 * Write the MAGIC_NUM sentinel pattern at both ends of the buffer before
 * handing it to the hardware, so isp_stat_buf_check_magic() can later
 * detect whether the hw wrote the expected amount of data.
 */
static void isp_stat_buf_insert_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
	/*
	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
	 * buf->buf_size is set only after the buffer is queued. For now the
	 * right buf_size for the current configuration is pointed by
	 * stat->buf_size.
	 */
	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_BIDIRECTIONAL);
}
/* Hand the whole buffer's scatterlist back to the device (non-coherent). */
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
					 struct ispstat_buffer *buf)
{
	/* Coherent buffers need no explicit cache maintenance. */
	if (!IS_COHERENT_BUF(stat))
		dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
				       buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
/* Make the whole buffer's scatterlist visible to the CPU (non-coherent). */
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	/* Coherent buffers need no explicit cache maintenance. */
	if (!IS_COHERENT_BUF(stat))
		dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
				    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
static void isp_stat_buf_clear(struct ispstat *stat)
{
int i;
for (i = 0; i < STAT_MAX_BUFS; i++)
stat->buf[i].empty = 1;
}
static struct ispstat_buffer *
__isp_stat_buf_find(struct ispstat *stat, int look_empty)
{
struct ispstat_buffer *found = NULL;
int i;
for (i = 0; i < STAT_MAX_BUFS; i++) {
struct ispstat_buffer *curr = &stat->buf[i];
/*
* Don't select the buffer which is being copied to
* userspace or used by the module.
*/
if (curr == stat->locked_buf || curr == stat->active_buf)
continue;
/* Don't select uninitialised buffers if it's not required */
if (!look_empty && curr->empty)
continue;
/* Pick uninitialised buffer over anything else if look_empty */
if (curr->empty) {
found = curr;
break;
}
/* Choose the oldest buffer */
if (!found ||
(s32)curr->frame_number - (s32)found->frame_number < 0)
found = curr;
}
return found;
}
/* Return the eligible buffer holding the oldest frame, or NULL if none. */
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 0);
}
/* Prefer an empty buffer; fall back to the oldest one. NULL if none. */
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 1);
}
/*
 * Finalize the active buffer after the hw finished writing it: timestamp
 * it, validate the magic words, stamp frame/config counters and mark it
 * non-empty. Returns STAT_BUF_DONE on success, STAT_NO_BUF when there is
 * no active buffer or its data is corrupted.
 */
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;
	do_gettimeofday(&stat->active_buf->ts);
	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	/* The buffer is now queued; the hw will get a fresh one next. */
	stat->active_buf = NULL;
	return STAT_BUF_DONE;
}
/* Get next free buffer to write the statistics to and mark it active. */
static void isp_stat_buf_next(struct ispstat *stat)
{
	if (likely(!stat->active_buf)) {
		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
		return;
	}

	/* The previous active buffer was never queued; keep reusing it. */
	dev_dbg(stat->isp->dev, "%s: new buffer requested without "
				"queuing active one.\n",
		stat->subdev.name);
}
/*
 * Release the buffer locked for userspace: hand it back to the device and
 * clear locked_buf under stat_lock so the selection logic may pick it again.
 */
static void isp_stat_buf_release(struct ispstat *stat)
{
	unsigned long flags;
	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	stat->locked_buf = NULL;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
/*
 * Get buffer to userspace: lock the oldest valid buffer (discarding
 * corrupted ones on the way), check it fits the userspace buffer and copy
 * its data out. Returns the locked buffer on success, ERR_PTR(-EBUSY) when
 * no buffer is available, ERR_PTR(-EINVAL) when the userspace buffer is
 * too small, ERR_PTR(-EFAULT) on copy failure. The returned buffer stays
 * locked; the caller must release it via isp_stat_buf_release().
 */
static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
					       struct omap3isp_stat_data *data)
{
	int rval = 0;
	unsigned long flags;
	struct ispstat_buffer *buf;
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	/* Keep trying until a non-corrupted buffer is found or none left. */
	while (1) {
		buf = isp_stat_buf_find_oldest(stat);
		if (!buf) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
				stat->subdev.name);
			return ERR_PTR(-EBUSY);
		}
		if (isp_stat_buf_check_magic(stat, buf)) {
			dev_dbg(stat->isp->dev, "%s: current buffer has "
				"corrupted data\n.", stat->subdev.name);
			/* Mark empty because it doesn't have valid data. */
			buf->empty = 1;
		} else {
			/* Buffer isn't corrupted. */
			break;
		}
	}
	/* Lock the chosen buffer so nobody recycles it during the copy. */
	stat->locked_buf = buf;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
	if (buf->buf_size > data->buf_size) {
		dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
			 "not enough.\n", stat->subdev.name);
		isp_stat_buf_release(stat);
		return ERR_PTR(-EINVAL);
	}
	isp_stat_buf_sync_for_cpu(stat, buf);
	rval = copy_to_user(data->buf,
			    buf->virt_addr,
			    buf->buf_size);
	if (rval) {
		dev_info(stat->isp->dev,
			 "%s: failed copying %d bytes of stat data\n",
			 stat->subdev.name, rval);
		buf = ERR_PTR(-EFAULT);
		isp_stat_buf_release(stat);
	}
	return buf;
}
/*
 * Free all statistics buffers, whichever way they were allocated (IOMMU
 * mapping + dma_map_sg, or dma_alloc_coherent), and reset the per-buffer
 * bookkeeping. Safe to call on partially-allocated state: already-freed
 * or never-allocated entries are skipped.
 */
static void isp_stat_bufs_free(struct ispstat *stat)
{
	struct isp_device *isp = stat->isp;
	int i;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		if (!IS_COHERENT_BUF(stat)) {
			/* IOMMU-backed buffer: unmap then free the va area. */
			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
				continue;
			if (buf->iovm)
				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
					     buf->iovm->sgt->nents,
					     DMA_FROM_DEVICE);
			iommu_vfree(isp->iommu, buf->iommu_addr);
		} else {
			/* Coherent DMA buffer. */
			if (!buf->virt_addr)
				continue;
			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
					  buf->virt_addr, buf->dma_addr);
		}
		buf->iommu_addr = 0;
		buf->iovm = NULL;
		buf->dma_addr = 0;
		buf->virt_addr = NULL;
		buf->empty = 1;
	}
	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
		stat->subdev.name);
	stat->buf_alloc_size = 0;
	stat->active_buf = NULL;
}
/*
 * Allocate all statistics buffers through the ISP IOMMU and map them for
 * DMA. On any failure everything allocated so far is torn down via
 * isp_stat_bufs_free(). Returns 0 on success, -ENOMEM on failure.
 */
static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
{
	struct isp_device *isp = stat->isp;
	int i;
	stat->buf_alloc_size = size;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		struct iovm_struct *iovm;
		/* Buffers must not be mixed with coherent-DMA allocations. */
		WARN_ON(buf->dma_addr);
		buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size,
						IOMMU_FLAG);
		if (IS_ERR((void *)buf->iommu_addr)) {
			dev_err(stat->isp->dev,
				 "%s: Can't acquire memory for "
				 "buffer %d\n", stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		iovm = find_iovm_area(isp->iommu, buf->iommu_addr);
		if (!iovm ||
		    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
				DMA_FROM_DEVICE)) {
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->iovm = iovm;
		buf->virt_addr = da_to_va(stat->isp->iommu,
					  (u32)buf->iommu_addr);
		buf->empty = 1;
		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
			"iommu_addr=0x%08lx virt_addr=0x%08lx",
			stat->subdev.name, i, buf->iommu_addr,
			(unsigned long)buf->virt_addr);
	}
	return 0;
}
/*
 * Allocate all statistics buffers from the coherent DMA pool. On any
 * failure everything allocated so far is torn down via
 * isp_stat_bufs_free(). Returns 0 on success, -ENOMEM on failure.
 */
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
{
	int i;
	stat->buf_alloc_size = size;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		/* Buffers must not be mixed with IOMMU allocations. */
		WARN_ON(buf->iommu_addr);
		buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
					&buf->dma_addr, GFP_KERNEL | GFP_DMA);
		if (!buf->virt_addr || !buf->dma_addr) {
			dev_info(stat->isp->dev,
				 "%s: Can't acquire memory for "
				 "DMA buffer %d\n", stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->empty = 1;
		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
			"dma_addr=0x%08lx virt_addr=0x%08lx\n",
			stat->subdev.name, i, (unsigned long)buf->dma_addr,
			(unsigned long)buf->virt_addr);
	}
	return 0;
}
/*
 * (Re)allocate the statistics buffers to hold at least @size bytes each.
 * Existing buffers are reused when already big enough. Fails with -EBUSY
 * if the module is enabled or processing a buffer. Returns 0 on success.
 */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	unsigned long flags;
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	/* A locked buffer would be freed under userspace's feet. */
	BUG_ON(stat->locked_buf != NULL);
	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return 0;
	}
	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
	isp_stat_bufs_free(stat);
	/* Allocation strategy depends on whether the module uses SDMA. */
	if (IS_COHERENT_BUF(stat))
		return isp_stat_bufs_alloc_dma(stat, size);
	else
		return isp_stat_bufs_alloc_iommu(stat, size);
}
/*
 * Queue a V4L2 event announcing either a freshly produced buffer
 * (frame/config counters filled in) or a buffer error.
 */
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
	struct video_device *vdev = &stat->subdev.devnode;
	struct v4l2_event event;
	struct omap3isp_stat_event_status *status = (void *)event.u.data;

	memset(&event, 0, sizeof(event));
	event.type = stat->event_type;
	if (err) {
		status->buf_err = 1;
	} else {
		status->frame_number = stat->frame_number;
		status->config_counter = stat->config_counter;
	}
	v4l2_event_queue(vdev, &event);
}
/*
 * omap3isp_stat_request_statistics - Request statistics.
 * @data: Pointer to return statistics data.
 *
 * Returns 0 if successful.
 */
int omap3isp_stat_request_statistics(struct ispstat *stat,
				     struct omap3isp_stat_data *data)
{
	struct ispstat_buffer *buf;
	if (stat->state != ISPSTAT_ENABLED) {
		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
			stat->subdev.name);
		return -EINVAL;
	}
	/* ioctl_lock serializes against omap3isp_stat_config/enable. */
	mutex_lock(&stat->ioctl_lock);
	buf = isp_stat_buf_get(stat, data);
	if (IS_ERR(buf)) {
		mutex_unlock(&stat->ioctl_lock);
		return PTR_ERR(buf);
	}
	/* Copy buffer metadata back to userspace alongside the data. */
	data->ts = buf->ts;
	data->config_counter = buf->config_counter;
	data->frame_number = buf->frame_number;
	data->buf_size = buf->buf_size;
	/* Data has been consumed; the buffer may be recycled. */
	buf->empty = 1;
	isp_stat_buf_release(stat);
	mutex_unlock(&stat->ioctl_lock);
	return 0;
}
/*
 * omap3isp_stat_config - Receives new statistic engine configuration.
 * @new_conf: Pointer to config structure.
 *
 * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if
 * was unable to allocate memory for the buffer, or other errors if parameters
 * are invalid.
 */
int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
{
	int ret;
	unsigned long irqflags;
	struct ispstat_generic_config *user_cfg = new_conf;
	u32 buf_size;

	/*
	 * Validate the pointer before touching the configuration. The
	 * previous code read user_cfg->buf_size above this check,
	 * dereferencing NULL whenever a NULL configuration was passed.
	 */
	if (!new_conf) {
		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
			stat->subdev.name);
		return -EINVAL;
	}

	/* Remember the requested size; validate_params() may correct it. */
	buf_size = user_cfg->buf_size;

	mutex_lock(&stat->ioctl_lock);
	dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
		"size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
	ret = stat->ops->validate_params(stat, new_conf);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: configuration values are "
			"invalid.\n", stat->subdev.name);
		return ret;
	}
	if (buf_size != user_cfg->buf_size)
		dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
			"request to 0x%08lx\n", stat->subdev.name,
			(unsigned long)user_cfg->buf_size);
	/*
	 * Hack: H3A modules may need a doubled buffer size to avoid access
	 * to a invalid memory address after a SBL overflow.
	 * The buffer size is always PAGE_ALIGNED.
	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
	 * inserted at the end to data integrity check purpose.
	 * Hack 3: AF module writes one paxel data more than it should, so
	 * the buffer allocation must consider it to avoid invalid memory
	 * access.
	 * Hack 4: H3A need to allocate extra space for the recover state.
	 */
	if (IS_H3A(stat)) {
		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
		if (IS_H3A_AF(stat))
			/*
			 * Adding one extra paxel data size for each recover
			 * buffer + 2 regular ones.
			 */
			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
		if (stat->recover_priv) {
			struct ispstat_generic_config *recover_cfg =
				stat->recover_priv;
			buf_size += recover_cfg->buf_size *
				    NUM_H3A_RECOVER_BUFS;
		}
		buf_size = PAGE_ALIGN(buf_size);
	} else { /* Histogram */
		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
	}
	ret = isp_stat_bufs_alloc(stat, buf_size);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		return ret;
	}
	/* Apply the hw parameters atomically wrt the interrupt handler. */
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	stat->ops->set_params(stat, new_conf);
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	/*
	 * Returning the right future config_counter for this setup, so
	 * userspace can *know* when it has been applied.
	 */
	user_cfg->config_counter = stat->config_counter + stat->inc_config;
	/* Module has a valid configuration. */
	stat->configured = 1;
	dev_dbg(stat->isp->dev, "%s: module has been successfully "
		"configured.\n", stat->subdev.name);
	mutex_unlock(&stat->ioctl_lock);
	return 0;
}
/*
 * isp_stat_buf_process - Process statistic buffers.
 * @buf_state: points out if buffer is ready to be processed. It's necessary
 *	       because histogram needs to copy the data from internal memory
 *	       before be able to process the buffer.
 */
static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
{
	int ret = STAT_NO_BUF;
	/*
	 * atomic_add_unless() both tests and decrements buf_err: if an
	 * error was flagged for this frame, consume one "ignore" credit
	 * and skip queueing the buffer.
	 */
	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
		ret = isp_stat_buf_queue(stat);
		isp_stat_buf_next(stat);
	}
	return ret;
}
/* Ask the module-specific backend whether the hw engine is busy. */
int omap3isp_stat_pcr_busy(struct ispstat *stat)
{
	return stat->ops->busy(stat);
}
/*
 * Non-zero when the module is busy in any sense: the hw engine is
 * running, a buffer is being processed, or the driver state machine
 * hasn't reached DISABLED yet.
 */
int omap3isp_stat_busy(struct ispstat *stat)
{
	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
		(stat->state != ISPSTAT_DISABLED);
}
/*
 * isp_stat_pcr_enable - Disables/Enables statistic engines.
 * @pcr_enable: 0/1 - Disables/Enables the engine.
 *
 * Must be called from ISP driver when the module is idle and synchronized
 * with CCDC.
 */
static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
{
	if ((stat->state != ISPSTAT_ENABLING &&
	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
		/* Userspace has disabled the module. Aborting. */
		return;
	stat->ops->enable(stat, pcr_enable);
	/* Complete the pending state transition, if any. */
	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
		stat->state = ISPSTAT_DISABLED;
	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
		stat->state = ISPSTAT_ENABLED;
}
/*
 * Stop the hw engine for system suspend; an ENABLED module is marked
 * SUSPENDED so omap3isp_stat_resume() can re-arm it later.
 */
void omap3isp_stat_suspend(struct ispstat *stat)
{
	unsigned long flags;
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	if (stat->state != ISPSTAT_DISABLED)
		stat->ops->enable(stat, 0);
	if (stat->state == ISPSTAT_ENABLED)
		stat->state = ISPSTAT_SUSPENDED;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
/* Re-arm a module stopped by suspend; actual enabling happens with the
 * pipeline via isp_stat_try_enable(). */
void omap3isp_stat_resume(struct ispstat *stat)
{
	/* Module will be re-enabled with its pipeline */
	if (stat->state == ISPSTAT_SUSPENDED)
		stat->state = ISPSTAT_ENABLING;
}
/*
 * Turn the hw engine on if userspace requested it (state == ENABLING) and
 * the module is otherwise ready: not processing a buffer and with buffers
 * already allocated. Called at frame sync and stream-on time.
 */
static void isp_stat_try_enable(struct ispstat *stat)
{
	unsigned long irqflags;
	if (stat->priv == NULL)
		/* driver wasn't initialised */
		return;
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
	    stat->buf_alloc_size) {
		/*
		 * Userspace's requested to enable the engine but it wasn't yet.
		 * Let's do that now.
		 */
		stat->update = 1;
		isp_stat_buf_next(stat);
		stat->ops->setup_regs(stat, stat->priv);
		isp_stat_buf_insert_magic(stat, stat->active_buf);
		/*
		 * H3A module has some hw issues which forces the driver to
		 * ignore next buffers even if it was disabled in the meantime.
		 * On the other hand, Histogram shouldn't ignore buffers anymore
		 * if it's being enabled.
		 */
		if (!IS_H3A(stat))
			atomic_set(&stat->buf_err, 0);
		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
			stat->subdev.name);
	} else {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	}
}
/* Frame-sync interrupt hook: a safe point to enable a pending module. */
void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
{
	isp_stat_try_enable(stat);
}
/*
 * Handle a shared-buffer-logic overflow: mark the next buffers as bad and
 * request the recover configuration if one is available.
 */
void omap3isp_stat_sbl_overflow(struct ispstat *stat)
{
	unsigned long irqflags;
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	/*
	 * Due to a H3A hw issue which prevents the next buffer to start from
	 * the correct memory address, 2 buffers must be ignored.
	 */
	atomic_set(&stat->buf_err, 2);
	/*
	 * If more than one SBL overflow happen in a row, H3A module may access
	 * invalid memory region.
	 * stat->sbl_ovl_recover is set to tell to the driver to temporarily use
	 * a soft configuration which helps to avoid consecutive overflows.
	 */
	if (stat->recover_priv)
		stat->sbl_ovl_recover = 1;
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
}
/*
 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
 * @enable: 0/1 - Disables/Enables the engine.
 *
 * Client should configure all the module registers before this.
 * This function can be called from a userspace request.
 */
int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
{
	unsigned long irqflags;
	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
		stat->subdev.name, enable ? "enable" : "disable");
	/* Prevent enabling while configuring */
	mutex_lock(&stat->ioctl_lock);
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (!stat->configured && enable) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
			"never been successfully configured so far.\n",
			stat->subdev.name);
		return -EINVAL;
	}
	/*
	 * Drive the state machine: a pending opposite-direction request is
	 * simply cancelled; otherwise the ENABLING/DISABLING transition is
	 * started and completed later by isp_stat_pcr_enable()/__stat_isr().
	 */
	if (enable) {
		if (stat->state == ISPSTAT_DISABLING)
			/* Previous disabling request wasn't done yet */
			stat->state = ISPSTAT_ENABLED;
		else if (stat->state == ISPSTAT_DISABLED)
			/* Module is now being enabled */
			stat->state = ISPSTAT_ENABLING;
	} else {
		if (stat->state == ISPSTAT_ENABLING) {
			/* Previous enabling request wasn't done yet */
			stat->state = ISPSTAT_DISABLED;
		} else if (stat->state == ISPSTAT_ENABLED) {
			/* Module is now being disabled */
			stat->state = ISPSTAT_DISABLING;
			isp_stat_buf_clear(stat);
		}
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	mutex_unlock(&stat->ioctl_lock);
	return 0;
}
/*
 * V4L2 subdev s_stream handler: arm the module on stream-on (only if
 * userspace already enabled it) and fully shut it down on stream-off.
 * Always returns 0.
 */
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);
	if (enable) {
		/*
		 * Only set enable PCR bit if the module was previously
		 * enabled through ioct.
		 */
		isp_stat_try_enable(stat);
	} else {
		unsigned long flags;
		/* Disable PCR bit and config enable field */
		omap3isp_stat_enable(stat, 0);
		spin_lock_irqsave(&stat->isp->stat_lock, flags);
		stat->ops->enable(stat, 0);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		/*
		 * If module isn't busy, a new interrupt may come or not to
		 * set the state to DISABLED. As Histogram needs to read its
		 * internal memory to clear it, let interrupt handler
		 * responsible of changing state to DISABLED. If the last
		 * interrupt is coming, it's still safe as the handler will
		 * ignore the second time when state is already set to DISABLED.
		 * It's necessary to synchronize Histogram with streamoff, once
		 * the module may be considered idle before last SDMA transfer
		 * starts if we return here.
		 */
		if (!omap3isp_stat_pcr_busy(stat))
			omap3isp_stat_isr(stat);
		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
			stat->subdev.name);
	}
	return 0;
}
/*
 * __stat_isr - Interrupt handler for statistic drivers
 * @from_dma: non-zero when called from the SDMA completion path instead of
 *	      the hw interrupt (only relevant for Histogram).
 *
 * Queues the buffer the hw just finished, programs the next one (possibly
 * with the SBL-overflow recover configuration), re-enables the engine and
 * finally notifies userspace with a V4L2 event.
 */
static void __stat_isr(struct ispstat *stat, int from_dma)
{
	int ret = STAT_BUF_DONE;
	int buf_processing;
	unsigned long irqflags;
	struct isp_pipeline *pipe;
	/*
	 * stat->buf_processing must be set before disable module. It's
	 * necessary to not inform too early the buffers aren't busy in case
	 * of SDMA is going to be used.
	 */
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_DISABLED) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		return;
	}
	buf_processing = stat->buf_processing;
	stat->buf_processing = 1;
	stat->ops->enable(stat, 0);
	/* Re-entry: a previous invocation is still processing a buffer. */
	if (buf_processing && !from_dma) {
		if (stat->state == ISPSTAT_ENABLED) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			dev_err(stat->isp->dev,
				"%s: interrupt occurred when module was still "
				"processing a buffer.\n", stat->subdev.name);
			ret = STAT_NO_BUF;
			goto out;
		} else {
			/*
			 * Interrupt handler was called from streamoff when
			 * the module wasn't busy anymore to ensure it is being
			 * disabled after process last buffer. If such buffer
			 * processing has already started, no need to do
			 * anything else.
			 */
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			return;
		}
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	/* If it's busy we can't process this buffer anymore */
	if (!omap3isp_stat_pcr_busy(stat)) {
		if (!from_dma && stat->ops->buf_process)
			/* Module still need to copy data to buffer. */
			ret = stat->ops->buf_process(stat);
		if (ret == STAT_BUF_WAITING_DMA)
			/* Buffer is not ready yet */
			return;
		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
		/*
		 * Histogram needs to read its internal memory to clear it
		 * before be disabled. For that reason, common statistic layer
		 * can return only after call stat's buf_process() operator.
		 */
		if (stat->state == ISPSTAT_DISABLING) {
			stat->state = ISPSTAT_DISABLED;
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			stat->buf_processing = 0;
			return;
		}
		pipe = to_isp_pipeline(&stat->subdev.entity);
		stat->frame_number = atomic_read(&pipe->frame_number);
		/*
		 * Before this point, 'ret' stores the buffer's status if it's
		 * ready to be processed. Afterwards, it holds the status if
		 * it was processed successfully.
		 */
		ret = isp_stat_buf_process(stat, ret);
		if (likely(!stat->sbl_ovl_recover)) {
			stat->ops->setup_regs(stat, stat->priv);
		} else {
			/*
			 * Using recover config to increase the chance to have
			 * a good buffer processing and make the H3A module to
			 * go back to a valid state.
			 */
			stat->update = 1;
			stat->ops->setup_regs(stat, stat->recover_priv);
			stat->sbl_ovl_recover = 0;
			/*
			 * Set 'update' in case of the module needs to use
			 * regular configuration after next buffer.
			 */
			stat->update = 1;
		}
		isp_stat_buf_insert_magic(stat, stat->active_buf);
		/*
		 * Hack: H3A modules may access invalid memory address or send
		 * corrupted data to userspace if more than 1 SBL overflow
		 * happens in a row without re-writing its buffer's start memory
		 * address in the meantime. Such situation is avoided if the
		 * module is not immediately re-enabled when the ISR misses the
		 * timing to process the buffer and to setup the registers.
		 * Because of that, pcr_enable(1) was moved to inside this 'if'
		 * block. But the next interruption will still happen as during
		 * pcr_enable(0) the module was busy.
		 */
		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	} else {
		/*
		 * If a SBL overflow occurs and the H3A driver misses the timing
		 * to process the buffer, stat->buf_err is set and won't be
		 * cleared now. So the next buffer will be correctly ignored.
		 * It's necessary due to a hw issue which makes the next H3A
		 * buffer to start from the memory address where the previous
		 * one stopped, instead of start where it was configured to.
		 * Do not "stat->buf_err = 0" here.
		 */
		if (stat->ops->buf_process)
			/*
			 * Driver may need to erase current data prior to
			 * process a new buffer. If it misses the timing, the
			 * next buffer might be wrong. So should be ignored.
			 * It happens only for Histogram.
			 */
			atomic_set(&stat->buf_err, 1);
		ret = STAT_NO_BUF;
		dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
			"device is busy.\n", stat->subdev.name);
	}
out:
	stat->buf_processing = 0;
	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
}
/* Hardware-interrupt entry point into the common stat ISR. */
void omap3isp_stat_isr(struct ispstat *stat)
{
	__stat_isr(stat, 0);
}
/* SDMA-completion entry point into the common stat ISR (Histogram). */
void omap3isp_stat_dma_isr(struct ispstat *stat)
{
	__stat_isr(stat, 1);
}
/*
 * Initialise the module's V4L2 subdev and media entity: single sink pad,
 * device node with event support. Returns media_entity_init()'s result.
 */
static int isp_stat_init_entities(struct ispstat *stat, const char *name,
				  const struct v4l2_subdev_ops *sd_ops)
{
	struct v4l2_subdev *subdev = &stat->subdev;
	struct media_entity *me = &subdev->entity;
	v4l2_subdev_init(subdev, sd_ops);
	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->nevents = STAT_NEVENTS;
	v4l2_set_subdevdata(subdev, stat);
	stat->pad.flags = MEDIA_PAD_FL_SINK;
	me->ops = NULL;
	return media_entity_init(me, 1, &stat->pad, 0);
}
/* Subscribe to this module's statistics-done event; other types rejected. */
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
				  struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	return sub->type == stat->event_type ?
		v4l2_event_subscribe(fh, sub) : -EINVAL;
}
/* Pass-through unsubscribe: no per-module bookkeeping is needed. */
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
				    struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/* Tear down the media entity and unregister the subdev. */
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
	media_entity_cleanup(&stat->subdev.entity);
	v4l2_device_unregister_subdev(&stat->subdev);
}
/* Register the module's subdev with the given V4L2 device. */
int omap3isp_stat_register_entities(struct ispstat *stat,
				    struct v4l2_device *vdev)
{
	return v4l2_device_register_subdev(vdev, &stat->subdev);
}
/*
 * omap3isp_stat_init - Allocate the buffer table and initialize module state.
 * Returns 0 on success, -ENOMEM if the buffer table cannot be allocated,
 * or the error from isp_stat_init_entities().
 *
 * Fix: the original leaked stat->buf when isp_stat_init_entities()
 * failed; release it (and the mutex) on that error path so the caller
 * does not have to know about partially-initialized state.
 */
int omap3isp_stat_init(struct ispstat *stat, const char *name,
		       const struct v4l2_subdev_ops *sd_ops)
{
	int ret;

	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
	if (!stat->buf)
		return -ENOMEM;

	isp_stat_buf_clear(stat);
	mutex_init(&stat->ioctl_lock);
	atomic_set(&stat->buf_err, 0);

	ret = isp_stat_init_entities(stat, name, sd_ops);
	if (ret < 0) {
		/* Undo everything done above; don't leak the buffer table. */
		mutex_destroy(&stat->ioctl_lock);
		kfree(stat->buf);
	}

	return ret;
}
/* Release the statistics buffers and the buffer table from _init(). */
void omap3isp_stat_free(struct ispstat *stat)
{
	isp_stat_bufs_free(stat);
	kfree(stat->buf);
}
| gpl-2.0 |
xInterlopeRx/android_kernel_samsung_msm8930-common | drivers/firmware/efivars.c | 3303 | 31961 | /*
* EFI Variables - efivars.c
*
* Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*
* This code takes all variables accessible from EFI runtime and
* exports them via sysfs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Changelog:
*
* 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
* remove check for efi_enabled in exit
* add MODULE_VERSION
*
* 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
* minor bug fixes
*
* 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com)
* converted driver to export variable information via sysfs
* and moved to drivers/firmware directory
* bumped revision number to v0.07 to reflect conversion & move
*
* 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
* fix locking per Peter Chubb's findings
*
* 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
* move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse()
*
* 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
* use list_for_each_safe when deleting vars.
* remove ifdef CONFIG_SMP around include <linux/smp.h>
* v0.04 release to linux-ia64@linuxia64.org
*
* 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Moved vars from /proc/efi to /proc/efi/vars, and made
* efi.c own the /proc/efi directory.
* v0.03 release to linux-ia64@linuxia64.org
*
* 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* At the request of Stephane, moved ownership of /proc/efi
* to efi.c, and now efivars lives under /proc/efi/vars.
*
* 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Feedback received from Stephane Eranian incorporated.
* efivar_write() checks copy_from_user() return value.
* efivar_read/write() returns proper errno.
* v0.02 release to linux-ia64@linuxia64.org
*
* 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
* v0.01 release to linux-ia64@linuxia64.org
*/
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pstore.h>
#include <asm/uaccess.h>
#define EFIVARS_VERSION "0.08"
#define EFIVARS_DATE "2004-May-17"

MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);

/* Bytes of variable name reserved for pstore dump record names. */
#define DUMP_NAME_LEN 52

/*
 * The maximum size of VariableName + Data = 1024
 * Therefore, it's reasonable to save that much
 * space in each part of the structure,
 * and we use a page for reading/writing.
 */
struct efi_variable {
	efi_char16_t VariableName[1024/sizeof(efi_char16_t)];
	efi_guid_t VendorGuid;
	unsigned long DataSize;
	__u8 Data[1024];
	efi_status_t Status;
	__u32 Attributes;
} __attribute__((packed));

/* One sysfs kobject per firmware variable, linked on efivars->list. */
struct efivar_entry {
	struct efivars *efivars;
	struct efi_variable var;
	struct list_head list;
	struct kobject kobj;
};

/* show/store pair backing one file in a variable's sysfs directory. */
struct efivar_attribute {
	struct attribute attr;
	ssize_t (*show) (struct efivar_entry *entry, char *buf);
	ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};

/* Attributes used when writing pstore dump variables to firmware. */
#define PSTORE_EFI_ATTRIBUTES \
	(EFI_VARIABLE_NON_VOLATILE | \
	 EFI_VARIABLE_BOOTSERVICE_ACCESS | \
	 EFI_VARIABLE_RUNTIME_ACCESS)

/* Declare a named struct efivar_attribute instance. */
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode}, \
	.show = _show, \
	.store = _store, \
};

#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
#define to_efivar_entry(obj)  container_of(obj, struct efivar_entry, kobj)

/*
 * Prototype for sysfs creation function
 */
static int
efivar_create_sysfs_entry(struct efivars *efivars,
			  unsigned long variable_name_size,
			  efi_char16_t *variable_name,
			  efi_guid_t *vendor_guid);
/*
 * utf16_strnlen - number of non-NUL UTF-16 code units in @s, capped at
 * @maxlength units.
 *
 * Fix: test the bound *before* dereferencing.  The original evaluated
 * *s++ first, so it always read s[maxlength] when the string filled the
 * whole window (and read s[0] even for maxlength == 0) - an
 * out-of-bounds access.  Return values are unchanged.
 */
static unsigned long
utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
	unsigned long length;

	for (length = 0; length < maxlength && *s != 0; length++)
		s++;
	return length;
}
/* Unbounded variant: code units in a NUL-terminated UTF-16 string. */
static inline unsigned long
utf16_strlen(efi_char16_t *s)
{
	return utf16_strnlen(s, ~0UL);
}
/*
 * Return the number of bytes in the length of this string (excluding
 * the NUL).  Note: this is NOT the same as the number of unicode
 * characters.  @maxlength is in bytes.
 */
static inline unsigned long
utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
	return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
}
/*
 * Lexicographically compare up to @len UTF-16 code units.
 * Returns -1/0/1 like strncmp; stops early at a shared NUL.
 */
static inline int
utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
{
	for (; len != 0; len--, a++, b++) {
		if (*a != *b)
			return *a < *b ? -1 : 1;
		if (*a == 0)	/* implies *b == 0 too */
			return 0;
	}
	return 0;
}
/*
 * Check that @buffer holds a well-formed EFI device path: a chain of
 * generic nodes whose claimed lengths stay inside the buffer,
 * terminated by an end-of-entire-path node.  @match is unused here
 * (part of the common validate-callback signature).
 */
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
		     unsigned long len)
{
	struct efi_generic_dev_path *node;
	int offset = 0;

	node = (struct efi_generic_dev_path *)buffer;

	if (len < sizeof(*node))
		return false;

	while (offset <= len - sizeof(*node) &&
	       node->length >= sizeof(*node) &&
	       node->length <= len - offset) {
		offset += node->length;

		/* Either end-of-path encoding closes the whole path. */
		if ((node->type == EFI_DEV_END_PATH ||
		     node->type == EFI_DEV_END_PATH2) &&
		    node->sub_type == EFI_DEV_END_ENTIRE)
			return true;

		node = (struct efi_generic_dev_path *)(buffer + offset);
	}

	/*
	 * If we're here then either node->length pointed past the end
	 * of the buffer or we reached the end of the buffer without
	 * finding a device path end node.
	 */
	return false;
}
/*
 * BootOrder/DriverOrder are arrays of 16-bit integers, so any even
 * byte count is acceptable.
 */
static bool
validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
		    unsigned long len)
{
	return (len & 1) == 0;
}
/*
 * Validate a Boot####/Driver#### load option: a fixed header (4 bytes of
 * attributes, u16 FilePathListLength), a NUL-terminated UTF-16
 * description, then a device path which is validated in turn.  @match is
 * the offset of the "####" hex suffix in the variable name.
 *
 * NOTE(review): sizeof(var->VariableName) is in bytes but
 * utf16_strnlen() takes a count of 16-bit units - confirm the 2x
 * overshoot is intended (it stays inside the packed struct).
 */
static bool
validate_load_option(struct efi_variable *var, int match, u8 *buffer,
		     unsigned long len)
{
	u16 filepathlength;
	int i, desclength = 0, namelen;

	namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));

	/* Either "Boot" or "Driver" followed by four digits of hex */
	for (i = match; i < match+4; i++) {
		if (var->VariableName[i] > 127 ||
		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
			/* Not a ####-suffixed name: this rule doesn't apply. */
			return true;
	}

	/* Reject it if there's 4 digits of hex and then further content */
	if (namelen > match + 4)
		return false;

	/* A valid entry must be at least 8 bytes */
	if (len < 8)
		return false;

	filepathlength = buffer[4] | buffer[5] << 8;

	/*
	 * There's no stored length for the description, so it has to be
	 * found by hand
	 */
	desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;

	/* Each boot entry must have a descriptor */
	if (!desclength)
		return false;

	/*
	 * If the sum of the length of the description, the claimed filepath
	 * length and the original header are greater than the length of the
	 * variable, it's malformed
	 */
	if ((desclength + filepathlength + 6) > len)
		return false;

	/*
	 * And, finally, check the filepath
	 */
	return validate_device_path(var, match, buffer + desclength + 6,
				    filepathlength);
}
/* The variable must contain exactly one 16-bit integer. */
static bool
validate_uint16(struct efi_variable *var, int match, u8 *buffer,
		unsigned long len)
{
	return len == 2;
}
/*
 * Accept only 7-bit characters and require a terminating NUL inside
 * the buffer; an unterminated or non-ASCII string is rejected.
 */
static bool
validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
		      unsigned long len)
{
	unsigned long pos;

	for (pos = 0; pos < len; pos++) {
		if (buffer[pos] == 0)
			return true;
		if (buffer[pos] > 127)
			return false;
	}

	return false;	/* no terminator found */
}
/* A content-validation callback keyed by (possibly wildcarded) name. */
struct variable_validate {
	char *name;
	bool (*validate)(struct efi_variable *var, int match, u8 *data,
			 unsigned long len);
};

/*
 * Known EFI global variables and their content checkers.  A trailing
 * '*' in the name matches any suffix (the hex digits of Boot####);
 * the empty entry terminates the table.
 */
static const struct variable_validate variable_validate[] = {
	{ "BootNext", validate_uint16 },
	{ "BootOrder", validate_boot_order },
	{ "DriverOrder", validate_boot_order },
	{ "Boot*", validate_load_option },
	{ "Driver*", validate_load_option },
	{ "ConIn", validate_device_path },
	{ "ConInDev", validate_device_path },
	{ "ConOut", validate_device_path },
	{ "ConOutDev", validate_device_path },
	{ "ErrOut", validate_device_path },
	{ "ErrOutDev", validate_device_path },
	{ "Timeout", validate_uint16 },
	{ "Lang", validate_ascii_string },
	{ "PlatformLang", validate_ascii_string },
	{ "", NULL },
};
/*
 * Match the variable name against variable_validate[] and run the
 * matching content checker.  Names that match no entry, or that
 * contain any non-ASCII character, are accepted unchecked.
 */
static bool
validate_var(struct efi_variable *var, u8 *data, unsigned long len)
{
	int i;
	u16 *unicode_name = var->VariableName;

	for (i = 0; variable_validate[i].validate != NULL; i++) {
		const char *name = variable_validate[i].name;
		int match;

		for (match = 0; ; match++) {
			char c = name[match];
			u16 u = unicode_name[match];

			/* All special variables are plain ascii */
			if (u > 127)
				return true;

			/* Wildcard in the matching name means we've matched */
			if (c == '*')
				return variable_validate[i].validate(var,
							match, data, len);

			/* Case sensitive match */
			if (c != u)
				break;

			/* Reached the end of the string while matching */
			if (!c)
				return variable_validate[i].validate(var,
							match, data, len);
		}
	}

	return true;
}
/*
 * Refresh @var's attributes and data from firmware into the 1024-byte
 * in-kernel cache.  Caller must hold efivars->lock.
 */
static efi_status_t
get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
	efi_status_t status;

	/* Reset to the full buffer size; get_variable() writes back the
	 * actual size. */
	var->DataSize = 1024;
	status = efivars->ops->get_variable(var->VariableName,
					    &var->VendorGuid,
					    &var->Attributes,
					    &var->DataSize,
					    var->Data);
	return status;
}
/*
 * Locked wrapper around get_var_data_locked(); logs a warning on any
 * firmware failure and returns the raw EFI status.
 */
static efi_status_t
get_var_data(struct efivars *efivars, struct efi_variable *var)
{
	efi_status_t status;

	spin_lock(&efivars->lock);
	status = get_var_data_locked(efivars, var);
	spin_unlock(&efivars->lock);

	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
			status);
	}
	return status;
}
/*
 * sysfs "guid" attribute: print the entry's vendor GUID plus a newline.
 * Returns the number of bytes written, or 0 without an entry/buffer.
 *
 * Fix: the original computed &entry->var before the NULL check on
 * entry; validate the pointers first.
 * NOTE(review): the 0 return on bad pointers differs from the -EINVAL
 * the sibling readers use - kept for interface compatibility.
 */
static ssize_t
efivar_guid_read(struct efivar_entry *entry, char *buf)
{
	struct efi_variable *var;
	char *str = buf;

	if (!entry || !buf)
		return 0;

	var = &entry->var;
	efi_guid_unparse(&var->VendorGuid, str);
	str += strlen(str);
	str += sprintf(str, "\n");

	return str - buf;
}
static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
char *str = buf;
efi_status_t status;
if (!entry || !buf)
return -EINVAL;
status = get_var_data(entry->efivars, var);
if (status != EFI_SUCCESS)
return -EIO;
if (var->Attributes & 0x1)
str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
if (var->Attributes & 0x2)
str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
if (var->Attributes & 0x4)
str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
return str - buf;
}
/*
 * sysfs "size" attribute: the variable's current DataSize in hex,
 * refreshed from firmware.
 *
 * Fix: pointer checks moved before &entry->var is formed.
 */
static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
	struct efi_variable *var;
	char *str = buf;
	efi_status_t status;

	if (!entry || !buf)
		return -EINVAL;

	var = &entry->var;
	status = get_var_data(entry->efivars, var);
	if (status != EFI_SUCCESS)
		return -EIO;

	str += sprintf(str, "0x%lx\n", var->DataSize);

	return str - buf;
}
/*
 * sysfs "data" attribute: raw variable payload (at most 1024 bytes by
 * the struct efi_variable cache size), refreshed from firmware.
 *
 * Fix: pointer checks moved before &entry->var is formed.
 */
static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
	struct efi_variable *var;
	efi_status_t status;

	if (!entry || !buf)
		return -EINVAL;

	var = &entry->var;
	status = get_var_data(entry->efivars, var);
	if (status != EFI_SUCCESS)
		return -EIO;

	memcpy(buf, var->Data, var->DataSize);

	return var->DataSize;
}
/*
 * We allow each variable to be edited via rewriting the
 * entire efi variable structure.
 *
 * The buffer must be exactly one struct efi_variable whose name and
 * GUID match this entry; attributes and content are validated before
 * the firmware write, and the in-kernel copy is updated last.
 */
static ssize_t
efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
{
	struct efi_variable *new_var, *var = &entry->var;
	struct efivars *efivars = entry->efivars;
	efi_status_t status = EFI_NOT_FOUND;

	if (count != sizeof(struct efi_variable))
		return -EINVAL;

	new_var = (struct efi_variable *)buf;
	/*
	 * If only updating the variable data, then the name
	 * and guid should remain the same
	 */
	if (memcmp(new_var->VariableName, var->VariableName, sizeof(var->VariableName)) ||
		efi_guidcmp(new_var->VendorGuid, var->VendorGuid)) {
		printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
		return -EINVAL;
	}

	if ((new_var->DataSize <= 0) || (new_var->Attributes == 0)){
		printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
		return -EINVAL;
	}

	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
		printk(KERN_ERR "efivars: Malformed variable content\n");
		return -EINVAL;
	}

	spin_lock(&efivars->lock);
	status = efivars->ops->set_variable(new_var->VariableName,
					    &new_var->VendorGuid,
					    new_var->Attributes,
					    new_var->DataSize,
					    new_var->Data);

	spin_unlock(&efivars->lock);

	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
			status);
		return -EIO;
	}

	/* Firmware accepted it; mirror into the cached copy. */
	memcpy(&entry->var, new_var, count);

	return count;
}
/*
 * sysfs "raw_var" read: dump the whole cached struct efi_variable,
 * refreshed from firmware first.  Returns 0 without an entry/buffer.
 *
 * Fix: pointer checks moved before &entry->var is formed.
 */
static ssize_t
efivar_show_raw(struct efivar_entry *entry, char *buf)
{
	struct efi_variable *var;
	efi_status_t status;

	if (!entry || !buf)
		return 0;

	var = &entry->var;
	status = get_var_data(entry->efivars, var);
	if (status != EFI_SUCCESS)
		return -EIO;

	memcpy(buf, var, sizeof(*var));

	return sizeof(*var);
}
/*
* Generic read/write functions that call the specific functions of
* the attributes...
*/
/*
 * Generic sysfs show: gate on CAP_SYS_ADMIN, then dispatch to the
 * attribute's own show handler (-EIO if it has none).
 */
static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
	struct efivar_entry *var = to_efivar_entry(kobj);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!efivar_attr->show)
		return -EIO;

	return efivar_attr->show(var, buf);
}
/*
 * Generic sysfs store: gate on CAP_SYS_ADMIN, then dispatch to the
 * attribute's own store handler (-EIO if it has none).
 */
static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t count)
{
	struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
	struct efivar_entry *var = to_efivar_entry(kobj);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!efivar_attr->store)
		return -EIO;

	return efivar_attr->store(var, buf, count);
}
/* Generic sysfs ops: route show/store to the per-attribute handlers. */
static const struct sysfs_ops efivar_attr_ops = {
	.show = efivar_attr_show,
	.store = efivar_attr_store,
};

/* kobject release: entry was allocated in efivar_create_sysfs_entry(). */
static void efivar_release(struct kobject *kobj)
{
	struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
	kfree(var);
}
/* Per-variable files; all root read-only except raw_var (root r/w). */
static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);

static struct attribute *def_attrs[] = {
	&efivar_attr_guid.attr,
	&efivar_attr_size.attr,
	&efivar_attr_attributes.attr,
	&efivar_attr_data.attr,
	&efivar_attr_raw_var.attr,
	NULL,
};

static struct kobj_type efivar_ktype = {
	.release = efivar_release,
	.sysfs_ops = &efivar_attr_ops,
	.default_attrs = def_attrs,
};

/* Defined further below; needed early by register_efivars(). */
static struct pstore_info efi_pstore_info;

/* Drop the last reference; efivar_release() then frees the entry. */
static inline void
efivar_unregister(struct efivar_entry *var)
{
	kobject_put(&var->kobj);
}
#ifdef CONFIG_PSTORE
/*
 * pstore open: position the walk cursor at the first variable.
 * Takes efivars->lock and deliberately returns with it held; it is
 * released in efi_pstore_close() after the read walk finishes.
 */
static int efi_pstore_open(struct pstore_info *psi)
{
	struct efivars *efivars = psi->data;

	spin_lock(&efivars->lock);
	efivars->walk_entry = list_first_entry(&efivars->list,
					       struct efivar_entry, list);
	return 0;
}
/* pstore close: releases the lock efi_pstore_open() acquired. */
static int efi_pstore_close(struct pstore_info *psi)
{
	struct efivars *efivars = psi->data;

	spin_unlock(&efivars->lock);
	return 0;
}
/*
 * pstore read: advance the walk cursor (set up by efi_pstore_open(),
 * efivars->lock held across the whole open/read/close cycle) to the
 * next crash-dump variable - vendor GUID LINUX_EFI_CRASH_GUID, name
 * "dump-type<type>-<part>-<seconds>" - and return a freshly allocated
 * copy of its data.  Returns the record size, 0 at end of walk, or
 * -ENOMEM.
 */
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
			       struct timespec *timespec,
			       char **buf, struct pstore_info *psi)
{
	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
	struct efivars *efivars = psi->data;
	char name[DUMP_NAME_LEN];
	int i;
	unsigned int part, size;
	unsigned long time;

	while (&efivars->walk_entry->list != &efivars->list) {
		if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
				 vendor)) {
			/* Narrowing copy: dump names are plain ASCII. */
			for (i = 0; i < DUMP_NAME_LEN; i++) {
				name[i] = efivars->walk_entry->var.VariableName[i];
			}
			if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
				*id = part;
				timespec->tv_sec = time;
				timespec->tv_nsec = 0;
				/* Lock already held; refresh the data. */
				get_var_data_locked(efivars, &efivars->walk_entry->var);
				size = efivars->walk_entry->var.DataSize;
				/*
				 * NOTE(review): GFP_KERNEL allocation while
				 * holding efivars->lock (a spinlock) can
				 * sleep; and the -ENOMEM return leaves the
				 * cursor unmoved with the lock still held
				 * (released only by efi_pstore_close()) -
				 * confirm pstore always closes after an
				 * error.
				 */
				*buf = kmalloc(size, GFP_KERNEL);
				if (*buf == NULL)
					return -ENOMEM;
				memcpy(*buf, efivars->walk_entry->var.Data,
				       size);
				efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
								 struct efivar_entry, list);
				return size;
			}
		}
		efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
						 struct efivar_entry, list);
	}
	return 0;
}
/*
 * pstore write: store a dump as an EFI variable named
 * "dump-type<type>-<part>-<seconds>", first deleting any existing
 * variable carrying the same "dump-type<type>-<part>-" prefix.
 * Called with efivars->lock NOT held; takes it around the firmware
 * accesses.  Returns 0, or the error from sysfs entry creation.
 *
 * Fix: removed the stray ';' that followed the function's closing
 * brace; no logic changes.
 */
static int efi_pstore_write(enum pstore_type_id type,
		enum kmsg_dump_reason reason, u64 *id,
		unsigned int part, size_t size, struct pstore_info *psi)
{
	char name[DUMP_NAME_LEN];
	char stub_name[DUMP_NAME_LEN];
	efi_char16_t efi_name[DUMP_NAME_LEN];
	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
	struct efivars *efivars = psi->data;
	struct efivar_entry *entry, *found = NULL;
	int i, ret = 0;

	sprintf(stub_name, "dump-type%u-%u-", type, part);
	sprintf(name, "%s%lu", stub_name, get_seconds());

	spin_lock(&efivars->lock);

	/* Widen the ASCII stub into UTF-16 for name comparison. */
	for (i = 0; i < DUMP_NAME_LEN; i++)
		efi_name[i] = stub_name[i];

	/*
	 * Clean up any entries with the same name
	 */
	list_for_each_entry(entry, &efivars->list, list) {
		get_var_data_locked(efivars, &entry->var);

		if (efi_guidcmp(entry->var.VendorGuid, vendor))
			continue;
		if (utf16_strncmp(entry->var.VariableName, efi_name,
				  utf16_strlen(efi_name)))
			continue;
		/* Needs to be a prefix */
		if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
			continue;

		/* found: zero-size write deletes the old record */
		found = entry;
		efivars->ops->set_variable(entry->var.VariableName,
					   &entry->var.VendorGuid,
					   PSTORE_EFI_ATTRIBUTES,
					   0, NULL);
	}

	if (found)
		list_del(&found->list);

	/* Now the full, timestamped name for the new record. */
	for (i = 0; i < DUMP_NAME_LEN; i++)
		efi_name[i] = name[i];

	efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
				   size, psi->buf);

	spin_unlock(&efivars->lock);

	/* Drop the stale kobject outside the lock. */
	if (found)
		efivar_unregister(found);

	if (size)
		ret = efivar_create_sysfs_entry(efivars,
					  utf16_strsize(efi_name,
							DUMP_NAME_LEN * 2),
					  efi_name, &vendor);

	*id = part;
	return ret;
}
/* pstore erase: a zero-size write with the record's type/part deletes it. */
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
			    struct pstore_info *psi)
{
	efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);

	return 0;
}
#else
/* CONFIG_PSTORE disabled: inert stubs keep efi_pstore_info well-formed. */
static int efi_pstore_open(struct pstore_info *psi)
{
	return 0;
}

static int efi_pstore_close(struct pstore_info *psi)
{
	return 0;
}

static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
			       struct timespec *timespec,
			       char **buf, struct pstore_info *psi)
{
	return -1;
}

static int efi_pstore_write(enum pstore_type_id type,
		enum kmsg_dump_reason reason, u64 *id,
		unsigned int part, size_t size, struct pstore_info *psi)
{
	return 0;
}

static int efi_pstore_erase(enum pstore_type_id type, u64 id,
			    struct pstore_info *psi)
{
	return 0;
}
#endif
/* Template copied into each efivars instance by register_efivars(). */
static struct pstore_info efi_pstore_info = {
	.owner		= THIS_MODULE,
	.name		= "efi",
	.open		= efi_pstore_open,
	.close		= efi_pstore_close,
	.read		= efi_pstore_read,
	.write		= efi_pstore_write,
	.erase		= efi_pstore_erase,
};
/*
 * "new_var" bin-file write: create a brand-new variable from a
 * userspace-supplied struct efi_variable.  -EINVAL if the content is
 * malformed or the name/GUID pair already exists, -EIO on firmware
 * refusal, else @count.
 */
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	struct efi_variable *new_var = (struct efi_variable *)buf;
	struct efivars *efivars = bin_attr->private;
	struct efivar_entry *search_efivar, *n;
	unsigned long strsize1, strsize2;
	efi_status_t status = EFI_NOT_FOUND;
	int found = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * NOTE(review): count is not compared against
	 * sizeof(struct efi_variable) and new_var->DataSize is not
	 * clamped to sizeof(new_var->Data) here - confirm the
	 * bin_attribute size limits this from the sysfs side.
	 */
	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
		printk(KERN_ERR "efivars: Malformed variable content\n");
		return -EINVAL;
	}

	spin_lock(&efivars->lock);
	/*
	 * Does this variable already exist?
	 */
	list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
		strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
		strsize2 = utf16_strsize(new_var->VariableName, 1024);
		if (strsize1 == strsize2 &&
		    !memcmp(&(search_efivar->var.VariableName),
			    new_var->VariableName, strsize1) &&
		    !efi_guidcmp(search_efivar->var.VendorGuid,
				 new_var->VendorGuid)) {
			found = 1;
			break;
		}
	}
	if (found) {
		spin_unlock(&efivars->lock);
		return -EINVAL;
	}

	/* now *really* create the variable via EFI */
	status = efivars->ops->set_variable(new_var->VariableName,
					    &new_var->VendorGuid,
					    new_var->Attributes,
					    new_var->DataSize,
					    new_var->Data);

	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
			status);
		spin_unlock(&efivars->lock);
		return -EIO;
	}
	spin_unlock(&efivars->lock);

	/* Create the entry in sysfs.  Locking is not required here */
	status = efivar_create_sysfs_entry(efivars,
					   utf16_strsize(new_var->VariableName,
							 1024),
					   new_var->VariableName,
					   &new_var->VendorGuid);
	if (status) {
		/* Variable exists in firmware even though sysfs failed. */
		printk(KERN_WARNING "efivars: variable created, but sysfs entry wasn't.\n");
	}
	return count;
}
/*
 * "del_var" bin-file write: delete the variable whose name/GUID match
 * the userspace-supplied struct efi_variable.  Per the EFI spec a
 * SetVariable call with no attributes and zero DataSize deletes.
 */
static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	struct efi_variable *del_var = (struct efi_variable *)buf;
	struct efivars *efivars = bin_attr->private;
	struct efivar_entry *search_efivar, *n;
	unsigned long strsize1, strsize2;
	efi_status_t status = EFI_NOT_FOUND;
	int found = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock(&efivars->lock);
	/*
	 * Does this variable already exist?
	 */
	list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
		strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
		strsize2 = utf16_strsize(del_var->VariableName, 1024);
		if (strsize1 == strsize2 &&
		    !memcmp(&(search_efivar->var.VariableName),
			    del_var->VariableName, strsize1) &&
		    !efi_guidcmp(search_efivar->var.VendorGuid,
				 del_var->VendorGuid)) {
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock(&efivars->lock);
		return -EINVAL;
	}
	/* force the Attributes/DataSize to 0 to ensure deletion */
	del_var->Attributes = 0;
	del_var->DataSize = 0;

	status = efivars->ops->set_variable(del_var->VariableName,
					    &del_var->VendorGuid,
					    del_var->Attributes,
					    del_var->DataSize,
					    del_var->Data);

	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
			status);
		spin_unlock(&efivars->lock);
		return -EIO;
	}
	list_del(&search_efivar->list);
	/* We need to release this lock before unregistering. */
	spin_unlock(&efivars->lock);
	efivar_unregister(search_efivar);

	/* It's dead Jim.... */
	return count;
}
/*
 * Let's not leave out systab information that snuck into
 * the efivars driver: print the physical addresses of the firmware
 * configuration tables discovered at boot, one "NAME=0xADDR" line
 * each, skipping tables the platform did not provide.
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.mps != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "MPS=0x%lx\n", efi.mps);
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
	if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
	if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
	if (efi.uga != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "UGA=0x%lx\n", efi.uga);

	return str - buf;
}
/* Read-only "systab" file attached directly to /sys/firmware/efi. */
static struct kobj_attribute efi_attr_systab =
			__ATTR(systab, 0400, systab_show, NULL);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	NULL,	/* maybe more in the future? */
};

static struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
};

/* /sys/firmware/efi kobject, created in efivars_init(). */
static struct kobject *efi_kobj;
/*
* efivar_create_sysfs_entry()
* Requires:
* variable_name_size = number of bytes required to hold
* variable_name (not counting the NULL
* character at the end.
* efivars->lock is not held on entry or exit.
* Returns 1 on failure, 0 on success
*/
static int
efivar_create_sysfs_entry(struct efivars *efivars,
			  unsigned long variable_name_size,
			  efi_char16_t *variable_name,
			  efi_guid_t *vendor_guid)
{
	/* name chars + '-' + 36-char GUID text + NUL fit in len+38(+1). */
	int i, short_name_size = variable_name_size / sizeof(efi_char16_t)
				+ 38;
	char *short_name;
	struct efivar_entry *new_efivar;

	short_name = kzalloc(short_name_size + 1, GFP_KERNEL);
	new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);

	if (!short_name || !new_efivar)  {
		kfree(short_name);
		kfree(new_efivar);
		return 1;
	}

	new_efivar->efivars = efivars;
	memcpy(new_efivar->var.VariableName, variable_name,
		variable_name_size);
	memcpy(&(new_efivar->var.VendorGuid), vendor_guid, sizeof(efi_guid_t));

	/* Convert Unicode to normal chars (assume top bits are 0),
	   ala UTF-8 */
	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
		short_name[i] = variable_name[i] & 0xFF;
	}

	/* This is ugly, but necessary to separate one vendor's
	   private variables from another's.  */

	*(short_name + strlen(short_name)) = '-';
	efi_guid_unparse(vendor_guid, short_name + strlen(short_name));

	/* On success the kobject owns new_efivar; release() kfrees it. */
	new_efivar->kobj.kset = efivars->kset;
	i = kobject_init_and_add(&new_efivar->kobj, &efivar_ktype, NULL,
				 "%s", short_name);
	if (i) {
		/*
		 * NOTE(review): after kobject_init_and_add() fails the
		 * kobject is initialized; kobject_put() would be the
		 * canonical cleanup rather than a bare kfree() - confirm.
		 */
		kfree(short_name);
		kfree(new_efivar);
		return 1;
	}

	kobject_uevent(&new_efivar->kobj, KOBJ_ADD);
	kfree(short_name);
	short_name = NULL;

	spin_lock(&efivars->lock);
	list_add(&new_efivar->list, &efivars->list);
	spin_unlock(&efivars->lock);

	return 0;
}
/*
 * Create the write-only new_var/del_var control files inside the
 * "vars" kset.  On any failure both attributes are freed and cleared
 * before returning the error (goto-cleanup pattern).
 */
static int
create_efivars_bin_attributes(struct efivars *efivars)
{
	struct bin_attribute *attr;
	int error;

	/* new_var */
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->attr.name = "new_var";
	attr->attr.mode = 0200;
	attr->write = efivar_create;
	attr->private = efivars;
	efivars->new_var = attr;

	/* del_var */
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		error = -ENOMEM;
		goto out_free;
	}
	attr->attr.name = "del_var";
	attr->attr.mode = 0200;
	attr->write = efivar_delete;
	attr->private = efivars;
	efivars->del_var = attr;

	sysfs_bin_attr_init(efivars->new_var);
	sysfs_bin_attr_init(efivars->del_var);

	/* Register */
	error = sysfs_create_bin_file(&efivars->kset->kobj,
				      efivars->new_var);
	if (error) {
		printk(KERN_ERR "efivars: unable to create new_var sysfs file"
			" due to error %d\n", error);
		goto out_free;
	}
	error = sysfs_create_bin_file(&efivars->kset->kobj,
				      efivars->del_var);
	if (error) {
		printk(KERN_ERR "efivars: unable to create del_var sysfs file"
			" due to error %d\n", error);
		/* new_var was already registered; unwind it too. */
		sysfs_remove_bin_file(&efivars->kset->kobj,
				      efivars->new_var);
		goto out_free;
	}

	return 0;
out_free:
	kfree(efivars->del_var);
	efivars->del_var = NULL;
	kfree(efivars->new_var);
	efivars->new_var = NULL;
	return error;
}
/*
 * Tear down everything register_efivars() created: every per-variable
 * kobject, the new_var/del_var control files and the "vars" kset.
 */
void unregister_efivars(struct efivars *efivars)
{
	struct efivar_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &efivars->list, list) {
		/* Unlink under the lock; drop the kobject outside it. */
		spin_lock(&efivars->lock);
		list_del(&entry->list);
		spin_unlock(&efivars->lock);
		efivar_unregister(entry);
	}
	if (efivars->new_var)
		sysfs_remove_bin_file(&efivars->kset->kobj, efivars->new_var);
	if (efivars->del_var)
		sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
	/* kfree(NULL) is a no-op, so unset attrs are fine here. */
	kfree(efivars->new_var);
	kfree(efivars->del_var);
	kset_unregister(efivars->kset);
}
EXPORT_SYMBOL_GPL(unregister_efivars);
/*
 * register_efivars - enumerate all firmware variables, expose them
 * under <parent_kobj>/vars, and hook up the crash-dump pstore backend.
 * Returns 0 on success or a negative errno.
 */
int register_efivars(struct efivars *efivars,
		     const struct efivar_operations *ops,
		     struct kobject *parent_kobj)
{
	efi_status_t status = EFI_NOT_FOUND;
	efi_guid_t vendor_guid;
	efi_char16_t *variable_name;
	unsigned long variable_name_size = 1024;
	int error = 0;

	variable_name = kzalloc(variable_name_size, GFP_KERNEL);
	if (!variable_name) {
		printk(KERN_ERR "efivars: Memory allocation failed.\n");
		return -ENOMEM;
	}

	spin_lock_init(&efivars->lock);
	INIT_LIST_HEAD(&efivars->list);
	efivars->ops = ops;

	efivars->kset = kset_create_and_add("vars", NULL, parent_kobj);
	if (!efivars->kset) {
		printk(KERN_ERR "efivars: Subsystem registration failed.\n");
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Per EFI spec, the maximum storage allocated for both
	 * the variable name and variable data is 1024 bytes.
	 */
	do {
		/* get_next_variable() shrinks this; reset every pass. */
		variable_name_size = 1024;

		status = ops->get_next_variable(&variable_name_size,
						variable_name,
						&vendor_guid);
		switch (status) {
		case EFI_SUCCESS:
			efivar_create_sysfs_entry(efivars,
						  variable_name_size,
						  variable_name,
						  &vendor_guid);
			break;
		case EFI_NOT_FOUND:
			/* Enumeration complete. */
			break;
		default:
			printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
				status);
			status = EFI_NOT_FOUND;
			break;
		}
	} while (status != EFI_NOT_FOUND);

	error = create_efivars_bin_attributes(efivars);
	if (error)
		unregister_efivars(efivars);

	/*
	 * pstore hookup is best-effort: silently skipped if the buffer
	 * allocation fails.
	 * NOTE(review): buf is 4096 bytes but bufsize is reported as
	 * 1024, and pstore is still registered even when the bin
	 * attributes above failed - confirm both are intentional.
	 */
	efivars->efi_pstore_info = efi_pstore_info;

	efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
	if (efivars->efi_pstore_info.buf) {
		efivars->efi_pstore_info.bufsize = 1024;
		efivars->efi_pstore_info.data = efivars;
		spin_lock_init(&efivars->efi_pstore_info.buf_lock);
		pstore_register(&efivars->efi_pstore_info);
	}

out:
	kfree(variable_name);

	return error;
}
EXPORT_SYMBOL_GPL(register_efivars);
/* Single global instance backing /sys/firmware/efi/vars on this arch. */
static struct efivars __efivars;
static struct efivar_operations ops;
/*
* For now we register the efi subsystem with the firmware subsystem
* and the vars subsystem with the efi subsystem. In the future, it
* might make sense to split off the efi subsystem into its own
* driver, but for now only efivars will register with it, so just
* include it here.
*/
/*
 * Module init: create /sys/firmware/efi, register all firmware
 * variables under efi/vars and add the systab attribute group.
 * A non-EFI boot is not an error - the module simply does nothing.
 */
static int __init
efivars_init(void)
{
	int error = 0;

	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
	       EFIVARS_DATE);

	if (!efi_enabled)
		return 0;

	/* For now we'll register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		printk(KERN_ERR "efivars: Firmware registration failed.\n");
		return -ENOMEM;
	}

	/* Wire the global ops table straight to the EFI runtime services. */
	ops.get_variable = efi.get_variable;
	ops.set_variable = efi.set_variable;
	ops.get_next_variable = efi.get_next_variable;
	error = register_efivars(&__efivars, &ops, efi_kobj);
	if (error)
		goto err_put;

	/* Don't forget the systab entry */
	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		printk(KERN_ERR
		       "efivars: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	return 0;

err_unregister:
	unregister_efivars(&__efivars);
err_put:
	kobject_put(efi_kobj);
	return error;
}
/* Module exit: mirror of efivars_init(); no-op on non-EFI boots. */
static void __exit
efivars_exit(void)
{
	if (efi_enabled) {
		unregister_efivars(&__efivars);
		kobject_put(efi_kobj);
	}
}
module_init(efivars_init);
module_exit(efivars_exit);
| gpl-2.0 |
OPTICM/android_kernel_amazon_otter-common | arch/powerpc/kernel/clock.c | 4583 | 1772 | /*
* Dummy clk implementations for powerpc.
* These need to be overridden in platform code.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/clk_interface.h>
/* Platform code fills this dispatch table; unset hooks yield -ENOSYS. */
struct clk_interface clk_functions;
/*
 * clk_get - look up a clock through the platform dispatch table.
 * ERR_PTR(-ENOSYS) when no platform implementation is installed.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	return clk_functions.clk_get ? clk_functions.clk_get(dev, id)
				     : ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get);
/* clk_put - release a clock; silently ignored without a platform hook. */
void clk_put(struct clk *clk)
{
	if (!clk_functions.clk_put)
		return;
	clk_functions.clk_put(clk);
}
EXPORT_SYMBOL(clk_put);
/* clk_enable - enable a clock; -ENOSYS without a platform hook. */
int clk_enable(struct clk *clk)
{
	return clk_functions.clk_enable ? clk_functions.clk_enable(clk)
					: -ENOSYS;
}
EXPORT_SYMBOL(clk_enable);
/* clk_disable - disable a clock; ignored without a platform hook. */
void clk_disable(struct clk *clk)
{
	if (!clk_functions.clk_disable)
		return;
	clk_functions.clk_disable(clk);
}
EXPORT_SYMBOL(clk_disable);
/* clk_get_rate - current rate in Hz; 0 without a platform hook. */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk_functions.clk_get_rate ? clk_functions.clk_get_rate(clk)
					  : 0;
}
EXPORT_SYMBOL(clk_get_rate);
/* clk_round_rate - nearest supported rate; -ENOSYS without a hook. */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_functions.clk_round_rate
		? clk_functions.clk_round_rate(clk, rate)
		: -ENOSYS;
}
EXPORT_SYMBOL(clk_round_rate);
/* clk_set_rate - program a new rate; -ENOSYS without a platform hook. */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_functions.clk_set_rate
		? clk_functions.clk_set_rate(clk, rate)
		: -ENOSYS;
}
EXPORT_SYMBOL(clk_set_rate);
/* clk_get_parent - parent of a clock; ERR_PTR(-ENOSYS) when no hook exists. */
struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk_functions.clk_get_parent)
		return ERR_PTR(-ENOSYS);
	return clk_functions.clk_get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);
/* clk_set_parent - reparent a clock; -ENOSYS when no hook exists. */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return clk_functions.clk_set_parent ?
		clk_functions.clk_set_parent(clk, parent) : -ENOSYS;
}
EXPORT_SYMBOL(clk_set_parent);
| gpl-2.0 |
kbc-developers/android_kernel_samsung_klte | net/mac80211/chan.c | 4839 | 3641 | /*
* mac80211 - channel management
*/
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
/*
 * __ieee80211_get_channel_mode() - classify how the current channel is
 * being used, considering every running interface except @ignore.
 *
 * Returns:
 *   CHAN_MODE_UNDEFINED - no running interface pins the channel
 *   CHAN_MODE_HOPPING   - an IBSS without a fixed channel may hop
 *   CHAN_MODE_FIXED     - at least one interface requires this channel
 *
 * Caller must hold local->iflist_mtx (asserted below).
 */
static enum ieee80211_chan_mode
__ieee80211_get_channel_mode(struct ieee80211_local *local,
			     struct ieee80211_sub_if_data *ignore)
{
	struct ieee80211_sub_if_data *sdata;
	lockdep_assert_held(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata == ignore)
			continue;
		if (!ieee80211_sdata_running(sdata))
			continue;
		switch (sdata->vif.type) {
		case NL80211_IFTYPE_MONITOR:
			/* monitor interfaces don't constrain the channel */
			continue;
		case NL80211_IFTYPE_STATION:
			/* only associated stations pin the channel */
			if (!sdata->u.mgd.associated)
				continue;
			break;
		case NL80211_IFTYPE_ADHOC:
			if (!sdata->u.ibss.ssid_len)
				continue;
			/* an IBSS without a fixed channel may hop */
			if (!sdata->u.ibss.fixed_channel)
				return CHAN_MODE_HOPPING;
			break;
		case NL80211_IFTYPE_AP_VLAN:
			/* will also have _AP interface */
			continue;
		case NL80211_IFTYPE_AP:
			/* only beaconing APs pin the channel */
			if (!sdata->u.ap.beacon)
				continue;
			break;
		default:
			break;
		}
		/* this interface requires the current channel */
		return CHAN_MODE_FIXED;
	}
	return CHAN_MODE_UNDEFINED;
}
enum ieee80211_chan_mode
ieee80211_get_channel_mode(struct ieee80211_local *local,
struct ieee80211_sub_if_data *ignore)
{
enum ieee80211_chan_mode mode;
mutex_lock(&local->iflist_mtx);
mode = __ieee80211_get_channel_mode(local, ignore);
mutex_unlock(&local->iflist_mtx);
return mode;
}
/*
 * ieee80211_set_channel_type() - try to switch the operating channel type.
 *
 * First computes the "superchannel" type: the most constrained HT
 * channel type required by the other running interfaces. Then checks
 * whether @chantype is compatible with it. On success the resulting
 * type is stored in local->_oper_channel_type and, when @sdata is
 * non-NULL, in its bss_conf.
 *
 * Returns true when the requested type was accepted, false otherwise.
 */
bool ieee80211_set_channel_type(struct ieee80211_local *local,
				struct ieee80211_sub_if_data *sdata,
				enum nl80211_channel_type chantype)
{
	struct ieee80211_sub_if_data *tmp;
	enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
	bool result;
	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(tmp, &local->interfaces, list) {
		if (tmp == sdata)
			continue;
		if (!ieee80211_sdata_running(tmp))
			continue;
		switch (tmp->vif.bss_conf.channel_type) {
		case NL80211_CHAN_NO_HT:
		case NL80211_CHAN_HT20:
			/* keep the wider of superchan and this type */
			if (superchan > tmp->vif.bss_conf.channel_type)
				break;
			superchan = tmp->vif.bss_conf.channel_type;
			break;
		case NL80211_CHAN_HT40PLUS:
			/* HT40+ and HT40- cannot coexist on one channel */
			WARN_ON(superchan == NL80211_CHAN_HT40MINUS);
			superchan = NL80211_CHAN_HT40PLUS;
			break;
		case NL80211_CHAN_HT40MINUS:
			WARN_ON(superchan == NL80211_CHAN_HT40PLUS);
			superchan = NL80211_CHAN_HT40MINUS;
			break;
		}
	}
	switch (superchan) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		/*
		 * allow any change that doesn't go to no-HT
		 * (if it already is no-HT no change is needed)
		 */
		if (chantype == NL80211_CHAN_NO_HT)
			break;
		superchan = chantype;
		break;
	case NL80211_CHAN_HT40PLUS:
	case NL80211_CHAN_HT40MINUS:
		/* allow smaller bandwidth and same */
		if (chantype == NL80211_CHAN_NO_HT)
			break;
		if (chantype == NL80211_CHAN_HT20)
			break;
		if (superchan == chantype)
			break;
		/* the opposite 40 MHz extension is incompatible */
		result = false;
		goto out;
	}
	local->_oper_channel_type = superchan;
	if (sdata)
		sdata->vif.bss_conf.channel_type = chantype;
	result = true;
out:
	mutex_unlock(&local->iflist_mtx);
	return result;
}
/*
* ieee80211_get_tx_channel_type returns the channel type we should
* use for packet transmission, given the channel capability and
* whatever regulatory flags we have been given.
*/
enum nl80211_channel_type ieee80211_get_tx_channel_type(
struct ieee80211_local *local,
enum nl80211_channel_type channel_type)
{
switch (channel_type) {
case NL80211_CHAN_HT40PLUS:
if (local->hw.conf.channel->flags &
IEEE80211_CHAN_NO_HT40PLUS)
return NL80211_CHAN_HT20;
break;
case NL80211_CHAN_HT40MINUS:
if (local->hw.conf.channel->flags &
IEEE80211_CHAN_NO_HT40MINUS)
return NL80211_CHAN_HT20;
break;
default:
break;
}
return channel_type;
}
| gpl-2.0 |
Wonfee/android_kernel_asus_grouper | drivers/isdn/hisax/teles0.c | 4839 | 9243 | /* $Id: teles0.c,v 2.15.2.4 2004/01/13 23:48:39 keil Exp $
*
* low level stuff for Teles Memory IO isdn cards
*
* Author Karsten Keil
* based on the teles driver from Jan den Ouden
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Jan den Ouden
* Fritz Elfert
* Beat Doebeli
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isdnl1.h"
#include "isac.h"
#include "hscx.h"
/* CVS revision string, reported by setup_teles0() at probe time. */
static const char *teles0_revision = "$Revision: 2.15.2.4 $";
/* Size of the card's memory-mapped I/O window. */
#define TELES_IOMEM_SIZE 0x400
/* Port-I/O accessors for the (optional, 16.0-only) config register block. */
#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)
/*
 * Read one ISAC register through the memory window; odd-numbered
 * registers live at a different base offset (0x2ff) than even ones (0x100).
 */
static inline u_char
readisac(void __iomem *adr, u_char off)
{
	unsigned long base = (off & 1) ? 0x2ff : 0x100;

	return readb(adr + base + off);
}
/* Write one ISAC register; mb() orders the store against later accesses. */
static inline void
writeisac(void __iomem *adr, u_char off, u_char data)
{
	unsigned long base = (off & 1) ? 0x2ff : 0x100;

	writeb(data, adr + base + off);
	mb();
}
/* Read a register of HSCX 0 or 1; odd registers use a shifted window. */
static inline u_char
readhscx(void __iomem *adr, int hscx, u_char off)
{
	unsigned long base = hscx ? 0x1c0 : 0x180;

	if (off & 1)
		base += 0x1ff;
	return readb(adr + base + off);
}
/* Write a register of HSCX 0 or 1; mb() orders the store. */
static inline void
writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
{
	unsigned long base = hscx ? 0x1c0 : 0x180;

	if (off & 1)
		base += 0x1ff;
	writeb(data, adr + base + off);
	mb();
}
/* Drain @size bytes from the ISAC receive FIFO (fixed offset 0x100). */
static inline void
read_fifo_isac(void __iomem *adr, u_char * data, int size)
{
	u_char __iomem *fifo = adr + 0x100;
	int i = 0;

	while (i < size)
		data[i++] = readb(fifo);
}
/* Feed @size bytes into the ISAC transmit FIFO; mb() after each store. */
static inline void
write_fifo_isac(void __iomem *adr, u_char * data, int size)
{
	u_char __iomem *fifo = adr + 0x100;
	int i = 0;

	while (i < size) {
		writeb(data[i++], fifo);
		mb();
	}
}
/* Drain @size bytes from the receive FIFO of HSCX 0 or 1. */
static inline void
read_fifo_hscx(void __iomem *adr, int hscx, u_char * data, int size)
{
	u_char __iomem *fifo = adr + (hscx ? 0x1c0 : 0x180);
	int i = 0;

	while (i < size)
		data[i++] = readb(fifo);
}
/* Feed @size bytes into the transmit FIFO of HSCX 0 or 1; mb() per store. */
static inline void
write_fifo_hscx(void __iomem *adr, int hscx, u_char * data, int size)
{
	u_char __iomem *fifo = adr + (hscx ? 0x1c0 : 0x180);
	int i = 0;

	while (i < size) {
		writeb(data[i++], fifo);
		mb();
	}
}
/* Interface functions */
/* cs->readisac hook: fetch one ISAC register via the memory window. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return readisac(cs->hw.teles0.membase, offset);
}
/* cs->writeisac hook: store one ISAC register via the memory window. */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writeisac(cs->hw.teles0.membase, offset, value);
}
/* cs->readisacfifo hook: copy @size bytes out of the ISAC receive FIFO. */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	read_fifo_isac(cs->hw.teles0.membase, data, size);
}
/* cs->writeisacfifo hook: push @size bytes into the ISAC transmit FIFO. */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	write_fifo_isac(cs->hw.teles0.membase, data, size);
}
/* cs->BC_Read_Reg hook: fetch one register of HSCX 0 or 1. */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return readhscx(cs->hw.teles0.membase, hscx, offset);
}
/* cs->BC_Write_Reg hook: store one register of HSCX 0 or 1. */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writehscx(cs->hw.teles0.membase, hscx, offset, value);
}
/*
* fast interrupt HSCX stuff goes here
*/
/* Accessor macros required by the shared HSCX IRQ code (hscx_irq.c below). */
#define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg)
#define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data)
#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
#include "hscx_irq.c"
/*
 * Card interrupt handler: service pending HSCX (B-channel) and ISAC
 * (D-channel) interrupt status, re-reading the status registers for at
 * most 5 rounds, then pulse the mask registers to re-arm the chips'
 * interrupt outputs.
 */
static irqreturn_t
teles0_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val;
	u_long flags;
	int count = 0;
	spin_lock_irqsave(&cs->lock, flags);
	val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
Start_HSCX:
	if (val)
		hscx_int_main(cs, val);
	val = readisac(cs->hw.teles0.membase, ISAC_ISTA);
Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	count++;
	/* re-check: servicing may have raised new status bits */
	val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
	if (val && count < 5) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX IntStat after IntRoutine");
		goto Start_HSCX;
	}
	val = readisac(cs->hw.teles0.membase, ISAC_ISTA);
	if (val && count < 5) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	/* mask all interrupt sources, then unmask them again */
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Undo the resource claims made in setup_teles0(): the config-register
 * I/O region (if one was requested), the ioremap() mapping, and the
 * memory region reservation.
 */
static void
release_io_teles0(struct IsdnCardState *cs)
{
	if (cs->hw.teles0.cfg_reg)
		release_region(cs->hw.teles0.cfg_reg, 8);
	iounmap(cs->hw.teles0.membase);
	release_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE);
}
/*
 * Hardware reset. For cards with a config register block (16.0), the
 * IRQ line and memory base are encoded into a config byte which is
 * written with the enable bit pulsed low->high; afterwards the
 * on-board reset location at membase + 0x80 is pulsed 0 -> 1.
 *
 * Returns 1 when the configured IRQ is not supported by the card,
 * 0 on success.
 */
static int
reset_teles0(struct IsdnCardState *cs)
{
	u_char cfval;
	if (cs->hw.teles0.cfg_reg) {
		/* map the IRQ line onto the card's config-byte encoding */
		switch (cs->irq) {
			case 2:
			case 9:
				cfval = 0x00;
				break;
			case 3:
				cfval = 0x02;
				break;
			case 4:
				cfval = 0x04;
				break;
			case 5:
				cfval = 0x06;
				break;
			case 10:
				cfval = 0x08;
				break;
			case 11:
				cfval = 0x0A;
				break;
			case 12:
				cfval = 0x0C;
				break;
			case 15:
				cfval = 0x0E;
				break;
			default:
				return(1);
		}
		/* fold bits of the physical memory base into the byte */
		cfval |= ((cs->hw.teles0.phymem >> 9) & 0xF0);
		byteout(cs->hw.teles0.cfg_reg + 4, cfval);
		HZDELAY(HZ / 10 + 1);
		byteout(cs->hw.teles0.cfg_reg + 4, cfval | 1);
		HZDELAY(HZ / 10 + 1);
	}
	/* pulse the card's reset location */
	writeb(0, cs->hw.teles0.membase + 0x80); mb();
	HZDELAY(HZ / 5 + 1);
	writeb(1, cs->hw.teles0.membase + 0x80); mb();
	HZDELAY(HZ / 5 + 1);
	return(0);
}
/*
 * Card-control callback dispatched from the HiSax core.
 * Handles reset, resource release and layer-1 init; CARD_TEST and
 * unknown message types are accepted as no-ops. Always returns 0.
 */
static int
Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	if (mt == CARD_RESET) {
		spin_lock_irqsave(&cs->lock, flags);
		reset_teles0(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
	} else if (mt == CARD_RELEASE) {
		release_io_teles0(cs);
	} else if (mt == CARD_INIT) {
		spin_lock_irqsave(&cs->lock, flags);
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
	}
	/* CARD_TEST and anything else: nothing to do */
	return 0;
}
/*
 * Probe/setup for Teles 8.0/16.0 cards.
 *
 * Validates the card type, claims the config I/O region (16.0 only),
 * checks the card's signature bytes, reserves and maps the shared
 * memory window, resets the card and wires up the driver callbacks.
 *
 * Returns 1 on success, 0 on any failure (HiSax convention).
 *
 * Fix: the ioremap() result was previously used unchecked; on mapping
 * failure reset_teles0() would have written through a NULL pointer.
 */
int __devinit
setup_teles0(struct IsdnCard *card)
{
	u_char val;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, teles0_revision);
	printk(KERN_INFO "HiSax: Teles 8.0/16.0 driver Rev. %s\n", HiSax_getrev(tmp));
	if ((cs->typ != ISDN_CTYPE_16_0) && (cs->typ != ISDN_CTYPE_8_0))
		return (0);

	if (cs->typ == ISDN_CTYPE_16_0)
		cs->hw.teles0.cfg_reg = card->para[2];
	else			/* 8.0 */
		cs->hw.teles0.cfg_reg = 0;

	/* segment-style (DOS) memory base: convert to a linear address */
	if (card->para[1] < 0x10000) {
		card->para[1] <<= 4;
		printk(KERN_INFO
		       "Teles0: membase configured DOSish, assuming 0x%lx\n",
		       (unsigned long) card->para[1]);
	}
	cs->irq = card->para[0];
	if (cs->hw.teles0.cfg_reg) {
		if (!request_region(cs->hw.teles0.cfg_reg, 8, "teles cfg")) {
			printk(KERN_WARNING
			       "HiSax: %s config port %x-%x already in use\n",
			       CardType[card->typ],
			       cs->hw.teles0.cfg_reg,
			       cs->hw.teles0.cfg_reg + 8);
			return (0);
		}
	}
	if (cs->hw.teles0.cfg_reg) {
		/* verify the card's signature bytes (0x51, 0x93, 0x1e/0x1f) */
		if ((val = bytein(cs->hw.teles0.cfg_reg + 0)) != 0x51) {
			printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
			       cs->hw.teles0.cfg_reg + 0, val);
			release_region(cs->hw.teles0.cfg_reg, 8);
			return (0);
		}
		if ((val = bytein(cs->hw.teles0.cfg_reg + 1)) != 0x93) {
			printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
			       cs->hw.teles0.cfg_reg + 1, val);
			release_region(cs->hw.teles0.cfg_reg, 8);
			return (0);
		}
		val = bytein(cs->hw.teles0.cfg_reg + 2);	/* 0x1e=without AB
								 * 0x1f=with AB
								 * 0x1c 16.3 ???
								 */
		if (val != 0x1e && val != 0x1f) {
			printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
			       cs->hw.teles0.cfg_reg + 2, val);
			release_region(cs->hw.teles0.cfg_reg, 8);
			return (0);
		}
	}
	/* 16.0 and 8.0 designed for IOM1 */
	test_and_set_bit(HW_IOM1, &cs->HW_Flags);
	cs->hw.teles0.phymem = card->para[1];
	if (!request_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE, "teles iomem")) {
		printk(KERN_WARNING
		       "HiSax: %s memory region %lx-%lx already in use\n",
		       CardType[card->typ],
		       cs->hw.teles0.phymem,
		       cs->hw.teles0.phymem + TELES_IOMEM_SIZE);
		if (cs->hw.teles0.cfg_reg)
			release_region(cs->hw.teles0.cfg_reg, 8);
		return (0);
	}
	cs->hw.teles0.membase = ioremap(cs->hw.teles0.phymem, TELES_IOMEM_SIZE);
	if (!cs->hw.teles0.membase) {
		/* don't touch the hardware through a NULL mapping */
		printk(KERN_WARNING
		       "HiSax: %s couldn't map memory at %lx\n",
		       CardType[card->typ], cs->hw.teles0.phymem);
		release_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE);
		if (cs->hw.teles0.cfg_reg)
			release_region(cs->hw.teles0.cfg_reg, 8);
		return (0);
	}
	printk(KERN_INFO
	       "HiSax: %s config irq:%d mem:%p cfg:0x%X\n",
	       CardType[cs->typ], cs->irq,
	       cs->hw.teles0.membase, cs->hw.teles0.cfg_reg);
	if (reset_teles0(cs)) {
		printk(KERN_WARNING "Teles0: wrong IRQ\n");
		release_io_teles0(cs);
		return (0);
	}
	setup_isac(cs);
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &Teles_card_msg;
	cs->irq_func = &teles0_interrupt;
	ISACVersion(cs, "Teles0:");
	if (HscxVersion(cs, "Teles0:")) {
		printk(KERN_WARNING
		       "Teles0: wrong HSCX versions check IO/MEM addresses\n");
		release_io_teles0(cs);
		return (0);
	}
	return (1);
}
| gpl-2.0 |
aapav01/android_kernel_samsung_ms013g-caf | drivers/i2c/busses/i2c-nomadik.c | 5095 | 26799 | /*
* Copyright (C) 2009 ST-Ericsson SA
* Copyright (C) 2009 STMicroelectronics
*
* I2C master mode controller driver, used in Nomadik 8815
* and Ux500 platforms.
*
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Sachin Verma <sachin.verma@st.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <plat/i2c.h>
#define DRIVER_NAME "nmk-i2c"
/* I2C Controller register offsets */
#define I2C_CR (0x000)
#define I2C_SCR (0x004)
#define I2C_HSMCR (0x008)
#define I2C_MCR (0x00C)
#define I2C_TFR (0x010)
#define I2C_SR (0x014)
#define I2C_RFR (0x018)
#define I2C_TFTR (0x01C)
#define I2C_RFTR (0x020)
#define I2C_DMAR (0x024)
#define I2C_BRCR (0x028)
#define I2C_IMSCR (0x02C)
#define I2C_RISR (0x030)
#define I2C_MISR (0x034)
#define I2C_ICR (0x038)
/* Control registers */
#define I2C_CR_PE (0x1 << 0) /* Peripheral Enable */
#define I2C_CR_OM (0x3 << 1) /* Operating mode */
#define I2C_CR_SAM (0x1 << 3) /* Slave addressing mode */
#define I2C_CR_SM (0x3 << 4) /* Speed mode */
#define I2C_CR_SGCM (0x1 << 6) /* Slave general call mode */
#define I2C_CR_FTX (0x1 << 7) /* Flush Transmit */
#define I2C_CR_FRX (0x1 << 8) /* Flush Receive */
#define I2C_CR_DMA_TX_EN (0x1 << 9) /* DMA Tx enable */
#define I2C_CR_DMA_RX_EN (0x1 << 10) /* DMA Rx Enable */
#define I2C_CR_DMA_SLE (0x1 << 11) /* DMA sync. logic enable */
#define I2C_CR_LM (0x1 << 12) /* Loopback mode */
#define I2C_CR_FON (0x3 << 13) /* Filtering on */
#define I2C_CR_FS (0x3 << 15) /* Force stop enable */
/* Master controller (MCR) register */
#define I2C_MCR_OP (0x1 << 0) /* Operation */
#define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */
#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
#define I2C_MCR_SB (0x1 << 11) /* Extended address */
#define I2C_MCR_AM (0x3 << 12) /* Address type */
#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
/* Status register (SR) */
#define I2C_SR_OP (0x3 << 0) /* Operation */
#define I2C_SR_STATUS (0x3 << 2) /* controller status */
#define I2C_SR_CAUSE (0x7 << 4) /* Abort cause */
#define I2C_SR_TYPE (0x3 << 7) /* Receive type */
#define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */
/* Interrupt mask set/clear (IMSCR) bits */
#define I2C_IT_TXFE (0x1 << 0)
#define I2C_IT_TXFNE (0x1 << 1)
#define I2C_IT_TXFF (0x1 << 2)
#define I2C_IT_TXFOVR (0x1 << 3)
#define I2C_IT_RXFE (0x1 << 4)
#define I2C_IT_RXFNF (0x1 << 5)
#define I2C_IT_RXFF (0x1 << 6)
#define I2C_IT_RFSR (0x1 << 16)
#define I2C_IT_RFSE (0x1 << 17)
#define I2C_IT_WTSR (0x1 << 18)
#define I2C_IT_MTD (0x1 << 19)
#define I2C_IT_STD (0x1 << 20)
#define I2C_IT_MAL (0x1 << 24)
#define I2C_IT_BERR (0x1 << 25)
#define I2C_IT_MTDWS (0x1 << 28)
/* Place @val at bit offset @sb and truncate it to the field @mask. */
#define GEN_MASK(val, mask, sb) (((val) << (sb)) & (mask))
/* some bits in ICR are reserved */
#define I2C_CLEAR_ALL_INTS 0x131f007f
/* first three msb bits are reserved */
#define IRQ_MASK(mask) (mask & 0x1fffffff)
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
/* Controller transfer status values. */
enum i2c_status {
	I2C_NOP,	/* no transfer */
	I2C_ON_GOING,	/* transfer in progress */
	I2C_OK,		/* transfer finished successfully */
	I2C_ABORT	/* transfer aborted by the controller */
};
/* operation */
enum i2c_operation {
	I2C_NO_OPERATION = 0xff,	/* idle marker, set by init_hw() */
	I2C_WRITE = 0x00,		/* matches the MCR OP bit encoding */
	I2C_READ = 0x01
};
/**
 * struct i2c_nmk_client - client specific data
 * @slave_adr: 7-bit slave address
 * @count: no. bytes to be transferred (counts down during a transfer)
 * @buffer: client data buffer; advanced as bytes are moved to/from the
 *          FIFOs, so it always points at the next untransferred byte
 * @xfer_bytes: bytes transferred till now
 * @operation: current I2C operation (I2C_READ/I2C_WRITE/I2C_NO_OPERATION)
 */
struct i2c_nmk_client {
	unsigned short slave_adr;
	unsigned long count;
	unsigned char *buffer;
	unsigned long xfer_bytes;
	enum i2c_operation operation;
};
/**
 * struct nmk_i2c_dev - private data structure of the controller.
 * @pdev: parent platform device.
 * @adap: corresponding I2C adapter.
 * @irq: interrupt line for the controller.
 * @virtbase: virtual io memory area.
 * @clk: hardware i2c block clock.
 * @cfg: machine provided controller configuration.
 * @cli: holder of client specific data.
 * @stop: stop condition (1 = issue STOP, 0 = repeated start follows).
 * @xfer_complete: acknowledge completion for a I2C message.
 * @result: controller propagated result.
 * @regulator: pointer to i2c regulator.
 * @busy: Busy doing transfer.
 */
struct nmk_i2c_dev {
	struct platform_device *pdev;
	struct i2c_adapter adap;
	int irq;
	void __iomem *virtbase;
	struct clk *clk;
	struct nmk_i2c_controller cfg;
	struct i2c_nmk_client cli;
	int stop;
	struct completion xfer_complete;
	int result;
	struct regulator *regulator;
	bool busy;
};
/*
 * Controller's abort causes, indexed by the CAUSE field of the status
 * register ((SR >> 4) & 0x7, see nmk_i2c_xfer_one()).
 */
static const char *abort_causes[] = {
	"no ack received after address transmission",
	"no ack received during data phase",
	"ack received after xmission of master code",
	"master lost arbitration",
	"slave restarts",
	"slave reset",
	"overflow, maxsize is 2047 bytes",
};
/* Read-modify-write helper: set the @mask bits in the register at @reg. */
static inline void i2c_set_bit(void __iomem *reg, u32 mask)
{
	u32 val = readl(reg);

	writel(val | mask, reg);
}
/* Read-modify-write helper: clear the @mask bits in the register at @reg. */
static inline void i2c_clr_bit(void __iomem *reg, u32 mask)
{
	u32 val = readl(reg);

	writel(val & ~mask, reg);
}
/**
 * flush_i2c_fifo() - This function flushes the I2C FIFO
 * @dev: private data of I2C Driver
 *
 * This function flushes the I2C Tx and Rx FIFOs. It sets the FTX/FRX
 * flush bits and polls until the hardware clears them, retrying for up
 * to LOOP_ATTEMPTS adapter-timeout periods. It returns 0 on successful
 * flushing of FIFO, -ETIMEDOUT otherwise.
 */
static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
{
#define LOOP_ATTEMPTS 10
	int i;
	unsigned long timeout;
	/*
	 * flush the transmit and receive FIFO. The flushing
	 * operation takes several cycles before to be completed.
	 * On the completion, the I2C internal logic clears these
	 * bits, until then no one must access Tx, Rx FIFO and
	 * should poll on these bits waiting for the completion.
	 */
	writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR);
	for (i = 0; i < LOOP_ATTEMPTS; i++) {
		timeout = jiffies + dev->adap.timeout;
		while (!time_after(jiffies, timeout)) {
			if ((readl(dev->virtbase + I2C_CR) &
				(I2C_CR_FTX | I2C_CR_FRX)) == 0)
				return 0;
		}
	}
	dev_err(&dev->pdev->dev,
		"flushing operation timed out giving up after %d attempts",
		LOOP_ATTEMPTS);
	return -ETIMEDOUT;
}
/**
 * disable_all_interrupts() - Disable all interrupts of this I2c Bus
 * @dev: private data of I2C Driver
 *
 * Writes an all-zero mask to IMSCR so no interrupt source is enabled.
 */
static void disable_all_interrupts(struct nmk_i2c_dev *dev)
{
	writel(IRQ_MASK(0), dev->virtbase + I2C_IMSCR);
}
/**
 * clear_all_interrupts() - Clear all interrupts of I2C Controller
 * @dev: private data of I2C Driver
 *
 * Acknowledges every (non-reserved) pending interrupt via ICR.
 */
static void clear_all_interrupts(struct nmk_i2c_dev *dev)
{
	writel(IRQ_MASK(I2C_CLEAR_ALL_INTS), dev->virtbase + I2C_ICR);
}
/**
 * init_hw() - initialize the I2C hardware
 * @dev: private data of I2C Driver
 *
 * Flushes the FIFOs, disables the controller, masks and acknowledges
 * all interrupts and marks the client state idle. Returns 0 on
 * success or the error from flush_i2c_fifo().
 */
static int init_hw(struct nmk_i2c_dev *dev)
{
	int stat = flush_i2c_fifo(dev);

	if (stat)
		return stat;

	/* disable the controller */
	i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE);

	disable_all_interrupts(dev);
	clear_all_interrupts(dev);

	dev->cli.operation = I2C_NO_OPERATION;

	return 0;
}
/* enable peripheral, master mode operation */
#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
/**
 * load_i2c_mcr_reg() - assemble the MCR register value
 * @dev: private data of controller
 *
 * Encodes the transaction described in dev->cli: 7-bit slave address,
 * read/write operation, stop vs. repeated start, and transfer length.
 */
static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev)
{
	u32 mcr;

	/* 7-bit addressing, no start byte procedure */
	mcr = GEN_MASK(1, I2C_MCR_AM, 12);
	mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1);
	mcr |= GEN_MASK(0, I2C_MCR_SB, 11);

	/* master read or write? */
	mcr |= GEN_MASK(dev->cli.operation == I2C_WRITE ? I2C_WRITE : I2C_READ,
			I2C_MCR_OP, 0);

	/* terminate with a stop, or hold the bus for a repeated start */
	mcr |= GEN_MASK(dev->stop ? 1 : 0, I2C_MCR_STOP, 14);

	mcr |= GEN_MASK(dev->cli.count, I2C_MCR_LENGTH, 15);

	return mcr;
}
/**
 * setup_i2c_controller() - setup the controller
 * @dev: private data of controller
 *
 * Programs the slave setup time, baud-rate counters, speed mode and
 * FIFO thresholds from dev->cfg. Called before each transfer attempt
 * (see nmk_i2c_xfer()).
 */
static void setup_i2c_controller(struct nmk_i2c_dev *dev)
{
	u32 brcr1, brcr2;
	u32 i2c_clk, div;
	/* reset control, high-speed master code, thresholds and DMA */
	writel(0x0, dev->virtbase + I2C_CR);
	writel(0x0, dev->virtbase + I2C_HSMCR);
	writel(0x0, dev->virtbase + I2C_TFTR);
	writel(0x0, dev->virtbase + I2C_RFTR);
	writel(0x0, dev->virtbase + I2C_DMAR);
	/*
	 * set the slsu:
	 *
	 * slsu defines the data setup time after SCL clock
	 * stretching in terms of i2c clk cycles. The
	 * needed setup time for the three modes are 250ns,
	 * 100ns, 10ns respectively thus leading to the values
	 * of 14, 6, 2 for a 48 MHz i2c clk.
	 */
	writel(dev->cfg.slsu << 16, dev->virtbase + I2C_SCR);
	i2c_clk = clk_get_rate(dev->clk);
	/* fallback to std. mode if machine has not provided it */
	if (dev->cfg.clk_freq == 0)
		dev->cfg.clk_freq = 100000;
	/*
	 * The spec says, in case of std. mode the divider is
	 * 2 whereas it is 3 for fast and fastplus mode of
	 * operation. TODO - high speed support.
	 */
	div = (dev->cfg.clk_freq > 100000) ? 3 : 2;
	/*
	 * generate the mask for baud rate counters. The controller
	 * has two baud rate counters. One is used for High speed
	 * operation, and the other is for std, fast mode, fast mode
	 * plus operation. Currently we do not supprt high speed mode
	 * so set brcr1 to 0.
	 */
	brcr1 = 0 << 16;
	brcr2 = (i2c_clk/(dev->cfg.clk_freq * div)) & 0xffff;
	/* set the baud rate counter register */
	writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
	/*
	 * set the speed mode. Currently we support
	 * only standard and fast mode of operation
	 * TODO - support for fast mode plus (up to 1Mb/s)
	 * and high speed (up to 3.4 Mb/s)
	 */
	if (dev->cfg.sm > I2C_FREQ_MODE_FAST) {
		dev_err(&dev->pdev->dev,
			"do not support this mode defaulting to std. mode\n");
		/* reprogram counters and CR for standard mode (100 kHz) */
		brcr2 = i2c_clk/(100000 * 2) & 0xffff;
		writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
		writel(I2C_FREQ_MODE_STANDARD << 4,
				dev->virtbase + I2C_CR);
	}
	writel(dev->cfg.sm << 4, dev->virtbase + I2C_CR);
	/* set the Tx and Rx FIFO threshold */
	writel(dev->cfg.tft, dev->virtbase + I2C_TFTR);
	writel(dev->cfg.rft, dev->virtbase + I2C_RFTR);
}
/**
 * read_i2c() - Read from I2C client device
 * @dev: private data of I2C Driver
 *
 * This function reads from i2c client device when controller is in
 * master mode. There is a completion timeout. If there is no transfer
 * before timeout error is returned. Returns 0 on success or a
 * negative errno (-ETIMEDOUT when the completion never fires).
 */
static int read_i2c(struct nmk_i2c_dev *dev)
{
	/*
	 * status must be signed: it carries negative error codes and
	 * the function returns int (it was previously declared u32).
	 */
	int status = 0;
	u32 mcr;
	u32 irq_mask = 0;
	int timeout;
	mcr = load_i2c_mcr_reg(dev);
	writel(mcr, dev->virtbase + I2C_MCR);
	/* load the current CR value */
	writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
			dev->virtbase + I2C_CR);
	/* enable the controller */
	i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
	init_completion(&dev->xfer_complete);
	/* enable interrupts by setting the mask */
	irq_mask = (I2C_IT_RXFNF | I2C_IT_RXFF |
			I2C_IT_MAL | I2C_IT_BERR);
	/* transaction-done interrupt depends on stop vs. repeated start */
	if (dev->stop)
		irq_mask |= I2C_IT_MTD;
	else
		irq_mask |= I2C_IT_MTDWS;
	irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
	writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
			dev->virtbase + I2C_IMSCR);
	timeout = wait_for_completion_timeout(
			&dev->xfer_complete, dev->adap.timeout);
	if (timeout < 0) {
		/*
		 * Defensive only: wait_for_completion_timeout() returns
		 * an unsigned value, so this branch should not trigger.
		 */
		dev_err(&dev->pdev->dev,
			"wait_for_completion_timeout "
			"returned %d waiting for event\n", timeout);
		status = timeout;
	}
	if (timeout == 0) {
		/* Controller timed out */
		dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
				dev->cli.slave_adr);
		status = -ETIMEDOUT;
	}
	return status;
}
/*
 * Push client data into the Tx FIFO: at most @no_bytes - 2 bytes, and
 * stop early when the client buffer is exhausted. Advances the client
 * bookkeeping (buffer pointer, remaining count, transferred total).
 */
static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
{
	int room = no_bytes - 2;

	while (room > 0 && dev->cli.count != 0) {
		/* write to the Tx FIFO */
		writeb(*dev->cli.buffer, dev->virtbase + I2C_TFR);
		dev->cli.buffer++;
		dev->cli.count--;
		dev->cli.xfer_bytes++;
		room--;
	}
}
/**
 * write_i2c() - Write data to I2C client.
 * @dev: private data of I2C Driver
 *
 * This function writes data to I2C client. The first chunk is pushed
 * into the Tx FIFO directly; the rest is fed from the TXFNE interrupt.
 * Returns 0 on success or a negative errno (-ETIMEDOUT when the
 * completion never fires).
 */
static int write_i2c(struct nmk_i2c_dev *dev)
{
	/*
	 * status must be signed: it carries negative error codes and
	 * the function returns int (it was previously declared u32).
	 */
	int status = 0;
	u32 mcr;
	u32 irq_mask = 0;
	int timeout;
	mcr = load_i2c_mcr_reg(dev);
	writel(mcr, dev->virtbase + I2C_MCR);
	/* load the current CR value */
	writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
			dev->virtbase + I2C_CR);
	/* enable the controller */
	i2c_set_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
	init_completion(&dev->xfer_complete);
	/* enable interrupts by settings the masks */
	irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
	/* Fill the TX FIFO with transmit data */
	fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);
	/* more data pending? let the TXFNE interrupt top up the FIFO */
	if (dev->cli.count != 0)
		irq_mask |= I2C_IT_TXFNE;
	/*
	 * check if we want to transfer a single or multiple bytes, if so
	 * set the MTDWS bit (Master Transaction Done Without Stop)
	 * to start repeated start operation
	 */
	if (dev->stop)
		irq_mask |= I2C_IT_MTD;
	else
		irq_mask |= I2C_IT_MTDWS;
	irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
	writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
			dev->virtbase + I2C_IMSCR);
	timeout = wait_for_completion_timeout(
			&dev->xfer_complete, dev->adap.timeout);
	if (timeout < 0) {
		/*
		 * Defensive only: wait_for_completion_timeout() returns
		 * an unsigned value, so this branch should not trigger.
		 */
		dev_err(&dev->pdev->dev,
			"wait_for_completion_timeout "
			"returned %d waiting for event\n", timeout);
		status = timeout;
	}
	if (timeout == 0) {
		/* Controller timed out */
		dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
				dev->cli.slave_adr);
		status = -ETIMEDOUT;
	}
	return status;
}
/**
 * nmk_i2c_xfer_one() - transmit a single I2C message
 * @dev: device with a message encoded into it
 * @flags: message flags
 *
 * Dispatches to read_i2c()/write_i2c() based on I2C_M_RD. On any
 * failure (transfer error or IRQ-handler-reported result) the abort
 * cause is decoded from the status register, the hardware is
 * re-initialized, and a negative error code is returned.
 */
static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
{
	int status;
	if (flags & I2C_M_RD) {
		/* read operation */
		dev->cli.operation = I2C_READ;
		status = read_i2c(dev);
	} else {
		/* write operation */
		dev->cli.operation = I2C_WRITE;
		status = write_i2c(dev);
	}
	if (status || (dev->result)) {
		u32 i2c_sr;
		u32 cause;
		i2c_sr = readl(dev->virtbase + I2C_SR);
		/*
		 * Check if the controller I2C operation status
		 * is set to ABORT(11b).
		 */
		if (((i2c_sr >> 2) & 0x3) == 0x3) {
			/* get the abort cause */
			cause = (i2c_sr >> 4) & 0x7;
			dev_err(&dev->pdev->dev, "%s\n",
				cause >= ARRAY_SIZE(abort_causes) ?
				"unknown reason" :
				abort_causes[cause]);
		}
		/* bring the controller back to a clean state */
		(void) init_hw(dev);
		/* prefer the transfer status; fall back to the IRQ result */
		status = status ? status : dev->result;
	}
	return status;
}
/**
* nmk_i2c_xfer() - I2C transfer function used by kernel framework
* @i2c_adap: Adapter pointer to the controller
* @msgs: Pointer to data to be written.
* @num_msgs: Number of messages to be executed
*
* This is the function called by the generic kernel i2c_transfer()
* or i2c_smbus...() API calls. Note that this code is protected by the
* semaphore set in the kernel i2c_transfer() function.
*
* NOTE:
* READ TRANSFER : We impose a restriction of the first message to be the
* index message for any read transaction.
* - a no index is coded as '0',
* - 2byte big endian index is coded as '3'
* !!! msg[0].buf holds the actual index.
* This is compatible with generic messages of smbus emulator
* that send a one byte index.
* eg. a I2C transation to read 2 bytes from index 0
* idx = 0;
* msg[0].addr = client->addr;
* msg[0].flags = 0x0;
* msg[0].len = 1;
* msg[0].buf = &idx;
*
* msg[1].addr = client->addr;
* msg[1].flags = I2C_M_RD;
* msg[1].len = 2;
* msg[1].buf = rd_buff
* i2c_transfer(adap, msg, 2);
*
* WRITE TRANSFER : The I2C standard interface interprets all data as payload.
* If you want to emulate an SMBUS write transaction put the
* index as first byte(or first and second) in the payload.
* eg. a I2C transation to write 2 bytes from index 1
* wr_buff[0] = 0x1;
* wr_buff[1] = 0x23;
* wr_buff[2] = 0x46;
* msg[0].flags = 0x0;
* msg[0].len = 3;
* msg[0].buf = wr_buff;
* i2c_transfer(adap, msg, 1);
*
* To read or write a block of data (multiple bytes) using SMBUS emulation
* please use the i2c_smbus_read_i2c_block_data()
* or i2c_smbus_write_i2c_block_data() API
*/
/*
 * Master transfer entry point (see the block comment above for the
 * message-format conventions). Powers/clocks the block for the
 * duration of the transfer and retries the whole message queue up to
 * three times. Returns the number of messages processed, or a
 * negative error code.
 */
static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
		struct i2c_msg msgs[], int num_msgs)
{
	int status;
	int i;
	struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
	int j;
	dev->busy = true;
	/* NOTE(review): regulator_enable() return value is ignored here */
	if (dev->regulator)
		regulator_enable(dev->regulator);
	pm_runtime_get_sync(&dev->pdev->dev);
	clk_enable(dev->clk);
	status = init_hw(dev);
	if (status)
		goto out;
	/* Attempt three times to send the message queue */
	for (j = 0; j < 3; j++) {
		/* setup the i2c controller */
		setup_i2c_controller(dev);
		for (i = 0; i < num_msgs; i++) {
			if (unlikely(msgs[i].flags & I2C_M_TEN)) {
				dev_err(&dev->pdev->dev,
					"10 bit addressing not supported\n");
				status = -EINVAL;
				goto out;
			}
			dev->cli.slave_adr = msgs[i].addr;
			dev->cli.buffer = msgs[i].buf;
			dev->cli.count = msgs[i].len;
			/* issue a stop only after the last message */
			dev->stop = (i < (num_msgs - 1)) ? 0 : 1;
			dev->result = 0;
			status = nmk_i2c_xfer_one(dev, msgs[i].flags);
			if (status != 0)
				break;
		}
		if (status == 0)
			break;
	}
out:
	clk_disable(dev->clk);
	pm_runtime_put_sync(&dev->pdev->dev);
	if (dev->regulator)
		regulator_disable(dev->regulator);
	dev->busy = false;
	/* return the no. messages processed */
	if (status)
		return status;
	else
		return num_msgs;
}
/**
 * disable_interrupts() - disable the interrupts
 * @dev: private data of controller
 * @irq: interrupt sources to mask (bitmask)
 *
 * Clears the given (non-reserved) bits in IMSCR, leaving every other
 * source unchanged. Always returns 0.
 */
static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
{
	u32 imscr = readl(dev->virtbase + I2C_IMSCR);

	imscr &= ~(I2C_CLEAR_ALL_INTS & IRQ_MASK(irq));
	writel(imscr, dev->virtbase + I2C_IMSCR);
	return 0;
}
/**
 * i2c_irq_handler() - interrupt routine
 * @irq: interrupt number
 * @arg: data passed to the handler
 *
 * This is the interrupt handler for the i2c driver. Currently
 * it handles the major interrupts like Rx & Tx FIFO management
 * interrupts, master transaction interrupts, arbitration and
 * bus error interrupts. The rest of the interrupts are treated as
 * unhandled. Only the lowest set bit of the masked interrupt status
 * (MISR) is serviced per invocation.
 */
static irqreturn_t i2c_irq_handler(int irq, void *arg)
{
	struct nmk_i2c_dev *dev = arg;
	u32 tft, rft;
	u32 count;
	u32 misr;
	u32 src = 0;
	/* load Tx FIFO and Rx FIFO threshold values */
	tft = readl(dev->virtbase + I2C_TFTR);
	rft = readl(dev->virtbase + I2C_RFTR);
	/* read interrupt status register */
	misr = readl(dev->virtbase + I2C_MISR);
	src = __ffs(misr);
	switch ((1 << src)) {
	/* Transmit FIFO nearly empty interrupt */
	case I2C_IT_TXFNE:
	{
		if (dev->cli.operation == I2C_READ) {
			/*
			 * in read operation why do we care for writing?
			 * so disable the Transmit FIFO interrupt
			 */
			disable_interrupts(dev, I2C_IT_TXFNE);
		} else {
			fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft));
			/*
			 * if done, close the transfer by disabling the
			 * corresponding TXFNE interrupt
			 */
			if (dev->cli.count == 0)
				disable_interrupts(dev, I2C_IT_TXFNE);
		}
	}
	break;
	/*
	 * Rx FIFO nearly full interrupt.
	 * This is set when the numer of entries in Rx FIFO is
	 * greater or equal than the threshold value programmed
	 * in RFT
	 */
	case I2C_IT_RXFNF:
		for (count = rft; count > 0; count--) {
			/* Read the Rx FIFO */
			*dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
			dev->cli.buffer++;
		}
		dev->cli.count -= rft;
		dev->cli.xfer_bytes += rft;
		break;
	/* Rx FIFO full */
	case I2C_IT_RXFF:
		for (count = MAX_I2C_FIFO_THRESHOLD; count > 0; count--) {
			*dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
			dev->cli.buffer++;
		}
		dev->cli.count -= MAX_I2C_FIFO_THRESHOLD;
		dev->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
		break;
	/* Master Transaction Done with/without stop */
	case I2C_IT_MTD:
	case I2C_IT_MTDWS:
		if (dev->cli.operation == I2C_READ) {
			/* drain whatever is left in the Rx FIFO */
			while (!(readl(dev->virtbase + I2C_RISR)
				 & I2C_IT_RXFE)) {
				if (dev->cli.count == 0)
					break;
				*dev->cli.buffer =
					readb(dev->virtbase + I2C_RFR);
				dev->cli.buffer++;
				dev->cli.count--;
				dev->cli.xfer_bytes++;
			}
		}
		disable_all_interrupts(dev);
		clear_all_interrupts(dev);
		/* any residual count means the transfer was short */
		if (dev->cli.count) {
			dev->result = -EIO;
			dev_err(&dev->pdev->dev,
				"%lu bytes still remain to be xfered\n",
				dev->cli.count);
			(void) init_hw(dev);
		}
		complete(&dev->xfer_complete);
		break;
	/* Master Arbitration lost interrupt */
	case I2C_IT_MAL:
		dev->result = -EIO;
		(void) init_hw(dev);
		i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL);
		complete(&dev->xfer_complete);
		break;
	/*
	 * Bus Error interrupt.
	 * This happens when an unexpected start/stop condition occurs
	 * during the transaction.
	 */
	case I2C_IT_BERR:
		dev->result = -EIO;
		/* get the status */
		if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT)
			(void) init_hw(dev);
		i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_BERR);
		complete(&dev->xfer_complete);
		break;
	/*
	 * Tx FIFO overrun interrupt.
	 * This is set when a write operation in Tx FIFO is performed and
	 * the Tx FIFO is full.
	 */
	case I2C_IT_TXFOVR:
		dev->result = -EIO;
		(void) init_hw(dev);
		dev_err(&dev->pdev->dev, "Tx Fifo Over run\n");
		complete(&dev->xfer_complete);
		break;
	/* unhandled interrupts by this driver - TODO*/
	case I2C_IT_TXFE:
	case I2C_IT_TXFF:
	case I2C_IT_RXFE:
	case I2C_IT_RFSR:
	case I2C_IT_RFSE:
	case I2C_IT_WTSR:
	case I2C_IT_STD:
		dev_err(&dev->pdev->dev, "unhandled Interrupt\n");
		break;
	default:
		dev_err(&dev->pdev->dev, "spurious Interrupt..\n");
		break;
	}
	return IRQ_HANDLED;
}
#ifdef CONFIG_PM
/*
 * System suspend hook: refuse to suspend while an I2C transfer is in
 * flight (dev->busy); otherwise there is no state to save.
 */
static int nmk_i2c_suspend(struct device *dev)
{
	struct nmk_i2c_dev *nmk_i2c;

	nmk_i2c = platform_get_drvdata(to_platform_device(dev));

	return nmk_i2c->busy ? -EBUSY : 0;
}
/* System resume hook: nothing to restore here, so this is a no-op. */
static int nmk_i2c_resume(struct device *dev)
{
	return 0;
}
#else
#define nmk_i2c_suspend NULL
#define nmk_i2c_resume NULL
#endif
/*
* We use noirq so that we suspend late and resume before the wakeup interrupt
* to ensure that we do the !pm_runtime_suspended() check in resume before
* there has been a regular pm runtime resume (via pm_runtime_get_sync()).
*/
static const struct dev_pm_ops nmk_i2c_pm = {
.suspend_noirq = nmk_i2c_suspend,
.resume_noirq = nmk_i2c_resume,
};
/* Advertise plain I2C master transfers plus emulated SMBus commands. */
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* I2C algorithm: master transfers are implemented by nmk_i2c_xfer(). */
static const struct i2c_algorithm nmk_i2c_algo = {
	.master_xfer	= nmk_i2c_xfer,
	.functionality	= nmk_i2c_functionality
};
/*
 * nmk_i2c_probe() - bind the driver to one Nomadik I2C controller.
 * @pdev: platform device carrying the MMIO resource, the IRQ and, via
 *        platform_data, a struct nmk_i2c_controller configuration.
 *
 * Maps the controller registers, claims the interrupt, fetches the
 * (optional) regulator and the clock, then registers a numbered I2C
 * adapter.  Returns 0 on success or a negative errno; every partially
 * acquired resource is released on the error paths below.
 */
static int __devinit nmk_i2c_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct nmk_i2c_controller *pdata =
			pdev->dev.platform_data;
	struct nmk_i2c_dev *dev;
	struct i2c_adapter *adap;

	/*
	 * pdata is dereferenced unconditionally below (timeout, clk_freq,
	 * slsu, tft, rft, sm); a board without platform data would oops,
	 * so fail the probe gracefully instead.
	 */
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		ret = -ENOMEM;
		goto err_no_mem;
	}
	dev->busy = false;
	dev->pdev = pdev;
	platform_set_drvdata(pdev, dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_no_resource;
	}

	if (request_mem_region(res->start, resource_size(res),
		DRIVER_NAME "I/O region") == NULL) {
		ret = -EBUSY;
		goto err_no_region;
	}

	dev->virtbase = ioremap(res->start, resource_size(res));
	if (!dev->virtbase) {
		ret = -ENOMEM;
		goto err_no_ioremap;
	}

	dev->irq = platform_get_irq(pdev, 0);
	ret = request_irq(dev->irq, i2c_irq_handler, 0,
				DRIVER_NAME, dev);
	if (ret) {
		dev_err(&pdev->dev, "cannot claim the irq %d\n", dev->irq);
		goto err_irq;
	}

	/* A missing regulator is tolerated: warn and carry on without it. */
	dev->regulator = regulator_get(&pdev->dev, "v-i2c");
	if (IS_ERR(dev->regulator)) {
		dev_warn(&pdev->dev, "could not get i2c regulator\n");
		dev->regulator = NULL;
	}

	pm_suspend_ignore_children(&pdev->dev, true);
	pm_runtime_enable(&pdev->dev);

	dev->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "could not get i2c clock\n");
		ret = PTR_ERR(dev->clk);
		goto err_no_clk;
	}

	adap = &dev->adap;
	adap->dev.parent = &pdev->dev;
	adap->owner = THIS_MODULE;
	adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
	adap->algo = &nmk_i2c_algo;
	/* Fall back to a 20 s timeout when the platform does not set one. */
	adap->timeout = pdata->timeout ? msecs_to_jiffies(pdata->timeout) :
		msecs_to_jiffies(20000);
	snprintf(adap->name, sizeof(adap->name),
		"Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);

	/* fetch the controller id */
	adap->nr = pdev->id;

	/* fetch the controller configuration from machine */
	dev->cfg.clk_freq = pdata->clk_freq;
	dev->cfg.slsu = pdata->slsu;
	dev->cfg.tft = pdata->tft;
	dev->cfg.rft = pdata->rft;
	dev->cfg.sm = pdata->sm;

	i2c_set_adapdata(adap, dev);

	dev_info(&pdev->dev,
		"initialize %s on virtual base %p\n",
		adap->name, dev->virtbase);

	ret = i2c_add_numbered_adapter(adap);
	if (ret) {
		dev_err(&pdev->dev, "failed to add adapter\n");
		goto err_add_adap;
	}

	return 0;

 err_add_adap:
	clk_put(dev->clk);
 err_no_clk:
	if (dev->regulator)
		regulator_put(dev->regulator);
	pm_runtime_disable(&pdev->dev);
	free_irq(dev->irq, dev);
 err_irq:
	iounmap(dev->virtbase);
 err_no_ioremap:
	release_mem_region(res->start, resource_size(res));
 err_no_region:
	platform_set_drvdata(pdev, NULL);
 err_no_resource:
	kfree(dev);
 err_no_mem:
	return ret;
}
/* Unbind the driver: undo everything nmk_i2c_probe() acquired, in reverse. */
static int __devexit nmk_i2c_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct nmk_i2c_dev *dev = platform_get_drvdata(pdev);

	/* Unregister the adapter first so no new transfers can start. */
	i2c_del_adapter(&dev->adap);
	flush_i2c_fifo(dev);
	disable_all_interrupts(dev);
	clear_all_interrupts(dev);
	/* disable the controller */
	i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
	free_irq(dev->irq, dev);
	iounmap(dev->virtbase);
	if (res)
		release_mem_region(res->start, resource_size(res));
	clk_put(dev->clk);
	if (dev->regulator)
		regulator_put(dev->regulator);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	kfree(dev);
	return 0;
}
/* Platform driver glue; PM callbacks come from nmk_i2c_pm. */
static struct platform_driver nmk_i2c_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME,
		.pm = &nmk_i2c_pm,
	},
	.probe = nmk_i2c_probe,
	.remove = __devexit_p(nmk_i2c_remove),
};
/* Module init: register the platform driver (run at subsys_initcall time). */
static int __init nmk_i2c_init(void)
{
	return platform_driver_register(&nmk_i2c_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit nmk_i2c_exit(void)
{
	platform_driver_unregister(&nmk_i2c_driver);
}
subsys_initcall(nmk_i2c_init);
module_exit(nmk_i2c_exit);
MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR");
MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| gpl-2.0 |
DooMLoRD/android_kernel_sony_msm8974ab | drivers/isdn/hisax/st5481_usb.c | 5095 | 15239 | /*
* Driver for ST5481 USB ISDN modem
*
* Author Frode Isaksen
* Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
* 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include "st5481.h"
static int st5481_isoc_flatten(struct urb *urb);
/* ======================================================================
* control pipe
*/
/*
* Send the next endpoint 0 request stored in the FIFO.
* Called either by the completion or by usb_ctrl_msg.
*/
static void usb_next_ctrl_msg(struct urb *urb,
			      struct st5481_adapter *adapter)
{
	struct st5481_ctrl *ctrl = &adapter->ctrl;
	int r_index;

	/* Only one control URB may be in flight; ctrl->busy bit 0 is the gate. */
	if (test_and_set_bit(0, &ctrl->busy)) {
		return;
	}
	/* Nothing queued: release the busy bit and bail out. */
	if ((r_index = fifo_remove(&ctrl->msg_fifo.f)) < 0) {
		test_and_clear_bit(0, &ctrl->busy);
		return;
	}
	/* The setup packet lives in the FIFO slot until completion. */
	urb->setup_packet =
		(unsigned char *)&ctrl->msg_fifo.data[r_index];

	DBG(1, "request=0x%02x,value=0x%04x,index=%x",
	    ((struct ctrl_msg *)urb->setup_packet)->dr.bRequest,
	    ((struct ctrl_msg *)urb->setup_packet)->dr.wValue,
	    ((struct ctrl_msg *)urb->setup_packet)->dr.wIndex);

	// Prepare the URB
	urb->dev = adapter->usb_dev;

	SUBMIT_URB(urb, GFP_ATOMIC);
}
/*
* Asynchronous endpoint 0 request (async version of usb_control_msg).
* The request will be queued up in a FIFO if the endpoint is busy.
*/
static void usb_ctrl_msg(struct st5481_adapter *adapter,
			 u8 request, u8 requesttype, u16 value, u16 index,
			 ctrl_complete_t complete, void *context)
{
	struct st5481_ctrl *ctrl = &adapter->ctrl;
	int w_index;
	struct ctrl_msg *ctrl_msg;

	/* Queue the request; it is dropped (with a warning) on FIFO overflow. */
	if ((w_index = fifo_add(&ctrl->msg_fifo.f)) < 0) {
		WARNING("control msg FIFO full");
		return;
	}
	/* Build the USB setup packet directly in the FIFO slot. */
	ctrl_msg = &ctrl->msg_fifo.data[w_index];

	ctrl_msg->dr.bRequestType = requesttype;
	ctrl_msg->dr.bRequest = request;
	ctrl_msg->dr.wValue = cpu_to_le16p(&value);
	ctrl_msg->dr.wIndex = cpu_to_le16p(&index);
	ctrl_msg->dr.wLength = 0;
	ctrl_msg->complete = complete;
	ctrl_msg->context = context;

	/* Kick the queue; a no-op if a request is already in flight. */
	usb_next_ctrl_msg(ctrl->urb, adapter);
}
/*
* Asynchronous endpoint 0 device request.
*/
void st5481_usb_device_ctrl_msg(struct st5481_adapter *adapter,
				u8 request, u16 value,
				ctrl_complete_t complete, void *context)
{
	/* Vendor-specific OUT request to the device; wIndex is unused (0). */
	usb_ctrl_msg(adapter, request,
		     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		     value, 0, complete, context);
}
/*
* Asynchronous pipe reset (async version of usb_clear_halt).
*/
void st5481_usb_pipe_reset(struct st5481_adapter *adapter,
			   u_char pipe,
			   ctrl_complete_t complete, void *context)
{
	DBG(1, "pipe=%02x", pipe);

	/* Clear-feature request on the endpoint; the matching
	 * usb_reset_endpoint() happens in usb_ctrl_complete(). */
	usb_ctrl_msg(adapter,
		     USB_REQ_CLEAR_FEATURE, USB_DIR_OUT | USB_RECIP_ENDPOINT,
		     0, pipe, complete, context);
}
/*
Physical level functions
*/
void st5481_ph_command(struct st5481_adapter *adapter, unsigned int command)
{
	DBG(8, "command=%s", ST5481_CMD_string(command));

	/* Physical-layer commands are issued via the TXCI device request. */
	st5481_usb_device_ctrl_msg(adapter, TXCI, command, NULL, NULL);
}
/*
* The request on endpoint 0 has completed.
* Call the user provided completion routine and try
* to send the next request.
*/
static void usb_ctrl_complete(struct urb *urb)
{
	struct st5481_adapter *adapter = urb->context;
	struct st5481_ctrl *ctrl = &adapter->ctrl;
	struct ctrl_msg *ctrl_msg;

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			/* URB was unlinked / device is going away: stop here. */
			DBG(1, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			break;
		}
	}

	ctrl_msg = (struct ctrl_msg *)urb->setup_packet;

	if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) {
		/* Special case handling for pipe reset */
		le16_to_cpus(&ctrl_msg->dr.wIndex);
		usb_reset_endpoint(adapter->usb_dev, ctrl_msg->dr.wIndex);
	}

	/* Invoke the caller-supplied completion, if any. */
	if (ctrl_msg->complete)
		ctrl_msg->complete(ctrl_msg->context);

	/* Release the busy gate before draining the queue. */
	clear_bit(0, &ctrl->busy);

	// Try to send next control message
	usb_next_ctrl_msg(urb, adapter);
	return;
}
/* ======================================================================
* interrupt pipe
*/
/*
* The interrupt endpoint will be called when any
* of the 6 registers changes state (depending on masks).
* Decode the register values and schedule a private event.
* Called at interrupt.
*/
static void usb_int_complete(struct urb *urb)
{
	u8 *data = urb->transfer_buffer;
	u8 irqbyte;
	struct st5481_adapter *adapter = urb->context;
	int j;
	int status;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		DBG(2, "urb shutting down with status: %d", urb->status);
		return;
	default:
		WARNING("nonzero urb status received: %d", urb->status);
		goto exit;
	}

	DBG_PACKET(2, data, INT_PKT_SIZE);

	if (urb->actual_length == 0) {
		goto exit;
	}

	/* D-channel transmit events */
	irqbyte = data[MPINT];
	if (irqbyte & DEN_INT)
		FsmEvent(&adapter->d_out.fsm, EV_DOUT_DEN, NULL);

	if (irqbyte & DCOLL_INT)
		FsmEvent(&adapter->d_out.fsm, EV_DOUT_COLL, NULL);

	irqbyte = data[FFINT_D];
	if (irqbyte & OUT_UNDERRUN)
		FsmEvent(&adapter->d_out.fsm, EV_DOUT_UNDERRUN, NULL);

	if (irqbyte & OUT_DOWN)
		;// printk("OUT_DOWN\n");

	/* Layer-1 state change indication: feed the low nibble of CCIST
	 * to the L1 state machine. */
	irqbyte = data[MPINT];
	if (irqbyte & RXCI_INT)
		FsmEvent(&adapter->l1m, data[CCIST] & 0x0f, NULL);

	/* Accumulate B-channel flow-control events for both channels. */
	for (j = 0; j < 2; j++)
		adapter->bcs[j].b_out.flow_event |= data[FFINT_B1 + j];

	urb->actual_length = 0;

exit:
	/* Re-arm the interrupt URB so we keep receiving events. */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status)
		WARNING("usb_submit_urb failed with result %d", status);
}
/* ======================================================================
* initialization
*/
/*
 * st5481_setup_usb() - configure the device and allocate the control and
 * interrupt endpoint URBs.
 *
 * Selects alternate setting 3 of interface 0 (2B+D operation), works
 * around a wrong wMaxPacketSize in early chip samples, and prepares
 * ctrl->urb and intr->urb.  Returns 0 on success or a negative errno;
 * partially allocated URBs are left for st5481_release_usb() to free.
 */
int st5481_setup_usb(struct st5481_adapter *adapter)
{
	struct usb_device *dev = adapter->usb_dev;
	struct st5481_ctrl *ctrl = &adapter->ctrl;
	struct st5481_intr *intr = &adapter->intr;
	struct usb_interface *intf;
	struct usb_host_interface *altsetting = NULL;
	struct usb_host_endpoint *endpoint;
	int status;
	struct urb *urb;
	u8 *buf;

	DBG(2, "");

	if ((status = usb_reset_configuration(dev)) < 0) {
		WARNING("reset_configuration failed,status=%d", status);
		return status;
	}

	intf = usb_ifnum_to_if(dev, 0);
	if (intf)
		altsetting = usb_altnum_to_altsetting(intf, 3);
	if (!altsetting)
		return -ENXIO;

	// Check if the config is sane
	if (altsetting->desc.bNumEndpoints != 7) {
		WARNING("expecting 7 got %d endpoints!", altsetting->desc.bNumEndpoints);
		return -EINVAL;
	}

	// The descriptor is wrong for some early samples of the ST5481 chip
	/* cpu_to_le16() is the canonical form; the legacy
	 * __constant_cpu_to_le16() variant is deprecated and folds to the
	 * same constant here. */
	altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
	altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);

	// Use alternative setting 3 on interface 0 to have 2B+D
	if ((status = usb_set_interface(dev, 0, 3)) < 0) {
		WARNING("usb_set_interface failed,status=%d", status);
		return status;
	}

	// Allocate URB for control endpoint
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		return -ENOMEM;
	}
	ctrl->urb = urb;

	// Fill the control URB
	usb_fill_control_urb(urb, dev,
			     usb_sndctrlpipe(dev, 0),
			     NULL, NULL, 0, usb_ctrl_complete, adapter);

	fifo_init(&ctrl->msg_fifo.f, ARRAY_SIZE(ctrl->msg_fifo.data));

	// Allocate URBs and buffers for interrupt endpoint
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		return -ENOMEM;
	}
	intr->urb = urb;

	buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
	if (!buf) {
		return -ENOMEM;
	}

	endpoint = &altsetting->endpoint[EP_INT-1];

	// Fill the interrupt URB
	usb_fill_int_urb(urb, dev,
			 usb_rcvintpipe(dev, endpoint->desc.bEndpointAddress),
			 buf, INT_PKT_SIZE,
			 usb_int_complete, adapter,
			 endpoint->desc.bInterval);

	return 0;
}
/*
* Release buffers and URBs for the interrupt and control
* endpoint.
*/
void st5481_release_usb(struct st5481_adapter *adapter)
{
	struct st5481_intr *intr = &adapter->intr;
	struct st5481_ctrl *ctrl = &adapter->ctrl;

	DBG(1, "");

	// Stop and free Control and Interrupt URBs
	/* usb_kill_urb() waits for the completion handler, so freeing the
	 * transfer buffer afterwards is safe. */
	usb_kill_urb(ctrl->urb);
	kfree(ctrl->urb->transfer_buffer);
	usb_free_urb(ctrl->urb);
	ctrl->urb = NULL;

	usb_kill_urb(intr->urb);
	kfree(intr->urb->transfer_buffer);
	usb_free_urb(intr->urb);
	intr->urb = NULL;
}
/*
* Initialize the adapter.
*/
void st5481_start(struct st5481_adapter *adapter)
{
	/* Register/value pairs written to the chip at start-up;
	 * terminated by a 0 request byte. */
	static const u8 init_cmd_table[] = {
		SET_DEFAULT, 0,
		STT, 0,
		SDA_MIN, 0x0d,
		SDA_MAX, 0x29,
		SDELAY_VALUE, 0x14,
		GPIO_DIR, 0x01,
		GPIO_OUT, RED_LED,
		// FFCTRL_OUT_D,4,
		// FFCTRH_OUT_D,12,
		FFCTRL_OUT_B1, 6,
		FFCTRH_OUT_B1, 20,
		FFCTRL_OUT_B2, 6,
		FFCTRH_OUT_B2, 20,
		MPMSK, RXCI_INT + DEN_INT + DCOLL_INT,
		0
	};
	struct st5481_intr *intr = &adapter->intr;
	int i = 0;
	u8 request, value;

	DBG(8, "");

	adapter->leds = RED_LED;

	// Start receiving on the interrupt endpoint
	SUBMIT_URB(intr->urb, GFP_KERNEL);

	/* Issue every init command asynchronously (no completion waited on). */
	while ((request = init_cmd_table[i++])) {
		value = init_cmd_table[i++];
		st5481_usb_device_ctrl_msg(adapter, request, value, NULL, NULL);
	}
	/* Finally power up the physical layer. */
	st5481_ph_command(adapter, ST5481_CMD_PUP);
}
/*
* Reset the adapter to default values.
*/
void st5481_stop(struct st5481_adapter *adapter)
{
	DBG(8, "");

	/* SET_DEFAULT restores the adapter's default register values. */
	st5481_usb_device_ctrl_msg(adapter, SET_DEFAULT, 0, NULL, NULL);
}
/* ======================================================================
* isochronous USB helpers
*/
/*
 * Populate an URB for an isochronous transfer: common fields plus one
 * frame descriptor per packet, each packet_size bytes, laid out
 * back-to-back in the transfer buffer.
 */
static void
fill_isoc_urb(struct urb *urb, struct usb_device *dev,
	      unsigned int pipe, void *buf, int num_packets,
	      int packet_size, usb_complete_t complete,
	      void *context)
{
	struct usb_iso_packet_descriptor *desc;
	int i;

	urb->dev = dev;
	urb->pipe = pipe;
	urb->interval = 1;
	urb->transfer_buffer = buf;
	urb->number_of_packets = num_packets;
	urb->transfer_buffer_length = num_packets * packet_size;
	urb->actual_length = 0;
	urb->complete = complete;
	urb->context = context;
	urb->transfer_flags = URB_ISO_ASAP;

	for (i = 0; i < num_packets; i++) {
		desc = &urb->iso_frame_desc[i];
		desc->offset = i * packet_size;
		desc->length = packet_size;
		desc->actual_length = 0;
	}
}
int
st5481_setup_isocpipes(struct urb *urb[2], struct usb_device *dev,
		       unsigned int pipe, int num_packets,
		       int packet_size, int buf_size,
		       usb_complete_t complete, void *context)
{
	int j, retval;
	unsigned char *buf;

	/* Two URBs per pipe for double buffering. */
	for (j = 0; j < 2; j++) {
		retval = -ENOMEM;
		urb[j] = usb_alloc_urb(num_packets, GFP_KERNEL);
		if (!urb[j])
			goto err;

		// Allocate memory for 2000bytes/sec (16Kb/s)
		buf = kmalloc(buf_size, GFP_KERNEL);
		if (!buf)
			goto err;

		// Fill the isochronous URB
		fill_isoc_urb(urb[j], dev, pipe, buf,
			      num_packets, packet_size, complete,
			      context);
	}
	return 0;

err:
	/* Undo any partial allocation (kfree/usb_free_urb accept NULL). */
	for (j = 0; j < 2; j++) {
		if (urb[j]) {
			kfree(urb[j]->transfer_buffer);
			urb[j]->transfer_buffer = NULL;
			usb_free_urb(urb[j]);
			urb[j] = NULL;
		}
	}
	return retval;
}
void st5481_release_isocpipes(struct urb *urb[2])
{
	int j;

	for (j = 0; j < 2; j++) {
		/* Cancel the URB before freeing its transfer buffer. */
		usb_kill_urb(urb[j]);
		kfree(urb[j]->transfer_buffer);
		usb_free_urb(urb[j]);
		urb[j] = NULL;
	}
}
/*
* Decode frames received on the B/D channel.
* Note that this function will be called continuously
* with 64Kbit/s / 16Kbit/s of data and hence it will be
* called 50 times per second with 20 ISOC descriptors.
* Called at interrupt.
*/
static void usb_in_complete(struct urb *urb)
{
	struct st5481_in *in = urb->context;
	unsigned char *ptr;
	struct sk_buff *skb;
	int len, count, status;

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			/* URB was killed: do not resubmit. */
			DBG(1, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			break;
		}
	}

	DBG_ISO_PACKET(0x80, urb);

	/* Compact the per-packet ISO data, then decode it. */
	len = st5481_isoc_flatten(urb);
	ptr = urb->transfer_buffer;
	while (len > 0) {
		if (in->mode == L1_MODE_TRANS) {
			/* Transparent mode: pass raw bytes through. */
			memcpy(in->rcvbuf, ptr, len);
			status = len;
			len = 0;
		} else {
			/* HDLC mode: decode one frame; count is how many
			 * input bytes were consumed. */
			status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
						 in->rcvbuf, in->bufsize);
			ptr += count;
			len -= count;
		}

		if (status > 0) {
			// Good frame received
			DBG(4, "count=%d", status);
			DBG_PACKET(0x400, in->rcvbuf, status);
			if (!(skb = dev_alloc_skb(status))) {
				WARNING("receive out of memory\n");
				break;
			}
			memcpy(skb_put(skb, status), in->rcvbuf, status);
			/* Hand the frame up to the HiSax layer 2. */
			in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
		} else if (status == -HDLC_CRC_ERROR) {
			INFO("CRC error");
		} else if (status == -HDLC_FRAMING_ERROR) {
			INFO("framing error");
		} else if (status == -HDLC_LENGTH_ERROR) {
			INFO("length error");
		}
	}

	// Prepare URB for next transfer
	urb->dev = in->adapter->usb_dev;
	urb->actual_length = 0;

	SUBMIT_URB(urb, GFP_ATOMIC);
}
int st5481_setup_in(struct st5481_in *in)
{
	struct usb_device *dev = in->adapter->usb_dev;
	int retval;

	DBG(4, "");

	/* Buffer that receives one decoded frame. */
	in->rcvbuf = kmalloc(in->bufsize, GFP_KERNEL);
	retval = -ENOMEM;
	if (!in->rcvbuf)
		goto err;

	retval = st5481_setup_isocpipes(in->urb, dev,
					usb_rcvisocpipe(dev, in->ep),
					in->num_packets, in->packet_size,
					in->num_packets * in->packet_size,
					usb_in_complete, in);
	if (retval)
		goto err_free;
	return 0;

err_free:
	kfree(in->rcvbuf);
err:
	return retval;
}
void st5481_release_in(struct st5481_in *in)
{
	DBG(2, "");

	/* NOTE(review): in->rcvbuf allocated by st5481_setup_in() is not
	 * freed here — presumably released elsewhere; verify for leaks. */
	st5481_release_isocpipes(in->urb);
}
/*
* Make the transfer_buffer contiguous by
* copying from the iso descriptors if necessary.
*/
/*
 * Compact the received isochronous data so the payload is contiguous at
 * the start of the transfer buffer.  Returns the total payload length,
 * or a negative URB/packet status on error.
 */
static int st5481_isoc_flatten(struct urb *urb)
{
	struct usb_iso_packet_descriptor *desc;
	unsigned char *from, *to;
	unsigned int nbytes;
	int i;

	if (urb->status < 0)
		return urb->status;

	to = urb->transfer_buffer;
	for (i = 0; i < urb->number_of_packets; i++) {
		desc = &urb->iso_frame_desc[i];
		if (desc->status < 0)
			return (desc->status);

		nbytes = desc->actual_length;
		desc->actual_length = 0;
		from = urb->transfer_buffer + desc->offset;

		if (from == to) {
			/* Already contiguous: just advance the write head. */
			to += nbytes;
		} else {
			/* Shift this packet's bytes down (forward byte copy
			 * is safe: source is always ahead of destination). */
			while (nbytes--)
				*to++ = *from++;
		}
	}

	// Return size of flattened buffer
	return (to - (unsigned char *)urb->transfer_buffer);
}
static void st5481_start_rcv(void *context)
{
	struct st5481_in *in = context;
	struct st5481_adapter *adapter = in->adapter;

	DBG(4, "");

	/* Submit both ISO URBs to begin double-buffered reception. */
	in->urb[0]->dev = adapter->usb_dev;
	SUBMIT_URB(in->urb[0], GFP_KERNEL);

	in->urb[1]->dev = adapter->usb_dev;
	SUBMIT_URB(in->urb[1], GFP_KERNEL);
}
void st5481_in_mode(struct st5481_in *in, int mode)
{
	if (in->mode == mode)
		return;

	in->mode = mode;

	/* Stop reception while reconfiguring. */
	usb_unlink_urb(in->urb[0]);
	usb_unlink_urb(in->urb[1]);

	if (in->mode != L1_MODE_NULL) {
		if (in->mode != L1_MODE_TRANS) {
			/* (Re)initialise the HDLC receiver for the new mode. */
			u32 features = HDLC_BITREVERSE;

			if (in->mode == L1_MODE_HDLC_56K)
				features |= HDLC_56KBIT;
			isdnhdlc_rcv_init(&in->hdlc_state, features);
		}
		st5481_usb_pipe_reset(in->adapter, in->ep, NULL, NULL);
		/* Write the in->counter device register, then restart RX. */
		st5481_usb_device_ctrl_msg(in->adapter, in->counter,
					   in->packet_size,
					   NULL, NULL);
		st5481_start_rcv(in);
	} else {
		/* Channel disabled: write 0 to the counter register. */
		st5481_usb_device_ctrl_msg(in->adapter, in->counter,
					   0, NULL, NULL);
	}
}
| gpl-2.0 |
HackerOO7/android_kernel_huawei_u8951 | drivers/misc/sgi-xp/xpnet.c | 7911 | 17928 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Cross Partition Network Interface (XPNET) support
*
* XPNET provides a virtual network layered on top of the Cross
* Partition communication layer.
*
* XPNET provides direct point-to-point and broadcast-like support
* for an ethernet-like device. The ethernet broadcast medium is
* replaced with a point-to-point message structure which passes
* pointers to a DMA-capable block that a remote partition should
* retrieve and pass to the upper level networking layer.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "xp.h"
/*
* The message payload transferred by XPC.
*
* buf_pa is the physical address where the DMA should pull from.
*
* NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
* cacheline boundary. To accomplish this, we record the number of
* bytes from the beginning of the first cacheline to the first useful
* byte of the skb (leadin_ignore) and the number of bytes from the
* last useful byte of the skb to the end of the last cacheline
* (tailout_ignore).
*
* size is the number of bytes to transfer which includes the skb->len
* (useful bytes of the senders skb) plus the leadin and tailout
*/
struct xpnet_message {
u16 version; /* Version for this message */
u16 embedded_bytes; /* #of bytes embedded in XPC message */
u32 magic; /* Special number indicating this is xpnet */
unsigned long buf_pa; /* phys address of buffer to retrieve */
u32 size; /* #of bytes in buffer */
u8 leadin_ignore; /* #of bytes to ignore at the beginning */
u8 tailout_ignore; /* #of bytes to ignore at the end */
unsigned char data; /* body of small packets */
};
/*
* Determine the size of our message, the cacheline aligned size,
* and then the number of message will request from XPC.
*
* XPC expects each message to exist in an individual cacheline.
*/
#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE
#define XPNET_MSG_DATA_MAX \
(XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
/*
* Version number of XPNET implementation. XPNET can always talk to versions
* with same major #, and never talk to versions with a different version.
*/
#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
&& (msg->magic == XPNET_MAGIC))
#define XPNET_DEVICE_NAME "xp0"
/*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the
* notification function is called, we use the cookie to decide
* whether all outstanding message sends have completed. The skb can
* then be released.
*/
struct xpnet_pending_msg {
struct sk_buff *skb;
atomic_t use_count;
};
struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
* our bitmask of partitions to which we broadcast.
*/
static unsigned long *xpnet_broadcast_partitions;
/* protect above */
static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/*
* Since the Block Transfer Engine (BTE) is being used for the transfer
* and it relies upon cache-line size transfers, we need to reserve at
* least one cache-line for head and tail alignment. The BTE is
* limited to 8MB transfers.
*
* Testing has shown that changing MTU to greater than 64KB has no effect
* on TCP as the two sides negotiate a Max Segment Size that is limited
 * to 64K. Other protocols may use packets greater than this, but for
* now, the default is 64KB.
*/
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
/* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL)
/*
* The partid is encapsulated in the MAC address beginning in the following
* octet and it consists of two octets.
*/
#define XPNET_PARTID_OCTET 2
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
struct device xpnet_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
struct device *xpnet = &xpnet_dbg_subname;
/*
 * Packet was received by XPC and forwarded to us.
*/
static void
xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{
	struct sk_buff *skb;
	void *dst;
	enum xp_retval ret;

	if (!XPNET_VALID_MSG(msg)) {
		/*
		 * Packet with a different XPC version.  Ignore.
		 */
		xpc_received(partid, channel, (void *)msg);
		xpnet_device->stats.rx_errors++;
		return;
	}
	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
		msg->leadin_ignore, msg->tailout_ignore);

	/* reserve an extra cache line */
	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
	if (!skb) {
		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
			msg->size + L1_CACHE_BYTES);
		xpc_received(partid, channel, (void *)msg);
		xpnet_device->stats.rx_errors++;
		return;
	}

	/*
	 * The allocated skb has some reserved space.
	 * In order to use xp_remote_memcpy(), we need to get the
	 * skb->data pointer moved forward.
	 */
	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
					    (L1_CACHE_BYTES - 1)) +
			  msg->leadin_ignore));

	/*
	 * Update the tail pointer to indicate data actually
	 * transferred.
	 */
	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));

	/*
	 * Move the data over from the other side.
	 */
	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
	    (msg->embedded_bytes != 0)) {
		/* Small packet: payload was embedded in the XPC message. */
		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
			"%lu)\n", skb->data, &msg->data,
			(size_t)msg->embedded_bytes);
		skb_copy_to_linear_data(skb, &msg->data,
					(size_t)msg->embedded_bytes);
	} else {
		/* Large packet: pull it from the remote partition,
		 * cache-line aligned at both ends. */
		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
			(void *)msg->buf_pa, msg->size);
		ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
		if (ret != xpSuccess) {
			/*
			 * !!! Need better way of cleaning skb.  Currently skb
			 * !!! appears in_use and we can't just call
			 * !!! dev_kfree_skb.
			 */
			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
				"returned error=0x%x\n", dst,
				(void *)msg->buf_pa, msg->size, ret);
			xpc_received(partid, channel, (void *)msg);
			xpnet_device->stats.rx_errors++;
			return;
		}
	}
	dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len);
	skb->protocol = eth_type_trans(skb, xpnet_device);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	dev_dbg(xpnet, "passing skb to network layer\n"
		"\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n",
		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
		skb_end_pointer(skb), skb->len);
	xpnet_device->stats.rx_packets++;
	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
	/* Hand off to the network stack, then ack the XPC message. */
	netif_rx_ni(skb);
	xpc_received(partid, channel, (void *)msg);
}
/*
* This is the handler which XPC calls during any sort of change in
* state or message reception on a connection.
*/
static void
xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
			  void *data, void *key)
{
	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(channel != XPC_NET_CHANNEL);

	switch (reason) {
	case xpMsgReceived:	/* message received */
		DBUG_ON(data == NULL);
		xpnet_receive(partid, channel, (struct xpnet_message *)data);
		break;
	case xpConnected:	/* connection completed to a partition */
		/* Add the partition to the broadcast set. */
		spin_lock_bh(&xpnet_broadcast_lock);
		__set_bit(partid, xpnet_broadcast_partitions);
		spin_unlock_bh(&xpnet_broadcast_lock);
		/* At least one peer is now reachable. */
		netif_carrier_on(xpnet_device);
		dev_dbg(xpnet, "%s connected to partition %d\n",
			xpnet_device->name, partid);
		break;
	default:
		/* Any other reason is treated as a disconnect. */
		spin_lock_bh(&xpnet_broadcast_lock);
		__clear_bit(partid, xpnet_broadcast_partitions);
		spin_unlock_bh(&xpnet_broadcast_lock);
		/* Drop carrier when no partitions remain connected. */
		if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions,
				 xp_max_npartitions)) {
			netif_carrier_off(xpnet_device);
		}
		dev_dbg(xpnet, "%s disconnected from partition %d\n",
			xpnet_device->name, partid);
		break;
	}
}
static int
xpnet_dev_open(struct net_device *dev)
{
	enum xp_retval ret;

	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
		"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
		(unsigned long)XPNET_MSG_SIZE,
		(unsigned long)XPNET_MSG_NENTRIES,
		(unsigned long)XPNET_MAX_KTHREADS,
		(unsigned long)XPNET_MAX_IDLE_KTHREADS);

	/* Register our channel handler with XPC. */
	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
	if (ret != xpSuccess) {
		dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
			"ret=%d\n", dev->name, ret);
		/* NOTE(review): every xpc_connect failure is reported as
		 * -ENOMEM regardless of the actual reason. */
		return -ENOMEM;
	}
	dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
	return 0;
}
static int
xpnet_dev_stop(struct net_device *dev)
{
	/* Tear down the XPC channel. */
	xpc_disconnect(XPC_NET_CHANNEL);
	dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
	return 0;
}
/*
 * Validate and apply a new MTU.  Anything from 68 (minimum TCP + IP +
 * MAC header) up to XPNET_MAX_MTU is accepted.
 */
static int
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu >= 68 && new_mtu <= XPNET_MAX_MTU) {
		dev->mtu = new_mtu;
		dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
		return 0;
	}

	dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
		"between 68 and %ld\n", dev->name, new_mtu,
		XPNET_MAX_MTU);
	return -EINVAL;
}
/*
* Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with
* our side. When all recipients are done processing, we
* release the skb and then release our pending message structure.
*/
/*
 * XPC notify callback for one outstanding send.  Drops this send's
 * reference on the pending-message cookie; the last reference frees the
 * skb and the cookie itself.
 */
static void
xpnet_send_completed(enum xp_retval reason, short partid, int channel,
		     void *__qm)
{
	struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;

	DBUG_ON(queued_msg == NULL);

	dev_dbg(xpnet, "message to %d notified with reason %d\n",
		partid, reason);

	if (atomic_dec_return(&queued_msg->use_count) == 0) {
		/* fixed typo: was "=-x%p", should print as "0x<addr>" */
		dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
			(void *)queued_msg->skb->head);

		dev_kfree_skb_any(queued_msg->skb);
		kfree(queued_msg);
	}
}
static void
xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
	   u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
{
	u8 msg_buffer[XPNET_MSG_SIZE];
	struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
	u16 msg_size = sizeof(struct xpnet_message);
	enum xp_retval ret;

	msg->embedded_bytes = embedded_bytes;
	if (unlikely(embedded_bytes != 0)) {
		/* Small packet: copy the payload into the message itself. */
		msg->version = XPNET_VERSION_EMBED;
		dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
			&msg->data, skb->data, (size_t)embedded_bytes);
		skb_copy_from_linear_data(skb, &msg->data,
					  (size_t)embedded_bytes);
		/* -1: struct xpnet_message already contains one data byte. */
		msg_size += embedded_bytes - 1;
	} else {
		/* Large packet: the peer will DMA-pull from buf_pa. */
		msg->version = XPNET_VERSION;
	}
	msg->magic = XPNET_MAGIC;
	msg->size = end_addr - start_addr;
	msg->leadin_ignore = (u64)skb->data - start_addr;
	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
	msg->buf_pa = xp_pa((void *)start_addr);

	dev_dbg(xpnet, "sending XPC message to %d:%d\n"
		"msg->buf_pa=0x%lx, msg->size=%u, "
		"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
		dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
		msg->leadin_ignore, msg->tailout_ignore);

	/* Balanced by xpnet_send_completed() (or below on send failure). */
	atomic_inc(&queued_msg->use_count);

	ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
			      msg_size, xpnet_send_completed, queued_msg);
	if (unlikely(ret != xpSuccess))
		atomic_dec(&queued_msg->use_count);
}
/*
* Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions
* which have connected with us and are targets of this packet.
*
* MAC-NOTE: For the XPNET driver, the MAC address contains the
* destination partid. If the destination partid octets are 0xffff,
* this packet is to be broadcast to all connected partitions.
*/
static int
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xpnet_pending_msg *queued_msg;
	u64 start_addr, end_addr;
	short dest_partid;
	u16 embedded_bytes = 0;

	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len);

	/* NOTE(review): 0x33 looks like the 33:33 IPv6-multicast MAC
	 * prefix; such frames are dropped without error — confirm. */
	if (skb->data[0] == 0x33) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* nothing needed to be done */
	}

	/*
	 * The xpnet_pending_msg tracks how many outstanding
	 * xpc_send_notifies are relying on this skb.  When none
	 * remain, release the skb.
	 */
	queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
	if (queued_msg == NULL) {
		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
			 "packet\n", sizeof(struct xpnet_pending_msg));
		dev->stats.tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* get the beginning of the first cacheline and end of last */
	start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
	end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));

	/* calculate how many bytes to embed in the XPC message */
	if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
		/* skb->data does fit so embed */
		embedded_bytes = skb->len;
	}

	/*
	 * Since the send occurs asynchronously, we set the count to one
	 * and begin sending.  Any sends that happen to complete before
	 * we are done sending will not free the skb.  We will be left
	 * with that task during exit.  This also handles the case of
	 * a packet destined for a partition which is no longer up.
	 */
	atomic_set(&queued_msg->use_count, 1);
	queued_msg->skb = skb;

	if (skb->data[0] == 0xff) {
		/* we are being asked to broadcast to all partitions */
		for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
				 xp_max_npartitions) {
			xpnet_send(skb, queued_msg, start_addr, end_addr,
				   embedded_bytes, dest_partid);
		}
	} else {
		/* destination partid is encoded big-endian in the dest MAC */
		dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
		dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;

		if (dest_partid >= 0 &&
		    dest_partid < xp_max_npartitions &&
		    test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {
			xpnet_send(skb, queued_msg, start_addr, end_addr,
				   embedded_bytes, dest_partid);
		}
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* drop our initial reference; frees everything if no send pending */
	if (atomic_dec_return(&queued_msg->use_count) == 0) {
		dev_kfree_skb(skb);
		kfree(queued_msg);
	}

	return NETDEV_TX_OK;
}
/*
* Deal with transmit timeouts coming from the network layer.
*/
static void
xpnet_dev_tx_timeout(struct net_device *dev)
{
dev->stats.tx_errors++;
}
/* net_device callbacks for the XPNET pseudo-Ethernet interface. */
static const struct net_device_ops xpnet_netdev_ops = {
	.ndo_open = xpnet_dev_open,
	.ndo_stop = xpnet_dev_stop,
	.ndo_start_xmit = xpnet_dev_hard_start_xmit,
	.ndo_change_mtu = xpnet_dev_change_mtu,
	.ndo_tx_timeout = xpnet_dev_tx_timeout,
	.ndo_set_mac_address = eth_mac_addr,	/* generic Ethernet helpers */
	.ndo_validate_addr = eth_validate_addr,
};
/*
 * Module init: allocate the per-partition broadcast bitmap, set up and
 * register the xpnet pseudo-Ethernet device.  Returns 0 on success or a
 * negative errno; on failure all allocations are released.
 */
static int __init
xpnet_init(void)
{
	int result;

	/* cross-partition support exists only on SGI shub and UV hardware */
	if (!is_shub() && !is_uv())
		return -ENODEV;

	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);

	/* per-partition bitmap consulted by the transmit path */
	xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) *
					     sizeof(long), GFP_KERNEL);
	if (xpnet_broadcast_partitions == NULL)
		return -ENOMEM;

	/*
	 * use ether_setup() to init the majority of our device
	 * structure and then override the necessary pieces.
	 */
	xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
	if (xpnet_device == NULL) {
		kfree(xpnet_broadcast_partitions);
		return -ENOMEM;
	}

	netif_carrier_off(xpnet_device);

	xpnet_device->netdev_ops = &xpnet_netdev_ops;
	xpnet_device->mtu = XPNET_DEF_MTU;

	/*
	 * Multicast assumes the LSB of the first octet is set for multicast
	 * MAC addresses.  We chose the first octet of the MAC to be unlikely
	 * to collide with any vendor's officially issued MAC.
	 */
	xpnet_device->dev_addr[0] = 0x02;	/* locally administered, no OUI */

	/* encode our partition id (big-endian) into the MAC address */
	xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
	xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);

	/*
	 * ether_setup() sets this to a multicast device.  We are
	 * really not supporting multicast at this time.
	 */
	xpnet_device->flags &= ~IFF_MULTICAST;

	/*
	 * No need to checksum as it is a DMA transfer.  The BTE will
	 * report an error if the data is not retrievable and the
	 * packet will be dropped.
	 */
	xpnet_device->features = NETIF_F_HW_CSUM;

	result = register_netdev(xpnet_device);
	if (result != 0) {
		/* undo the allocations above on failure */
		free_netdev(xpnet_device);
		kfree(xpnet_broadcast_partitions);
	}

	return result;
}
module_init(xpnet_init);
/*
 * Module exit: unregister and free the xpnet device, then release the
 * broadcast-partition bitmap.
 */
static void __exit
xpnet_exit(void)
{
	dev_info(xpnet, "unregistering network device %s\n",
		 xpnet_device->name);

	unregister_netdev(xpnet_device);
	free_netdev(xpnet_device);
	kfree(xpnet_broadcast_partitions);
}
module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
FrancescoCG/CrazySuperKernel-CM13-KLTE | arch/sparc/kernel/sigutil_64.c | 8679 | 2573 | #include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/switch_to.h>
#include "sigutil.h"
/*
 * Copy the current task's saved FPU state out to the user-space
 * __siginfo_fpu_t @fpu during signal frame setup.  Only the halves of
 * the FP register file marked dirty in fpsaved[0] (FPRS_DL/FPRS_DU)
 * are written; %fsr, %gsr and the fprs value itself always are.
 * Returns non-zero if any user-space store faulted.
 */
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);
	return err;
}
/*
 * Reload the task's FPU state from the user-space __siginfo_fpu_t @fpu
 * during sigreturn.  Clears %fprs and the PEF bit in tstate so the FPU
 * is re-enabled lazily, copies back only the halves flagged in the saved
 * fprs, and merges those flags into fpsaved[0].  Returns non-zero if any
 * user-space load faulted.
 */
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
				      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
				      (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	/* NOTE(review): fprs is used even if __get_user above failed;
	 * err is non-zero in that case so the caller should discard. */
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
/*
 * Copy @wsaved pending register windows (and their stack pointers) from
 * the current thread into the user-space __siginfo_rwin_t @rwin during
 * signal frame setup.  Returns non-zero if any user-space store faulted.
 *
 * Fix: "¤t_thread_info()" was mojibake for "&current_thread_info()"
 * (the "&curr" prefix had been eaten by HTML-entity decoding), which
 * does not compile.
 */
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
	int i, err = __put_user(wsaved, &rwin->wsaved);

	for (i = 0; i < wsaved; i++) {
		struct reg_window *rp = &current_thread_info()->reg_window[i];
		unsigned long fp = current_thread_info()->rwbuf_stkptrs[i];

		err |= copy_to_user(&rwin->reg_window[i], rp,
				    sizeof(struct reg_window));
		err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
	}
	return err;
}
/*
 * Reload pending register windows from the user-space __siginfo_rwin_t
 * @rp during sigreturn, then flush them to the user stack.  Returns 0 on
 * success, -EFAULT on a bad count or if the windows could not be pushed,
 * or the non-zero copy result on a faulting read.
 *
 * Fix: the return value of __get_user(wsaved, ...) was ignored, so a
 * faulting read left wsaved uninitialized and it was then used as a
 * loop bound over t->reg_window[].  Fail with -EFAULT instead.
 */
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
	struct thread_info *t = current_thread_info();
	int i, wsaved, err;

	if (__get_user(wsaved, &rp->wsaved) || wsaved > NSWINS)
		return -EFAULT;

	err = 0;
	for (i = 0; i < wsaved; i++) {
		err |= copy_from_user(&t->reg_window[i],
				      &rp->reg_window[i],
				      sizeof(struct reg_window));
		err |= __get_user(t->rwbuf_stkptrs[i],
				  &rp->rwbuf_stkptrs[i]);
	}
	if (err)
		return err;

	set_thread_wsaved(wsaved);
	synchronize_user_stack();
	/* any window still buffered could not be written to the stack */
	if (get_thread_wsaved())
		return -EFAULT;
	return 0;
}
| gpl-2.0 |
cameron581/kernel_msm | drivers/net/wireless/hostap/hostap_pci.c | 8935 | 11237 | #define PRISM2_PCI
/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
* driver patches from Reyk Floeter <reyk@vantronix.net> and
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "hostap_wlan.h"
static char *dev_info = "hostap_pci";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
"PCI cards.");
MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
MODULE_LICENSE("GPL");
/* struct local_info::hw_priv */
struct hostap_pci_priv {
	void __iomem *mem_start;	/* ioremapped PCI BAR 0 of the card */
};
/* FIX: do we need mb/wmb/rmb with memory operations? */
/* PCI IDs this driver binds to: {vendor, device, subvendor, subdevice} */
static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
	/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
	{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
	/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
	{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
	/* Samsung MagicLAN SWL-2210P */
	{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }	/* terminator */
};
#ifdef PRISM2_IO_DEBUG
/*
 * Debug variant of the byte-write register accessor: logs the access
 * via prism2_io_debug_add() and performs the MMIO write under
 * local->lock so the log entry and the write stay ordered.
 */
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
	writeb(v, hw_priv->mem_start + a);
	spin_unlock_irqrestore(&local->lock, flags);
}
/* Debug byte-read accessor: read under local->lock, then log the value. */
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;
	u8 v;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	v = readb(hw_priv->mem_start + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}
/* Debug word-write accessor: log the access, then write, under the lock. */
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
	writew(v, hw_priv->mem_start + a);
	spin_unlock_irqrestore(&local->lock, flags);
}
/* Debug word-read accessor: read under local->lock, then log the value. */
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;
	local_info_t *local;
	unsigned long flags;
	u16 v;

	iface = netdev_priv(dev);
	local = iface->local;
	hw_priv = local->hw_priv;

	spin_lock_irqsave(&local->lock, flags);
	v = readw(hw_priv->mem_start + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))
#else /* PRISM2_IO_DEBUG */
/* Write one byte to the memory-mapped Prism2 register at offset @a. */
static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writeb(v, hw_priv->mem_start + a);
}
/* Read one byte from the memory-mapped Prism2 register at offset @a. */
static inline u8 hfa384x_inb(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readb(hw_priv->mem_start + a);
}
/* Write one 16-bit word to the memory-mapped register at offset @a. */
static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writew(v, hw_priv->mem_start + a);
}
/* Read one 16-bit word from the memory-mapped register at offset @a. */
static inline u16 hfa384x_inw(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readw(hw_priv->mem_start + a);
}
#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))
#endif /* PRISM2_IO_DEBUG */
/*
 * Read @len bytes from the selected BAP data port into @buf: whole
 * 16-bit words first, then a single trailing byte if @len is odd.
 * Always returns 0.
 */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
			    int len)
{
	u16 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	__le16 *pos = buf;

	while (len > 1) {
		*pos++ = HFA384X_INW_DATA(d_off);
		len -= 2;
	}
	if (len & 1)
		*((char *) pos) = HFA384X_INB(d_off);

	return 0;
}
/*
 * Write @len bytes from @buf to the selected BAP data port: whole
 * 16-bit words first, then a single trailing byte if @len is odd.
 * Always returns 0.
 */
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
	u16 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	__le16 *pos = buf;

	while (len > 1) {
		HFA384X_OUTW_DATA(*pos++, d_off);
		len -= 2;
	}
	if (len & 1)
		HFA384X_OUTB(*((char *) pos), d_off);

	return 0;
}
/* FIX: This might change at some point.. */
#include "hostap_hw.c"
/*
 * Soft-reset the card via the PCI COR register: pulse bit 0x0080 with a
 * settle delay, then wait for firmware init (CMD:BUSY clear).  Two delay
 * profiles exist; the short one is the default (see comment below).
 */
static void prism2_pci_cor_sreset(local_info_t *local)
{
	struct net_device *dev = local->dev;
	u16 reg;

	reg = HFA384X_INB(HFA384X_PCICOR_OFF);
	printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);

	/* linux-wlan-ng uses extremely long hold and settle times for
	 * COR sreset. A comment in the driver code mentions that the long
	 * delays appear to be necessary. However, at least IBM 22P6901 seems
	 * to work fine with shorter delays.
	 *
	 * Longer delays can be configured by uncommenting following line: */
/* #define PRISM2_PCI_USE_LONG_DELAYS */

#ifdef PRISM2_PCI_USE_LONG_DELAYS
	int i;

	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
	mdelay(250);

	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
	mdelay(500);

	/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
	i = 2000000 / 10;
	while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
		udelay(10);
#else /* PRISM2_PCI_USE_LONG_DELAYS */

	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
	mdelay(2);
	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
	mdelay(2);

#endif /* PRISM2_PCI_USE_LONG_DELAYS */

	/* best-effort: log if firmware is still busy after the reset */
	if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
		printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
	}
}
/*
 * Genesis-mode reset sequence: assert reset via COR, load the hardware
 * configuration register value @hcr, then release reset, with fixed
 * 10 ms settle delays between the steps.
 */
static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
{
	struct net_device *dev = local->dev;

	HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
	mdelay(10);
	HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
	mdelay(10);
	HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
	mdelay(10);
}
/* Hardware-access hooks handed to the shared hostap core (hostap_hw.c). */
static struct prism2_helper_functions prism2_pci_funcs =
{
	.card_present = NULL,	/* no card-present hook for PCI */
	.cor_sreset = prism2_pci_cor_sreset,
	.genesis_reset = prism2_pci_genesis_reset,
	.hw_type = HOSTAP_HW_PCI,
};
/*
 * PCI probe: enable the device, map BAR 0, create the hostap local data,
 * reset the card, hook the (shared) interrupt and bring the hardware up.
 * Returns hostap_hw_ready()'s result on success, -ENODEV/-ENOMEM on
 * failure.  Error unwinding: "fail" releases IRQ/mapping/mem region and
 * falls through to "err_out_disable" (disable PCI, free local data),
 * which falls through to "err_out_free" (free hw_priv).
 */
static int prism2_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	unsigned long phymem;
	void __iomem *mem = NULL;
	local_info_t *local = NULL;
	struct net_device *dev = NULL;
	static int cards_found /* = 0 */;
	int irq_registered = 0;
	struct hostap_interface *iface;
	struct hostap_pci_priv *hw_priv;

	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
	if (hw_priv == NULL)
		return -ENOMEM;

	if (pci_enable_device(pdev))
		goto err_out_free;

	phymem = pci_resource_start(pdev, 0);
	if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
		printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
		goto err_out_disable;
	}

	mem = pci_ioremap_bar(pdev, 0);
	if (mem == NULL) {
		printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ;
		goto fail;
	}

	dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
				     &pdev->dev);
	if (dev == NULL)
		goto fail;
	iface = netdev_priv(dev);
	local = iface->local;
	local->hw_priv = hw_priv;
	cards_found++;

	dev->irq = pdev->irq;
	hw_priv->mem_start = mem;
	dev->base_addr = (unsigned long) mem;

	prism2_pci_cor_sreset(local);

	pci_set_drvdata(pdev, dev);

	if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
			dev)) {
		printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
		goto fail;
	} else
		irq_registered = 1;

	if (!local->pri_only && prism2_hw_config(dev, 1)) {
		printk(KERN_DEBUG "%s: hardware initialization failed\n",
		       dev_info);
		goto fail;
	}

	printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
	       "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
	return hostap_hw_ready(dev);

 fail:
	if (irq_registered && dev)
		free_irq(dev->irq, dev);

	if (mem)
		iounmap(mem);

	release_mem_region(phymem, pci_resource_len(pdev, 0));

 err_out_disable:
	pci_disable_device(pdev);
	/* NOTE(review): dev may still be NULL here — presumably
	 * prism2_free_local_data() tolerates that; confirm. */
	prism2_free_local_data(dev);

 err_out_free:
	kfree(hw_priv);

	return -ENODEV;
}
/*
 * PCI remove: quiesce the hardware, then release all resources acquired
 * by probe in reverse order.  mem_start is cached before
 * prism2_free_local_data() because that call frees the structures it
 * lives in.
 */
static void prism2_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev;
	struct hostap_interface *iface;
	void __iomem *mem_start;
	struct hostap_pci_priv *hw_priv;

	dev = pci_get_drvdata(pdev);
	iface = netdev_priv(dev);
	hw_priv = iface->local->hw_priv;

	/* Reset the hardware, and ensure interrupts are disabled. */
	prism2_pci_cor_sreset(iface->local);
	hfa384x_disable_interrupts(dev);

	if (dev->irq)
		free_irq(dev->irq, dev);

	mem_start = hw_priv->mem_start;
	prism2_free_local_data(dev);
	kfree(hw_priv);

	iounmap(mem_start);

	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	pci_disable_device(pdev);
}
#ifdef CONFIG_PM
/*
 * Legacy PM suspend hook: detach the netdev from the stack, let the
 * hostap core save its state, then power the PCI device down to D3hot.
 */
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_stop_queue(dev);
		netif_device_detach(dev);
	}
	prism2_suspend(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/*
 * Legacy PM resume hook: re-enable the PCI device, restore config space,
 * reinitialize the hardware and reattach the netdev.  Returns 0 or the
 * pci_enable_device() error.
 */
static int prism2_pci_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);
	/* second arg 0: resume-style (non-initial) hardware configuration */
	prism2_hw_config(dev, 0);
	if (netif_running(dev)) {
		netif_device_attach(dev);
		netif_start_queue(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
/* PCI driver glue binding the ID table to probe/remove/PM callbacks. */
static struct pci_driver prism2_pci_driver = {
	.name		= "hostap_pci",
	.id_table	= prism2_pci_id_table,
	.probe		= prism2_pci_probe,
	.remove		= prism2_pci_remove,
#ifdef CONFIG_PM
	.suspend	= prism2_pci_suspend,
	.resume		= prism2_pci_resume,
#endif /* CONFIG_PM */
};
/* Module init: register the PCI driver; the PCI core handles probing. */
static int __init init_prism2_pci(void)
{
	return pci_register_driver(&prism2_pci_driver);
}
/* Module exit: unregister the PCI driver; remove() runs per device. */
static void __exit exit_prism2_pci(void)
{
	pci_unregister_driver(&prism2_pci_driver);
}
module_init(init_prism2_pci);
module_exit(exit_prism2_pci);
| gpl-2.0 |
cahdudul/akh8960_cm | crypto/anubis.c | 9191 | 28481 | /*
* Cryptographic API.
*
* Anubis Algorithm
*
* The Anubis algorithm was developed by Paulo S. L. M. Barreto and
* Vincent Rijmen.
*
* See
*
* P.S.L.M. Barreto, V. Rijmen,
* ``The Anubis block cipher,''
* NESSIE submission, 2000.
*
* This software implements the "tweaked" version of Anubis.
* Only the S-box and (consequently) the rounds constants have been
* changed.
*
* The original authors have disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* By Aaron Grothe ajgrothe@yahoo.com, October 28, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
#define ANUBIS_MAX_KEY_SIZE 40
#define ANUBIS_BLOCK_SIZE 16
#define ANUBIS_MAX_N 10
#define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N)
/*
 * Per-transform Anubis context holding the expanded round-key
 * schedules for encryption (E) and decryption (D).
 */
struct anubis_ctx {
	int key_len;	/* key length in bits */
	int R;		/* number of rounds used for this key */
	u32 E[ANUBIS_MAX_ROUNDS + 1][4];
	u32 D[ANUBIS_MAX_ROUNDS + 1][4];
};
static const u32 T0[256] = {
0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U,
0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U,
0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U,
0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U,
0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU,
0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U,
0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U,
0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U,
0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU,
0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U,
0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U,
0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU,
0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 0x14285078U,
0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U,
0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U,
0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U,
0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U,
0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U,
0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U,
0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U,
0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U,
0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U,
0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U,
0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U,
0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU,
0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU,
0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU,
0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U,
0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU,
0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU,
0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U,
0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U,
0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U,
0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U,
0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U,
0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U,
0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU,
0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU,
0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU,
0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU,
0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU,
0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU,
0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU,
0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U,
0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU,
0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U,
0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU,
0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU,
0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U,
0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U,
0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U,
0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU,
0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU,
0xc59733a4U, 0x11224466U, 0x77eec12fU, 0x7cf8ed15U,
0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U,
0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U,
0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U,
0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU,
0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU,
0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U,
0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU,
0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU,
0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU,
0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U,
};
static const u32 T1[256] = {
0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU,
0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U,
0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U,
0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU,
0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U,
0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U,
0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU,
0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U,
0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U,
0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U,
0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU,
0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U,
0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U,
0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U,
0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U,
0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU,
0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U,
0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U,
0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU,
0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU,
0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U,
0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U,
0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U,
0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U,
0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 0x6a35bed4U,
0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU,
0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U,
0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U,
0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U,
0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU,
0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U,
0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U,
0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU,
0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U,
0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU,
0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU,
0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU,
0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U,
0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU,
0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU,
0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU,
0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U,
0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U,
0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U,
0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U,
0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U,
0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U,
0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU,
0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U,
0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U,
0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U,
0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U,
0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U,
0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU,
0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U,
0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U,
0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U,
0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U,
0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU,
0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU,
0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U,
0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU,
0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U,
0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U,
};
static const u32 T2[256] = {
0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U,
0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU,
0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U,
0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U,
0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU,
0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U,
0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU,
0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U,
0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU,
0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU,
0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U,
0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U,
0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U,
0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU,
0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U,
0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U,
0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U,
0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U,
0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU,
0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU,
0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U,
0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU,
0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU,
0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U,
0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU,
0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U,
0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U,
0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU,
0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U,
0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U,
0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU,
0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U,
0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU,
0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U,
0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU,
0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U,
0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 0x9f5eeec1U,
0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U,
0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U,
0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U,
0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U,
0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U,
0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U,
0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU,
0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU,
0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU,
0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU,
0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U,
0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U,
0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U,
0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U,
0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU,
0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU,
0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U,
0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U,
0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU,
0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U,
0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU,
0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U,
0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U,
0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 0xb4ee2d5aU,
0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U,
0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU,
0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U,
};
static const u32 T3[256] = {
0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U,
0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU,
0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU,
0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU,
0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U,
0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U,
0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U,
0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU,
0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU,
0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU,
0xf9ae57a5U, 0x800b8bcbU, 0x67b1d66bU, 0x596e3795U,
0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U,
0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U,
0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU,
0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU,
0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU,
0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU,
0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU,
0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U,
0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU,
0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U,
0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U,
0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U,
0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U,
0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U,
0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU,
0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU,
0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U,
0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U,
0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U,
0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U,
0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU,
0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U,
0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU,
0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U,
0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U,
0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU,
0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U,
0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U,
0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU,
0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U,
0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U,
0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU,
0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU,
0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U,
0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU,
0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U,
0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU,
0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 0xb8d06834U,
0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U,
0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U,
0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U,
0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U,
0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU,
0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU,
0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U,
0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U,
0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U,
0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U,
0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U,
0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU,
0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U,
0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU,
0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U,
};
/*
 * T4: each 32-bit entry replicates one substitution-box output byte into
 * all four byte lanes (0xbabababa, 0x54545454, ...).  The key schedule in
 * anubis_setkey() reads single lanes out of it, both directly and masked
 * with 0xff for the inverse-schedule computation.
 */
static const u32 T4[256] = {
	0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U,
	0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU,
	0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU,
	0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU,
	0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U,
	0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U,
	0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U,
	0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU,
	0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU,
	0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU,
	0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U,
	0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U,
	0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U,
	0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU,
	0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU,
	0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU,
	0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU,
	0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU,
	0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U,
	0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU,
	0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U,
	0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U,
	0x79797979U, 0xddddddddU, 0x3d3d3d3dU, 0x16161616U,
	0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U,
	0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U,
	0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU,
	0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU,
	0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U,
	0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U,
	0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U,
	0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U,
	0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU,
	0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U,
	0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU,
	0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U,
	0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U,
	0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU,
	0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U,
	0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U,
	0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU,
	0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U,
	0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U,
	0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU,
	0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU,
	0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U,
	0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU,
	0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U,
	0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU,
	0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U,
	0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U,
	0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U,
	0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U,
	0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U,
	0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU,
	0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU,
	0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U,
	0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U,
	0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U,
	0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U,
	0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U,
	0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 0x2d2d2d2dU,
	0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U,
	0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU,
	0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U,
};
/*
 * T5: lookup table used only by the key schedule in anubis_setkey(),
 * where individual byte lanes of its entries are selected via
 * 0xff000000/0x00ff0000/0x0000ff00/0x000000ff masks (a per-lane
 * diffusion step over the previous round key words).
 */
static const u32 T5[256] = {
	0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U,
	0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U,
	0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U,
	0x0c182860U, 0x0d1a2e68U, 0x0e1c2470U, 0x0f1e2278U,
	0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U,
	0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U,
	0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U,
	0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U,
	0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U,
	0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U,
	0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U,
	0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U,
	0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U,
	0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U,
	0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U,
	0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U,
	0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U,
	0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U,
	0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U,
	0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U,
	0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U,
	0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U,
	0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U,
	0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U,
	0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU,
	0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU,
	0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU,
	0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU,
	0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU,
	0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU,
	0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU,
	0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU,
	0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU,
	0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU,
	0x880d1734U, 0x890f113cU, 0x8a091b24U, 0x8b0b1d2cU,
	0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU,
	0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU,
	0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU,
	0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU,
	0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU,
	0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U,
	0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U,
	0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U,
	0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U,
	0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U,
	0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U,
	0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U,
	0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U,
	0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U,
	0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U,
	0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U,
	0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U,
	0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U,
	0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U,
	0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U,
	0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U,
	0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU,
	0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU,
	0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU,
	0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU,
	0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU,
	0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU,
	0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU,
	0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU,
};
/*
 * Round constants for the key schedule: anubis_setkey() XORs rc[r] into
 * kappa[0] when deriving the next key state kappa^{r+1} from kappa^r.
 */
static const u32 rc[] = {
	0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU,
	0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU,
	0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U,
	0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU,
	0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U,
};
/*
 * anubis_setkey - expand a raw key into encryption and decryption schedules
 * @tfm:     transform whose context (struct anubis_ctx) receives the keys
 * @in_key:  raw key material, interpreted as big-endian 32-bit words
 * @key_len: key length in bytes; must be one of 16, 20, 24, 28, 32, 36, 40
 *
 * Derives N = key_len/4 key words, sets the round count R = 8 + N, and
 * fills ctx->E (forward schedule, R + 1 round keys of 4 words each) and
 * ctx->D (inverse schedule).  Returns 0 on success, or -EINVAL with
 * CRYPTO_TFM_RES_BAD_KEY_LEN set in the tfm flags for a bad length.
 */
static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
	const __be32 *key = (const __be32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	int N, R, i, r;
	u32 kappa[ANUBIS_MAX_N];	/* evolving key state */
	u32 inter[ANUBIS_MAX_N];	/* scratch for the next key state */

	/* Only whole-word keys between 128 and 320 bits are accepted. */
	switch (key_len) {
	case 16: case 20: case 24: case 28:
	case 32: case 36: case 40:
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_len = key_len * 8;	/* stored in bits */
	N = ctx->key_len >> 5;		/* number of 32-bit key words */
	ctx->R = R = 8 + N;		/* round count grows with key size */

	/* map cipher key to initial key state (mu): */
	for (i = 0; i < N; i++)
		kappa[i] = be32_to_cpu(key[i]);

	/*
	 * generate R + 1 round keys:
	 */
	for (r = 0; r <= R; r++) {
		u32 K0, K1, K2, K3;
		/*
		 * generate r-th round key K^r: start from the last key word
		 * and fold in the remaining words, one T5 diffusion pass per
		 * word, selecting one byte lane per mask.
		 */
		K0 = T4[(kappa[N - 1] >> 24)       ];
		K1 = T4[(kappa[N - 1] >> 16) & 0xff];
		K2 = T4[(kappa[N - 1] >>  8) & 0xff];
		K3 = T4[(kappa[N - 1]      ) & 0xff];
		for (i = N - 2; i >= 0; i--) {
			K0 = T4[(kappa[i] >> 24)       ] ^
				(T5[(K0 >> 24)       ] & 0xff000000U) ^
				(T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^
				(T5[(K0 >>  8) & 0xff] & 0x0000ff00U) ^
				(T5[(K0      ) & 0xff] & 0x000000ffU);
			K1 = T4[(kappa[i] >> 16) & 0xff] ^
				(T5[(K1 >> 24)       ] & 0xff000000U) ^
				(T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^
				(T5[(K1 >>  8) & 0xff] & 0x0000ff00U) ^
				(T5[(K1      ) & 0xff] & 0x000000ffU);
			K2 = T4[(kappa[i] >>  8) & 0xff] ^
				(T5[(K2 >> 24)       ] & 0xff000000U) ^
				(T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^
				(T5[(K2 >>  8) & 0xff] & 0x0000ff00U) ^
				(T5[(K2      ) & 0xff] & 0x000000ffU);
			K3 = T4[(kappa[i]      ) & 0xff] ^
				(T5[(K3 >> 24)       ] & 0xff000000U) ^
				(T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^
				(T5[(K3 >>  8) & 0xff] & 0x0000ff00U) ^
				(T5[(K3      ) & 0xff] & 0x000000ffU);
		}
		ctx->E[r][0] = K0;
		ctx->E[r][1] = K1;
		ctx->E[r][2] = K2;
		ctx->E[r][3] = K3;
		/*
		 * compute kappa^{r+1} from kappa^r:
		 * (skipped after the final round key has been emitted)
		 */
		if (r == R)
			break;
		for (i = 0; i < N; i++) {
			/* j walks backwards from i, wrapping to N - 1 */
			int j = i;
			inter[i]  = T0[(kappa[j--] >> 24)       ];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T2[(kappa[j--] >>  8) & 0xff];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T3[(kappa[j  ]      ) & 0xff];
		}
		/* rc[r] breaks the symmetry between rounds */
		kappa[0] = inter[0] ^ rc[r];
		for (i = 1; i < N; i++)
			kappa[i] = inter[i];
	}

	/*
	 * generate inverse key schedule: K'^0 = K^R, K'^R =
	 * K^0, K'^r = theta(K^{R-r}):
	 */
	for (i = 0; i < 4; i++) {
		ctx->D[0][i] = ctx->E[R][i];
		ctx->D[R][i] = ctx->E[0][i];
	}
	for (r = 1; r < R; r++) {
		for (i = 0; i < 4; i++) {
			u32 v = ctx->E[R - r][i];
			/* apply theta: S-box via T4, then mix via T0..T3 */
			ctx->D[r][i] =
				T0[T4[(v >> 24)       ] & 0xff] ^
				T1[T4[(v >> 16) & 0xff] & 0xff] ^
				T2[T4[(v >>  8) & 0xff] & 0xff] ^
				T3[T4[(v      ) & 0xff] & 0xff];
		}
	}

	return 0;
}
/*
 * anubis_crypt - transform one 16-byte block
 * @roundKey:   key schedule to apply (ctx->E encrypts, ctx->D decrypts)
 * @ciphertext: 16-byte output block
 * @plaintext:  16-byte input block
 * @R:          number of rounds for the key size in use
 *
 * The cipher is an involution up to the key schedule, so the same code
 * path serves both encryption and decryption; only @roundKey differs.
 */
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
			 u8 *ciphertext, const u8 *plaintext, const int R)
{
	const __be32 *src = (const __be32 *)plaintext;
	__be32 *dst = (__be32 *)ciphertext;
	int i, r;
	u32 state[4];
	u32 inter[4];

	/*
	 * map plaintext block to cipher state (mu)
	 * and add initial round key (sigma[K^0]):
	 */
	for (i = 0; i < 4; i++)
		state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];

	/*
	 * R - 1 full rounds: each output word combines one byte lane
	 * from every state word through the T0..T3 tables, then the
	 * round key is added.
	 */
	for (r = 1; r < R; r++) {
		inter[0] =
			T0[(state[0] >> 24)       ] ^
			T1[(state[1] >> 24)       ] ^
			T2[(state[2] >> 24)       ] ^
			T3[(state[3] >> 24)       ] ^
			roundKey[r][0];
		inter[1] =
			T0[(state[0] >> 16) & 0xff] ^
			T1[(state[1] >> 16) & 0xff] ^
			T2[(state[2] >> 16) & 0xff] ^
			T3[(state[3] >> 16) & 0xff] ^
			roundKey[r][1];
		inter[2] =
			T0[(state[0] >>  8) & 0xff] ^
			T1[(state[1] >>  8) & 0xff] ^
			T2[(state[2] >>  8) & 0xff] ^
			T3[(state[3] >>  8) & 0xff] ^
			roundKey[r][2];
		inter[3] =
			T0[(state[0]      ) & 0xff] ^
			T1[(state[1]      ) & 0xff] ^
			T2[(state[2]      ) & 0xff] ^
			T3[(state[3]      ) & 0xff] ^
			roundKey[r][3];
		state[0] = inter[0];
		state[1] = inter[1];
		state[2] = inter[2];
		state[3] = inter[3];
	}

	/*
	 * last round: as above, but only one byte lane of each table
	 * output is kept (the masks), omitting the diffusion layer.
	 */
	inter[0] =
		(T0[(state[0] >> 24)       ] & 0xff000000U) ^
		(T1[(state[1] >> 24)       ] & 0x00ff0000U) ^
		(T2[(state[2] >> 24)       ] & 0x0000ff00U) ^
		(T3[(state[3] >> 24)       ] & 0x000000ffU) ^
		roundKey[R][0];
	inter[1] =
		(T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^
		(T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^
		(T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^
		(T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^
		roundKey[R][1];
	inter[2] =
		(T0[(state[0] >>  8) & 0xff] & 0xff000000U) ^
		(T1[(state[1] >>  8) & 0xff] & 0x00ff0000U) ^
		(T2[(state[2] >>  8) & 0xff] & 0x0000ff00U) ^
		(T3[(state[3] >>  8) & 0xff] & 0x000000ffU) ^
		roundKey[R][2];
	inter[3] =
		(T0[(state[0]      ) & 0xff] & 0xff000000U) ^
		(T1[(state[1]      ) & 0xff] & 0x00ff0000U) ^
		(T2[(state[2]      ) & 0xff] & 0x0000ff00U) ^
		(T3[(state[3]      ) & 0xff] & 0x000000ffU) ^
		roundKey[R][3];

	/*
	 * map cipher state to ciphertext block (mu^{-1}):
	 */
	for (i = 0; i < 4; i++)
		dst[i] = cpu_to_be32(inter[i]);
}
/* Encrypt one block: run the cipher with the forward schedule ctx->E. */
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct anubis_ctx *actx = crypto_tfm_ctx(tfm);

	anubis_crypt(actx->E, dst, src, actx->R);
}
/* Decrypt one block: run the cipher with the inverse schedule ctx->D. */
static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct anubis_ctx *actx = crypto_tfm_ctx(tfm);

	anubis_crypt(actx->D, dst, src, actx->R);
}
/* Algorithm descriptor handed to the crypto core at module init. */
static struct crypto_alg anubis_alg = {
	.cra_name = "anubis",
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = ANUBIS_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct anubis_ctx),
	.cra_alignmask = 3,	/* mask 3: buffers aligned to 32 bits */
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(anubis_alg.cra_list),
	.cra_u = { .cipher = {
	.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
	.cia_max_keysize = ANUBIS_MAX_KEY_SIZE,
	.cia_setkey = anubis_setkey,
	.cia_encrypt = anubis_encrypt,
	.cia_decrypt = anubis_decrypt } }
};
/*
 * Register the Anubis cipher with the crypto API.
 *
 * Returns 0 on success or the negative error code from
 * crypto_register_alg().  (The intermediate "ret" local with its dead
 * "= 0" initializer was redundant; return the result directly.)
 */
static int __init anubis_mod_init(void)
{
	return crypto_register_alg(&anubis_alg);
}
/* Unregister the cipher on module unload. */
static void __exit anubis_mod_fini(void)
{
	crypto_unregister_alg(&anubis_alg);
}
/* Module entry/exit hooks and metadata. */
module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
| gpl-2.0 |
ghdk/os | arch/ia64/hp/common/hwsw_iommu.c | 9191 | 1871 | /*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
* Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
*
* This is a pseudo I/O MMU which dispatches to the hardware I/O MMU
* whenever possible. We assume that the hardware I/O MMU requires
* full 32-bit addressability, as is the case, e.g., for HP zx1-based
* systems (there, the I/O MMU window is mapped at 3-4GB). If a
* device doesn't provide full 32-bit addressability, we fall back on
* the sw I/O TLB. This is good enough to let us support broken
* hardware such as soundcards which have a DMA engine that can
* address only 28 bits.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
#include <asm/machvec.h>
extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
/*
* Note: we need to make the determination of whether or not to use
* the sw I/O TLB based purely on the device structure. Anything else
* would be unreliable or would be too intrusive.
*/
/*
 * A device must go through the software I/O TLB when it exists, has a
 * DMA mask, and the hardware IOMMU (sba) cannot support that mask.
 */
static inline int use_swiotlb(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return 0;

	return !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}
/*
 * Dispatch to the right DMA implementation for @dev: swiotlb for
 * devices the hardware IOMMU cannot address, sba otherwise.
 */
struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
	return use_swiotlb(dev) ? &swiotlb_dma_ops : &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);
/*
 * Boot-time setup: allocate the software I/O TLB.  On failure, a
 * generic kernel falls back to the plain hpzx1 platform vector (no
 * swiotlb); a non-generic build cannot continue and panics.
 */
void __init
hwsw_init (void)
{
	/* default to a smallish 2MB sw I/O TLB */
	if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
#ifdef CONFIG_IA64_GENERIC
		/* Better to have normal DMA than panic */
		printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
		       " reverting to hpzx1 platform vector\n", __func__);
		machvec_init("hpzx1");
#else
		panic("Unable to initialize software I/O TLB services");
#endif
	}
}
| gpl-2.0 |
flwh/android_kernel_zte_x9180 | arch/x86/um/delay.c | 10215 | 1171 | /*
* Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
* Mostly copied from arch/x86/lib/delay.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
/*
 * Busy-wait for @loops iterations of the calibrated delay loop.
 *
 * Written in inline asm (dec/jnz on the counter) so the compiler cannot
 * optimise the loop away; a zero count skips straight to the exit label.
 */
void __delay(unsigned long loops)
{
	asm volatile(
		"test %0,%0\n"
		"jz 3f\n"
		"jmp 1f\n"

		".align 16\n"
		"1: jmp 2f\n"

		".align 16\n"
		"2: dec %0\n"
		" jnz 2b\n"
		"3: dec %0\n"
		: /* we don't need output */
		: "a" (loops)
	);
}
EXPORT_SYMBOL(__delay);
/*
 * Convert a scaled delay constant into loop iterations and spin.
 *
 * The caller passes xloops = delay * 2^32 / unit (see __udelay /
 * __ndelay).  Multiplying by loops_per_jiffy * HZ via "mull" and taking
 * the high 32 bits of the product (%edx, the "=d" output) yields the
 * loop count; the *= 4 / HZ/4 pair keeps the intermediate in range.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		: "=d" (xloops), "=&a" (d0)
		: "1" (xloops), "0"
		(loops_per_jiffy * (HZ/4)));

	__delay(++xloops);	/* +1 rounds up so we never delay too little */
}
EXPORT_SYMBOL(__const_udelay);
/* Delay for @usecs microseconds (scaled for __const_udelay). */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
/* Delay for @nsecs nanoseconds (scaled for __const_udelay). */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
| gpl-2.0 |
coolya/android_kernel_samsung | scripts/conmakehash.c | 12263 | 6142 | /*
* conmakehash.c
*
* Create arrays for initializing the kernel folded tables (using a hash
* table turned out to be to limiting...) Unfortunately we can't simply
* preinitialize the tables at compile time since kfree() cannot accept
* memory not allocated by kmalloc(), and doing our own memory management
* just for this seems like massive overkill.
*
* Copyright (C) 1995-1997 H. Peter Anvin
*
* This program is a part of the Linux kernel, and may be freely
* copied under the terms of the GNU General Public License (GPL),
* version 2, or at your option any later version.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sysexits.h>
#include <string.h>
#include <ctype.h>
#define MAX_FONTLEN 256
typedef unsigned short unicode;
/* Print the command-line synopsis to stderr and exit with EX_USAGE. */
static void usage(char *argv0)
{
	fprintf(stderr, "Usage: \n"
		" %s chartable [hashsize] [hashstep] [maxhashlevel]\n", argv0);
	exit(EX_USAGE);
}
/*
 * Parse a "U+xxxx" token (exactly four hex digits, optionally preceded
 * by spaces/tabs) at *p0.
 *
 * On success, returns the code point and advances *p0 past the token.
 * On a malformed token, returns -1 and leaves *p0 unchanged.
 *
 * Fix: the ctype argument must be representable as unsigned char —
 * calling isxdigit() on a plain char holding a negative value (possible
 * for high-bit input bytes on signed-char platforms) is undefined
 * behaviour (CERT STR37-C), so cast each byte first.
 */
static int getunicode(char **p0)
{
	char *p = *p0;

	while (*p == ' ' || *p == '\t')
		p++;

	if (*p != 'U' || p[1] != '+' ||
	    !isxdigit((unsigned char)p[2]) || !isxdigit((unsigned char)p[3]) ||
	    !isxdigit((unsigned char)p[4]) || !isxdigit((unsigned char)p[5]) ||
	    isxdigit((unsigned char)p[6]))
		return -1;
	*p0 = p + 6;
	return strtol(p + 2, NULL, 16);
}
/* Per-glyph list of Unicode values; at most 255 entries per glyph. */
unicode unitable[MAX_FONTLEN][255];
/* Massive overkill, but who cares? */
/* Number of valid entries in each row of unitable[]. */
int unicount[MAX_FONTLEN];
/*
 * Record Unicode value @un for font position @fp, skipping duplicates.
 * Values above 0xfffe are silently ignored; more than 255 distinct
 * values for one glyph is a fatal data error.
 */
static void addpair(int fp, int un)
{
	int i;

	if (un > 0xfffe)
		return;		/* out-of-range value: ignore */

	/* Already recorded for this glyph? */
	for (i = 0; i < unicount[fp]; i++)
		if (unitable[fp][i] == un)
			return;

	if (unicount[fp] > 254) {
		fprintf(stderr, "ERROR: Only 255 unicodes/glyph permitted!\n");
		exit(EX_DATAERR);
	}

	unitable[fp][unicount[fp]++] = un;
}
/*
 * conmakehash: read a console character table and emit C source for the
 * kernel's dfont_unicount[] / dfont_unitable[] arrays on stdout.
 *
 * argv[1] is the table file ("-" for stdin); up to three further
 * arguments are accepted (historical hash parameters, validated for
 * count only).
 */
int main(int argc, char *argv[])
{
	FILE *ctbl;
	char *tblname;
	char buffer[65536];
	int fontlen;
	int i, nuni, nent;
	int fp0, fp1, un0, un1;
	char *p, *p1;

	if ( argc < 2 || argc > 5 )
		usage(argv[0]);

	/* Open the chartable, "-" meaning stdin. */
	if ( !strcmp(argv[1],"-") )
	{
		ctbl = stdin;
		tblname = "stdin";
	}
	else
	{
		ctbl = fopen(tblname = argv[1], "r");
		if ( !ctbl )
		{
			perror(tblname);
			exit(EX_NOINPUT);
		}
	}

	/* For now we assume the default font is always 256 characters. */
	fontlen = 256;

	/* Initialize table */
	for ( i = 0 ; i < fontlen ; i++ )
		unicount[i] = 0;

	/* Now we come to the tricky part.  Parse the input table. */
	while ( fgets(buffer, sizeof(buffer), ctbl) != NULL )
	{
		/* Strip the trailing newline (warn if the line overflowed). */
		if ( (p = strchr(buffer, '\n')) != NULL )
			*p = '\0';
		else
			fprintf(stderr, "%s: Warning: line too long\n", tblname);

		p = buffer;

		/*
		 * Syntax accepted:
		 * <fontpos> <unicode> <unicode> ...
		 * <range> idem
		 * <range> <unicode range>
		 *
		 * where <range> ::= <fontpos>-<fontpos>
		 * and <unicode> ::= U+<h><h><h><h>
		 * and <h> ::= <hexadecimal digit>
		 */
		while (*p == ' ' || *p == '\t')
			p++;
		if (!*p || *p == '#')
			continue; /* skip comment or blank line */

		/* First token: the font position (or start of a range). */
		fp0 = strtol(p, &p1, 0);
		if (p1 == p)
		{
			fprintf(stderr, "Bad input line: %s\n", buffer);
			exit(EX_DATAERR);
		}
		p = p1;

		while (*p == ' ' || *p == '\t')
			p++;
		if (*p == '-')
		{
			/* Range form: parse the end font position. */
			p++;
			fp1 = strtol(p, &p1, 0);
			if (p1 == p)
			{
				fprintf(stderr, "Bad input line: %s\n", buffer);
				exit(EX_DATAERR);
			}
			p = p1;
		}
		else
			fp1 = 0;

		/* Validate font positions against the font length. */
		if ( fp0 < 0 || fp0 >= fontlen )
		{
			fprintf(stderr,
				"%s: Glyph number (0x%x) larger than font length\n",
				tblname, fp0);
			exit(EX_DATAERR);
		}
		if ( fp1 && (fp1 < fp0 || fp1 >= fontlen) )
		{
			fprintf(stderr,
				"%s: Bad end of range (0x%x)\n",
				tblname, fp1);
			exit(EX_DATAERR);
		}

		if (fp1)
		{
			/* we have a range; expect the word "idem" or a Unicode range of the
			   same length */
			while (*p == ' ' || *p == '\t')
				p++;
			if (!strncmp(p, "idem", 4))
			{
				/* identity mapping across the range */
				for (i=fp0; i<=fp1; i++)
					addpair(i,i);
				p += 4;
			}
			else
			{
				un0 = getunicode(&p);
				while (*p == ' ' || *p == '\t')
					p++;
				if (*p != '-')
				{
					fprintf(stderr,
						"%s: Corresponding to a range of font positions, there should be a Unicode range\n",
						tblname);
					exit(EX_DATAERR);
				}
				p++;
				un1 = getunicode(&p);
				if (un0 < 0 || un1 < 0)
				{
					fprintf(stderr,
						"%s: Bad Unicode range corresponding to font position range 0x%x-0x%x\n",
						tblname, fp0, fp1);
					exit(EX_DATAERR);
				}
				if (un1 - un0 != fp1 - fp0)
				{
					fprintf(stderr,
						"%s: Unicode range U+%x-U+%x not of the same length as font position range 0x%x-0x%x\n",
						tblname, un0, un1, fp0, fp1);
					exit(EX_DATAERR);
				}
				/* map the two ranges element-for-element */
				for(i=fp0; i<=fp1; i++)
					addpair(i,un0-fp0+i);
			}
		}
		else
		{
			/* no range; expect a list of unicode values for a single font position */
			while ( (un0 = getunicode(&p)) >= 0 )
				addpair(fp0, un0);
		}
		while (*p == ' ' || *p == '\t')
			p++;
		if (*p && *p != '#')
			fprintf(stderr, "%s: trailing junk (%s) ignored\n", tblname, p);
	}

	/* Okay, we hit EOF, now output hash table */
	fclose(ctbl);

	/* Compute total size of Unicode list */
	nuni = 0;
	for ( i = 0 ; i < fontlen ; i++ )
		nuni += unicount[i];

	/* Emit the per-glyph counts... */
	printf("\
/*\n\
* Do not edit this file; it was automatically generated by\n\
*\n\
* conmakehash %s > [this file]\n\
*\n\
*/\n\
\n\
#include <linux/types.h>\n\
\n\
u8 dfont_unicount[%d] = \n\
{\n\t", argv[1], fontlen);

	for ( i = 0 ; i < fontlen ; i++ )
	{
		printf("%3d", unicount[i]);
		if ( i == fontlen-1 )
			printf("\n};\n");
		else if ( i % 8 == 7 )
			printf(",\n\t");
		else
			printf(", ");
	}

	/* ...then the flattened Unicode list, in glyph order. */
	printf("\nu16 dfont_unitable[%d] = \n{\n\t", nuni);

	fp0 = 0;
	nent = 0;
	for ( i = 0 ; i < nuni ; i++ )
	{
		while ( nent >= unicount[fp0] )
		{
			fp0++;
			nent = 0;
		}
		printf("0x%04x", unitable[fp0][nent++]);
		if ( i == nuni-1 )
			printf("\n};\n");
		else if ( i % 8 == 7 )
			printf(",\n\t");
		else
			printf(", ");
	}

	exit(EX_OK);
}
| gpl-2.0 |
varigit/kernel-VAR-SOM-AMxx | drivers/net/wireless/brcm80211/brcmfmac/fwil.c | 232 | 8180 | /*
* Copyright (c) 2012 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* FWIL is the Firmware Interface Layer. In this module the support functions
* are located to set and get variables to and from the firmware.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "tracepoint.h"
#include "fwil.h"
#include "proto.h"
#define MAX_HEX_DUMP_LEN 64
/*
 * Core dispatcher for firmware ioctls: forwards @data for command @cmd
 * to the protocol layer, as a set or a query depending on @set.
 * Returns 0 on success or a negative error code; -EIO when the bus is
 * not up.  Caller must hold drvr->proto_block.
 */
static s32
brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
{
	struct brcmf_pub *drvr = ifp->drvr;
	s32 ret;

	if (drvr->bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("bus is down. we have nothing to do.\n");
		return -EIO;
	}

	/* Clamp the transfer to what a single dcmd can carry. */
	if (data)
		len = min_t(uint, len, BRCMF_DCMD_MAXLEN);

	ret = set ?
	      brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len) :
	      brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);

	if (ret < 0)
		brcmf_err("Failed err=%d\n", ret);
	else
		ret = 0;

	return ret;
}
/*
 * Issue a "set" firmware command carrying a data buffer.  Serialises on
 * drvr->proto_block and logs the payload when FIL debugging is on.
 * Returns 0 or a negative error code.
 */
s32
brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
{
	s32 err;

	mutex_lock(&ifp->drvr->proto_block);

	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
	mutex_unlock(&ifp->drvr->proto_block);

	return err;
}
/*
 * Issue a "get" firmware command; the result is written into @data.
 * Serialises on drvr->proto_block.  Returns 0 or a negative error code.
 */
s32
brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
{
	s32 err;

	mutex_lock(&ifp->drvr->proto_block);
	err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);

	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	mutex_unlock(&ifp->drvr->proto_block);

	return err;
}
/*
 * Set a single 32-bit firmware value; converted to little-endian on the
 * wire.  Returns 0 or a negative error code.
 */
s32
brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
{
	s32 err;
	__le32 data_le = cpu_to_le32(data);

	mutex_lock(&ifp->drvr->proto_block);
	brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
	mutex_unlock(&ifp->drvr->proto_block);

	return err;
}
/*
 * Read a single 32-bit firmware value into *@data, converting from the
 * little-endian wire format.  Returns 0 or a negative error code.
 */
s32
brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
{
	s32 err;
	__le32 data_le = cpu_to_le32(*data);

	mutex_lock(&ifp->drvr->proto_block);
	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
	mutex_unlock(&ifp->drvr->proto_block);
	*data = le32_to_cpu(data_le);
	brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);

	return err;
}
/*
 * Build a "name\0data" iovar payload in @buf.
 *
 * Returns the total number of bytes written, or 0 when @buflen cannot
 * hold the name (with its terminating NUL) plus @datalen payload bytes.
 */
static u32
brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
{
	u32 namelen = strlen(name) + 1;	/* include the NUL */

	if (namelen + datalen > buflen)
		return 0;

	memcpy(buf, name, namelen);

	/* append data onto the end of the name string */
	if (data && datalen)
		memcpy(buf + namelen, data, datalen);

	return namelen + datalen;
}
/*
 * Set a named firmware variable (iovar) with a data payload.  The
 * "name\0data" buffer is assembled in drvr->proto_buf and sent as a
 * BRCMF_C_SET_VAR command.  Returns 0, -EPERM when the buffer cannot be
 * built, or a negative error code from the command path.
 */
s32
brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
			 u32 len)
{
	struct brcmf_pub *drvr = ifp->drvr;
	s32 err;
	u32 buflen;

	mutex_lock(&drvr->proto_block);

	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
				    sizeof(drvr->proto_buf));
	if (buflen) {
		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
					 buflen, true);
	} else {
		err = -EPERM;
		brcmf_err("Creating iovar failed\n");
	}

	mutex_unlock(&drvr->proto_block);
	return err;
}
/*
 * Query a named firmware variable (iovar).  @data may carry input
 * parameters for the query; on success the first @len bytes of the
 * response are copied back into it.  Returns 0, -EPERM when the request
 * buffer cannot be built, or a negative error code.
 */
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
			 u32 len)
{
	struct brcmf_pub *drvr = ifp->drvr;
	s32 err;
	u32 buflen;

	mutex_lock(&drvr->proto_block);

	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
				    sizeof(drvr->proto_buf));
	if (buflen) {
		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
					 buflen, false);
		if (err == 0)
			memcpy(data, drvr->proto_buf, len);
	} else {
		err = -EPERM;
		brcmf_err("Creating iovar failed\n");
	}

	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	mutex_unlock(&drvr->proto_block);
	return err;
}
/* Set a 32-bit iovar value (converted to little-endian on the wire). */
s32
brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
{
	__le32 data_le = cpu_to_le32(data);

	return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
}
/* Read a 32-bit iovar value into *@data (little-endian on the wire). */
s32
brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
{
	__le32 data_le = cpu_to_le32(*data);
	s32 err;

	err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
	if (err == 0)
		*data = le32_to_cpu(data_le);
	return err;
}
/*
 * Build a per-BSS iovar buffer: "bsscfg:" prefix (no NUL), the iovar
 * name (with NUL), the little-endian bss index, then the payload.
 * BSS index 0 falls back to the plain iovar layout.
 *
 * Returns the number of bytes written to @buf, or 0 when @buflen is too
 * small.
 */
static u32
brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
		    u32 buflen)
{
	const s8 *prefix = "bsscfg:";
	s8 *p;
	u32 prefixlen;
	u32 namelen;
	u32 iolen;
	__le32 bssidx_le;

	if (bssidx == 0)
		return brcmf_create_iovar(name, data, datalen, buf, buflen);

	prefixlen = strlen(prefix);
	namelen = strlen(name) + 1; /* length of iovar name + null */
	iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
	if (buflen < iolen) {
		brcmf_err("buffer is too short\n");
		return 0;
	}

	p = buf;

	/* copy prefix, no null */
	memcpy(p, prefix, prefixlen);
	p += prefixlen;

	/* copy iovar name including null */
	memcpy(p, name, namelen);
	p += namelen;

	/* bss config index as first data */
	bssidx_le = cpu_to_le32(bssidx);
	memcpy(p, &bssidx_le, sizeof(bssidx_le));
	p += sizeof(bssidx_le);

	/* parameter buffer follows */
	if (datalen)
		memcpy(p, data, datalen);

	return iolen;
}
/*
 * Set a per-BSS firmware variable for the interface's bss index.
 * Returns 0, -EPERM when the request buffer cannot be built, or a
 * negative error code from the command path.
 */
s32
brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
			  void *data, u32 len)
{
	struct brcmf_pub *drvr = ifp->drvr;
	s32 err;
	u32 buflen;

	mutex_lock(&drvr->proto_block);

	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
				     drvr->proto_buf, sizeof(drvr->proto_buf));
	if (buflen) {
		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
					 buflen, true);
	} else {
		err = -EPERM;
		brcmf_err("Creating bsscfg failed\n");
	}

	mutex_unlock(&drvr->proto_block);
	return err;
}
/*
 * Query a per-BSS firmware variable; on success the first @len bytes of
 * the response are copied into @data.  Returns 0, -EPERM when the
 * request buffer cannot be built, or a negative error code.
 */
s32
brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
			  void *data, u32 len)
{
	struct brcmf_pub *drvr = ifp->drvr;
	s32 err;
	u32 buflen;

	mutex_lock(&drvr->proto_block);

	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
				     drvr->proto_buf, sizeof(drvr->proto_buf));
	if (buflen) {
		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
					 buflen, false);
		if (err == 0)
			memcpy(data, drvr->proto_buf, len);
	} else {
		err = -EPERM;
		brcmf_err("Creating bsscfg failed\n");
	}
	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");

	mutex_unlock(&drvr->proto_block);
	return err;
}
/* Set a 32-bit per-BSS variable (little-endian on the wire). */
s32
brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
{
	__le32 data_le = cpu_to_le32(data);

	return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
					 sizeof(data_le));
}
/* Read a 32-bit per-BSS variable into *@data. */
s32
brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
{
	__le32 data_le = cpu_to_le32(*data);
	s32 err;

	err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
					sizeof(data_le));
	if (err == 0)
		*data = le32_to_cpu(data_le);
	return err;
}
| gpl-2.0 |
DRAGUN-KERNEL/ORIGINAL-AOSP-kernel | block/bsg.c | 1512 | 24300 | /*
* bsg.c - block layer implementation of the sg v4 interface
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
* Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License version 2. See the file "COPYING" in the main directory of this
* archive for more details.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
/* Per-open state for a bsg character device. */
struct bsg_device {
	struct request_queue *queue;	/* block queue commands are issued on */
	spinlock_t lock;		/* guards the lists and counters below */
	struct list_head busy_list;	/* command list (managed outside this chunk) */
	struct list_head done_list;	/* command list (managed outside this chunk) */
	struct hlist_node dev_list;	/* link into the bsg_device_list hash */
	atomic_t ref_count;
	int queued_cmds;		/* commands allocated and outstanding */
	int done_cmds;			/* completed commands (<= queued_cmds) */
	wait_queue_head_t wq_done;	/* waiters for command completion */
	wait_queue_head_t wq_free;	/* waiters for a free queue slot */
	char name[20];
	int max_queue;			/* upper bound on queued_cmds */
	unsigned long flags;		/* BSG_F_* bits */
};
/* Bit numbers for bsg_device->flags. */
enum {
	BSG_F_BLOCK = 1,	/* blocking I/O mode; tested in bsg_io_schedule() */
};
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
#undef BSG_DEBUG
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
#define BSG_LIST_ARRAY_SIZE 8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
static struct kmem_cache *bsg_cmd_cachep;
/*
* our internal command type
*/
struct bsg_command {
struct bsg_device *bd;
struct list_head list;
struct request *rq;
struct bio *bio;
struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
char sense[SCSI_SENSE_BUFFERSIZE];
};
/*
 * Release a command back to the slab cache, drop the device's queued
 * count, and wake anyone waiting for a free queue slot.  Note bc->bd is
 * read before the free — bc must not be touched afterwards.
 */
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
/*
 * Allocate a zeroed bsg_command, charging it against the device's queue
 * limit.  Returns the command, ERR_PTR(-EINVAL) when max_queue commands
 * are already outstanding, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	/* Reserve the slot before dropping the lock to allocate. */
	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;	/* return the reserved slot */
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}
/* Map a minor index to its bucket in the bsg_device_list hash table. */
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
/*
 * Sleep until a queued command completes.
 *
 * Returns 0 after waiting on wq_done, -ENODATA when every queued
 * command is already done (nothing to wait for), or -EAGAIN when the
 * device is in non-blocking mode.
 */
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	/* Queue ourselves before dropping the lock to avoid a lost wakeup. */
	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
/*
 * Copy the CDB and timeout settings from the userspace sg_io_v4 header
 * into @rq.  CDBs longer than BLK_MAX_CDB get a kmalloc'ed buffer; on
 * failure the caller frees it (see the out path of bsg_map_hdr()).
 */
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	/* plain SCSI commands are filtered; transport commands need RAWIO */
	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* clamp the user timeout between BLK_MIN_SG_TIMEOUT and defaults */
	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}
/*
 * Check if sg_io_v4 from user is allowed and valid: the guard byte must
 * be 'Q' and the protocol/subprotocol pair must be a supported SCSI one.
 * Also reports the data direction through @rw.
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int err = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    (hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD &&
	     hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT))
		err = -EINVAL;

	/* any dout payload means this is a WRITE request */
	*rw = hdr->dout_xfer_len ? WRITE : READ;

	return err;
}
/*
 * map sg_io_v4 to a request.
 *
 * Allocates the request (and, for bidi transfers, a second read-side
 * request), fills in the CDB/timeout, and maps the user data buffers.
 * On error all partially acquired resources are released here; on
 * success the returned request owns them.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	/* bidirectional command: hang a second, read-side request off rq */
	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	/* pick whichever direction carries data for the primary request */
	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	/* a long CDB was kmalloc'ed by blk_fill_sgv4_hdr_rq(); free it */
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	/* duration held the submit jiffies; convert to elapsed msecs */
	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	/* readers sleeping in bsg_get_done_cmd()/bsg_io_schedule() */
	wake_up(&bd->wq_done);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	/* queue at head unless userspace asked for tail insertion */
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	/* stash submission time; bsg_rq_end_io() turns it into a duration */
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
/*
 * Pop the oldest completed command off the done list, or return NULL
 * when nothing has finished yet.
 */
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *cmd = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		cmd = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&cmd->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return cmd;
}
/*
 * Get a finished command from the done list.  Blocking devices sleep
 * until one completes; non-blocking devices get ERR_PTR(-EAGAIN), and
 * an interrupted sleep yields ERR_PTR(-ERESTARTSYS).
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;

	for (;;) {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		if (wait_event_interruptible(bd->wq_done, bd->done_cmds)) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	}

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
/*
 * Fill the userspace sg_io_v4 header with the completion status of @rq,
 * copy out any sense data, then unmap the bios and release the
 * request(s).  Returns 0 or a negative errno.
 */
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		/* copy at most max_response_len bytes of sense data */
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		/* bidi: report residuals for both directions and drop the
		 * read-side request */
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	/* free a long CDB allocated by blk_fill_sgv4_hdr_rq() */
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
/*
 * Drain the device on release: wait for every queued command to finish,
 * then complete and free each one.  Returns the first "real" completion
 * error encountered, if any.
 */
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		/* remember only the first error */
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
/*
 * Copy up to @count bytes worth of completed sg_io_v4 headers back to
 * userspace.  @count must be a multiple of sizeof(struct sg_io_v4);
 * @bytes_read accumulates how much was actually returned.
 */
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}
/* Mirror the file's O_NONBLOCK flag into the device's BSG_F_BLOCK bit. */
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (!(file->f_flags & O_NONBLOCK))
		set_bit(BSG_F_BLOCK, &bd->flags);
	else
		clear_bit(BSG_F_BLOCK, &bd->flags);
}
/*
 * Check if the error is a "real" error that we should return to the
 * caller.  -ENOSPC, -ENODATA and -EAGAIN are expected flow-control
 * results of the read/write paths and are not treated as failures.
 */
static inline int err_block_err(int ret)
{
	switch (ret) {
	case 0:
	case -ENOSPC:
	case -ENODATA:
	case -EAGAIN:
		return 0;
	default:
		return 1;
	}
}
/*
 * read(2) on a bsg device: reap completed commands and hand their
 * sg_io_v4 headers back to userspace.
 */
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read = 0;
	int ret;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	/* surface the error only if nothing was reaped or it is "real" */
	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}
/*
 * Parse @count bytes of sg_io_v4 headers from userspace, turning each
 * into a queued request.  @count must be a multiple of the header size;
 * @bytes_written accumulates how many header bytes were consumed.
 */
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		/* ownership passed to the busy list / block layer */
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	/* free the command we allocated but never submitted */
	if (bc)
		bsg_free_command(bc);

	return ret;
}
/*
 * write(2) on a bsg device: each sg_io_v4 header written submits one
 * asynchronous command.
 */
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
/*
 * Allocate and initialise a fresh bsg_device with the default queue
 * depth.  Returns NULL on allocation failure.
 */
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd = kzalloc(sizeof(*bd), GFP_KERNEL);

	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);
	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);

	return bd;
}
/*
 * Last reference to a bsg_class_device is gone: invoke the driver's
 * release hook (if any) and drop the parent device reference taken in
 * bsg_register_queue().
 */
static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(parent);

	put_device(parent);
}
/*
 * Drop a reference on @bd.  The final put unhashes the device, drains
 * all outstanding commands (blocking), frees it and releases the queue
 * reference taken in bsg_add_device().
 */
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	/* unhash under the mutex so lookups can't find a dying device */
	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}
/*
 * Create a new bsg_device for @rq, take a queue reference, and hash it
 * by minor number so later opens of the same queue can share it.
 */
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	/* pin the queue for the lifetime of this device */
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}
/*
 * Look for an already-open bsg_device bound to @q in the hash bucket of
 * @minor; grab a reference when found, return NULL otherwise.
 */
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}
/*
 * Resolve an open(2) on a bsg minor to a bsg_device: reuse an existing
 * one for the same queue or create a fresh one.  Holds a temporary
 * class-device kref during the lookup.
 */
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	/* creation failed: drop the kref we took above */
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}
/* open(2): bind this struct file to a (possibly shared) bsg_device. */
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
/* release(2): detach the file and drop its bsg_device reference. */
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;

	return bsg_put_device(bd);
}
/*
 * poll(2): readable when completed commands are waiting, writable while
 * the device still has room below max_queue.
 */
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
/*
 * ioctl(2) entry point: handles the bsg-specific queue-depth controls,
 * forwards classic SCSI/sg ioctls to scsi_cmd_ioctl(), and implements
 * synchronous SG_IO.
 */
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		/* shadows the int __user *uarg above on purpose */
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		/* synchronous variant: map, execute, complete inline */
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
/* file operations for /dev/bsg/<dev> character devices */
static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};
/*
 * Tear down the bsg interface of @q: drop the minor, remove the sysfs
 * link and class device, and release the registration kref.  Safe to
 * call on queues that were never registered.
 */
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
/*
 * Register the bsg interface for @q: allocate a minor, create the class
 * device (named @name, or after @parent) and link it from the queue's
 * sysfs directory.  @release, if given, is called when the last
 * reference to the registration is dropped.
 */
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	/* pin the parent; released via bsg_kref_release_function() */
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

	/* error unwinding, in reverse order of acquisition */
unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
static struct cdev bsg_cdev;

/* place bsg device nodes under /dev/bsg/ */
static char *bsg_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
/*
 * Module init: create the command slab, device class and character
 * device region.  Undoes partial setup in reverse order on failure.
 */
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");
device_initcall(bsg_init);
| gpl-2.0 |
shskyinfo/SKernel_Yu | drivers/crypto/ixp4xx_crypto.c | 2024 | 37656 | /*
* Intel IXP4xx NPE-C crypto driver
*
* Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define MAX_KEYLEN 32
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80
#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9
#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000
#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8
#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400
#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
* NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64
#define SEND_QID 29
#define RECV_QID 30
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
/*
 * Hardware buffer descriptor, chained per scatterlist entry.  The first
 * fields are read by the NPE; `next` and `dir` are host-only bookkeeping
 * used when the chain is torn down.
 */
struct buffer_desc {
	u32 phys_next;		/* DMA address of the next descriptor */
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;		/* DMA address of the data buffer itself */
	u32 __reserved[4];
	struct buffer_desc *next;	/* host-side chain pointer */
	enum dma_data_direction dir;	/* mapping direction for unmap */
};
/*
 * One entry of the descriptor ring shared with the NPE.  The leading
 * fields (through crypto_ctx) are the hardware layout; the tail is
 * host-only state.  sizeof() must stay 64 (see setup_crypt_desc()).
 */
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes*/
	unsigned ctl_flags;	/* CTL_FLAG_* state of this descriptor */
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};
/* per-request state for ablkcipher operations */
struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

/* per-request state for AEAD operations */
struct aead_ctx {
	struct buffer_desc *buffer;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;		/* direction flag for finish_scattered_hmac() */
};

/* static parameters of a hash algorithm supported by the NPE */
struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;	/* initial chaining value */
};

/* one crypto direction (encrypt or decrypt) of a transform context */
struct ix_sa_dir {
	unsigned char *npe_ctx;		/* NPE context block (NPE_CTX_LEN) */
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;		/* write offset into npe_ctx */
	u8 npe_mode;			/* NPE_OP_* bits for this direction */
};

/* per-tfm context */
struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;		/* outstanding async setkey steps */
	struct completion completion;	/* signalled when configuring hits 0 */
};

/* driver algorithm descriptor wrapping a crypto_alg */
struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};
/* standard MD5 initial chaining value (RFC 1321) */
static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

/* standard SHA-1 initial chaining value (FIPS 180-1) */
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;			/* handle to NPE C */
static struct dma_pool *buffer_pool = NULL;	/* buffer_desc pool */
static struct dma_pool *ctx_pool = NULL;	/* NPE context pool */
static struct crypt_ctl *crypt_virt = NULL;	/* descriptor ring (virt) */
static dma_addr_t crypt_phys;			/* descriptor ring (DMA) */

static int support_aes = 1;	/* cleared when firmware lacks AES */
/* no-op release for the statically allocated pseudo device below */
static void dev_release(struct device *dev)
{
	return;
}

#define DRIVER_NAME "ixp4xx_crypto"

/* pseudo platform device used as the DMA-capable owner of our pools */
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;
/* translate a ring entry pointer to its DMA address and back */
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

/* fetch per-algorithm NPE configuration words from the ixp_alg wrapper */
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}
static int setup_crypt_desc(void)
{
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
&crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
static int idx = 0;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
if (unlikely(!crypt_virt))
setup_crypt_desc();
if (unlikely(!crypt_virt)) {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
return crypt_virt +i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
}
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
static int idx = NPE_QLEN;
struct crypt_ctl *desc;
unsigned long flags;
desc = get_crypt_desc();
if (desc)
return desc;
if (unlikely(!crypt_virt))
return NULL;
spin_lock_irqsave(&emerg_lock, flags);
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN_TOTAL)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
return crypt_virt +i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
}
}
/*
 * Walk a chain of buffer descriptors, unmapping each data buffer and
 * returning the descriptors themselves to the DMA pool.
 *
 * @dev:  device the buffers were mapped for
 * @buf:  first descriptor of the chain (virtual), may be NULL
 * @phys: DMA address of @buf, as required by dma_pool_free()
 */
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/*
		 * Unmap the data buffer itself (phys_addr).  The old code
		 * passed phys_next — the link to the next descriptor —
		 * so the actual mapping was never torn down.
		 */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}
static struct tasklet_struct crypto_done_tasklet;

/*
 * For encrypt requests whose ICV did not fit a single sg entry: copy
 * the computed hmac from the bounce buffer into the scatterlist, then
 * free the bounce buffer.
 */
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
/*
 * Handle one completion entry from the receive queue.  The NPE encodes
 * an authentication failure in bit 0 of the descriptor address; the low
 * two bits are masked off before translating back to a virtual pointer.
 */
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		/* async hmac pad hashing step finished */
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		/* reverse AES key generated; flip context to decrypt mode */
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	/* return the descriptor to the ring */
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}
/* queue-manager interrupt: defer completion processing to the tasklet */
static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

/*
 * Tasklet body: drain up to four completions per run, then reschedule
 * itself so other softirqs are not starved while the queue is busy.
 */
static void crypto_done_action(unsigned long arg)
{
	int i;

	for(i=0; i<4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
/*
 * Probe and set up the crypto NPE: check hardware capability bits, load
 * and interrogate the firmware, create the DMA pools and request the
 * send/receive queues.
 *
 * Fixes two NPE reference leaks of the original code: both the firmware
 * load failure and the unsupported-firmware default branch returned
 * without calling npe_release() on the npe_request()'ed handle.
 */
static int init_ixp_crypto(void)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	/* bail out early if the expansion bus reports no crypto features */
	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;	/* was: return ret (leaked npe_c) */
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	/* byte 2 of the status word encodes the firmware's capabilities */
	switch ((msg[1]>>16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;	/* was: return -ENODEV (leaked npe_c) */
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}
/*
 * Undo init_ixp_crypto(): quiesce the receive path, release the queues,
 * destroy the pools, drop the NPE and free the descriptor ring.
 *
 * NOTE(review): this frees NPE_QLEN_TOTAL descriptors while
 * setup_crypt_desc() allocates only NPE_QLEN — the two sizes must
 * match; verify which one is intended.
 */
static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
	return;
}
/* Return one SA direction to its freshly-allocated state. */
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
}
/*
 * Allocate the NPE context buffer for one SA direction from the
 * context DMA pool and clear it.  Returns 0 or -ENOMEM.
 */
static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL,
				      &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}
/*
 * Release one SA direction's NPE context.  The buffer is cleared
 * first so key material does not linger in the DMA pool.
 */
static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
/*
 * Common transform init: allocate the encrypt and decrypt NPE
 * contexts.  On failure of the second allocation the first one is
 * released again.
 */
static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	atomic_set(&ctx->configuring, 0);

	err = init_sa_dir(&ctx->encrypt);
	if (err)
		return err;

	err = init_sa_dir(&ctx->decrypt);
	if (err)
		free_sa_dir(&ctx->encrypt);

	return err;
}
/* ablkcipher flavour of init_tfm(): reserve per-request context space. */
static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}
/* AEAD flavour of init_tfm(): reserve per-request context space. */
static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}
/* Transform teardown: release the contexts of both directions. */
static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}
/*
 * Queue an NPE job that hashes one HMAC pad block (key XOR ipad/opad
 * byte) and deposits the resulting intermediate digest ("chain
 * variable") at physical address @target inside the NPE context.
 * Completion is asynchronous: ctx->configuring is bumped here and is
 * dropped by the receive-queue completion path.
 */
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	/* build the padded key block: key, zero fill, XOR with pad byte */
	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	/* NOTE(review): pad/buf are stashed in the descriptor, presumably
	 * freed by the completion handler — confirm against the RX path */
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;	/* where the NPE writes the digest */
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
/*
 * Append the hash configuration (config word + initial chain
 * variables) to the per-direction NPE context, then kick off
 * asynchronous generation of the HMAC outer and inner pad digests via
 * register_chain_var().
 */
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32*)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	/* physical locations (right after the cfg word) where the NPE
	 * must deposit the inner and outer chain variables */
	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	/* decrypt direction also verifies the incoming ICV */
	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}
/*
 * Ask the NPE to derive the AES inverse (decryption) key schedule by
 * running the "encrypt & generate key" opcode once; the result is
 * written back into the decrypt context just after the config word.
 * Completes asynchronously (tracked through ctx->configuring).
 */
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}

	/* the key-generation pass must run with the encrypt bit set */
	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	/* destination of the reverse key: right after the cfg word */
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
/*
 * Build the cipher part of the per-direction NPE context: config word
 * followed by the key material.  Performs key sanity checks (AES key
 * length, degenerate 3DES keys, weak DES keys).  For AES decryption
 * the inverse key schedule is generated asynchronously by the NPE.
 */
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		/* reject 3DES keys where K1 == K2 or K2 == K3 — they
		 * degenerate to single DES */
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		/* des_ekey() returning 0 flags a weak DES key */
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;

	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}
/*
 * Map a scatterlist (up to @nbytes) for DMA and mirror it as a chain
 * of NPE buffer_desc entries appended after @buf.  Returns the last
 * descriptor of the (terminated) chain on success.
 *
 * On descriptor-allocation failure the partial chain is terminated at
 * the last good element and NULL is returned; the caller releases the
 * partial chain with free_buf_chain() on its hook descriptor.  The
 * original code instead set buf = NULL and then unconditionally
 * executed `buf->next = NULL;` after the loop — a guaranteed NULL
 * pointer dereference on allocation failure.
 */
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain built so far and report
			 * failure instead of dereferencing NULL below */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}
/*
 * ablkcipher setkey: rebuild both direction contexts with the new key.
 * Parts of the configuration run asynchronously on the NPE, so if jobs
 * are still in flight when we are done here, wait on ctx->completion
 * (signalled by the receive path) before returning.
 */
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			/* weak key tolerated: clear the result flag */
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}
/*
 * RFC3686 setkey: the trailing CTR_RFC3686_NONCE_SIZE bytes of the
 * supplied key are the per-SA nonce; stash them in the context and
 * program the cipher with the remaining key material.
 */
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ablk_setkey(tfm, key, key_len);
}
/*
 * Submit one ablkcipher request to the NPE.  Builds DMA buffer chains
 * for the source (and destination, when not operating in place) and
 * queues a crypt descriptor.  Returns -EINPROGRESS on successful
 * submission; completion is reported from the receive-queue handler.
 */
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	/* refuse new work while the queue is full or a setkey is running */
	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(req->dst->length < nbytes);
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

/* error unwinding: free_buf_src deliberately falls through to also
 * release the destination chain when one was built */
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
/* Asynchronous encrypt entry point for plain ablkcipher algorithms. */
static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}
/* Asynchronous decrypt entry point for plain ablkcipher algorithms. */
static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}
/*
 * RFC3686 CTR operation: assemble the full counter block on the stack
 * (nonce || IV || counter=1), run a regular CTR pass with it (CTR
 * decryption is the same as encryption, hence the fixed '1'), then
 * restore the caller's IV pointer.  The IV is copied into the crypt
 * descriptor inside ablk_perform(), so the stack buffer is safe.
 */
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}
/*
 * Return non-zero when the @nbytes bytes beginning at offset @start
 * are NOT fully contained in a single scatterlist element, i.e. the
 * region is split across sg entries.
 */
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	unsigned int seen = 0;

	if (!nbytes)
		return 0;

	/* advance to the element that contains offset 'start' */
	while (start >= seen + sg->length) {
		seen += sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	return (start + nbytes > seen + sg->length);
}
/*
 * Submit one AEAD request to the NPE.  A single DMA chain is built
 * covering assoc data, IV and payload; only in-place operation is
 * supported (src != dst hits BUG()).  If the ICV is split across
 * scatterlist elements, it is staged in a separate linear buffer.
 * Returns -EINPROGRESS on successful submission.
 */
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	/* refuse new work while the queue is full or a setkey is running */
	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen -authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	/* authentication covers assoc || IV || payload */
	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	/* ASSOC data */
	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
		flags, DMA_TO_DEVICE);
	req_ctx->buffer = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto out;

	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_chain;

	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_chain;
		if (!encrypt) {
			/* stage the received ICV for verification */
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	/* Crypt */
	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_hmac_virt;
	if (!req_ctx->hmac_virt) {
		/* contiguous ICV: point directly into the last buffer */
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_hmac_virt:
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
free_chain:
	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
/*
 * (Re)program both direction contexts with the current cipher and
 * authentication keys.  Skipped entirely until both keys have been
 * set.  Configuration completes asynchronously on the NPE, so wait on
 * ctx->completion if jobs are still in flight before returning.
 */
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			/* weak key tolerated: clear the result flag */
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}
/*
 * Accept only authentication tag sizes that are a non-zero multiple
 * of four bytes, up to the digest size of the underlying hash.
 */
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize & 3) || (authsize >> 2) < 1 || (authsize >> 2) > max)
		return -EINVAL;

	return aead_setup(tfm, authsize);
}
/*
 * Parse an authenc() key blob: an rtattr header carrying the cipher
 * key length, followed by the authentication key and then the cipher
 * key.  Both keys are copied into fixed-size arrays in the context,
 * so their lengths must be validated first — the original code copied
 * the attacker-controllable lengths unchecked, allowing the context
 * buffers to be overflowed by an oversized key.
 */
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;

	/* reject keys that would overflow the fixed-size context arrays */
	if (ctx->enckey_len > sizeof(ctx->enckey) ||
	    ctx->authkey_len > sizeof(ctx->authkey))
		goto badkey;

	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/* AEAD encrypt: payload starts right after assoc data and the IV. */
static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}
/* AEAD decrypt: payload starts right after assoc data and the IV. */
static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}
/*
 * givencrypt: derive the IV from a lazily-initialised random salt plus
 * the request sequence number (eseqiv-style), then run an AEAD encrypt
 * that also covers the generated IV.
 */
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}

	/* NOTE(review): the salt is written to req->areq.iv while the
	 * zero-fill and sequence number below go into req->giv — confirm
	 * both point at the intended IV buffer as in eseqiv */
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen +ivsize, req->giv);
}
/*
 * Template table of algorithms this driver offers.  Entries without a
 * .hash are registered as async block ciphers; entries with a .hash
 * become "authenc" AEADs.  cfg_enc/cfg_dec are the NPE cipher config
 * words for each direction; the remaining crypto_alg fields are
 * filled in by ixp_module_init().
 */
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	/* CTR decrypts by re-encrypting the counter stream, so cfg_dec
	 * also uses CIPH_ENCR */
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			/* custom callbacks to handle the nonce suffix */
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
#define IXP_POSTFIX "-ixp4xx"
/*
 * Module init: register the pseudo platform device, bring up the NPE
 * and queue-manager infrastructure, then register each entry of
 * ixp4xx_algos[] with the crypto API, filling in the common
 * crypto_alg fields.  Entries whose driver name would overflow, or
 * AES entries when the firmware lacks AES support, are skipped.
 */
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		/* skip entries whose "-ixp4xx" driver name would not fit */
		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		/* skip AES algorithms if the NPE firmware lacks AES */
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			/* table entries (rfc3686) may override these */
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}
/*
 * Module exit: unregister every algorithm that made it through
 * registration, then tear down the hardware state.
 */
static void __exit ixp_module_exit(void)
{
	int count = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < count; i++)
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);

	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}
module_init(ixp_module_init);
module_exit(ixp_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");
| gpl-2.0 |
actnextgendev/android_kernel_samsung_expressatt | arch/arm/mach-s5p64x0/clock-s5p6440.c | 2536 | 14350 | /* linux/arch/arm/mach-s5p64x0/clock-s5p6440.c
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P6440 - Clock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/s5p64x0-clock.h>
#include <plat/cpu-freq.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
#include <plat/s5p6440.h>
/*
 * EPLL divider table: { target rate, K (fractional), M, P, S }.
 * Only the rates listed here can be programmed by
 * s5p6440_epll_set_rate().
 */
static u32 epll_div[][5] = {
	{ 36000000,	0,	48, 1, 4 },
	{ 48000000,	0,	32, 1, 3 },
	{ 60000000,	0,	40, 1, 3 },
	{ 72000000,	0,	48, 1, 3 },
	{ 84000000,	0,	28, 1, 2 },
	{ 96000000,	0,	32, 1, 2 },
	{ 32768000,	45264,	43, 1, 4 },
	{ 45158000,	6903,	30, 1, 3 },
	{ 49152000,	50332,	32, 1, 3 },
	{ 67738000,	10398,	45, 1, 3 },
	{ 73728000,	9961,	49, 1, 3 }
};
/*
 * Program the EPLL to one of the rates listed in epll_div[].
 * Unsupported rates are rejected with -EINVAL.  Only the M/P/S fields
 * of EPLL_CON and the K field of EPLL_CON_K are touched; all other
 * register bits are preserved.
 */
static int s5p6440_epll_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int epll_con, epll_con_k;
	unsigned int i;

	if (clk->rate == rate)	/* Return if nothing changed */
		return 0;

	epll_con = __raw_readl(S5P64X0_EPLL_CON);
	epll_con_k = __raw_readl(S5P64X0_EPLL_CON_K);

	epll_con_k &= ~(PLL90XX_KDIV_MASK);
	epll_con &= ~(PLL90XX_MDIV_MASK | PLL90XX_PDIV_MASK | PLL90XX_SDIV_MASK);

	/* look up the divider settings for the requested rate */
	for (i = 0; i < ARRAY_SIZE(epll_div); i++) {
		if (epll_div[i][0] == rate) {
			epll_con_k |= (epll_div[i][1] << PLL90XX_KDIV_SHIFT);
			epll_con |= (epll_div[i][2] << PLL90XX_MDIV_SHIFT) |
				    (epll_div[i][3] << PLL90XX_PDIV_SHIFT) |
				    (epll_div[i][4] << PLL90XX_SDIV_SHIFT);
			break;
		}
	}

	if (i == ARRAY_SIZE(epll_div)) {
		printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", __func__);
		return -EINVAL;
	}

	__raw_writel(epll_con, S5P64X0_EPLL_CON);
	__raw_writel(epll_con_k, S5P64X0_EPLL_CON_K);

	printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
			clk->rate, rate);

	clk->rate = rate;
	return 0;
}
/* EPLL clock operations: generic S5P rate read, table-driven set_rate. */
static struct clk_ops s5p6440_epll_ops = {
	.get_rate = s5p_epll_get_rate,
	.set_rate = s5p6440_epll_set_rate,
};
/*
 * Core bus clock chain: ARMCLK -> HCLK -> PCLK, each derived through
 * a divider in CLK_DIV0; plus the low-speed HCLK_LOW/PCLK_LOW pair,
 * whose source is muxed via SYS_OTHERS and divided via CLK_DIV3.
 */
static struct clksrc_clk clk_hclk = {
	.clk	= {
		.name		= "clk_hclk",
		.id		= -1,
		.parent		= &clk_armclk.clk,
	},
	.reg_div	= { .reg = S5P64X0_CLK_DIV0, .shift = 8, .size = 4 },
};

static struct clksrc_clk clk_pclk = {
	.clk	= {
		.name		= "clk_pclk",
		.id		= -1,
		.parent		= &clk_hclk.clk,
	},
	.reg_div	= { .reg = S5P64X0_CLK_DIV0, .shift = 12, .size = 4 },
};

static struct clksrc_clk clk_hclk_low = {
	.clk	= {
		.name		= "clk_hclk_low",
		.id		= -1,
	},
	.sources	= &clkset_hclk_low,
	.reg_src	= { .reg = S5P64X0_SYS_OTHERS, .shift = 6, .size = 1 },
	.reg_div	= { .reg = S5P64X0_CLK_DIV3, .shift = 8, .size = 4 },
};

static struct clksrc_clk clk_pclk_low = {
	.clk	= {
		.name		= "clk_pclk_low",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
	},
	.reg_div	= { .reg = S5P64X0_CLK_DIV3, .shift = 12, .size = 4 },
};
/*
* The following clocks will be disabled during clock initialization. It is
* recommended to keep the following clocks disabled until the driver requests
* for enabling the clock.
*/
static struct clk init_clocks_off[] = {
	/* per entry: the .enable callback selects the gate register
	 * bank and .ctrlbit the gate bit within it */
	{
		.name		= "nand",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_mem_ctrl,
		.ctrlbit	= (1 << 2),
	}, {
		.name		= "post",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 5)
	}, {
		.name		= "2d",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 8),
	}, {
		.name		= "pdma",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 12),
	}, {
		.name		= "hsmmc",
		.id		= 0,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 17),
	}, {
		.name		= "hsmmc",
		.id		= 1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 18),
	}, {
		.name		= "hsmmc",
		.id		= 2,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 19),
	}, {
		.name		= "otg",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 20)
	}, {
		.name		= "irom",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 25),
	}, {
		.name		= "lcd",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk1_ctrl,
		.ctrlbit	= (1 << 1),
	}, {
		.name		= "hclk_fimgvg",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_hclk1_ctrl,
		.ctrlbit	= (1 << 2),
	}, {
		.name		= "tsi",
		.id		= -1,
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk1_ctrl,
		.ctrlbit	= (1 << 0),
	}, {
		.name		= "watchdog",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 5),
	}, {
		.name		= "rtc",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 6),
	}, {
		.name		= "timers",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 7),
	}, {
		.name		= "pcm",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 8),
	}, {
		.name		= "adc",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 12),
	}, {
		.name		= "i2c",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 17),
	}, {
		.name		= "spi",
		.id		= 0,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 21),
	}, {
		.name		= "spi",
		.id		= 1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 22),
	}, {
		.name		= "gps",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 25),
	}, {
		.name		= "iis",
		.id		= 0,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 26),
	}, {
		.name		= "dsim",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 28),
	}, {
		.name		= "etm",
		.id		= -1,
		.parent		= &clk_pclk.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 29),
	}, {
		.name		= "dmc0",
		.id		= -1,
		.parent		= &clk_pclk.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 30),
	}, {
		.name		= "pclk_fimgvg",
		.id		= -1,
		.parent		= &clk_pclk.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 31),
	}, {
		.name		= "sclk_spi_48",
		.id		= 0,
		.parent		= &clk_48m,
		.enable		= s5p64x0_sclk_ctrl,
		.ctrlbit	= (1 << 22),
	}, {
		.name		= "sclk_spi_48",
		.id		= 1,
		.parent		= &clk_48m,
		.enable		= s5p64x0_sclk_ctrl,
		.ctrlbit	= (1 << 23),
	}, {
		.name		= "mmc_48m",
		.id		= 0,
		.parent		= &clk_48m,
		.enable		= s5p64x0_sclk_ctrl,
		.ctrlbit	= (1 << 27),
	}, {
		.name		= "mmc_48m",
		.id		= 1,
		.parent		= &clk_48m,
		.enable		= s5p64x0_sclk_ctrl,
		.ctrlbit	= (1 << 28),
	}, {
		.name		= "mmc_48m",
		.id		= 2,
		.parent		= &clk_48m,
		.enable		= s5p64x0_sclk_ctrl,
		.ctrlbit	= (1 << 29),
	},
};
/*
* The following clocks will be enabled during clock initialization.
*/
static struct clk init_clocks[] = {
	/* clocks needed early (interrupt controller, memory, console
	 * UARTs, GPIO) — enabled during clock initialization */
	{
		.name		= "intc",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 1),
	}, {
		.name		= "mem",
		.id		= -1,
		.parent		= &clk_hclk.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 21),
	}, {
		.name		= "uart",
		.id		= 0,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 1),
	}, {
		.name		= "uart",
		.id		= 1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 2),
	}, {
		.name		= "uart",
		.id		= 2,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 3),
	}, {
		.name		= "uart",
		.id		= 3,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 4),
	}, {
		.name		= "gpio",
		.id		= -1,
		.parent		= &clk_pclk_low.clk,
		.enable		= s5p64x0_pclk_ctrl,
		.ctrlbit	= (1 << 18),
	},
};
/*
 * Externally supplied audio clock inputs (IIS/PCM codec clock lines);
 * they have no parent here and appear as selectable sources in
 * clkset_audio_list.
 */
static struct clk clk_iis_cd_v40 = {
	.name		= "iis_cdclk_v40",
	.id		= -1,
};

static struct clk clk_pcm_cd = {
	.name		= "pcm_cdclk",
	.id		= -1,
};
/*
 * Mux input lists for the clksrc clocks below: the EPLL mux output,
 * the divided MPLL output and the raw EPLL reference; UART omits the
 * reference, audio additionally offers the external codec clocks.
 */
static struct clk *clkset_group1_list[] = {
	&clk_mout_epll.clk,
	&clk_dout_mpll.clk,
	&clk_fin_epll,
};

static struct clksrc_sources clkset_group1 = {
	.sources	= clkset_group1_list,
	.nr_sources	= ARRAY_SIZE(clkset_group1_list),
};

static struct clk *clkset_uart_list[] = {
	&clk_mout_epll.clk,
	&clk_dout_mpll.clk,
};

static struct clksrc_sources clkset_uart = {
	.sources	= clkset_uart_list,
	.nr_sources	= ARRAY_SIZE(clkset_uart_list),
};

static struct clk *clkset_audio_list[] = {
	&clk_mout_epll.clk,
	&clk_dout_mpll.clk,
	&clk_fin_epll,
	&clk_iis_cd_v40,
	&clk_pcm_cd,
};

static struct clksrc_sources clkset_audio = {
	.sources	= clkset_audio_list,
	.nr_sources	= ARRAY_SIZE(clkset_audio_list),
};
/*
 * Peripheral clocks with their own source mux (.reg_src) and divider
 * (.reg_div).  .ctrlbit gates the clock via the SCLK enable callback.
 */
static struct clksrc_clk clksrcs[] = {
	{
		.clk = {
			.name = "sclk_mmc",
			.id = 0,
			.ctrlbit = (1 << 24),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 18, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_mmc",
			.id = 1,
			.ctrlbit = (1 << 25),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 20, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_mmc",
			.id = 2,
			.ctrlbit = (1 << 26),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 22, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 8, .size = 4 },
	}, {
		.clk = {
			.name = "uclk1",
			.id = -1,
			.ctrlbit = (1 << 5),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_uart,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 13, .size = 1 },
		.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_spi",
			.id = 0,
			.ctrlbit = (1 << 20),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 14, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 0, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_spi",
			.id = 1,
			.ctrlbit = (1 << 21),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 16, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_post",
			.id = -1,
			.ctrlbit = (1 << 10),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 26, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 12, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_dispcon",
			.id = -1,
			.ctrlbit = (1 << 1),
			.enable = s5p64x0_sclk1_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 4, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 0, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_fimgvg",
			.id = -1,
			.ctrlbit = (1 << 2),
			.enable = s5p64x0_sclk1_ctrl,
		},
		.sources = &clkset_group1,
		.reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 8, .size = 2 },
		.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 4, .size = 4 },
	}, {
		.clk = {
			.name = "sclk_audio2",
			.id = -1,
			.ctrlbit = (1 << 11),
			.enable = s5p64x0_sclk_ctrl,
		},
		.sources = &clkset_audio,	/* only user of the audio mux */
		.reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 0, .size = 3 },
		.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 24, .size = 4 },
	},
};
/* Clock initialization code */
/*
 * Core clksrc clocks (PLL muxes, dividers, bus clocks) registered
 * before the peripheral clksrcs; order follows the clock tree from
 * the PLL outputs down to the low-speed buses.
 */
static struct clksrc_clk *sysclks[] = {
	&clk_mout_apll,
	&clk_mout_epll,
	&clk_mout_mpll,
	&clk_dout_mpll,
	&clk_armclk,
	&clk_hclk,
	&clk_pclk,
	&clk_hclk_low,
	&clk_pclk_low,
};
/*
 * s5p6440_setup_clocks - read PLL/divider hardware state and publish rates
 *
 * Decodes the APLL/MPLL/EPLL settings from their CON registers against
 * the external crystal rate, stores the results in the fout clocks,
 * snapshots the derived core rates into clk_f/clk_h/clk_p, and finally
 * re-selects each clksrcs[] entry's parent from the current mux bits.
 * Marked __init_or_cpufreq so it can be rerun on cpufreq transitions.
 */
void __init_or_cpufreq s5p6440_setup_clocks(void)
{
	struct clk *xtal_clk;
	unsigned long xtal;
	unsigned long fclk;
	unsigned long hclk;
	unsigned long hclk_low;
	unsigned long pclk;
	unsigned long pclk_low;
	unsigned long apll;
	unsigned long mpll;
	unsigned long epll;
	unsigned int ptr;

	/* Set S5P6440 functions for clk_fout_epll */
	clk_fout_epll.enable = s5p_epll_enable;
	clk_fout_epll.ops = &s5p6440_epll_ops;

	clk_48m.enable = s5p64x0_clk48m_ctrl;

	/* The crystal rate is board-specific; fetch it from the clock it
	 * was registered as and drop the reference straight away. */
	xtal_clk = clk_get(NULL, "ext_xtal");
	BUG_ON(IS_ERR(xtal_clk));

	xtal = clk_get_rate(xtal_clk);
	clk_put(xtal_clk);

	/* APLL/MPLL are 4502-type PLLs; EPLL is a 90xx PLL with an extra
	 * K fractional register. */
	apll = s5p_get_pll45xx(xtal, __raw_readl(S5P64X0_APLL_CON), pll_4502);
	mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P64X0_MPLL_CON), pll_4502);
	epll = s5p_get_pll90xx(xtal, __raw_readl(S5P64X0_EPLL_CON),
				__raw_readl(S5P64X0_EPLL_CON_K));

	clk_fout_apll.rate = apll;
	clk_fout_mpll.rate = mpll;
	clk_fout_epll.rate = epll;

	printk(KERN_INFO "S5P6440: PLL settings, A=%ld.%ldMHz, M=%ld.%ldMHz," \
			" E=%ld.%ldMHz\n",
			print_mhz(apll), print_mhz(mpll), print_mhz(epll));

	fclk = clk_get_rate(&clk_armclk.clk);
	hclk = clk_get_rate(&clk_hclk.clk);
	pclk = clk_get_rate(&clk_pclk.clk);
	hclk_low = clk_get_rate(&clk_hclk_low.clk);
	pclk_low = clk_get_rate(&clk_pclk_low.clk);

	printk(KERN_INFO "S5P6440: HCLK=%ld.%ldMHz, HCLK_LOW=%ld.%ldMHz," \
			" PCLK=%ld.%ldMHz, PCLK_LOW=%ld.%ldMHz\n",
			print_mhz(hclk), print_mhz(hclk_low),
			print_mhz(pclk), print_mhz(pclk_low));

	clk_f.rate = fclk;
	clk_h.rate = hclk;
	clk_p.rate = pclk;

	/* Re-read each peripheral clock's parent from the hardware mux. */
	for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
		s3c_set_clksrc(&clksrcs[ptr], true);
}
/* Plain (non-clksrc) clocks registered directly at init time. */
static struct clk *clks[] __initdata = {
	&clk_ext,
	&clk_iis_cd_v40,
	&clk_pcm_cd,
};
/*
 * s5p6440_register_clocks - register all S5P6440 clocks with the s3c core
 *
 * Registers the plain clocks, the core clksrc clocks, the peripheral
 * clksrc clocks and both gate-clock tables.  Clocks in init_clocks_off
 * are then explicitly gated off until a driver enables them.  Must run
 * before s5p6440_setup_clocks() consumers query rates.
 */
void __init s5p6440_register_clocks(void)
{
	int ptr;

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
		s3c_register_clksrc(sysclks[ptr], 1);

	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));

	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	/* Registration leaves these enabled; gate them off now. */
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	s3c_pwmclk_init();
}
| gpl-2.0 |
LuuchoRocha/fude_kernel_s3mini | drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c | 2536 | 11434 | /*
* Host AP crypt: host-based CCMP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <asm/string.h>
#include <linux/wireless.h>
#include "ieee80211.h"
#include <linux/crypto.h>
#include <linux/scatterlist.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: CCMP");
MODULE_LICENSE("GPL");
#ifndef OPENSUSE_SLED
#define OPENSUSE_SLED 0
#endif
#define AES_BLOCK_LEN 16
#define CCMP_HDR_LEN 8
#define CCMP_MIC_LEN 8
#define CCMP_TK_LEN 16
#define CCMP_PN_LEN 6
/*
 * Per-key CCMP state.  The packet-number counters are stored with the
 * most significant byte first (pn[0] is MSB, pn[5] is LSB — see the
 * increment loop in ieee80211_ccmp_encrypt).
 */
struct ieee80211_ccmp_data {
	u8 key[CCMP_TK_LEN];		/* 128-bit temporal key */
	int key_set;			/* non-zero once a key is installed */

	u8 tx_pn[CCMP_PN_LEN];		/* last PN used on transmit */
	u8 rx_pn[CCMP_PN_LEN];		/* highest PN accepted on receive */

	/* dot11 RSNA counters, reported by ieee80211_ccmp_print_stats() */
	u32 dot11RSNAStatsCCMPFormatErrors;
	u32 dot11RSNAStatsCCMPReplays;
	u32 dot11RSNAStatsCCMPDecryptErrors;

	int key_idx;			/* key index this state was created for */

	struct crypto_tfm *tfm;		/* AES single-block cipher handle */

	/* scratch buffers for virt_to_page() (crypto API) */
	u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
		tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
	u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
};
/* Encrypt a single 16-byte AES block: ct = AES_key(pt). */
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
				const u8 pt[16], u8 ct[16])
{
	void *cipher = tfm;

	crypto_cipher_encrypt_one(cipher, ct, pt);
}
/*
 * Allocate per-key CCMP state plus an AES cipher handle for @key_idx.
 * Returns the new state, or NULL on allocation/cipher failure.
 */
static void * ieee80211_ccmp_init(int key_idx)
{
	struct ieee80211_ccmp_data *priv = kzalloc(sizeof(*priv), GFP_ATOMIC);

	if (!priv)
		return NULL;

	priv->key_idx = key_idx;

	priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tfm)) {
		printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
		       "crypto API aes\n");
		kfree(priv);
		return NULL;
	}

	return priv;
}
/* Release per-key CCMP state, freeing the AES handle if one exists. */
static void ieee80211_ccmp_deinit(void *priv)
{
	struct ieee80211_ccmp_data *data = priv;

	if (data) {
		if (data->tfm)
			crypto_free_cipher((void*)data->tfm);
		kfree(data);
	}
}
/*
 * XOR @len bytes of @a into @b in place (b[i] ^= a[i]).  Used for both
 * CBC-MAC chaining and applying the CTR-mode keystream.
 * Fix: the loop index is now size_t, matching @len and avoiding a
 * signed/unsigned comparison; @a is const since it is never written.
 */
static inline void xor_block(u8 *b, const u8 *a, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}
/*
 * ccmp_init_blocks - build the CCM B0 block and AAD, prime the MAC state
 * @tfm: AES cipher keyed with the temporal key
 * @hdr: 802.11 header the AAD is derived from
 * @pn: 6-byte packet number used in the nonce
 * @dlen: length of the data to be authenticated/encrypted
 * @b0: out — B0/counter block; callers reuse it as the CTR counter
 * @auth: out — CBC-MAC state after absorbing B0 and the two AAD blocks
 * @s0: out — keystream block for counter 0, XORed with the MIC
 */
static void ccmp_init_blocks(struct crypto_tfm *tfm,
			     struct ieee80211_hdr_4addr *hdr,
			     u8 *pn, size_t dlen, u8 *b0, u8 *auth,
			     u8 *s0)
{
	u8 *pos, qc = 0;
	size_t aad_len;
	u16 fc;
	int a4_included, qc_included;
	u8 aad[2 * AES_BLOCK_LEN];

	fc = le16_to_cpu(hdr->frame_ctl);
	/* Frames with both ToDS and FromDS carry a fourth address. */
	a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
		       (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
	/*
	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
		       (WLAN_FC_GET_STYPE(fc) & 0x08));
	*/
	// fixed by David :2006.9.6
	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
		       (WLAN_FC_GET_STYPE(fc) & 0x80));
	aad_len = 22;
	if (a4_included)
		aad_len += 6;
	if (qc_included) {
		/* QoS Control follows addr4 when present, else sits at
		 * the addr4 offset. */
		pos = (u8 *) &hdr->addr4;
		if (a4_included)
			pos += 6;
		qc = *pos & 0x0f;
		aad_len += 2;
	}

	/* CCM Initial Block:
	 * Flag (Include authentication header, M=3 (8-octet MIC),
	 *       L=1 (2-octet Dlen))
	 * Nonce: 0x00 | A2 | PN
	 * Dlen */
	b0[0] = 0x59;
	b0[1] = qc;
	memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
	memcpy(b0 + 8, pn, CCMP_PN_LEN);
	b0[14] = (dlen >> 8) & 0xff;
	b0[15] = dlen & 0xff;

	/* AAD:
	 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
	 * A1 | A2 | A3
	 * SC with bits 4..15 (seq#) masked to zero
	 * A4 (if present)
	 * QC (if present)
	 */
	pos = (u8 *) hdr;
	aad[0] = 0; /* aad_len >> 8 */
	aad[1] = aad_len & 0xff;
	aad[2] = pos[0] & 0x8f;
	aad[3] = pos[1] & 0xc7;
	memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
	pos = (u8 *) &hdr->seq_ctl;
	aad[22] = pos[0] & 0x0f;
	aad[23] = 0; /* all bits masked */
	memset(aad + 24, 0, 8);
	if (a4_included)
		memcpy(aad + 24, hdr->addr4, ETH_ALEN);
	if (qc_included) {
		aad[a4_included ? 30 : 24] = qc;
		/* rest of QC masked */
	}

	/* Start with the first block and AAD */
	ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
	xor_block(auth, aad, AES_BLOCK_LEN);
	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
	xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
	/* Turn B0 into the A_i counter template (flags=L-1, counter=0)
	 * and derive S_0 for MIC encryption. */
	b0[0] &= 0x07;
	b0[14] = b0[15] = 0;
	ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
}
/*
 * ieee80211_ccmp_encrypt - encrypt one MPDU in place (encrypt_mpdu hook)
 * @skb: frame with @hdr_len bytes of 802.11 header followed by payload
 * @hdr_len: length of the 802.11 header at the front of @skb
 * @priv: struct ieee80211_ccmp_data holding the TX key
 *
 * Inserts the 8-byte CCMP header (PN, key index, ExtIV) after the
 * 802.11 header; unless the cb descriptor says hardware handles the
 * crypto (bHwSec), also runs software AES-CCM: CBC-MAC over the
 * payload for the MIC, CTR mode for encryption, and appends the MIC.
 * Returns 0 on success, -1 if @skb lacks the required head/tail room.
 */
static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct ieee80211_ccmp_data *key = priv;
	int data_len, i;
	u8 *pos;
	struct ieee80211_hdr_4addr *hdr;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);

	if (skb_headroom(skb) < CCMP_HDR_LEN ||
	    skb_tailroom(skb) < CCMP_MIC_LEN ||
	    skb->len < hdr_len)
		return -1;

	data_len = skb->len - hdr_len;
	/* Open an 8-byte gap between the 802.11 header and the payload. */
	pos = skb_push(skb, CCMP_HDR_LEN);
	memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
	pos += hdr_len;
	//	mic = skb_put(skb, CCMP_MIC_LEN);

	/* Increment the 48-bit PN (MSB at index 0), with carry. */
	i = CCMP_PN_LEN - 1;
	while (i >= 0) {
		key->tx_pn[i]++;
		if (key->tx_pn[i] != 0)
			break;
		i--;
	}

	/* CCMP header: PN0 PN1 rsvd keyid|ExtIV PN2 PN3 PN4 PN5 */
	*pos++ = key->tx_pn[5];
	*pos++ = key->tx_pn[4];
	*pos++ = 0;
	*pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
	*pos++ = key->tx_pn[3];
	*pos++ = key->tx_pn[2];
	*pos++ = key->tx_pn[1];
	*pos++ = key->tx_pn[0];

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	if (!tcb_desc->bHwSec)
	{
		int blocks, last, len;
		u8 *mic;
		u8 *b0 = key->tx_b0;
		u8 *b = key->tx_b;
		u8 *e = key->tx_e;
		u8 *s0 = key->tx_s0;

		//mic is moved to here by john
		mic = skb_put(skb, CCMP_MIC_LEN);

		ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);

		blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
		last = data_len % AES_BLOCK_LEN;

		for (i = 1; i <= blocks; i++) {
			len = (i == blocks && last) ? last : AES_BLOCK_LEN;
			/* Authentication */
			xor_block(b, pos, len);
			ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
			/* Encryption, with counter */
			b0[14] = (i >> 8) & 0xff;
			b0[15] = i & 0xff;
			ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
			xor_block(pos, e, len);
			pos += len;
		}

		/* MIC = CBC-MAC result encrypted with keystream S_0. */
		for (i = 0; i < CCMP_MIC_LEN; i++)
			mic[i] = b[i] ^ s0[i];
	}
	return 0;
}
/*
 * ieee80211_ccmp_decrypt - decrypt and verify one MPDU (decrypt_mpdu hook)
 * @skb: frame containing 802.11 header, CCMP header, payload and MIC
 * @hdr_len: length of the 802.11 header
 * @priv: struct ieee80211_ccmp_data holding the RX key
 *
 * Validates the CCMP header (ExtIV flag, key index, key configured),
 * enforces PN anti-replay, and — unless hardware already decrypted the
 * frame (bHwSec) — runs AES-CCM decryption and MIC verification, then
 * strips the CCMP header and MIC.
 * Returns the key index (>= 0) on success, or a negative code:
 * -1 runt frame, -2 missing ExtIV, -3 key not set, -4 replay,
 * -5 MIC failure, -6 key index mismatch.
 */
static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct ieee80211_ccmp_data *key = priv;
	u8 keyidx, *pos;
	struct ieee80211_hdr_4addr *hdr;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	u8 pn[6];

	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
		key->dot11RSNAStatsCCMPFormatErrors++;
		return -1;
	}

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	pos = skb->data + hdr_len;
	keyidx = pos[3];
	if (!(keyidx & (1 << 5))) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "CCMP: received packet without ExtIV"
			       " flag from %pM\n", hdr->addr2);
		}
		key->dot11RSNAStatsCCMPFormatErrors++;
		return -2;
	}
	keyidx >>= 6;
	if (key->key_idx != keyidx) {
		printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
		       "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
		return -6;
	}
	if (!key->key_set) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "CCMP: received packet from %pM"
			       " with keyid=%d that does not have a configured"
			       " key\n", hdr->addr2, keyidx);
		}
		return -3;
	}

	/* Rebuild the PN MSB-first from the on-air CCMP header layout. */
	pn[0] = pos[7];
	pn[1] = pos[6];
	pn[2] = pos[5];
	pn[3] = pos[4];
	pn[4] = pos[1];
	pn[5] = pos[0];
	pos += 8;

	/* Anti-replay: PN must be strictly greater than the last seen. */
	if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
		key->dot11RSNAStatsCCMPReplays++;
		return -4;
	}
	if (!tcb_desc->bHwSec)
	{
		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
		u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
		u8 *b0 = key->rx_b0;
		u8 *b = key->rx_b;
		u8 *a = key->rx_a;
		int i, blocks, last, len;

		ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
		/* Recover the plaintext MIC by XORing with keystream S_0. */
		xor_block(mic, b, CCMP_MIC_LEN);

		blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
		last = data_len % AES_BLOCK_LEN;

		for (i = 1; i <= blocks; i++) {
			len = (i == blocks && last) ? last : AES_BLOCK_LEN;
			/* Decrypt, with counter */
			b0[14] = (i >> 8) & 0xff;
			b0[15] = i & 0xff;
			ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
			xor_block(pos, b, len);
			/* Authentication */
			xor_block(a, pos, len);
			ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
			pos += len;
		}

		if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
			if (net_ratelimit()) {
				printk(KERN_DEBUG "CCMP: decrypt failed: STA="
				       "%pM\n", hdr->addr2);
			}
			key->dot11RSNAStatsCCMPDecryptErrors++;
			return -5;
		}

		/* Only advance the replay window after the MIC checks out. */
		memcpy(key->rx_pn, pn, CCMP_PN_LEN);
	}
	/* Remove hdr and MIC */
	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
	skb_pull(skb, CCMP_HDR_LEN);
	skb_trim(skb, skb->len - CCMP_MIC_LEN);

	return keyidx;
}
/*
 * ieee80211_ccmp_set_key - install (len == CCMP_TK_LEN) or clear (len == 0)
 * the temporal key.  All counters and PNs are reset; @seq, if supplied,
 * seeds the RX PN (stored byte-reversed, MSB first).
 * Returns 0 on success, -1 for any other length.
 */
static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct ieee80211_ccmp_data *data = priv;
	int keyidx;
	struct crypto_tfm *tfm = data->tfm;

	/* Wipe all state but preserve our identity and cipher handle. */
	keyidx = data->key_idx;
	memset(data, 0, sizeof(*data));
	data->key_idx = keyidx;
	data->tfm = tfm;
	if (len == CCMP_TK_LEN) {
		memcpy(data->key, key, CCMP_TK_LEN);
		data->key_set = 1;
		if (seq) {
			data->rx_pn[0] = seq[5];
			data->rx_pn[1] = seq[4];
			data->rx_pn[2] = seq[3];
			data->rx_pn[3] = seq[2];
			data->rx_pn[4] = seq[1];
			data->rx_pn[5] = seq[0];
		}
		crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN);
	} else if (len == 0)
		data->key_set = 0;
	else
		return -1;

	return 0;
}
/*
 * Copy the temporal key into @key and, if @seq is given, the TX PN
 * byte-reversed.  Returns CCMP_TK_LEN, 0 if no key is set, or -1 if
 * the caller's buffer is too small.
 */
static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct ieee80211_ccmp_data *data = priv;
	int i;

	if (len < CCMP_TK_LEN)
		return -1;
	if (!data->key_set)
		return 0;

	memcpy(key, data->key, CCMP_TK_LEN);
	if (seq) {
		for (i = 0; i < CCMP_PN_LEN; i++)
			seq[i] = data->tx_pn[CCMP_PN_LEN - 1 - i];
	}

	return CCMP_TK_LEN;
}
/*
 * ieee80211_ccmp_print_stats - format key state and RSNA counters into @p
 * Returns a pointer just past the written text (procfs-style).
 *
 * Fixes two bugs in the original: the return value of the " rx_pn="
 * sprintf was discarded, so everything after it overwrote the label
 * instead of following it; and the rx_pn loop printed tx_pn[] again
 * instead of rx_pn[].
 */
static char * ieee80211_ccmp_print_stats(char *p, void *priv)
{
	struct ieee80211_ccmp_data *ccmp = priv;
	int i;

	p += sprintf(p, "key[%d] alg=CCMP key_set=%d tx_pn=",
		     ccmp->key_idx, ccmp->key_set);
	for (i = 0; i < ARRAY_SIZE(ccmp->tx_pn); i++)
		p += sprintf(p, "%02x", ccmp->tx_pn[i]);
	p += sprintf(p, " rx_pn=");
	for (i = 0; i < ARRAY_SIZE(ccmp->rx_pn); i++)
		p += sprintf(p, "%02x", ccmp->rx_pn[i]);
	p += sprintf(p, " format_errors=%d replays=%d decrypt_errors=%d\n",
		     ccmp->dot11RSNAStatsCCMPFormatErrors,
		     ccmp->dot11RSNAStatsCCMPReplays,
		     ccmp->dot11RSNAStatsCCMPDecryptErrors);

	return p;
}
/* Intentionally empty; referenced externally to keep this object linked. */
void ieee80211_ccmp_null(void)
{
}
/*
 * CCMP crypto operations registered with the ieee80211 crypt layer.
 * CCMP operates on MPDUs only, so the MSDU-level hooks are NULL;
 * extra_*_len tell callers to reserve room for the CCMP header/MIC.
 */
static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
	.name			= "CCMP",
	.init			= ieee80211_ccmp_init,
	.deinit			= ieee80211_ccmp_deinit,
	.encrypt_mpdu		= ieee80211_ccmp_encrypt,
	.decrypt_mpdu		= ieee80211_ccmp_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= ieee80211_ccmp_set_key,
	.get_key		= ieee80211_ccmp_get_key,
	.print_stats		= ieee80211_ccmp_print_stats,
	.extra_prefix_len	= CCMP_HDR_LEN,
	.extra_postfix_len	= CCMP_MIC_LEN,
	.owner			= THIS_MODULE,
};
/* Register the CCMP ops with the ieee80211 crypt layer at module init. */
int __init ieee80211_crypto_ccmp_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
}
/* Unregister the CCMP ops on module exit. */
void ieee80211_crypto_ccmp_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
}
| gpl-2.0 |
hiikezoe/android_kernel_sharp_is17sh | arch/arm/mach-mxs/mm-mx23.c | 2536 | 1094 | /*
* Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*
* Create static mapping between physical to virtual memory.
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mach/map.h>
#include <mach/mx23.h>
#include <mach/common.h>
#include <mach/iomux.h>
/*
* Define the MX23 memory map.
*/
/* Static physical-to-virtual mappings for the MX23 OCRAM and IO regions. */
static struct map_desc mx23_io_desc[] __initdata = {
	mxs_map_entry(MX23, OCRAM, MT_DEVICE),
	mxs_map_entry(MX23, IO, MT_DEVICE),
};
/*
* This function initializes the memory map. It is called during the
* system startup to create static physical to virtual memory mappings
* for the IO modules.
*/
/* Install the MX23 static IO mappings during early boot. */
void __init mx23_map_io(void)
{
	iotable_init(mx23_io_desc, ARRAY_SIZE(mx23_io_desc));
}
/* Initialise the MX23 interrupt collector, then register the GPIO irqs. */
void __init mx23_init_irq(void)
{
	icoll_init_irq();
	mx23_register_gpios();
}
| gpl-2.0 |
htc-mirror/evita-ics-crc-3.0.8-271616b | drivers/staging/et131x/et1310_pm.c | 2536 | 6044 | /*
* Agere Systems Inc.
* 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
*------------------------------------------------------------------------------
*
* et1310_pm.c - All power management related code (not completely implemented)
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2005 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include "et131x_version.h"
#include "et131x_defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include "et1310_phy.h"
#include "et1310_rx.h"
#include "et131x_adapter.h"
#include "et131x.h"
/**
* EnablePhyComa - called when network cable is unplugged
* @etdev: pointer to our adapter structure
*
* driver receive an phy status change interrupt while in D0 and check that
* phy_status is down.
*
* -- gate off JAGCore;
* -- set gigE PHY in Coma mode
* -- wake on phy_interrupt; Perform software reset JAGCore,
* re-initialize jagcore and gigE PHY
*
* Add D0-ASPM-PhyLinkDown Support:
* -- while in D0, when there is a phy_interrupt indicating phy link
* down status, call the MPSetPhyComa routine to enter this active
* state power saving mode
* -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
* indicating linkup status, call the MPDisablePhyComa routine to
* restore JAGCore and gigE PHY
*/
void EnablePhyComa(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&etdev->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	etdev->pdown_speed = etdev->AiForceSpeed;
	etdev->pdown_duplex = etdev->AiForceDpx;

	/* Stop sending packets. */
	spin_lock_irqsave(&etdev->send_hw_lock, flags);
	etdev->Flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&etdev->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &etdev->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode.  Note: two separate register
	 * writes — clocks must be gated before coma is requested. */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &etdev->regs->global.pm_csr);
}
/**
* DisablePhyComa - Disable the Phy Coma Mode
* @etdev: pointer to our adapter structure
*/
void DisablePhyComa(struct et131x_adapter *etdev)
{
	u32 pmcsr;

	pmcsr = readl(&etdev->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &etdev->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	etdev->AiForceSpeed = etdev->pdown_speed;
	etdev->AiForceDpx = etdev->pdown_duplex;

	/* Re-initialize the send structures */
	et131x_init_send(etdev);

	/* Reset the RFD list and re-start RU */
	et131x_reset_recv(etdev);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete.  This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(etdev);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(etdev);

	/* Allow Tx to restart */
	etdev->Flags &= ~fMP_ADAPTER_LOWER_POWER;

	/* Need to re-enable Rx. */
	et131x_rx_dma_enable(etdev);
}
| gpl-2.0 |
1N4148/android_kernel_samsung_smdk4412 | drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c | 2536 | 11434 | /*
* Host AP crypt: host-based CCMP encryption implementation for Host AP driver
*
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <asm/string.h>
#include <linux/wireless.h>
#include "ieee80211.h"
#include <linux/crypto.h>
#include <linux/scatterlist.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: CCMP");
MODULE_LICENSE("GPL");
#ifndef OPENSUSE_SLED
#define OPENSUSE_SLED 0
#endif
#define AES_BLOCK_LEN 16
#define CCMP_HDR_LEN 8
#define CCMP_MIC_LEN 8
#define CCMP_TK_LEN 16
#define CCMP_PN_LEN 6
/*
 * Per-key CCMP state: temporal key, TX/RX packet numbers (MSB first),
 * RSNA error counters, and the AES cipher handle.
 */
struct ieee80211_ccmp_data {
	u8 key[CCMP_TK_LEN];		/* 128-bit temporal key */
	int key_set;			/* non-zero once a key is installed */

	u8 tx_pn[CCMP_PN_LEN];		/* last PN used on transmit */
	u8 rx_pn[CCMP_PN_LEN];		/* highest PN accepted on receive */

	u32 dot11RSNAStatsCCMPFormatErrors;
	u32 dot11RSNAStatsCCMPReplays;
	u32 dot11RSNAStatsCCMPDecryptErrors;

	int key_idx;			/* key index this state was created for */

	struct crypto_tfm *tfm;		/* AES single-block cipher handle */

	/* scratch buffers for virt_to_page() (crypto API) */
	u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
		tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
	u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
};
/* Encrypt a single 16-byte AES block: ct = AES_key(pt). */
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
				const u8 pt[16], u8 ct[16])
{
	crypto_cipher_encrypt_one((void*)tfm, ct, pt);
}
/*
 * Allocate per-key CCMP state plus an AES cipher handle for @key_idx.
 * Returns the new state, or NULL on allocation/cipher failure.
 */
static void * ieee80211_ccmp_init(int key_idx)
{
	struct ieee80211_ccmp_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (priv == NULL)
		goto fail;
	priv->key_idx = key_idx;

	priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tfm)) {
		printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
		       "crypto API aes\n");
		priv->tfm = NULL;
		goto fail;
	}

	return priv;

fail:
	/* priv->tfm is NULL or valid here; free whatever was acquired. */
	if (priv) {
		if (priv->tfm)
			crypto_free_cipher((void*)priv->tfm);
		kfree(priv);
	}

	return NULL;
}
/* Release per-key CCMP state, freeing the AES handle if one exists. */
static void ieee80211_ccmp_deinit(void *priv)
{
	struct ieee80211_ccmp_data *_priv = priv;

	if (_priv && _priv->tfm)
		crypto_free_cipher((void*)_priv->tfm);
	kfree(priv);
}
/*
 * XOR @len bytes of @a into @b in place (b[i] ^= a[i]).  Used for both
 * CBC-MAC chaining and applying the CTR-mode keystream.
 * Fix: the loop index is now size_t, matching @len and avoiding a
 * signed/unsigned comparison; @a is const since it is never written.
 */
static inline void xor_block(u8 *b, const u8 *a, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}
/*
 * ccmp_init_blocks - build the CCM B0 block and AAD from the 802.11
 * header and PN, prime the CBC-MAC state (@auth), and derive the S_0
 * keystream block (@s0).  @b0 is left as the CTR counter template.
 */
static void ccmp_init_blocks(struct crypto_tfm *tfm,
			     struct ieee80211_hdr_4addr *hdr,
			     u8 *pn, size_t dlen, u8 *b0, u8 *auth,
			     u8 *s0)
{
	u8 *pos, qc = 0;
	size_t aad_len;
	u16 fc;
	int a4_included, qc_included;
	u8 aad[2 * AES_BLOCK_LEN];

	fc = le16_to_cpu(hdr->frame_ctl);
	/* Frames with both ToDS and FromDS carry a fourth address. */
	a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
		       (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
	/*
	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
		       (WLAN_FC_GET_STYPE(fc) & 0x08));
	*/
	// fixed by David :2006.9.6
	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
		       (WLAN_FC_GET_STYPE(fc) & 0x80));
	aad_len = 22;
	if (a4_included)
		aad_len += 6;
	if (qc_included) {
		/* QoS Control follows addr4 when present. */
		pos = (u8 *) &hdr->addr4;
		if (a4_included)
			pos += 6;
		qc = *pos & 0x0f;
		aad_len += 2;
	}

	/* CCM Initial Block:
	 * Flag (Include authentication header, M=3 (8-octet MIC),
	 *       L=1 (2-octet Dlen))
	 * Nonce: 0x00 | A2 | PN
	 * Dlen */
	b0[0] = 0x59;
	b0[1] = qc;
	memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
	memcpy(b0 + 8, pn, CCMP_PN_LEN);
	b0[14] = (dlen >> 8) & 0xff;
	b0[15] = dlen & 0xff;

	/* AAD:
	 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
	 * A1 | A2 | A3
	 * SC with bits 4..15 (seq#) masked to zero
	 * A4 (if present)
	 * QC (if present)
	 */
	pos = (u8 *) hdr;
	aad[0] = 0; /* aad_len >> 8 */
	aad[1] = aad_len & 0xff;
	aad[2] = pos[0] & 0x8f;
	aad[3] = pos[1] & 0xc7;
	memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
	pos = (u8 *) &hdr->seq_ctl;
	aad[22] = pos[0] & 0x0f;
	aad[23] = 0; /* all bits masked */
	memset(aad + 24, 0, 8);
	if (a4_included)
		memcpy(aad + 24, hdr->addr4, ETH_ALEN);
	if (qc_included) {
		aad[a4_included ? 30 : 24] = qc;
		/* rest of QC masked */
	}

	/* Start with the first block and AAD */
	ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
	xor_block(auth, aad, AES_BLOCK_LEN);
	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
	xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
	/* Turn B0 into the counter template and derive S_0. */
	b0[0] &= 0x07;
	b0[14] = b0[15] = 0;
	ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
}
/*
 * ieee80211_ccmp_encrypt - encrypt one MPDU in place (encrypt_mpdu hook)
 *
 * Inserts the 8-byte CCMP header after the 802.11 header and, unless
 * hardware crypto is in use (bHwSec), performs software AES-CCM:
 * CBC-MAC for the MIC plus CTR-mode encryption of the payload.
 * Returns 0 on success, -1 if @skb lacks the required head/tail room.
 */
static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct ieee80211_ccmp_data *key = priv;
	int data_len, i;
	u8 *pos;
	struct ieee80211_hdr_4addr *hdr;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);

	if (skb_headroom(skb) < CCMP_HDR_LEN ||
	    skb_tailroom(skb) < CCMP_MIC_LEN ||
	    skb->len < hdr_len)
		return -1;

	data_len = skb->len - hdr_len;
	/* Open an 8-byte gap between the 802.11 header and the payload. */
	pos = skb_push(skb, CCMP_HDR_LEN);
	memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
	pos += hdr_len;
	//	mic = skb_put(skb, CCMP_MIC_LEN);

	/* Increment the 48-bit PN (MSB at index 0), with carry. */
	i = CCMP_PN_LEN - 1;
	while (i >= 0) {
		key->tx_pn[i]++;
		if (key->tx_pn[i] != 0)
			break;
		i--;
	}

	/* CCMP header: PN0 PN1 rsvd keyid|ExtIV PN2 PN3 PN4 PN5 */
	*pos++ = key->tx_pn[5];
	*pos++ = key->tx_pn[4];
	*pos++ = 0;
	*pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
	*pos++ = key->tx_pn[3];
	*pos++ = key->tx_pn[2];
	*pos++ = key->tx_pn[1];
	*pos++ = key->tx_pn[0];

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	if (!tcb_desc->bHwSec)
	{
		int blocks, last, len;
		u8 *mic;
		u8 *b0 = key->tx_b0;
		u8 *b = key->tx_b;
		u8 *e = key->tx_e;
		u8 *s0 = key->tx_s0;

		//mic is moved to here by john
		mic = skb_put(skb, CCMP_MIC_LEN);

		ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);

		blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
		last = data_len % AES_BLOCK_LEN;

		for (i = 1; i <= blocks; i++) {
			len = (i == blocks && last) ? last : AES_BLOCK_LEN;
			/* Authentication */
			xor_block(b, pos, len);
			ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
			/* Encryption, with counter */
			b0[14] = (i >> 8) & 0xff;
			b0[15] = i & 0xff;
			ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
			xor_block(pos, e, len);
			pos += len;
		}

		/* MIC = CBC-MAC result encrypted with keystream S_0. */
		for (i = 0; i < CCMP_MIC_LEN; i++)
			mic[i] = b[i] ^ s0[i];
	}
	return 0;
}
/*
 * ieee80211_ccmp_decrypt - decrypt and verify one MPDU (decrypt_mpdu hook)
 *
 * Validates the CCMP header, enforces PN anti-replay and, unless
 * hardware already decrypted the frame (bHwSec), runs AES-CCM
 * decryption with MIC verification; strips the CCMP header and MIC.
 * Returns the key index (>= 0) on success, or a negative code:
 * -1 runt, -2 no ExtIV, -3 key not set, -4 replay, -5 bad MIC,
 * -6 key index mismatch.
 */
static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct ieee80211_ccmp_data *key = priv;
	u8 keyidx, *pos;
	struct ieee80211_hdr_4addr *hdr;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	u8 pn[6];

	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
		key->dot11RSNAStatsCCMPFormatErrors++;
		return -1;
	}

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	pos = skb->data + hdr_len;
	keyidx = pos[3];
	if (!(keyidx & (1 << 5))) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "CCMP: received packet without ExtIV"
			       " flag from %pM\n", hdr->addr2);
		}
		key->dot11RSNAStatsCCMPFormatErrors++;
		return -2;
	}
	keyidx >>= 6;
	if (key->key_idx != keyidx) {
		printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
		       "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
		return -6;
	}
	if (!key->key_set) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "CCMP: received packet from %pM"
			       " with keyid=%d that does not have a configured"
			       " key\n", hdr->addr2, keyidx);
		}
		return -3;
	}

	/* Rebuild the PN MSB-first from the on-air CCMP header layout. */
	pn[0] = pos[7];
	pn[1] = pos[6];
	pn[2] = pos[5];
	pn[3] = pos[4];
	pn[4] = pos[1];
	pn[5] = pos[0];
	pos += 8;

	/* Anti-replay: PN must be strictly greater than the last seen. */
	if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
		key->dot11RSNAStatsCCMPReplays++;
		return -4;
	}
	if (!tcb_desc->bHwSec)
	{
		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
		u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
		u8 *b0 = key->rx_b0;
		u8 *b = key->rx_b;
		u8 *a = key->rx_a;
		int i, blocks, last, len;

		ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
		/* Recover the plaintext MIC by XORing with keystream S_0. */
		xor_block(mic, b, CCMP_MIC_LEN);

		blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
		last = data_len % AES_BLOCK_LEN;

		for (i = 1; i <= blocks; i++) {
			len = (i == blocks && last) ? last : AES_BLOCK_LEN;
			/* Decrypt, with counter */
			b0[14] = (i >> 8) & 0xff;
			b0[15] = i & 0xff;
			ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
			xor_block(pos, b, len);
			/* Authentication */
			xor_block(a, pos, len);
			ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
			pos += len;
		}

		if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
			if (net_ratelimit()) {
				printk(KERN_DEBUG "CCMP: decrypt failed: STA="
				       "%pM\n", hdr->addr2);
			}
			key->dot11RSNAStatsCCMPDecryptErrors++;
			return -5;
		}

		/* Only advance the replay window after the MIC checks out. */
		memcpy(key->rx_pn, pn, CCMP_PN_LEN);
	}
	/* Remove hdr and MIC */
	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
	skb_pull(skb, CCMP_HDR_LEN);
	skb_trim(skb, skb->len - CCMP_MIC_LEN);

	return keyidx;
}
/*
 * Install (len == CCMP_TK_LEN) or clear (len == 0) the temporal key,
 * resetting all counters/PNs.  @seq, if supplied, seeds the RX PN
 * (stored byte-reversed, MSB first).  Returns 0, or -1 on bad length.
 */
static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct ieee80211_ccmp_data *data = priv;
	int keyidx;
	struct crypto_tfm *tfm = data->tfm;

	/* Wipe all state but preserve our identity and cipher handle. */
	keyidx = data->key_idx;
	memset(data, 0, sizeof(*data));
	data->key_idx = keyidx;
	data->tfm = tfm;
	if (len == CCMP_TK_LEN) {
		memcpy(data->key, key, CCMP_TK_LEN);
		data->key_set = 1;
		if (seq) {
			data->rx_pn[0] = seq[5];
			data->rx_pn[1] = seq[4];
			data->rx_pn[2] = seq[3];
			data->rx_pn[3] = seq[2];
			data->rx_pn[4] = seq[1];
			data->rx_pn[5] = seq[0];
		}
		crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN);
	} else if (len == 0)
		data->key_set = 0;
	else
		return -1;

	return 0;
}
/*
 * Copy the temporal key into @key and, if @seq is given, the TX PN
 * byte-reversed.  Returns CCMP_TK_LEN, 0 if no key is set, or -1 if
 * the caller's buffer is too small.
 */
static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct ieee80211_ccmp_data *data = priv;

	if (len < CCMP_TK_LEN)
		return -1;

	if (!data->key_set)
		return 0;
	memcpy(key, data->key, CCMP_TK_LEN);

	if (seq) {
		seq[0] = data->tx_pn[5];
		seq[1] = data->tx_pn[4];
		seq[2] = data->tx_pn[3];
		seq[3] = data->tx_pn[2];
		seq[4] = data->tx_pn[1];
		seq[5] = data->tx_pn[0];
	}

	return CCMP_TK_LEN;
}
/*
 * Render the CCMP key state and counters into @p for procfs-style output.
 * Returns the advanced write pointer.
 *
 * Fixes two defects in the original:
 *  - the " rx_pn=" sprintf result was discarded (p was not advanced),
 *    so the following loop overwrote the label in place;
 *  - the rx_pn loop printed ccmp->tx_pn[i] instead of ccmp->rx_pn[i]
 *    (copy-paste bug), so the receive PN was never shown.
 */
static char *ieee80211_ccmp_print_stats(char *p, void *priv)
{
	struct ieee80211_ccmp_data *ccmp = priv;
	int i;

	p += sprintf(p, "key[%d] alg=CCMP key_set=%d tx_pn=",
		     ccmp->key_idx, ccmp->key_set);
	for (i = 0; i < ARRAY_SIZE(ccmp->tx_pn); i++)
		p += sprintf(p, "%02x", ccmp->tx_pn[i]);
	p += sprintf(p, " rx_pn=");
	for (i = 0; i < ARRAY_SIZE(ccmp->rx_pn); i++)
		p += sprintf(p, "%02x", ccmp->rx_pn[i]);
	p += sprintf(p, " format_errors=%d replays=%d decrypt_errors=%d\n",
		     ccmp->dot11RSNAStatsCCMPFormatErrors,
		     ccmp->dot11RSNAStatsCCMPReplays,
		     ccmp->dot11RSNAStatsCCMPDecryptErrors);
	return p;
}
/*
 * No-op; presumably kept as a linkable symbol so other ieee80211 objects
 * can pull this module in — NOTE(review): confirm against callers.
 */
void ieee80211_ccmp_null(void)
{
}
/* Crypto-ops vtable registered with the ieee80211 layer for CCMP (AES). */
static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
	.name = "CCMP",
	.init = ieee80211_ccmp_init,
	.deinit = ieee80211_ccmp_deinit,
	.encrypt_mpdu = ieee80211_ccmp_encrypt,
	.decrypt_mpdu = ieee80211_ccmp_decrypt,
	.encrypt_msdu = NULL, /* CCMP operates on MPDUs only; no MSDU hooks */
	.decrypt_msdu = NULL,
	.set_key = ieee80211_ccmp_set_key,
	.get_key = ieee80211_ccmp_get_key,
	.print_stats = ieee80211_ccmp_print_stats,
	.extra_prefix_len = CCMP_HDR_LEN,   /* headroom for the CCMP header */
	.extra_postfix_len = CCMP_MIC_LEN,  /* tailroom for the MIC */
	.owner = THIS_MODULE,
};
/* Register the CCMP crypto ops with the ieee80211 core at module init. */
int __init ieee80211_crypto_ccmp_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
}
/* Unregister the CCMP crypto ops on module exit. */
void ieee80211_crypto_ccmp_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
}
| gpl-2.0 |
syntheticpp/linux | arch/arm/mach-omap2/clock34xx.c | 3816 | 4808 | /*
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2011 Nokia Corporation
*
* Paul Walmsley
* Jouni Högander
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
* Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include "clock.h"
#include "clock34xx.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
					    void __iomem **idlest_reg,
					    u8 *idlest_bit,
					    u8 *idlest_val)
{
	/*
	 * Derive the CM_IDLEST register address from the enable register:
	 * mask off the register-offset bits and select offset 0x20.
	 */
	u32 idlest_addr = ((__force u32)clk->enable_reg & ~0xf0) | 0x20;

	*idlest_reg = (__force void __iomem *)idlest_addr;
	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* SSI functional clock: wait-for-idle using the SSI-specific IDLEST shift */
const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = {
	.find_idlest = omap3430es2_clk_ssi_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
/* SSI interface clock: same IDLEST lookup plus iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
	.allow_idle = omap2_clkt_iclk_allow_idle,
	.deny_idle = omap2_clkt_iclk_deny_idle,
	.find_idlest = omap3430es2_clk_ssi_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* Some OMAP modules on OMAP3 ES2+ chips have both initiator and
* target IDLEST bits. For our purposes, we are concerned with the
* target IDLEST bits, which exist at a different bit position than
* the *CLKEN bit position for these modules (DSS and USBHOST) (The
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
						    void __iomem **idlest_reg,
						    u8 *idlest_bit,
						    u8 *idlest_val)
{
	/*
	 * Derive the CM_IDLEST register address from the enable register:
	 * mask off the register-offset bits and select offset 0x20.
	 */
	u32 idlest_addr = ((__force u32)clk->enable_reg & ~0xf0) | 0x20;

	*idlest_reg = (__force void __iomem *)idlest_addr;
	/* USBHOST_IDLE has same shift */
	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* DSS/USBHOST functional clocks: wait-for-idle with the target IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
	.find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
/* DSS/USBHOST interface clocks: same IDLEST lookup plus iclk autoidle */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
	.allow_idle = omap2_clkt_iclk_allow_idle,
	.deny_idle = omap2_clkt_iclk_deny_idle,
	.find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
* @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
*
* The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
						 void __iomem **idlest_reg,
						 u8 *idlest_bit,
						 u8 *idlest_val)
{
	/*
	 * Derive the CM_IDLEST register address from the enable register:
	 * mask off the register-offset bits and select offset 0x20.
	 */
	u32 idlest_addr = ((__force u32)clk->enable_reg & ~0xf0) | 0x20;

	*idlest_reg = (__force void __iomem *)idlest_addr;
	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
/* HSOTGUSB interface clock: IDLEST lookup plus iclk autoidle control */
const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
	.allow_idle = omap2_clkt_iclk_allow_idle,
	.deny_idle = omap2_clkt_iclk_deny_idle,
	.find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
/* HSOTGUSB functional clock: wait-for-idle with the HSOTGUSB IDLEST bit */
const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = {
	.find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
Droid-Concepts/android_kernel_lge_f320k | fs/ubifs/log.c | 4840 | 20041 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём)
* Adrian Hunter
*/
/*
* This file is a part of UBIFS journal implementation and contains various
* functions which manipulate the log. The log is a fixed area on the flash
* which does not contain any data but refers to buds. The log is a part of the
* journal.
*/
#include "ubifs.h"
#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_check_bud_bytes(struct ubifs_info *c);
#else
#define dbg_check_bud_bytes(c) 0
#endif
/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * Looks @lnum up in the rb-tree of buds under 'c->buds_lock'. Returns the
 * bud description object on success and %NULL if there is no bud with this
 * LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct ubifs_bud *found = NULL;
	struct rb_node *node;

	spin_lock(&c->buds_lock);
	node = c->buds.rb_node;
	while (node) {
		struct ubifs_bud *b = rb_entry(node, struct ubifs_bud, rb);

		if (lnum < b->lnum) {
			node = node->rb_left;
		} else if (lnum > b->lnum) {
			node = node->rb_right;
		} else {
			found = b;
			break;
		}
	}
	spin_unlock(&c->buds_lock);
	return found;
}
/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * Returns the write-buffer of the journal head owning bud LEB @lnum, or
 * %NULL when the journal heads are not set up or @lnum is not a bud.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct ubifs_wbuf *wbuf = NULL;
	struct rb_node *node;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	for (node = c->buds.rb_node; node; ) {
		struct ubifs_bud *b = rb_entry(node, struct ubifs_bud, rb);

		if (lnum < b->lnum) {
			node = node->rb_left;
		} else if (lnum > b->lnum) {
			node = node->rb_right;
		} else {
			wbuf = &c->jheads[b->jhead].wbuf;
			break;
		}
	}
	spin_unlock(&c->buds_lock);
	return wbuf;
}
/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 *
 * The log area is circular; the free space is what lies between the log
 * head (where new data goes) and the log tail.
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long head, tail;

	head = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	tail = (long long)c->ltail_lnum * c->leb_size;

	return head >= tail ? c->log_bytes - head + tail : tail - head;
}
/**
* ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
* @c: UBIFS file-system description object
* @bud: the bud to add
*/
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	/* Standard rb-tree insertion, keyed by LEB number */
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		/* The same LEB must never be added twice */
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		/* Journal heads are only absent while replaying a R/O mount */
		ubifs_assert(c->replaying && c->ro_mount);
	/*
	 * Note, although this is a new bud, we anyway account this space now,
	 * before any data has been written to it, because this is about to
	 * guarantee fixed mount time, and this bud will anyway be read and
	 * scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;
	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}
/**
* ubifs_add_bud_to_log - add a new bud to the log.
* @c: UBIFS file-system description object
* @jhead: journal head the bud belongs to
* @lnum: LEB number of the bud
* @offs: starting offset of the bud
*
* This function writes reference node for the new bud LEB @lnum it to the log,
* and adds it to the buds tress. It also makes sure that log size does not
* exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
* %-EAGAIN if commit is required, and a negative error codes in case of
* failure.
*/
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	/*
	 * Allocate the bud descriptor and the on-flash reference node up
	 * front, before taking the log mutex, to keep the failure paths
	 * simple.
	 */
	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All @c->bud_bytes take place
	 * when both @c->log_mutex and @c->bud_bytes are locked.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	/* No room for another reference node in the current log LEB? */
	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk to refer an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs, UBI_SHORTTERM);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
/**
* remove_buds - remove used buds.
* @c: UBIFS file-system description object
*
* This function removes use buds from the buds tree. It does not remove the
* buds which are pointed to by journal heads.
*/
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	/* Walk all buds; 'p' is advanced before 'p1' may be erased below */
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), wbuf->offs - bud->start,
				c->cmt_bud_bytes);
			/* Only the still-open tail of this bud stays live */
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), c->leb_size - bud->start,
				c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will need
			 * to replay the journal, in which case the old buds
			 * must be unchanged. Do not release them until post
			 * commit i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}
/**
* ubifs_log_start_commit - start commit.
* @c: UBIFS file-system description object
* @ltail_lnum: return new log tail LEB number
*
* The commit operation starts with writing "commit start" node to the log and
* reference nodes for all journal heads which will define new journal after
* the commit has been finished. The commit start and reference nodes are
* written in one go to the nearest empty log LEB (hence, when commit is
* finished UBIFS may safely unmap all the previous log LEBs). This function
* returns zero in case of success and a negative error code in case of
* failure.
*/
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	/* Worst case: one CS node plus one reference node per journal head */
	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		/* Skip journal heads with no outstanding bud */
		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	/* Pad up to the min I/O unit boundary */
	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
	if (err)
		goto out;

	/* The new log tail starts at the LEB containing the CS node */
	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}
/**
* ubifs_log_end_commit - end commit.
* @c: UBIFS file-system description object
* @ltail_lnum: new log tail LEB number
*
* This function is called on when the commit operation was finished. It
* moves log tail to new position and unmaps LEBs which contain obsolete data.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. Its only short "commit" start phase when
	 * writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	/* Advance the tail: everything before it is now obsolete */
	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;
	spin_lock(&c->buds_lock);
	/* Space consumed by the just-committed buds is no longer counted */
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}
/**
* ubifs_log_post_commit - things to do after commit is completed.
* @c: UBIFS file-system description object
* @old_ltail_lnum: old log tail LEB number
*
* Release buds only after commit is completed, because they must be unchanged
* if recovery is needed.
*
* Unmap log LEBs only after commit is completed, because they may be needed for
* recovery.
*
* This function returns %0 on success and a negative error code on failure.
*/
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	/* Return the now-obsolete bud LEBs accumulated by remove_buds() */
	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	/* Unmap every log LEB between the old tail and the new tail */
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}
/**
* struct done_ref - references that have been done.
* @rb: rb-tree node
* @lnum: LEB number
*/
struct done_ref {
	struct rb_node rb;	/* link in the tree of already-copied LEBs */
	int lnum;		/* LEB number of the reference */
};
/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * Looks @lnum up in @done_tree, inserting it when absent. Returns %1 if
 * the reference had already been recorded, %0 if it was just inserted,
 * and a negative error code on allocation failure.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **link = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*link) {
		parent = *link;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			link = &parent->rb_left;
		else if (lnum > dr->lnum)
			link = &parent->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;
	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, link);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}
/**
* destroy_done_tree - destroy the done tree.
* @done_tree: done tree to destroy
*/
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct rb_node *this = done_tree->rb_node;
	struct done_ref *dr;

	/*
	 * Iterative post-order teardown: descend to a leaf, free it, and
	 * detach it from its parent so it is never visited again. Avoids
	 * recursion and needs no auxiliary memory.
	 */
	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		dr = rb_entry(this, struct done_ref, rb);
		this = rb_parent(this);
		if (this) {
			/* Unlink the node being freed from its parent */
			if (this->rb_left == &dr->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(dr);
	}
}
/**
* add_node - add a node to the consolidated log.
* @c: UBIFS file-system description object
* @buf: buffer to which to add
* @lnum: LEB number to which to write is passed and returned here
* @offs: offset to where to write is passed and returned here
* @node: node to add
*
* This function returns %0 on success and a negative error code on failure.
*/
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		/*
		 * The node does not fit in the current LEB: pad and flush the
		 * buffer, then continue in the next log LEB at offset 0.
		 */
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	/* Log nodes are placed at 8-byte aligned offsets */
	*offs += ALIGN(len, 8);
	return 0;
}
/**
* ubifs_consolidate_log - consolidate the log.
* @c: UBIFS file-system description object
*
* Repeated failed commits could cause the log to be full, but at least 1 LEB is
* needed for commit. This function rewrites the reference nodes in the log
* omitting duplicates, and failed CS nodes, and leaving no gaps.
*
* This function returns %0 on success and a negative error code on failure.
*/
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	/* Rewrite starts in place at the log tail */
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		/* Scan each log LEB from tail to head */
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				/* Copy only the first reference to each bud */
				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				/* Keep only the first CS node; later ones
				 * belong to failed commit starts */
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		/* Pad and flush the final, partially filled buffer */
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		/* Consolidation reclaimed nothing - the log really is full */
		ubifs_err("log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}
#ifdef CONFIG_UBIFS_FS_DEBUG
/**
* dbg_check_bud_bytes - make sure bud bytes calculation are all right.
* @c: UBIFS file-system description object
*
* This function makes sure the amount of flash space used by closed buds
* ('c->bud_bytes' is correct). Returns zero in case of success and %-EINVAL in
* case of failure.
*/
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
int i, err = 0;
struct ubifs_bud *bud;
long long bud_bytes = 0;
if (!dbg_is_chk_gen(c))
return 0;
spin_lock(&c->buds_lock);
for (i = 0; i < c->jhead_cnt; i++)
list_for_each_entry(bud, &c->jheads[i].buds_list, list)
bud_bytes += c->leb_size - bud->start;
if (c->bud_bytes != bud_bytes) {
ubifs_err("bad bud_bytes %lld, calculated %lld",
c->bud_bytes, bud_bytes);
err = -EINVAL;
}
spin_unlock(&c->buds_lock);
return err;
}
#endif /* CONFIG_UBIFS_FS_DEBUG */
| gpl-2.0 |
nics21212/android_kernel_samsung_msm8660-common | net/xfrm/xfrm_input.c | 4840 | 6370 | /*
* xfrm_input.c
*
* Changes:
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
static struct kmem_cache *secpath_cachep __read_mostly;
/*
 * Final teardown of a secpath: drop the reference held on every
 * xfrm_state in the vector, then give the object back to its slab cache.
 */
void __secpath_destroy(struct sec_path *sp)
{
	int idx;

	for (idx = 0; idx < sp->len; idx++)
		xfrm_state_put(sp->xvec[idx]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		/*
		 * Copy the whole secpath, then take a reference on every
		 * xfrm_state the copy now points at.
		 */
		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	/*
	 * The duplicate starts with exactly one reference; the memcpy above
	 * also copied src's refcnt, so it must be re-initialized here.
	 */
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);
/*
 * Fetch the SPI and sequence number from the IPsec header identified by
 * @nexthdr. Returns 0 on success, -EINVAL when the header cannot be
 * pulled into the linear area, and 1 for protocols that carry no SPI.
 */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int hlen, spi_off, seq_off;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		spi_off = offsetof(struct ip_auth_hdr, spi);
		seq_off = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		spi_off = offsetof(struct ip_esp_hdr, spi);
		seq_off = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		/* IPcomp carries a 16-bit CPI; widen it into the SPI slot */
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + spi_off);
	*seq = *(__be32 *)(skb_transport_header(skb) + seq_off);
	return 0;
}
/*
 * Run the outer mode's input extraction and hand the packet to the inner
 * mode. When the state's selector family is AF_UNSPEC the inner mode is
 * resolved from the packet's inner protocol instead of the state default.
 */
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err = x->outer_mode->afinfo->extract_input(x, skb);

	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (!inner_mode)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);
/*
 * Main IPsec receive path. Walks the (possibly nested) IPsec headers of
 * @skb, looking up and validating the xfrm_state for each layer, running
 * the transform's input handler, and recording each state in the skb's
 * secpath. encap_type < 0 means this is an async resumption after a
 * transform completed out of line.
 */
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;
	seq = 0;
	/* Caller may have supplied the SPI; otherwise parse it ourselves */
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	/* One iteration per nested IPsec layer */
	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		/* secpath now owns the reference returned by the lookup */
		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		/* Replay-window check */
		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);

		nexthdr = x->type->input(x, skb);

		/* Transform will complete asynchronously and re-enter here */
		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		/* Replay state may have moved while the async op ran */
		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		/* Tunnel mode: hand the decapsulated packet back to the stack */
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);
/*
 * Re-enter xfrm_input() after an asynchronous transform has finished;
 * the negative encap_type (-1) selects the resume path.
 */
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
/*
 * xfrm_input_init - one-time boot-time initialization of the input path
 *
 * Creates the slab cache used for struct sec_path allocations.
 * SLAB_PANIC makes the kernel panic if creation fails, so the return
 * value does not need checking here.
 */
void __init xfrm_input_init(void)
{
	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
}
| gpl-2.0 |
CyanideL/android_kernel_oneplus_msm8974 | drivers/scsi/initio.c | 8168 | 83317 | /**************************************************************************
* Initio 9100 device driver for Linux.
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
* Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2007 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
*************************************************************************
*
* DESCRIPTION:
*
* This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host
* adapters
*
* 08/06/97 hc - v1.01h
* - Support inic-940 and inic-935
* 09/26/97 hc - v1.01i
* - Make correction from J.W. Schultz suggestion
* 10/13/97 hc - Support reset function
* 10/21/97 hc - v1.01j
* - Support 32 LUN (SCSI 3)
* 01/14/98 hc - v1.01k
* - Fix memory allocation problem
* 03/04/98 hc - v1.01l
* - Fix tape rewind which will hang the system problem
* - Set can_queue to initio_num_scb
* 06/25/98 hc - v1.01m
* - Get it work for kernel version >= 2.1.75
* - Dynamic assign SCSI bus reset holding time in initio_init()
* 07/02/98 hc - v1.01n
* - Support 0002134A
* 08/07/98 hc - v1.01o
* - Change the initio_abort_srb routine to use scsi_done. <01>
* 09/07/98 hl - v1.02
* - Change the INI9100U define and proc_dir_entry to
* reflect the newer Kernel 2.1.118, but the v1.o1o
* should work with Kernel 2.1.118.
* 09/20/98 wh - v1.02a
* - Support Abort command.
* - Handle reset routine.
* 09/21/98 hl - v1.03
* - remove comments.
* 12/09/98 bv - v1.03a
* - Removed unused code
* 12/13/98 bv - v1.03b
* - Remove cli() locking for kernels >= 2.1.95. This uses
* spinlocks to serialize access to the pSRB_head and
* pSRB_tail members of the HCS structure.
* 09/01/99 bv - v1.03d
* - Fixed a deadlock problem in SMP.
* 21/01/99 bv - v1.03e
* - Add support for the Domex 3192U PCI SCSI
* This is a slightly modified patch by
* Brian Macy <bmacy@sunshinecomputing.com>
* 22/02/99 bv - v1.03f
* - Didn't detect the INIC-950 in 2.0.x correctly.
* Now fixed.
* 05/07/99 bv - v1.03g
* - Changed the assumption that HZ = 100
* 10/17/03 mc - v1.04
* - added new DMA API support
* 06/01/04 jmd - v1.04a
* - Re-add reset_bus support
**************************************************************************/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "initio.h"
#define SENSE_SIZE 14
#define i91u_MAXQUEUE 2
#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
#ifdef DEBUG_i91u
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
static int initio_tag_enable = 1;
#ifdef DEBUG_i91u
static int setup_debug = 0;
#endif
static void i91uSCBPost(u8 * pHcb, u8 * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] = {
{ PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
#define DEBUG_INTERRUPT 0
#define DEBUG_QUEUE 0
#define DEBUG_STATE 0
#define INT_DISC 0
/*--- forward references ---*/
static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
static int tulip_main(struct initio_host * host);
static int initio_next_state(struct initio_host * host);
static int initio_state_1(struct initio_host * host);
static int initio_state_2(struct initio_host * host);
static int initio_state_3(struct initio_host * host);
static int initio_state_4(struct initio_host * host);
static int initio_state_5(struct initio_host * host);
static int initio_state_6(struct initio_host * host);
static int initio_state_7(struct initio_host * host);
static int initio_xfer_data_in(struct initio_host * host);
static int initio_xfer_data_out(struct initio_host * host);
static int initio_xpad_in(struct initio_host * host);
static int initio_xpad_out(struct initio_host * host);
static int initio_status_msg(struct initio_host * host);
static int initio_msgin(struct initio_host * host);
static int initio_msgin_sync(struct initio_host * host);
static int initio_msgin_accept(struct initio_host * host);
static int initio_msgout_reject(struct initio_host * host);
static int initio_msgin_extend(struct initio_host * host);
static int initio_msgout_ide(struct initio_host * host);
static int initio_msgout_abort_targ(struct initio_host * host);
static int initio_msgout_abort_tag(struct initio_host * host);
static int initio_bus_device_reset(struct initio_host * host);
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
static int int_initio_busfree(struct initio_host * host);
static int int_initio_scsi_rst(struct initio_host * host);
static int int_initio_bad_seq(struct initio_host * host);
static int int_initio_resel(struct initio_host * host);
static int initio_sync_done(struct initio_host * host);
static int wdtr_done(struct initio_host * host);
static int wait_tulip(struct initio_host * host);
static int initio_wait_done_disc(struct initio_host * host);
static int initio_wait_disc(struct initio_host * host);
static void tulip_scsi(struct initio_host * host);
static int initio_post_scsi_rst(struct initio_host * host);
static void initio_se2_ew_en(unsigned long base);
static void initio_se2_ew_ds(unsigned long base);
static int initio_se2_rd_all(unsigned long base);
static void initio_se2_update_all(unsigned long base); /* setup default pattern */
static void initio_read_eeprom(unsigned long base);
/* ---- INTERNAL VARIABLES ---- */

/*
 * Shared NVRAM image and pointer.  The E2PROM helper layer below is
 * not re-entrant and must be used single threaded (see the note on
 * initio_read_eeprom()).
 */
static NVRAM i91unvram;
static NVRAM *i91unvramp;

/*
 * Default NVRAM image, written back to the E2PROM when the stored
 * contents fail the signature or checksum test in initio_se2_rd_all().
 * The trailing word is filled in with the computed checksum by
 * initio_se2_update_all().
 */
static u8 i91udftNvRam[64] =
{
	/*----------- header -----------*/
	0x25, 0xc9,		/* Signature */
	0x40,			/* Size */
	0x01,			/* Revision */
	/* -- Host Adapter Structure -- */
	0x95,			/* ModelByte0 */
	0x00,			/* ModelByte1 */
	0x00,			/* ModelInfo */
	0x01,			/* NumOfCh */
	NBC1_DEFAULT,		/* BIOSConfig1 */
	0,			/* BIOSConfig2 */
	0,			/* HAConfig1 */
	0,			/* HAConfig2 */
	/* SCSI channel 0 and target Structure */
	7,			/* SCSIid */
	NCC1_DEFAULT,		/* SCSIconfig1 */
	0,			/* SCSIconfig2 */
	0x10,			/* NumSCSItarget */
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	/* SCSI channel 1 and target Structure */
	7,			/* SCSIid */
	NCC1_DEFAULT,		/* SCSIconfig1 */
	0,			/* SCSIconfig2 */
	0x10,			/* NumSCSItarget */
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0};			/* - CheckSum - */

/* Sync-period table for fast-20; values are nanoseconds divided by 4 */
static u8 initio_rate_tbl[8] =	/* fast 20 */
{
	/* nanosecond divide by 4 */
	12,			/* 50ns, 20M */
	18,			/* 75ns, 13.3M */
	25,			/* 100ns, 10M */
	31,			/* 125ns, 8M */
	37,			/* 150ns, 6.6M */
	43,			/* 175ns, 5.7M */
	50,			/* 200ns, 5M */
	62			/* 250ns, 4M */
};
/**
 * initio_do_pause - busy-wait for a number of jiffies
 * @amount: number of jiffies to spin for
 *
 * Computes a deadline and spins (yielding the pipeline with
 * cpu_relax) until the deadline has passed.
 */
static void initio_do_pause(unsigned amount)
{
	unsigned long deadline = jiffies + amount;

	while (time_before_eq(jiffies, deadline))
		cpu_relax();
}
/*-- forward reference --*/
/******************************************************************
Input: instruction for Serial E2PROM
EX: se2_rd(0 call se2_instr() to send address and read command
StartBit OP_Code Address Data
--------- -------- ------------------ -------
1 1 , 0 A5,A4,A3,A2,A1,A0 D15-D0
+-----------------------------------------------------
|
CS -----+
+--+ +--+ +--+ +--+ +--+
^ | ^ | ^ | ^ | ^ |
| | | | | | | | | |
CLK -------+ +--+ +--+ +--+ +--+ +--
(leading edge trigger)
+--1-----1--+
| SB OP | OP A5 A4
DI ----+ +--0------------------
(address and cmd sent to nvram)
-------------------------------------------+
|
DO +---
(data sent from nvram)
******************************************************************/
/**
 * initio_se2_instr - bitbang an instruction
 * @base: Base of InitIO controller
 * @instr: Instruction for serial E2PROM
 *
 * Bitbang an instruction out to the serial E2Prom: first a start bit,
 * then the 8 instruction bits MSB-first.  Each bit is presented on
 * SE2DO with the clock low and latched by raising SE2CLK (the device
 * triggers on the leading edge -- see the waveform diagram above).
 * The 30us delays between edges presumably satisfy the EEPROM's
 * timing requirements (TODO: confirm against the device datasheet).
 * Returns with the clock low and chip select still asserted.
 */
static void initio_se2_instr(unsigned long base, u8 instr)
{
	int i;
	u8 b;

	outb(SE2CS | SE2DO, base + TUL_NVRAM);		/* cs+start bit */
	udelay(30);
	outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM);	/* +CLK */
	udelay(30);

	for (i = 0; i < 8; i++) {
		if (instr & 0x80)
			b = SE2CS | SE2DO;		/* -CLK+dataBit */
		else
			b = SE2CS;			/* -CLK */
		outb(b, base + TUL_NVRAM);
		udelay(30);
		outb(b | SE2CLK, base + TUL_NVRAM);	/* +CLK */
		udelay(30);
		instr <<= 1;				/* next bit into MSB position */
	}
	outb(SE2CS, base + TUL_NVRAM);			/* -CLK */
	udelay(30);
}
/**
 * initio_se2_ew_en - Enable erase/write
 * @base: Base address of InitIO controller
 *
 * Enable erase/write state of serial EEPROM by issuing the EWEN
 * opcode (0x30) and then deasserting chip select.
 *
 * Marked static to match the forward declaration earlier in this
 * file (the function was already internal-linkage because of that
 * declaration; the definition now says so explicitly, which also
 * silences sparse/compiler "should it be static?" warnings).
 */
static void initio_se2_ew_en(unsigned long base)
{
	initio_se2_instr(base, 0x30);	/* EWEN */
	outb(0, base + TUL_NVRAM);	/* -CS */
	udelay(30);
}
/**
 * initio_se2_ew_ds - Disable erase/write
 * @base: Base address of InitIO controller
 *
 * Disable erase/write state of serial EEPROM by issuing the EWDS
 * opcode (0) and then deasserting chip select.
 *
 * Marked static to match the forward declaration earlier in this
 * file; the symbol is only used internally.
 */
static void initio_se2_ew_ds(unsigned long base)
{
	initio_se2_instr(base, 0);	/* EWDS */
	outb(0, base + TUL_NVRAM);	/* -CS */
	udelay(30);
}
/**
 * initio_se2_rd - read E2PROM word
 * @base: Base of InitIO controller
 * @addr: Address of word in E2PROM
 *
 * Read a 16-bit word from the NV E2PROM device: issue the READ
 * instruction (opcode bit 0x80 OR'd with the address), then clock in
 * 16 data bits MSB-first, sampling SE2DI after each falling clock
 * edge.
 *
 * NOTE(review): the accumulation is "val += (rb << i)" where rb is
 * the raw SE2DI-masked register value, not a normalized 0/1 bit --
 * this presumably relies on SE2DI's bit position; confirm against
 * initio.h before touching.
 */
static u16 initio_se2_rd(unsigned long base, u8 addr)
{
	u8 instr, rb;
	u16 val = 0;
	int i;

	instr = (u8) (addr | 0x80);
	initio_se2_instr(base, instr);	/* READ INSTR */

	for (i = 15; i >= 0; i--) {
		outb(SE2CS | SE2CLK, base + TUL_NVRAM);	/* +CLK */
		udelay(30);
		outb(SE2CS, base + TUL_NVRAM);		/* -CLK */
		/* sample data after the following edge of clock */
		rb = inb(base + TUL_NVRAM);
		rb &= SE2DI;
		val += (rb << i);
		udelay(30);	/* 6/20/95 */
	}
	outb(0, base + TUL_NVRAM);	/* no chip select */
	udelay(30);
	return val;
}
/**
 * initio_se2_wr - write E2PROM word
 * @base: Base of InitIO controller
 * @addr: Address of word in E2PROM
 * @val: Value to write
 *
 * Write a word to the NV E2PROM device.  Used when recovering from
 * a problem with the NV.  Issues the WRITE instruction (opcode bit
 * 0x40 OR'd with the address), clocks out 16 data bits MSB-first,
 * then toggles chip select and busy-polls SE2DI until the device
 * signals write completion.
 *
 * NOTE(review): the completion poll has no timeout -- a dead EEPROM
 * would hang here.  Kept as-is; this runs only during single-threaded
 * setup/recovery.
 */
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
	u8 rb;
	u8 instr;
	int i;

	instr = (u8) (addr | 0x40);
	initio_se2_instr(base, instr);	/* WRITE INSTR */
	for (i = 15; i >= 0; i--) {
		if (val & 0x8000)
			outb(SE2CS | SE2DO, base + TUL_NVRAM);	/* -CLK+dataBit 1 */
		else
			outb(SE2CS, base + TUL_NVRAM);		/* -CLK+dataBit 0 */
		udelay(30);
		outb(SE2CS | SE2CLK, base + TUL_NVRAM);		/* +CLK */
		udelay(30);
		val <<= 1;
	}
	outb(SE2CS, base + TUL_NVRAM);	/* -CLK */
	udelay(30);
	outb(0, base + TUL_NVRAM);	/* -CS */
	udelay(30);
	outb(SE2CS, base + TUL_NVRAM);	/* +CS */
	udelay(30);

	/* Busy-poll for write completion: device raises SE2DI when done */
	for (;;) {
		outb(SE2CS | SE2CLK, base + TUL_NVRAM);	/* +CLK */
		udelay(30);
		outb(SE2CS, base + TUL_NVRAM);		/* -CLK */
		udelay(30);
		if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
			break;	/* write complete */
	}
	outb(0, base + TUL_NVRAM);	/* -CS */
}
/**
 * initio_se2_rd_all - read hostadapter NV configuration
 * @base: Base address of InitIO controller
 *
 * Reads the E2PROM data (32 16-bit words) into the shared i91unvram
 * image.  Ensures that the signature and checksum (sum of the first
 * 31 words) are valid.  Returns 1 on success, -1 on error.
 *
 * Not re-entrant: uses the file-scope i91unvram/i91unvramp globals.
 */
static int initio_se2_rd_all(unsigned long base)
{
	int i;
	u16 chksum = 0;
	u16 *np;

	i91unvramp = &i91unvram;
	np = (u16 *) i91unvramp;
	for (i = 0; i < 32; i++)
		*np++ = initio_se2_rd(base, i);

	/* Is signature "ini" ok ? */
	if (i91unvramp->NVM_Signature != INI_SIGNATURE)
		return -1;
	/* Is checksum ok ? */
	np = (u16 *) i91unvramp;
	for (i = 0; i < 31; i++)
		chksum += *np++;
	if (i91unvramp->NVM_CheckSum != chksum)
		return -1;
	return 1;
}
/**
 * initio_se2_update_all - Update E2PROM
 * @base: Base of InitIO controller
 *
 * Update the E2PROM by writing the default pattern (i91udftNvRam)
 * into the chip: the checksum word is computed first, write is
 * enabled, and only words that differ from the current in-memory
 * image are actually written, then write is disabled again.
 */
static void initio_se2_update_all(unsigned long base)
{				/* setup default pattern */
	int i;
	u16 chksum = 0;
	u16 *np, *np1;

	i91unvramp = &i91unvram;
	/* Calculate checksum first: sum of the first 31 words, stored
	 * into the 32nd word of the default image */
	np = (u16 *) i91udftNvRam;
	for (i = 0; i < 31; i++)
		chksum += *np++;
	*np = chksum;

	initio_se2_ew_en(base);	/* Enable write */
	np = (u16 *) i91udftNvRam;
	np1 = (u16 *) i91unvramp;
	for (i = 0; i < 32; i++, np++, np1++) {
		if (*np != *np1)	/* skip words already up to date */
			initio_se2_wr(base, i, *np);
	}
	initio_se2_ew_ds(base);	/* Disable write */
}
/**
 * initio_read_eeprom - Retrieve configuration
 * @base: Base of InitIO Host Adapter
 *
 * Retrieve the host adapter configuration data from E2Prom.  If the
 * data is invalid then the defaults are used and are also restored
 * into the E2PROM.  This forms the access point for the SCSI driver
 * into the E2PROM layer, the other functions for the E2PROM are all
 * internal use.
 *
 * Must be called single threaded, uses a shared global area
 * (i91unvram).
 */
static void initio_read_eeprom(unsigned long base)
{
	u8 gctrl;

	i91unvramp = &i91unvram;
	/* Enable EEProm programming */
	gctrl = inb(base + TUL_GCTRL);
	outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
	if (initio_se2_rd_all(base) != 1) {
		/* Bad signature/checksum: write defaults, then re-read
		 * so the in-memory image matches what was written */
		initio_se2_update_all(base);	/* setup default pattern */
		initio_se2_rd_all(base);	/* load again */
	}
	/* Disable EEProm programming */
	gctrl = inb(base + TUL_GCTRL);
	outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
}
/**
 * initio_stop_bm - stop bus master
 * @host: InitIO we are stopping
 *
 * Stop any pending DMA operation, aborting the DMA if necessary,
 * then flush the SCSI FIFO.  Busy-waits (no timeout) for the abort
 * to complete.
 */
static void initio_stop_bm(struct initio_host * host)
{
	if (inb(host->addr + TUL_XStatus) & XPEND) {	/* if DMA xfer is pending, abort DMA xfer */
		outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
		/* wait Abort DMA xfer done */
		while ((inb(host->addr + TUL_Int) & XABT) == 0)
			cpu_relax();
	}
	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/**
 * initio_reset_scsi - Reset SCSI host controller
 * @host: InitIO host to reset
 * @seconds: Recovery time (busy-waited after the reset)
 *
 * Perform a full reset of the SCSI subsystem: pulse bus reset, wait
 * for the reset interrupt, clear the signal register, then stall for
 * @seconds so targets can come back up.  Always returns
 * SCSI_RESET_SUCCESS.
 */
static int initio_reset_scsi(struct initio_host * host, int seconds)
{
	outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
	/* wait (no timeout) for the chip to report the reset interrupt */
	while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
		cpu_relax();

	/* reset tulip chip */
	outb(0, host->addr + TUL_SSignal);

	/* Stall for a while, wait for target's firmware ready,make it 2 sec ! */
	/* SONY 5200 tape drive won't work if only stall for 1 sec */
	/* FIXME: this is a very long busy wait right now */
	initio_do_pause(seconds * HZ);

	inb(host->addr + TUL_SInt);	/* clear any latched interrupt status */
	return SCSI_RESET_SUCCESS;
}
/**
 * initio_init - set up an InitIO host adapter
 * @host: InitIO host adapter
 * @bios_addr: BIOS image base; per-target geometry bytes (heads,
 *             sectors pairs) are read starting at bios_addr + 0x180
 *
 * Set up the host adapter and devices according to the configuration
 * retrieved from the E2PROM: program the chip's SCSI ID, parity,
 * reselection and timeout registers, populate the per-target tables,
 * and optionally reset the SCSI bus.
 *
 * Locking: Calls E2PROM layer code which is not re-enterable so must
 * run single threaded for now.
 */
static void initio_init(struct initio_host * host, u8 *bios_addr)
{
	int i;
	u8 *flags;
	u8 *heads;

	/* Get E2Prom configuration */
	initio_read_eeprom(host->addr);
	if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
		host->max_tar = 8;
	else
		host->max_tar = 16;
	host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
	host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
	host->idmask = ~(1 << host->scsi_id);

#ifdef CHK_PARITY
	/* Enable parity error response */
	outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
#endif

	/* Mask all the interrupt */
	outb(0x1F, host->addr + TUL_Mask);

	initio_stop_bm(host);
	/* --- Initialize the tulip --- */
	outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);

	/* program HBA's SCSI ID */
	outb(host->scsi_id << 4, host->addr + TUL_SScsiId);

	/* Enable Initiator Mode ,phase latch,alternate sync period mode,
	   disable SCSI reset */
	if (host->config & HCC_EN_PAR)
		host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
	else
		host->sconf1 = (TSC_INITDEFAULT);
	outb(host->sconf1, host->addr + TUL_SConfig);

	/* Enable HW reselect */
	outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);

	outb(0, host->addr + TUL_SPeriod);

	/* selection time out = 250 ms */
	outb(153, host->addr + TUL_STimeOut);

	/* Enable SCSI terminator */
	outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
	     host->addr + TUL_XCtrl);
	outb(((host->config & HCC_AUTO_TERM) >> 4) |
	     (inb(host->addr + TUL_GCTRL1) & 0xFE),
	     host->addr + TUL_GCTRL1);

	/* Per-target setup: flags from NVRAM, geometry from BIOS area */
	for (i = 0,
	     flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
	     heads = bios_addr + 0x180;
	     i < host->max_tar;
	     i++, flags++) {
		host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
		/* NOTE(review): this drv_flags assignment is a dead store --
		 * it is unconditionally overwritten by the heads == 255 test
		 * below.  Kept as-is to preserve existing behavior; confirm
		 * which of the two conditions was intended before fixing. */
		if (host->targets[i].flags & TCF_EN_255)
			host->targets[i].drv_flags = TCF_DRV_255_63;
		else
			host->targets[i].drv_flags = 0;
		host->targets[i].js_period = 0;
		host->targets[i].sconfig0 = host->sconf1;
		host->targets[i].heads = *heads++;
		if (host->targets[i].heads == 255)
			host->targets[i].drv_flags = TCF_DRV_255_63;
		else
			host->targets[i].drv_flags = 0;
		host->targets[i].sectors = *heads++;
		host->targets[i].flags &= ~TCF_BUSY;
		host->act_tags[i] = 0;
		host->max_tags[i] = 0xFF;
	}			/* for */
	printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
	       host->addr, host->pci_dev->irq,
	       host->bios_addr, host->scsi_id);
	/* Reset SCSI Bus */
	if (host->config & HCC_SCSI_RESET) {
		printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
		initio_reset_scsi(host, 10);
	}
	outb(0x17, host->addr + TUL_SCFG1);
	outb(0xE9, host->addr + TUL_SIntEnable);
}
/**
 * initio_alloc_scb - Allocate an SCB
 * @host: InitIO host we are allocating for
 *
 * Detach and return the head of the host's free-SCB list, marking it
 * rented, or return NULL when the list is empty.  The list is
 * protected by host->avail_lock.
 */
static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
{
	unsigned long flags;
	struct scsi_ctrl_blk *scb;

	spin_lock_irqsave(&host->avail_lock, flags);
	scb = host->first_avail;
	if (scb != NULL) {
#if DEBUG_QUEUE
		printk("find scb at %p\n", scb);
#endif
		host->first_avail = scb->next;
		if (host->first_avail == NULL)
			host->last_avail = NULL;
		scb->next = NULL;
		scb->status = SCB_RENT;
	}
	spin_unlock_irqrestore(&host->avail_lock, flags);
	return scb;
}
/**
 * initio_release_scb - Release an SCB
 * @host: InitIO host that owns the SCB
 * @cmnd: SCB command block being returned
 *
 * Clear the SCB's command association and status, then append it to
 * the tail of the host's free list under host->avail_lock.
 */
static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
{
	unsigned long flags;

#if DEBUG_QUEUE
	printk("Release SCB %p; ", cmnd);
#endif
	spin_lock_irqsave(&(host->avail_lock), flags);
	cmnd->srb = NULL;
	cmnd->status = 0;
	cmnd->next = NULL;
	if (host->last_avail == NULL)
		host->first_avail = cmnd;	/* list was empty */
	else
		host->last_avail->next = cmnd;
	host->last_avail = cmnd;
	spin_unlock_irqrestore(&(host->avail_lock), flags);
}
/***************************************************************************/
/* Append an SCB to the tail of the pending queue, marking it pending.
 * Caller must hold the appropriate host lock. */
static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
	printk("Append pend SCB %p; ", scbp);
#endif
	scbp->status = SCB_PEND;
	scbp->next = NULL;
	if (host->last_pending == NULL)
		host->first_pending = scbp;	/* queue was empty */
	else
		host->last_pending->next = scbp;
	host->last_pending = scbp;
}
/***************************************************************************/
/* Push an SCB onto the head of the pending queue (used to requeue
 * auto request-sense commands ahead of everything else). */
static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
	printk("Push pend SCB %p; ", scbp);
#endif
	scbp->status = SCB_PEND;
	scbp->next = host->first_pending;
	host->first_pending = scbp;
	if (scbp->next == NULL)
		host->last_pending = scbp;	/* queue was empty */
}
/*
 * initio_find_first_pend_scb - find the first runnable pending SCB
 * @host: InitIO host
 *
 * Walks the pending queue and returns the first SCB that can start
 * now, or NULL:
 *  - non-ExecSCSI requests are always runnable;
 *  - untagged commands run only when the target has no tagged I/O in
 *    flight and is not busy;
 *  - tagged commands run unless the target's tag depth is exhausted
 *    or the target is busy.
 *
 * Fix: the tagged-command check used bitwise '|' where logical '||'
 * was intended.  The outcome is the same here (the result is only
 * truth-tested), but '||' states the intent and short-circuits.
 */
static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
{
	struct scsi_ctrl_blk *first;

	first = host->first_pending;
	while (first != NULL) {
		if (first->opcode != ExecSCSI)
			return first;
		if (first->tagmsg == 0) {
			if ((host->act_tags[first->target] == 0) &&
			    !(host->targets[first->target].flags & TCF_BUSY))
				return first;
		} else {
			if ((host->act_tags[first->target] >=
			     host->max_tags[first->target]) ||
			    (host->targets[first->target].flags & TCF_BUSY)) {
				first = first->next;
				continue;
			}
			return first;
		}
		first = first->next;
	}
	return first;
}
/* Remove a specific SCB from the pending queue, if present, keeping
 * first/last pointers consistent. */
static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	struct scsi_ctrl_blk *cur, *prev;

#if DEBUG_QUEUE
	printk("unlink pend SCB %p; ", scb);
#endif
	prev = host->first_pending;
	for (cur = host->first_pending; cur != NULL; prev = cur, cur = cur->next) {
		if (cur != scb)
			continue;
		/* Unlink this SCB */
		if (cur == host->first_pending) {
			host->first_pending = cur->next;
			if (host->first_pending == NULL)
				host->last_pending = NULL;
		} else {
			prev->next = cur->next;
			if (cur == host->last_pending)
				host->last_pending = prev;
		}
		cur->next = NULL;
		break;
	}
}
/* Append an SCB to the tail of the busy queue, updating per-target
 * accounting: tagged commands bump the active-tag count, untagged
 * commands mark the target busy. */
static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
	printk("append busy SCB %p; ", scbp);
#endif
	if (scbp->tagmsg)
		host->act_tags[scbp->target]++;
	else
		host->targets[scbp->target].flags |= TCF_BUSY;
	scbp->status = SCB_BUSY;
	scbp->next = NULL;
	if (host->last_busy == NULL)
		host->first_busy = scbp;	/* queue was empty */
	else
		host->last_busy->next = scbp;
	host->last_busy = scbp;
}
/***************************************************************************/
/* Detach and return the head of the busy queue (or NULL), reversing
 * the per-target accounting done by initio_append_busy_scb(). */
static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->first_busy;

	if (scb != NULL) {
		host->first_busy = scb->next;
		if (host->first_busy == NULL)
			host->last_busy = NULL;
		scb->next = NULL;
		if (scb->tagmsg)
			host->act_tags[scb->target]--;
		else
			host->targets[scb->target].flags &= ~TCF_BUSY;
	}
#if DEBUG_QUEUE
	printk("Pop busy SCB %p; ", scb);
#endif
	return scb;
}
/***************************************************************************/
/* Remove a specific SCB from the busy queue, if present, keeping
 * first/last pointers consistent and reversing the per-target
 * accounting (tag count / TCF_BUSY). */
static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	struct scsi_ctrl_blk *cur, *prev;

#if DEBUG_QUEUE
	printk("unlink busy SCB %p; ", scb);
#endif
	prev = host->first_busy;
	for (cur = host->first_busy; cur != NULL; prev = cur, cur = cur->next) {
		if (cur != scb)
			continue;
		/* Unlink this SCB */
		if (cur == host->first_busy) {
			host->first_busy = cur->next;
			if (host->first_busy == NULL)
				host->last_busy = NULL;
		} else {
			prev->next = cur->next;
			if (cur == host->last_busy)
				host->last_busy = prev;
		}
		cur->next = NULL;
		if (cur->tagmsg)
			host->act_tags[cur->target]--;
		else
			host->targets[cur->target].flags &= ~TCF_BUSY;
		break;
	}
}
/*
 * initio_find_busy_scb - look up a busy SCB by target/LUN
 * @host: InitIO host
 * @tarlun: (lun << 8) | target key to search for
 *
 * Walks the busy queue and returns the first SCB whose lun/target
 * pair matches @tarlun, or NULL.  The SCB is NOT unlinked.
 *
 * Cleanups: removed the dead 'prev' local (assigned but never read)
 * and added 'static' to match the forward declaration earlier in
 * this file.
 */
static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
	struct scsi_ctrl_blk *tmp;
	u16 scbp_tarlun;

	for (tmp = host->first_busy; tmp != NULL; tmp = tmp->next) {
		scbp_tarlun = (tmp->lun << 8) | (tmp->target);
		if (scbp_tarlun == tarlun)
			break;
	}
#if DEBUG_QUEUE
	printk("find busy SCB %p; ", tmp);
#endif
	return tmp;
}
/* Append a completed SCB to the tail of the done queue. */
static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
	printk("append done SCB %p; ", scbp);
#endif
	scbp->status = SCB_DONE;
	scbp->next = NULL;
	if (host->last_done == NULL)
		host->first_done = scbp;	/* queue was empty */
	else
		host->last_done->next = scbp;
	host->last_done = scbp;
}
/*
 * initio_find_done_scb - pop the next completed SCB
 * @host: InitIO host
 *
 * Detach and return the head of the done queue, or NULL when empty.
 *
 * Added 'static' to match the forward declaration earlier in this
 * file; the symbol is only used internally.
 */
static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
{
	struct scsi_ctrl_blk *tmp;

	if ((tmp = host->first_done) != NULL) {
		if ((host->first_done = tmp->next) == NULL)
			host->last_done = NULL;
		tmp->next = NULL;
	}
#if DEBUG_QUEUE
	printk("find done SCB %p; ",tmp);
#endif
	return tmp;
}
/*
 * initio_abort_srb - attempt to abort a SCSI command
 * @host: InitIO host
 * @srbp: mid-layer command to abort
 *
 * If the state machine is idle (semaph dropped, nothing active),
 * kicks tulip_main() once and asks the mid-layer to retry
 * (SCSI_ABORT_SNOOZE).  Otherwise searches the pending queue, then
 * the busy queue, for an SCB owning @srbp:
 *  - pending: unlink it, mark HOST_ABORTED and complete it via the
 *    post callback -> SCSI_ABORT_SUCCESS;
 *  - busy: only tagged, non-active commands can be aborted the same
 *    way; active or untagged ones return SCSI_ABORT_BUSY.
 * Returns SCSI_ABORT_NOT_RUNNING when the command is not found.
 *
 * All queue manipulation happens under host->semaph_lock; note the
 * lock is dropped around the tulip_main() call in the idle path.
 */
static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
{
	unsigned long flags;
	struct scsi_ctrl_blk *tmp, *prev;

	spin_lock_irqsave(&host->semaph_lock, flags);
	if ((host->semaph == 0) && (host->active == NULL)) {
		/* disable Jasmin SCSI Int */
		outb(0x1F, host->addr + TUL_Mask);
		spin_unlock_irqrestore(&host->semaph_lock, flags);
		/* FIXME: synchronize_irq needed ? */
		tulip_main(host);
		spin_lock_irqsave(&host->semaph_lock, flags);
		host->semaph = 1;
		outb(0x0F, host->addr + TUL_Mask);
		spin_unlock_irqrestore(&host->semaph_lock, flags);
		return SCSI_ABORT_SNOOZE;
	}
	prev = tmp = host->first_pending;	/* Check Pend queue */
	while (tmp != NULL) {
		/* 07/27/98 */
		if (tmp->srb == srbp) {
			if (tmp == host->active) {
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else if (tmp == host->first_pending) {
				if ((host->first_pending = tmp->next) == NULL)
					host->last_pending = NULL;
			} else {
				prev->next = tmp->next;
				if (tmp == host->last_pending)
					host->last_pending = prev;
			}
			tmp->hastat = HOST_ABORTED;
			tmp->flags |= SCF_DONE;
			if (tmp->flags & SCF_POST)
				(*tmp->post) ((u8 *) host, (u8 *) tmp);
			spin_unlock_irqrestore(&host->semaph_lock, flags);
			return SCSI_ABORT_SUCCESS;
		}
		prev = tmp;
		tmp = tmp->next;
	}
	prev = tmp = host->first_busy;	/* Check Busy queue */
	while (tmp != NULL) {
		if (tmp->srb == srbp) {
			if (tmp == host->active) {
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else if (tmp->tagmsg == 0) {
				/* untagged busy command: cannot safely abort */
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else {
				host->act_tags[tmp->target]--;
				if (tmp == host->first_busy) {
					if ((host->first_busy = tmp->next) == NULL)
						host->last_busy = NULL;
				} else {
					prev->next = tmp->next;
					if (tmp == host->last_busy)
						host->last_busy = prev;
				}
				tmp->next = NULL;
				tmp->hastat = HOST_ABORTED;
				tmp->flags |= SCF_DONE;
				if (tmp->flags & SCF_POST)
					(*tmp->post) ((u8 *) host, (u8 *) tmp);
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_SUCCESS;
			}
		}
		prev = tmp;
		tmp = tmp->next;
	}
	spin_unlock_irqrestore(&host->semaph_lock, flags);
	return SCSI_ABORT_NOT_RUNNING;
}
/***************************************************************************/
/*
 * initio_bad_seq - recover from an unexpected SCSI phase sequence
 * @host: InitIO host
 *
 * Fails the active SCB (if any) with HOST_BAD_PHAS, stops the bus
 * master, resets the SCSI bus and runs the post-reset handler.
 *
 * Fix: diagnostic message said "initio_bad_seg"; corrected to match
 * the function name so the log line can be grepped.
 */
static int initio_bad_seq(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;

	printk("initio_bad_seq c=%d\n", host->index);
	if ((scb = host->active) != NULL) {
		initio_unlink_busy_scb(host, scb);
		scb->hastat = HOST_BAD_PHAS;
		scb->tastat = 0;
		initio_append_done_scb(host, scb);
	}
	initio_stop_bm(host);
	initio_reset_scsi(host, 8);	/* 7/29/98 */
	return initio_post_scsi_rst(host);
}
/************************************************************************/
/*
 * initio_exec_scb - queue an SCB for execution
 * @host: InitIO host
 * @scb: SCB to execute
 *
 * Resets the SCB's transfer bookkeeping and appends it to the
 * pending queue.  If this context holds the "semaph" token, it masks
 * the chip interrupt, drops the token and drives tulip_main()
 * directly (unlocked); otherwise the SCB will be picked up by the
 * context that currently owns the state machine.
 */
static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	unsigned long flags;

	scb->mode = 0;
	scb->sgidx = 0;
	scb->sgmax = scb->sglen;

	spin_lock_irqsave(&host->semaph_lock, flags);
	initio_append_pend_scb(host, scb);	/* Append this SCB to Pending queue */

	/* VVVVV 07/21/98 */
	if (host->semaph == 1) {
		/* Disable Jasmin SCSI Int */
		outb(0x1F, host->addr + TUL_Mask);
		host->semaph = 0;
		/* state machine runs without the lock held */
		spin_unlock_irqrestore(&host->semaph_lock, flags);
		tulip_main(host);
		spin_lock_irqsave(&host->semaph_lock, flags);
		host->semaph = 1;
		outb(0x0F, host->addr + TUL_Mask);	/* re-enable SCSI interrupts */
	}
	spin_unlock_irqrestore(&host->semaph_lock, flags);
	return;
}
/***************************************************************************/
/* Interrupt service worker: if a chip interrupt is pending and this
 * context can take the "semaph" token, mask the chip interrupt, run
 * the state machine, then restore the token and mask.  Returns 1 when
 * the interrupt was handled, 0 otherwise. */
static int initio_isr(struct initio_host * host)
{
	if (!(inb(host->addr + TUL_Int) & TSS_INT_PENDING))
		return 0;
	if (host->semaph != 1)
		return 0;

	outb(0x1F, host->addr + TUL_Mask);
	/* Disable Tulip SCSI Int */
	host->semaph = 0;
	tulip_main(host);
	host->semaph = 1;
	outb(0x0F, host->addr + TUL_Mask);
	return 1;
}
/*
 * tulip_main - drive the SCSI state machine and reap completed SCBs
 * @host: InitIO host
 *
 * Loops running tulip_scsi() and then walking the done queue:
 *  - INI_QUEUE_FULL completions shrink the target's tag limit and
 *    requeue the command;
 *  - CHECK CONDITION (tastat == 2) completions with SCF_SENSE are
 *    converted into an auto REQUEST SENSE command pushed to the front
 *    of the pending queue;
 *  - completions that arrive while already in request-sense mode are
 *    forced back to tastat 2 (and flagged HOST_BAD_PHAS if the sense
 *    itself check-conditioned);
 *  - everything else is marked done and posted via scb->post.
 * Returns 1 when there is nothing more to do right now (an SCB is
 * active on the bus, or no pending SCB is runnable).
 */
static int tulip_main(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;

	for (;;) {
		tulip_scsi(host);	/* Call tulip_scsi */

		/* Walk the list of completed SCBs */
		while ((scb = initio_find_done_scb(host)) != NULL) {	/* find done entry */
			if (scb->tastat == INI_QUEUE_FULL) {
				/* target queue full: lower tag depth, retry */
				host->max_tags[scb->target] =
				    host->act_tags[scb->target] - 1;
				scb->tastat = 0;
				initio_append_pend_scb(host, scb);
				continue;
			}
			if (!(scb->mode & SCM_RSENS)) {	/* not in auto req. sense mode */
				if (scb->tastat == 2) {

					/* clr sync. nego flag */

					if (scb->flags & SCF_SENSE) {
						u8 len;
						len = scb->senselen;
						if (len == 0)
							len = 1;
						/* redirect the data buffer at the sense buffer */
						scb->buflen = scb->senselen;
						scb->bufptr = scb->senseptr;
						scb->flags &= ~(SCF_SG | SCF_DIR);	/* for xfer_data_in */
						/* so, we won't report wrong direction in xfer_data_in,
						   and won't report HOST_DO_DU in state_6 */
						scb->mode = SCM_RSENS;
						scb->ident &= 0xBF;	/* Disable Disconnect */
						scb->tagmsg = 0;
						scb->tastat = 0;
						scb->cdblen = 6;
						scb->cdb[0] = SCSICMD_RequestSense;
						scb->cdb[1] = 0;
						scb->cdb[2] = 0;
						scb->cdb[3] = 0;
						scb->cdb[4] = len;
						scb->cdb[5] = 0;
						initio_push_pend_scb(host, scb);
						break;
					}
				}
			} else {	/* in request sense mode */

				if (scb->tastat == 2) {		/* check condition status again after sending
								   request sense cmd 0x3 */
					scb->hastat = HOST_BAD_PHAS;
				}
				scb->tastat = 2;
			}
			scb->flags |= SCF_DONE;
			if (scb->flags & SCF_POST) {
				/* FIXME: only one post method and lose casts */
				(*scb->post) ((u8 *) host, (u8 *) scb);
			}
		}		/* while */

		/* find_active: */
		if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
			continue;	/* more interrupt work pending: loop again */

		if (host->active)	/* return to OS and wait for xfer_done_ISR/Selected_ISR */
			return 1;	/* return to OS, enable interrupt */

		/* Check pending SCB */
		if (initio_find_first_pend_scb(host) == NULL)
			return 1;	/* return to OS, enable interrupt */
	}			/* End of for loop */
	/* statement won't reach here */
}
/**
 *	tulip_scsi		-	service the SCSI engine
 *	@host: InitIO host to process
 *
 *	First service any pending chip interrupt (bus reset, reselection,
 *	selection timeout, disconnect, phase service).  When the bus is
 *	idle, start the next pending SCB: program the bus IDs, select the
 *	target (via select-with-ATN-and-stop when WDTR or sync negotiation
 *	is still needed), or handle BusDevRst / AbortCmd requests.
 */
static void tulip_scsi(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;
	struct target_control *active_tc;

	/* make sure to service interrupt asap */
	if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
		host->phase = host->jsstatus0 & TSS_PH_MASK;
		host->jsstatus1 = inb(host->addr + TUL_SStatus1);
		host->jsint = inb(host->addr + TUL_SInt);
		if (host->jsint & TSS_SCSIRST_INT) {	/* SCSI bus reset detected */
			int_initio_scsi_rst(host);
			return;
		}
		if (host->jsint & TSS_RESEL_INT) {	/* if selected/reselected interrupt */
			if (int_initio_resel(host) == 0)
				initio_next_state(host);
			return;
		}
		if (host->jsint & TSS_SEL_TIMEOUT) {
			int_initio_busfree(host);
			return;
		}
		if (host->jsint & TSS_DISC_INT) {	/* BUS disconnection */
			int_initio_busfree(host);	/* unexpected bus free or sel timeout */
			return;
		}
		if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) {	/* func complete or Bus service */
			if ((scb = host->active) != NULL)
				initio_next_state(host);
			return;
		}
	}
	if (host->active != NULL)
		return;

	if ((scb = initio_find_first_pend_scb(host)) == NULL)
		return;

	/* program HBA's SCSI ID & target SCSI ID */
	outb((host->scsi_id << 4) | (scb->target & 0x0F),
		host->addr + TUL_SScsiId);
	if (scb->opcode == ExecSCSI) {
		active_tc = &host->targets[scb->target];

		if (scb->tagmsg)
			active_tc->drv_flags |= TCF_DRV_EN_TAG;
		else
			active_tc->drv_flags &= ~TCF_DRV_EN_TAG;

		outb(active_tc->js_period, host->addr + TUL_SPeriod);
		if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {	/* do wdtr negotiation */
			initio_select_atn_stop(host, scb);
		} else {
			if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {	/* do sync negotiation */
				initio_select_atn_stop(host, scb);
			} else {
				if (scb->tagmsg)
					initio_select_atn3(host, scb);
				else
					initio_select_atn(host, scb);
			}
		}
		if (scb->flags & SCF_POLL) {
			/* Polled command: spin the state machine to completion here */
			while (wait_tulip(host) != -1) {
				if (initio_next_state(host) == -1)
					break;
			}
		}
	} else if (scb->opcode == BusDevRst) {
		initio_select_atn_stop(host, scb);
		scb->next_state = 8;	/* state 8 = bus device reset */
		if (scb->flags & SCF_POLL) {
			while (wait_tulip(host) != -1) {
				if (initio_next_state(host) == -1)
					break;
			}
		}
	} else if (scb->opcode == AbortCmd) {
		if (initio_abort_srb(host, scb->srb) != 0) {
			/* Victim was still pending: just drop it */
			initio_unlink_pend_scb(host, scb);
			initio_release_scb(host, scb);
		} else {
			/* Already on the bus: escalate to a device reset */
			scb->opcode = BusDevRst;
			initio_select_atn_stop(host, scb);
			scb->next_state = 8;
		}
	} else {
		initio_unlink_pend_scb(host, scb);
		scb->hastat = 0x16;	/* bad command */
		initio_append_done_scb(host, scb);
	}
	return;
}
/**
* initio_next_state - Next SCSI state
* @host: InitIO host we are processing
*
* Progress the active command block along the state machine
* until we hit a state which we must wait for activity to occur.
*
* Returns zero or a negative code.
*/
static int initio_next_state(struct initio_host * host)
{
int next;
next = host->active->next_state;
for (;;) {
switch (next) {
case 1:
next = initio_state_1(host);
break;
case 2:
next = initio_state_2(host);
break;
case 3:
next = initio_state_3(host);
break;
case 4:
next = initio_state_4(host);
break;
case 5:
next = initio_state_5(host);
break;
case 6:
next = initio_state_6(host);
break;
case 7:
next = initio_state_7(host);
break;
case 8:
return initio_bus_device_reset(host);
default:
return initio_bad_seq(host);
}
if (next <= 0)
return next;
}
}
/**
* initio_state_1 - SCSI state machine
* @host: InitIO host we are controlling
*
* Perform SCSI state processing for Select/Attention/Stop
*/
static int initio_state_1(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
	printk("-s1-");
#endif
	/* Move the SCB from pending to busy */
	initio_unlink_pend_scb(host, scb);
	initio_append_busy_scb(host, scb);

	outb(active_tc->sconfig0, host->addr + TUL_SConfig );
	/* ATN on */
	if (host->phase == MSG_OUT) {
		outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
		outb(scb->ident, host->addr + TUL_SFifo);

		if (scb->tagmsg) {
			/* Queue the tag message pair behind IDENTIFY */
			outb(scb->tagmsg, host->addr + TUL_SFifo);
			outb(scb->tagid, host->addr + TUL_SFifo);
		}
		if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
			/* Queue an extended WDTR message */
			active_tc->flags |= TCF_WDTR_DONE;
			outb(MSG_EXTEND, host->addr + TUL_SFifo);
			outb(2, host->addr + TUL_SFifo);	/* Extended msg length */
			outb(3, host->addr + TUL_SFifo);	/* Wide data xfer request */
			outb(1, host->addr + TUL_SFifo);	/* Width: 16 bits */
		} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
			/* Queue an extended SDTR message */
			active_tc->flags |= TCF_SYNC_DONE;
			outb(MSG_EXTEND, host->addr + TUL_SFifo);
			outb(3, host->addr + TUL_SFifo);	/* extended msg length */
			outb(1, host->addr + TUL_SFifo);	/* sync request */
			outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
			outb(MAX_OFFSET, host->addr + TUL_SFifo);	/* REQ/ACK offset */
		}
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
	}
	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
	outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
	/* Into before CDB xfer */
	return 3;
}
/**
* initio_state_2 - SCSI state machine
* @host: InitIO host we are controlling
*
* state after selection with attention
* state after selection with attention3
*/
static int initio_state_2(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
	printk("-s2-");
#endif
	/* Selection completed: move the SCB from pending to busy */
	initio_unlink_pend_scb(host, scb);
	initio_append_busy_scb(host, scb);

	outb(active_tc->sconfig0, host->addr + TUL_SConfig);

	/* Command phase already completed by the chip? Skip state 3. */
	if (host->jsstatus1 & TSS_CMD_PH_CMP)
		return 4;

	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
	outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
	/* Into before CDB xfer */
	return 3;
}
/**
* initio_state_3 - SCSI state machine
* @host: InitIO host we are controlling
*
* state before CDB xfer is done
*/
static int initio_state_3(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
	int i;

#if DEBUG_STATE
	printk("-s3-");
#endif
	/* Loop until the CDB has gone out or an error/wait occurs */
	for (;;) {
		switch (host->phase) {
		case CMD_OUT:	/* Command out phase */
			/* Push the whole CDB through the FIFO */
			for (i = 0; i < (int) scb->cdblen; i++)
				outb(scb->cdb[i], host->addr + TUL_SFifo);
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			/* Still in command phase: target did not consume the CDB */
			if (host->phase == CMD_OUT)
				return initio_bad_seq(host);
			return 4;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 3;	/* resume here after the message */
			if (initio_msgin(host) == -1)
				return -1;
			break;

		case STATUS_IN:	/* Status phase */
			if (initio_status_msg(host) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
				/* Nothing to negotiate: answer with NOP */
				outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
			} else {
				/* Send an SDTR message to start sync negotiation */
				active_tc->flags |= TCF_SYNC_DONE;

				outb(MSG_EXTEND, host->addr + TUL_SFifo);
				outb(3, host->addr + TUL_SFifo);	/* ext. msg len */
				outb(1, host->addr + TUL_SFifo);	/* sync request */
				outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
				outb(MAX_OFFSET, host->addr + TUL_SFifo);	/* REQ/ACK offset */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
				outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
				outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
			}
			break;

		default:
			return initio_bad_seq(host);
		}
	}
}
/**
* initio_state_4 - SCSI state machine
* @host: InitIO host we are controlling
*
* SCSI state machine. State 4
*/
static int initio_state_4(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;

#if DEBUG_STATE
	printk("-s4-");
#endif
	/* Command transfers no data: go straight to post-data handling */
	if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
		return 6;	/* Go to state 6 (After data) */
	}
	for (;;) {
		/* All expected data moved: done with the data phase */
		if (scb->buflen == 0)
			return 6;

		switch (host->phase) {

		case STATUS_IN:	/* Status phase */
			/* Data was expected but the target skipped to status:
			   record a data underrun */
			if ((scb->flags & SCF_DIR) != 0)	/* if direction bit set then report data underrun */
				scb->hastat = HOST_DO_DU;
			if ((initio_status_msg(host)) == -1)
				return -1;
			break;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 0x4;	/* resume here after the message */
			if (initio_msgin(host) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			if (host->jsstatus0 & TSS_PAR_ERROR) {
				/* Parity error: abandon the data transfer and
				   tell the target via Initiator Detected Error */
				scb->buflen = 0;
				scb->hastat = HOST_DO_DU;
				if (initio_msgout_ide(host) == -1)
					return -1;
				return 6;
			} else {
				outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
			}
			break;

		case DATA_IN:	/* Data in phase */
			return initio_xfer_data_in(host);

		case DATA_OUT:	/* Data out phase */
			return initio_xfer_data_out(host);

		default:
			return initio_bad_seq(host);
		}
	}
}
/**
* initio_state_5 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after dma xfer done or phase change before xfer done
*/
static int initio_state_5(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	long cnt, xcnt;		/* cannot use unsigned !! code: if (xcnt < 0) */

#if DEBUG_STATE
	printk("-s5-");
#endif
	/*------ get remaining count -------*/
	/* Bytes the chip did NOT transfer (24-bit counter) */
	cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;

	if (inb(host->addr + TUL_XCmd) & 0x20) {
		/* ----------------------- DATA_IN ----------------------------- */
		/* check scsi parity error */
		if (host->jsstatus0 & TSS_PAR_ERROR)
			scb->hastat = HOST_DO_DU;
		if (inb(host->addr + TUL_XStatus) & XPEND) {	/* DMA xfer pending, Send STOP */
			/* tell Hardware scsi xfer has been terminated */
			outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
			/* wait until DMA xfer not pending */
			while (inb(host->addr + TUL_XStatus) & XPEND)
				cpu_relax();
		}
	} else {
		/*-------- DATA OUT -----------*/
		if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
			/* Transfer incomplete: bytes still sitting in the SCSI
			   FIFO were not sent; add them back to the residual
			   (each FIFO slot is 2 bytes on a wide bus) */
			if (host->active_tc->js_period & TSC_WIDE_SCSI)
				cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
			else
				cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
		}
		if (inb(host->addr + TUL_XStatus) & XPEND) {	/* if DMA xfer is pending, abort DMA xfer */
			outb(TAX_X_ABT, host->addr + TUL_XCmd);
			/* wait Abort DMA xfer done */
			while ((inb(host->addr + TUL_Int) & XABT) == 0)
				cpu_relax();
		}
		if ((cnt == 1) && (host->phase == DATA_OUT)) {
			/* Exactly one byte left: push it by hand via the FIFO */
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			cnt = 0;
		} else {
			if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
				outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
		}
	}
	if (cnt == 0) {
		/* Everything transferred */
		scb->buflen = 0;
		return 6;	/* After Data */
	}
	/* Update active data pointer */
	xcnt = (long) scb->buflen - cnt;	/* xcnt== bytes already xferred */
	scb->buflen = (u32) cnt;		/* cnt == bytes left to be xferred */
	if (scb->flags & SCF_SG) {
		/* Walk the scatter list to find where the transfer stopped
		   and rewrite the list so the next burst resumes there */
		struct sg_entry *sgp;
		unsigned long i;

		sgp = &scb->sglist[scb->sgidx];
		for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
			xcnt -= (long) sgp->len;
			if (xcnt < 0) {		/* this sgp xfer half done */
				xcnt += (long) sgp->len;	/* xcnt == bytes xferred in this sgp */
				sgp->data += (u32) xcnt;	/* new ptr to be xfer */
				sgp->len -= (u32) xcnt;		/* new len to be xfer */
				scb->bufptr += ((u32) (i - scb->sgidx) << 3);
				/* new SG table ptr */
				scb->sglen = (u8) (scb->sgmax - i);
				/* new SG table len */
				scb->sgidx = (u16) i;
				/* for next disc and come in this loop */
				return 4;	/* Go to state 4 */
			}
			/* else (xcnt >= 0 , i.e. this sgp already xferred */
		}		/* for */
		return 6;	/* Go to state 6 */
	} else {
		scb->bufptr += (u32) xcnt;
	}
	return 4;		/* Go to state 4 */
}
/**
* initio_state_6 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after Data phase
*/
static int initio_state_6(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;

#if DEBUG_STATE
	printk("-s6-");
#endif
	/* Handle whatever phases follow the data phase until disconnect
	   or an error; unexpected further data is padded away. */
	for (;;) {
		switch (host->phase) {
		case STATUS_IN:	/* Status phase */
			if ((initio_status_msg(host)) == -1)
				return -1;
			break;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 6;	/* resume here after the message */
			if ((initio_msgin(host)) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			break;

		case DATA_IN:	/* Data in phase */
			return initio_xpad_in(host);

		case DATA_OUT:	/* Data out phase */
			return initio_xpad_out(host);

		default:
			return initio_bad_seq(host);
		}
	}
}
/**
* initio_state_7 - SCSI state machine
* @host: InitIO host we are controlling
*
*/
int initio_state_7(struct initio_host * host)
{
	int residual, k;

#if DEBUG_STATE
	printk("-s7-");
#endif
	/* Drain whatever bytes are left in the SCSI FIFO */
	residual = inb(host->addr + TUL_SFifoCnt) & 0x1F;
	for (k = 0; k < residual; k++)
		inb(host->addr + TUL_SFifo);

	switch (host->phase) {
	case DATA_IN:		/* Data in phase */
	case DATA_OUT:		/* Data out phase */
		/* A data phase here is an illegal sequence */
		return initio_bad_seq(host);
	default:
		return 6;	/* Go to state 6 */
	}
}
/**
* initio_xfer_data_in - Commence data input
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_in(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;

	/* Target wants to send but the command is outbound: skip to state 6 */
	if ((scb->flags & SCF_DIR) == SCF_DOUT)
		return 6;	/* wrong direction */

	outl(scb->buflen, host->addr + TUL_SCnt0);
	outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd);	/* 7/25/95 */

	if (scb->flags & SCF_SG) {	/* S/G xfer */
		/* sglen entries, 8 bytes each (hence << 3) */
		outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
		outl(scb->bufptr, host->addr + TUL_XAddH);
		outb(TAX_SG_IN, host->addr + TUL_XCmd);
	} else {
		outl(scb->buflen, host->addr + TUL_XCntH);
		outl(scb->bufptr, host->addr + TUL_XAddH);
		outb(TAX_X_IN, host->addr + TUL_XCmd);
	}
	scb->next_state = 0x5;	/* resume in state 5 when the DMA completes */
	return 0;		/* return to OS, wait xfer done , let jas_isr come in */
}
/**
* initio_xfer_data_out - Commence data output
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_out(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;

	/* Target wants data but the command is inbound: skip to state 6 */
	if ((scb->flags & SCF_DIR) == SCF_DIN)
		return 6;	/* wrong direction */

	outl(scb->buflen, host->addr + TUL_SCnt0);
	outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);

	if (scb->flags & SCF_SG) {	/* S/G xfer */
		/* sglen entries, 8 bytes each (hence << 3) */
		outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
		outl(scb->bufptr, host->addr + TUL_XAddH);
		outb(TAX_SG_OUT, host->addr + TUL_XCmd);
	} else {
		outl(scb->buflen, host->addr + TUL_XCntH);
		outl(scb->bufptr, host->addr + TUL_XAddH);
		outb(TAX_X_OUT, host->addr + TUL_XCmd);
	}
	scb->next_state = 0x5;	/* resume in state 5 when the DMA completes */
	return 0;		/* return to OS, wait xfer done , let jas_isr come in */
}
/*
 * initio_xpad_in - absorb and discard unexpected inbound data.
 * The target is sending more data than the command asked for; read
 * it one transfer width at a time and throw it away until the phase
 * changes.  Flags a data overrun unless the SCB disabled the check.
 */
int initio_xpad_in(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;

	if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
		scb->hastat = HOST_DO_DU;	/* over run */
	for (;;) {
		/* Pull 2 bytes per step on a wide bus, 1 otherwise */
		if (active_tc->js_period & TSC_WIDE_SCSI)
			outl(2, host->addr + TUL_SCnt0);
		else
			outl(1, host->addr + TUL_SCnt0);

		outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
		if (host->phase != DATA_IN) {
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
			return 6;
		}
		inb(host->addr + TUL_SFifo);	/* discard the padding byte(s) */
	}
}
/*
 * initio_xpad_out - feed the target pad bytes it keeps asking for.
 * The target wants more data than the command supplied; send zero
 * bytes one transfer width at a time until the phase changes.
 * Flags a data overrun unless the SCB disabled the check.
 */
int initio_xpad_out(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;

	if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
		scb->hastat = HOST_DO_DU;	/* over run */
	for (;;) {
		/* Push 2 bytes per step on a wide bus, 1 otherwise */
		if (active_tc->js_period & TSC_WIDE_SCSI)
			outl(2, host->addr + TUL_SCnt0);
		else
			outl(1, host->addr + TUL_SCnt0);

		outb(0, host->addr + TUL_SFifo);	/* pad with zero */
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		if ((wait_tulip(host)) == -1)
			return -1;
		if (host->phase != DATA_OUT) {	/* Disable wide CPU to allow read 16 bits */
			outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
			return 6;
		}
	}
}
/*
 * initio_status_msg - handle the STATUS + MESSAGE IN phase pair.
 * Issue the chip's "command complete" sequence, capture the target
 * status byte into the SCB, then process the message that follows
 * (command complete, linked command, or a parity retry).
 */
int initio_status_msg(struct initio_host * host)
{				/* status & MSG_IN */
	struct scsi_ctrl_blk *scb = host->active;
	u8 msg;

	outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
	if (wait_tulip(host) == -1)
		return -1;

	/* get status */
	scb->tastat = inb(host->addr + TUL_SFifo);

	if (host->phase == MSG_OUT) {
		/* Target wants a message from us: report the parity error
		   if one occurred, otherwise just a NOP */
		if (host->jsstatus0 & TSS_PAR_ERROR)
			outb(MSG_PARITY, host->addr + TUL_SFifo);
		else
			outb(MSG_NOP, host->addr + TUL_SFifo);
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		return wait_tulip(host);
	}
	if (host->phase == MSG_IN) {
		msg = inb(host->addr + TUL_SFifo);
		if (host->jsstatus0 & TSS_PAR_ERROR) {	/* Parity error */
			/* Ask the target to resend via MESSAGE PARITY ERROR */
			if ((initio_msgin_accept(host)) == -1)
				return -1;
			if (host->phase != MSG_OUT)
				return initio_bad_seq(host);
			outb(MSG_PARITY, host->addr + TUL_SFifo);
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			return wait_tulip(host);
		}
		if (msg == 0) {	/* Command complete */
			/* Status 0x10 bits flag a linked command, which we
			   do not support */
			if ((scb->tastat & 0x18) == 0x10)	/* No link support */
				return initio_bad_seq(host);
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
			outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
			return initio_wait_done_disc(host);
		}
		if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
			if ((scb->tastat & 0x18) == 0x10)
				return initio_msgin_accept(host);
		}
	}
	return initio_bad_seq(host);
}
/*
 * int_initio_busfree - the SCSI bus went to BUS FREE.
 * If a command was active, fail it appropriately: bus free while
 * still selecting means a selection timeout; otherwise the target
 * dropped off the bus unexpectedly.  Then reset the SCSI core state
 * and re-arm hardware reselection.  Always returns -1.
 */
int int_initio_busfree(struct initio_host * host)
{
	struct scsi_ctrl_blk *active = host->active;

	if (active != NULL) {
		if (active->status & SCB_SELECT) {	/* selection timeout */
			initio_unlink_pend_scb(host, active);
			active->hastat = HOST_SEL_TOUT;
		} else {				/* unexpected bus free */
			initio_unlink_busy_scb(host, active);
			active->hastat = HOST_BUS_FREE;
		}
		initio_append_done_scb(host, active);

		host->active = NULL;
		host->active_tc = NULL;
	}
	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);		/* Flush SCSI FIFO */
	outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
	outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);		/* Enable HW reselect */
	return -1;
}
/**
* int_initio_scsi_rst - SCSI reset occurred
* @host: Host seeing the reset
*
* A SCSI bus reset has occurred. Clean up any pending transfer
* the hardware is doing by DMA and then abort all active and
* disconnected commands. The mid layer should sort the rest out
* for us
*/
static int int_initio_scsi_rst(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;
	int i;

	/* if DMA xfer is pending, abort DMA xfer */
	if (inb(host->addr + TUL_XStatus) & 0x01) {
		outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
		/* wait Abort DMA xfer done */
		while ((inb(host->addr + TUL_Int) & 0x04) == 0)
			cpu_relax();
		outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
	}
	/* Abort all active & disconnected scb */
	while ((scb = initio_pop_busy_scb(host)) != NULL) {
		scb->hastat = HOST_BAD_PHAS;
		initio_append_done_scb(host, scb);
	}
	host->active = NULL;
	host->active_tc = NULL;

	/* clr sync nego. done flag: negotiation state is void after
	   a bus reset and must be redone per target */
	for (i = 0; i < host->max_tar; i++)
		host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
	return -1;
}
/**
* int_initio_resel - Reselection occurred
* @host: InitIO host adapter
*
* A SCSI reselection event has been signalled and the interrupt
* is now being processed. Work out which command block needs attention
* and continue processing that command.
*/
int int_initio_resel(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;
	struct target_control *active_tc;
	u8 tag, msg = 0;
	u8 tar, lun;

	if ((scb = host->active) != NULL) {
		/* FIXME: Why check and not just clear ? */
		if (scb->status & SCB_SELECT)	/* if waiting for selection complete */
			scb->status &= ~SCB_SELECT;
		host->active = NULL;
	}
	/* --------- get target id---------------------- */
	tar = inb(host->addr + TUL_SBusId);
	/* ------ get LUN from Identify message----------- */
	lun = inb(host->addr + TUL_SIdent) & 0x0F;
	/* 07/22/98 from 0x1F -> 0x0F */
	active_tc = &host->targets[tar];
	host->active_tc = active_tc;
	/* Restore this target's negotiated transfer parameters */
	outb(active_tc->sconfig0, host->addr + TUL_SConfig);
	outb(active_tc->js_period, host->addr + TUL_SPeriod);

	/* ------------- tag queueing ? ------------------- */
	if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
		/* Expect a tag message pair to identify which queued
		   command is resuming */
		if ((initio_msgin_accept(host)) == -1)
			return -1;
		if (host->phase != MSG_IN)
			goto no_tag;
		outl(1, host->addr + TUL_SCnt0);
		outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
		msg = inb(host->addr + TUL_SFifo);	/* Read Tag Message */

		if (msg < MSG_STAG || msg > MSG_OTAG)	/* Is simple Tag */
			goto no_tag;

		if (initio_msgin_accept(host) == -1)
			return -1;

		if (host->phase != MSG_IN)
			goto no_tag;

		outl(1, host->addr + TUL_SCnt0);
		outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
		tag = inb(host->addr + TUL_SFifo);	/* Read Tag ID */
		scb = host->scb + tag;	/* tag indexes the SCB array */
		if (scb->target != tar || scb->lun != lun) {
			/* Tag does not belong to the reselecting device */
			return initio_msgout_abort_tag(host);
		}
		if (scb->status != SCB_BUSY) {	/* 03/24/95 */
			return initio_msgout_abort_tag(host);
		}
		host->active = scb;
		if ((initio_msgin_accept(host)) == -1)
			return -1;
	} else {		/* No tag */
	      no_tag:
		/* Untagged: look the command up by target/LUN */
		if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
			return initio_msgout_abort_targ(host);
		}
		host->active = scb;
		if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
			if ((initio_msgin_accept(host)) == -1)
				return -1;
		}
	}
	return 0;
}
/**
* int_initio_bad_seq - out of phase
* @host: InitIO host flagging event
*
* We have ended up out of phase somehow. Reset the host controller
* and throw all our toys out of the pram. Let the midlayer clean up
*/
static int int_initio_bad_seq(struct initio_host * host)
{				/* target wrong phase */
	struct scsi_ctrl_blk *failed;
	int target;

	/* Hard-reset the bus, then fail every in-flight command */
	initio_reset_scsi(host, 10);

	while ((failed = initio_pop_busy_scb(host)) != NULL) {
		failed->hastat = HOST_BAD_PHAS;
		initio_append_done_scb(host, failed);
	}
	/* Negotiated parameters are void after a reset */
	for (target = 0; target < host->max_tar; target++)
		host->targets[target].flags &=
			~(TCF_SYNC_DONE | TCF_WDTR_DONE);
	return -1;
}
/**
* initio_msgout_abort_targ - abort the reselecting target
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used by untagged commands.
*/
static int initio_msgout_abort_targ(struct initio_host * host)
{
	/* Raise ATN so the target switches to MESSAGE OUT */
	outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
	if (initio_msgin_accept(host) == -1)
		return -1;
	if (host->phase != MSG_OUT)
		return initio_bad_seq(host);

	/* Send ABORT and wait for the target to disconnect */
	outb(MSG_ABORT, host->addr + TUL_SFifo);
	outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);

	return initio_wait_disc(host);
}
/**
* initio_msgout_abort_tag - abort a tag
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used for tagged commands.
*/
static int initio_msgout_abort_tag(struct initio_host * host)
{
	/* Raise ATN so the target switches to MESSAGE OUT */
	outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
	if (initio_msgin_accept(host) == -1)
		return -1;
	if (host->phase != MSG_OUT)
		return initio_bad_seq(host);

	/* Send ABORT TAG and wait for the target to disconnect */
	outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
	outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);

	return initio_wait_disc(host);
}
/**
* initio_msgin - Message in
* @host: InitIO Host
*
* Process incoming message
*/
static int initio_msgin(struct initio_host * host)
{
	struct target_control *active_tc;

	/* Read and dispatch message bytes until the phase changes */
	for (;;) {
		outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);

		outl(1, host->addr + TUL_SCnt0);
		outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;

		switch (inb(host->addr + TUL_SFifo)) {
		case MSG_DISC:	/* Disconnect msg */
			outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
			return initio_wait_disc(host);

		case MSG_SDP:
		case MSG_RESTORE:
		case MSG_NOP:
			/* Nothing to do: just acknowledge */
			initio_msgin_accept(host);
			break;

		case MSG_REJ:	/* Clear ATN first */
			outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
				host->addr + TUL_SSignal);
			active_tc = host->active_tc;
			/* Target rejected our message: if sync negotiation is
			   still outstanding, re-raise ATN to retry it */
			if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)	/* do sync nego */
				outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
					host->addr + TUL_SSignal);
			initio_msgin_accept(host);
			break;

		case MSG_EXTEND:	/* extended msg */
			initio_msgin_extend(host);
			break;

		case MSG_IGNOREWIDE:
			initio_msgin_accept(host);
			break;

		case MSG_COMP:
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
			outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
			return initio_wait_done_disc(host);

		default:
			/* Anything we do not understand gets MESSAGE REJECT */
			initio_msgout_reject(host);
			break;
		}
		if (host->phase != MSG_IN)
			return host->phase;
	}
	/* statement won't reach here */
}
/*
 * initio_msgout_reject - send MESSAGE REJECT for the last message.
 * Raises ATN, waits for the target to enter MESSAGE OUT, then sends
 * the reject byte.  Returns the resulting phase, or -1 on failure.
 */
static int initio_msgout_reject(struct initio_host * host)
{
	outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);

	if (initio_msgin_accept(host) == -1)
		return -1;

	if (host->phase == MSG_OUT) {
		outb(MSG_REJ, host->addr + TUL_SFifo);	/* Msg reject */
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		return wait_tulip(host);
	}
	return host->phase;
}
/*
 * initio_msgout_ide - send INITIATOR DETECTED ERROR to the target.
 * Used after a parity error during a data phase.
 */
static int initio_msgout_ide(struct initio_host * host)
{
	outb(MSG_IDE, host->addr + TUL_SFifo);	/* Initiator Detected Error */
	outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
	return wait_tulip(host);
}
/*
 * initio_msgin_extend - process an incoming extended SCSI message.
 * Reads the message into host->msg[] (msg[0]=length, msg[1]=code),
 * then handles SDTR (code 1) and WDTR (code 3) negotiation replies;
 * anything else is rejected.  Returns the last wait result or phase.
 */
static int initio_msgin_extend(struct initio_host * host)
{
	u8 len, idx;

	if (initio_msgin_accept(host) != MSG_IN)
		return host->phase;

	/* Get extended msg length */
	outl(1, host->addr + TUL_SCnt0);
	outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
	if (wait_tulip(host) == -1)
		return -1;

	len = inb(host->addr + TUL_SFifo);
	host->msg[0] = len;
	/* Pull the remaining message bytes one at a time */
	for (idx = 1; len != 0; len--) {

		if ((initio_msgin_accept(host)) != MSG_IN)
			return host->phase;
		outl(1, host->addr + TUL_SCnt0);
		outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
		host->msg[idx++] = inb(host->addr + TUL_SFifo);
	}
	if (host->msg[1] == 1) {	/* if it's synchronous data transfer request */
		u8 r;
		if (host->msg[0] != 3)	/* if length is not right */
			return initio_msgout_reject(host);
		if (host->active_tc->flags & TCF_NO_SYNC_NEGO) {	/* Set OFFSET=0 to do async, nego back */
			host->msg[3] = 0;
		} else {
			/* Accept the target's parameters outright when they
			   need no adjustment and sync was already agreed */
			if (initio_msgin_sync(host) == 0 &&
					(host->active_tc->flags & TCF_SYNC_DONE)) {
				initio_sync_done(host);
				return initio_msgin_accept(host);
			}
		}

		/* Counter-propose: raise ATN and send our SDTR back */
		r = inb(host->addr + TUL_SSignal);
		outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
			host->addr + TUL_SSignal);
		if (initio_msgin_accept(host) != MSG_OUT)
			return host->phase;
		/* sync msg out */
		outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);

		initio_sync_done(host);

		outb(MSG_EXTEND, host->addr + TUL_SFifo);
		outb(3, host->addr + TUL_SFifo);
		outb(1, host->addr + TUL_SFifo);
		outb(host->msg[2], host->addr + TUL_SFifo);	/* period */
		outb(host->msg[3], host->addr + TUL_SFifo);	/* offset */
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		return wait_tulip(host);
	}
	if (host->msg[0] != 2 || host->msg[1] != 3)
		return initio_msgout_reject(host);
	/* if it's WIDE DATA XFER REQ */
	if (host->active_tc->flags & TCF_NO_WDTR) {
		host->msg[2] = 0;	/* counter with narrow */
	} else {
		if (host->msg[2] > 2)	/* > 32 bits */
			return initio_msgout_reject(host);
		if (host->msg[2] == 2) {	/* == 32 */
			host->msg[2] = 1;	/* counter with 16 bits */
		} else {
			if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
				wdtr_done(host);
				/* Wide agreed; re-raise ATN if sync still
				   needs negotiating afterwards */
				if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
					outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
				return initio_msgin_accept(host);
			}
		}
	}
	outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);

	if (initio_msgin_accept(host) != MSG_OUT)
		return host->phase;
	/* WDTR msg out */
	outb(MSG_EXTEND, host->addr + TUL_SFifo);
	outb(2, host->addr + TUL_SFifo);
	outb(3, host->addr + TUL_SFifo);
	outb(host->msg[2], host->addr + TUL_SFifo);	/* width */
	outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
	return wait_tulip(host);
}
/*
 * initio_msgin_sync - validate the target's SDTR parameters.
 * host->msg[2] is the offered period, host->msg[3] the offered
 * REQ/ACK offset.  Returns 0 when the values are acceptable as-is,
 * 1 when they were adjusted and must be sent back to the target.
 */
static int initio_msgin_sync(struct initio_host * host)
{
	char min_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];

	if (host->msg[3] > MAX_OFFSET) {
		/* Offset beyond our limit: clamp it and renegotiate */
		host->msg[3] = MAX_OFFSET;
		if (host->msg[2] < min_period)
			host->msg[2] = min_period;
		else if (host->msg[2] >= 59)
			host->msg[3] = 0;	/* too slow: fall back to async */
		return 1;
	}
	/* An offset of zero means asynchronous transfer: accept */
	if (host->msg[3] == 0)
		return 0;
	if (host->msg[2] < min_period) {
		/* Period faster than this adapter supports */
		host->msg[2] = min_period;
		return 1;
	}
	if (host->msg[2] >= 59) {
		/* Too slow to be worth sync: force async */
		host->msg[3] = 0;
		return 1;
	}
	return 0;
}
static int wdtr_done(struct initio_host * host)
{
host->active_tc->flags &= ~TCF_SYNC_DONE;
host->active_tc->flags |= TCF_WDTR_DONE;
host->active_tc->js_period = 0;
if (host->msg[2]) /* if 16 bit */
host->active_tc->js_period |= TSC_WIDE_SCSI;
host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return 1;
}
static int initio_sync_done(struct initio_host * host)
{
int i;
host->active_tc->flags |= TCF_SYNC_DONE;
if (host->msg[3]) {
host->active_tc->js_period |= host->msg[3];
for (i = 0; i < 8; i++) {
if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
break;
}
host->active_tc->js_period |= (i << 4);
host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
}
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return -1;
}
/*
 * initio_post_scsi_rst - clean up host state after a SCSI bus reset.
 * Fails every busy command, then resets each target's negotiation
 * flags, transfer parameters and tag accounting.  Always returns -1.
 */
static int initio_post_scsi_rst(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb;
	struct target_control *active_tc;
	int i;

	host->active = NULL;
	host->active_tc = NULL;
	host->flags = 0;

	/* Every command that was on the bus is gone now */
	while ((scb = initio_pop_busy_scb(host)) != NULL) {
		scb->hastat = HOST_BAD_PHAS;
		initio_append_done_scb(host, scb);
	}
	/* clear sync done flag */
	active_tc = &host->targets[0];
	for (i = 0; i < host->max_tar; active_tc++, i++) {
		active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
		/* Initialize the sync. xfer register values to an asyn xfer */
		active_tc->js_period = 0;
		active_tc->sconfig0 = host->sconf1;
		/* Was act_tags[0]: only target 0's tag count was ever
		   cleared; reset the count for THIS target instead */
		host->act_tags[i] = 0;
		host->targets[i].flags &= ~TCF_BUSY;	/* 07/22/98 */
	}			/* for */
	return -1;
}
/*
 * initio_select_atn_stop - start selection with ATN, stopping before
 * any message bytes go out so negotiation messages can be queued by
 * hand.  The SCB resumes in state 1.
 */
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	scb->next_state = 0x1;
	scb->status |= SCB_SELECT;

	host->active_tc = &host->targets[scb->target];
	host->active = scb;

	outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
}
/*
 * initio_select_atn - start an untagged select-with-ATN sequence.
 * Queues the IDENTIFY byte and the whole CDB in the SCSI FIFO, then
 * tells the chip to run the selection.  The SCB resumes in state 2.
 */
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	int byte;

	scb->next_state = 0x2;
	scb->status |= SCB_SELECT;

	outb(scb->ident, host->addr + TUL_SFifo);
	for (byte = 0; byte < (int) scb->cdblen; byte++)
		outb(scb->cdb[byte], host->addr + TUL_SFifo);

	host->active_tc = &host->targets[scb->target];
	host->active = scb;

	outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
}
/*
 * initio_select_atn3 - start a tagged select-with-ATN3 sequence.
 * Queues IDENTIFY, the tag message pair, and the CDB in the SCSI
 * FIFO before running the selection.  The SCB resumes in state 2.
 */
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
	int byte;

	scb->next_state = 0x2;
	scb->status |= SCB_SELECT;

	outb(scb->ident, host->addr + TUL_SFifo);
	outb(scb->tagmsg, host->addr + TUL_SFifo);
	outb(scb->tagid, host->addr + TUL_SFifo);
	for (byte = 0; byte < scb->cdblen; byte++)
		outb(scb->cdb[byte], host->addr + TUL_SFifo);

	host->active_tc = &host->targets[scb->target];
	host->active = scb;

	outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
}
/**
* initio_bus_device_reset - SCSI Bus Device Reset
* @host: InitIO host to reset
*
* Perform a device reset and abort all pending SCBs for the
* victim device
*/
int initio_bus_device_reset(struct initio_host * host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
	struct scsi_ctrl_blk *tmp, *prev;
	u8 tar;

	if (host->phase != MSG_OUT)
		return int_initio_bad_seq(host);	/* Unexpected phase */

	/* Read the target id BEFORE the SCB goes back to the free
	   list: the old code read scb->target after release */
	tar = scb->target;	/* target */

	initio_unlink_pend_scb(host, scb);
	initio_release_scb(host, scb);

	active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
	/* clr sync. nego & WDTR flags 07/22/98 */

	/* abort all SCB with same target */
	prev = tmp = host->first_busy;	/* Check Busy queue */
	while (tmp != NULL) {
		if (tmp->target == tar) {
			/* unlink it */
			if (tmp == host->first_busy) {
				if ((host->first_busy = tmp->next) == NULL)
					host->last_busy = NULL;
			} else {
				prev->next = tmp->next;
				if (tmp == host->last_busy)
					host->last_busy = prev;
			}
			tmp->hastat = HOST_ABORTED;
			initio_append_done_scb(host, tmp);
		}
		/* Previous haven't change */
		else {
			prev = tmp;
		}
		tmp = tmp->next;
	}
	/* Send BUS DEVICE RESET and wait for the disconnect */
	outb(MSG_DEVRST, host->addr + TUL_SFifo);
	outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
	return initio_wait_disc(host);
}
/*
 * initio_msgin_accept - acknowledge the current message byte and
 * wait for the next bus event.  Returns the resulting phase, or a
 * negative value from the interrupt handlers.
 */
static int initio_msgin_accept(struct initio_host * host)
{
	outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
	return wait_tulip(host);
}
/*
 * wait_tulip - busy-wait for the next chip interrupt and dispatch it
 *
 * Spins until the tulip raises TSS_INT_PENDING, latches the interrupt,
 * phase and status registers, and then dispatches reselection, timeout,
 * bus-reset and disconnect events to their handlers.  For a plain
 * function-complete / bus-service interrupt the current bus phase is
 * returned to the caller.
 */
static int wait_tulip(struct initio_host * host)
{
	while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
		 & TSS_INT_PENDING))
		cpu_relax();

	host->jsint = inb(host->addr + TUL_SInt);
	host->phase = host->jsstatus0 & TSS_PH_MASK;
	host->jsstatus1 = inb(host->addr + TUL_SStatus1);

	if (host->jsint & TSS_RESEL_INT)	/* reselection interrupt */
		return int_initio_resel(host);
	if (host->jsint & TSS_SEL_TIMEOUT)	/* selection/reselection timeout */
		return int_initio_busfree(host);
	if (host->jsint & TSS_SCSIRST_INT)	/* SCSI bus reset detected */
		return int_initio_scsi_rst(host);

	if (host->jsint & TSS_DISC_INT) {	/* BUS disconnection */
		if (host->flags & HCF_EXPECT_DONE_DISC) {
			/* Command finished and target disconnected: complete it */
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);	/* Flush SCSI FIFO */
			initio_unlink_busy_scb(host, host->active);
			host->active->hastat = 0;
			initio_append_done_scb(host, host->active);
			host->active = NULL;
			host->active_tc = NULL;
			host->flags &= ~HCF_EXPECT_DONE_DISC;
			outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
			outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);	/* Enable HW reselect */
			return -1;
		}
		if (host->flags & HCF_EXPECT_DISC) {
			/* Expected mid-command disconnect: just go idle */
			outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);	/* Flush SCSI FIFO */
			host->active = NULL;
			host->active_tc = NULL;
			host->flags &= ~HCF_EXPECT_DISC;
			outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
			outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);	/* Enable HW reselect */
			return -1;
		}
		return int_initio_busfree(host);
	}

	/*
	 * The old code tested TSS_FUNC_COMP | TSS_BUS_SERV here but both
	 * the hit and the fall-through returned host->phase; the redundant
	 * branch (which its own comment said "can probably be removed")
	 * has been collapsed into a single return.
	 */
	return host->phase;
}
/*
 * initio_wait_disc - wait for a bus disconnect
 *
 * Busy-waits for the next interrupt and accepts only a SCSI bus reset
 * or the expected disconnect; anything else is a bad sequence.
 * Returns -1 on the expected disconnect.
 */
static int initio_wait_disc(struct initio_host * host)
{
	while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
		cpu_relax();

	host->jsint = inb(host->addr + TUL_SInt);

	if (host->jsint & TSS_SCSIRST_INT)	/* if SCSI bus reset detected */
		return int_initio_scsi_rst(host);
	if (host->jsint & TSS_DISC_INT) {	/* BUS disconnection */
		outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);	/* Flush SCSI FIFO */
		outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
		outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);	/* Enable HW reselect */
		host->active = NULL;
		return -1;
	}
	return initio_bad_seq(host);
}
/*
 * initio_wait_done_disc - wait for disconnect after a completed command
 *
 * Same as initio_wait_disc() but additionally completes the active SCB
 * (unlink from busy queue, append to done queue) on the disconnect.
 * Returns -1 on the expected disconnect, bad-sequence result otherwise.
 */
static int initio_wait_done_disc(struct initio_host * host)
{
	while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
		 & TSS_INT_PENDING))
		cpu_relax();

	host->jsint = inb(host->addr + TUL_SInt);

	if (host->jsint & TSS_SCSIRST_INT)	/* if SCSI bus reset detected */
		return int_initio_scsi_rst(host);
	if (host->jsint & TSS_DISC_INT) {	/* BUS disconnection */
		outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);	/* Flush SCSI FIFO */
		outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
		outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);	/* Enable HW reselect */
		initio_unlink_busy_scb(host, host->active);
		initio_append_done_scb(host, host->active);
		host->active = NULL;
		return -1;
	}
	return initio_bad_seq(host);
}
/**
 * i91u_intr - IRQ handler
 * @irqno: IRQ number
 * @dev_id: IRQ identifier (the Scsi_Host registered with request_irq)
 *
 * Take the host lock and run the actual ISR processing under it.
 * Reports IRQ_HANDLED when the ISR consumed an interrupt.
 */
static irqreturn_t i91u_intr(int irqno, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	unsigned long flags;
	int handled;

	spin_lock_irqsave(shost->host_lock, flags);
	handled = initio_isr((struct initio_host *)shost->hostdata);
	spin_unlock_irqrestore(shost->host_lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/**
 * initio_build_scb - Build the mappings and SCB
 * @host: InitIO host taking the command
 * @cblk: Firmware command block
 * @cmnd: SCSI midlayer command block
 *
 * Translate the abstract SCSI command into a firmware command block
 * suitable for feeding to the InitIO host controller. This also requires
 * we build the scatter gather lists and ensure they are mapped properly.
 */
static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
{				/* Create corresponding SCB */
	struct scatterlist *sglist;
	struct sg_entry *sg;		/* Pointer to SG list */
	int i, nseg;
	long total_len;
	dma_addr_t dma_addr;

	/* Fill in the command headers */
	cblk->post = i91uSCBPost;	/* i91u's callback routine */
	cblk->srb = cmnd;
	cblk->opcode = ExecSCSI;
	cblk->flags = SCF_POST;	/* After SCSI done, call post routine */
	cblk->target = cmnd->device->id;
	cblk->lun = cmnd->device->lun;
	cblk->ident = cmnd->device->lun | DISC_ALLOW;

	cblk->flags |= SCF_SENSE;	/* Turn on auto request sense   */

	/* Map the sense buffer into bus memory; the handle is stashed in
	   SCp.ptr so i91u_unmap_scb() can undo it on completion */
	dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
				  SENSE_SIZE, DMA_FROM_DEVICE);
	cblk->senseptr = (u32)dma_addr;
	cblk->senselen = SENSE_SIZE;
	cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
	cblk->cdblen = cmnd->cmd_len;

	/* Clear the returned status */
	cblk->hastat = 0;
	cblk->tastat = 0;
	/* Command the command */
	memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);

	/* Set up tags */
	if (cmnd->device->tagged_supported) {	/* Tag Support                  */
		cblk->tagmsg = SIMPLE_QUEUE_TAG;	/* Do simple tag only   */
	} else {
		cblk->tagmsg = 0;	/* No tag support               */
	}

	/* todo handle map_sg error */
	nseg = scsi_dma_map(cmnd);
	BUG_ON(nseg < 0);
	if (nseg) {
		/* Map the in-SCB SG table itself so the controller can DMA it */
		dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
					  sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
					  DMA_BIDIRECTIONAL);
		cblk->bufptr = (u32)dma_addr;
		cmnd->SCp.dma_handle = dma_addr;

		cblk->sglen = nseg;

		cblk->flags |= SCF_SG;	/* Turn on SG list flag       */
		total_len = 0;
		sg = &cblk->sglist[0];
		/* Copy each mapped segment into the firmware's LE32 SG table */
		scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
			sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
			sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
			total_len += sg_dma_len(sglist);
			++sg;
		}

		/* Never report more than the midlayer asked for */
		cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
			total_len : scsi_bufflen(cmnd);
	} else {	/* No data transfer required */
		cblk->buflen = 0;
		cblk->sglen = 0;
	}
}
/**
* i91u_queuecommand - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
* @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
initio_build_scb(host, cmnd, cmd);
initio_exec_scb(host, cmnd);
return 0;
}
static DEF_SCSI_QCMD(i91u_queuecommand)
/**
* i91u_bus_reset - reset the SCSI bus
* @cmnd: Command block we want to trigger the reset for
*
* Initiate a SCSI bus reset sequence
*/
static int i91u_bus_reset(struct scsi_cmnd * cmnd)
{
struct initio_host *host;
host = (struct initio_host *) cmnd->device->host->hostdata;
spin_lock_irq(cmnd->device->host->host_lock);
initio_reset_scsi(host, 0);
spin_unlock_irq(cmnd->device->host->host_lock);
return SUCCESS;
}
/**
 * i91u_biosparam - return the "logical" geometry
 * @sdev: SCSI device
 * @dev: Matching block device
 * @capacity: Sector size of drive
 * @info_array: Return space for BIOS geometry (heads, sectors, cylinders)
 *
 * Map the device geometry in a manner compatible with the host
 * controller BIOS behaviour.
 *
 * FIXME: limited to 2^32 sector devices.
 */
static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int *info_array)
{
	struct initio_host *host;		/* Point to Host adapter control block */
	struct target_control *tc;

	host = (struct initio_host *) sdev->host->hostdata;
	tc = &host->targets[sdev->id];

	if (tc->heads) {
		/* Geometry already known for this target: derive cylinders */
		info_array[0] = tc->heads;	/*number of head */
		info_array[1] = tc->sectors;	/*number of sectors */
		info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;	/* number of cylinder */
	} else {
		if (tc->drv_flags & TCF_DRV_255_63) {
			/* Large-disk translation: 255 heads x 63 sectors */
			info_array[0] = 255;
			info_array[1] = 63;
			info_array[2] = (unsigned long)capacity / 255 / 63;
		} else {
			/* Default 64 x 32 geometry: /64/32 == >>11 */
			info_array[0] = 64;
			info_array[1] = 32;
			info_array[2] = (unsigned long)capacity >> 11;
		}
	}

#if defined(DEBUG_BIOSPARAM)
	if (i91u_debug & debug_biosparam) {
		printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
		       info_array[0], info_array[1], info_array[2]);
		printk("WARNING: check, if the bios geometry is correct.\n");
	}
#endif

	return 0;
}
/**
 * i91u_unmap_scb - Unmap a command
 * @pci_dev: PCI device the command is for
 * @cmnd: The command itself
 *
 * Unmap any PCI mapping/IOMMU resources allocated when the command
 * was mapped originally as part of initio_build_scb.  SCp.ptr holds
 * the sense-buffer DMA handle and SCp.dma_handle the SG-table handle,
 * mirroring how initio_build_scb() stored them.
 */
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
	/* auto sense buffer */
	if (cmnd->SCp.ptr) {
		dma_unmap_single(&pci_dev->dev,
				 (dma_addr_t)((unsigned long)cmnd->SCp.ptr),
				 SENSE_SIZE, DMA_FROM_DEVICE);
		cmnd->SCp.ptr = NULL;	/* prevent a double unmap */
	}

	/* request buffer */
	if (scsi_sg_count(cmnd)) {
		/* First the in-SCB SG table, then the data segments */
		dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
				 sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
				 DMA_BIDIRECTIONAL);
		scsi_dma_unmap(cmnd);
	}
}
/**
 * i91uSCBPost - SCSI completion callback
 * @host_mem: pointer to host adapter control block (opaque u8 *)
 * @cblk_mem: pointer to the finished SCSI control block (opaque u8 *)
 *
 * Callback routine invoked when the tulip finishes one SCSI command.
 * Translates the firmware status into a midlayer result, unmaps the
 * DMA resources, completes the command and releases the SCB.
 * (kernel-doc fixed: the old names @host/@cmnd did not match the
 * actual parameters.)
 */
static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
{
	struct scsi_cmnd *cmnd;	/* Pointer to SCSI request block */
	struct initio_host *host;
	struct scsi_ctrl_blk *cblk;

	host = (struct initio_host *) host_mem;
	cblk = (struct scsi_ctrl_blk *) cblk_mem;
	if ((cmnd = cblk->srb) == NULL) {
		printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
		WARN_ON(1);
		initio_release_scb(host, cblk);	/* Release SCB for current channel */
		return;
	}

	/*
	 * Remap the firmware error status into a mid layer one
	 */
	switch (cblk->hastat) {
	case 0x0:
	case 0xa:		/* Linked command complete without error and linked normally */
	case 0xb:		/* Linked command complete without error interrupt generated */
		cblk->hastat = 0;
		break;

	case 0x11:		/* Selection time out-The initiator selection or target
				   reselection was not complete within the SCSI Time out period */
		cblk->hastat = DID_TIME_OUT;
		break;

	case 0x14:		/* Target bus phase sequence failure-An invalid bus phase or bus
				   phase sequence was requested by the target. The host adapter
				   will generate a SCSI Reset Condition, notifying the host with
				   a SCRD interrupt */
		cblk->hastat = DID_RESET;
		break;

	case 0x1a:		/* SCB Aborted. 07/21/98 */
		cblk->hastat = DID_ABORT;
		break;

	case 0x12:		/* Data overrun/underrun-The target attempted to transfer more data
				   than was allocated by the Data Length field or the sum of the
				   Scatter / Gather Data Length fields. */
	case 0x13:		/* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
	case 0x16:		/* Invalid SCB Operation Code. */

	default:
		printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
		cblk->hastat = DID_ERROR;	/* Couldn't find any better */
		break;
	}

	/* Midlayer result: target status in the low byte, host status << 16 */
	cmnd->result = cblk->tastat | (cblk->hastat << 16);
	i91u_unmap_scb(host->pci_dev, cmnd);
	cmnd->scsi_done(cmnd);	/* Notify system DONE           */
	initio_release_scb(host, cblk);	/* Release SCB for current channel */
}
/*
 * SCSI midlayer host template.  can_queue here is a placeholder; the
 * probe routine overrides shost->can_queue with the actual number of
 * SCBs it managed to allocate.
 */
static struct scsi_host_template initio_template = {
	.proc_name		= "INI9100U",
	.name			= "Initio INI-9X00U/UW SCSI device driver",
	.queuecommand		= i91u_queuecommand,
	.eh_bus_reset_handler	= i91u_bus_reset,
	.bios_param		= i91u_biosparam,
	.can_queue		= MAX_TARGETS * i91u_MAXQUEUE,
	.this_id		= 1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
/*
 * initio_probe_one - set up one InitIO PCI adapter
 * @pdev: PCI device to attach
 * @id:   matching table entry
 *
 * Enables the device, reads the BIOS segment from PCI config space,
 * allocates the Scsi_Host and as large an SCB pool as memory allows,
 * wires up the IRQ and registers with the SCSI midlayer.  On any
 * failure the already-acquired resources are unwound via the goto
 * chain at the bottom.  Returns 0 or a negative errno.
 */
static int initio_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct initio_host *host;
	u32 reg;
	u16 bios_seg;
	struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
	int num_scb, i, error;

	error = pci_enable_device(pdev);
	if (error)
		return error;

	/* BIOS segment lives at config dword 0x44; a high byte of 0xFF
	   means "no BIOS" and is masked out before the bytes are swapped
	   into the final segment value */
	pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
	bios_seg = (u16) (reg & 0xFF);
	if (((reg & 0xFF00) >> 8) == 0xFF)
		reg = 0;
	bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_WARNING  "i91u: Could not set 32 bit DMA mask\n");
		error = -ENODEV;
		goto out_disable_device;
	}
	shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
	if (!shost) {
		printk(KERN_WARNING "initio: Could not allocate host structure.\n");
		error = -ENOMEM;
		goto out_disable_device;
	}
	host = (struct initio_host *)shost->hostdata;
	memset(host, 0, sizeof(struct initio_host));
	host->addr = pci_resource_start(pdev, 0);
	host->bios_addr = bios_seg;

	if (!request_region(host->addr, 256, "i91u")) {
		printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
		error = -ENODEV;
		goto out_host_put;
	}

	/* Size the SCB pool, then shrink it until kzalloc succeeds;
	   MAX_TARGETS + 3 is the floor (1-tape, 1-CD_ROM, 1-extra) */
	if (initio_tag_enable)	/* 1.01i */
		num_scb = MAX_TARGETS * i91u_MAXQUEUE;
	else
		num_scb = MAX_TARGETS + 3;	/* 1-tape, 1-CD_ROM, 1- extra */

	for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
		i = num_scb * sizeof(struct scsi_ctrl_blk);
		if ((scb = kzalloc(i, GFP_DMA)) != NULL)
			break;
	}

	if (!scb) {
		printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
		error = -ENOMEM;
		goto out_release_region;
	}

	host->pci_dev = pdev;
	host->semaph = 1;
	spin_lock_init(&host->semaph_lock);
	host->num_scbs = num_scb;
	host->scb = scb;
	host->next_pending = scb;
	host->next_avail = scb;
	/* Thread the freshly allocated SCBs into a singly linked free list */
	for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
		tmp->tagid = i;
		if (i != 0)
			prev->next = tmp;
		prev = tmp;
	}
	prev->next = NULL;
	host->scb_end = tmp;
	host->first_avail = scb;
	host->last_avail = prev;
	spin_lock_init(&host->avail_lock);

	initio_init(host, phys_to_virt(((u32)bios_seg << 4)));

	host->jsstatus0 = 0;

	shost->io_port = host->addr;
	shost->n_io_port = 0xff;
	shost->can_queue = num_scb;		/* 03/05/98                      */
	shost->unique_id = host->addr;
	shost->max_id = host->max_tar;
	shost->max_lun = 32;	/* 10/21/97                     */
	shost->irq = pdev->irq;
	shost->this_id = host->scsi_id;	/* Assign HCS index           */
	shost->base = host->addr;
	shost->sg_tablesize = TOTAL_SG_ENTRY;

	error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
	if (error < 0) {
		printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
		goto out_free_scbs;
	}

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_free_irq;
	scsi_scan_host(shost);
	return 0;
out_free_irq:
	free_irq(pdev->irq, shost);
out_free_scbs:
	kfree(host->scb);
out_release_region:
	release_region(host->addr, 256);
out_host_put:
	scsi_host_put(shost);
out_disable_device:
	pci_disable_device(pdev);
	return error;
}
/**
 * initio_remove_one - control shutdown
 * @pdev: PCI device being released
 *
 * Release the resources assigned to this adapter after it has
 * finished being used: unregister from the midlayer, free the IRQ
 * and I/O region, drop the host reference and disable the device.
 */
static void initio_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct initio_host *initio = (struct initio_host *)shost->hostdata;

	scsi_remove_host(shost);
	free_irq(pdev->irq, shost);
	release_region(initio->addr, 256);
	scsi_host_put(shost);
	pci_disable_device(pdev);
}
MODULE_LICENSE("GPL");
/* PCI IDs handled by this driver (Initio and the Domex rebadge). */
static struct pci_device_id initio_pci_tbl[] = {
	{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}			/* terminator */
};
MODULE_DEVICE_TABLE(pci, initio_pci_tbl);

static struct pci_driver initio_pci_driver = {
	.name		= "initio",
	.id_table	= initio_pci_tbl,
	.probe		= initio_probe_one,
	.remove		= __devexit_p(initio_remove_one),
};
/* Module entry/exit: plain pci_register/unregister wrappers.
 * NOTE(review): on kernels that provide it, module_pci_driver()
 * could replace this boilerplate — confirm against the target tree. */
static int __init initio_init_driver(void)
{
	return pci_register_driver(&initio_pci_driver);
}

static void __exit initio_exit_driver(void)
{
	pci_unregister_driver(&initio_pci_driver);
}
MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("GPL");
module_init(initio_init_driver);
module_exit(initio_exit_driver);
| gpl-2.0 |
galaxyishere/samsung-kernel-latona | Documentation/laptops/dslm.c | 11496 | 3682 | /*
* dslm.c
* Simple Disk Sleep Monitor
* by Bartek Kania
* Licenced under the GPL
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>
#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif
int endit = 0;
/* Check if the disk is in powersave-mode
 * Most of the code is stolen from hdparm.
 * 1 = active, 0 = standby/sleep, -1 = unknown */
static int check_powermode(int fd)
{
	unsigned char args[4] = {WIN_CHECKPOWERMODE1,0,0,0};
	int state;

	/*
	 * Try CHECKPOWERMODE1 first; on failure retry with the older
	 * CHECKPOWERMODE2 (0x98) opcode.  The single "=" in the middle
	 * clause is an intentional assignment (hdparm idiom), not a
	 * typo for "==".
	 */
	if (ioctl(fd, HDIO_DRIVE_CMD, &args)
	    && (args[0] = WIN_CHECKPOWERMODE2) /* try again with 0x98 */
	    && ioctl(fd, HDIO_DRIVE_CMD, &args)) {
		if (errno != EIO || args[0] != 0 || args[1] != 0) {
			state = -1; /* "unknown"; */
		} else
			state = 0; /* "sleeping"; */
	} else {
		/* Drive reports 255 when active/idle, anything else = standby */
		state = (args[2] == 255) ? 1 : 0;
	}
	D(printf(" drive state is:  %d\n", state));

	return state;
}
/* Human-readable name for a check_powermode() state code. */
static char *state_name(int i)
{
	switch (i) {
	case -1:
		return "unknown";
	case 0:
		return "sleeping";
	case 1:
		return "active";
	default:
		return "internal error";
	}
}
/*
 * Format a timestamp like ctime() but without the trailing newline.
 * Returns ctime()'s static buffer, so the result is overwritten by the
 * next call and this helper is not reentrant/thread-safe.
 */
static char *myctime(time_t time)
{
	char *ts = ctime(&time);
	ts[strlen(ts) - 1] = 0;	/* strip the '\n' ctime appends */
	return ts;
}
/*
 * measure - poll the drive once per second and account time per state
 * @fd: open descriptor of the disk to monitor
 *
 * Loops until the SIGINT handler sets the global 'endit' flag, tallying
 * how long the drive spent active / sleeping / unknown, then prints a
 * summary with percentages.
 */
static void measure(int fd)
{
	time_t start_time;
	int last_state;
	time_t last_time;
	int curr_state;
	time_t curr_time = 0;
	time_t time_diff;
	time_t active_time = 0;
	time_t sleep_time = 0;
	time_t unknown_time = 0;
	time_t total_time = 0;
	int changes = 0;
	float tmp;

	printf("Starting measurements\n");

	last_state = check_powermode(fd);
	start_time = last_time = time(0);
	printf("  System is in state %s\n\n", state_name(last_state));

	while(!endit) {
		sleep(1);
		curr_state = check_powermode(fd);

		/* '|| endit' forces a final accounting pass after SIGINT */
		if (curr_state != last_state || endit) {
			changes++;
			curr_time = time(0);
			time_diff = curr_time - last_time;

			if (last_state == 1) active_time += time_diff;
			else if (last_state == 0) sleep_time += time_diff;
			else unknown_time += time_diff;

			last_state = curr_state;
			last_time = curr_time;

			printf("%s: State-change to %s\n", myctime(curr_time),
			       state_name(curr_state));
		}
	}
	changes--; /* Compensate for SIGINT */

	total_time = time(0) - start_time;
	/* NOTE(review): this prints curr_time - start_time, which is only
	 * valid because the SIGINT pass above always refreshes curr_time;
	 * total_time would be the more direct value — confirm intent. */
	printf("\nTotal running time:  %lus\n", curr_time - start_time);
	printf(" State changed %d times\n", changes);

	tmp = (float)sleep_time / (float)total_time * 100;
	printf(" Time in sleep state:   %lus (%.2f%%)\n", sleep_time, tmp);
	tmp = (float)active_time / (float)total_time * 100;
	printf(" Time in active state:  %lus (%.2f%%)\n", active_time, tmp);
	tmp = (float)unknown_time / (float)total_time * 100;
	printf(" Time in unknown state: %lus (%.2f%%)\n", unknown_time, tmp);
}
/* SIGINT handler: request a clean exit from the measure() loop. */
static void ender(int s)
{
	endit = 1;
}

/* Print usage and exit. */
static void usage(void)
{
	puts("usage: dslm [-w <time>] <disk>");
	exit(0);
}
/*
 * Entry point: parse "[‑w <time>] <disk>", optionally wait for the
 * system to settle, then monitor the disk until interrupted.
 */
int main(int argc, char **argv)
{
	int fd;
	char *disk = 0;
	int settle_time = 60;

	/* Parse the simple command-line */
	if (argc == 2)
		disk = argv[1];
	else if (argc == 4) {
		settle_time = atoi(argv[2]);
		disk = argv[3];
	} else
		usage();

	/*
	 * Bug fix: open(2) returns -1 on failure, not 0 — and fd 0 is a
	 * valid descriptor.  The old test "!(fd = open(...))" therefore
	 * never detected a real error and would misreport success as
	 * failure if stdin were closed.
	 */
	fd = open(disk, O_RDONLY|O_NONBLOCK);
	if (fd == -1) {
		printf("Can't open %s, because: %s\n", disk, strerror(errno));
		exit(-1);
	}

	if (settle_time) {
		printf("Waiting %d seconds for the system to settle down to "
		       "'normal'\n", settle_time);
		sleep(settle_time);
	} else
		puts("Not waiting for system to settle down");

	signal(SIGINT, ender);

	measure(fd);

	close(fd);

	return 0;
}
| gpl-2.0 |
amphorion/kernel_pyramid | drivers/media/dvb/b2c2/flexcop-sram.c | 12264 | 8690 | /*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-sram.c - functions for controlling the SRAM
* see flexcop.c for copyright information
*/
#include "flexcop.h"
/* Select the SRAM chip configuration in register 0x71c. */
static void flexcop_sram_set_chip(struct flexcop_device *fc,
		flexcop_sram_type_t type)
{
	flexcop_set_ibi_value(wan_ctrl_reg_71c, sram_chip, type);
}
/*
 * flexcop_sram_init - pick the SRAM chip type for the chip revision
 *
 * FlexCopII(b) uses one 32KB chip, FlexCopIII one 48KB chip.
 * Returns 0 on success, -EINVAL for an unknown revision.
 */
int flexcop_sram_init(struct flexcop_device *fc)
{
	switch (fc->rev) {
	case FLEXCOP_II:
	case FLEXCOP_IIB:
		flexcop_sram_set_chip(fc, FC_SRAM_1_32KB);
		break;
	case FLEXCOP_III:
		flexcop_sram_set_chip(fc, FC_SRAM_1_48KB);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * flexcop_sram_set_dest - route one or more SRAM destinations to a target
 * @fc:     flexcop device
 * @dest:   bitmask of FC_SRAM_DEST_* destinations to reroute
 * @target: destination target; FC3_CA is FlexCopIII-only
 *
 * Returns 0 on success, -EINVAL when an FC3-only target is requested
 * on FlexCopII(b) hardware.
 */
int flexcop_sram_set_dest(struct flexcop_device *fc, flexcop_sram_dest_t dest,
		flexcop_sram_dest_target_t target)
{
	flexcop_ibi_value v;

	v = fc->read_ibi_reg(fc, sram_dest_reg_714);

	if (fc->rev != FLEXCOP_III && target == FC_SRAM_DEST_TARGET_FC3_CA) {
		/* error-message typo fixed: "to available" -> "not available" */
		err("SRAM destination target not available on FlexCopII(b)\n");
		return -EINVAL;
	}
	deb_sram("sram dest: %x target: %x\n", dest, target);

	if (dest & FC_SRAM_DEST_NET)
		v.sram_dest_reg_714.NET_Dest = target;
	if (dest & FC_SRAM_DEST_CAI)
		v.sram_dest_reg_714.CAI_Dest = target;
	if (dest & FC_SRAM_DEST_CAO)
		v.sram_dest_reg_714.CAO_Dest = target;
	if (dest & FC_SRAM_DEST_MEDIA)
		v.sram_dest_reg_714.MEDIA_Dest = target;

	fc->write_ibi_reg(fc,sram_dest_reg_714,v);
	udelay(1000); /* TODO delay really necessary */

	return 0;
}
EXPORT_SYMBOL(flexcop_sram_set_dest);
/* Set the WAN interface speed signal in register 0x71c. */
void flexcop_wan_set_speed(struct flexcop_device *fc, flexcop_wan_speed_t s)
{
	flexcop_set_ibi_value(wan_ctrl_reg_71c,wan_speed_sig,s);
}
EXPORT_SYMBOL(flexcop_wan_set_speed);

/*
 * flexcop_sram_ctrl - set the SRAM control bits in register 0x714
 * Read-modify-write of the usb_wan, sramdma and maximumfill control
 * fields; all three are written back in a single register update.
 */
void flexcop_sram_ctrl(struct flexcop_device *fc, int usb_wan, int sramdma, int maximumfill)
{
	flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
	v.sram_dest_reg_714.ctrl_usb_wan = usb_wan;
	v.sram_dest_reg_714.ctrl_sramdma = sramdma;
	v.sram_dest_reg_714.ctrl_maximumfill = maximumfill;
	fc->write_ibi_reg(fc,sram_dest_reg_714,v);
}
EXPORT_SYMBOL(flexcop_sram_ctrl);
#if 0
/*
 * NOTE(review): everything from here to the matching #endif is dead
 * code kept for reference.  It is internally inconsistent: this
 * function is named flexcop_sram_write, but the dead chunk helpers
 * below call flex_sram_write (which does not exist), so the region
 * would not compile if re-enabled.  It also uses the pre-conversion
 * 'struct adapter' API rather than 'struct flexcop_device'.
 */
static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, u32 len)
{
	int i, retries;
	u32 command;

	for (i = 0; i < len; i++) {
		command = bank | addr | 0x04000000 | (*buf << 0x10);

		retries = 2;

		while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
			mdelay(1);
			retries--;
		};

		if (retries == 0)
			printk("%s: SRAM timeout\n", __func__);

		write_reg_dw(adapter, 0x700, command);

		buf++;
		addr++;
	}
}
static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, u32 len)
{
int i, retries;
u32 command, value;
for (i = 0; i < len; i++) {
command = bank | addr | 0x04008000;
retries = 10000;
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
};
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
write_reg_dw(adapter, 0x700, command);
retries = 10000;
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
};
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
value = read_reg_dw(adapter, 0x700) >> 0x10;
*buf = (value & 0xff);
addr++;
buf++;
}
}
static void sram_write_chunk(struct adapter *adapter, u32 addr, u8 *buf, u16 len)
{
u32 bank;
bank = 0;
if (adapter->dw_sram_type == 0x20000) {
bank = (addr & 0x18000) << 0x0d;
}
if (adapter->dw_sram_type == 0x00000) {
if ((addr >> 0x0f) == 0)
bank = 0x20000000;
else
bank = 0x10000000;
}
flex_sram_write(adapter, bank, addr & 0x7fff, buf, len);
}
static void sram_read_chunk(struct adapter *adapter, u32 addr, u8 *buf, u16 len)
{
u32 bank;
bank = 0;
if (adapter->dw_sram_type == 0x20000) {
bank = (addr & 0x18000) << 0x0d;
}
if (adapter->dw_sram_type == 0x00000) {
if ((addr >> 0x0f) == 0)
bank = 0x20000000;
else
bank = 0x10000000;
}
flex_sram_read(adapter, bank, addr & 0x7fff, buf, len);
}
static void sram_read(struct adapter *adapter, u32 addr, u8 *buf, u32 len)
{
u32 length;
while (len != 0) {
length = len;
/* check if the address range belongs to the same
* 32K memory chip. If not, the data is read
* from one chip at a time */
if ((addr >> 0x0f) != ((addr + len - 1) >> 0x0f)) {
length = (((addr >> 0x0f) + 1) << 0x0f) - addr;
}
sram_read_chunk(adapter, addr, buf, length);
addr = addr + length;
buf = buf + length;
len = len - length;
}
}
static void sram_write(struct adapter *adapter, u32 addr, u8 *buf, u32 len)
{
u32 length;
while (len != 0) {
length = len;
/* check if the address range belongs to the same
* 32K memory chip. If not, the data is
* written to one chip at a time */
if ((addr >> 0x0f) != ((addr + len - 1) >> 0x0f)) {
length = (((addr >> 0x0f) + 1) << 0x0f) - addr;
}
sram_write_chunk(adapter, addr, buf, length);
addr = addr + length;
buf = buf + length;
len = len - length;
}
}
static void sram_set_size(struct adapter *adapter, u32 mask)
{
write_reg_dw(adapter, 0x71c,
(mask | (~0x30000 & read_reg_dw(adapter, 0x71c))));
}
static void sram_init(struct adapter *adapter)
{
u32 tmp;
tmp = read_reg_dw(adapter, 0x71c);
write_reg_dw(adapter, 0x71c, 1);
if (read_reg_dw(adapter, 0x71c) != 0) {
write_reg_dw(adapter, 0x71c, tmp);
adapter->dw_sram_type = tmp & 0x30000;
ddprintk("%s: dw_sram_type = %x\n", __func__, adapter->dw_sram_type);
} else {
adapter->dw_sram_type = 0x10000;
ddprintk("%s: dw_sram_type = %x\n", __func__, adapter->dw_sram_type);
}
}
static int sram_test_location(struct adapter *adapter, u32 mask, u32 addr)
{
u8 tmp1, tmp2;
dprintk("%s: mask = %x, addr = %x\n", __func__, mask, addr);
sram_set_size(adapter, mask);
sram_init(adapter);
tmp2 = 0xa5;
tmp1 = 0x4f;
sram_write(adapter, addr, &tmp2, 1);
sram_write(adapter, addr + 4, &tmp1, 1);
tmp2 = 0;
mdelay(20);
sram_read(adapter, addr, &tmp2, 1);
sram_read(adapter, addr, &tmp2, 1);
dprintk("%s: wrote 0xa5, read 0x%2x\n", __func__, tmp2);
if (tmp2 != 0xa5)
return 0;
tmp2 = 0x5a;
tmp1 = 0xf4;
sram_write(adapter, addr, &tmp2, 1);
sram_write(adapter, addr + 4, &tmp1, 1);
tmp2 = 0;
mdelay(20);
sram_read(adapter, addr, &tmp2, 1);
sram_read(adapter, addr, &tmp2, 1);
dprintk("%s: wrote 0x5a, read 0x%2x\n", __func__, tmp2);
if (tmp2 != 0x5a)
return 0;
return 1;
}
static u32 sram_length(struct adapter *adapter)
{
if (adapter->dw_sram_type == 0x10000)
return 32768; /* 32K */
if (adapter->dw_sram_type == 0x00000)
return 65536; /* 64K */
if (adapter->dw_sram_type == 0x20000)
return 131072; /* 128K */
return 32768; /* 32K */
}
/* FlexcopII can work with 32K, 64K or 128K of external SRAM memory.
- for 128K there are 4x32K chips at bank 0,1,2,3.
- for 64K there are 2x32K chips at bank 1,2.
- for 32K there is one 32K chip at bank 0.
FlexCop works only with one bank at a time. The bank is selected
by bits 28-29 of the 0x700 register.
bank 0 covers addresses 0x00000-0x07fff
bank 1 covers addresses 0x08000-0x0ffff
bank 2 covers addresses 0x10000-0x17fff
bank 3 covers addresses 0x18000-0x1ffff */
static int flexcop_sram_detect(struct flexcop_device *fc)
{
flexcop_ibi_value r208, r71c_0, vr71c_1;
r208 = fc->read_ibi_reg(fc, ctrl_208);
fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
r71c_0 = fc->read_ibi_reg(fc, wan_ctrl_reg_71c);
write_reg_dw(adapter, 0x71c, 1);
tmp3 = read_reg_dw(adapter, 0x71c);
dprintk("%s: tmp3 = %x\n", __func__, tmp3);
write_reg_dw(adapter, 0x71c, tmp2);
// check for internal SRAM ???
tmp3--;
if (tmp3 != 0) {
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
dprintk("%s: sram size = 32K\n", __func__);
return 32;
}
if (sram_test_location(adapter, 0x20000, 0x18000) != 0) {
sram_set_size(adapter, 0x20000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
dprintk("%s: sram size = 128K\n", __func__);
return 128;
}
if (sram_test_location(adapter, 0x00000, 0x10000) != 0) {
sram_set_size(adapter, 0x00000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
dprintk("%s: sram size = 64K\n", __func__);
return 64;
}
if (sram_test_location(adapter, 0x10000, 0x00000) != 0) {
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
dprintk("%s: sram size = 32K\n", __func__);
return 32;
}
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
dprintk("%s: SRAM detection failed. Set to 32K \n", __func__);
return 0;
}
static void sll_detect_sram_size(struct adapter *adapter)
{
sram_detect_for_flex2(adapter);
}
#endif
| gpl-2.0 |
thekraven/kernel_samsung_lt02ltespr | security/min_addr.c | 13544 | 1345 | #include <linux/init.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/sysctl.h>
/* amount of vm to protect from userspace access by both DAC and the LSM*/
unsigned long mmap_min_addr;
/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
/* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
/*
 * Update mmap_min_addr = max(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR)
 *
 * The effective floor is the larger of the DAC-controlled sysctl value
 * and the compile-time LSM minimum (when an LSM minimum is configured).
 */
static void update_mmap_min_addr(void)
{
#ifdef CONFIG_LSM_MMAP_MIN_ADDR
	if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
		mmap_min_addr = dac_mmap_min_addr;
	else
		mmap_min_addr = CONFIG_LSM_MMAP_MIN_ADDR;
#else
	mmap_min_addr = dac_mmap_min_addr;
#endif
}
/*
 * sysctl handler which just sets dac_mmap_min_addr = the new value and then
 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly.
 *
 * Writing requires CAP_SYS_RAWIO; reads are unrestricted.  Returns the
 * result of proc_doulongvec_minmax() (0 on success, -errno otherwise).
 */
int mmap_min_addr_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	if (write && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* proc_doulongvec_minmax writes directly into dac_mmap_min_addr */
	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	/* recompute the effective floor even on a failed write; harmless */
	update_mmap_min_addr();

	return ret;
}
/* Establish the initial mmap_min_addr floor very early in boot. */
static int __init init_mmap_min_addr(void)
{
	update_mmap_min_addr();
	return 0;
}
/* pure_initcall: must run before any LSM or userspace can mmap */
pure_initcall(init_mmap_min_addr);
| gpl-2.0 |
Fusion-Devices/android_kernel_cyanogen_msm8916 | drivers/hwmon/qpnp-adc-current.c | 745 | 42161 | /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/spmi.h>
#include <linux/of_irq.h>
#include <linux/wakelock.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/hwmon-sysfs.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/platform_device.h>
#include <linux/wakelock.h>
/* QPNP IADC register definition */
/*
 * QPNP IADC register offsets (relative to the peripheral base,
 * iadc->adc->offset) and conversion/calibration constants.
 * Note: a few registers carry two names (e.g. QPNP_ADC_CH_SEL_CTL /
 * QPNP_IADC_ADC_CH_SEL_CTL both map to 0x48) — kept for compatibility.
 */
#define QPNP_IADC_REVISION1				0x0
#define QPNP_IADC_REVISION2				0x1
#define QPNP_IADC_REVISION3				0x2
#define QPNP_IADC_REVISION4				0x3
#define QPNP_IADC_PERPH_TYPE				0x4
#define QPNP_IADC_PERH_SUBTYPE				0x5

/* Minimum digital-major revision this driver supports */
#define QPNP_IADC_SUPPORTED_REVISION2			1

#define QPNP_STATUS1					0x8
#define QPNP_STATUS1_OP_MODE				4
#define QPNP_STATUS1_MULTI_MEAS_EN			BIT(3)
#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
#define QPNP_STATUS1_REQ_STS				BIT(1)
#define QPNP_STATUS1_EOC				BIT(0)
#define QPNP_STATUS1_REQ_STS_EOC_MASK			0x3
#define QPNP_STATUS2					0x9
#define QPNP_STATUS2_CONV_SEQ_STATE_SHIFT		4
#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
#define QPNP_CONV_TIMEOUT_ERR				2

#define QPNP_IADC_MODE_CTL				0x40
#define QPNP_OP_MODE_SHIFT				4
#define QPNP_USE_BMS_DATA				BIT(4)
#define QPNP_VADC_SYNCH_EN				BIT(2)
#define QPNP_OFFSET_RMV_EN				BIT(1)
#define QPNP_ADC_TRIM_EN				BIT(0)
#define QPNP_IADC_EN_CTL1				0x46
#define QPNP_IADC_ADC_EN				BIT(7)
#define QPNP_ADC_CH_SEL_CTL				0x48
#define QPNP_ADC_DIG_PARAM				0x50
#define QPNP_ADC_CLK_SEL_MASK				0x3
#define QPNP_ADC_DEC_RATIO_SEL_MASK			0xc
#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		2

#define QPNP_CONV_REQ					0x52
#define QPNP_CONV_REQ_SET				BIT(7)
#define QPNP_CONV_SEQ_CTL				0x54
#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
#define QPNP_CONV_SEQ_TRIG_CTL				0x55
#define QPNP_FAST_AVG_CTL				0x5a

#define QPNP_M0_LOW_THR_LSB				0x5c
#define QPNP_M0_LOW_THR_MSB				0x5d
#define QPNP_M0_HIGH_THR_LSB				0x5e
#define QPNP_M0_HIGH_THR_MSB				0x5f
#define QPNP_M1_LOW_THR_LSB				0x69
#define QPNP_M1_LOW_THR_MSB				0x6a
#define QPNP_M1_HIGH_THR_LSB				0x6b
#define QPNP_M1_HIGH_THR_MSB				0x6c

#define QPNP_DATA0					0x60
#define QPNP_DATA1					0x61

#define QPNP_IADC_SEC_ACCESS				0xD0
#define QPNP_IADC_SEC_ACCESS_DATA			0xA5
#define QPNP_IADC_MSB_OFFSET				0xF2
#define QPNP_IADC_LSB_OFFSET				0xF3
#define QPNP_IADC_NOMINAL_RSENSE			0xF4
#define QPNP_IADC_ATE_GAIN_CALIB_OFFSET			0xF5
#define QPNP_INT_TEST_VAL				0xE1

#define QPNP_IADC_ADC_CH_SEL_CTL			0x48
#define QPNP_IADC_ADC_CHX_SEL_SHIFT			3

#define QPNP_IADC_ADC_DIG_PARAM				0x50
#define QPNP_IADC_CLK_SEL_SHIFT				1
#define QPNP_IADC_DEC_RATIO_SEL				3

#define QPNP_IADC_CONV_REQUEST				0x52
#define QPNP_IADC_CONV_REQ				BIT(7)

#define QPNP_IADC_DATA0					0x60
#define QPNP_IADC_DATA1					0x61

/* EOC polling interval (us) and retry budget */
#define QPNP_ADC_CONV_TIME_MIN				2000
#define QPNP_ADC_CONV_TIME_MAX				2100
#define QPNP_ADC_ERR_COUNT				20

#define QPNP_ADC_GAIN_NV				17857
#define QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL	0
#define QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR		10000000
#define QPNP_IADC_NANO_VOLTS_FACTOR			1000000
#define QPNP_IADC_CALIB_SECONDS				300000
#define QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT		15625
#define QPNP_IADC_DIE_TEMP_CALIB_OFFSET			5000

#define QPNP_RAW_CODE_16_BIT_MSB_MASK			0xff00
#define QPNP_RAW_CODE_16_BIT_LSB_MASK			0xff
#define QPNP_BIT_SHIFT_8				8
#define QPNP_RSENSE_MSB_SIGN_CHECK			0x80
#define QPNP_ADC_COMPLETION_TIMEOUT			HZ

#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK			0x7
#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0		0
#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2		2
#define QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST	127
#define QPNP_IADC_RSENSE_DEFAULT_VALUE			7800000
#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF		9000000
#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC		9700000
/*
 * Per-chip data used by the current-compensation math in qpnp_iadc_comp():
 * trim/identification values read once at probe by qpnp_iadc_comp_info().
 */
struct qpnp_iadc_comp {
	bool ext_rsense;		/* true when an external sense resistor is used */
	u8 id;				/* fab id (COMP_ID_GF/TSMC/SMIC), from QPNP_INT_TEST_VAL */
	u8 sys_gain;			/* system gain trim, from QPNP_IADC_ATE_GAIN_CALIB_OFFSET */
	u8 revision_dig_major;		/* from QPNP_IADC_REVISION2 */
	u8 revision_ana_minor;		/* from QPNP_IADC_REVISION3 */
};
/*
 * Driver state for one IADC peripheral instance. Allocated in probe with
 * room for one sensor_device_attribute per device-tree channel.
 */
struct qpnp_iadc_chip {
	struct device			*dev;
	struct qpnp_adc_drv		*adc;
	int32_t				rsense;			/* external rsense from DT (qcom,rsense) */
	bool				external_rsense;
	bool				default_internal_rsense; /* set by the RDS trim workaround */
	struct device			*iadc_hwmon;
	struct list_head		list;			/* link in qpnp_iadc_device_list */
	int64_t				die_temp;		/* die temp at last calibration */
	struct delayed_work		iadc_work;		/* periodic recalibration */
	bool				iadc_mode_sel;		/* true while a VADC-synchronous read runs */
	struct qpnp_iadc_comp		iadc_comp;
	struct qpnp_vadc_chip		*vadc_dev;
	struct work_struct		trigger_completion_work;
	bool				skip_auto_calibrations;
	bool				iadc_poll_eoc;		/* poll STATUS1 instead of waiting for the EOC irq */
	u16				batt_id_trim_cnst_rds;	/* SMBB batt-id trim register address */
	int				rds_trim_default_type;
	int				max_channels_available;
	bool				rds_trim_default_check;
	int32_t				rsense_workaround_value;
	/*
	 * C99 flexible array member (was the GNU zero-length "[0]" idiom);
	 * sized by the channel count at allocation time in probe.
	 */
	struct sensor_device_attribute	sens_attr[];
};
/*
 * All probed IADC instances; searched by qpnp_iadc_is_valid() and
 * qpnp_get_iadc(). NOTE(review): appears to be used only in this file
 * and could likely be static — confirm no other translation unit takes
 * it via extern before changing linkage.
 */
LIST_HEAD(qpnp_iadc_device_list);

/* Which default-RDS trim workaround variant applies (DT: qcom,use-default-rds-trim) */
enum qpnp_iadc_rsense_rds_workaround {
	QPNP_IADC_RDS_DEFAULT_TYPEA,
	QPNP_IADC_RDS_DEFAULT_TYPEB,
	QPNP_IADC_RDS_DEFAULT_TYPEC,
};
/* Read one byte from an IADC register (offset relative to peripheral base). */
static int32_t qpnp_iadc_read_reg(struct qpnp_iadc_chip *iadc,
						uint32_t reg, u8 *data)
{
	int ret;

	ret = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
				(iadc->adc->offset + reg), data, 1);
	if (ret >= 0)
		return 0;

	pr_err("qpnp iadc read reg %d failed with %d\n", reg, ret);
	return ret;
}
/*
 * Write one byte to an IADC register (offset relative to peripheral base).
 * The original routed @data through a needless local pointer alias; pass
 * its address directly.
 */
static int32_t qpnp_iadc_write_reg(struct qpnp_iadc_chip *iadc,
						uint32_t reg, u8 data)
{
	int rc;

	rc = spmi_ext_register_writel(iadc->adc->spmi->ctrl, iadc->adc->slave,
					(iadc->adc->offset + reg), &data, 1);
	if (rc < 0) {
		pr_err("qpnp iadc write reg %d failed with %d\n", reg, rc);
		return rc;
	}

	return 0;
}
static int qpnp_iadc_is_valid(struct qpnp_iadc_chip *iadc)
{
struct qpnp_iadc_chip *iadc_chip = NULL;
list_for_each_entry(iadc_chip, &qpnp_iadc_device_list, list)
if (iadc == iadc_chip)
return 0;
return -EINVAL;
}
/* Workqueue handler: signal the waiter that an EOC interrupt arrived. */
static void qpnp_iadc_trigger_completion(struct work_struct *work)
{
	struct qpnp_iadc_chip *iadc = container_of(work,
			struct qpnp_iadc_chip, trigger_completion_work);

	if (qpnp_iadc_is_valid(iadc) < 0)
		return;

	complete(&iadc->adc->adc_rslt_completion);
}
/* EOC interrupt: defer the completion to process context via workqueue. */
static irqreturn_t qpnp_iadc_isr(int irq, void *dev_id)
{
	struct qpnp_iadc_chip *chip = dev_id;

	schedule_work(&chip->trigger_completion_work);

	return IRQ_HANDLED;
}
/*
 * Enable or disable the IADC block via EN_CTL1.
 * Enable sets the ADC_EN bit; disable clears the whole register
 * (the original wrote "~data & QPNP_IADC_ADC_EN", which is 0).
 */
static int32_t qpnp_iadc_enable(struct qpnp_iadc_chip *dev, bool state)
{
	u8 val = state ? QPNP_IADC_ADC_EN : 0;
	int rc;

	rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1, val);
	if (rc < 0) {
		if (state)
			pr_err("IADC enable failed\n");
		else
			pr_err("IADC disable failed\n");
		return rc;
	}

	return 0;
}
/*
 * Dump the key configuration/status registers when a conversion failed to
 * complete, then disable the ADC so it is left in a known state.
 * Returns 0 on success or the first register-access error code.
 */
static int32_t qpnp_iadc_status_debug(struct qpnp_iadc_chip *dev)
{
	int rc = 0;
	u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0;

	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_MODE_CTL, &mode);
	if (rc < 0) {
		pr_err("mode ctl register read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(dev, QPNP_ADC_DIG_PARAM, &dig);
	if (rc < 0) {
		pr_err("digital param read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_ADC_CH_SEL_CTL, &chan);
	if (rc < 0) {
		pr_err("channel read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(dev, QPNP_STATUS1, &status1);
	if (rc < 0) {
		pr_err("status1 read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(dev, QPNP_IADC_EN_CTL1, &en);
	if (rc < 0) {
		pr_err("en read failed with %d\n", rc);
		return rc;
	}

	pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
			status1, dig, chan, mode, en);

	/* leave the ADC disabled after the diagnostic dump */
	rc = qpnp_iadc_enable(dev, false);
	if (rc < 0) {
		pr_err("IADC disable failed with %d\n", rc);
		return rc;
	}

	return 0;
}
/*
 * Fetch the 16-bit conversion result (DATA1:DATA0, MSB:LSB) and then
 * disable the ADC. Returns 0 on success or a register-access error.
 */
static int32_t qpnp_iadc_read_conversion_result(struct qpnp_iadc_chip *iadc,
								int16_t *data)
{
	uint8_t lsb, msb;
	int32_t rc;

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA0, &lsb);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA1, &msb);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		return rc;
	}

	*data = (uint16_t)((msb << 8) | lsb);

	return qpnp_iadc_enable(iadc, false);
}
/*
 * Revision identifiers and empirical compensation coefficients consumed
 * by qpnp_iadc_comp() below. The coefficients are picked per PMIC
 * revision / fab id / rsense type; units are implied by the fixed-point
 * math in qpnp_iadc_comp() (scaled by QPNP_COEFF_4 and 10^6 there).
 */
#define QPNP_IADC_PM8026_2_REV2	4
#define QPNP_IADC_PM8026_2_REV3	2

#define QPNP_COEFF_1					969000
#define QPNP_COEFF_2					32
#define QPNP_COEFF_3_TYPEA				1700000
#define QPNP_COEFF_3_TYPEB				1000000
#define QPNP_COEFF_4					100
#define QPNP_COEFF_5					15
#define QPNP_COEFF_6					100000
#define QPNP_COEFF_7					21
#define QPNP_COEFF_8					100000000
#define QPNP_COEFF_9					38
#define QPNP_COEFF_10					40
#define QPNP_COEFF_11					7
#define QPNP_COEFF_12					11
#define QPNP_COEFF_13					37
#define QPNP_COEFF_14					39
#define QPNP_COEFF_15					9
#define QPNP_COEFF_16					11
#define QPNP_COEFF_17					851200
#define QPNP_COEFF_18					296500
#define QPNP_COEFF_19					222400
#define QPNP_COEFF_20					813800
#define QPNP_COEFF_21					1059100
#define QPNP_COEFF_22					5000000
#define QPNP_COEFF_23					3722500
#define QPNP_COEFF_24					84
#define QPNP_COEFF_25					33
#define QPNP_COEFF_26					22
#define QPNP_COEFF_27					53
#define QPNP_COEFF_28					48
/*
 * qpnp_iadc_comp() - apply die-temperature and system-gain compensation
 * to a measured current.
 * @result:	in/out value; internally scaled by 10^6 and rescaled on exit.
 * @iadc:	supplies fab id, internal/external rsense flag and sys_gain trim.
 * @die_temp:	PMIC die temperature (same units the vadc DIE_TEMP reading
 *		provides — see qpnp_check_pmic_temp()).
 *
 * coeff_a/coeff_b select a linear correction (a * die_temp + b) per PMIC
 * revision, fab (GF/TSMC/SMIC), rsense type, and current direction
 * (*result < 0 is treated as charge). Returns 0 always; when the revid
 * lookup fails the value is returned uncompensated.
 */
static int32_t qpnp_iadc_comp(int64_t *result, struct qpnp_iadc_chip *iadc,
							int64_t die_temp)
{
	int64_t temp_var = 0, sys_gain_coeff = 0, old;
	int32_t coeff_a = 0, coeff_b = 0;
	int version = 0;

	version = qpnp_adc_get_revid_version(iadc->dev);
	if (version == -EINVAL)
		return 0;

	old = *result;
	*result = *result * 1000000;

	/* sys_gain is a signed trim: values > 127 encode negative offsets */
	if (iadc->iadc_comp.sys_gain > 127)
		sys_gain_coeff = -QPNP_COEFF_6 *
			(iadc->iadc_comp.sys_gain - 128);
	else
		sys_gain_coeff = QPNP_COEFF_6 *
			iadc->iadc_comp.sys_gain;

	switch (version) {
	case QPNP_REV_ID_8941_3_1:
		switch (iadc->iadc_comp.id) {
		case COMP_ID_GF:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				coeff_a = QPNP_COEFF_2;
				coeff_b = -QPNP_COEFF_3_TYPEA;
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_5;
					coeff_b = QPNP_COEFF_6;
				} else {
					/* discharge */
					coeff_a = -QPNP_COEFF_7;
					coeff_b = QPNP_COEFF_6;
				}
			}
			break;
		case COMP_ID_TSMC:
		default:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				coeff_a = QPNP_COEFF_2;
				coeff_b = -QPNP_COEFF_3_TYPEB;
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_5;
					coeff_b = QPNP_COEFF_6;
				} else {
					/* discharge */
					coeff_a = -QPNP_COEFF_7;
					coeff_b = QPNP_COEFF_6;
				}
			}
			break;
		}
		break;
	case QPNP_REV_ID_8026_2_1:
	case QPNP_REV_ID_8026_2_2:
		/* pm8026 rev 2.1 and 2.2 */
		switch (iadc->iadc_comp.id) {
		case COMP_ID_GF:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					coeff_a = QPNP_COEFF_25;
					coeff_b = 0;
				}
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					/* discharge */
					coeff_a = 0;
					coeff_b = 0;
				}
			}
			break;
		case COMP_ID_TSMC:
		default:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					coeff_a = QPNP_COEFF_26;
					coeff_b = 0;
				}
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					/* discharge */
					coeff_a = 0;
					coeff_b = 0;
				}
			}
			break;
		}
		break;
	case QPNP_REV_ID_8026_1_0:
		/* pm8026 rev 1.0 */
		switch (iadc->iadc_comp.id) {
		case COMP_ID_GF:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_9;
					coeff_b = -QPNP_COEFF_17;
				} else {
					coeff_a = QPNP_COEFF_10;
					coeff_b = QPNP_COEFF_18;
				}
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = -QPNP_COEFF_11;
					coeff_b = 0;
				} else {
					/* discharge */
					coeff_a = -QPNP_COEFF_17;
					coeff_b = -QPNP_COEFF_19;
				}
			}
			break;
		case COMP_ID_TSMC:
		default:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_13;
					coeff_b = -QPNP_COEFF_20;
				} else {
					coeff_a = QPNP_COEFF_14;
					coeff_b = QPNP_COEFF_21;
				}
			} else {
				if (*result < 0) {
					/* charge */
					coeff_a = -QPNP_COEFF_15;
					coeff_b = 0;
				} else {
					/* discharge */
					coeff_a = -QPNP_COEFF_12;
					coeff_b = -QPNP_COEFF_19;
				}
			}
			break;
		}
		break;
	case QPNP_REV_ID_8110_1_0:
		/* pm8110 rev 1.0 — coefficients only defined for internal rsense */
		switch (iadc->iadc_comp.id) {
		case COMP_ID_GF:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_24;
					coeff_b = -QPNP_COEFF_22;
				} else {
					coeff_a = QPNP_COEFF_24;
					coeff_b = -QPNP_COEFF_23;
				}
			}
			break;
		case COMP_ID_SMIC:
		default:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = QPNP_COEFF_24;
					coeff_b = -QPNP_COEFF_22;
				} else {
					coeff_a = QPNP_COEFF_24;
					coeff_b = -QPNP_COEFF_23;
				}
			}
			break;
		}
		break;
	case QPNP_REV_ID_8110_2_0:
		/* pm8110 rev 2.0: correction is relative to 25C */
		die_temp -= 25000;
		switch (iadc->iadc_comp.id) {
		case COMP_ID_GF:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					coeff_a = QPNP_COEFF_27;
					coeff_b = 0;
				}
			}
			break;
		case COMP_ID_SMIC:
		default:
			if (!iadc->iadc_comp.ext_rsense) {
				/* internal rsense */
				if (*result < 0) {
					/* charge */
					coeff_a = 0;
					coeff_b = 0;
				} else {
					coeff_a = QPNP_COEFF_28;
					coeff_b = 0;
				}
			}
			break;
		}
		break;
	default:
	case QPNP_REV_ID_8026_2_0:
		/* pm8026 rev 2.0 and unrecognized revisions: no compensation */
		coeff_a = 0;
		coeff_b = 0;
		break;
	}

	temp_var = (coeff_a * die_temp) + coeff_b;
	temp_var = div64_s64(temp_var, QPNP_COEFF_4);
	temp_var = 1000 * (1000000 - temp_var);

	if (!iadc->iadc_comp.ext_rsense) {
		/* internal rsense */
		*result = div64_s64(*result * 1000, temp_var);
	}

	if (iadc->iadc_comp.ext_rsense) {
		/* external rsense: also fold in the system gain trim */
		sys_gain_coeff = (1000000 +
			div64_s64(sys_gain_coeff, QPNP_COEFF_4));
		temp_var = div64_s64(temp_var * sys_gain_coeff, 1000000);
		*result = div64_s64(*result * 1000, temp_var);
	}

	pr_debug("%lld compensated into %lld, a: %d, b: %d, sys_gain: %lld\n",
			old, *result, coeff_a, coeff_b, sys_gain_coeff);

	return 0;
}
/*
 * Public wrapper: compensate @result using the die temperature captured
 * at the last calibration (iadc->die_temp).
 */
int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc, int64_t *result)
{
	return qpnp_iadc_comp(result, iadc, iadc->die_temp);
}
EXPORT_SYMBOL(qpnp_iadc_comp_result);
/*
 * Detect parts whose internal-RDS trim registers carry known-bad factory
 * values and, when matched, substitute a fixed default rsense
 * (iadc->rsense_workaround_value, consumed by qpnp_iadc_get_rsense()).
 * The match criteria depend on the DT-selected workaround TYPE A/B/C.
 * Returns 0 unless a register read fails.
 */
static int qpnp_iadc_rds_trim_update_check(struct qpnp_iadc_chip *iadc)
{
	int rc = 0;
	u8 trim2_val = 0, smbb_batt_trm_data = 0;
	u8 smbb_batt_trm_cnst_rds = 0;

	if (!iadc->rds_trim_default_check) {
		pr_debug("No internal rds trim check needed\n");
		return 0;
	}

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &trim2_val);
	if (rc < 0) {
		pr_err("qpnp adc trim2_fullscale1 reg read failed %d\n", rc);
		return rc;
	}

	/* the batt-id trim lives in the SMBB peripheral, hence the raw read */
	rc = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
		iadc->batt_id_trim_cnst_rds, &smbb_batt_trm_data, 1);
	if (rc < 0) {
		pr_err("batt_id trim_cnst rds reg read failed %d\n", rc);
		return rc;
	}

	smbb_batt_trm_cnst_rds = smbb_batt_trm_data &
		SMBB_BAT_IF_TRIM_CNST_RDS_MASK;

	pr_debug("n_trim:0x%x smb_trm:0x%x\n", trim2_val, smbb_batt_trm_data);

	if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEA) {
		/* TYPE A: exact cnst_rds == 2 with full-scale trim at 127 */
		if ((smbb_batt_trm_cnst_rds ==
			SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
			(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
			iadc->rsense_workaround_value =
					QPNP_IADC_RSENSE_DEFAULT_VALUE;
			iadc->default_internal_rsense = true;
		}
	} else if (iadc->rds_trim_default_type ==
						QPNP_IADC_RDS_DEFAULT_TYPEB) {
		/* TYPE B: default depends on cnst_rds range and fab id */
		if ((smbb_batt_trm_cnst_rds >=
			SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
			(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
			iadc->rsense_workaround_value =
					QPNP_IADC_RSENSE_DEFAULT_VALUE;
			iadc->default_internal_rsense = true;
		} else if ((smbb_batt_trm_cnst_rds <
			SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
			(trim2_val ==
			QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
			if (iadc->iadc_comp.id == COMP_ID_GF) {
				iadc->rsense_workaround_value =
					QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF;
				iadc->default_internal_rsense = true;
			} else if (iadc->iadc_comp.id == COMP_ID_SMIC) {
				iadc->rsense_workaround_value =
					QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC;
				iadc->default_internal_rsense = true;
			}
		}
	} else if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEC) {
		/* TYPE C: cnst_rds in (0, 2] with full-scale trim at 127 */
		if ((smbb_batt_trm_cnst_rds >
			SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0) &&
			(smbb_batt_trm_cnst_rds <=
			SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
			(trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
			iadc->rsense_workaround_value =
					QPNP_IADC_RSENSE_DEFAULT_VALUE;
			iadc->default_internal_rsense = true;
		}
	}

	return 0;
}
/*
 * Read the identification/trim registers that feed the compensation math
 * (fab id, digital/analog revisions, system gain) into iadc->iadc_comp.
 * Called once at probe. Returns 0 on success or a register-access error.
 */
static int32_t qpnp_iadc_comp_info(struct qpnp_iadc_chip *iadc)
{
	int rc = 0;

	rc = qpnp_iadc_read_reg(iadc, QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
	if (rc < 0) {
		pr_err("qpnp adc comp id failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2,
					&iadc->iadc_comp.revision_dig_major);
	if (rc < 0) {
		pr_err("qpnp adc revision2 read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION3,
					&iadc->iadc_comp.revision_ana_minor);
	if (rc < 0) {
		pr_err("qpnp adc revision3 read failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
						&iadc->iadc_comp.sys_gain);
	if (rc < 0) {
		pr_err("full scale read failed with %d\n", rc);
		return rc;
	}

	/* mirror the DT-derived rsense choice into the comp parameters */
	if (iadc->external_rsense)
		iadc->iadc_comp.ext_rsense = true;

	pr_debug("fab id = %u, revision_dig_major = %u, revision_ana_minor = %u sys gain = %u, external_rsense = %d\n",
			iadc->iadc_comp.id,
			iadc->iadc_comp.revision_dig_major,
			iadc->iadc_comp.revision_ana_minor,
			iadc->iadc_comp.sys_gain,
			iadc->iadc_comp.ext_rsense);
	return rc;
}
/*
 * Program one IADC conversion on @channel and wait for its completion,
 * then read the raw 16-bit code into @raw_code.
 *
 * Completion is either polled from STATUS1 (iadc_poll_eoc) or awaited on
 * the EOC-interrupt completion. When iadc_mode_sel is set the conversion
 * is synchronized with the VADC (QPNP_VADC_SYNCH_EN).
 * @mode_sel is currently unused by the body. Returns 0 or a negative errno.
 *
 * NOTE(review): the poll loop sleeps 2ms again after EOC is already set
 * (check happens at the top of the next iteration); also @raw_code is
 * uint16_t* while qpnp_iadc_read_conversion_result() takes int16_t* —
 * same representation, but a sign-mismatch warning candidate.
 */
static int32_t qpnp_iadc_configure(struct qpnp_iadc_chip *iadc,
					enum qpnp_iadc_channels channel,
					uint16_t *raw_code, uint32_t mode_sel)
{
	u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
	u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
	u8 status1 = 0;
	uint32_t count = 0;
	int32_t rc = 0;

	qpnp_iadc_ch_sel_reg = channel;

	qpnp_iadc_dig_param_reg |= iadc->adc->amux_prop->decimation <<
					QPNP_IADC_DEC_RATIO_SEL;
	if (iadc->iadc_mode_sel)
		qpnp_iadc_mode_reg |= (QPNP_ADC_TRIM_EN | QPNP_VADC_SYNCH_EN);
	else
		qpnp_iadc_mode_reg |= QPNP_ADC_TRIM_EN;
	qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
	if (rc) {
		pr_err("qpnp adc read adc failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_ADC_CH_SEL_CTL,
						qpnp_iadc_ch_sel_reg);
	if (rc) {
		pr_err("qpnp adc read adc failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_ADC_DIG_PARAM,
						qpnp_iadc_dig_param_reg);
	if (rc) {
		pr_err("qpnp adc read adc failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_FAST_AVG_CTL,
					iadc->adc->amux_prop->fast_avg_setup);
	if (rc < 0) {
		pr_err("qpnp adc fast averaging configure error\n");
		return rc;
	}

	/* re-arm the completion before the request that will complete it */
	if (!iadc->iadc_poll_eoc)
		INIT_COMPLETION(iadc->adc->adc_rslt_completion);

	rc = qpnp_iadc_enable(iadc, true);
	if (rc)
		return rc;

	rc = qpnp_iadc_write_reg(iadc, QPNP_CONV_REQ, qpnp_iadc_conv_req);
	if (rc) {
		pr_err("qpnp adc read adc failed with %d\n", rc);
		return rc;
	}

	if (iadc->iadc_poll_eoc) {
		/* poll STATUS1 for EOC, bounded by QPNP_ADC_ERR_COUNT tries */
		while (status1 != QPNP_STATUS1_EOC) {
			rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
			if (rc < 0)
				return rc;
			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
			usleep_range(QPNP_ADC_CONV_TIME_MIN,
					QPNP_ADC_CONV_TIME_MAX);
			count++;
			if (count > QPNP_ADC_ERR_COUNT) {
				pr_err("retry error exceeded\n");
				rc = qpnp_iadc_status_debug(iadc);
				if (rc < 0)
					pr_err("IADC status debug failed\n");
				rc = -EINVAL;
				return rc;
			}
		}
	} else {
		rc = wait_for_completion_timeout(
				&iadc->adc->adc_rslt_completion,
				QPNP_ADC_COMPLETION_TIMEOUT);
		if (!rc) {
			/* timeout: the irq may have been missed — check EOC once */
			rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
			if (rc < 0)
				return rc;
			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
			if (status1 == QPNP_STATUS1_EOC)
				pr_debug("End of conversion status set\n");
			else {
				rc = qpnp_iadc_status_debug(iadc);
				if (rc < 0) {
					pr_err("status debug failed %d\n", rc);
					return rc;
				}
				return -EINVAL;
			}
		}
	}

	rc = qpnp_iadc_read_conversion_result(iadc, raw_code);
	if (rc) {
		pr_err("qpnp adc read adc failed with %d\n", rc);
		return rc;
	}

	return 0;
}
/*
 * Raw-code to microvolt conversion: codes are centered at IADC_CENTER and
 * scaled by RESOLUTION_N / RESOLUTION_D (see qpnp_convert_raw_offset_voltage).
 */
#define IADC_CENTER	0xC000
#define IADC_READING_RESOLUTION_N	542535
#define IADC_READING_RESOLUTION_D	100000
static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
{
s64 numerator;
if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
return -EINVAL;
}
numerator = iadc->adc->calib.offset_raw - IADC_CENTER;
numerator *= IADC_READING_RESOLUTION_N;
iadc->adc->calib.offset_uv = div_s64(numerator,
IADC_READING_RESOLUTION_D);
numerator = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
numerator *= IADC_READING_RESOLUTION_N;
iadc->adc->calib.gain_uv = div_s64(numerator,
IADC_READING_RESOLUTION_D);
pr_debug("gain_uv:%d offset_uv:%d\n",
iadc->adc->calib.gain_uv, iadc->adc->calib.offset_uv);
return 0;
}
#define IADC_IDEAL_RAW_GAIN	3291

/*
 * Run a gain + offset calibration and write the resulting offset trim back
 * into the (secure-access protected) MSB/LSB trim registers.
 * @batfet_closed: on PM8941-class parts (see version check below) an open
 * batfet redirects INTERNAL_RSENSE reads to OFFSET_CALIBRATION_CSP_CSN, so
 * the offset channel is chosen accordingly.
 * Returns 0 on success or a negative errno; may return -EPROBE_DEFER when
 * @iadc is not (yet) a registered device.
 */
int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
							bool batfet_closed)
{
	uint8_t rslt_lsb, rslt_msb;
	int32_t rc = 0, version = 0;
	uint16_t raw_data;
	uint32_t mode_sel = 0;
	bool iadc_offset_ch_batfet_check;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	mutex_lock(&iadc->adc->adc_lock);

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->adc->amux_prop->decimation = DECIMATION_TYPE1;
	iadc->adc->amux_prop->fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;

	/* gain point: measure the known 17.857mV reference */
	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
						&raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail;
	}

	iadc->adc->calib.gain_raw = raw_data;

	/*
	 * there is a features on PM8941 in the BMS where if the batfet is
	 * opened the BMS reads from INTERNAL_RSENSE (channel 0) actually go to
	 * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence if batfet is opened
	 * we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN even for
	 * internal rsense.
	 */
	version = qpnp_adc_get_revid_version(iadc->dev);
	if ((version == QPNP_REV_ID_8941_3_1) ||
			(version == QPNP_REV_ID_8941_3_0) ||
			(version == QPNP_REV_ID_8941_2_0))
		iadc_offset_ch_batfet_check = true;
	else
		iadc_offset_ch_batfet_check = false;

	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
						(iadc->external_rsense)) {
		/* external offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	} else {
		/* internal offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	}

	iadc->adc->calib.offset_raw = raw_data;
	/* NOTE(review): rc is necessarily 0 here (both branches above return
	 * on error), so this check is dead code kept for safety. */
	if (rc < 0) {
		pr_err("qpnp adc offset/gain calculation failed\n");
		goto fail;
	}

	/* this specific pm8026 revision reports an unusable gain reading */
	if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
		&& iadc->iadc_comp.revision_ana_minor ==
						QPNP_IADC_PM8026_2_REV3)
		iadc->adc->calib.gain_raw =
			iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;

	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);

	rc = qpnp_convert_raw_offset_voltage(iadc);
	if (rc < 0) {
		pr_err("qpnp raw_voltage conversion failed\n");
		goto fail;
	}

	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
							QPNP_BIT_SHIFT_8;
	rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;

	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);

	/* trim registers are protected: each write needs a SEC_ACCESS unlock */
	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
						rslt_msb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for MSB write\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
						rslt_lsb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for LSB write\n");
		goto fail;
	}
fail:
	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);
	return rc;
}
EXPORT_SYMBOL(qpnp_iadc_calibrate_for_trim);
/*
 * Periodic (QPNP_IADC_CALIB_SECONDS) recalibration worker.
 * Always re-queues itself; the calibration step is skipped when a client
 * has requested qpnp_iadc_skip_calibration().
 */
static void qpnp_iadc_work(struct work_struct *work)
{
	struct qpnp_iadc_chip *iadc = container_of(work,
			struct qpnp_iadc_chip, iadc_work.work);

	if (!iadc->skip_auto_calibrations &&
			qpnp_iadc_calibrate_for_trim(iadc, true))
		pr_debug("periodic IADC calibration failed\n");

	schedule_delayed_work(&iadc->iadc_work,
		round_jiffies_relative(msecs_to_jiffies
				(QPNP_IADC_CALIB_SECONDS)));
}
/* Reject hardware whose digital revision is below the supported minimum. */
static int32_t qpnp_iadc_version_check(struct qpnp_iadc_chip *iadc)
{
	uint8_t rev;
	int rc;

	rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2, &rev);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		return rc;
	}

	if (rev < QPNP_IADC_SUPPORTED_REVISION2) {
		pr_err("IADC Version not supported\n");
		rc = -EINVAL;
	}

	return rc;
}
struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name)
{
struct qpnp_iadc_chip *iadc;
struct device_node *node = NULL;
char prop_name[QPNP_MAX_PROP_NAME_LEN];
snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-iadc", name);
node = of_parse_phandle(dev->of_node, prop_name, 0);
if (node == NULL)
return ERR_PTR(-ENODEV);
list_for_each_entry(iadc, &qpnp_iadc_device_list, list)
if (iadc->adc->spmi->dev.of_node == node)
return iadc;
return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(qpnp_get_iadc);
/*
 * Report the sense-resistor value in nano-ohms:
 *  - external rsense: the DT-provided value,
 *  - trim-workaround parts: the fixed default picked at probe,
 *  - otherwise: decode the NOMINAL_RSENSE trim (sign bit + LSB steps
 *    around the nominal internal value).
 * Returns 0 (or a register-read error); logs if the result is 0, since
 * callers divide by it.
 */
int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
{
	uint8_t	rslt_rsense = 0;
	int32_t	rc = 0, sign_bit = 0;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	if (iadc->external_rsense) {
		*rsense = iadc->rsense;
	} else if (iadc->default_internal_rsense) {
		*rsense = iadc->rsense_workaround_value;
	} else {
		rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE,
							&rslt_rsense);
		if (rc < 0) {
			pr_err("qpnp adc rsense read failed with %d\n", rc);
			return rc;
		}

		pr_debug("rsense:0%x\n", rslt_rsense);

		/* MSB is a sign flag; remaining bits count LSB-sized steps */
		if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
			sign_bit = 1;

		rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;

		if (sign_bit)
			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
		else
			*rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
			(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
	}
	pr_debug("rsense value is %d\n", *rsense);

	if (*rsense == 0)
		pr_err("incorrect rsens value:%d rslt_rsense:%d\n",
				*rsense, rslt_rsense);

	return rc;
}
EXPORT_SYMBOL(qpnp_iadc_get_rsense);
static int32_t qpnp_check_pmic_temp(struct qpnp_iadc_chip *iadc)
{
struct qpnp_vadc_result result_pmic_therm;
int64_t die_temp_offset;
int rc = 0;
rc = qpnp_vadc_read(iadc->vadc_dev, DIE_TEMP, &result_pmic_therm);
if (rc < 0)
return rc;
die_temp_offset = result_pmic_therm.physical -
iadc->die_temp;
if (die_temp_offset < 0)
die_temp_offset = -die_temp_offset;
if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
iadc->die_temp = result_pmic_therm.physical;
if (!iadc->skip_auto_calibrations) {
rc = qpnp_iadc_calibrate_for_trim(iadc, true);
if (rc)
pr_err("IADC calibration failed rc = %d\n", rc);
}
}
return rc;
}
/*
 * Perform one current conversion on @channel and fill @result with the
 * sensed voltage (uV) and compensated current (uA).
 *
 * Fixes vs. original: the return value of qpnp_iadc_get_rsense() was
 * ignored, and there was no zero-divisor guard before do_div() — the
 * sibling qpnp_iadc_vadc_sync_read() already had that guard. Both are
 * handled now.
 *
 * Returns 0 on success, -EPROBE_DEFER for an unregistered device,
 * -EINVAL for stale calibration / bad channel / zero rsense.
 */
int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
				enum qpnp_iadc_channels channel,
				struct qpnp_iadc_result *result)
{
	int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
	int32_t rsense_u_ohms = 0;
	int64_t result_current;
	uint16_t raw_data;
	int dt_index = 0;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	/* gain == offset would divide by zero in the uv computation below */
	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
		pr_err("raw offset errors! run iadc calibration again\n");
		return -EINVAL;
	}

	rc = qpnp_check_pmic_temp(iadc);
	if (rc) {
		pr_err("Error checking pmic therm temp\n");
		return rc;
	}

	mutex_lock(&iadc->adc->adc_lock);

	/* map the requested channel onto the device-tree channel table */
	while (((enum qpnp_iadc_channels)
		iadc->adc->adc_channels[dt_index].channel_num
		!= channel) && (dt_index < iadc->max_channels_available))
		dt_index++;

	if (dt_index >= iadc->max_channels_available) {
		pr_err("not a valid IADC channel\n");
		rc = -EINVAL;
		goto fail;
	}

	iadc->adc->amux_prop->decimation =
			iadc->adc->adc_channels[dt_index].adc_decimation;
	iadc->adc->amux_prop->fast_avg_setup =
			iadc->adc->adc_channels[dt_index].fast_avg_setup;

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail;
	}

	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
	if (rc < 0) {
		pr_err("qpnp adc rsense read failed with %d\n", rc);
		goto fail;
	}
	pr_debug("current raw:0%x and rsense:%d\n",
			raw_data, rsense_n_ohms);
	rsense_u_ohms = rsense_n_ohms/1000;
	/* guard the do_div() below, as in qpnp_iadc_vadc_sync_read() */
	if (!rsense_u_ohms) {
		pr_err("rsense error=%d\n", rsense_u_ohms);
		rc = -EINVAL;
		goto fail;
	}
	num = raw_data - iadc->adc->calib.offset_raw;
	if (num < 0) {
		sign = 1;
		num = -num;
	}

	result->result_uv = (num * QPNP_ADC_GAIN_NV)/
		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
	result_current = result->result_uv;
	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
	do_div(result_current, rsense_u_ohms);

	if (sign) {
		result->result_uv = -result->result_uv;
		result_current = -result_current;
	}
	/* compensation expects the opposite sign convention; flip around it */
	result_current *= -1;
	rc = qpnp_iadc_comp_result(iadc, &result_current);
	if (rc < 0)
		pr_err("Error during compensating the IADC\n");
	rc = 0;
	result_current *= -1;

	result->result_ua = (int32_t) result_current;
fail:
	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
EXPORT_SYMBOL(qpnp_iadc_read);
/*
 * Copy the current calibration (raw and microvolt gain/offset, plus the
 * ideal reference values) into @result, recalibrating first if the die
 * temperature has drifted.
 */
int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
					struct qpnp_iadc_calib *result)
{
	int rc;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	rc = qpnp_check_pmic_temp(iadc);
	if (rc) {
		pr_err("Error checking pmic therm temp\n");
		return rc;
	}

	mutex_lock(&iadc->adc->adc_lock);

	result->gain_raw = iadc->adc->calib.gain_raw;
	result->ideal_gain_nv = QPNP_ADC_GAIN_NV;
	result->gain_uv = iadc->adc->calib.gain_uv;
	result->offset_raw = iadc->adc->calib.offset_raw;
	result->ideal_offset_uv =
			QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL;
	result->offset_uv = iadc->adc->calib.offset_uv;

	pr_debug("raw gain:0%x, raw offset:0%x\n",
			result->gain_raw, result->offset_raw);
	pr_debug("gain_uv:%d offset_uv:%d\n",
			result->gain_uv, result->offset_uv);

	mutex_unlock(&iadc->adc->adc_lock);

	return 0;
}
EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
/*
 * Suppress the periodic and temperature-triggered auto-calibrations
 * (e.g. while a client runs its own calibration sequence).
 */
int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
{
	iadc->skip_auto_calibrations = true;
	return 0;
}
EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
/* Re-enable automatic calibrations after qpnp_iadc_skip_calibration(). */
int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
{
	iadc->skip_auto_calibrations = false;
	return 0;
}
EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
/*
 * Run a VADC-synchronized IADC conversion: the VADC samples @v_channel
 * while the IADC samples @i_channel, producing a simultaneous
 * voltage/current pair.
 *
 * Fixes vs. original:
 *  - the zero-rsense path left rc == 0, so the caller saw success with a
 *    garbage i_result (now -EINVAL);
 *  - the invalid-channel path skipped releasing the VADC sync request;
 *  - the release call clobbered an earlier error code;
 *  - the return of qpnp_iadc_get_rsense() was ignored.
 */
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
	int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0;
	int rc_vadc = 0;
	int dt_index = 0;
	uint16_t raw_data;
	int32_t rsense_u_ohms = 0;
	int64_t result_current;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	/* gain == offset would divide by zero in the uv computation below */
	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
		pr_err("raw offset errors! run iadc calibration again\n");
		return -EINVAL;
	}

	mutex_lock(&iadc->adc->adc_lock);

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->iadc_mode_sel = true;

	rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
	if (rc) {
		pr_err("Configuring VADC failed\n");
		goto fail;
	}

	/* map the requested channel onto the device-tree channel table */
	while (((enum qpnp_iadc_channels)
		iadc->adc->adc_channels[dt_index].channel_num
		!= i_channel) && (dt_index < iadc->max_channels_available))
		dt_index++;

	if (dt_index >= iadc->max_channels_available) {
		pr_err("not a valid IADC channel\n");
		rc = -EINVAL;
		goto fail_release_vadc;
	}

	iadc->adc->amux_prop->decimation =
			iadc->adc->adc_channels[dt_index].adc_decimation;
	iadc->adc->amux_prop->fast_avg_setup =
			iadc->adc->adc_channels[dt_index].fast_avg_setup;

	rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail_release_vadc;
	}

	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
	if (rc < 0) {
		pr_err("qpnp adc rsense read failed with %d\n", rc);
		goto fail_release_vadc;
	}
	pr_debug("current raw:0%x and rsense:%d\n",
			raw_data, rsense_n_ohms);
	rsense_u_ohms = rsense_n_ohms/1000;
	if (!rsense_u_ohms) {
		pr_err("rsense error=%d\n", rsense_u_ohms);
		rc = -EINVAL;
		goto fail_release_vadc;
	}
	num = raw_data - iadc->adc->calib.offset_raw;
	if (num < 0) {
		sign = 1;
		num = -num;
	}

	i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/
		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
	result_current = i_result->result_uv;
	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
	do_div(result_current, rsense_u_ohms);

	if (sign) {
		i_result->result_uv = -i_result->result_uv;
		result_current = -result_current;
	}
	/* compensation expects the opposite sign convention; flip around it */
	result_current *= -1;
	rc = qpnp_iadc_comp_result(iadc, &result_current);
	if (rc < 0)
		pr_err("Error during compensating the IADC\n");
	rc = 0;
	result_current *= -1;

	i_result->result_ua = (int32_t) result_current;

fail_release_vadc:
	rc_vadc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev,
							v_channel, v_result);
	if (rc_vadc)
		pr_err("Releasing VADC failed\n");
	/* don't let a release failure mask an earlier error (or vice versa) */
	if (!rc)
		rc = rc_vadc;
fail:
	iadc->iadc_mode_sel = false;

	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
EXPORT_SYMBOL(qpnp_iadc_vadc_sync_read);
/*
 * sysfs show handler: run a conversion on the attribute's channel and
 * print the compensated current. Returns 0 bytes on read failure.
 */
static ssize_t qpnp_iadc_show(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct qpnp_iadc_chip *iadc = dev_get_drvdata(dev);
	struct qpnp_iadc_result result;

	if (qpnp_iadc_read(iadc, attr->index, &result))
		return 0;

	return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
					"Result:%d\n", result.result_ua);
}
/*
 * Template attribute: index/name are filled in per channel and the struct
 * is copied into iadc->sens_attr[] by qpnp_iadc_init_hwmon() (probe-time
 * only, so the shared mutable template is not a concurrency issue).
 */
static struct sensor_device_attribute qpnp_adc_attr =
	SENSOR_ATTR(NULL, S_IRUGO, qpnp_iadc_show, NULL, 0);
static int32_t qpnp_iadc_init_hwmon(struct qpnp_iadc_chip *iadc,
struct spmi_device *spmi)
{
struct device_node *child;
struct device_node *node = spmi->dev.of_node;
int rc = 0, i = 0, channel;
for_each_child_of_node(node, child) {
channel = iadc->adc->adc_channels[i].channel_num;
qpnp_adc_attr.index = iadc->adc->adc_channels[i].channel_num;
qpnp_adc_attr.dev_attr.attr.name =
iadc->adc->adc_channels[i].name;
memcpy(&iadc->sens_attr[i], &qpnp_adc_attr,
sizeof(qpnp_adc_attr));
sysfs_attr_init(&iadc->sens_attr[i].dev_attr.attr);
rc = device_create_file(&spmi->dev,
&iadc->sens_attr[i].dev_attr);
if (rc) {
dev_err(&spmi->dev,
"device_create_file failed for dev %s\n",
iadc->adc->adc_channels[i].name);
goto hwmon_err_sens;
}
i++;
}
return 0;
hwmon_err_sens:
pr_err("Init HWMON failed for qpnp_iadc with %d\n", rc);
return rc;
}
/*
 * qpnp_iadc_probe() - bind the QPNP current-ADC SPMI peripheral.
 *
 * Reads channel configuration from the device tree, allocates the chip
 * structure (with trailing per-channel sysfs attributes), registers the
 * hwmon device, verifies the IADC revision and comp/trim parameters,
 * performs an initial calibration and schedules the periodic
 * recalibration work.
 *
 * Returns 0 on success or a negative errno.  Allocations are devm-
 * managed; only sysfs files and the hwmon device need explicit unwind.
 */
static int qpnp_iadc_probe(struct spmi_device *spmi)
{
	struct qpnp_iadc_chip *iadc;
	struct qpnp_adc_drv *adc_qpnp;
	struct device_node *node = spmi->dev.of_node;
	struct device_node *child;
	struct resource *res;
	int rc, count_adc_channel_list = 0, i = 0;

	/* One sensor_device_attribute is allocated per DT child node. */
	for_each_child_of_node(node, child)
		count_adc_channel_list++;

	if (!count_adc_channel_list) {
		pr_err("No channel listing\n");
		return -EINVAL;
	}

	iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_chip) +
		(sizeof(struct sensor_device_attribute) *
				count_adc_channel_list), GFP_KERNEL);
	if (!iadc) {
		dev_err(&spmi->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
			GFP_KERNEL);
	if (!adc_qpnp) {
		dev_err(&spmi->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	iadc->dev = &(spmi->dev);
	iadc->adc = adc_qpnp;
	rc = qpnp_adc_get_devicetree_data(spmi, iadc->adc);
	if (rc) {
		dev_err(&spmi->dev, "failed to read device tree\n");
		return rc;
	}

	res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
					"batt-id-trim-cnst-rds");
	if (!res) {
		dev_err(&spmi->dev, "failed to read batt_id trim register\n");
		return -EINVAL;
	}
	iadc->batt_id_trim_cnst_rds = res->start;

	/* Optional property: presence enables the internal RDS trim
	 * workaround; absence is not an error. */
	rc = of_property_read_u32(node, "qcom,use-default-rds-trim",
			&iadc->rds_trim_default_type);
	if (rc)
		pr_debug("No trim workaround needed\n");
	else {
		pr_debug("Use internal RDS trim workaround\n");
		iadc->rds_trim_default_check = true;
	}

	iadc->vadc_dev = qpnp_get_vadc(&spmi->dev, "iadc");
	if (IS_ERR(iadc->vadc_dev)) {
		rc = PTR_ERR(iadc->vadc_dev);
		if (rc != -EPROBE_DEFER)
			pr_err("vadc property missing, rc=%d\n", rc);
		return rc;
	}

	mutex_init(&iadc->adc->adc_lock);

	/* Optional property: absence means the internal rsense is used. */
	rc = of_property_read_u32(node, "qcom,rsense",
			&iadc->rsense);
	if (rc)
		pr_debug("Defaulting to internal rsense\n");
	else {
		pr_debug("Use external rsense\n");
		iadc->external_rsense = true;
	}

	iadc->iadc_poll_eoc = of_property_read_bool(node,
						"qcom,iadc-poll-eoc");
	/* When not polling for end-of-conversion, use the EOC interrupt. */
	if (!iadc->iadc_poll_eoc) {
		rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq_eoc,
				qpnp_iadc_isr, IRQF_TRIGGER_RISING,
				"qpnp_iadc_interrupt", iadc);
		if (rc) {
			dev_err(&spmi->dev, "failed to request adc irq\n");
			return rc;
		} else
			enable_irq_wake(iadc->adc->adc_irq_eoc);
	}

	rc = qpnp_iadc_init_hwmon(iadc, spmi);
	if (rc) {
		dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
		return rc;
	}

	iadc->iadc_hwmon = hwmon_device_register(&iadc->adc->spmi->dev);
	/* Fix: hwmon_device_register() returns an ERR_PTR on failure,
	 * which was previously never checked (and would later have been
	 * passed to hwmon_device_unregister() on the error path). */
	if (IS_ERR(iadc->iadc_hwmon)) {
		rc = PTR_ERR(iadc->iadc_hwmon);
		dev_err(&spmi->dev, "failed to register hwmon device\n");
		goto fail_remove_attrs;
	}

	rc = qpnp_iadc_version_check(iadc);
	if (rc) {
		dev_err(&spmi->dev, "IADC version not supported\n");
		goto fail;
	}

	iadc->max_channels_available = count_adc_channel_list;
	INIT_WORK(&iadc->trigger_completion_work, qpnp_iadc_trigger_completion);
	INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);

	rc = qpnp_iadc_comp_info(iadc);
	if (rc) {
		dev_err(&spmi->dev, "abstracting IADC comp info failed!\n");
		goto fail;
	}

	rc = qpnp_iadc_rds_trim_update_check(iadc);
	if (rc) {
		dev_err(&spmi->dev, "Rds trim update failed!\n");
		goto fail;
	}

	dev_set_drvdata(&spmi->dev, iadc);
	list_add(&iadc->list, &qpnp_iadc_device_list);

	/* Calibration failure is logged but not fatal. */
	rc = qpnp_iadc_calibrate_for_trim(iadc, true);
	if (rc)
		dev_err(&spmi->dev, "failed to calibrate for USR trim\n");

	if (iadc->iadc_poll_eoc)
		device_init_wakeup(iadc->dev, 1);

	schedule_delayed_work(&iadc->iadc_work,
		round_jiffies_relative(msecs_to_jiffies
				(QPNP_IADC_CALIB_SECONDS)));
	return 0;

fail:
	hwmon_device_unregister(iadc->iadc_hwmon);
fail_remove_attrs:
	for_each_child_of_node(node, child) {
		device_remove_file(&spmi->dev,
			&iadc->sens_attr[i].dev_attr);
		i++;
	}
	return rc;
}
/*
 * qpnp_iadc_remove() - unbind the IADC peripheral.
 *
 * Stops the periodic calibration work, tears down the per-channel
 * sysfs attributes and the hwmon device, releases any wakeup hold
 * taken in poll-eoc mode, and clears the driver data pointer.
 */
static int qpnp_iadc_remove(struct spmi_device *spmi)
{
	struct qpnp_iadc_chip *iadc = dev_get_drvdata(&spmi->dev);
	struct device_node *node = spmi->dev.of_node;
	struct device_node *child;
	int idx = 0;

	cancel_delayed_work(&iadc->iadc_work);

	/* One attribute file was created per DT child node in probe. */
	for_each_child_of_node(node, child) {
		device_remove_file(&spmi->dev, &iadc->sens_attr[idx].dev_attr);
		idx++;
	}

	hwmon_device_unregister(iadc->iadc_hwmon);

	if (iadc->iadc_poll_eoc)
		pm_relax(iadc->dev);

	dev_set_drvdata(&spmi->dev, NULL);
	return 0;
}
/* Device-tree compatible strings this driver binds against. */
static const struct of_device_id qpnp_iadc_match_table[] = {
	{	.compatible = "qcom,qpnp-iadc",
	},
	{}
};
/* SPMI driver registration: probe/remove plus the OF match table. */
static struct spmi_driver qpnp_iadc_driver = {
	.driver		= {
		.name	= "qcom,qpnp-iadc",
		.of_match_table = qpnp_iadc_match_table,
	},
	.probe		= qpnp_iadc_probe,
	.remove		= qpnp_iadc_remove,
};
/* Module entry point: register the SPMI driver with the core. */
static int __init qpnp_iadc_init(void)
{
	return spmi_driver_register(&qpnp_iadc_driver);
}
module_init(qpnp_iadc_init);
/* Module exit point: unregister the SPMI driver. */
static void __exit qpnp_iadc_exit(void)
{
	spmi_driver_unregister(&qpnp_iadc_driver);
}
module_exit(qpnp_iadc_exit);

MODULE_DESCRIPTION("QPNP PMIC current ADC driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
virtuous/kernel-pyramid-v1 | arch/s390/kernel/module.c | 745 | 13181 | /*
* arch/s390/kernel/module.c - Kernel module help for s390.
*
* S390 version
* Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* based on i386 version
* Copyright (C) 2001 Rusty Russell.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
/* Compile-time debug switch: change "#if 0" to "#if 1" to route
 * DEBUGP() through printk; otherwise it expands to nothing. */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Byte size of one generated PLT stub; must match the instruction
 * sequences emitted in apply_rela() (3 words on 31-bit, 5 on 64-bit). */
#ifndef CONFIG_64BIT
#define PLT_ENTRY_SIZE 12
#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */
/*
 * Allocate space for a module image.  A zero-size request yields NULL
 * rather than whatever vmalloc(0) would return.
 */
void *module_alloc(unsigned long size)
{
	return size ? vmalloc(size) : NULL;
}
/*
 * Free memory returned from module_alloc, along with the per-module
 * relocation bookkeeping (arch.syminfo) if a module is given.
 */
void module_free(struct module *mod, void *module_region)
{
	if (mod != NULL) {
		vfree(mod->arch.syminfo);
		mod->arch.syminfo = NULL;
	}
	/* vfree() tolerates NULL, so no guard is needed here. */
	vfree(module_region);
}
/*
 * First pass over one RELA entry: grow the module's GOT/PLT size
 * accounting.  Each symbol gets at most one GOT slot and one PLT stub;
 * the -1UL sentinel (set in module_frob_arch_sections) marks "not yet
 * assigned".  No memory is written here - offsets are only reserved.
 */
static void
check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* Reserve one pointer-sized GOT slot per symbol. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 16 bit offset from GOT to PLT. */
		/* Reserve one PLT stub per symbol. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}
/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			  char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol.  Freed again in
	 * module_finalize() once all relocations have been applied. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(me->arch.nsyms *
				   sizeof(struct mod_arch_syminfo));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	/* Initialize offsets to the "unassigned" sentinel -1UL that
	 * check_rela() tests against. */
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_size = ALIGN(me->core_size, 4);
	me->arch.got_offset = me->core_size;
	me->core_size += me->arch.got_size;
	me->arch.plt_offset = me->core_size;
	me->core_size += me->arch.plt_size;
	return 0;
}
/*
 * REL-style relocation sections are not supported on s390; the loader
 * handles RELA only (see apply_relocate_add below).
 */
int
apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relsec, struct module *me)
{
	printk(KERN_ERR "module %s: RELOCATION unsupported\n", me->name);
	return -ENOEXEC;
}
/*
 * Second pass over one RELA entry: patch the instruction/data word at
 * base + r_offset.  GOT slots and PLT stubs whose offsets were reserved
 * by check_rela() are materialized lazily here (the *_initialized flags
 * ensure each is written exactly once).  Returns 0 or -ENOEXEC for an
 * unknown relocation type.
 */
static int
apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
	   struct module *me)
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved.  */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_8:		/* Direct 8 bit.   */
	case R_390_12:		/* Direct 12 bit.  */
	case R_390_16:		/* Direct 16 bit.  */
	case R_390_20:		/* Direct 20 bit.  */
	case R_390_32:		/* Direct 32 bit.  */
	case R_390_64:		/* Direct 64 bit.  */
		val += rela->r_addend;
		if (r_type == R_390_8)
			*(unsigned char *) loc = val;
		else if (r_type == R_390_12)
			/* 12-bit field shares the halfword with 4 opcode bits */
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_20)
			/* 20-bit displacement: low 12 bits in DL, high 8 in DH */
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PC16:	/* PC relative 16 bit.  */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
	case R_390_PC32:	/* PC relative 32 bit.  */
	case R_390_PC64:	/* PC relative 64 bit.  */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PC16DBL)
			/* DBL variants store halfword-counts, hence >> 1 */
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PC32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PC32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PC64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* Fill the reserved GOT slot on first use. */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent;

			gotent = me->module_core + me->arch.got_offset +
				info->got_offset;
			*gotent = val;
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT)
			*(unsigned int *) loc =
				(val + (Elf_Addr) me->module_core - loc) >> 1;
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 16 bit offset from GOT to PLT. */
		/* Emit the PLT trampoline on first use; the machine code
		 * words below load the target address and branch to it. */
		if (info->plt_initialized == 0) {
			unsigned int *ip;
			ip = me->module_core + me->arch.plt_offset +
				info->plt_offset;
#ifndef CONFIG_64BIT
			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
			ip[1] = 0x100607f1;
			ip[2] = val;
#else /* CONFIG_64BIT */
			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
			ip[1] = 0x100a0004;
			ip[2] = 0x07f10000;
			ip[3] = (unsigned int) (val >> 32);
			ip[4] = (unsigned int) val;
#endif /* CONFIG_64BIT */
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			/* Branch directly to the target when it is within
			 * range of the relative branch; otherwise go via
			 * the PLT stub generated above. */
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->module_core +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PLTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PLT32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT.  */
		val = val + rela->r_addend -
			((Elf_Addr) me->module_core + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->module_core + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTPCDBL)
			*(unsigned int *) loc = val >> 1;
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry.  */
	case R_390_JMP_SLOT:	/* Create PLT entry.  */
	case R_390_RELATIVE:	/* Adjust by program base.  */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	default:
		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	return 0;
}
/*
 * Apply every RELA entry of one relocation section to its target
 * section, delegating each entry to apply_rela().  Stops and returns
 * the first error encountered, otherwise 0.
 */
int
apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		   unsigned int symindex, unsigned int relsec,
		   struct module *me)
{
	Elf_Addr base;
	Elf_Sym *symtab;
	Elf_Rela *rela;
	unsigned long idx, count;
	int rc;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
	count = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

	for (idx = 0; idx < count; idx++) {
		rc = apply_rela(&rela[idx], base, symtab, me);
		if (rc)
			return rc;
	}
	return 0;
}
/*
 * All relocations have been applied: release the temporary per-symbol
 * GOT/PLT bookkeeping and hand off to the generic bug-table finalizer.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	vfree(me->arch.syminfo);
	me->arch.syminfo = NULL;

	return module_bug_finalize(hdr, sechdrs, me);
}
/* Arch hook on module unload: release the generic bug-table state. */
void module_arch_cleanup(struct module *mod)
{
	module_bug_cleanup(mod);
}
| gpl-2.0 |
bekriebel/android_kernel_omap | drivers/staging/easycap/easycap_sound.c | 2537 | 24615 | /******************************************************************************
* *
* easycap_sound.c *
* *
* Audio driver for EasyCAP USB2.0 Video Capture Device DC60 *
* *
* *
******************************************************************************/
/*
*
* Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*****************************************************************************/
#include "easycap.h"
#ifndef CONFIG_EASYCAP_OSS
/*--------------------------------------------------------------------------*/
/*
* PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
*/
/*--------------------------------------------------------------------------*/
/*
 * Capture capabilities advertised to ALSA.  A per-device copy is made
 * in easycap_alsa_probe(), which narrows .rates/.rate_min/.rate_max to
 * either 32 kHz (microphone) or 48 kHz (line input).
 */
static const struct snd_pcm_hardware alsa_hardware = {
	.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_MMAP_VALID,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000,
	.rate_min = 32000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	/* Buffer/period geometry in whole pages, matching the DMA
	 * buffer layout filled by easycap_alsa_complete(). */
	.buffer_bytes_max = PAGE_SIZE *
			    PAGES_PER_AUDIO_FRAGMENT *
			    AUDIO_FRAGMENT_MANY,
	.period_bytes_min = PAGE_SIZE * PAGES_PER_AUDIO_FRAGMENT,
	.period_bytes_max = PAGE_SIZE * PAGES_PER_AUDIO_FRAGMENT * 2,
	.periods_min = AUDIO_FRAGMENT_MANY,
	.periods_max = AUDIO_FRAGMENT_MANY * 2,
};
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* ON COMPLETION OF AN AUDIO URB ITS DATA IS COPIED TO THE DAM BUFFER
* PROVIDED peasycap->audio_idle IS ZERO. REGARDLESS OF THIS BEING TRUE,
* IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
*/
/*---------------------------------------------------------------------------*/
/*
 * URB completion handler for audio isochronous transfers.  Copies the
 * received samples into the ALSA DMA ring buffer (converting 8-bit mono
 * to 16-bit stereo when a microphone source is active), notifies ALSA
 * when a period boundary is crossed, and resubmits the URB while
 * peasycap->audio_isoc_streaming is nonzero.  Runs in interrupt
 * context, hence GFP_ATOMIC on resubmission.
 */
void
easycap_alsa_complete(struct urb *purb)
{
	struct easycap *peasycap;
	struct snd_pcm_substream *pss;
	struct snd_pcm_runtime *prt;
	int dma_bytes, fragment_bytes;
	int isfragment;
	u8 *p1, *p2;
	s16 tmp;
	int i, j, more, much, rc;
#ifdef UPSAMPLE
	int k;
	s16 oldaudio, newaudio, delta;
#endif /*UPSAMPLE*/

	JOT(16, "\n");

	/* Sanity checks: the driver state must still be valid. */
	if (!purb) {
		SAY("ERROR: purb is NULL\n");
		return;
	}
	peasycap = purb->context;
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return;
	}
	much = 0;
	/* When idle, drop the data but keep the URB circulating. */
	if (peasycap->audio_idle) {
		JOM(16, "%i=audio_idle %i=audio_isoc_streaming\n",
		    peasycap->audio_idle, peasycap->audio_isoc_streaming);
		if (peasycap->audio_isoc_streaming)
			goto resubmit;
	}
/*---------------------------------------------------------------------------*/
	/* ALSA runtime not fully set up yet: just resubmit. */
	pss = peasycap->psubstream;
	if (!pss)
		goto resubmit;
	prt = pss->runtime;
	if (!prt)
		goto resubmit;
	dma_bytes = (int)prt->dma_bytes;
	if (0 == dma_bytes)
		goto resubmit;
	/* 4 bytes per frame: 2 channels x 16-bit samples. */
	fragment_bytes = 4 * ((int)prt->period_size);
	if (0 == fragment_bytes)
		goto resubmit;
/* -------------------------------------------------------------------------*/
	if (purb->status) {
		if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
			JOM(16, "urb status -ESHUTDOWN or -ENOENT\n");
			return;
		}
		SAM("ERROR: non-zero urb status: -%s: %d\n",
		    strerror(purb->status), purb->status);
		goto resubmit;
	}
/*---------------------------------------------------------------------------*/
/*
 *  PROCEED HERE WHEN NO ERROR
 */
/*---------------------------------------------------------------------------*/
#ifdef UPSAMPLE
	oldaudio = peasycap->oldaudio;
#endif /*UPSAMPLE*/

	for (i = 0;  i < purb->number_of_packets; i++) {
		if (purb->iso_frame_desc[i].status < 0) {
			SAM("-%s: %d\n",
			    strerror(purb->iso_frame_desc[i].status),
			    purb->iso_frame_desc[i].status);
		}
		if (purb->iso_frame_desc[i].status) {
			JOM(12, "discarding audio samples because "
			    "%i=purb->iso_frame_desc[i].status\n",
			    purb->iso_frame_desc[i].status);
			continue;
		}
		more = purb->iso_frame_desc[i].actual_length;
		if (more == 0) {
			/* Count consecutive empty frames for logging. */
			peasycap->audio_mt++;
			continue;
		}
		if (0 > more) {
			SAM("MISTAKE: more is negative\n");
			return;
		}

		if (peasycap->audio_mt) {
			JOM(12, "%4i empty audio urb frames\n",
			    peasycap->audio_mt);
			peasycap->audio_mt = 0;
		}

		p1 = (u8 *)(purb->transfer_buffer +
				purb->iso_frame_desc[i].offset);

		/*
		 *  COPY more BYTES FROM ISOC BUFFER
		 *  TO THE DMA BUFFER, CONVERTING
		 *  8-BIT MONO TO 16-BIT SIGNED
		 *  LITTLE-ENDIAN SAMPLES IF NECESSARY
		 */
		while (more) {
			much = dma_bytes - peasycap->dma_fill;
			if (0 > much) {
				SAM("MISTAKE: much is negative\n");
				return;
			}
			/* Ring buffer wrap-around. */
			if (0 == much) {
				peasycap->dma_fill = 0;
				peasycap->dma_next = fragment_bytes;
				JOM(8, "wrapped dma buffer\n");
			}
			if (!peasycap->microphone) {
				/* Line input: samples are already in the
				 * target format, straight copy. */
				if (much > more)
					much = more;
				memcpy(prt->dma_area + peasycap->dma_fill,
					p1, much);
				p1 += much;
				more -= much;
			} else {
#ifdef UPSAMPLE
				/* Microphone: 8-bit mono expanded to four
				 * interpolated 16-bit stereo frames
				 * (16 output bytes per input byte). */
				if (much % 16)
					JOM(8, "MISTAKE? much"
					    " is not divisible by 16\n");
				if (much > (16 * more))
					much = 16 * more;
				p2 = (u8 *)(prt->dma_area + peasycap->dma_fill);
				for (j = 0;  j < (much / 16);  j++) {
					newaudio =  ((int) *p1) - 128;
					newaudio = 128 * newaudio;
					delta = (newaudio - oldaudio) / 4;
					tmp = oldaudio + delta;
					for (k = 0;  k < 4;  k++) {
						*p2 = (0x00FF & tmp);
						*(p2 + 1) = (0xFF00 & tmp) >> 8;
						p2 += 2;
						*p2 = (0x00FF & tmp);
						*(p2 + 1) = (0xFF00 & tmp) >> 8;
						p2 += 2;
						tmp += delta;
					}
					p1++;
					more--;
					oldaudio = tmp;
				}
#else /*!UPSAMPLE*/
				/* Microphone without upsampling: duplicate
				 * each 8-bit sample into one 16-bit frame
				 * (2 output bytes per input byte). */
				if (much > (2 * more))
					much = 2 * more;
				p2 = (u8 *)(prt->dma_area + peasycap->dma_fill);
				for (j = 0;  j < (much / 2);  j++) {
					tmp = ((int) *p1) - 128;
					tmp = 128 * tmp;
					*p2 = (0x00FF & tmp);
					*(p2 + 1) = (0xFF00 & tmp) >> 8;
					p1++;
					p2 += 2;
					more--;
				}
#endif /*UPSAMPLE*/
			}
			peasycap->dma_fill += much;
			/* Period boundary crossed: advance the read/next
			 * markers and notify ALSA. */
			if (peasycap->dma_fill >= peasycap->dma_next) {
				isfragment = peasycap->dma_fill / fragment_bytes;
				if (0 > isfragment) {
					SAM("MISTAKE: isfragment is negative\n");
					return;
				}
				peasycap->dma_read = (isfragment - 1) * fragment_bytes;
				peasycap->dma_next = (isfragment + 1) * fragment_bytes;
				if (dma_bytes < peasycap->dma_next)
					peasycap->dma_next = fragment_bytes;
				if (0 <= peasycap->dma_read) {
					JOM(8, "snd_pcm_period_elapsed(), %i="
					    "isfragment\n", isfragment);
					snd_pcm_period_elapsed(pss);
				}
			}
		}
#ifdef UPSAMPLE
		/* NOTE(review): oldaudio is saved back inside the packet
		 * loop; presumably intended, but worth confirming against
		 * upstream easycap history. */
		peasycap->oldaudio = oldaudio;
#endif /*UPSAMPLE*/
	}
/*---------------------------------------------------------------------------*/
/*
 *  RESUBMIT THIS URB
 */
/*---------------------------------------------------------------------------*/
resubmit:
	if (peasycap->audio_isoc_streaming == 0)
		return;

	rc = usb_submit_urb(purb, GFP_ATOMIC);
	if (rc) {
		if ((-ENODEV != rc) && (-ENOENT != rc)) {
			SAM("ERROR: while %i=audio_idle, usb_submit_urb failed "
			    "with rc: -%s :%d\n",
			    peasycap->audio_idle, strerror(rc), rc);
		}
		if (0 < peasycap->audio_isoc_streaming)
			peasycap->audio_isoc_streaming--;
	}
	return;
}
/*****************************************************************************/
/*
 * ALSA .open callback: validate the chain pss -> pcm -> card ->
 * easycap, wire the substream and hardware parameters into the chip
 * state, and kick off the audio endpoint via easycap_sound_setup().
 * Returns 0 on success, -EFAULT on any inconsistency.
 */
static int easycap_alsa_open(struct snd_pcm_substream *pss)
{
	struct snd_pcm *psnd_pcm;
	struct snd_card *psnd_card;
	struct easycap *peasycap;

	JOT(4, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	psnd_pcm = pss->pcm;
	if (!psnd_pcm) {
		SAY("ERROR: psnd_pcm is NULL\n");
		return -EFAULT;
	}
	psnd_card = psnd_pcm->card;
	if (!psnd_card) {
		SAY("ERROR: psnd_card is NULL\n");
		return -EFAULT;
	}

	peasycap = psnd_card->private_data;
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}
	if (peasycap->psnd_card != psnd_card) {
		SAM("ERROR: bad peasycap->psnd_card\n");
		return -EFAULT;
	}
	/* A second concurrent open is rejected. */
	if (peasycap->psubstream) {
		SAM("ERROR: bad peasycap->psubstream\n");
		return -EFAULT;
	}

	/* Fix: pss->private_data was previously assigned twice in a row
	 * (a harmless but confusing dead store); set it once. */
	pss->private_data = peasycap;
	peasycap->psubstream = pss;
	pss->runtime->hw = peasycap->alsa_hardware;
	pss->runtime->private_data = peasycap;

	if (0 != easycap_sound_setup(peasycap)) {
		JOM(4, "ending unsuccessfully\n");
		return -EFAULT;
	}
	JOM(4, "ending successfully\n");
	return 0;
}
/*****************************************************************************/
/*
 * ALSA .close callback: detach the substream from the chip state.
 * The inverse of easycap_alsa_open() for the pointer wiring.
 */
static int easycap_alsa_close(struct snd_pcm_substream *pss)
{
	struct easycap *peasycap;

	JOT(4, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	peasycap = snd_pcm_substream_chip(pss);
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}

	/* Break both links between substream and chip. */
	peasycap->psubstream = NULL;
	pss->private_data = NULL;

	JOT(4, "ending successfully\n");
	return 0;
}
/*****************************************************************************/
/*
 * (Re)allocate the vmalloc'ed DMA area for the substream so that it
 * holds at least @sz bytes.  An existing buffer that is already large
 * enough is kept as-is.
 */
static int easycap_alsa_vmalloc(struct snd_pcm_substream *pss, size_t sz)
{
	struct snd_pcm_runtime *prt;

	JOT(4, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	prt = pss->runtime;
	if (!prt) {
		SAY("ERROR: substream.runtime is NULL\n");
		return -EFAULT;
	}

	/* Keep a buffer that is already big enough. */
	if (prt->dma_area && prt->dma_bytes > sz)
		return 0;

	vfree(prt->dma_area);	/* vfree(NULL) is a no-op */
	prt->dma_area = vmalloc(sz);
	if (!prt->dma_area)
		return -ENOMEM;
	prt->dma_bytes = sz;
	return 0;
}
/*****************************************************************************/
/*
 * ALSA .hw_params callback: size the vmalloc'ed DMA buffer to the
 * negotiated buffer size.
 */
static int easycap_alsa_hw_params(struct snd_pcm_substream *pss,
				  struct snd_pcm_hw_params *phw)
{
	JOT(4, "%i\n", (params_buffer_bytes(phw)));
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	/* Propagate the allocator's result directly (0 on success). */
	return easycap_alsa_vmalloc(pss, params_buffer_bytes(phw));
}
/*****************************************************************************/
/*
 * ALSA .hw_free callback: release the vmalloc'ed DMA area, if any.
 */
static int easycap_alsa_hw_free(struct snd_pcm_substream *pss)
{
	struct snd_pcm_runtime *prt;

	JOT(4, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	prt = pss->runtime;
	if (!prt) {
		SAY("ERROR: substream.runtime is NULL\n");
		return -EFAULT;
	}

	if (!prt->dma_area) {
		JOT(8, "dma_area already freed\n");
		return 0;
	}

	JOT(8, "prt->dma_area = %p\n", prt->dma_area);
	vfree(prt->dma_area);
	prt->dma_area = NULL;
	return 0;
}
/*****************************************************************************/
/*
 * ALSA .prepare callback: log the parameters ALSA settled on and
 * verify that the DMA buffer size matches periods * period_size *
 * 4 bytes/frame, the geometry that easycap_alsa_complete() assumes.
 */
static int easycap_alsa_prepare(struct snd_pcm_substream *pss)
{
	struct easycap *peasycap;
	struct snd_pcm_runtime *prt;

	JOT(4, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	prt = pss->runtime;
	peasycap = snd_pcm_substream_chip(pss);
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}

	JOM(16, "ALSA decides %8i Hz=rate\n", pss->runtime->rate);
	JOM(16, "ALSA decides %8ld =period_size\n", pss->runtime->period_size);
	JOM(16, "ALSA decides %8i =periods\n", pss->runtime->periods);
	JOM(16, "ALSA decides %8ld =buffer_size\n", pss->runtime->buffer_size);
	JOM(16, "ALSA decides %8zd =dma_bytes\n", pss->runtime->dma_bytes);
	JOM(16, "ALSA decides %8ld =boundary\n", pss->runtime->boundary);
	JOM(16, "ALSA decides %8i =period_step\n", pss->runtime->period_step);
	JOM(16, "ALSA decides %8i =sample_bits\n", pss->runtime->sample_bits);
	JOM(16, "ALSA decides %8i =frame_bits\n", pss->runtime->frame_bits);
	JOM(16, "ALSA decides %8ld =min_align\n", pss->runtime->min_align);
	JOM(12, "ALSA decides %8ld =hw_ptr_base\n", pss->runtime->hw_ptr_base);
	JOM(12, "ALSA decides %8ld =hw_ptr_interrupt\n",
	    pss->runtime->hw_ptr_interrupt);

	/* 4 = 2 channels x 2 bytes per 16-bit sample. */
	if (prt->dma_bytes != 4 * ((int)prt->period_size) * ((int)prt->periods)) {
		SAY("MISTAKE:  unexpected ALSA parameters\n");
		return -ENOENT;
	}
	return 0;
}
/*****************************************************************************/
/* ALSA .ack callback: nothing to acknowledge for this device. */
static int easycap_alsa_ack(struct snd_pcm_substream *pss)
{
	return 0;
}
/*****************************************************************************/
/*
 * ALSA .trigger callback: START clears audio_idle so that
 * easycap_alsa_complete() copies incoming samples; STOP sets it so
 * data is discarded while URBs keep circulating.
 *
 * Returns 0 for the two supported commands and -EINVAL otherwise.
 * (Previously -EINVAL was stored in a local that was never returned,
 * so unknown commands were silently reported as success.)
 */
static int easycap_alsa_trigger(struct snd_pcm_substream *pss, int cmd)
{
	struct easycap *peasycap;

	JOT(4, "%i=cmd cf %i=START %i=STOP\n", cmd, SNDRV_PCM_TRIGGER_START,
	    SNDRV_PCM_TRIGGER_STOP);
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	peasycap = snd_pcm_substream_chip(pss);
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		peasycap->audio_idle = 0;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		peasycap->audio_idle = 1;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*****************************************************************************/
/*
 * ALSA .pointer callback: report the capture position in frames,
 * derived from the dma_read byte offset maintained by
 * easycap_alsa_complete() (4 bytes per frame).
 *
 * NOTE(review): negative errno values are returned through the
 * unsigned snd_pcm_uframes_t type here, as in the original driver;
 * ALSA treats these as special pointer codes - confirm against the
 * snd_pcm_ops documentation before changing.
 */
static snd_pcm_uframes_t easycap_alsa_pointer(struct snd_pcm_substream *pss)
{
	struct easycap *peasycap;
	snd_pcm_uframes_t offset;

	JOT(16, "\n");
	if (!pss) {
		SAY("ERROR: pss is NULL\n");
		return -EFAULT;
	}
	peasycap = snd_pcm_substream_chip(pss);
	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}
	if ((0 != peasycap->audio_eof) || (0 != peasycap->audio_idle)) {
		JOM(8, "returning -EIO because  "
		    "%i=audio_idle  %i=audio_eof\n",
		    peasycap->audio_idle, peasycap->audio_eof);
		return -EIO;
	}
/*---------------------------------------------------------------------------*/
	/* dma_read < 0 means no complete period has been captured yet. */
	if (0 > peasycap->dma_read) {
		JOM(8, "returning -EBUSY\n");
		return -EBUSY;
	}
	offset = ((snd_pcm_uframes_t)peasycap->dma_read)/4;
	JOM(8, "ALSA decides %8i   =hw_ptr_base\n", (int)pss->runtime->hw_ptr_base);
	JOM(8, "ALSA decides %8i   =hw_ptr_interrupt\n",
	    (int)pss->runtime->hw_ptr_interrupt);
	JOM(8, "%7i=offset %7i=dma_read %7i=dma_next\n",
	    (int)offset, peasycap->dma_read, peasycap->dma_next);
	return offset;
}
/*****************************************************************************/
/*
 * ALSA .page callback: map a byte offset within the vmalloc'ed DMA
 * area to its backing page (required because the buffer is not
 * physically contiguous).
 */
static struct page *
easycap_alsa_page(struct snd_pcm_substream *pss, unsigned long offset)
{
	return vmalloc_to_page(pss->runtime->dma_area + offset);
}
/*****************************************************************************/
/* PCM capture operations registered in easycap_alsa_probe(). */
static struct snd_pcm_ops easycap_alsa_pcm_ops = {
	.open      = easycap_alsa_open,
	.close     = easycap_alsa_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = easycap_alsa_hw_params,
	.hw_free   = easycap_alsa_hw_free,
	.prepare   = easycap_alsa_prepare,
	.ack       = easycap_alsa_ack,
	.trigger   = easycap_alsa_trigger,
	.pointer   = easycap_alsa_pointer,
	.page      = easycap_alsa_page,
};
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* THE FUNCTION snd_card_create() HAS THIS_MODULE AS AN ARGUMENT. THIS
* MEANS MODULE easycap. BEWARE.
*/
/*---------------------------------------------------------------------------*/
/*
 * Register the ALSA card, PCM device and capture ops for one easycap
 * device.  The hardware-capabilities template is copied per-device and
 * narrowed to a single rate: 32 kHz when a microphone source is
 * configured, 48 kHz otherwise.  On any registration failure the card
 * is freed again.  Returns 0 on success, -ENODEV/-EFAULT on failure.
 */
int easycap_alsa_probe(struct easycap *peasycap)
{
	int rc;
	struct snd_card *psnd_card;
	struct snd_pcm *psnd_pcm;

	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -ENODEV;
	}
	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		SAY("ERROR: bad peasycap\n");
		return -EFAULT;
	}
	if (0 > peasycap->minor) {
		SAY("ERROR: no minor\n");
		return -ENODEV;
	}

	/* Per-device copy of the capability template, narrowed to the
	 * one rate the configured input supports. */
	peasycap->alsa_hardware = alsa_hardware;
	if (peasycap->microphone) {
		peasycap->alsa_hardware.rates = SNDRV_PCM_RATE_32000;
		peasycap->alsa_hardware.rate_min = 32000;
		peasycap->alsa_hardware.rate_max = 32000;
	} else {
		peasycap->alsa_hardware.rates = SNDRV_PCM_RATE_48000;
		peasycap->alsa_hardware.rate_min = 48000;
		peasycap->alsa_hardware.rate_max = 48000;
	}

	if (0 != snd_card_create(SNDRV_DEFAULT_IDX1, "easycap_alsa",
				 THIS_MODULE, 0, &psnd_card)) {
		SAY("ERROR: Cannot do ALSA snd_card_create()\n");
		return -EFAULT;
	}

	sprintf(&psnd_card->id[0], "EasyALSA%i", peasycap->minor);
	strcpy(&psnd_card->driver[0], EASYCAP_DRIVER_DESCRIPTION);
	strcpy(&psnd_card->shortname[0], "easycap_alsa");
	sprintf(&psnd_card->longname[0], "%s", &psnd_card->shortname[0]);

	psnd_card->dev = &peasycap->pusb_device->dev;
	psnd_card->private_data = peasycap;
	peasycap->psnd_card = psnd_card;

	/* One capture substream, no playback. */
	rc = snd_pcm_new(psnd_card, "easycap_pcm", 0, 0, 1, &psnd_pcm);
	if (rc) {
		SAM("ERROR: Cannot do ALSA snd_pcm_new()\n");
		snd_card_free(psnd_card);
		return -EFAULT;
	}

	snd_pcm_set_ops(psnd_pcm, SNDRV_PCM_STREAM_CAPTURE,
			&easycap_alsa_pcm_ops);
	psnd_pcm->info_flags = 0;
	strcpy(&psnd_pcm->name[0], &psnd_card->id[0]);
	psnd_pcm->private_data = peasycap;
	peasycap->psnd_pcm = psnd_pcm;
	peasycap->psubstream = NULL;

	rc = snd_card_register(psnd_card);
	if (rc) {
		SAM("ERROR: Cannot do ALSA snd_card_register()\n");
		snd_card_free(psnd_card);
		return -EFAULT;
	}

	SAM("registered %s\n", &psnd_card->id[0]);
	return 0;
}
#endif /*! CONFIG_EASYCAP_OSS */
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* COMMON AUDIO INITIALIZATION
*/
/*---------------------------------------------------------------------------*/
/*
 * COMMON AUDIO INITIALIZATION
 *
 * Configures the audio hardware, selects the streaming altsetting,
 * wakes the device and submits the audio isochronous urbs.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the original re-checked peasycap->pusb_device for NULL twice in
 * immediate succession; the duplicate block is removed.
 */
int
easycap_sound_setup(struct easycap *peasycap)
{
	int rc;

	JOM(4, "starting initialization\n");

	if (!peasycap) {
		SAY("ERROR: peasycap is NULL.\n");
		return -EFAULT;
	}
	if (!peasycap->pusb_device) {
		SAM("ERROR: peasycap->pusb_device is NULL\n");
		return -ENODEV;
	}
	JOM(16, "0x%08lX=peasycap->pusb_device\n", (long int)peasycap->pusb_device);

	rc = audio_setup(peasycap);
	JOM(8, "audio_setup() returned %i\n", rc);

	/* audio_setup() may have torn the device down under us */
	if (!peasycap->pusb_device) {
		SAM("ERROR: peasycap->pusb_device has become NULL\n");
		return -ENODEV;
	}

	rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface,
			       peasycap->audio_altsetting_on);
	JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface,
	    peasycap->audio_altsetting_on, rc);

	rc = wakeup_device(peasycap->pusb_device);
	JOM(8, "wakeup_device() returned %i\n", rc);

	/* reset the streaming state before the urbs start completing */
	peasycap->audio_eof = 0;
	peasycap->audio_idle = 0;
	peasycap->timeval1.tv_sec = 0;
	peasycap->timeval1.tv_usec = 0;
	submit_audio_urbs(peasycap);

	JOM(4, "finished initialization\n");
	return 0;
}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* SUBMIT ALL AUDIO URBS.
*/
/*---------------------------------------------------------------------------*/
/*
 * SUBMIT ALL AUDIO URBS.
 *
 * Prepares every urb on peasycap->purb_audio_head for isochronous
 * capture and submits it.  On any submission failure all urbs are
 * killed again and audio_isoc_streaming stays 0; otherwise
 * audio_isoc_streaming records the number of urbs in flight.
 *
 * Fix: `nospc` was initialized and tested but never incremented, so the
 * "inadequate USB bandwidth" diagnostic could never fire; -ENOSPC
 * submission failures are now counted.
 */
int
submit_audio_urbs(struct easycap *peasycap)
{
	struct data_urb *pdata_urb;
	struct urb *purb;
	struct list_head *plist_head;
	int j, isbad, nospc, m, rc;
	int isbuf;

	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (!peasycap->purb_audio_head) {
		SAM("ERROR: peasycap->urb_audio_head uninitialized\n");
		return -EFAULT;
	}
	if (!peasycap->pusb_device) {
		SAM("ERROR: peasycap->pusb_device is NULL\n");
		return -EFAULT;
	}
	if (peasycap->audio_isoc_streaming) {
		JOM(4, "already streaming audio urbs\n");
		return 0;
	}

	JOM(4, "initial submission of all audio urbs\n");
	rc = usb_set_interface(peasycap->pusb_device,
			       peasycap->audio_interface,
			       peasycap->audio_altsetting_on);
	JOM(8, "usb_set_interface(.,%i,%i) returned %i\n",
	    peasycap->audio_interface,
	    peasycap->audio_altsetting_on, rc);

	isbad = 0;	/* number of urbs that could not be submitted */
	nospc = 0;	/* of those, how many failed with -ENOSPC */
	m = 0;		/* number of urbs successfully submitted */
	list_for_each(plist_head, peasycap->purb_audio_head) {
		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
		if (pdata_urb && pdata_urb->purb) {
			purb = pdata_urb->purb;
			isbuf = pdata_urb->isbuf;

			purb->interval = 1;
			purb->dev = peasycap->pusb_device;
			purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
						     peasycap->audio_endpointnumber);
			purb->transfer_flags = URB_ISO_ASAP;
			purb->transfer_buffer = peasycap->audio_isoc_buffer[isbuf].pgo;
			purb->transfer_buffer_length = peasycap->audio_isoc_buffer_size;
#ifdef CONFIG_EASYCAP_OSS
			purb->complete = easyoss_complete;
#else /* CONFIG_EASYCAP_OSS */
			purb->complete = easycap_alsa_complete;
#endif /* CONFIG_EASYCAP_OSS */
			purb->context = peasycap;
			purb->start_frame = 0;
			purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
			for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
				purb->iso_frame_desc[j].offset = j * peasycap->audio_isoc_maxframesize;
				purb->iso_frame_desc[j].length = peasycap->audio_isoc_maxframesize;
			}

			rc = usb_submit_urb(purb, GFP_KERNEL);
			if (rc) {
				isbad++;
				if (-ENOSPC == rc)
					nospc++;
				SAM("ERROR: usb_submit_urb() failed"
				    " for urb with rc: -%s: %d\n",
				    strerror(rc), rc);
			} else {
				m++;
			}
		} else {
			isbad++;
		}
	}
	if (nospc) {
		SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
		SAM("..... possibly inadequate USB bandwidth\n");
		peasycap->audio_eof = 1;
	}
	if (isbad) {
		/* partial submission is useless: kill what did get in */
		JOM(4, "attempting cleanup instead of submitting\n");
		list_for_each(plist_head, (peasycap->purb_audio_head)) {
			pdata_urb = list_entry(plist_head, struct data_urb, list_head);
			if (pdata_urb && pdata_urb->purb)
				usb_kill_urb(pdata_urb->purb);
		}
		peasycap->audio_isoc_streaming = 0;
	} else {
		peasycap->audio_isoc_streaming = m;
		JOM(4, "submitted %i audio urbs\n", m);
	}
	return 0;
}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* KILL ALL AUDIO URBS.
*/
/*---------------------------------------------------------------------------*/
/*
 * KILL ALL AUDIO URBS.
 *
 * Marks audio streaming as stopped and synchronously kills every urb
 * on the audio urb list.  Returns 0, or -EFAULT when the easycap state
 * is inconsistent.
 */
int
kill_audio_urbs(struct easycap *peasycap)
{
	struct list_head *cursor;
	struct data_urb *pdata_urb;
	int killed;

	if (!peasycap) {
		SAY("ERROR: peasycap is NULL\n");
		return -EFAULT;
	}
	if (!peasycap->audio_isoc_streaming) {
		JOM(8, "%i=audio_isoc_streaming, no audio urbs killed\n",
		    peasycap->audio_isoc_streaming);
		return 0;
	}
	if (!peasycap->purb_audio_head) {
		SAM("ERROR: peasycap->purb_audio_head is NULL\n");
		return -EFAULT;
	}

	peasycap->audio_isoc_streaming = 0;
	JOM(4, "killing audio urbs\n");

	killed = 0;
	list_for_each(cursor, peasycap->purb_audio_head) {
		pdata_urb = list_entry(cursor, struct data_urb, list_head);
		if (!pdata_urb || !pdata_urb->purb)
			continue;
		usb_kill_urb(pdata_urb->purb);
		killed++;
	}

	JOM(4, "%i audio urbs killed\n", killed);
	return 0;
}
/*****************************************************************************/
| gpl-2.0 |
stratosk/samsung-kernel-aries | drivers/staging/tm6000/tm6000-input.c | 2537 | 10012 | /*
* tm6000-input.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (C) 2010 Stefan Ringel <stefan.ringel@arcor.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <media/rc-core.h>
#include "tm6000.h"
#include "tm6000-regs.h"
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug message [IR]");
static unsigned int enable_ir = 1;
module_param(enable_ir, int, 0644);
MODULE_PARM_DESC(enable_ir, "enable ir (default is enable)");
/* number of 50ms for ON-OFF-ON power led */
/* show IR activity */
#define PWLED_OFF 2
#undef dprintk
#define dprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
}
/* Result of one IR poll: the decoded scancode bytes. */
struct tm6000_ir_poll_result {
	u16 rc_data;
};

/* Per-device IR receiver state. */
struct tm6000_IR {
	struct tm6000_core *dev;
	struct rc_dev *rc;
	char name[32];			/* input device name */
	char phys[32];			/* USB physical path + "/input0" */
	/* poll external decoder */
	int polling;			/* poll period in ms */
	struct delayed_work work;	/* periodic polling work */
	u8 wait:1;			/* set via tm6000_ir_wait(); presumably suppresses polling — see default_polling_getkey() */
	u8 key:1;			/* a key press is pending delivery */
	u8 pwled:1;			/* power LED currently blinked off */
	u8 pwledcnt;			/* polls elapsed since the LED blink */
	u16 key_addr;			/* NEC address byte matched against urb_data[1] */
	struct urb *int_urb;		/* interrupt-endpoint urb, if started */
	u8 *urb_data;			/* copy of the last interrupt payload */
	int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
	/* IR device properties */
	u64 rc_type;			/* currently selected protocol (RC_TYPE_*) */
};
/* Set or clear the IR "wait" flag; no-op when no IR receiver exists. */
void tm6000_ir_wait(struct tm6000_core *dev, u8 state)
{
	struct tm6000_IR *ir = dev->ir;

	if (!ir)
		return;

	ir->wait = state ? 1 : 0;
}
/*
 * Program the device-side IR decoder for the currently selected
 * protocol.  Only NEC gets an explicit register setup; every other
 * protocol is configured by sending an opaque 10-byte blob (labelled
 * "hack" by the original author) through a vendor control request.
 * Returns 0 on success or the USB transfer error code.
 */
static int tm6000_ir_config(struct tm6000_IR *ir)
{
	struct tm6000_core *dev = ir->dev;
	u8 buf[10];
	int rc;

	switch (ir->rc_type) {
	case RC_TYPE_NEC:
		/* Setup IR decoder for NEC standard 12MHz system clock */
		/* IR_LEADER_CNT = 0.9ms */
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_LEADER1, 0xaa);
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_LEADER0, 0x30);
		/* IR_PULSE_CNT = 0.7ms */
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT1, 0x20);
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0xd0);
		/* Remote WAKEUP = enable */
		tm6000_set_reg(dev, TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe);
		/* IR_WKUP_SEL = Low byte in decoded IR data */
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0xff);
		/* IR_WKU_ADD code */
		tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_ADD, 0xff);
		/* flash the LED off and back on */
		tm6000_flash_led(dev, 0);
		msleep(100);
		tm6000_flash_led(dev, 1);
		break;
	default:
		/* hack */
		buf[0] = 0xff;
		buf[1] = 0xff;
		buf[2] = 0xf2;
		buf[3] = 0x2b;
		buf[4] = 0x20;
		buf[5] = 0x35;
		buf[6] = 0x60;
		buf[7] = 0x04;
		buf[8] = 0xc0;
		buf[9] = 0x08;
		rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
			USB_RECIP_DEVICE, REQ_00_SET_IR_VALUE, 0, 0, buf, 0x0a);
		msleep(100);
		if (rc < 0) {
			printk(KERN_INFO "IR configuration failed");
			return rc;
		}
		break;
	}
	return 0;
}
static void tm6000_ir_urb_received(struct urb *urb)
{
struct tm6000_core *dev = urb->context;
struct tm6000_IR *ir = dev->ir;
int rc;
if (urb->status != 0)
printk(KERN_INFO "not ready\n");
else if (urb->actual_length > 0) {
memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
ir->key = 1;
}
rc = usb_submit_urb(urb, GFP_ATOMIC);
}
/*
 * Fetch the last decoded IR scancode into poll_result.
 * Returns 0 on success or a negative USB transfer error.
 *
 * NOTE(review): "&dev->int_in" takes the address of an embedded struct
 * member, which is never NULL — so "!&dev->int_in" is always false and
 * "if (&dev->int_in)" is always true.  The register-polling else-branch
 * below is therefore dead code here.  Presumably the test was meant to
 * be on something like dev->int_in.endp — confirm against
 * struct tm6000_core before changing.
 */
static int default_polling_getkey(struct tm6000_IR *ir,
	struct tm6000_ir_poll_result *poll_result)
{
	struct tm6000_core *dev = ir->dev;
	int rc;
	u8 buf[2];

	if (ir->wait && !&dev->int_in)
		return 0;

	if (&dev->int_in) {
		/* interrupt mode: use the payload copied by the urb handler */
		switch (ir->rc_type) {
		case RC_TYPE_RC5:
			poll_result->rc_data = ir->urb_data[0];
			break;
		case RC_TYPE_NEC:
			/* only accept codes matching the configured address */
			if (ir->urb_data[1] == ((ir->key_addr >> 8) & 0xff)) {
				poll_result->rc_data = ir->urb_data[0]
					| ir->urb_data[1] << 8;
			}
			break;
		default:
			poll_result->rc_data = ir->urb_data[0]
				| ir->urb_data[1] << 8;
			break;
		}
	} else {
		/* polled mode: toggle REQ_04_EN_DISABLE_MCU_INT, then read */
		tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
		msleep(10);
		tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
		msleep(10);
		if (ir->rc_type == RC_TYPE_RC5) {
			rc = tm6000_read_write_usb(dev, USB_DIR_IN |
				USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				REQ_02_GET_IR_CODE, 0, 0, buf, 1);
			msleep(10);
			dprintk("read data=%02x\n", buf[0]);
			if (rc < 0)
				return rc;
			poll_result->rc_data = buf[0];
		} else {
			rc = tm6000_read_write_usb(dev, USB_DIR_IN |
				USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				REQ_02_GET_IR_CODE, 0, 0, buf, 2);
			msleep(10);
			dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
			if (rc < 0)
				return rc;
			poll_result->rc_data = buf[0] | buf[1] << 8;
		}
		/* a low byte of 0xff is treated as "no key pressed" */
		if ((poll_result->rc_data & 0x00ff) != 0xff)
			ir->key = 1;
	}
	return 0;
}
/*
 * One polling step: fetch the current scancode, maintain the power-LED
 * blink and report a pending key press to rc-core.
 */
static void tm6000_ir_handle_key(struct tm6000_IR *ir)
{
	struct tm6000_core *dev = ir->dev;
	struct tm6000_ir_poll_result poll_result;
	int rc;

	/* read the registers containing the IR status */
	rc = ir->get_key(ir, &poll_result);
	if (rc < 0) {
		printk(KERN_INFO "ir->get_key() failed %d\n", rc);
		return;
	}
	dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);

	/* restore the power LED after PWLED_OFF polling intervals */
	if (ir->pwled) {
		if (ir->pwledcnt < PWLED_OFF) {
			ir->pwledcnt += 1;
		} else {
			ir->pwled = 0;
			ir->pwledcnt = 0;
			tm6000_flash_led(dev, 1);
		}
	}

	if (!ir->key)
		return;

	/* deliver the key and blink the LED off to signal activity */
	rc_keydown(ir->rc, poll_result.rc_data, 0);
	ir->key = 0;
	ir->pwled = 1;
	ir->pwledcnt = 0;
	tm6000_flash_led(dev, 0);
}
/* Delayed-work handler: poll once, then re-arm for the next interval. */
static void tm6000_ir_work(struct work_struct *work)
{
	struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work);
	unsigned long delay = msecs_to_jiffies(ir->polling);

	tm6000_ir_handle_key(ir);
	schedule_delayed_work(&ir->work, delay);
}
/* rc-core open callback: start the polling work immediately. */
static int tm6000_ir_start(struct rc_dev *rc)
{
	struct tm6000_IR *ir = rc->priv;

	INIT_DELAYED_WORK(&ir->work, tm6000_ir_work);
	schedule_delayed_work(&ir->work, 0);

	return 0;
}
/* rc-core close callback: stop polling and wait for the work to finish. */
static void tm6000_ir_stop(struct rc_dev *rc)
{
	struct tm6000_IR *ir = rc->priv;

	cancel_delayed_work_sync(&ir->work);
}
/*
 * rc-core protocol-change callback: remember the protocol, capture the
 * NEC address from the keymap when applicable and reprogram the device.
 */
int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
{
	struct tm6000_IR *ir = rc->priv;

	if (!ir)
		return 0;

	if (rc_type == RC_TYPE_NEC && rc->rc_map.scan)
		ir->key_addr = (rc->rc_map.scan[0].scancode >> 8) & 0xffff;

	ir->get_key = default_polling_getkey;
	ir->rc_type = rc_type;

	tm6000_ir_config(ir);
	/* TODO */
	return 0;
}
/*
 * Set up and submit the interrupt urb used for IR reception.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is freed again and ir->int_urb is reset to NULL.
 *
 * Fixes:
 *  - ir->urb_data was allocated unchecked AFTER usb_submit_urb(); the
 *    completion handler copies into ir->urb_data and can run as soon as
 *    the urb is submitted, so a NULL dereference was possible.  It is
 *    now allocated (and checked) before submission.
 *  - ir->int_urb was left dangling on the failure paths; it is now
 *    cleared so tm6000_ir_fini()'s "if (ir->int_urb)" test stays valid.
 */
int tm6000_ir_int_start(struct tm6000_core *dev)
{
	struct tm6000_IR *ir = dev->ir;
	int pipe, size;
	int err = -ENOMEM;

	if (!ir)
		return -ENODEV;

	ir->int_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!ir->int_urb)
		return -ENOMEM;

	pipe = usb_rcvintpipe(dev->udev,
		dev->int_in.endp->desc.bEndpointAddress
		& USB_ENDPOINT_NUMBER_MASK);

	size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
	dprintk("IR max size: %d\n", size);

	ir->int_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
	if (!ir->int_urb->transfer_buffer)
		goto err_free_urb;

	/* must exist before the urb is live: the handler writes into it */
	ir->urb_data = kzalloc(size, GFP_KERNEL);
	if (!ir->urb_data)
		goto err_free_buf;

	dprintk("int interval: %d\n", dev->int_in.endp->desc.bInterval);
	usb_fill_int_urb(ir->int_urb, dev->udev, pipe,
		ir->int_urb->transfer_buffer, size,
		tm6000_ir_urb_received, dev,
		dev->int_in.endp->desc.bInterval);

	err = usb_submit_urb(ir->int_urb, GFP_KERNEL);
	if (err)
		goto err_free_data;

	return 0;

err_free_data:
	kfree(ir->urb_data);
	ir->urb_data = NULL;
err_free_buf:
	kfree(ir->int_urb->transfer_buffer);
err_free_urb:
	usb_free_urb(ir->int_urb);
	ir->int_urb = NULL;
	return err;
}
/*
 * Kill the interrupt urb and release its buffers.  Safe to call when
 * the urb was never started.
 *
 * Fix: ir->int_urb was dereferenced unconditionally; a NULL guard makes
 * the function safe regardless of caller checks.
 */
void tm6000_ir_int_stop(struct tm6000_core *dev)
{
	struct tm6000_IR *ir = dev->ir;

	if (!ir || !ir->int_urb)
		return;

	usb_kill_urb(ir->int_urb);
	kfree(ir->int_urb->transfer_buffer);
	usb_free_urb(ir->int_urb);
	ir->int_urb = NULL;
	kfree(ir->urb_data);
	ir->urb_data = NULL;
}
int tm6000_ir_init(struct tm6000_core *dev)
{
struct tm6000_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
if (!enable_ir)
return -ENODEV;
if (!dev->caps.has_remote)
return 0;
if (!dev->ir_codes)
return 0;
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc)
goto out;
/* record handles to ourself */
ir->dev = dev;
dev->ir = ir;
ir->rc = rc;
/* input einrichten */
rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
rc->priv = ir;
rc->change_protocol = tm6000_ir_change_protocol;
rc->open = tm6000_ir_start;
rc->close = tm6000_ir_stop;
rc->driver_type = RC_DRIVER_SCANCODE;
ir->polling = 50;
ir->pwled = 0;
ir->pwledcnt = 0;
snprintf(ir->name, sizeof(ir->name), "tm5600/60x0 IR (%s)",
dev->name);
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
tm6000_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
rc->input_name = ir->name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_USB;
rc->input_id.version = 1;
rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
rc->map_name = dev->ir_codes;
rc->driver_name = "tm6000";
rc->dev.parent = &dev->udev->dev;
if (&dev->int_in) {
dprintk("IR over int\n");
err = tm6000_ir_int_start(dev);
if (err)
goto out;
}
/* ir register */
err = rc_register_device(rc);
if (err)
goto out;
return 0;
out:
dev->ir = NULL;
rc_free_device(rc);
kfree(ir);
return err;
}
/* Unregister and free the IR receiver; returns 0 always. */
int tm6000_ir_fini(struct tm6000_core *dev)
{
	struct tm6000_IR *ir = dev->ir;

	/* skip detach on non attached board */
	if (!ir)
		return 0;

	rc_unregister_device(ir->rc);

	if (ir->int_urb)
		tm6000_ir_int_stop(dev);

	dev->ir = NULL;
	kfree(ir);
	return 0;
}
| gpl-2.0 |
ravendra275/sony_kernel_msm8960t | arch/s390/oprofile/init.c | 3049 | 12004 | /**
* arch/s390/oprofile/init.c
*
* S390 Version
* Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
* Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
* Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
* Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
*
* @remark Copyright 2002-2011 OProfile authors
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <asm/processor.h>
#include "../../../drivers/oprofile/oprof.h"
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#ifdef CONFIG_64BIT
#include "hwsampler.h"
#include "op_counter.h"
#define DEFAULT_INTERVAL 4127518
#define DEFAULT_SDBT_BLOCKS 1
#define DEFAULT_SDB_BLOCKS 511
static unsigned long oprofile_hw_interval = DEFAULT_INTERVAL;
static unsigned long oprofile_min_interval;
static unsigned long oprofile_max_interval;
static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
static int hwsampler_enabled;
static int hwsampler_running; /* start_mutex must be held to change */
static int hwsampler_available;
static struct oprofile_operations timer_ops;
struct op_counter_config counter_config;
enum __force_cpu_type {
reserved = 0, /* do not force */
timer,
};
static int force_cpu_type;
/*
 * Module-parameter setter: "timer" forces the legacy timer cpu type,
 * anything else clears the override.  Always succeeds.
 */
static int set_cpu_type(const char *str, struct kernel_param *kp)
{
	if (strcmp(str, "timer") != 0) {
		force_cpu_type = 0;
		return 0;
	}

	force_cpu_type = timer;
	printk(KERN_INFO "oprofile: forcing timer to be returned "
	       "as cpu type\n");
	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
"(report cpu_type \"timer\"");
/*
 * oprofile start callback: snapshot the user's mode selection and
 * start either hardware sampling or the timer fallback.
 * Per the declaration of hwsampler_running, start_mutex is held here.
 */
static int oprofile_hwsampler_start(void)
{
	int retval;

	hwsampler_running = hwsampler_enabled;

	/* hardware sampling disabled: fall back to timer mode */
	if (!hwsampler_running)
		return timer_ops.start();

	retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
	if (retval)
		return retval;

	retval = hwsampler_start_all(oprofile_hw_interval);
	if (retval)
		/* undo the buffer allocation on failure */
		hwsampler_deallocate();

	return retval;
}
/*
 * oprofile stop callback: tear down whichever sampling mode start()
 * actually engaged.
 */
static void oprofile_hwsampler_stop(void)
{
	if (hwsampler_running) {
		hwsampler_stop_all();
		hwsampler_deallocate();
	} else {
		timer_ops.stop();
	}
}
/*
* File ops used for:
* /dev/oprofile/0/enabled
* /dev/oprofile/hwsampling/hwsampler (cpu_type = timer)
*/
/* Report whether hardware sampling is enabled (0/1). */
static ssize_t hwsampler_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset);
}

/*
 * Select hardware sampling (1) or timer mode (0).  Rejected with -EBUSY
 * while profiling is started.
 * NOTE(review): this is the only writer here that tests "retval <= 0"
 * rather than "retval" after oprofilefs_ulong_from_user() — confirm
 * whether the difference is intentional.
 */
static ssize_t hwsampler_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)
		return retval;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (oprofile_started)
		/*
		 * save to do without locking as we set
		 * hwsampler_running in start() when start_mutex is
		 * held
		 */
		return -EBUSY;

	hwsampler_enabled = val;
	return count;
}

static const struct file_operations hwsampler_fops = {
	.read = hwsampler_read,
	.write = hwsampler_write,
};
/*
* File ops used for:
* /dev/oprofile/0/count
* /dev/oprofile/hwsampling/hw_interval (cpu_type = timer)
*
* Make sure that the value is within the hardware range.
*/
/* Report the current hardware sampling interval. */
static ssize_t hw_interval_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(oprofile_hw_interval, buf,
			count, offset);
}

/* Set the sampling interval, silently clamped to the hardware range. */
static ssize_t hw_interval_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;
	if (val < oprofile_min_interval)
		oprofile_hw_interval = oprofile_min_interval;
	else if (val > oprofile_max_interval)
		oprofile_hw_interval = oprofile_max_interval;
	else
		oprofile_hw_interval = val;
	return count;
}

static const struct file_operations hw_interval_fops = {
	.read = hw_interval_read,
	.write = hw_interval_write,
};
/*
* File ops used for:
* /dev/oprofile/0/event
* Only a single event with number 0 is supported with this counter.
*
* /dev/oprofile/0/unit_mask
* This is a dummy file needed by the user space tools.
* No value other than 0 is accepted or returned.
*/
/* Always reads as 0: the virtual counter supports only event/unit_mask 0. */
static ssize_t hwsampler_zero_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(0, buf, count, offset);
}

/* Accept only the value 0; anything else is -EINVAL. */
static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;
	if (val != 0)
		return -EINVAL;
	return count;
}

static const struct file_operations zero_fops = {
	.read = hwsampler_zero_read,
	.write = hwsampler_zero_write,
};
/* /dev/oprofile/0/kernel file ops. */
/* Report whether kernel-space sampling is enabled (0/1). */
static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(counter_config.kernel,
			buf, count, offset);
}

/* Enable (1) or disable (0) sampling of kernel-space execution. */
static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;

	if (val != 0 && val != 1)
		return -EINVAL;

	counter_config.kernel = val;

	return count;
}

static const struct file_operations kernel_fops = {
	.read = hwsampler_kernel_read,
	.write = hwsampler_kernel_write,
};
/* /dev/oprofile/0/user file ops. */
/* Report whether user-space sampling is enabled (0/1). */
static ssize_t hwsampler_user_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(counter_config.user,
			buf, count, offset);
}

/* Enable (1) or disable (0) sampling of user-space execution. */
static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;

	if (val != 0 && val != 1)
		return -EINVAL;

	counter_config.user = val;

	return count;
}

static const struct file_operations user_fops = {
	.read = hwsampler_user_read,
	.write = hwsampler_user_write,
};
/*
* File ops used for: /dev/oprofile/timer/enabled
* The value always has to be the inverted value of hwsampler_enabled. So
* no separate variable is created. That way we do not need locking.
*/
/* Report timer mode as the inverse of hwsampler_enabled. */
static ssize_t timer_enabled_read(struct file *file, char __user *buf,
		size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset);
}

/*
 * Enable (1) or disable (0) timer mode; disabling requires hardware
 * sampling to be available, and no change is allowed while started.
 */
static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
		size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Timer cannot be disabled without having hardware sampling. */
	if (val == 0 && !hwsampler_available)
		return -EINVAL;

	if (oprofile_started)
		/*
		 * save to do without locking as we set
		 * hwsampler_running in start() when start_mutex is
		 * held
		 */
		return -EBUSY;

	/* stored inverted: see the comment above timer_enabled_read() */
	hwsampler_enabled = !val;

	return count;
}

static const struct file_operations timer_enabled_fops = {
	.read = timer_enabled_read,
	.write = timer_enabled_write,
};
/*
 * Populate the oprofilefs tree with the s390-specific control files.
 * The layout depends on whether hardware sampling is available and on
 * the force_cpu_type module parameter.  Returns 0 or -EINVAL when a
 * directory cannot be created.
 */
static int oprofile_create_hwsampling_files(struct super_block *sb,
					struct dentry *root)
{
	struct dentry *dir;

	dir = oprofilefs_mkdir(sb, root, "timer");
	if (!dir)
		return -EINVAL;

	oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);

	/* without hardware sampling only the timer switch exists */
	if (!hwsampler_available)
		return 0;

	/* reinitialize default values */
	hwsampler_enabled = 1;
	counter_config.kernel = 1;
	counter_config.user = 1;

	if (!force_cpu_type) {
		/*
		 * Create the counter file system. A single virtual
		 * counter is created which can be used to
		 * enable/disable hardware sampling dynamically from
		 * user space. The user space will configure a single
		 * counter with a single event. The value of 'event'
		 * and 'unit_mask' are not evaluated by the kernel code
		 * and can only be set to 0.
		 */

		dir = oprofilefs_mkdir(sb, root, "0");
		if (!dir)
			return -EINVAL;

		oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
		oprofilefs_create_file(sb, dir, "event", &zero_fops);
		oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
		oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
		oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
		oprofilefs_create_file(sb, dir, "user", &user_fops);
		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
					&oprofile_sdbt_blocks);

	} else {
		/*
		 * Hardware sampling can be used but the cpu_type is
		 * forced to timer in order to deal with legacy user
		 * space tools. The /dev/oprofile/hwsampling fs is
		 * provided in that case.
		 */
		dir = oprofilefs_mkdir(sb, root, "hwsampling");
		if (!dir)
			return -EINVAL;

		oprofilefs_create_file(sb, dir, "hwsampler",
				       &hwsampler_fops);
		oprofilefs_create_file(sb, dir, "hw_interval",
				       &hw_interval_fops);
		oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
					   &oprofile_min_interval);
		oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
					   &oprofile_max_interval);
		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
					&oprofile_sdbt_blocks);
	}
	return 0;
}
/*
 * Probe for the CPU-measurement sampling facility and wire the ops up
 * for hardware sampling.  Returns 0 on success or -ENODEV when the CPU
 * or facility is unsupported (the caller then falls back to timer
 * mode).
 */
static int oprofile_hwsampler_init(struct oprofile_operations *ops)
{
	/*
	 * Initialize the timer mode infrastructure as well in order
	 * to be able to switch back dynamically. oprofile_timer_init
	 * is not supposed to fail.
	 */
	if (oprofile_timer_init(ops))
		BUG();

	/* keep a copy of the timer callbacks before overriding them */
	memcpy(&timer_ops, ops, sizeof(timer_ops));

	ops->create_files = oprofile_create_hwsampling_files;

	/*
	 * If the user space tools do not support newer cpu types,
	 * the force_cpu_type module parameter
	 * can be used to always return \"timer\" as cpu type.
	 */
	if (force_cpu_type != timer) {
		struct cpuid id;

		get_cpu_id (&id);

		switch (id.machine) {
		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
		default: return -ENODEV;
		}
	}

	if (hwsampler_setup())
		return -ENODEV;

	/*
	 * Query the range for the sampling interval from the
	 * hardware.
	 */
	oprofile_min_interval = hwsampler_query_min_interval();
	if (oprofile_min_interval == 0)
		return -ENODEV;
	oprofile_max_interval = hwsampler_query_max_interval();
	if (oprofile_max_interval == 0)
		return -ENODEV;

	/* The initial value should be sane */
	if (oprofile_hw_interval < oprofile_min_interval)
		oprofile_hw_interval = oprofile_min_interval;
	if (oprofile_hw_interval > oprofile_max_interval)
		oprofile_hw_interval = oprofile_max_interval;

	printk(KERN_INFO "oprofile: System z hardware sampling "
	       "facility found.\n");

	ops->start = oprofile_hwsampler_start;
	ops->stop = oprofile_hwsampler_stop;

	return 0;
}
/* Tear down the hardware sampling facility (64-bit only). */
static void oprofile_hwsampler_exit(void)
{
	hwsampler_shutdown();
}
#endif /* CONFIG_64BIT */
/*
 * Arch entry point called by the oprofile core.  Always installs the
 * s390 backtrace helper; on 64-bit additionally tries to set up
 * hardware sampling and records whether it is available.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = s390_backtrace;

#ifdef CONFIG_64BIT
	/*
	 * -ENODEV is not reported to the caller. The module itself
	 * will use the timer mode sampling as fallback and this is
	 * always available.
	 */
	hwsampler_available = oprofile_hwsampler_init(ops) == 0;

	return 0;
#else
	return -ENODEV;
#endif
}
/* Arch teardown: undo oprofile_arch_init()'s 64-bit setup, if any. */
void oprofile_arch_exit(void)
{
#ifdef CONFIG_64BIT
	oprofile_hwsampler_exit();
#endif
}
| gpl-2.0 |
bestmjh47/android_kernel_kttech_e100 | drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 4073 | 9074 | /**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "ttm/ttm_placement.h"
#include "drmP.h"
#include "vmwgfx_drv.h"
/**
* vmw_dmabuf_to_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer if true.
* @interruptible: Use interruptible wait.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
/*
 * Validate @buf into @placement under the master's write lock.
 * The pinned query bo is flushed/unpinned first to avoid validation
 * failures.  Returns 0 or a negative errno (-ERESTARTSYS on signal).
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (likely(ret == 0)) {
		ret = ttm_bo_validate(bo, placement, interruptible,
				      false, false);
		ttm_bo_unreserve(bo);
	}

	ttm_write_unlock(&vmaster->lock);
	return ret;
}
/**
* vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	/* only pinning requires the query bo to be flushed/unpinned */
	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	/* "_ne" placements presumably mean no-evict, i.e. pinned — confirm */
	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;


	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
/**
 * vmw_dmabuf_to_vram - Move a buffer to vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
		       struct vmw_dma_buffer *buf,
		       bool pin, bool interruptible)
{
	/* Pinned buffers use the non-evictable VRAM placement. */
	struct ttm_placement *vram_placement = pin ?
		&vmw_vram_ne_placement : &vmw_vram_placement;

	return vmw_dmabuf_to_placement(dev_priv, buf, vram_placement,
				       interruptible);
}
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 *  0 on success, negative error code on failure.
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	int ret = 0;

	/* Start from the VRAM placement and clamp lpfn so the buffer can
	 * only be validated into the first num_pages pages of VRAM. */
	if (pin)
		placement = vmw_vram_ne_placement;
	else
		placement = vmw_vram_placement;
	placement.lpfn = bo->num_pages;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		/* Kick it out to system first so the validate below can
		 * place it at offset 0 (best effort; result ignored). */
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 *  0 on success, negative error code on failure.
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	/*
	 * We could in theory early out if the buffer is
	 * unpinned but we need to lock and reserve the buffer
	 * anyways so we don't gain much by that.
	 */
	return vmw_dmabuf_to_placement(dev_priv, buf,
				       &vmw_evictable_placement,
				       interruptible);
}
/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	bool in_vram = (bo->mem.mem_type == TTM_PL_VRAM);

	/* VRAM buffers address the framebuffer GMR by offset; everything
	 * else is identified by its own GMR id with no offset. */
	ptr->gmrId = in_vram ? SVGA_GMR_FRAMEBUFFER : bo->mem.start;
	ptr->offset = in_vram ? bo->offset : 0;
}
/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved, and present either in VRAM
 * or GMR memory.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
	uint32_t pl_flags;
	struct ttm_placement placement;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/*
	 * mem_type holds a memory-pool index, so it must be compared
	 * against the VMW_PL_GMR pool index, not the VMW_PL_FLAG_GMR
	 * placement flag bit — the old check could never match a
	 * GMR-resident bo and would BUG for every one of them.
	 */
	BUG_ON(old_mem_type != TTM_PL_VRAM &&
	       old_mem_type != VMW_PL_GMR);

	/* Allow both pools so validate keeps the bo where it is. */
	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
	if (pin)
		pl_flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl_flags;

	ret = ttm_bo_validate(bo, &placement, false, true, true);

	/* Validation must succeed without actually moving the buffer. */
	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
| gpl-2.0 |
jollaman999/LGF180-Optimus-G-_Android_KK_v30b_Kernel | arch/arm/mach-s3c24xx/s3c2443.c | 4841 | 2877 | /* linux/arch/arm/mach-s3c2443/s3c2443.c
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Samsung S3C2443 Mobile CPU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <mach/regs-s3c2443-clock.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
#include <plat/s3c2443.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fb-core.h>
#include <plat/nand-core.h>
#include <plat/adc-core.h>
#include <plat/rtc-core.h>
/* Static virtual mappings registered at map_io time, before ioremap is
 * available: watchdog, clock/power and timer register banks. */
static struct map_desc s3c2443_iodesc[] __initdata = {
	IODESC_ENT(WATCHDOG),
	IODESC_ENT(CLKPWR),
	IODESC_ENT(TIMER),
};
/* Subsystem bus for the S3C2443 core; registered early by
 * s3c2443_core_init() so drivers can attach to "s3c2443-core". */
struct bus_type s3c2443_subsys = {
	.name = "s3c2443-core",
	.dev_name = "s3c2443-core",
};

/* Anchor device on the subsystem, registered by s3c2443_init(). */
static struct device s3c2443_dev = {
	.bus = &s3c2443_subsys,
};
/*
 * Machine restart hook.  Mode 's' requests a soft restart (jump to
 * address 0); otherwise trigger a hard reset by writing the software
 * reset register.
 */
void s3c2443_restart(char mode, const char *cmd)
{
	if (mode == 's')
		soft_restart(0);

	__raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
}
/*
 * SoC initialisation: rename the generic platform devices to their
 * S3C2443-specific variants, fix up the watchdog IRQ and register the
 * core device.  Returns 0 on success or the device_register() error.
 */
int __init s3c2443_init(void)
{
	/* pr_info instead of a bare printk with no log level. */
	pr_info("S3C2443: Initialising architecture\n");

	s3c_nand_setname("s3c2412-nand");
	s3c_fb_setname("s3c2443-fb");
	s3c_adc_setname("s3c2443-adc");
	s3c_rtc_setname("s3c2443-rtc");

	/* change WDT IRQ number */
	s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT;
	s3c_device_wdt.resource[1].end   = IRQ_S3C2443_WDT;

	return device_register(&s3c2443_dev);
}
/* Register the UART platform devices; the S3C2443 reuses the S3C2440
 * UART block, hence the "s3c2440-uart" device name. */
void __init s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	s3c24xx_init_uartdevs("s3c2440-uart", s3c2410_uart_resources, cfg, no);
}
/* s3c2443_map_io
 *
 * register the standard cpu IO areas, and any passed in from the
 * machine specific initialisation.
 */
void __init s3c2443_map_io(void)
{
	/* Install the S3C2443-specific GPIO pull accessors before any
	 * machine code configures pads. */
	s3c24xx_gpiocfg_default.set_pull = s3c2443_gpio_setpull;
	s3c24xx_gpiocfg_default.get_pull = s3c2443_gpio_getpull;

	iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
}
/* need to register the subsystem before we actually register the device, and
 * we also need to ensure that it has been initialised before any of the
 * drivers even try to use it (even if not on an s3c2443 based system)
 * as a driver which may support both 2443 and 2440 may try and use it.
 */
static int __init s3c2443_core_init(void)
{
	return subsys_system_register(&s3c2443_subsys, NULL);
}

core_initcall(s3c2443_core_init);
| gpl-2.0 |
srsdanitest/swingacera9 | arch/powerpc/sysdev/cpm2_pic.c | 6889 | 7017 | /*
* Platform information definitions.
*
* Copied from arch/ppc/syslib/cpm2_pic.c with minor subsequent updates
* to make in work in arch/powerpc/. Original (c) belongs to Dan Malek.
*
* Author: Vitaly Bordug <vbordug@ru.mvista.com>
*
* 1999-2001 (c) Dan Malek <dan@embeddedalley.com>
* 2006 (c) MontaVista Software, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/* The CPM2 internal interrupt controller. It is usually
* the only interrupt controller.
* There are two 32-bit registers (high/low) for up to 64
* possible interrupts.
*
* Now, the fun starts.....Interrupt Numbers DO NOT MAP
* in a simple arithmetic fashion to mask or pending registers.
* That is, interrupt 4 does not map to bit position 4.
* We create two tables, indexed by vector number, to indicate
* which register to use and which bit in the register to use.
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/fs_pd.h>
#include "cpm2_pic.h"
/* External IRQs: hwirq numbers of the EXT1..EXT7 lines. */
#define CPM2_IRQ_EXT1 19
#define CPM2_IRQ_EXT7 25

/* Port C IRQs: note PORTC15 is the lowest hwirq, PORTC0 the highest. */
#define CPM2_IRQ_PORTC15 48
#define CPM2_IRQ_PORTC0 63

/* Memory-mapped interrupt-controller registers (set in cpm2_pic_init). */
static intctl_cpm2_t __iomem *cpm2_intctl;

/* Linear irq_domain translating hwirqs 0..63 to Linux virqs. */
static struct irq_domain *cpm2_pic_host;

/* Software copy of the two 32-bit mask registers. */
static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
/*
 * Per-vector register select: the value (0 or 1) is added as a word
 * offset to &ic_simrh / &ic_sipnrh to pick the high or low register
 * of the pair.
 */
static const u_char irq_to_siureg[] = {
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};
/* bit numbers do not match the docs, these are precomputed so the bit for
 * a given irq is (1 << irq_to_siubit[irq]) within the register chosen by
 * irq_to_siureg[irq] */
static const u_char irq_to_siubit[] = {
	0, 15, 14, 13, 12, 11, 10, 9,
	8, 7, 6, 5, 4, 3, 2, 1,
	2, 1, 0, 14, 13, 12, 11, 10,
	9, 8, 7, 6, 5, 4, 3, 0,
	31, 30, 29, 28, 27, 26, 25, 24,
	23, 22, 21, 20, 19, 18, 17, 16,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
};
/* irq_mask callback: disable a source by clearing its SIMR bit. */
static void cpm2_mask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	ppc_cached_irq_mask[reg] &= ~(1 << shift);
	out_be32(&cpm2_intctl->ic_simrh + reg, ppc_cached_irq_mask[reg]);
}
/* irq_unmask callback: enable a source by setting its SIMR bit. */
static void cpm2_unmask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	ppc_cached_irq_mask[reg] |= 1 << shift;
	out_be32(&cpm2_intctl->ic_simrh + reg, ppc_cached_irq_mask[reg]);
}
/* irq_ack callback: clear the pending bit by writing it to SIPNR. */
static void cpm2_ack(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	out_be32(&cpm2_intctl->ic_sipnrh + reg, 1 << shift);
}
/*
 * irq_eoi callback: re-enable the source after it has been handled
 * (same as cpm2_unmask_irq plus a barrier).
 */
static void cpm2_end_irq(struct irq_data *d)
{
	int bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	ppc_cached_irq_mask[word] |= 1 << bit;
	out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);

	/*
	 * Work around large numbers of spurious IRQs on PowerPC 82xx
	 * systems.
	 */
	mb();
}
/*
 * irq_set_type callback: validate the requested sense and, for the
 * programmable sources (external IRQ1..7 and Port C), program SIEXR.
 */
static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
	 * IRQ_TYPE_EDGE_BOTH (default).  All others are IRQ_TYPE_EDGE_FALLING
	 * or IRQ_TYPE_LEVEL_LOW (default)
	 */
	if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_EDGE_BOTH;

		if (flow_type != IRQ_TYPE_EDGE_BOTH &&
		    flow_type != IRQ_TYPE_EDGE_FALLING)
			goto err_sense;
	} else {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_LEVEL_LOW;

		/* Reject senses the hardware cannot generate. */
		if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
			goto err_sense;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW)
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* internal IRQ senses are LEVEL_LOW
	 * EXT IRQ and Port C IRQ senses are programmable
	 */
	if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7)
		edibit = (14 - (src - CPM2_IRQ_EXT1));
	else
		if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
			edibit = (31 - (CPM2_IRQ_PORTC0 - src));
		else
			/* Fixed-sense source: nothing to program. */
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
				IRQ_SET_MASK_OK_NOCOPY : -EINVAL;

	/* Read-modify-write SIEXR; only hit the hardware on change. */
	vold = in_be32(&cpm2_intctl->ic_siexr);

	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
		vnew = vold | (1 << edibit);
	else
		vnew = vold & ~(1 << edibit);

	if (vold != vnew)
		out_be32(&cpm2_intctl->ic_siexr, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;

err_sense:
	pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
	return -EINVAL;
}
/* irq_chip shared by all 64 sources; EOI runs only for handled IRQs. */
static struct irq_chip cpm2_pic = {
	.name = "CPM2 SIU",
	.irq_mask = cpm2_mask_irq,
	.irq_unmask = cpm2_unmask_irq,
	.irq_ack = cpm2_ack,
	.irq_eoi = cpm2_end_irq,
	.irq_set_type = cpm2_set_irq_type,
	.flags = IRQCHIP_EOI_IF_HANDLED,
};
/* Fetch the highest-priority pending vector and map it to a virq;
 * returns -1 when nothing is pending. */
unsigned int cpm2_get_irq(void)
{
	unsigned long vec;

	/* For CPM2, read the SIVEC register and shift the bits down
	 * to get the irq number. */
	vec = in_be32(&cpm2_intctl->ic_sivec) >> 26;
	if (!vec)
		return -1;

	return irq_linear_revmap(cpm2_pic_host, vec);
}
/* irq_domain map callback: attach the CPM2 chip with a level handler;
 * cpm2_set_irq_type may later switch the handler to edge. */
static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
	return 0;
}
/* Domain ops: linear map plus the standard one/two-cell DT translator. */
static const struct irq_domain_ops cpm2_pic_host_ops = {
	.map = cpm2_pic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};
/*
 * Map, quiesce and register the CPM2 PIC.  Called once at boot with
 * the controller's device-tree node.
 */
void cpm2_pic_init(struct device_node *node)
{
	int i;

	cpm2_intctl = cpm2_map(im_intctl);

	/* Clear the CPM IRQ controller, in case it has any bits set
	 * from the bootloader
	 */

	/* Mask out everything */
	out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
	out_be32(&cpm2_intctl->ic_simrl, 0x00000000);

	wmb();

	/* Ack everything */
	out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
	out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);

	wmb();

	/* Dummy read of the vector */
	i = in_be32(&cpm2_intctl->ic_sivec);

	rmb();

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	out_be16(&cpm2_intctl->ic_sicr, 0);
	out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
	out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);

	/* create a legacy host */
	cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
	if (cpm2_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		return;
	}
}
| gpl-2.0 |
shakalaca/ASUS_ZenFone_A500CG_A600CG | linux/kernel/arch/powerpc/sysdev/cpm2_pic.c | 6889 | 7017 | /*
* Platform information definitions.
*
* Copied from arch/ppc/syslib/cpm2_pic.c with minor subsequent updates
* to make in work in arch/powerpc/. Original (c) belongs to Dan Malek.
*
* Author: Vitaly Bordug <vbordug@ru.mvista.com>
*
* 1999-2001 (c) Dan Malek <dan@embeddedalley.com>
* 2006 (c) MontaVista Software, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/* The CPM2 internal interrupt controller. It is usually
* the only interrupt controller.
* There are two 32-bit registers (high/low) for up to 64
* possible interrupts.
*
* Now, the fun starts.....Interrupt Numbers DO NOT MAP
* in a simple arithmetic fashion to mask or pending registers.
* That is, interrupt 4 does not map to bit position 4.
* We create two tables, indexed by vector number, to indicate
* which register to use and which bit in the register to use.
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/fs_pd.h>
#include "cpm2_pic.h"
/* External IRQs: hwirq numbers of the EXT1..EXT7 lines. */
#define CPM2_IRQ_EXT1 19
#define CPM2_IRQ_EXT7 25

/* Port C IRQs: note PORTC15 is the lowest hwirq, PORTC0 the highest. */
#define CPM2_IRQ_PORTC15 48
#define CPM2_IRQ_PORTC0 63

/* Memory-mapped interrupt-controller registers (set in cpm2_pic_init). */
static intctl_cpm2_t __iomem *cpm2_intctl;

/* Linear irq_domain translating hwirqs 0..63 to Linux virqs. */
static struct irq_domain *cpm2_pic_host;

/* Software copy of the two 32-bit mask registers. */
static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
/*
 * Per-vector register select: the value (0 or 1) is added as a word
 * offset to &ic_simrh / &ic_sipnrh to pick the high or low register
 * of the pair.
 */
static const u_char irq_to_siureg[] = {
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};
/* bit numbers do not match the docs, these are precomputed so the bit for
 * a given irq is (1 << irq_to_siubit[irq]) within the register chosen by
 * irq_to_siureg[irq] */
static const u_char irq_to_siubit[] = {
	0, 15, 14, 13, 12, 11, 10, 9,
	8, 7, 6, 5, 4, 3, 2, 1,
	2, 1, 0, 14, 13, 12, 11, 10,
	9, 8, 7, 6, 5, 4, 3, 0,
	31, 30, 29, 28, 27, 26, 25, 24,
	23, 22, 21, 20, 19, 18, 17, 16,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
};
/* irq_mask callback: disable a source by clearing its SIMR bit. */
static void cpm2_mask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	ppc_cached_irq_mask[reg] &= ~(1 << shift);
	out_be32(&cpm2_intctl->ic_simrh + reg, ppc_cached_irq_mask[reg]);
}
/* irq_unmask callback: enable a source by setting its SIMR bit. */
static void cpm2_unmask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	ppc_cached_irq_mask[reg] |= 1 << shift;
	out_be32(&cpm2_intctl->ic_simrh + reg, ppc_cached_irq_mask[reg]);
}
/* irq_ack callback: clear the pending bit by writing it to SIPNR. */
static void cpm2_ack(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);
	int reg = irq_to_siureg[hw];
	int shift = irq_to_siubit[hw];

	out_be32(&cpm2_intctl->ic_sipnrh + reg, 1 << shift);
}
/*
 * irq_eoi callback: re-enable the source after it has been handled
 * (same as cpm2_unmask_irq plus a barrier).
 */
static void cpm2_end_irq(struct irq_data *d)
{
	int bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	ppc_cached_irq_mask[word] |= 1 << bit;
	out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);

	/*
	 * Work around large numbers of spurious IRQs on PowerPC 82xx
	 * systems.
	 */
	mb();
}
/*
 * irq_set_type callback: validate the requested sense and, for the
 * programmable sources (external IRQ1..7 and Port C), program SIEXR.
 */
static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
	 * IRQ_TYPE_EDGE_BOTH (default).  All others are IRQ_TYPE_EDGE_FALLING
	 * or IRQ_TYPE_LEVEL_LOW (default)
	 */
	if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_EDGE_BOTH;

		if (flow_type != IRQ_TYPE_EDGE_BOTH &&
		    flow_type != IRQ_TYPE_EDGE_FALLING)
			goto err_sense;
	} else {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_LEVEL_LOW;

		/* Reject senses the hardware cannot generate. */
		if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
			goto err_sense;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW)
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* internal IRQ senses are LEVEL_LOW
	 * EXT IRQ and Port C IRQ senses are programmable
	 */
	if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7)
		edibit = (14 - (src - CPM2_IRQ_EXT1));
	else
		if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
			edibit = (31 - (CPM2_IRQ_PORTC0 - src));
		else
			/* Fixed-sense source: nothing to program. */
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
				IRQ_SET_MASK_OK_NOCOPY : -EINVAL;

	/* Read-modify-write SIEXR; only hit the hardware on change. */
	vold = in_be32(&cpm2_intctl->ic_siexr);

	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
		vnew = vold | (1 << edibit);
	else
		vnew = vold & ~(1 << edibit);

	if (vold != vnew)
		out_be32(&cpm2_intctl->ic_siexr, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;

err_sense:
	pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
	return -EINVAL;
}
/* irq_chip shared by all 64 sources; EOI runs only for handled IRQs. */
static struct irq_chip cpm2_pic = {
	.name = "CPM2 SIU",
	.irq_mask = cpm2_mask_irq,
	.irq_unmask = cpm2_unmask_irq,
	.irq_ack = cpm2_ack,
	.irq_eoi = cpm2_end_irq,
	.irq_set_type = cpm2_set_irq_type,
	.flags = IRQCHIP_EOI_IF_HANDLED,
};
/* Fetch the highest-priority pending vector and map it to a virq;
 * returns -1 when nothing is pending. */
unsigned int cpm2_get_irq(void)
{
	unsigned long vec;

	/* For CPM2, read the SIVEC register and shift the bits down
	 * to get the irq number. */
	vec = in_be32(&cpm2_intctl->ic_sivec) >> 26;
	if (!vec)
		return -1;

	return irq_linear_revmap(cpm2_pic_host, vec);
}
/* irq_domain map callback: attach the CPM2 chip with a level handler;
 * cpm2_set_irq_type may later switch the handler to edge. */
static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
	return 0;
}
/* Domain ops: linear map plus the standard one/two-cell DT translator. */
static const struct irq_domain_ops cpm2_pic_host_ops = {
	.map = cpm2_pic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};
/*
 * Map, quiesce and register the CPM2 PIC.  Called once at boot with
 * the controller's device-tree node.
 */
void cpm2_pic_init(struct device_node *node)
{
	int i;

	cpm2_intctl = cpm2_map(im_intctl);

	/* Clear the CPM IRQ controller, in case it has any bits set
	 * from the bootloader
	 */

	/* Mask out everything */
	out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
	out_be32(&cpm2_intctl->ic_simrl, 0x00000000);

	wmb();

	/* Ack everything */
	out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
	out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);

	wmb();

	/* Dummy read of the vector */
	i = in_be32(&cpm2_intctl->ic_sivec);

	rmb();

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	out_be16(&cpm2_intctl->ic_sicr, 0);
	out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
	out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);

	/* create a legacy host */
	cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
	if (cpm2_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		return;
	}
}
| gpl-2.0 |
whdgmawkd/furnace_kk_lge_msm8974 | arch/mips/jz4740/prom.c | 7913 | 1508 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC prom code
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/serial_reg.h>
#include <asm/bootinfo.h>
#include <asm/mach-jz4740/base.h>
/*
 * Build arcs_cmdline from the argv the bootloader passed us, joining
 * the arguments with spaces and truncating at COMMAND_LINE_SIZE - 1
 * characters (the final byte is reserved for the NUL terminator).
 */
static __init void jz4740_init_cmdline(int argc, char *argv[])
{
	unsigned int count = COMMAND_LINE_SIZE - 1;
	int i;
	char *dst = &(arcs_cmdline[0]);
	char *src;

	for (i = 1; i < argc && count; ++i) {
		src = argv[i];
		while (*src && count) {
			*dst++ = *src++;
			--count;
		}
		/*
		 * The separator space must be charged against the budget
		 * too; the old code wrote it without decrementing count,
		 * which could overflow arcs_cmdline by almost a factor
		 * of two with many short arguments.
		 */
		if (count) {
			*dst++ = ' ';
			--count;
		}
	}
	/* Strip the trailing separator, but only if one was written. */
	if (i > 1 && dst > arcs_cmdline && dst[-1] == ' ')
		--dst;
	*dst = 0;
}
/* Early boot entry: copy the bootloader's command line (fw_arg0 = argc,
 * fw_arg1 = argv) and set the machine type. */
void __init prom_init(void)
{
	jz4740_init_cmdline((int)fw_arg0, (char **)fw_arg1);
	mips_machtype = MACH_INGENIC_JZ4740;
}
/* Nothing to reclaim: the JZ4740 bootloader leaves no PROM memory. */
void __init prom_free_prom_memory(void)
{
}
/* Uncached (KSEG1) address of a UART0 register; registers are spaced
 * four bytes apart.  The argument is parenthesized so expressions such
 * as UART_REG(a + b) shift the whole value, not just b. */
#define UART_REG(_reg) ((void __iomem *)CKSEG1ADDR(JZ4740_UART0_BASE_ADDR + ((_reg) << 2)))

/* Early-console output: busy-wait for the transmitter to drain, then
 * write one character to UART0. */
void prom_putchar(char c)
{
	uint8_t lsr;

	do {
		lsr = readb(UART_REG(UART_LSR));
	} while ((lsr & UART_LSR_TEMT) == 0);

	writeb(c, UART_REG(UART_TX));
}
| gpl-2.0 |
mirror-androidarmv6/android_kernel_lge_msm7x27-3.0.x | drivers/parport/parport_cs.c | 8169 | 5882 | /*======================================================================
A driver for PCMCIA parallel port adapters
(specifically, for the Quatech SPP-100 EPP card: other cards will
probably require driver tweaks)
parport_cs.c 1.29 2002/10/11 06:57:41
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
which case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
======================================================================*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
/*====================================================================*/
/* Module parameters */

MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("PCMCIA parallel port card driver");
MODULE_LICENSE("Dual MPL/GPL");

/* Shorthand for declaring an int module parameter with a default. */
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)

/* Request EPP mode from the card (default on). */
INT_MODULE_PARM(epp_mode, 1);

/*====================================================================*/

/* Config-index bit asking the card for EPP rather than plain SPP. */
#define FORCE_EPP_MODE 0x08

/* Per-socket driver state, hung off pcmcia_device->priv. */
typedef struct parport_info_t {
	struct pcmcia_device *p_dev;
	int ndev;		/* 1 while a parport is registered */
	struct parport *port;	/* the registered port, valid when ndev != 0 */
} parport_info_t;

static void parport_detach(struct pcmcia_device *p_dev);
static int parport_config(struct pcmcia_device *link);
static void parport_cs_release(struct pcmcia_device *);
/*
 * Probe: allocate the per-socket state and hand off to parport_config().
 * Returns 0 on success or a negative errno.
 */
static int parport_probe(struct pcmcia_device *link)
{
	parport_info_t *info;

	dev_dbg(&link->dev, "parport_attach()\n");

	/* Create new parport device */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) return -ENOMEM;
	link->priv = info;
	info->p_dev = link;

	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	return parport_config(link);
} /* parport_attach */
/* Teardown counterpart of parport_probe(): release the port and device
 * resources, then free the per-socket state. */
static void parport_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "parport_detach\n");

	parport_cs_release(link);

	kfree(link->priv);
} /* parport_detach */
/* pcmcia_loop_config callback: force 8-bit data paths on both I/O
 * windows, then try to claim them. */
static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	int i;

	for (i = 0; i < 2; i++) {
		p_dev->resource[i]->flags &= ~IO_DATA_PATH_WIDTH;
		p_dev->resource[i]->flags |= IO_DATA_PATH_WIDTH_8;
	}

	return pcmcia_request_io(p_dev);
}
/*
 * Configure the socket: pick a usable I/O configuration, enable the
 * device and register a PC-style parport on the claimed windows.
 * Returns 0 on success, -ENODEV on any failure (resources released).
 */
static int parport_config(struct pcmcia_device *link)
{
	parport_info_t *info = link->priv;
	struct parport *p;
	int ret;

	dev_dbg(&link->dev, "parport_config\n");

	if (epp_mode)
		link->config_index |= FORCE_EPP_MODE;

	ret = pcmcia_loop_config(link, parport_config_check, NULL);
	if (ret)
		goto failed;

	if (!link->irq)
		goto failed;
	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	/* resource[0] is the SPP base, resource[1] the ECP/EPP window. */
	p = parport_pc_probe_port(link->resource[0]->start,
				  link->resource[1]->start,
				  link->irq, PARPORT_DMA_NONE,
				  &link->dev, IRQF_SHARED);
	if (p == NULL) {
		printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
		       "0x%3x, irq %u failed\n",
		       (unsigned int) link->resource[0]->start,
		       link->irq);
		goto failed;
	}

	p->modes |= PARPORT_MODE_PCSPP;
	if (epp_mode)
		p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
	info->ndev = 1;
	info->port = p;

	return 0;

failed:
	parport_cs_release(link);
	return -ENODEV;
} /* parport_config */
/* Unregister the parport (if one was created) and release the PCMCIA
 * resources.  Safe to call on a partially-configured socket. */
static void parport_cs_release(struct pcmcia_device *link)
{
	parport_info_t *info = link->priv;

	dev_dbg(&link->dev, "parport_release\n");

	if (info->ndev)
		parport_pc_unregister_port(info->port);
	info->ndev = 0;

	pcmcia_disable_device(link);
} /* parport_cs_release */
/* Match any card advertising the parallel-port function ID, plus two
 * explicitly known vendor/multifunction cards. */
static const struct pcmcia_device_id parport_ids[] = {
	PCMCIA_DEVICE_FUNC_ID(3),
	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
	PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, parport_ids);

static struct pcmcia_driver parport_cs_driver = {
	.owner = THIS_MODULE,
	.name = "parport_cs",
	.probe = parport_probe,
	.remove = parport_detach,
	.id_table = parport_ids,
};
/* Register the PCMCIA driver at module load. */
static int __init init_parport_cs(void)
{
	return pcmcia_register_driver(&parport_cs_driver);
}

/* Unregister it on module unload. */
static void __exit exit_parport_cs(void)
{
	pcmcia_unregister_driver(&parport_cs_driver);
}

module_init(init_parport_cs);
module_exit(exit_parport_cs);
| gpl-2.0 |
binkybear/flo | crypto/cast6.c | 9193 | 21939 | /* Kernel cryptographic api.
* cast6.c - Cast6 cipher algorithm [rfc2612].
*
* CAST-256 (*cast6*) is a DES like Substitution-Permutation Network (SPN)
* cryptosystem built upon the CAST-128 (*cast5*) [rfc2144] encryption
* algorithm.
*
* Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
/* Cipher geometry: CAST-256 has a 128-bit block and 128..256-bit keys. */
#define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16
#define CAST6_MAX_KEY_SIZE 32

/* Expanded key schedule: 12 rounds, each with four 32-bit masking
 * keys (Km) and four rotation keys (Kr), per RFC 2612. */
struct cast6_ctx {
	u32 Km[12][4];
	u8 Kr[12][4];
};
/*
 * The three CAST round functions (RFC 2612, section 2.2).  Each mixes
 * the masking key m with data D, rotates by r, then combines four S-box
 * lookups; they differ only in which of +, ^, - is applied at each
 * stage.  I is a scratch u32 the caller must declare.
 */
#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
	(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
	(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
	(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static const u32 s1[256] = {
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3,
0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1,
0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac,
0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0,
0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290,
0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad,
0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f,
0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5,
0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5,
0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427,
0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d,
0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e,
0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf,
0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241,
0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b,
0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265,
0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3,
0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6,
0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6,
0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da,
0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc,
0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f,
0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4,
0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af,
0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a,
0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf,
0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198,
0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db,
0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c,
0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
};
static const u32 s2[256] = {
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086,
0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb,
0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f,
0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154,
0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181,
0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c,
0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a,
0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c,
0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1,
0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9,
0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf,
0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c,
0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122,
0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402,
0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53,
0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6,
0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6,
0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676,
0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb,
0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54,
0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5,
0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8,
0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc,
0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a,
0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e,
0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb,
0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4,
0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3,
0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589,
0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
0x73bfbe70, 0x83877605, 0x4523ecf1
};
static const u32 s3[256] = {
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820,
0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee,
0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf,
0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1,
0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c,
0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850,
0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e,
0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0,
0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403,
0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574,
0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83,
0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20,
0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e,
0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9,
0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff,
0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a,
0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623,
0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7,
0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb,
0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11,
0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c,
0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40,
0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1,
0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33,
0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff,
0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2,
0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38,
0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f,
0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31,
0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
0xa133c501, 0xe9d3531c, 0xee353783
};
static const u32 s4[256] = {
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30,
0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f,
0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f,
0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400,
0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061,
0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400,
0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea,
0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8,
0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25,
0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9,
0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de,
0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0,
0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d,
0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a,
0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb,
0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3,
0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254,
0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2,
0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86,
0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1,
0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca,
0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5,
0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415,
0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7,
0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe,
0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb,
0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8,
0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee,
0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979,
0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
};
static const u32 Tm[24][8] = {
{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
0x7262cdce, 0xe13cb96f, 0x5016a510 } ,
{ 0xbef090b1, 0x2dca7c52, 0x9ca467f3, 0x0b7e5394, 0x7a583f35,
0xe9322ad6, 0x580c1677, 0xc6e60218 } ,
{ 0x35bfedb9, 0xa499d95a, 0x1373c4fb, 0x824db09c, 0xf1279c3d,
0x600187de, 0xcedb737f, 0x3db55f20 } ,
{ 0xac8f4ac1, 0x1b693662, 0x8a432203, 0xf91d0da4, 0x67f6f945,
0xd6d0e4e6, 0x45aad087, 0xb484bc28 } ,
{ 0x235ea7c9, 0x9238936a, 0x01127f0b, 0x6fec6aac, 0xdec6564d,
0x4da041ee, 0xbc7a2d8f, 0x2b541930 } ,
{ 0x9a2e04d1, 0x0907f072, 0x77e1dc13, 0xe6bbc7b4, 0x5595b355,
0xc46f9ef6, 0x33498a97, 0xa2237638 } ,
{ 0x10fd61d9, 0x7fd74d7a, 0xeeb1391b, 0x5d8b24bc, 0xcc65105d,
0x3b3efbfe, 0xaa18e79f, 0x18f2d340 } ,
{ 0x87ccbee1, 0xf6a6aa82, 0x65809623, 0xd45a81c4, 0x43346d65,
0xb20e5906, 0x20e844a7, 0x8fc23048 } ,
{ 0xfe9c1be9, 0x6d76078a, 0xdc4ff32b, 0x4b29decc, 0xba03ca6d,
0x28ddb60e, 0x97b7a1af, 0x06918d50 } ,
{ 0x756b78f1, 0xe4456492, 0x531f5033, 0xc1f93bd4, 0x30d32775,
0x9fad1316, 0x0e86feb7, 0x7d60ea58 } ,
{ 0xec3ad5f9, 0x5b14c19a, 0xc9eead3b, 0x38c898dc, 0xa7a2847d,
0x167c701e, 0x85565bbf, 0xf4304760 } ,
{ 0x630a3301, 0xd1e41ea2, 0x40be0a43, 0xaf97f5e4, 0x1e71e185,
0x8d4bcd26, 0xfc25b8c7, 0x6affa468 } ,
{ 0xd9d99009, 0x48b37baa, 0xb78d674b, 0x266752ec, 0x95413e8d,
0x041b2a2e, 0x72f515cf, 0xe1cf0170 } ,
{ 0x50a8ed11, 0xbf82d8b2, 0x2e5cc453, 0x9d36aff4, 0x0c109b95,
0x7aea8736, 0xe9c472d7, 0x589e5e78 } ,
{ 0xc7784a19, 0x365235ba, 0xa52c215b, 0x14060cfc, 0x82dff89d,
0xf1b9e43e, 0x6093cfdf, 0xcf6dbb80 } ,
{ 0x3e47a721, 0xad2192c2, 0x1bfb7e63, 0x8ad56a04, 0xf9af55a5,
0x68894146, 0xd7632ce7, 0x463d1888 } ,
{ 0xb5170429, 0x23f0efca, 0x92cadb6b, 0x01a4c70c, 0x707eb2ad,
0xdf589e4e, 0x4e3289ef, 0xbd0c7590 } ,
{ 0x2be66131, 0x9ac04cd2, 0x099a3873, 0x78742414, 0xe74e0fb5,
0x5627fb56, 0xc501e6f7, 0x33dbd298 } ,
{ 0xa2b5be39, 0x118fa9da, 0x8069957b, 0xef43811c, 0x5e1d6cbd,
0xccf7585e, 0x3bd143ff, 0xaaab2fa0 } ,
{ 0x19851b41, 0x885f06e2, 0xf738f283, 0x6612de24, 0xd4ecc9c5,
0x43c6b566, 0xb2a0a107, 0x217a8ca8 } ,
{ 0x90547849, 0xff2e63ea, 0x6e084f8b, 0xdce23b2c, 0x4bbc26cd,
0xba96126e, 0x296ffe0f, 0x9849e9b0 } ,
{ 0x0723d551, 0x75fdc0f2, 0xe4d7ac93, 0x53b19834, 0xc28b83d5,
0x31656f76, 0xa03f5b17, 0x0f1946b8 }
};
/* Key-schedule rotation constants from RFC 2612; the pattern repeats
 * every four octaves, hence W() indexes this with (i % 4). */
static const u8 Tr[4][8] = {
	{ 0x13, 0x04, 0x15, 0x06, 0x17, 0x08, 0x19, 0x0a } ,
	{ 0x1b, 0x0c, 0x1d, 0x0e, 0x1f, 0x10, 0x01, 0x12 } ,
	{ 0x03, 0x14, 0x05, 0x16, 0x07, 0x18, 0x09, 0x1a } ,
	{ 0x0b, 0x1c, 0x0d, 0x1e, 0x0f, 0x00, 0x11, 0x02 }
};
/* forward octave */
static void W(u32 *key, unsigned int i)
{
u32 I;
key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
key[4] ^= F3(key[5], Tr[i % 4][2], Tm[i][2]);
key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
/*
 * cast6_setkey - expand a user key into the round subkeys.
 * @tfm:     transform owning the cast6_ctx
 * @in_key:  key material, 16-32 bytes in 4-byte steps
 * @key_len: length of @in_key in bytes
 *
 * Returns 0 on success or -EINVAL (with CRYPTO_TFM_RES_BAD_KEY_LEN set)
 * if the length is not a multiple of 32 bits.  Min/max key size is
 * already enforced by the crypto API via cia_min/max_keysize.
 */
static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			unsigned key_len)
{
	int i;
	u32 key[8];
	__be32 p_key[8]; /* key zero-padded to the full 256 bits */
	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	if (key_len % 4 != 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* sizeof(p_key) instead of a magic "32" so the pad always
	 * tracks the declared buffer size. */
	memset(p_key, 0, sizeof(p_key));
	memcpy(p_key, in_key, key_len);

	/* Load the padded key as eight big-endian words A..H. */
	for (i = 0; i < 8; i++)
		key[i] = be32_to_cpu(p_key[i]);

	/* Two key-schedule octaves per round, then harvest the
	 * rotation (low 5 bits) and masking subkeys per RFC 2612. */
	for (i = 0; i < 12; i++) {
		W(key, 2 * i);
		W(key, 2 * i + 1);
		c->Kr[i][0] = key[0] & 0x1f;
		c->Kr[i][1] = key[2] & 0x1f;
		c->Kr[i][2] = key[4] & 0x1f;
		c->Kr[i][3] = key[6] & 0x1f;
		c->Km[i][0] = key[7];
		c->Km[i][1] = key[5];
		c->Km[i][2] = key[3];
		c->Km[i][3] = key[1];
	}

	return 0;
}
/*forward quad round*/
static void Q(u32 *block, u8 *Kr, u32 *Km)
{
u32 I;
block[2] ^= F1(block[3], Kr[0], Km[0]);
block[1] ^= F2(block[2], Kr[1], Km[1]);
block[0] ^= F3(block[1], Kr[2], Km[2]);
block[3] ^= F1(block[0], Kr[3], Km[3]);
}
/*reverse quad round*/
static void QBAR(u32 *block, u8 *Kr, u32 *Km)
{
u32 I;
block[3] ^= F1(block[0], Kr[3], Km[3]);
block[0] ^= F3(block[1], Kr[2], Km[2]);
block[1] ^= F2(block[2], Kr[1], Km[1]);
block[2] ^= F1(block[3], Kr[0], Km[0]);
}
/*
 * cast6_encrypt - encrypt one 16-byte block in place of the 12-round
 * CAST-256 structure: six forward quad rounds followed by six reverse
 * quad rounds (RFC 2612).
 */
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	int round;

	/* Load the plaintext as four big-endian 32-bit words. */
	for (round = 0; round < 4; round++)
		block[round] = be32_to_cpu(src[round]);

	/* Rounds 0-5: forward quad rounds. */
	for (round = 0; round < 6; round++)
		Q(block, c->Kr[round], c->Km[round]);
	/* Rounds 6-11: reverse quad rounds. */
	for (round = 6; round < 12; round++)
		QBAR(block, c->Kr[round], c->Km[round]);

	/* Store the ciphertext back in big-endian order. */
	for (round = 0; round < 4; round++)
		dst[round] = cpu_to_be32(block[round]);
}
/*
 * cast6_decrypt - decrypt one 16-byte block: the inverse of
 * cast6_encrypt(), running the rounds in reverse with Q and QBAR
 * swapped.
 */
static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	int round;

	/* Load the ciphertext as four big-endian 32-bit words. */
	for (round = 0; round < 4; round++)
		block[round] = be32_to_cpu(src[round]);

	/* Undo rounds 11..6 (which were QBAR on encryption). */
	for (round = 11; round >= 6; round--)
		Q(block, c->Kr[round], c->Km[round]);
	/* Undo rounds 5..0 (which were Q on encryption). */
	for (round = 5; round >= 0; round--)
		QBAR(block, c->Kr[round], c->Km[round]);

	/* Store the plaintext back in big-endian order. */
	for (round = 0; round < 4; round++)
		dst[round] = cpu_to_be32(block[round]);
}
/* Registration template for the generic (software) CAST-256 cipher. */
static struct crypto_alg alg = {
	.cra_name = "cast6",
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_ctx),
	.cra_alignmask = 3,	/* buffers aligned to 4 bytes for the __be32 casts */
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = CAST6_MIN_KEY_SIZE,
			.cia_max_keysize = CAST6_MAX_KEY_SIZE,
			.cia_setkey = cast6_setkey,
			.cia_encrypt = cast6_encrypt,
			.cia_decrypt = cast6_decrypt}
	}
};
/* Module init: register the cipher with the crypto API. */
static int __init cast6_mod_init(void)
{
	return crypto_register_alg(&alg);
}
/* Module exit: remove the cipher from the crypto API. */
static void __exit cast6_mod_fini(void)
{
	crypto_unregister_alg(&alg);
}
module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
| gpl-2.0 |
Valera1978/android_kernel_samsung_viennalte | samples/kfifo/bytestream-example.c | 10729 | 4083 | /*
* Sample kfifo byte stream implementation
*
* Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
*
* Released under the GPL version 2 only.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>
/*
* This module shows how to create a byte stream fifo.
*/
/* fifo size in elements (bytes) */
#define FIFO_SIZE 32
/* name of the proc entry */
#define PROC_FIFO "bytestream-fifo"
/* lock for procfs read access */
static DEFINE_MUTEX(read_lock);
/* lock for procfs write access */
static DEFINE_MUTEX(write_lock);
/*
* define DYNAMIC in this example for a dynamically allocated fifo.
*
* Otherwise the fifo storage will be a part of the fifo structure.
*/
#if 0
#define DYNAMIC
#endif
#ifdef DYNAMIC
static struct kfifo test;
#else
static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
#endif
/* The exact byte sequence testfunc() must drain from the fifo after
 * its puts, gets, skip and refill steps. */
static const unsigned char expected_result[FIFO_SIZE] = {
	3, 4, 5, 6, 7, 8, 9, 0,
	1, 20, 21, 22, 23, 24, 25, 26,
	27, 28, 29, 30, 31, 32, 33, 34,
	35, 36, 37, 38, 39, 40, 41, 42,
};
/*
 * testfunc - self-test of the byte-stream fifo.
 * Exercises kfifo_in/out/put/get/skip/peek and checks the drained
 * bytes against expected_result[].  Returns 0 on success, -EIO on
 * any mismatch.
 *
 * Fix: 'ret' is unsigned int, so print it with %u rather than %d
 * (mismatched printk format specifiers are undefined behavior and
 * trip -Wformat).
 */
static int __init testfunc(void)
{
	unsigned char buf[6];
	unsigned char i, j;
	unsigned int ret;

	printk(KERN_INFO "byte stream fifo test start\n");
	/* put string into the fifo */
	kfifo_in(&test, "hello", 5);
	/* put values into the fifo */
	for (i = 0; i != 10; i++)
		kfifo_put(&test, &i);
	/* show the number of used elements */
	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
	/* get max of 5 bytes from the fifo */
	i = kfifo_out(&test, buf, 5);
	printk(KERN_INFO "buf: %.*s\n", i, buf);
	/* get max of 2 elements from the fifo */
	ret = kfifo_out(&test, buf, 2);
	printk(KERN_INFO "ret: %u\n", ret);
	/* and put it back to the end of the fifo */
	ret = kfifo_in(&test, buf, ret);
	printk(KERN_INFO "ret: %u\n", ret);
	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);
	/* put values into the fifo until is full */
	for (i = 20; kfifo_put(&test, &i); i++)
		;
	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
	/* show the first value without removing from the fifo */
	if (kfifo_peek(&test, &i))
		printk(KERN_INFO "%d\n", i);
	/* check the correctness of all values in the fifo */
	j = 0;
	while (kfifo_get(&test, &i)) {
		printk(KERN_INFO "item = %d\n", i);
		if (i != expected_result[j++]) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	if (j != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");
	return 0;
}
/*
 * fifo_write - procfs write handler: append user data to the fifo.
 * Serialized by write_lock; returns the number of bytes copied in,
 * or a negative errno (-ERESTARTSYS if interrupted while waiting
 * for the lock, or the kfifo_from_user() error).
 */
static ssize_t fifo_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;
	if (mutex_lock_interruptible(&write_lock))
		return -ERESTARTSYS;
	ret = kfifo_from_user(&test, buf, count, &copied);
	mutex_unlock(&write_lock);
	/* 0 from kfifo_from_user means success: report bytes consumed */
	return ret ? ret : copied;
}
/*
 * fifo_read - procfs read handler: drain fifo contents to userspace.
 * Serialized by read_lock; returns bytes copied out or a negative
 * errno, mirroring fifo_write().
 */
static ssize_t fifo_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;
	if (mutex_lock_interruptible(&read_lock))
		return -ERESTARTSYS;
	ret = kfifo_to_user(&test, buf, count, &copied);
	mutex_unlock(&read_lock);
	/* 0 from kfifo_to_user means success: report bytes produced */
	return ret ? ret : copied;
}
/* File operations for /proc/bytestream-fifo; the fifo is a byte
 * stream, so no meaningful seek (noop_llseek). */
static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.read		= fifo_read,
	.write		= fifo_write,
	.llseek		= noop_llseek,
};
/*
 * example_init - set up the fifo, run the self-test, create the proc
 * entry.  Returns 0 on success or a negative errno.
 *
 * Cleanup refactor: the original duplicated the
 * "#ifdef DYNAMIC kfifo_free() #endif" stanza on both failure paths;
 * funnel all failures through one goto label instead (standard kernel
 * error-unwind idiom).
 */
static int __init example_init(void)
{
	int ret;
#ifdef DYNAMIC
	ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return ret;
	}
#else
	INIT_KFIFO(test);
#endif
	if (testfunc() < 0) {
		ret = -EIO;
		goto err_free;
	}
	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
		ret = -ENOMEM;
		goto err_free;
	}
	return 0;

err_free:
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
	return ret;
}
/* Module exit: remove the proc entry, then release the fifo storage
 * (only allocated separately in the DYNAMIC configuration). */
static void __exit example_exit(void)
{
	remove_proc_entry(PROC_FIFO, NULL);
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
}
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
| gpl-2.0 |
dalinaum/studyak | arch/m32r/lib/csum_partial_copy.c | 13801 | 1562 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* M32R specific IP/TCP/UDP checksumming routines
* (Some code taken from MIPS architecture)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf Electronics GmbH
* Copyright (C) 1998, 1999 Ralf Baechle
* Copyright (C) 2001-2005 Hiroyuki Kondo, Hirokazu Takata
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
/*
* Copy while checksumming, otherwise like csum_partial
*/
/*
 * csum_partial_copy_nocheck - checksum a kernel buffer while copying it.
 * Like csum_partial(), folding @sum in, but also duplicates @src into
 * @dst.  No fault handling: both buffers must be valid kernel memory.
 */
__wsum
csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
{
	__wsum result;

	/* Checksum the source first, then mirror it into dst. */
	result = csum_partial(src, len, sum);
	memcpy(dst, src, len);
	return result;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
/*
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
/*
 * csum_partial_copy_from_user - copy from userspace and checksum.
 * If the copy faults partway, the uncopied tail of @dst is zeroed,
 * *err_ptr is set to -EFAULT, and only the bytes actually copied are
 * checksummed.
 */
__wsum
csum_partial_copy_from_user (const void __user *src, void *dst,
			     int len, __wsum sum, int *err_ptr)
{
	int missing;	/* bytes copy_from_user() could NOT copy */
	missing = copy_from_user(dst, src, len);
	if (missing) {
		memset(dst + len - missing, 0, missing);
		*err_ptr = -EFAULT;
	}
	return csum_partial(dst, len-missing, sum);
}
EXPORT_SYMBOL(csum_partial);
| gpl-2.0 |
djvoleur/kernel_samsung_exynos7420 | block/partitions/mac.c | 2538 | 3444 | /*
* fs/partitions/mac.c
*
* Code extracted from drivers/block/genhd.c
* Copyright (C) 1991-1998 Linus Torvalds
* Re-organised Feb 1998 Russell King
*/
#include <linux/ctype.h>
#include "check.h"
#include "mac.h"
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
extern void note_bootable_part(dev_t dev, int part, int goodness);
#endif
/*
* Code to understand MacOS partition tables.
*/
/*
 * mac_fix_string - strip trailing blanks from a fixed-width MacOS
 * name field in place, NUL-terminating it.  Stops at the first
 * non-space character scanning back from the end.
 */
static inline void mac_fix_string(char *stg, int len)
{
	char *end = stg + len;

	while (end > stg && end[-1] == ' ')
		*--end = 0;
}
/*
 * mac_partition - parse an Apple Partition Map (APM) label.
 * Returns 1 if a Mac partition map was found and parsed, 0 if the
 * disk is not Mac-labelled, -1 on read error.
 *
 * Fix: the source had the token "&sect" mangled into the literal
 * character '§' (HTML-entity corruption) in three read_part_sector()
 * calls, which does not compile; restored to taking the address of
 * the local Sector.
 */
int mac_partition(struct parsed_partitions *state)
{
	Sector sect;
	unsigned char *data;
	int slot, blocks_in_map;
	unsigned secsize;
#ifdef CONFIG_PPC_PMAC
	int found_root = 0;
	int found_root_goodness = 0;
#endif
	struct mac_partition *part;
	struct mac_driver_desc *md;

	/* Get 0th block and look at the first partition map entry. */
	md = read_part_sector(state, 0, &sect);
	if (!md)
		return -1;
	if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
		put_dev_sector(sect);
		return 0;
	}
	secsize = be16_to_cpu(md->block_size);
	put_dev_sector(sect);

	/* Map entry 1 lives at native block 1, i.e. byte offset secsize. */
	data = read_part_sector(state, secsize/512, &sect);
	if (!data)
		return -1;
	part = (struct mac_partition *) (data + secsize%512);
	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
		put_dev_sector(sect);
		return 0;		/* not a MacOS disk */
	}
	/* Bound the on-disk entry count before trusting it. */
	blocks_in_map = be32_to_cpu(part->map_count);
	if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
		put_dev_sector(sect);
		return 0;
	}
	if (blocks_in_map >= state->limit)
		blocks_in_map = state->limit - 1;
	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
	for (slot = 1; slot <= blocks_in_map; ++slot) {
		int pos = slot * secsize;
		put_dev_sector(sect);
		data = read_part_sector(state, pos/512, &sect);
		if (!data)
			return -1;
		part = (struct mac_partition *) (data + pos%512);
		if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC)
			break;
		put_partition(state, slot,
			be32_to_cpu(part->start_block) * (secsize/512),
			be32_to_cpu(part->block_count) * (secsize/512));
		if (!strnicmp(part->type, "Linux_RAID", 10))
			state->parts[slot].flags = ADDPART_FLAG_RAID;
#ifdef CONFIG_PPC_PMAC
		/*
		 * If this is the first bootable partition, tell the
		 * setup code, in case it wants to make this the root.
		 */
		if (machine_is(powermac)) {
			int goodness = 0;
			mac_fix_string(part->processor, 16);
			mac_fix_string(part->name, 32);
			mac_fix_string(part->type, 32);
			if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE)
			    && strcasecmp(part->processor, "powerpc") == 0)
				goodness++;
			if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0
			    || (strnicmp(part->type, "Linux", 5) == 0
			        && strcasecmp(part->type, "Linux_swap") != 0)) {
				int i, l;
				goodness++;
				l = strlen(part->name);
				if (strcmp(part->name, "/") == 0)
					goodness++;
				for (i = 0; i <= l - 4; ++i) {
					if (strnicmp(part->name + i, "root",
						     4) == 0) {
						goodness += 2;
						break;
					}
				}
				if (strnicmp(part->name, "swap", 4) == 0)
					goodness--;
			}
			/* Remember the best (highest-goodness) candidate. */
			if (goodness > found_root_goodness) {
				found_root = slot;
				found_root_goodness = goodness;
			}
		}
#endif /* CONFIG_PPC_PMAC */
	}
#ifdef CONFIG_PPC_PMAC
	if (found_root_goodness)
		note_bootable_part(state->bdev->bd_dev, found_root,
				   found_root_goodness);
#endif
	put_dev_sector(sect);
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	return 1;
}
| gpl-2.0 |
supersonicninja/NinjaKernelHW01E | drivers/scsi/sd_dif.c | 3306 | 12680 | /*
* sd_dif.c - SCSI Data Integrity Field
*
* Copyright (C) 2007, 2008 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*/
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
#include <net/checksum.h>
#include "sd.h"
typedef __u16 (csum_fn) (void *, unsigned int);
/* Guard-tag generator, CRC flavour: the T10 CRC16 over one sector,
 * byte-swapped for the on-wire (big-endian) DIF tuple.
 * NOTE(review): returned through plain __u16, not __be16 — width is
 * right, but sparse would flag the endianness; confirm against the
 * tuple consumers before "fixing". */
static __u16 sd_dif_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}
/* Guard-tag generator, IP-checksum flavour (cheaper than CRC; used by
 * initiators that negotiate the IP checksum protection format). */
static __u16 sd_dif_ip_fn(void *data, unsigned int len)
{
	return ip_compute_csum(data, len);
}
/*
* Type 1 and Type 2 protection use the same format: 16 bit guard tag,
* 16 bit app tag, 32 bit reference tag.
*/
/*
 * Fill one DIF tuple per sector: guard tag computed by @fn over the
 * sector data, ref tag = low 32 bits of the sector LBA, app tag 0.
 */
static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		sdt->guard_tag = fn(buf, bix->sector_size);
		sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
		sdt->app_tag = 0;
		buf += bix->sector_size;
		sector++;
	}
}
/* Type 1 tuple generation with the T10 CRC guard. */
static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_crc_fn);
}
/* Type 1 tuple generation with the IP-checksum guard. */
static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_ip_fn);
}
/*
 * Verify the Type 1 protection information for each sector in the
 * exchange: skip unwritten sectors, then check the ref tag against
 * the expected LBA and recompute the guard tag with @fn.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	__u16 csum;
	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff)
			return 0;
		/* Bad ref tag received from disk */
		if (sdt->ref_tag == 0xffffffff) {
			printk(KERN_ERR
			       "%s: bad phys ref tag on sector %lu\n",
			       bix->disk_name, (unsigned long)sector);
			return -EIO;
		}
		/* Ref tag must equal the low 32 bits of the LBA. */
		if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
			printk(KERN_ERR
			       "%s: ref tag error on sector %lu (rcvd %u)\n",
			       bix->disk_name, (unsigned long)sector,
			       be32_to_cpu(sdt->ref_tag));
			return -EIO;
		}
		/* Recompute the guard over the sector data and compare. */
		csum = fn(buf, bix->sector_size);
		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}
		buf += bix->sector_size;
		sector++;
	}
	return 0;
}
/* blk_integrity ->verify_fn thunk: Type 1 tuples, CRC guard tags. */
static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_crc_fn);
}

/* blk_integrity ->verify_fn thunk: Type 1 tuples, IP-checksum guard tags. */
static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_ip_fn);
}
/*
* Functions for interleaving and deinterleaving application tags
*/
/* Scatter two bytes of application tag per sector into the tuples. */
static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *tuple = prot;
	const u8 *src = tag_buf;
	unsigned int sect;

	for (sect = 0; sect < sectors; sect++, tuple++, src += 2) {
		/* App tag bytes are stored big-endian in the tag buffer. */
		tuple->app_tag = src[0] << 8 | src[1];
		/* 0xffff is reserved to mark unwritten sectors. */
		BUG_ON(tuple->app_tag == 0xffff);
	}
}

/* Gather the 16-bit application tag of each tuple into two bytes. */
static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *tuple = prot;
	u8 *dst = tag_buf;
	unsigned int sect;

	for (sect = 0; sect < sectors; sect++, tuple++, dst += 2) {
		dst[0] = (tuple->app_tag & 0xff00) >> 8;
		dst[1] = tuple->app_tag & 0xff;
	}
}
/* Integrity profile: Type 1/2 tuple format with T10 CRC guard tags. */
static struct blk_integrity dif_type1_integrity_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= sd_dif_type1_generate_crc,
	.verify_fn		= sd_dif_type1_verify_crc,
	.get_tag_fn		= sd_dif_type1_get_tag,
	.set_tag_fn		= sd_dif_type1_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,	/* enabled later if DIF + ATO */
};

/* Integrity profile: Type 1/2 tuple format with IP-checksum guard tags. */
static struct blk_integrity dif_type1_integrity_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= sd_dif_type1_generate_ip,
	.verify_fn		= sd_dif_type1_verify_ip,
	.get_tag_fn		= sd_dif_type1_get_tag,
	.set_tag_fn		= sd_dif_type1_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,	/* enabled later if DIF + ATO */
};
/*
* Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
* tag space.
*/
/*
 * Fill in Type 3 tuples: only the guard checksum is meaningful; the
 * app and ref tags are opaque storage and are zeroed here.
 */
static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	struct sd_dif_tuple *tuple = bix->prot_buf;
	void *data = bix->data_buf;
	unsigned int done;

	for (done = 0; done < bix->data_size; done += bix->sector_size) {
		tuple->guard_tag = fn(data, bix->sector_size);
		tuple->ref_tag = 0;
		tuple->app_tag = 0;

		data += bix->sector_size;
		tuple++;
	}
}
/* blk_integrity ->generate_fn thunk: Type 3 tuples, CRC guard tags. */
static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_crc_fn);
}

/* blk_integrity ->generate_fn thunk: Type 3 tuples, IP-checksum guard tags. */
static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_ip_fn);
}
/*
 * Verify Type 3 tuples: only the guard checksum can be checked; the
 * ref tag carries no sector information for Type 3.
 *
 * Returns 0 on success (or when an "unwritten" tuple — both tags all
 * ones — is seen), -EIO on a guard-tag mismatch.
 */
static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;	/* used for log messages only */
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
			return 0;

		/* Guard tags are compared in raw byte order. */
		csum = fn(buf, bix->sector_size);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += bix->sector_size;
		sector++;
	}

	return 0;
}
/* blk_integrity ->verify_fn thunk: Type 3 tuples, CRC guard tags. */
static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_crc_fn);
}

/* blk_integrity ->verify_fn thunk: Type 3 tuples, IP-checksum guard tags. */
static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_ip_fn);
}
/*
 * Interleave 6 bytes of opaque tag data per sector into Type 3 tuples:
 * 16-bit app tag followed by 32-bit ref tag, big-endian byte order in
 * the tag buffer.
 */
static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		sdt->app_tag = tag[j] << 8 | tag[j+1];
		sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
			tag[j+4] << 8 | tag[j+5];
	}
}
/*
 * Deinterleave the 6 bytes of opaque tag data (16-bit app tag +
 * 32-bit ref tag) from each Type 3 tuple into the tag buffer.
 */
static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	/*
	 * Each tuple yields 6 tag bytes, so advance the output index by
	 * 6 per sector — mirroring sd_dif_type3_set_tag().  The previous
	 * stride of 2 made consecutive sectors overwrite each other's
	 * ref-tag bytes in the output buffer.
	 */
	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		tag[j] = (sdt->app_tag & 0xff00) >> 8;
		tag[j+1] = sdt->app_tag & 0xff;
		tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
		tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
		tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
		tag[j+5] = sdt->ref_tag & 0xff;
		BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
	}
}
/* Integrity profile: Type 3 tuple format with T10 CRC guard tags. */
static struct blk_integrity dif_type3_integrity_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= sd_dif_type3_generate_crc,
	.verify_fn		= sd_dif_type3_verify_crc,
	.get_tag_fn		= sd_dif_type3_get_tag,
	.set_tag_fn		= sd_dif_type3_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,	/* enabled later if DIF + ATO */
};

/* Integrity profile: Type 3 tuple format with IP-checksum guard tags. */
static struct blk_integrity dif_type3_integrity_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= sd_dif_type3_generate_ip,
	.verify_fn		= sd_dif_type3_verify_ip,
	.get_tag_fn		= sd_dif_type3_get_tag,
	.set_tag_fn		= sd_dif_type3_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,	/* enabled later if DIF + ATO */
};
/*
* Configure exchange of protection information between OS and HBA.
*/
/*
 * Configure exchange of protection information between OS and HBA:
 * pick the integrity profile matching the disk's protection type and
 * the HBA's guard-tag capability, register it with the block layer,
 * and enable application-tag space if DIF + ATO are available.
 */
void sd_dif_config_host(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;
	struct gendisk *disk = sdkp->disk;
	u8 type = sdkp->protection_type;
	struct blk_integrity *profile;
	int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, type);
	dix = scsi_host_dix_capable(sdp->host, type);

	/* Fall back to DIX-only (type 0) if the HBA supports that. */
	if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
		dif = 0;
		dix = 1;
	}

	if (!dix)
		return;

	/* Enable DMA of protection information */
	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
		if (type == SD_DIF_TYPE3_PROTECTION)
			profile = &dif_type3_integrity_ip;
		else
			profile = &dif_type1_integrity_ip;
	} else {
		if (type == SD_DIF_TYPE3_PROTECTION)
			profile = &dif_type3_integrity_crc;
		else
			profile = &dif_type1_integrity_crc;
	}

	blk_integrity_register(disk, profile);

	sd_printk(KERN_NOTICE, sdkp,
		  "Enabling DIX %s protection\n", disk->integrity->name);

	/* Signal to block layer that we support sector tagging */
	if (dif && type && sdkp->ATO) {
		if (type == SD_DIF_TYPE3_PROTECTION)
			disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
		else
			disk->integrity->tag_size = sizeof(u16);

		sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
			  disk->integrity->tag_size);
	}
}
/*
* The virtual start sector is the one that was originally submitted
* by the block layer. Due to partitioning, MD/DM cloning, etc. the
* actual physical start sector is likely to be different. Remap
* protection information to match the physical LBA.
*
* From a protocol perspective there's a slight difference between
* Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
* reference tag is seeded in the CDB. This gives us the potential to
* avoid virt->phys remapping during write. However, at read time we
* don't know whether the virt sector is the same as when we wrote it
* (we could be reading from real disk as opposed to MD/DM device. So
* we always remap Type 2 making it identical to Type 1.
*
* Type 3 does not have a reference tag so no remapping is required.
*/
/*
 * Remap each tuple's virtual reference tag to the physical start
 * sector before the request goes out to the HBA (Type 1/2 only;
 * Type 3 has no reference tag).
 *
 * Returns 0 on success, -EILSEQ if a tuple's ref tag does not match
 * the expected virtual sector.
 */
int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct bio *bio;
	struct scsi_disk *sdkp;
	struct sd_dif_tuple *sdt;
	unsigned int i, j;
	u32 phys, virt;

	sdkp = rq->bio->bi_bdev->bd_disk->private_data;

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
		return 0;

	phys = hw_sector & 0xffffffff;

	__rq_for_each_bio(bio, rq) {
		struct bio_vec *iv;

		/* Already remapped? */
		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
			break;

		virt = bio->bi_integrity->bip_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, i) {
			sdt = kmap_atomic(iv->bv_page, KM_USER0)
				+ iv->bv_offset;

			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
				if (be32_to_cpu(sdt->ref_tag) != virt)
					goto error;

				sdt->ref_tag = cpu_to_be32(phys);
				virt++;
				phys++;
			}

			kunmap_atomic(sdt, KM_USER0);
		}

		/*
		 * bio_flagged() tests (bi_flags & (1 << bit)), so the flag
		 * must be set as a shifted bit.  The old code OR'ed in the
		 * bare bit number, which set unrelated flag bits and never
		 * actually marked the bio as remapped.
		 */
		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
	}

	return 0;

error:
	kunmap_atomic(sdt, KM_USER0);
	sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
		  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
		  be16_to_cpu(sdt->app_tag));

	return -EILSEQ;
}
/*
* Remap physical sector values in the reference tag to the virtual
* values expected by the block layer.
*/
/*
 * Remap physical sector values in the reference tags back to the
 * virtual values the block layer expects, but only for the first
 * good_bytes worth of sectors.  Mismatching tuples (that are not
 * "unwritten") get a 0xffffffff "bad ref" marker instead.
 */
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct scsi_disk *sdkp;
	struct bio *bio;
	struct sd_dif_tuple *sdt;
	unsigned int i, j, sectors, sector_sz;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	/* Type 3 has no ref tag; nothing to remap for empty transfers. */
	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
		return;

	sector_sz = scmd->device->sector_size;
	sectors = good_bytes / sector_sz;

	phys = blk_rq_pos(scmd->request) & 0xffffffff;
	/* blk_rq_pos() counts 512-byte units; convert to 4K sectors. */
	if (sector_sz == 4096)
		phys >>= 3;

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_vec *iv;

		virt = bio->bi_integrity->bip_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, i) {
			sdt = kmap_atomic(iv->bv_page, KM_USER0)
				+ iv->bv_offset;

			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
				/* Stop after the last good sector. */
				if (sectors == 0) {
					kunmap_atomic(sdt, KM_USER0);
					return;
				}

				if (be32_to_cpu(sdt->ref_tag) != phys &&
				    sdt->app_tag != 0xffff)
					sdt->ref_tag = 0xffffffff; /* Bad ref */
				else
					sdt->ref_tag = cpu_to_be32(virt);

				virt++;
				phys++;
				sectors--;
			}

			kunmap_atomic(sdt, KM_USER0);
		}
	}
}
| gpl-2.0 |
halaszk/SGS3 | drivers/gpu/drm/radeon/radeon_atpx_handler.c | 3818 | 7317 | /*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Licensed under GPLv2
*
* ATPX support for both Intel/ATI
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/pci.h>
#define ATPX_VERSION 0
#define ATPX_GPU_PWR 2
#define ATPX_MUX_SELECT 3
#define ATPX_I2C_MUX_SELECT 4
#define ATPX_SWITCH_START 5
#define ATPX_SWITCH_END 6
#define ATPX_INTEGRATED 0
#define ATPX_DISCRETE 1
#define ATPX_MUX_IGD 0
#define ATPX_MUX_DISCRETE 1
/* Driver-global ATPX state; populated once during detection. */
static struct radeon_atpx_priv {
	bool atpx_detected;
	/* handle for device - and atpx */
	acpi_handle dhandle;		/* ACPI handle of the GPU PCI device */
	acpi_handle atpx_handle;	/* ATPX control method */
	acpi_handle atrm_handle;	/* ATRM ROM-fetch method */
} radeon_atpx_priv;
/*
 * Retrieve a chunk of the discrete GPU's ROM via the ACPI ATRM method.
 * @bios must have room for at least offset + len bytes.
 *
 * Returns the number of bytes copied, or -ENODEV on ACPI failure or a
 * malformed reply.
 */
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
			    int offset, int len)
{
	acpi_status status;
	union acpi_object atrm_arg_elements[2], *obj;
	struct acpi_object_list atrm_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};

	atrm_arg.count = 2;
	atrm_arg.pointer = &atrm_arg_elements[0];

	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[0].integer.value = offset;

	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[1].integer.value = len;

	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
		return -ENODEV;
	}

	obj = (union acpi_object *)buffer.pointer;

	/*
	 * Guard against a malformed reply: the object must be a buffer,
	 * and we must never copy more than the caller asked for — the
	 * old code copied obj->buffer.length bytes unconditionally,
	 * which could overflow @bios if the firmware returned more.
	 */
	if (obj->type != ACPI_TYPE_BUFFER) {
		kfree(buffer.pointer);
		return -ENODEV;
	}
	if (obj->buffer.length < (u32)len)
		len = obj->buffer.length;

	memcpy(bios+offset, obj->buffer.pointer, len);
	kfree(buffer.pointer);
	return len;
}
/*
 * Report whether @pdev's ROM should be fetched via ATRM: only after
 * ATPX has been detected, and only for the device whose ACPI handle
 * differs from the one recorded at probe time.
 */
bool radeon_atrm_supported(struct pci_dev *pdev)
{
	/* get the discrete ROM only via ATRM */
	if (!radeon_atpx_priv.atpx_detected)
		return false;

	return radeon_atpx_priv.dhandle != DEVICE_ACPI_HANDLE(&pdev->dev);
}
/* Public wrapper: fetch a ROM chunk using the stored ATRM handle. */
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
}
/*
 * Query the ATPX interface version (function 0) and log it.
 * Returns 0 on success, -ENOSYS if the ACPI call fails.
 */
static int radeon_atpx_get_version(acpi_handle handle)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2], *obj;
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = ATPX_VERSION;

	atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[1].integer.value = ATPX_VERSION;

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
		return -ENOSYS;
	}
	obj = (union acpi_object *)buffer.pointer;
	if (obj && (obj->type == ACPI_TYPE_BUFFER))
		/* Version byte sits at offset 2 of the returned buffer. */
		printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
	kfree(buffer.pointer);	/* allocated by ACPI_ALLOCATE_BUFFER */
	return 0;
}
/*
 * Execute an ATPX command: integer function code plus a 4-byte buffer
 * whose bytes 2-3 carry @value in little-endian order.
 * Returns 0 on success, -EINVAL for a NULL handle, -ENOSYS on ACPI
 * failure.
 */
static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2];
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	uint8_t buf[4] = {0};

	if (!handle)
		return -EINVAL;

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = cmd_id;

	buf[2] = value & 0xff;
	buf[3] = (value >> 8) & 0xff;

	atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
	atpx_arg_elements[1].buffer.length = 4;
	atpx_arg_elements[1].buffer.pointer = buf;

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
		return -ENOSYS;
	}
	kfree(buffer.pointer);	/* reply is not inspected, just freed */

	return 0;
}
/* Power the discrete GPU up or down. */
static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
{
	return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
}

/* Route the display mux to the given GPU. */
static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
{
	return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
}

/* Route the DDC/I2C mux to the given GPU. */
static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
{
	return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
}

/* Notify the firmware that a GPU switch is starting. */
static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
{
	return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
}

/* Notify the firmware that the GPU switch has completed. */
static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
{
	return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
}
/* vga_switcheroo callback: switch the display/I2C muxes to @id's GPU. */
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
	int gpu_id = (id == VGA_SWITCHEROO_IGD) ? ATPX_INTEGRATED
						: ATPX_DISCRETE;

	/* Bracket the mux changes with start/end notifications. */
	radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);

	return 0;
}
/* vga_switcheroo callback: change a client GPU's power state. */
static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
				   enum vga_switcheroo_state state)
{
	/* on w500 ACPI can't change intel gpu state */
	if (id == VGA_SWITCHEROO_IGD)
		return 0;

	radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
	return 0;
}
/*
 * Check whether @pdev exposes both the ATPX and ATRM ACPI methods;
 * if so, record all three handles in radeon_atpx_priv.
 */
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
	acpi_handle dhandle, atpx_handle, atrm_handle;

	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
	if (!dhandle)
		return false;

	if (ACPI_FAILURE(acpi_get_handle(dhandle, "ATPX", &atpx_handle)))
		return false;

	if (ACPI_FAILURE(acpi_get_handle(dhandle, "ATRM", &atrm_handle)))
		return false;

	radeon_atpx_priv.dhandle = dhandle;
	radeon_atpx_priv.atpx_handle = atpx_handle;
	radeon_atpx_priv.atrm_handle = atrm_handle;
	return true;
}
/* vga_switcheroo init callback: just query/log the ATPX version. */
static int radeon_atpx_init(void)
{
	/* set up the ATPX handle */
	radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
	return 0;
}

/*
 * Classify a VGA device: the device whose ACPI handle was recorded
 * during detection is reported as integrated, any other as discrete.
 */
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
		return VGA_SWITCHEROO_IGD;
	else
		return VGA_SWITCHEROO_DIS;
}
/* vga_switcheroo callbacks registered by radeon_register_atpx_handler(). */
static struct vga_switcheroo_handler radeon_atpx_handler = {
	.switchto = radeon_atpx_switchto,
	.power_state = radeon_atpx_power_state,
	.init = radeon_atpx_init,
	.get_client_id = radeon_atpx_get_client_id,
};
/*
 * Scan all VGA-class PCI devices for ATPX/ATRM methods.  Switching is
 * only enabled when ATPX is found and exactly two VGA devices exist.
 */
static bool radeon_atpx_detect(void)
{
	char acpi_method_name[255] = { 0 };
	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
	struct pci_dev *pdev = NULL;
	int vga_count = 0;
	bool has_atpx = false;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		vga_count++;
		if (radeon_atpx_pci_probe_handle(pdev))
			has_atpx = true;
	}

	if (!has_atpx || vga_count != 2)
		return false;

	acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
	printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
	       acpi_method_name);
	radeon_atpx_priv.atpx_detected = true;
	return true;
}
/* Register with vga_switcheroo iff ATPX + two VGA devices are present. */
void radeon_register_atpx_handler(void)
{
	if (!radeon_atpx_detect())
		return;

	vga_switcheroo_register_handler(&radeon_atpx_handler);
}
/* Tear down the vga_switcheroo registration (safe even if never set up). */
void radeon_unregister_atpx_handler(void)
{
	vga_switcheroo_unregister_handler();
}
| gpl-2.0 |
mir-ror/linux-yocto-dev | drivers/ide/pdc202xx_new.c | 4074 | 14838 | /*
* Promise TX2/TX4/TX2000/133 IDE driver
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Split from:
* linux/drivers/ide/pdc202xx.c Version 0.35 Mar. 30, 2002
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2005-2007 MontaVista Software, Inc.
* Portions Copyright (C) 1999 Promise Technology, Inc.
* Author: Frank Tiernan (frankt@promise.com)
* Released under terms of General Public License
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <asm/io.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#define DRV_NAME "pdc202xx_new"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
#define DBG(fmt, args...)
#endif
/*
 * Map a Promise PCI device ID to the chip family's maximum transfer
 * mode: 4 for the UltraDMA/133 parts, 3 for UltraDMA/100 parts, 0 if
 * the device is unknown.
 */
static u8 max_dma_rate(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_PROMISE_20269:
	case PCI_DEVICE_ID_PROMISE_20271:
	case PCI_DEVICE_ID_PROMISE_20275:
	case PCI_DEVICE_ID_PROMISE_20276:
	case PCI_DEVICE_ID_PROMISE_20277:
		return 4;
	case PCI_DEVICE_ID_PROMISE_20268:
	case PCI_DEVICE_ID_PROMISE_20270:
		return 3;
	default:
		return 0;
	}
}
/**
 * get_indexed_reg - Get indexed register
 * @hwif: for the port address
 * @index: index of the indexed register
 *
 * Writes the index to dma_base+1, then reads the value at dma_base+3.
 */
static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
{
	u8 value;

	outb(index, hwif->dma_base + 1);
	value = inb(hwif->dma_base + 3);

	DBG("index[%02X] value[%02X]\n", index, value);
	return value;
}

/**
 * set_indexed_reg - Set indexed register
 * @hwif: for the port address
 * @index: index of the indexed register
 * @value: value to store
 *
 * Writes the index to dma_base+1, then the value to dma_base+3.
 */
static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
{
	outb(index, hwif->dma_base + 1);
	outb(value, hwif->dma_base + 3);

	DBG("index[%02X] value[%02X]\n", index, value);
}
/*
 * ATA Timing Tables based on 133 MHz PLL output clock.
 *
 * If the PLL outputs 100 MHz clock, the ASIC hardware will set
 * the timing registers automatically when "set features" command is
 * issued to the device. However, if the PLL output clock is 133 MHz,
 * the following tables must be used.
 */

/* Indexed-register values (0x0c, 0x0d, 0x13) per PIO mode. */
static struct pio_timing {
	u8 reg0c, reg0d, reg13;
} pio_timings [] = {
	{ 0xfb, 0x2b, 0xac },	/* PIO mode 0, IORDY off, Prefetch off */
	{ 0x46, 0x29, 0xa4 },	/* PIO mode 1, IORDY off, Prefetch off */
	{ 0x23, 0x26, 0x64 },	/* PIO mode 2, IORDY off, Prefetch off */
	{ 0x27, 0x0d, 0x35 },	/* PIO mode 3, IORDY on,  Prefetch off */
	{ 0x23, 0x09, 0x25 },	/* PIO mode 4, IORDY on,  Prefetch off */
};

/* Indexed-register values (0x0e, 0x0f) per multiword DMA mode. */
static struct mwdma_timing {
	u8 reg0e, reg0f;
} mwdma_timings [] = {
	{ 0xdf, 0x5f },		/* MWDMA mode 0 */
	{ 0x6b, 0x27 },		/* MWDMA mode 1 */
	{ 0x69, 0x25 },		/* MWDMA mode 2 */
};

/* Indexed-register values (0x10, 0x11, 0x12) per UltraDMA mode. */
static struct udma_timing {
	u8 reg10, reg11, reg12;
} udma_timings [] = {
	{ 0x4a, 0x0f, 0xd5 },	/* UDMA mode 0 */
	{ 0x3a, 0x0a, 0xd0 },	/* UDMA mode 1 */
	{ 0x2a, 0x07, 0xcd },	/* UDMA mode 2 */
	{ 0x1a, 0x05, 0xcd },	/* UDMA mode 3 */
	{ 0x1a, 0x03, 0xcd },	/* UDMA mode 4 */
	{ 0x1a, 0x02, 0xcb },	/* UDMA mode 5 */
	{ 0x1a, 0x01, 0xcb },	/* UDMA mode 6 */
};
/*
 * Program the DMA timing registers for @drive.  On 133 MHz-PLL chips
 * the table values must be written by hand; on older chips only the
 * UDMA2 tHOLD quirk needs handling.
 */
static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	/* Register index offset: slave device registers start 8 higher. */
	u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
	const u8 speed = drive->dma_mode;

	/*
	 * IDE core issues SETFEATURES_XFER to the drive first (thanks to
	 * IDE_HFLAG_POST_SET_MODE in ->host_flags). PDC202xx hardware will
	 * automatically set the timing registers based on 100 MHz PLL output.
	 *
	 * As we set up the PLL to output 133 MHz for UltraDMA/133 capable
	 * chips, we must override the default register settings...
	 */
	if (max_dma_rate(dev) == 4) {
		/* Low 3 bits of the XFER code index the timing table. */
		u8 mode = speed & 0x07;

		if (speed >= XFER_UDMA_0) {
			set_indexed_reg(hwif, 0x10 + adj,
					udma_timings[mode].reg10);
			set_indexed_reg(hwif, 0x11 + adj,
					udma_timings[mode].reg11);
			set_indexed_reg(hwif, 0x12 + adj,
					udma_timings[mode].reg12);
		} else {
			set_indexed_reg(hwif, 0x0e + adj,
					mwdma_timings[mode].reg0e);
			set_indexed_reg(hwif, 0x0f + adj,
					mwdma_timings[mode].reg0f);
		}
	} else if (speed == XFER_UDMA_2) {
		/* Set tHOLD bit to 0 if using UDMA mode 2 */
		u8 tmp = get_indexed_reg(hwif, 0x10 + adj);

		set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
	}
}
/*
 * Program the PIO timing registers for @drive; only needed on the
 * 133 MHz-PLL chips (older parts self-program at 100 MHz).
 */
static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	/* Register index offset: slave device registers start 8 higher. */
	u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	if (max_dma_rate(dev) == 4) {
		set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
		set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
		set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
	}
}
/* Bit 2 of indexed register 0x0b set => 40-wire cable, else 80-wire. */
static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
{
	return (get_indexed_reg(hwif, 0x0b) & 0x04) ? ATA_CBL_PATA40
						    : ATA_CBL_PATA80;
}
/* ->resetproc hook: the actual reset happens in the caller; just log. */
static void pdcnew_reset(ide_drive_t *drive)
{
	/*
	 * Deleted this because it is redundant from the caller.
	 */
	printk(KERN_WARNING "pdc202xx_new: %s channel reset.\n",
	       drive->hwif->channel ? "Secondary" : "Primary");
}
/**
 * read_counter - Read the byte count registers
 * @dma_base: for the port address
 *
 * Assembles the 30-bit decrementing test counter from four indexed
 * register reads (two on each channel) and re-reads until two
 * consecutive samples are consistent.
 */
static long read_counter(u32 dma_base)
{
	u32 pri_dma_base = dma_base, sec_dma_base = dma_base + 0x08;
	u8 cnt0, cnt1, cnt2, cnt3;
	long count = 0, last;
	int retry = 3;

	do {
		last = count;

		/* Read the current count */
		outb(0x20, pri_dma_base + 0x01);
		cnt0 = inb(pri_dma_base + 0x03);
		outb(0x21, pri_dma_base + 0x01);
		cnt1 = inb(pri_dma_base + 0x03);
		outb(0x20, sec_dma_base + 0x01);
		cnt2 = inb(sec_dma_base + 0x03);
		outb(0x21, sec_dma_base + 0x01);
		cnt3 = inb(sec_dma_base + 0x03);

		/* Each register contributes 7-8 bits of the 30-bit value. */
		count = (cnt3 << 23) | (cnt2 << 15) | (cnt1 << 8) | cnt0;

		/*
		 * The 30-bit decrementing counter is read in 4 pieces.
		 * Incorrect value may be read when the most significant bytes
		 * are changing...
		 */
	} while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));

	DBG("cnt0[%02X] cnt1[%02X] cnt2[%02X] cnt3[%02X]\n",
	    cnt0, cnt1, cnt2, cnt3);

	return count;
}
/**
 * detect_pll_input_clock - Detect the PLL input clock in Hz.
 * @dma_base: for the port address
 *
 * Runs the chip's test counter for ~10 ms between two timestamps and
 * scales the observed decrement to Hz.
 * E.g. 16949000 on 33 MHz PCI bus, i.e. half of the PCI clock.
 */
static long detect_pll_input_clock(unsigned long dma_base)
{
	struct timeval start_time, end_time;
	long start_count, end_count;
	long pll_input, usec_elapsed;
	u8 scr1;

	start_count = read_counter(dma_base);
	do_gettimeofday(&start_time);

	/* Start the test mode */
	outb(0x01, dma_base + 0x01);
	scr1 = inb(dma_base + 0x03);
	DBG("scr1[%02X]\n", scr1);
	outb(scr1 | 0x40, dma_base + 0x03);

	/* Let the counter run for 10 ms. */
	mdelay(10);

	end_count = read_counter(dma_base);
	do_gettimeofday(&end_time);

	/* Stop the test mode */
	outb(0x01, dma_base + 0x01);
	scr1 = inb(dma_base + 0x03);
	DBG("scr1[%02X]\n", scr1);
	outb(scr1 & ~0x40, dma_base + 0x03);

	/*
	 * Calculate the input clock in Hz
	 * (the clock counter is 30 bit wide and counts down)
	 */
	usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
		(end_time.tv_usec - start_time.tv_usec);

	pll_input = ((start_count - end_count) & 0x3fffffff) / 10 *
		(10000000 / usec_elapsed);

	DBG("start[%ld] end[%ld]\n", start_count, end_count);

	return pll_input;
}
#ifdef CONFIG_PPC_PMAC
/*
 * PowerMac "Kiwi" board quirk: set a config-space bit on rev >= 3
 * controllers (sequence taken from Darwin).  No-op on other boards.
 */
static void apple_kiwi_init(struct pci_dev *pdev)
{
	struct device_node *np = pci_device_to_OF_node(pdev);
	u8 conf;

	if (np == NULL || !of_device_is_compatible(np, "kiwi-root"))
		return;

	if (pdev->revision >= 0x03) {
		/* Setup chip magic config stuff (from darwin) */
		pci_read_config_byte (pdev, 0x40, &conf);
		pci_write_config_byte(pdev, 0x40, (conf | 0x01));
	}
}
#endif /* CONFIG_PPC_PMAC */
/*
 * One-time chip initialization: measure the PLL input clock and
 * program the PLL F/R dividers so it outputs 133 MHz (Ultra133 chips)
 * or 100 MHz.  Returns 0 even if the PLL cannot be programmed; -EFAULT
 * only if BAR4 (the bus-master DMA base) is missing.
 */
static int init_chipset_pdcnew(struct pci_dev *dev)
{
	const char *name = DRV_NAME;
	unsigned long dma_base = pci_resource_start(dev, 4);
	unsigned long sec_dma_base = dma_base + 0x08;
	long pll_input, pll_output, ratio;
	int f, r;
	u8 pll_ctl0, pll_ctl1;

	if (dma_base == 0)
		return -EFAULT;

#ifdef CONFIG_PPC_PMAC
	apple_kiwi_init(dev);
#endif

	/* Calculate the required PLL output frequency */
	switch(max_dma_rate(dev)) {
	case 4: /* it's 133 MHz for Ultra133 chips */
		pll_output = 133333333;
		break;
	case 3: /* and 100 MHz for Ultra100 chips */
	default:
		pll_output = 100000000;
		break;
	}

	/*
	 * Detect PLL input clock.
	 * On some systems, where PCI bus is running at non-standard clock rate
	 * (e.g. 25 or 40 MHz), we have to adjust the cycle time.
	 * PDC20268 and newer chips employ PLL circuit to help correct timing
	 * registers setting.
	 */
	pll_input = detect_pll_input_clock(dma_base);
	printk(KERN_INFO "%s %s: PLL input clock is %ld kHz\n",
	       name, pci_name(dev), pll_input / 1000);

	/* Sanity check */
	if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
		printk(KERN_ERR "%s %s: Bad PLL input clock %ld Hz, giving up!"
		       "\n", name, pci_name(dev), pll_input);
		goto out;
	}

#ifdef DEBUG
	DBG("pll_output is %ld Hz\n", pll_output);

	/* Show the current clock value of PLL control register
	 * (maybe already configured by the BIOS)
	 */
	outb(0x02, sec_dma_base + 0x01);
	pll_ctl0 = inb(sec_dma_base + 0x03);
	outb(0x03, sec_dma_base + 0x01);
	pll_ctl1 = inb(sec_dma_base + 0x03);

	DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
#endif

	/*
	 * Calculate the ratio of F, R and NO
	 * POUT = (F + 2) / (( R + 2) * NO)
	 */
	ratio = pll_output / (pll_input / 1000);
	if (ratio < 8600L) { /* 8.6x */
		/* Using NO = 0x01, R = 0x0d */
		r = 0x0d;
	} else if (ratio < 12900L) { /* 12.9x */
		/* Using NO = 0x01, R = 0x08 */
		r = 0x08;
	} else if (ratio < 16100L) { /* 16.1x */
		/* Using NO = 0x01, R = 0x06 */
		r = 0x06;
	} else if (ratio < 64000L) { /* 64x */
		r = 0x00;
	} else {
		/* Invalid ratio */
		printk(KERN_ERR "%s %s: Bad ratio %ld, giving up!\n",
		       name, pci_name(dev), ratio);
		goto out;
	}

	f = (ratio * (r + 2)) / 1000 - 2;

	DBG("F[%d] R[%d] ratio*1000[%ld]\n", f, r, ratio);

	if (unlikely(f < 0 || f > 127)) {
		/* Invalid F */
		printk(KERN_ERR "%s %s: F[%d] invalid!\n",
		       name, pci_name(dev), f);
		goto out;
	}

	pll_ctl0 = (u8) f;
	pll_ctl1 = (u8) r;

	DBG("Writing pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);

	/* PLL control registers live at indexes 0x02/0x03 of the
	 * secondary channel's indexed register space. */
	outb(0x02,     sec_dma_base + 0x01);
	outb(pll_ctl0, sec_dma_base + 0x03);
	outb(0x03,     sec_dma_base + 0x01);
	outb(pll_ctl1, sec_dma_base + 0x03);

	/* Wait the PLL circuit to be stable */
	mdelay(30);

#ifdef DEBUG
	/*
	 * Show the current clock value of PLL control register
	 */
	outb(0x02, sec_dma_base + 0x01);
	pll_ctl0 = inb(sec_dma_base + 0x03);
	outb(0x03, sec_dma_base + 0x01);
	pll_ctl1 = inb(sec_dma_base + 0x03);

	DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
#endif

 out:
	return 0;
}
/*
 * Find the second PCI function of a PDC20270 (which sits in the
 * adjacent slot behind the DEC bridge) and fix up its IRQ if needed.
 *
 * Returns a referenced pci_dev (caller must pci_dev_put() it) or NULL.
 */
static struct pci_dev *pdc20270_get_dev2(struct pci_dev *dev)
{
	struct pci_dev *dev2;

	dev2 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn) + 1,
						PCI_FUNC(dev->devfn)));

	if (dev2 &&
	    dev2->vendor == dev->vendor &&
	    dev2->device == dev->device) {

		if (dev2->irq != dev->irq) {
			dev2->irq = dev->irq;
			printk(KERN_INFO DRV_NAME " %s: PCI config space "
					"interrupt fixed\n", pci_name(dev));
		}

		return dev2;
	}

	/*
	 * pci_get_slot() took a reference on dev2; drop it when the
	 * neighbouring device is not the second half of this chip,
	 * otherwise the reference would be leaked.  pci_dev_put(NULL)
	 * is a no-op.
	 */
	pci_dev_put(dev2);
	return NULL;
}
/* Per-port operations shared by all supported Promise chips. */
static const struct ide_port_ops pdcnew_port_ops = {
	.set_pio_mode		= pdcnew_set_pio_mode,
	.set_dma_mode		= pdcnew_set_dma_mode,
	.resetproc		= pdcnew_reset,
	.cable_detect		= pdcnew_cable_detect,
};

/* Build an ide_port_info entry, parameterized only by the UDMA mask. */
#define DECLARE_PDCNEW_DEV(udma) \
	{ \
		.name		= DRV_NAME, \
		.init_chipset	= init_chipset_pdcnew, \
		.port_ops	= &pdcnew_port_ops, \
		.host_flags	= IDE_HFLAG_POST_SET_MODE | \
				  IDE_HFLAG_ERROR_STOPS_FIFO | \
				  IDE_HFLAG_OFF_BOARD, \
		.pio_mask	= ATA_PIO4, \
		.mwdma_mask	= ATA_MWDMA2, \
		.udma_mask	= udma, \
	}

/* Indexed by the driver_data field of pdc202new_pci_tbl. */
static const struct ide_port_info pdcnew_chipsets[] = {
	/* 0: PDC202{68,70} */		DECLARE_PDCNEW_DEV(ATA_UDMA5),
	/* 1: PDC202{69,71,75,76,77} */	DECLARE_PDCNEW_DEV(ATA_UDMA6),
};
/**
 *	pdc202new_init_one	-	called when a pdc202xx is found
 *	@dev: the pdc202new device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 *
 *	Handles two quirks: a PDC20270 behind a DEC 21150 bridge is a
 *	two-function board set up as one host, and a PDC20276 behind an
 *	Intel I960(RM) is part of an I2O RAID controller and is skipped.
 */
static int pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
	struct pci_dev *bridge = dev->bus->self;

	if (dev->device == PCI_DEVICE_ID_PROMISE_20270 && bridge &&
	    bridge->vendor == PCI_VENDOR_ID_DEC &&
	    bridge->device == PCI_DEVICE_ID_DEC_21150) {
		struct pci_dev *dev2;

		/* Only the even slot carries the primary function. */
		if (PCI_SLOT(dev->devfn) & 2)
			return -ENODEV;

		dev2 = pdc20270_get_dev2(dev);

		if (dev2) {
			int ret = ide_pci_init_two(dev, dev2, d, NULL);
			if (ret < 0)
				pci_dev_put(dev2);	/* undo get_dev2 ref */
			return ret;
		}
	}

	if (dev->device == PCI_DEVICE_ID_PROMISE_20276 && bridge &&
	    bridge->vendor == PCI_VENDOR_ID_INTEL &&
	    (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
	     bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
		printk(KERN_INFO DRV_NAME " %s: attached to I2O RAID controller,"
			" skipping\n", pci_name(dev));
		return -ENODEV;
	}

	return ide_pci_init_one(dev, d, NULL);
}
/* Tear down the host and drop the PDC20270 second-function reference. */
static void pdc202new_remove(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	struct pci_dev *dev2 = NULL;

	if (host->dev[1])
		dev2 = to_pci_dev(host->dev[1]);

	ide_pci_remove(dev);
	pci_dev_put(dev2);	/* pci_dev_put(NULL) is a no-op */
}
/* driver_data is the index into pdcnew_chipsets[]. */
static const struct pci_device_id pdc202new_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 1 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl);

static struct pci_driver pdc202new_pci_driver = {
	.name		= "Promise_IDE",
	.id_table	= pdc202new_pci_tbl,
	.probe		= pdc202new_init_one,
	.remove		= pdc202new_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};
/* Module entry: register the PCI driver with the IDE core. */
static int __init pdc202new_ide_init(void)
{
	return ide_pci_register_driver(&pdc202new_pci_driver);
}

/* Module exit: unregister the PCI driver. */
static void __exit pdc202new_ide_exit(void)
{
	pci_unregister_driver(&pdc202new_pci_driver);
}

module_init(pdc202new_ide_init);
module_exit(pdc202new_ide_exit);
MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shminer/android_kernel_flounder | drivers/ide/umc8672.c | 4842 | 5088 | /*
* Copyright (C) 1995-1996 Linus Torvalds & author (see below)
*/
/*
* Principal Author/Maintainer: PODIEN@hml2.atlas.de (Wolfram Podien)
*
* This file provides support for the advanced features
* of the UMC 8672 IDE interface.
*
* Version 0.01 Initial version, hacked out of ide.c,
* and #include'd rather than compiled separately.
* This will get cleaned up in a subsequent release.
*
* Version 0.02 now configs/compiles separate from ide.c -ml
* Version 0.03 enhanced auto-tune, fix display bug
* Version 0.05 replace sti() with restore_flags() -ml
* add detection of possible race condition -ml
*/
/*
* VLB Controller Support from
* Wolfram Podien
* Rohoefe 3
* D28832 Achim
* Germany
*
* To enable UMC8672 support there must a lilo line like
* append="ide0=umc8672"...
* To set the speed according to the abilities of the hardware there must be a
* line like
* #define UMC_DRIVE0 11
* in the beginning of the driver, which sets the speed of drive 0 to 11 (there
* are some lines present). 0 - 11 are allowed speed values. These values are
* the results from the DOS speed test program supplied from UMC. 11 is the
* highest speed (about PIO mode 3)
*/
#define REALLY_SLOW_IO /* some systems can safely undef this */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
#define DRV_NAME "umc8672"
/*
* Default speeds. These can be changed with "auto-tune" and/or hdparm.
*/
#define UMC_DRIVE0 1 /* DOS measured drive speeds */
#define UMC_DRIVE1 1 /* 0 to 11 allowed */
#define UMC_DRIVE2 1 /* 11 = Fastest Speed */
#define UMC_DRIVE3 1 /* In case of crash reduce speed */
/* Per-drive UMC speed setting (0..11, 11 fastest), indexed by drive number. */
static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};

/* Map PIO mode 0..4 to an approximate UMC speed value. */
static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11};	/* rough guesses */

/* Register values for UMC speeds 0..11; rows feed the 0xd6/0xd7, 0xdc and
 * 0xd0-0xdb register groups respectively (see umc_set_speeds()). */
/*       0    1    2    3    4    5    6    7    8    9    10   11      */
static const u8 speedtab [3][12] = {
	{0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
	{0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
	{0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
};
/*
 * Write one byte to a UMC8672 configuration register: the register
 * index is written to the index port (0x108), then the value to the
 * data port (0x109).  The chip must have been unlocked first (see
 * umc_set_speeds()).
 */
static void out_umc(char port, char wert)
{
	outb_p(port, 0x108);	/* select register */
	outb_p(wert, 0x109);	/* write value */
}

/* Read one byte from a UMC8672 configuration register (index/data pair). */
static inline u8 in_umc(char port)
{
	outb_p(port, 0x108);	/* select register */
	return inb_p(0x109);	/* read value */
}
/*
 * umc_set_speeds - program the UMC8672 timing registers for all four drives
 * @speeds: four per-drive speed indices (0..11, 11 = fastest)
 *
 * The chip's configuration space is unlocked by writing 0x5A to the index
 * port, the three speedtab[] register groups are written, and the chip is
 * locked again with 0xa5.  Caller must serialise against concurrent access
 * (see umc_set_pio_mode()).
 *
 * Fix: the printk lacked a log level; KERN_INFO added.
 */
static void umc_set_speeds(u8 speeds[])
{
	int i, tmp;

	outb_p(0x5A, 0x108); /* enable umc */

	/* Registers 0xd6/0xd7 pack two drives' values per byte. */
	out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
	out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));

	/* Register 0xdc packs 2 bits per drive, drive 0 in the low bits. */
	tmp = 0;
	for (i = 3; i >= 0; i--)
		tmp = (tmp << 2) | speedtab[1][speeds[i]];
	out_umc(0xdc, tmp);

	/* One byte per drive in each of the 0xd0 and 0xd8 groups. */
	for (i = 0; i < 4; i++) {
		out_umc(0xd0 + i, speedtab[2][speeds[i]]);
		out_umc(0xd8 + i, speedtab[2][speeds[i]]);
	}
	outb_p(0xa5, 0x108); /* disable umc */

	printk(KERN_INFO "umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
		speeds[0], speeds[1], speeds[2], speeds[3]);
}
/*
 * umc_set_pio_mode - ide_port_ops .set_pio_mode callback
 *
 * Translates the requested PIO mode to a UMC speed value and reprograms
 * the chip.  Because the timing registers are shared between both
 * interfaces, the mate interface's lock is taken and the update is
 * skipped while the mate has a command in flight.
 *
 * Fix: the printk lacked a log level; KERN_INFO added.
 */
static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	ide_hwif_t *mate = hwif->mate;
	unsigned long uninitialized_var(flags);
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	printk(KERN_INFO "%s: setting umc8672 to PIO mode%d (speed %d)\n",
		drive->name, pio, pio_to_umc[pio]);

	if (mate)
		spin_lock_irqsave(&mate->lock, flags);
	if (mate && mate->handler) {
		/* other interface busy: don't touch shared timing registers */
		printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
	} else {
		/* drive->name is "hdX"; map the letter to index 0..3 */
		current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
		umc_set_speeds(current_speeds);
	}
	if (mate)
		spin_unlock_irqrestore(&mate->lock, flags);
}
/* Port operations: only PIO tuning is supported. */
static const struct ide_port_ops umc8672_port_ops = {
	.set_pio_mode	= umc_set_pio_mode,
};

/* Legacy VLB host description: no DMA, PIO modes 0-4. */
static const struct ide_port_info umc8672_port_info __initconst = {
	.name		= DRV_NAME,
	.chipset	= ide_umc8672,
	.port_ops	= &umc8672_port_ops,
	.host_flags	= IDE_HFLAG_NO_DMA,
	.pio_mask	= ATA_PIO4,
};
/*
 * Probe for a UMC8672 at its fixed legacy I/O ports (0x108/0x109).
 * Returns 0 and registers the legacy IDE host on success, non-zero on
 * failure.  The detection sequence (unlock, read ID register 0xd5,
 * expect 0xa0) is done with interrupts off so nothing else can touch
 * the index/data pair in between.
 */
static int __init umc8672_probe(void)
{
	unsigned long flags;

	if (!request_region(0x108, 2, "umc8672")) {
		printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
		return 1;
	}
	local_irq_save(flags);
	outb_p(0x5A, 0x108); /* enable umc */
	if (in_umc (0xd5) != 0xa0) {
		/* ID mismatch: no UMC8672 present */
		local_irq_restore(flags);
		printk(KERN_ERR "umc8672: not found\n");
		release_region(0x108, 2);
		return 1;
	}
	outb_p(0xa5, 0x108); /* disable umc */

	/* Program the compile-time default speeds before registering. */
	umc_set_speeds(current_speeds);
	local_irq_restore(flags);

	return ide_legacy_device_add(&umc8672_port_info, 0);
}
/* Opt-in probing: the chip lives at fixed ISA ports, so only poke them
 * when the user passes umc8672.probe=1. */
static bool probe_umc8672;

module_param_named(probe, probe_umc8672, bool, 0);
MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
/*
 * Module entry point: probe only if explicitly requested via the
 * "probe" module parameter; -ENODEV otherwise or on probe failure.
 */
static int __init umc8672_init(void)
{
	if (probe_umc8672 && umc8672_probe() == 0)
		return 0;
	return -ENODEV;
}
module_init(umc8672_init);

/* Module metadata. */
MODULE_AUTHOR("Wolfram Podien");
MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
MODULE_LICENSE("GPL");
| gpl-2.0 |
uliamo/android_kernel_zte_quantum | arch/cris/kernel/setup.c | 7146 | 5632 | /*
*
* linux/arch/cris/kernel/setup.c
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (c) 2001 Axis Communications AB
*/
/*
* This file handles the architecture-dependent parts of initialization
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/utsname.h>
#include <linux/pfn.h>
#include <linux/cpu.h>
#include <asm/setup.h>
#include <arch/system.h>
/*
* Setup options
*/
/* Required by generic console code even though CRIS has no VGA screen. */
struct screen_info screen_info;

extern int root_mountflags;
extern char _etext, _edata, _end;	/* linker-provided section boundaries */

/* Boot command line; filled in before setup_arch() runs. */
char __initdata cris_command_line[COMMAND_LINE_SIZE] = { 0, };

extern const unsigned long text_start, edata; /* set by the linker script */

extern unsigned long dram_start, dram_end;

extern unsigned long romfs_start, romfs_length, romfs_in_flash; /* from head.S */

/* Per-CPU device structures registered with sysfs by topology_init(). */
static struct cpu cpu_devices[NR_CPUS];

extern void show_etrax_copyright(void);		/* arch-vX/kernel/setup.c */
/* This mainly sets up the memory area, and can be really confusing.
*
* The physical DRAM is virtually mapped into dram_start to dram_end
* (usually c0000000 to c0000000 + DRAM size). The physical address is
* given by the macro __pa().
*
* In this DRAM, the kernel code and data is loaded, in the beginning.
* It really starts at c0004000 to make room for some special pages -
* the start address is text_start. The kernel data ends at _end. After
* this the ROM filesystem is appended (if there is any).
*
* Between this address and dram_end, we have RAM pages usable to the
* boot code and the system.
*
*/
/*
 * setup_arch - architecture-specific boot-time initialisation
 * @cmdline_p: out-parameter, pointed at the kernel command line
 *
 * Figures out where free DRAM starts (after the kernel image, or after
 * an appended ROM filesystem), hands that range to the bootmem
 * allocator, initialises paging and saves the boot command line.
 */
void __init setup_arch(char **cmdline_p)
{
	extern void init_etrax_debug(void);
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn;
	unsigned long memory_start;

	/* register an initial console printing routine for printk's */
	init_etrax_debug();

	/* we should really poll for DRAM size! */
	high_memory = &dram_end;

	if(romfs_in_flash || !romfs_length) {
		/* if we have the romfs in flash, or if there is no rom filesystem,
		 * our free area starts directly after the BSS
		 */
		memory_start = (unsigned long) &_end;
	} else {
		/* otherwise the free area starts after the ROM filesystem */
		printk("ROM fs in RAM, size %lu bytes\n", romfs_length);
		memory_start = romfs_start + romfs_length;
	}

	/* process 1's initial memory region is the kernel code/data */
	init_mm.start_code = (unsigned long) &text_start;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* min_low_pfn points to the start of DRAM, start_pfn points
	 * to the first DRAM pages after the kernel, and max_low_pfn
	 * to the end of DRAM.
	 */

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(memory_start);	/* usually c0000000 + kernel + romfs */
	max_pfn = PFN_DOWN((unsigned long)high_memory);	/* usually c0000000 + dram size */

	/*
	 * Initialize the boot-time allocator (start, end)
	 *
	 * We give it access to all our DRAM, but we could as well just have
	 * given it a small slice. No point in doing that though, unless we
	 * have non-contiguous memory and want the boot-stuff to be in, say,
	 * the smallest area.
	 *
	 * It will put a bitmap of the allocated pages in the beginning
	 * of the range we give it, but it won't mark the bitmaps pages
	 * as reserved. We have to do that ourselves below.
	 *
	 * We need to use init_bootmem_node instead of init_bootmem
	 * because our map starts at a quite high address (min_low_pfn).
	 */
	max_low_pfn = max_pfn;
	min_low_pfn = PAGE_OFFSET >> PAGE_SHIFT;

	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn,
					 max_low_pfn);

	/* And free all memory not belonging to the kernel (addr, size) */
	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn - start_pfn));

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 *
	 * Arguments are start, size
	 */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();

	*cmdline_p = cris_command_line;

#ifdef CONFIG_ETRAX_CMDLINE
	/* Fall back to the Kconfig-supplied command line if the boot
	 * loader passed an empty one. */
	if (!strcmp(cris_command_line, "")) {
		strlcpy(cris_command_line, CONFIG_ETRAX_CMDLINE, COMMAND_LINE_SIZE);
		cris_command_line[COMMAND_LINE_SIZE - 1] = '\0';
	}
#endif

	/* Save command line for future references. */
	memcpy(boot_command_line, cris_command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	/* give credit for the CRIS port */
	show_etrax_copyright();

	/* Setup utsname */
	strcpy(init_utsname()->machine, cris_machine_name);
}
/*
 * seq_file iterator over CPU ids for /proc/cpuinfo.  The cursor is the
 * CPU index + 1, cast to a pointer, so that a valid position is never
 * NULL.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= nr_cpu_ids)
		return NULL;
	return (void *)(int)(*pos + 1);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}
extern int show_cpuinfo(struct seq_file *m, void *v);

/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
/*
 * Register every possible CPU with the sysfs topology.
 *
 * Fix: the original returned from inside the loop, so only the first
 * possible CPU was ever registered (harmless on UP, wrong for
 * NR_CPUS > 1).  Now all CPUs are registered and the first failure is
 * propagated.
 */
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		int ret = register_cpu(&cpu_devices[i], i);
		if (ret)
			return ret;
	}
	return 0;
}
subsys_initcall(topology_init);
| gpl-2.0 |
n3ocort3x/android_kernel_htc_m7 | drivers/oprofile/timer_int.c | 7146 | 2604 | /**
* @file timer_int.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/profile.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include "oprof.h"
/* Per-CPU sampling timer. */
static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
/* Nonzero while profiling is active; checked by the per-CPU start/stop paths. */
static int ctr_running;
/*
 * hrtimer callback: record one sample for the interrupted context and
 * re-arm the timer one tick (TICK_NSEC) into the future.
 */
static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *timer)
{
	oprofile_add_sample(get_irq_regs(), 0);
	hrtimer_forward_now(timer, ns_to_ktime(TICK_NSEC));
	return HRTIMER_RESTART;
}
/*
 * Start the sampling timer on the current CPU.  Runs via on_each_cpu()
 * or smp_call_function_single(), so preemption is off and
 * __get_cpu_var() is safe.  Bails out if profiling was stopped in the
 * meantime (ctr_running cleared).
 */
static void __oprofile_hrtimer_start(void *unused)
{
	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);

	if (!ctr_running)
		return;

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = oprofile_hrtimer_notify;

	/* Pin to this CPU so each CPU samples itself. */
	hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
		      HRTIMER_MODE_REL_PINNED);
}
/*
 * oprofile .start callback: mark profiling running and start a timer on
 * every online CPU.  get_online_cpus() holds off hotplug so the set of
 * online CPUs can't change under us; ctr_running is set before the
 * cross-call so the hotplug notifier also starts timers on CPUs coming
 * online concurrently.
 */
static int oprofile_hrtimer_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
	put_online_cpus();
	return 0;
}
/* Cancel the sampling timer of @cpu; no-op when profiling isn't running. */
static void __oprofile_hrtimer_stop(int cpu)
{
	struct hrtimer *timer = &per_cpu(oprofile_hrtimer, cpu);

	if (ctr_running)
		hrtimer_cancel(timer);
}
/*
 * oprofile .stop callback: cancel the timer on every online CPU, then
 * clear ctr_running.  The clear happens after the per-CPU cancels (and
 * under get_online_cpus()) so __oprofile_hrtimer_stop() still sees the
 * flag set while cancelling.
 */
static void oprofile_hrtimer_stop(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		__oprofile_hrtimer_stop(cpu);
	ctr_running = 0;
	put_online_cpus();
}
/*
 * CPU hotplug notifier: start a sampling timer on a CPU coming online,
 * cancel the timer of a CPU that died.  The ctr_running check inside
 * the per-CPU helpers makes both paths no-ops while profiling is off.
 */
static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* run the start helper on the new CPU itself */
		smp_call_function_single(cpu, __oprofile_hrtimer_start,
					 NULL, 1);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__oprofile_hrtimer_stop(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata oprofile_cpu_notifier = {
	.notifier_call = oprofile_cpu_notify,
};
/* oprofile .setup callback: hook CPU hotplug so timers follow CPUs. */
static int oprofile_hrtimer_setup(void)
{
	return register_hotcpu_notifier(&oprofile_cpu_notifier);
}

/* oprofile .shutdown callback: undo oprofile_hrtimer_setup(). */
static void oprofile_hrtimer_shutdown(void)
{
	unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
/*
 * Fill in @ops for timer-based (hrtimer) profiling.  Used as the
 * fallback when no hardware performance counters are available.
 * Always succeeds.
 */
int oprofile_timer_init(struct oprofile_operations *ops)
{
	ops->cpu_type     = "timer";
	ops->create_files = NULL;
	ops->setup        = oprofile_hrtimer_setup;
	ops->shutdown     = oprofile_hrtimer_shutdown;
	ops->start        = oprofile_hrtimer_start;
	ops->stop         = oprofile_hrtimer_stop;

	printk(KERN_INFO "oprofile: using timer interrupt.\n");
	return 0;
}
| gpl-2.0 |
crimsonthunder/morfic_n4 | drivers/mfd/tc6393xb.c | 7402 | 22229 | /*
* Toshiba TC6393XB SoC support
*
* Copyright(c) 2005-2006 Chris Humbert
* Copyright(c) 2005 Dirk Opfer
* Copyright(c) 2005 Ian Molton <spyro@f2s.com>
* Copyright(c) 2007 Dmitry Baryshkov
*
* Based on code written by Sharp/Lineo for 2.4 kernels
* Based on locomo.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#define SCR_REVID 0x08 /* b Revision ID */
#define SCR_ISR 0x50 /* b Interrupt Status */
#define SCR_IMR 0x52 /* b Interrupt Mask */
#define SCR_IRR 0x54 /* b Interrupt Routing */
#define SCR_GPER 0x60 /* w GP Enable */
#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
#define SCR_CCR 0x98 /* w Clock Control */
#define SCR_PLL2CR 0x9a /* w PLL2 Control */
#define SCR_PLL1CR 0x9c /* l PLL1 Control */
#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
#define SCR_FER 0xe0 /* b Function Enable */
#define SCR_MCR 0xe4 /* w Mode Control */
#define SCR_CONFIG 0xfc /* b Configuration Control */
#define SCR_DEBUG 0xff /* b Debug */
#define SCR_CCR_CK32K BIT(0)
#define SCR_CCR_USBCK BIT(1)
#define SCR_CCR_UNK1 BIT(4)
#define SCR_CCR_MCLK_MASK (7 << 8)
#define SCR_CCR_MCLK_OFF (0 << 8)
#define SCR_CCR_MCLK_12 (1 << 8)
#define SCR_CCR_MCLK_24 (2 << 8)
#define SCR_CCR_MCLK_48 (3 << 8)
#define SCR_CCR_HCLK_MASK (3 << 12)
#define SCR_CCR_HCLK_24 (0 << 12)
#define SCR_CCR_HCLK_48 (1 << 12)
#define SCR_FER_USBEN BIT(0) /* USB host enable */
#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
#define SCR_MCR_RDY_MASK (3 << 0)
#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
#define SCR_MCR_RDY_TRISTATE (1 << 0)
#define SCR_MCR_RDY_PUSHPULL (2 << 0)
#define SCR_MCR_RDY_UNK BIT(2)
#define SCR_MCR_RDY_EN BIT(3)
#define SCR_MCR_INT_MASK (3 << 4)
#define SCR_MCR_INT_OPENDRAIN (0 << 4)
#define SCR_MCR_INT_TRISTATE (1 << 4)
#define SCR_MCR_INT_PUSHPULL (2 << 4)
#define SCR_MCR_INT_UNK BIT(6)
#define SCR_MCR_INT_EN BIT(7)
/* bits 8 - 16 are unknown */
#define TC_GPIO_BIT(i) (1 << (i & 0x7))
/*--------------------------------------------------------------------------*/
/* Per-device driver state, allocated in tc6393xb_probe(). */
struct tc6393xb {
	void __iomem		*scr;		/* mapped system configuration registers */

	struct gpio_chip	gpio;

	struct clk		*clk; /* 3,6 Mhz */

	spinlock_t		lock; /* protects RMW cycles */

	/* Register contents saved across suspend/resume. */
	struct {
		u8		fer;
		u16		ccr;
		u8		gpi_bcr[3];
		u8		gpo_dsr[3];
		u8		gpo_doecr[3];
	} suspend_state;

	struct resource		rscr;		/* core register sub-resource */
	struct resource		*iomem;		/* whole-device MEM resource */
	int			irq;		/* upstream (chained) interrupt */
	int			irq_base;	/* first of TC6393XB_NR_IRQS virtual irqs */
};

/* Indices into tc6393xb_cells[]. */
enum {
	TC6393XB_CELL_NAND,
	TC6393XB_CELL_MMC,
	TC6393XB_CELL_OHCI,
	TC6393XB_CELL_FB,
};
/*--------------------------------------------------------------------------*/
/*
 * mfd_cell .enable callback for the NAND cell: turn on the SMD buffer
 * via the GPI buffer control register.  Always returns 0.
 */
static int tc6393xb_nand_enable(struct platform_device *nand)
{
	struct platform_device *dev = to_platform_device(nand->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	/* SMD buffer on */
	dev_dbg(&dev->dev, "SMD buffer on\n");
	tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
/* Sub-cell resources.  MEM offsets are relative to the device's iomem
 * base (mfd_add_devices() gets that as the parent resource); IRQ
 * numbers are relative to tcpd->irq_base. */
static struct resource __devinitdata tc6393xb_nand_resources[] = {
	{
		.start	= 0x1000,	/* NAND controller registers */
		.end	= 0x1007,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x0100,	/* NAND configuration area */
		.end	= 0x01ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_TC6393_NAND,
		.end	= IRQ_TC6393_NAND,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct resource tc6393xb_mmc_resources[] = {
	{
		.start	= 0x800,	/* MMC controller registers */
		.end	= 0x9ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_TC6393_MMC,
		.end	= IRQ_TC6393_MMC,
		.flags	= IORESOURCE_IRQ,
	},
};

static const struct resource tc6393xb_ohci_resources[] = {
	{
		.start	= 0x3000,	/* OHCI registers */
		.end	= 0x31ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x0300,	/* OHCI configuration area */
		.end	= 0x03ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x010000,	/* USB SRAM, first half */
		.end	= 0x017fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x018000,	/* USB SRAM, second half */
		.end	= 0x01ffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_TC6393_OHCI,
		.end	= IRQ_TC6393_OHCI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct resource __devinitdata tc6393xb_fb_resources[] = {
	{
		.start	= 0x5000,	/* LCD controller registers */
		.end	= 0x51ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x0500,	/* LCD configuration area */
		.end	= 0x05ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x100000,	/* video RAM */
		.end	= 0x1fffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_TC6393_FB,
		.end	= IRQ_TC6393_FB,
		.flags	= IORESOURCE_IRQ,
	},
};
/*
 * Enable the OHCI cell: ungate the USB clock (CCR), then enable the USB
 * function block (FER).  Both are read-modify-write cycles under the
 * device lock.  Always returns 0.
 */
static int tc6393xb_ohci_enable(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
	unsigned long flags;
	u16 ccr;
	u8 fer;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
	ccr |= SCR_CCR_USBCK;
	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);

	fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
	fer |= SCR_FER_USBEN;
	tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}

/*
 * Disable the OHCI cell: reverse order of enable — disable the function
 * block first, then gate its clock.  Also used as the cell's .suspend
 * hook.  Always returns 0.
 */
static int tc6393xb_ohci_disable(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
	unsigned long flags;
	u16 ccr;
	u8 fer;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
	fer &= ~SCR_FER_USBEN;
	tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);

	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
	ccr &= ~SCR_CCR_USBCK;
	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
/*
 * Enable the framebuffer cell: switch the MCLK field of the clock
 * control register to 48 MHz.  Always returns 0.
 */
static int tc6393xb_fb_enable(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
	unsigned long flags;
	u16 ccr;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
	ccr &= ~SCR_CCR_MCLK_MASK;
	ccr |= SCR_CCR_MCLK_48;
	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}

/*
 * Disable the framebuffer cell: turn MCLK off.  Also used as the
 * cell's .suspend hook.  Always returns 0.
 */
static int tc6393xb_fb_disable(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
	unsigned long flags;
	u16 ccr;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
	ccr &= ~SCR_CCR_MCLK_MASK;
	ccr |= SCR_CCR_MCLK_OFF;
	tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
/*
 * Switch the SLCD function block on or off via the function enable
 * register.  Exported for the tmio-fb driver.  Always returns 0.
 */
int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
{
	struct platform_device *dev = to_platform_device(fb->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned long flags;
	u8 fer;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	fer = ioread8(tc6393xb->scr + SCR_FER);
	fer &= ~SCR_FER_SLCDEN;
	if (on)
		fer |= SCR_FER_SLCDEN;
	iowrite8(fer, tc6393xb->scr + SCR_FER);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tc6393xb_lcd_set_power);
/*
 * Program the pixel clock PLL (PLL1) from the video mode.  The 32-bit
 * pixclock value is written as two 16-bit halves, low word first.
 * Exported for the tmio-fb driver.  Always returns 0.
 */
int tc6393xb_lcd_mode(struct platform_device *fb,
					const struct fb_videomode *mode) {
	struct platform_device *dev = to_platform_device(fb->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	iowrite16(mode->pixclock, tc6393xb->scr + SCR_PLL1CR + 0);
	iowrite16(mode->pixclock >> 16, tc6393xb->scr + SCR_PLL1CR + 2);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tc6393xb_lcd_mode);
/* MMC cell .enable hook: initialise the tmio-mmc core at offset 0x200. */
static int tc6393xb_mmc_enable(struct platform_device *mmc)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);

	tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
		tc6393xb_mmc_resources[0].start & 0xfffe);

	return 0;
}

/* MMC cell .resume hook: re-run the tmio-mmc core resume sequence. */
static int tc6393xb_mmc_resume(struct platform_device *mmc)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);

	tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
		tc6393xb_mmc_resources[0].start & 0xfffe);

	return 0;
}

/* tmio_mmc_data .set_pwr hook: forward the power state to the core. */
static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);

	tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
}

/* tmio_mmc_data .set_clk_div hook: forward the divider setting. */
static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
{
	struct platform_device *dev = to_platform_device(mmc->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);

	tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
}

/* Platform data handed to the tmio-mmc sub-driver. */
static struct tmio_mmc_data tc6393xb_mmc_data = {
	.hclk = 24000000,	/* 24 MHz host clock */
	.set_pwr = tc6393xb_mmc_pwr,
	.set_clk_div = tc6393xb_mmc_clk_div,
};
/* The four sub-devices registered via mfd_add_devices().  NAND and FB
 * platform_data fields are filled in at probe time from the board's
 * tc6393xb_platform_data. */
static struct mfd_cell __devinitdata tc6393xb_cells[] = {
	[TC6393XB_CELL_NAND] = {
		.name = "tmio-nand",
		.enable = tc6393xb_nand_enable,
		.num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
		.resources = tc6393xb_nand_resources,
	},
	[TC6393XB_CELL_MMC] = {
		.name = "tmio-mmc",
		.enable = tc6393xb_mmc_enable,
		.resume = tc6393xb_mmc_resume,
		.platform_data = &tc6393xb_mmc_data,
		.pdata_size    = sizeof(tc6393xb_mmc_data),
		.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
		.resources = tc6393xb_mmc_resources,
	},
	[TC6393XB_CELL_OHCI] = {
		.name = "tmio-ohci",
		.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
		.resources = tc6393xb_ohci_resources,
		.enable = tc6393xb_ohci_enable,
		.suspend = tc6393xb_ohci_disable,
		.resume = tc6393xb_ohci_enable,
		.disable = tc6393xb_ohci_disable,
	},
	[TC6393XB_CELL_FB] = {
		.name = "tmio-fb",
		.num_resources = ARRAY_SIZE(tc6393xb_fb_resources),
		.resources = tc6393xb_fb_resources,
		.enable = tc6393xb_fb_enable,
		.suspend = tc6393xb_fb_disable,
		.resume = tc6393xb_fb_enable,
		.disable = tc6393xb_fb_disable,
	},
};
/*--------------------------------------------------------------------------*/
/*
 * gpio_chip .get callback: report the level of a GPIO line from the
 * GPO data set register.
 *
 * Fix: normalise the raw register bit with !! so the function returns
 * 0/1 for every line, not 0/BIT(offset&7) — callers comparing the
 * result against 1 would break for lines above bit 0.
 */
static int tc6393xb_gpio_get(struct gpio_chip *chip,
		unsigned offset)
{
	struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);

	/* XXX: does dsr also represent inputs? */
	return !!(tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
		& TC_GPIO_BIT(offset));
}
/* Read-modify-write the GPO data set register; caller holds the lock. */
static void __tc6393xb_gpio_set(struct gpio_chip *chip,
			unsigned offset, int value)
{
	struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
	u8 dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));

	dsr &= ~TC_GPIO_BIT(offset);
	if (value)
		dsr |= TC_GPIO_BIT(offset);

	tmio_iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
}

/* gpio_chip .set callback: locked wrapper around __tc6393xb_gpio_set(). */
static void tc6393xb_gpio_set(struct gpio_chip *chip,
			unsigned offset, int value)
{
	struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
	unsigned long flags;

	spin_lock_irqsave(&tc6393xb->lock, flags);
	__tc6393xb_gpio_set(chip, offset, value);
	spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
/*
 * gpio_chip .direction_input callback: clear the line's output-enable
 * bit in the GPO data OE control register.  Always returns 0.
 */
static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
			unsigned offset)
{
	struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
	unsigned long flags;
	u8 doecr;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
	doecr &= ~TC_GPIO_BIT(offset);
	tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}

/*
 * gpio_chip .direction_output callback: latch the initial output value
 * first, then set the output-enable bit, so the line never drives a
 * stale level.  Always returns 0.
 */
static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
			unsigned offset, int value)
{
	struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
	unsigned long flags;
	u8 doecr;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	__tc6393xb_gpio_set(chip, offset, value);

	doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
	doecr |= TC_GPIO_BIT(offset);
	tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
{
tc6393xb->gpio.label = "tc6393xb";
tc6393xb->gpio.base = gpio_base;
tc6393xb->gpio.ngpio = 16;
tc6393xb->gpio.set = tc6393xb_gpio_set;
tc6393xb->gpio.get = tc6393xb_gpio_get;
tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
return gpiochip_add(&tc6393xb->gpio);
}
/*--------------------------------------------------------------------------*/
/*
 * Chained handler for the upstream interrupt line: demultiplex the
 * chip's internal interrupt sources.  Keeps looping as long as any
 * unmasked bit is pending in the status register (ISR & ~IMR), handing
 * each set bit to its virtual irq.
 */
static void
tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
{
	struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
	unsigned int isr;
	unsigned int i, irq_base;

	irq_base = tc6393xb->irq_base;

	while ((isr = tmio_ioread8(tc6393xb->scr + SCR_ISR) &
			~tmio_ioread8(tc6393xb->scr + SCR_IMR)))
		for (i = 0; i < TC6393XB_NR_IRQS; i++) {
			if (isr & (1 << i))
				generic_handle_irq(irq_base + i);
		}
}
/* No explicit ack needed: status bits clear when the source is handled. */
static void tc6393xb_irq_ack(struct irq_data *data)
{
}

/* Mask a sub-interrupt by setting its bit in the interrupt mask register. */
static void tc6393xb_irq_mask(struct irq_data *data)
{
	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u8 imr;

	spin_lock_irqsave(&tc6393xb->lock, flags);
	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
	imr |= 1 << (data->irq - tc6393xb->irq_base);
	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
	spin_unlock_irqrestore(&tc6393xb->lock, flags);
}

/* Unmask a sub-interrupt by clearing its bit in the interrupt mask register. */
static void tc6393xb_irq_unmask(struct irq_data *data)
{
	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u8 imr;

	spin_lock_irqsave(&tc6393xb->lock, flags);
	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
	imr &= ~(1 << (data->irq - tc6393xb->irq_base));
	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
	spin_unlock_irqrestore(&tc6393xb->lock, flags);
}

/* irq_chip for the chip's virtual sub-interrupts. */
static struct irq_chip tc6393xb_chip = {
	.name		= "tc6393xb",
	.irq_ack	= tc6393xb_irq_ack,
	.irq_mask	= tc6393xb_irq_mask,
	.irq_unmask	= tc6393xb_irq_unmask,
};
/*
 * Set up the virtual interrupt range [irq_base, irq_base+NR_IRQS) and
 * install tc6393xb_irq() as the chained handler on the upstream line
 * (falling-edge triggered).
 */
static void tc6393xb_attach_irq(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned int irq, irq_base;

	irq_base = tc6393xb->irq_base;

	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
		irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
		irq_set_chip_data(irq, tc6393xb);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
	irq_set_handler_data(tc6393xb->irq, tc6393xb);
	irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
}

/*
 * Undo tc6393xb_attach_irq(): detach the chained handler first so no
 * demux can run while the virtual irqs are being torn down.
 */
static void tc6393xb_detach_irq(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned int irq, irq_base;

	irq_set_chained_handler(tc6393xb->irq, NULL);
	irq_set_handler_data(tc6393xb->irq, NULL);

	irq_base = tc6393xb->irq_base;

	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
		set_irq_flags(irq, 0);
		irq_set_chip(irq, NULL);
		irq_set_chip_data(irq, NULL);
	}
}
/*--------------------------------------------------------------------------*/
/*
 * Probe: map the core register window, enable clocks and board power,
 * initialise the chip's control registers, register the GPIO chip and
 * virtual irqs, then add the four mfd sub-cells.  Errors unwind through
 * the goto chain in reverse acquisition order.
 */
static int __devinit tc6393xb_probe(struct platform_device *dev)
{
	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
	struct tc6393xb *tc6393xb;
	struct resource *iomem, *rscr;
	int ret, temp;

	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
	if (!tc6393xb) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	spin_lock_init(&tc6393xb->lock);

	platform_set_drvdata(dev, tc6393xb);
	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		tc6393xb->irq = ret;
	else
		goto err_noirq;

	tc6393xb->iomem = iomem;
	tc6393xb->irq_base = tcpd->irq_base;

	tc6393xb->clk = clk_get(&dev->dev, "CLK_CK3P6MI");
	if (IS_ERR(tc6393xb->clk)) {
		ret = PTR_ERR(tc6393xb->clk);
		goto err_clk_get;
	}

	/* Carve the first 0x100 bytes out of iomem for the core registers. */
	rscr = &tc6393xb->rscr;
	rscr->name = "tc6393xb-core";
	rscr->start = iomem->start;
	rscr->end = iomem->start + 0xff;
	rscr->flags = IORESOURCE_MEM;

	ret = request_resource(iomem, rscr);
	if (ret)
		goto err_request_scr;

	tc6393xb->scr = ioremap(rscr->start, resource_size(rscr));
	if (!tc6393xb->scr) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = clk_enable(tc6393xb->clk);
	if (ret)
		goto err_clk_enable;

	/* Board hook: apply power / release reset. */
	ret = tcpd->enable(dev);
	if (ret)
		goto err_enable;

	/* Initial register state: all functions off, clocks and pin modes
	 * configured, all sub-interrupts masked except bit 6 (0xbf). */
	iowrite8(0, tc6393xb->scr + SCR_FER);
	iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
	iowrite16(SCR_CCR_UNK1 | SCR_CCR_HCLK_48,
		tc6393xb->scr + SCR_CCR);
	iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
		SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
		BIT(15), tc6393xb->scr + SCR_MCR);
	iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
	iowrite8(0, tc6393xb->scr + SCR_IRR);
	iowrite8(0xbf, tc6393xb->scr + SCR_IMR);

	printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
			tmio_ioread8(tc6393xb->scr + SCR_REVID),
			(unsigned long) iomem->start, tc6393xb->irq);

	/* gpio.base == -1 marks "no gpio chip registered" for the error
	 * path and for tc6393xb_remove(). */
	tc6393xb->gpio.base = -1;

	if (tcpd->gpio_base >= 0) {
		ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
		if (ret)
			goto err_gpio_add;
	}

	tc6393xb_attach_irq(dev);

	if (tcpd->setup) {
		ret = tcpd->setup(dev);
		if (ret)
			goto err_setup;
	}

	tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
	tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
						sizeof(*tcpd->nand_data);
	tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data;
	tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);

	ret = mfd_add_devices(&dev->dev, dev->id,
			tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
			iomem, tcpd->irq_base);

	if (!ret)
		return 0;

	if (tcpd->teardown)
		tcpd->teardown(dev);

err_setup:
	tc6393xb_detach_irq(dev);

err_gpio_add:
	if (tc6393xb->gpio.base != -1)
		temp = gpiochip_remove(&tc6393xb->gpio);
	tcpd->disable(dev);
err_enable:
	clk_disable(tc6393xb->clk);
err_clk_enable:
	iounmap(tc6393xb->scr);
err_ioremap:
	release_resource(&tc6393xb->rscr);
err_request_scr:
	clk_put(tc6393xb->clk);
err_noirq:
err_clk_get:
	kfree(tc6393xb);
err_kzalloc:
	return ret;
}
/*
 * Remove: tear everything down in reverse probe order — sub-cells,
 * board teardown hook, irqs, gpio chip, board power, clock, mapping.
 * Returns the board disable hook's status (or a gpiochip_remove error).
 */
static int __devexit tc6393xb_remove(struct platform_device *dev)
{
	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	int ret;

	mfd_remove_devices(&dev->dev);

	if (tcpd->teardown)
		tcpd->teardown(dev);

	tc6393xb_detach_irq(dev);

	if (tc6393xb->gpio.base != -1) {
		ret = gpiochip_remove(&tc6393xb->gpio);
		if (ret) {
			dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
			return ret;
		}
	}

	ret = tcpd->disable(dev);
	clk_disable(tc6393xb->clk);
	iounmap(tc6393xb->scr);
	release_resource(&tc6393xb->rscr);
	platform_set_drvdata(dev, NULL);
	clk_put(tc6393xb->clk);
	kfree(tc6393xb);

	return ret;
}
#ifdef CONFIG_PM
/*
 * Suspend: snapshot the registers that lose state (CCR, FER and the
 * three GPIO register banks), run the board suspend hook, then stop
 * the clock.  The snapshot is replayed by tc6393xb_resume() when the
 * board sets resume_restore.
 */
static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	int i, ret;

	tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
	tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);

	for (i = 0; i < 3; i++) {
		tc6393xb->suspend_state.gpo_dsr[i] =
			ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
		tc6393xb->suspend_state.gpo_doecr[i] =
			ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
		tc6393xb->suspend_state.gpi_bcr[i] =
			ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
	}
	ret = tcpd->suspend(dev);
	clk_disable(tc6393xb->clk);

	return ret;
}
/*
 * Resume: restart the clock and run the board resume hook.  If the
 * board indicates the chip lost state (resume_restore), replay the
 * probe-time register setup plus the snapshot saved by
 * tc6393xb_suspend().
 */
static int tc6393xb_resume(struct platform_device *dev)
{
	struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	int ret;
	int i;

	clk_enable(tc6393xb->clk);

	ret = tcpd->resume(dev);
	if (ret)
		return ret;

	if (!tcpd->resume_restore)
		return 0;

	iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
	iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
	iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
	iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
		  SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
		  BIT(15), tc6393xb->scr + SCR_MCR);
	iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
	iowrite8(0, tc6393xb->scr + SCR_IRR);
	iowrite8(0xbf, tc6393xb->scr + SCR_IMR);

	for (i = 0; i < 3; i++) {
		iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
					tc6393xb->scr + SCR_GPO_DSR(i));
		iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
					tc6393xb->scr + SCR_GPO_DOECR(i));
		iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
					tc6393xb->scr + SCR_GPI_BCR(i));
	}

	return 0;
}
#else
#define tc6393xb_suspend NULL
#define tc6393xb_resume NULL
#endif
/* Platform driver glue.  Registered early (subsys_initcall) so the
 * sub-cell drivers can bind during normal device_initcall time. */
static struct platform_driver tc6393xb_driver = {
	.probe = tc6393xb_probe,
	.remove = __devexit_p(tc6393xb_remove),
	.suspend = tc6393xb_suspend,
	.resume = tc6393xb_resume,

	.driver = {
		.name = "tc6393xb",
		.owner = THIS_MODULE,
	},
};

static int __init tc6393xb_init(void)
{
	return platform_driver_register(&tc6393xb_driver);
}

static void __exit tc6393xb_exit(void)
{
	platform_driver_unregister(&tc6393xb_driver);
}

subsys_initcall(tc6393xb_init);
module_exit(tc6393xb_exit);

/* Module metadata. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
MODULE_ALIAS("platform:tc6393xb");
| gpl-2.0 |
andr00ib/victor-oficial-kernel | arch/mn10300/kernel/kgdb.c | 7402 | 12404 | /* kgdb support for MN10300
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/uaccess.h>
#include <unit/leds.h>
#include <unit/serial.h>
#include <asm/debugger.h>
#include <asm/serial-regs.h>
#include "internal.h"
/*
* Software single-stepping breakpoint save (used by __switch_to())
*/
static struct thread_info *kgdb_sstep_thread;
u8 *kgdb_sstep_bp_addr[2];
u8 kgdb_sstep_bp[2];
/*
* Copy kernel exception frame registers to the GDB register file
*/
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	/* the kernel stack pointer is the address just past the saved
	 * exception frame */
	unsigned long ssp = (unsigned long) (regs + 1);

	gdb_regs[GDB_FR_D0] = regs->d0;
	gdb_regs[GDB_FR_D1] = regs->d1;
	gdb_regs[GDB_FR_D2] = regs->d2;
	gdb_regs[GDB_FR_D3] = regs->d3;
	gdb_regs[GDB_FR_A0] = regs->a0;
	gdb_regs[GDB_FR_A1] = regs->a1;
	gdb_regs[GDB_FR_A2] = regs->a2;
	gdb_regs[GDB_FR_A3] = regs->a3;
	/* report the saved SP or the kernel frame SP depending on EPSW.nSL
	 * -- NOTE(review): confirm nSL polarity against the MN10300 manual */
	gdb_regs[GDB_FR_SP] = (regs->epsw & EPSW_nSL) ? regs->sp : ssp;
	gdb_regs[GDB_FR_PC] = regs->pc;
	gdb_regs[GDB_FR_MDR] = regs->mdr;
	gdb_regs[GDB_FR_EPSW] = regs->epsw;
	gdb_regs[GDB_FR_LIR] = regs->lir;
	gdb_regs[GDB_FR_LAR] = regs->lar;
	gdb_regs[GDB_FR_MDRQ] = regs->mdrq;
	gdb_regs[GDB_FR_E0] = regs->e0;
	gdb_regs[GDB_FR_E1] = regs->e1;
	gdb_regs[GDB_FR_E2] = regs->e2;
	gdb_regs[GDB_FR_E3] = regs->e3;
	gdb_regs[GDB_FR_E4] = regs->e4;
	gdb_regs[GDB_FR_E5] = regs->e5;
	gdb_regs[GDB_FR_E6] = regs->e6;
	gdb_regs[GDB_FR_E7] = regs->e7;
	gdb_regs[GDB_FR_SSP] = ssp;
	gdb_regs[GDB_FR_MSP] = 0;	/* monitor SP not tracked */
	gdb_regs[GDB_FR_USP] = regs->sp;
	gdb_regs[GDB_FR_MCRH] = regs->mcrh;
	gdb_regs[GDB_FR_MCRL] = regs->mcrl;
	gdb_regs[GDB_FR_MCVF] = regs->mcvf;
	gdb_regs[GDB_FR_DUMMY0] = 0;
	gdb_regs[GDB_FR_DUMMY1] = 0;
	gdb_regs[GDB_FR_FS0] = 0;	/* FPU state is not reported */
}
/*
* Extracts kernel SP/PC values understandable by gdb from the values
* saved by switch_to().
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	/* only the registers preserved by switch_to() are available for a
	 * non-running task; everything else is left as-is in gdb_regs */
	gdb_regs[GDB_FR_SSP] = p->thread.sp;
	gdb_regs[GDB_FR_PC] = p->thread.pc;
	gdb_regs[GDB_FR_A3] = p->thread.a3;
	gdb_regs[GDB_FR_USP] = p->thread.usp;
	gdb_regs[GDB_FR_FPCR] = p->thread.fpu_state.fpcr;
}
/*
* Fill kernel exception frame registers from the GDB register file
*/
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	regs->d0 = gdb_regs[GDB_FR_D0];
	regs->d1 = gdb_regs[GDB_FR_D1];
	regs->d2 = gdb_regs[GDB_FR_D2];
	regs->d3 = gdb_regs[GDB_FR_D3];
	regs->a0 = gdb_regs[GDB_FR_A0];
	regs->a1 = gdb_regs[GDB_FR_A1];
	regs->a2 = gdb_regs[GDB_FR_A2];
	regs->a3 = gdb_regs[GDB_FR_A3];
	regs->sp = gdb_regs[GDB_FR_SP];
	regs->pc = gdb_regs[GDB_FR_PC];
	regs->mdr = gdb_regs[GDB_FR_MDR];
	regs->epsw = gdb_regs[GDB_FR_EPSW];
	regs->lir = gdb_regs[GDB_FR_LIR];
	regs->lar = gdb_regs[GDB_FR_LAR];
	regs->mdrq = gdb_regs[GDB_FR_MDRQ];
	regs->e0 = gdb_regs[GDB_FR_E0];
	regs->e1 = gdb_regs[GDB_FR_E1];
	regs->e2 = gdb_regs[GDB_FR_E2];
	regs->e3 = gdb_regs[GDB_FR_E3];
	regs->e4 = gdb_regs[GDB_FR_E4];
	regs->e5 = gdb_regs[GDB_FR_E5];
	regs->e6 = gdb_regs[GDB_FR_E6];
	regs->e7 = gdb_regs[GDB_FR_E7];
	/* NOTE(review): this overwrites the GDB_FR_SP value assigned above --
	 * the kernel-frame (SSP) value deliberately wins here; confirm */
	regs->sp = gdb_regs[GDB_FR_SSP];
	/* gdb_regs[GDB_FR_MSP]; */
	// regs->usp = gdb_regs[GDB_FR_USP];
	regs->mcrh = gdb_regs[GDB_FR_MCRH];
	regs->mcrl = gdb_regs[GDB_FR_MCRL];
	regs->mcvf = gdb_regs[GDB_FR_MCVF];
	/* gdb_regs[GDB_FR_DUMMY0]; */
	/* gdb_regs[GDB_FR_DUMMY1]; */
	// regs->fpcr = gdb_regs[GDB_FR_FPCR];
	// regs->fs0 = gdb_regs[GDB_FR_FS0];
}
/* 0xff is the breakpoint opcode planted both for gdb breakpoints and for the
 * software single-step emulation below */
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = { 0xff },
	.flags = KGDB_HW_BREAKPOINT,
};

/* Instruction length indexed by first opcode byte; 0 means the size must be
 * determined by further decoding in kgdb_arch_do_singlestep() */
static const unsigned char mn10300_kgdb_insn_sizes[256] =
{
	/* 1 2 3 4 5 6 7 8 9 a b c d e f */
	1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
	2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */
	1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */
	0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1  /* f */
};
/*
* Attempt to emulate single stepping by means of breakpoint instructions.
* Although there is a single-step trace flag in EPSW, its use is not
* sufficiently documented and is only intended for use with the JTAG debugger.
*/
/*
 * Plant temporary breakpoint(s) at the possible successor(s) of the
 * instruction at regs->pc: 'x' is the fall-through / primary target and 'y'
 * an optional second (branch) target.  Returns 0 or a probe_kernel_*()
 * error.
 */
static int kgdb_arch_do_singlestep(struct pt_regs *regs)
{
	unsigned long arg;
	unsigned size;
	u8 *pc = (u8 *)regs->pc, *sp = (u8 *)(regs + 1), cur;
	u8 *x = NULL, *y = NULL;
	int ret;

	ret = probe_kernel_read(&cur, pc, 1);
	if (ret < 0)
		return ret;

	/* fixed-size opcode: breakpoint simply goes after the instruction */
	size = mn10300_kgdb_insn_sizes[cur];
	if (size > 0) {
		x = pc + size;
		goto set_x;
	}

	switch (cur) {
		/* Bxx (d8,PC) */
	case 0xc0 ... 0xca:
		/* NOTE(review): only 1 byte is read into 'arg', leaving the
		 * upper bytes uninitialized, and 'arg >= 0' is always true
		 * for an unsigned type -- confirm against upstream */
		ret = probe_kernel_read(&arg, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 2;
		if (arg >= 0 && arg <= 2)
			goto set_x;
		y = pc + (s8)arg;
		goto set_x_and_y;

		/* LXX (d8,PC) */
	case 0xd0 ... 0xda:
		x = pc + 1;
		if (regs->pc == regs->lar)
			goto set_x;
		y = (u8 *)regs->lar;
		goto set_x_and_y;

		/* SETLB - loads the next four bytes into the LIR register
		 * (which mustn't include a breakpoint instruction) */
	case 0xdb:
		x = pc + 5;
		goto set_x;

		/* JMP (d16,PC) or CALL (d16,PC) */
	case 0xcc:
	case 0xcd:
		ret = probe_kernel_read(&arg, pc + 1, 2);
		if (ret < 0)
			return ret;
		x = pc + (s16)arg;
		goto set_x;

		/* JMP (d32,PC) or CALL (d32,PC) */
	case 0xdc:
	case 0xdd:
		ret = probe_kernel_read(&arg, pc + 1, 4);
		if (ret < 0)
			return ret;
		x = pc + (s32)arg;
		goto set_x;

		/* RETF */
	case 0xde:
		/* return address lives in MDR for RETF */
		x = (u8 *)regs->mdr;
		goto set_x;

		/* RET */
	case 0xdf:
		/* return address fetched from the stack at the offset
		 * encoded in the instruction */
		ret = probe_kernel_read(&arg, pc + 2, 1);
		if (ret < 0)
			return ret;
		ret = probe_kernel_read(&x, sp + (s8)arg, 4);
		if (ret < 0)
			return ret;
		goto set_x;

	case 0xf0:
		/* two-byte opcodes: decode the second byte */
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;

		if (cur >= 0xf0 && cur <= 0xf7) {
			/* JMP (An) / CALLS (An) */
			switch (cur & 3) {
			case 0: x = (u8 *)regs->a0; break;
			case 1: x = (u8 *)regs->a1; break;
			case 2: x = (u8 *)regs->a2; break;
			case 3: x = (u8 *)regs->a3; break;
			}
			goto set_x;
		} else if (cur == 0xfc) {
			/* RETS */
			ret = probe_kernel_read(&x, sp, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else if (cur == 0xfd) {
			/* RTI */
			ret = probe_kernel_read(&x, sp + 4, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else {
			x = pc + 2;
			goto set_x;
		}
		break;

		/* potential 3-byte conditional branches */
	case 0xf8:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 3;

		if (cur >= 0xe8 && cur <= 0xeb) {
			ret = probe_kernel_read(&arg, pc + 2, 1);
			if (ret < 0)
				return ret;
			if (arg >= 0 && arg <= 3)
				goto set_x;
			y = pc + (s8)arg;
			goto set_x_and_y;
		}
		goto set_x;

	case 0xfa:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;

		if (cur == 0xff) {
			/* CALLS (d16,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 2);
			if (ret < 0)
				return ret;
			x = pc + (s16)arg;
			goto set_x;
		}
		x = pc + 4;
		goto set_x;

	case 0xfc:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		if (cur == 0xff) {
			/* CALLS (d32,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 4);
			if (ret < 0)
				return ret;
			x = pc + (s32)arg;
			goto set_x;
		}
		x = pc + 6;
		goto set_x;
	}
	return 0;

	/* plant a single breakpoint, saving the displaced byte first */
set_x:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = NULL;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	kgdb_sstep_thread = current_thread_info();
	debugger_local_cache_flushinv_one(x);
	return ret;

	/* plant two breakpoints (fall-through + branch target); roll back the
	 * first if the second write fails */
set_x_and_y:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = y;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_read(&kgdb_sstep_bp[1], y, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(y, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0) {
		probe_kernel_write(kgdb_sstep_bp_addr[0],
				   &kgdb_sstep_bp[0], 1);
	} else {
		kgdb_sstep_thread = current_thread_info();
	}
	debugger_local_cache_flushinv_one(x);
	debugger_local_cache_flushinv_one(y);
	return ret;
}
/*
* Remove emplaced single-step breakpoints, returning true if we hit one of
* them.
*/
static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
{
	bool hit = false;
	u8 *x = kgdb_sstep_bp_addr[0], *y = kgdb_sstep_bp_addr[1];
	u8 opcode;

	/* only the thread that planted the breakpoints may remove them */
	if (kgdb_sstep_thread == current_thread_info()) {
		if (x) {
			if (x == (u8 *)regs->pc)
				hit = true;
			/* the displaced byte must still be our 0xff
			 * breakpoint, otherwise something clobbered it */
			if (probe_kernel_read(&opcode, x,
					      1) < 0 ||
			    opcode != 0xff)
				BUG();
			probe_kernel_write(x, &kgdb_sstep_bp[0], 1);
			debugger_local_cache_flushinv_one(x);
		}
		if (y) {
			if (y == (u8 *)regs->pc)
				hit = true;
			if (probe_kernel_read(&opcode, y,
					      1) < 0 ||
			    opcode != 0xff)
				BUG();
			probe_kernel_write(y, &kgdb_sstep_bp[1], 1);
			debugger_local_cache_flushinv_one(y);
		}
	}

	/* clear the global single-step state unconditionally */
	kgdb_sstep_bp_addr[0] = NULL;
	kgdb_sstep_bp_addr[1] = NULL;
	kgdb_sstep_thread = NULL;
	return hit;
}
/*
* Catch a single-step-pending thread being deleted and make sure the global
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
/* Free a thread_info, clearing any pending single-step state owned by it */
void free_thread_info(struct thread_info *ti)
{
	if (kgdb_sstep_thread == ti) {
		kgdb_sstep_thread = NULL;

		/* However, we may now be running in degraded mode, with most
		 * of the CPUs disabled until such a time as KGDB is reentered,
		 * so force immediate reentry */
		kgdb_breakpoint();
	}
	kfree(ti);
}
/*
* Handle unknown packets and [CcsDk] packets
* - at this point breakpoints have been installed
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	long addr;
	char *ptr;

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			regs->pc = addr;
		/* fall through - continue/step share the resume path below */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcom_in_buffer[0] == 's') {
			/* emulate single-step by planting breakpoints */
			kgdb_arch_do_singlestep(regs);
			kgdb_single_step = 1;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}
	return -1; /* this means that we do not want to exit from the handler */
}
/*
* Handle event interception
* - returns 0 if the exception should be skipped, -ERROR otherwise.
*/
int debugger_intercept(enum exception_code excep, int signo, int si_code,
		       struct pt_regs *regs)
{
	int ret;

	/* if we stopped on one of our own single-step breakpoints, report it
	 * to gdb as an ordinary trace trap */
	if (kgdb_arch_undo_singlestep(regs)) {
		excep = EXCEP_TRAP;
		signo = SIGTRAP;
		si_code = TRAP_TRACE;
	}

	ret = kgdb_handle_exception(excep, signo, si_code, regs);
	debugger_local_cache_flushinv();
	return ret;
}
/*
* Determine if we've hit a debugger special breakpoint
*/
/* Determine if we've hit the dedicated kgdb breakpoint instruction */
int at_debugger_breakpoint(struct pt_regs *regs)
{
	return regs->pc == (unsigned long)&__arch_kgdb_breakpoint;
}

/*
 * Initialise kgdb - nothing arch-specific to set up here
 */
int kgdb_arch_init(void)
{
	return 0;
}

/*
 * Arch-specific kgdb teardown - intentionally a no-op
 */
void kgdb_arch_exit(void)
{
}
#ifdef CONFIG_SMP
/* NMI entry point used to pull the other CPUs into the debugger */
void debugger_nmi_interrupt(struct pt_regs *regs, enum exception_code code)
{
	kgdb_nmicallback(arch_smp_processor_id(), regs);
	debugger_local_cache_flushinv();
}

/* Ask all other CPUs to enter kgdb */
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_jump_to_debugger();
}
#endif
| gpl-2.0 |
Split-Screen/android_kernel_hp_tenderloin | drivers/staging/speakup/buffers.c | 8426 | 2403 | #include <linux/console.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "speakup.h"
#include "spk_priv.h"
#define synthBufferSize 8192 /* currently 8K bytes */
static u_char synth_buffer[synthBufferSize]; /* guess what this is for! */
static u_char *buff_in = synth_buffer;
static u_char *buff_out = synth_buffer;
static u_char *buffer_end = synth_buffer+synthBufferSize-1;
/* These try to throttle applications by stopping the TTYs
* Note: we need to make sure that we will restart them eventually, which is
* usually not possible to do from the notifiers. TODO: it should be possible
* starting from linux 2.6.26.
*
* So we only stop when we know alive == 1 (else we discard the data anyway),
* and the alive synth will eventually call start_ttys from the thread context.
*/
/* Restart every console TTY except those speakup itself marked stopped */
void speakup_start_ttys(void)
{
	int i;

	for (i = 0; i < MAX_NR_CONSOLES; i++) {
		/* NOTE(review): tty_stopped appears to mean "stopped by the
		 * user via speakup", so leave those alone -- confirm */
		if (speakup_console[i] && speakup_console[i]->tty_stopped)
			continue;
		if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
			start_tty(vc_cons[i].d->port.tty);
	}
}
EXPORT_SYMBOL_GPL(speakup_start_ttys);
/* Throttle output by stopping every console TTY (see comment above about
 * guaranteeing an eventual restart) */
static void speakup_stop_ttys(void)
{
	int i;

	for (i = 0; i < MAX_NR_CONSOLES; i++)
		if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
			stop_tty(vc_cons[i].d->port.tty);
}
/* Number of free bytes left in the circular synth buffer. */
static int synth_buffer_free(void)
{
	if (buff_in >= buff_out)
		return synthBufferSize - (buff_in - buff_out);
	return buff_out - buff_in;
}
/* Report whether the circular synth buffer holds no pending characters. */
int synth_buffer_empty(void)
{
	return buff_in == buff_out;
}
EXPORT_SYMBOL_GPL(synth_buffer_empty);
/*
 * Queue one character for the synthesizer.  When the ring is nearly full
 * (<= 100 bytes free) kick the synth and throttle the TTYs; if it is
 * effectively full (<= 1 byte free) the character is silently dropped.
 */
void synth_buffer_add(char ch)
{
	if (!synth->alive) {
		/* This makes sure that we won't stop TTYs if there is no synth
		 * to restart them */
		return;
	}
	if (synth_buffer_free() <= 100) {
		synth_start();
		speakup_stop_ttys();
	}
	if (synth_buffer_free() <= 1)
		return;
	*buff_in++ = ch;
	/* wrap the write pointer at the end of the ring */
	if (buff_in > buffer_end)
		buff_in = synth_buffer;
}
/* Pop and return the next buffered character, or 0 when the ring is empty. */
char synth_buffer_getc(void)
{
	char ch;

	if (buff_out == buff_in)
		return 0;

	ch = *buff_out;
	buff_out++;
	if (buff_out > buffer_end)
		buff_out = synth_buffer;	/* wrap the read pointer */
	return ch;
}
EXPORT_SYMBOL_GPL(synth_buffer_getc);
/* Return the next buffered character without consuming it; 0 when empty. */
char synth_buffer_peek(void)
{
	return (buff_out == buff_in) ? 0 : *buff_out;
}
EXPORT_SYMBOL_GPL(synth_buffer_peek);
/* Discard all buffered output by resetting both ring pointers.
 * (The redundant trailing "return;" in a void function was removed.) */
void synth_buffer_clear(void)
{
	buff_in = buff_out = synth_buffer;
}
EXPORT_SYMBOL_GPL(synth_buffer_clear);
| gpl-2.0 |
michabs/linux-imx6-3.14 | arch/sh/kernel/cpu/sh2a/opcode_helper.c | 9194 | 1548 | /*
* arch/sh/kernel/cpu/sh2a/opcode_helper.c
*
* Helper for the SH-2A 32-bit opcodes.
*
* Copyright (C) 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
* introduces some 32-bit instructions. Since there are no real
* constraints on their use (and they can be mixed and matched), we need
* to check the instruction encoding to work out if it's a true 32-bit
* instruction or not.
*
* Presently, 32-bit opcodes have only slight variations in what the
* actual encoding looks like in the first-half of the instruction, which
* makes it fairly straightforward to differentiate from the 16-bit ones.
*
* First 16-bits of encoding Used by
*
* 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
* fmov.s, movu.b, movu.w
*
* 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
* bandnot.b, bldnot.b, bor.b, bornot.b,
* bxor.b
*
* 0000nnnniiii0000 movi20
* 0000nnnniiii0001 movi20s
*/
/*
 * Return the size in bytes (2 or 4) of the SH-2A instruction whose first
 * 16 bits are @insn.  The 32-bit forms are identified purely from bit
 * patterns in the first half-word (see the table in the header comment).
 */
unsigned int instruction_size(unsigned int insn)
{
	unsigned int masked = insn & 0xf00f;

	/* movi20, movi20s, and the 32-bit mov/fmov/movu family */
	if (masked == 0x0000 || masked == 0x0001 || masked == 0x3001)
		return 4;

	/* 32-bit byte-wise bit-manipulation instructions (b*.b) */
	if ((insn & 0xf08f) == 0x3009)
		return 4;

	/* everything else is an ordinary 16-bit opcode */
	return 2;
}
| gpl-2.0 |
jianpingye/linux | arch/arm/kernel/devtree.c | 1259 | 6338 | /*
* linux/arch/arm/kernel/devtree.c
*
* Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <asm/cputype.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#ifdef CONFIG_SMP
extern struct of_cpu_method __cpu_method_of_table[];
static const struct of_cpu_method __cpu_method_of_table_sentinel
__used __section(__cpu_method_of_table_end);
/*
 * Look up the node's "enable-method" property in the compiled-in CPU method
 * table and install the matching smp_ops.  Returns 1 on a match, 0 when the
 * property is absent or unrecognised.
 */
static int __init set_smp_ops_by_method(struct device_node *node)
{
	const char *method;
	struct of_cpu_method *m = __cpu_method_of_table;

	if (of_property_read_string(node, "enable-method", &method))
		return 0;

	for (; m->method; m++)
		if (!strcmp(m->method, method)) {
			smp_set_ops(m->ops);
			return 1;
		}

	return 0;
}
#else
/* !CONFIG_SMP stub: report success so no fallback lookup is attempted */
static inline int set_smp_ops_by_method(struct device_node *node)
{
	return 1;
}
#endif
/*
* arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree
* and builds the cpu logical map array containing MPIDR values related to
* logical cpus
*
* Updates the cpu possible mask with the number of parsed cpu nodes
*/
void __init arm_dt_init_cpu_maps(void)
{
	/*
	 * Temp logical map is initialized with UINT_MAX values that are
	 * considered invalid logical map entries since the logical map must
	 * contain a list of MPIDR[23:0] values where MPIDR[31:24] must
	 * read as 0.
	 */
	struct device_node *cpu, *cpus;
	int found_method = 0;
	u32 i, j, cpuidx = 1;	/* index 0 is reserved for the boot CPU */
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;

	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
	bool bootcpu_valid = false;
	cpus = of_find_node_by_path("/cpus");

	if (!cpus)
		return;

	for_each_child_of_node(cpus, cpu) {
		u32 hwid;

		if (of_node_cmp(cpu->type, "cpu"))
			continue;

		pr_debug(" * %s...\n", cpu->full_name);
		/*
		 * A device tree containing CPU nodes with missing "reg"
		 * properties is considered invalid to build the
		 * cpu_logical_map.
		 */
		if (of_property_read_u32(cpu, "reg", &hwid)) {
			pr_debug(" * %s missing reg property\n",
				 cpu->full_name);
			return;
		}

		/*
		 * 8 MSBs must be set to 0 in the DT since the reg property
		 * defines the MPIDR[23:0].
		 */
		if (hwid & ~MPIDR_HWID_BITMASK)
			return;

		/*
		 * Duplicate MPIDRs are a recipe for disaster.
		 * Scan all initialized entries and check for
		 * duplicates. If any is found just bail out.
		 * temp values were initialized to UINT_MAX
		 * to avoid matching valid MPIDR[23:0] values.
		 */
		for (j = 0; j < cpuidx; j++)
			if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
						     "properties in the DT\n"))
				return;

		/*
		 * Build a stashed array of MPIDR values. Numbering scheme
		 * requires that if detected the boot CPU must be assigned
		 * logical id 0. Other CPUs get sequential indexes starting
		 * from 1. If a CPU node with a reg property matching the
		 * boot CPU MPIDR is detected, this is recorded so that the
		 * logical map built from DT is validated and can be used
		 * to override the map created in smp_setup_processor_id().
		 */
		if (hwid == mpidr) {
			i = 0;
			bootcpu_valid = true;
		} else {
			i = cpuidx++;
		}

		if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
					       "max cores %u, capping them\n",
					       cpuidx, nr_cpu_ids)) {
			cpuidx = nr_cpu_ids;
			break;
		}

		tmp_map[i] = hwid;

		/* first per-cpu enable-method found wins */
		if (!found_method)
			found_method = set_smp_ops_by_method(cpu);
	}

	/*
	 * Fallback to an enable-method in the cpus node if nothing found in
	 * a cpu node.
	 */
	if (!found_method)
		set_smp_ops_by_method(cpus);

	if (!bootcpu_valid) {
		pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
		return;
	}

	/*
	 * Since the boot CPU node contains proper data, and all nodes have
	 * a reg property, the DT CPU list can be considered valid and the
	 * logical map created in smp_setup_processor_id() can be overridden
	 */
	for (i = 0; i < cpuidx; i++) {
		set_cpu_possible(i, true);
		cpu_logical_map(i) = tmp_map[i];
		pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
	}
}
/* Generic DT callback: does @phys_id match logical @cpu's stashed MPIDR? */
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}
/*
 * Iterator handed to of_flat_dt_match_machine(): walks the machine_desc
 * table once (iteration state kept in a static pointer) and hands back each
 * entry together with its DT compatible list.
 */
static const void * __init arch_get_next_mach(const char *const **match)
{
	static const struct machine_desc *mdesc = __arch_info_begin;
	const struct machine_desc *m = mdesc;

	if (m >= __arch_info_end)
		return NULL;

	mdesc++;
	*match = m->dt_compat;
	return m;
}
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt_phys: physical address of dt blob
*
* If a dtb was passed to the kernel in r2, then use it to choose the
* correct machine_desc and to setup the system.
*/
const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
{
	const struct machine_desc *mdesc, *mdesc_best = NULL;

#ifdef CONFIG_ARCH_MULTIPLATFORM
	/* multiplatform kernels always have a generic fallback entry */
	DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
	MACHINE_END

	mdesc_best = &__mach_desc_GENERIC_DT;
#endif

	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
		return NULL;

	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);

	if (!mdesc) {
		const char *prop;
		int size;
		unsigned long dt_root;

		early_print("\nError: unrecognized/unsupported "
			    "device tree compatible list:\n[ ");

		dt_root = of_get_flat_dt_root();
		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		/* the compatible property is a list of NUL-separated strings */
		while (size > 0) {
			early_print("'%s' ", prop);
			size -= strlen(prop) + 1;
			prop += strlen(prop) + 1;
		}
		early_print("]\n\n");

		dump_machine_table(); /* does not return */
	}

	/* We really don't want to do this, but sometimes firmware provides buggy data */
	if (mdesc->dt_fixup)
		mdesc->dt_fixup();

	early_init_dt_scan_nodes();

	/* Change machine number to match the mdesc we're using */
	__machine_arch_type = mdesc->nr;

	return mdesc;
}
| gpl-2.0 |
juston-li/mako | arch/x86/kernel/hpet.c | 1515 | 28128 | #include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/time.h>
#define HPET_MASK CLOCKSOURCE_MASK(32)
/* FSEC = 10^-15
NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000L
#define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8
#define HPET_DEV_FSB_CAP 0x1000
#define HPET_DEV_PERI_CAP 0x2000
#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
/*
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;
/* Per-comparator state for the HPET MSI clockevent devices */
struct hpet_dev {
	struct clock_event_device evt;
	unsigned int num;	/* HPET comparator (timer) number */
	int cpu;		/* CPU this timer is bound to */
	unsigned int irq;
	unsigned int flags;	/* HPET_DEV_* bits */
	char name[10];
};
/* Map a clock_event_device back to its containing hpet_dev */
inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
	return container_of(evtdev, struct hpet_dev, evt);
}
/* MMIO accessors for the HPET register block (offset @a in bytes) */
inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}
#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif
static inline void hpet_set_mapping(void)
{
	/* map the HPET MMIO window; on 64-bit also expose it via the
	 * vsyscall fixmap so userspace vclock code can read the counter */
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}
/*
* HPET command line enable / disable
*/
static int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;
/* Parse the "hpet=disable|force|verbose" kernel command-line parameter */
static int __init hpet_setup(char *str)
{
	if (str) {
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

/* "nohpet" is shorthand for "hpet=disable" */
static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);
/* HPET is usable only if ACPI found it and it wasn't disabled on the
 * command line */
static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}
/*
* HPET timer interrupt enable / disable
*/
static int hpet_legacy_int_enabled;
/**
* is_hpet_enabled - check whether the hpet timer interrupt is enabled
*/
int is_hpet_enabled(void)
{
	/* capable AND the legacy replacement interrupt route is active */
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);
/* Dump the HPET global and per-timer registers to the kernel log;
 * @function/@line identify the call site (see hpet_print_config macro) */
static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;

	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}
/*
 * Dump the HPET register state when booted with "hpet=verbose".
 * Uses the standard C99 __func__ rather than the GCC-specific
 * __FUNCTION__ alias (checkpatch flags the latter); expansion is
 * otherwise identical.
 */
#define hpet_print_config() \
do { \
	if (hpet_verbose) \
		_hpet_print_config(__func__, __LINE__); \
} while (0)
/*
* When the hpet driver (/dev/hpet) is enabled, we need to reserve
* timer 0 and timer 1 in case of RTC emulation.
*/
#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd);
/* Hand timers 0/1 (and the MSI timers) to the /dev/hpet char driver as
 * reserved, per the comment above: they are used by the kernel itself */
static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet;
	hd.hd_nirqs = nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ. Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	/* read each remaining comparator's interrupt routing from hardware */
	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif
/*
 * Common hpet info
 */
static unsigned long hpet_freq;	/* counter frequency in Hz, set at init */

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt);

/*
 * The hpet clock event device (legacy timer 0)
 */
static struct clock_event_device hpet_clockevent = {
	.name = "hpet",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode = hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.irq = 0,
	.rating = 50,
};
/* Main-counter control: stop, clear (as two 32-bit halves), start */
static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

/* Restart from zero: the counter is halted before being written */
static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}
static void hpet_resume_device(void)
{
	/* re-apply any chipset quirk needed to bring the HPET back up */
	force_hpet_resume();
}

/* clocksource resume hook: bring the device back and restart from zero */
static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}
static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	/* enable the legacy replacement interrupt route (timer 0/1 take
	 * over the PIT and RTC interrupt lines) */
	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}
/* Register HPET timer 0 as the global clock event device */
static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
static int hpet_setup_msi_irq(unsigned int irq);
/*
 * Common set_mode handler shared by the legacy timer (timer 0) and the
 * per-CPU MSI timers; @timer selects which HPET comparator to program.
 */
static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		/* convert one tick (NSEC_PER_SEC/HZ ns) to counter cycles
		 * using the clockevent's mult/shift scaling factors */
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		/* Make sure we use edge triggered interrupts */
		cfg &= ~HPET_TN_LEVEL;
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			/* MSI timer: reprogram the FSB route and rebind the
			 * irq to the timer's CPU */
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}
/*
 * Program the next oneshot event @delta counter cycles from now on
 * comparator @timer.  Returns -ETIME if the event may already have been
 * missed (see the workaround rationale below), 0 otherwise.
 */
static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on a equal comparison and neither provides a less
	 * than or equal functionality (which would require to take
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
/* Legacy replacement mode always runs on HPET channel 0. */
static void hpet_legacy_set_mode(enum clock_event_mode mode,
			struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
			struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}
/*
* HPET MSI Support
*/
#ifdef CONFIG_PCI_MSI
static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;
/*
 * irq_chip unmask callback for an HPET MSI channel: turn the timer and
 * its FSB (MSI) delivery back on in the channel config register.
 */
void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int val;

	/* unmask it */
	val = hpet_readl(HPET_Tn_CFG(hdev->num));
	val |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(val, HPET_Tn_CFG(hdev->num));
}
/*
 * irq_chip mask callback for an HPET MSI channel: disable the timer and
 * its FSB (MSI) delivery in the channel config register.
 */
void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int val;

	/* mask it */
	val = hpet_readl(HPET_Tn_CFG(hdev->num));
	val &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(val, HPET_Tn_CFG(hdev->num));
}
/*
 * Store an MSI message in the channel's FSB route registers: data word
 * at Tn_ROUTE, low address word at Tn_ROUTE + 4.
 */
void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

/*
 * Read the MSI message back from the channel's FSB route registers.
 * The hardware holds no upper address word here, so address_hi is
 * reported as 0.
 */
void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}
/* Per-cpu MSI clockevent callbacks: dispatch to the hpet_dev's own channel. */
static void hpet_msi_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_next_event(delta, evt, hdev->num);
}
/*
 * Ask the architecture code to wire @irq up as an HPET MSI vector.
 * On failure the irq number is released again and -EINVAL returned.
 */
static int hpet_setup_msi_irq(unsigned int irq)
{
	if (arch_setup_hpet_msi(irq, hpet_blockid) == 0)
		return 0;

	destroy_irq(irq);
	return -EINVAL;
}
/*
 * Allocate an irq number and set it up as the MSI vector for @dev.
 * On success dev->irq is filled in and 0 returned; -EINVAL otherwise.
 * Note: hpet_setup_msi_irq() already destroys the irq on its failure
 * path, so no cleanup is needed here.
 */
static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq;

	/* create_irq_nr() returns 0 when no irq could be allocated */
	irq = create_irq_nr(0, -1);
	if (!irq)
		return -EINVAL;

	irq_set_handler_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}
/*
 * MSI interrupt handler for a per-cpu HPET channel: forward the event
 * to the clockevent handler, or log a spurious interrupt if none is
 * installed yet.
 */
static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (hevt->event_handler) {
		hevt->event_handler(hevt);
		return IRQ_HANDLED;
	}

	printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
			dev->num);
	return IRQ_HANDLED;
}
/*
 * Request the MSI interrupt for @dev and pin its affinity to the CPU
 * this clockevent belongs to.  Returns 0 on success, -1 on failure.
 */
static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	/*
	 * NOTE(review): affinity is changed with the irq disabled —
	 * presumably to avoid racing with an in-flight interrupt.
	 */
	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
			 dev->name, dev->irq);

	return 0;
}
/*
 * Turn @hdev into the per-cpu clockevent of @cpu.  Must be called on
 * @cpu itself (see the WARN_ON) so the clockevents registration binds
 * to the right CPU.
 */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	/* Periodic mode only if the channel hardware supports it */
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	/* 0x7FFFFFFF: max positive s32 delta the comparator math allows */
	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}
#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif
/*
 * Scan HPET channels from @start_timer upwards for FSB/MSI capable ones
 * and allocate a struct hpet_dev plus an irq for each, at most one per
 * possible CPU.  The last RESERVE_TIMERS channels are left for the
 * /dev/hpet userspace driver.
 */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	if (hpet_msi_disable)
		return;

	/* CPUs whose APIC timer always runs don't need HPET per-cpu timers */
	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;
	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		/* Failed irq allocation: skip this channel, keep scanning */
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
		num_timers, num_timers_used);
}
#ifdef CONFIG_HPET
/*
 * Tell the /dev/hpet driver which channels (and irqs) the kernel has
 * claimed for per-cpu MSI clockevents, so userspace cannot grab them.
 */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif
/*
 * Claim a valid, not-yet-used MSI channel.  The USED bit is taken with
 * test_and_set_bit() so concurrent callers cannot claim the same
 * channel.  Returns NULL when none is available.
 */
static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
			(unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}
/* On-stack work item + completion used to run hpet_work on a given CPU. */
struct hpet_work_struct {
	struct delayed_work work;	/* scheduled on the target CPU */
	struct completion complete;	/* signalled when hpet_work is done */
};

/*
 * Runs on the CPU that should receive a per-cpu HPET clockevent: grab
 * an unused MSI channel (if any) and register it for this CPU, then
 * wake the waiter in hpet_cpuhp_notify().
 */
static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}
/*
 * CPU hotplug callback.  CPU_ONLINE: run hpet_work() on the new CPU
 * (via scheduled work + completion, since the clockevent must be set up
 * on that CPU) to hand it an unused MSI channel.  CPU_DEAD: release the
 * dead CPU's channel again.
 */
static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {		/* mask off modifier bits (frozen etc.) */
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_timer_on_stack(&work.work.timer);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else
/* Stubs used when CONFIG_PCI_MSI is not set: no per-cpu HPET channels. */
static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
}
#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
}
#endif
static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}
#endif
/*
* Clock source related code
*/
/* clocksource read callback: the free-running 32 bit HPET main counter. */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,		/* below TSC, above PIT/jiffies */
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
#ifdef CONFIG_X86_64
	.archdata	= { .vclock_mode = VCLOCK_HPET },
#endif
};
/*
 * Start the HPET main counter, verify it actually counts (broken or
 * emulated HPETs may not), and register it as a clocksource.
 * Returns 0 on success, -ENODEV when the counter does not move.
 */
static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
	return 0;
}
/**
* hpet_enable - Try to setup the HPET timer. Returns 1 on success.
*/
int __init hpet_enable(void)
{
	unsigned long hpet_period;
	unsigned int id;
	u64 freq;
	int i;

	if (!is_hpet_capable())
		return 0;

	/* ioremap the HPET register block */
	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	/* Reject out-of-spec tick periods */
	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femto seconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	/* Return 1 only when the legacy clockevent was registered */
	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	/* Unmap and forget the address so later code won't retry */
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}
/*
* Needs to be late, as the reserve_timer code calls kalloc !
*
* Not a problem on i386 as hpet_enable is called from late_time_init,
* but on x86_64 it is necessary !
*/
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	/* Quirk path: an HPET address forced by PCI quirks */
	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	/* hpet_enable() may have failed and unmapped the registers */
	if (!hpet_virt_address)
		return -ENODEV;

	/* Channels 0/1 are taken by legacy mode; start MSI scan after them */
	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	/* Hand out per-cpu channels to the CPUs already online */
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);
/*
 * Shut the HPET down: drop legacy replacement routing (if active) and
 * clear the global enable bit.  No-op when the HPET was never mapped.
 */
void hpet_disable(void)
{
	unsigned int cfg;

	if (!is_hpet_capable() || !hpet_virt_address)
		return;

	cfg = hpet_readl(HPET_CFG);
	if (hpet_legacy_int_enabled) {
		cfg &= ~HPET_CFG_LEGACY;
		hpet_legacy_int_enabled = 0;
	}
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
* is enabled, we support RTC interrupt functionality in software.
* RTC has 3 kinds of interrupts:
* 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
* is updated
* 2) Alarm Interrupt - generate an interrupt at a specific time of day
* 3) Periodic Interrupt - generate periodic interrupt, with frequencies
* 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
* (1) and (2) above are implemented using polling at a frequency of
* 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
* overhead. (DEFAULT_RTC_INT_FREQ)
* For (3), we use interrupts at 64Hz or user specified periodic
* frequency, whichever is higher.
*/
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>
#define DEFAULT_RTC_INT_FREQ 64
#define DEFAULT_RTC_SHIFT 6
#define RTC_NUM_INTS 1
static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;
static rtc_irq_handler irq_handler;
/*
* Check that the hpet counter c1 is ahead of the c2
*/
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	/* Signed difference handles 32 bit counter wraparound */
	s32 diff = c2 - c1;

	return diff < 0;
}
/*
* Registers a IRQ handler.
*/
/*
 * Install the RTC driver's interrupt callback.  Only one handler may be
 * registered at a time (-EBUSY otherwise); -ENODEV when the HPET is not
 * driving RTC emulation.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
/*
* Deregisters the IRQ handler registered with hpet_register_irq_handler()
* and does cleanup.
*/
/*
 * Remove the registered RTC interrupt callback and clear all emulated
 * interrupt sources.  Note: @handler is not compared against the
 * registered one — whatever is installed gets dropped.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
/*
* Timer 1 for RTC emulation. We use one shot mode, as periodic mode
* is not supported by all HPET implementations for timer 1.
*
* hpet_rtc_timer_init() is called when the rtc is initialized.
*/
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	/* Lazily compute the 64 Hz default tick in HPET cycles */
	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	/* Use the user's PIE rate only when it is above the 64 Hz base */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/* Program timer 1 one-shot; irqs off around the comparator write */
	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
/* Stop HPET timer 1, the RTC emulation channel. */
static void hpet_disable_rtc_channel(void)
{
	unsigned long val = hpet_readl(HPET_T1_CFG);

	val &= ~HPET_TN_ENABLE;
	hpet_writel(val, HPET_T1_CFG);
}
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
* Otherwise do the necessary changes and return 1.
*/
/*
 * Disable the RTC interrupt sources in @bit_mask.  When the last source
 * goes away the emulation channel is stopped immediately.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	if (!hpet_rtc_flags)
		hpet_disable_rtc_channel();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
/*
 * Enable the RTC interrupt sources in @bit_mask (RTC_UIE/RTC_AIE/
 * RTC_PIE).  The emulation channel is started when the first source is
 * enabled.
 */
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	/* Fresh UIE: resync the "previous second" tracker */
	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
/*
 * Record the emulated RTC alarm time; it is compared against the
 * current time in hpet_rtc_interrupt().
 */
int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_sec = sec;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_hour = hrs;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
/*
 * Set the rate of the emulated RTC periodic interrupt.  Frequencies at
 * or below the 64 Hz base rate are produced by skipping base ticks
 * (hpet_pie_limit); higher frequencies reprogram the channel delta
 * directly.  Returns 0 when HPET RTC emulation is not in use (or @freq
 * is invalid), 1 on success.
 */
int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	/* Guard against division by zero below */
	if (!freq)
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
/* RTC driver hook: report whether HPET emulation handles dropped irqs. */
int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
/*
 * Re-arm timer 1 for the next emulated RTC tick.  If interrupts were
 * missed (comparator already behind the counter), keep stepping the
 * comparator forward and account the lost ticks.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;	/* first loop pass is the normal re-arm */

	/* All sources masked meanwhile: stop the channel */
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}
/*
 * Timer 1 interrupt: emulate the three RTC interrupt sources (update,
 * periodic, alarm) in software and forward the combined status to the
 * registered RTC handler.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	/* Re-arm first so the next tick is not delayed by the work below */
	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	/* Wall-clock time only needed for update/alarm emulation */
	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	/* Update interrupt: once per RTC second change */
	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	/* Periodic interrupt: every hpet_pie_limit base ticks */
	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	/* Alarm interrupt: current time matches the stored alarm */
	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
			rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif
| gpl-2.0 |
hallovveen31/ICE_COLD_KERNEL | drivers/input/serio/xilinx_ps2.c | 2539 | 11368 | /*
* Xilinx XPS PS/2 device driver
*
* (c) 2005 MontaVista Software, Inc.
* (c) 2008 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#define DRIVER_NAME "xilinx_ps2"
/* Register offsets for the xps2 device */
#define XPS2_SRST_OFFSET 0x00000000 /* Software Reset register */
#define XPS2_STATUS_OFFSET 0x00000004 /* Status register */
#define XPS2_RX_DATA_OFFSET 0x00000008 /* Receive Data register */
#define XPS2_TX_DATA_OFFSET 0x0000000C /* Transmit Data register */
#define XPS2_GIER_OFFSET 0x0000002C /* Global Interrupt Enable reg */
#define XPS2_IPISR_OFFSET 0x00000030 /* Interrupt Status register */
#define XPS2_IPIER_OFFSET 0x00000038 /* Interrupt Enable register */
/* Reset Register Bit Definitions */
#define XPS2_SRST_RESET 0x0000000A /* Software Reset */
/* Status Register Bit Positions */
#define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */
#define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */
/* Bit definitions for ISR/IER registers. Both the registers have the same bit
* definitions and are only defined once. */
#define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */
#define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */
#define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */
#define XPS2_IPIXR_RX_OVF 0x00000008 /* Receive Overflow Interrupt */
#define XPS2_IPIXR_RX_ERR 0x00000010 /* Receive Error Interrupt */
#define XPS2_IPIXR_RX_FULL 0x00000020 /* Receive Data Interrupt */
/* Mask for all the Transmit Interrupts */
#define XPS2_IPIXR_TX_ALL (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_TX_ACK)
/* Mask for all the Receive Interrupts */
#define XPS2_IPIXR_RX_ALL (XPS2_IPIXR_RX_OVF | XPS2_IPIXR_RX_ERR | \
XPS2_IPIXR_RX_FULL)
/* Mask for all the Interrupts */
#define XPS2_IPIXR_ALL (XPS2_IPIXR_TX_ALL | XPS2_IPIXR_RX_ALL | \
XPS2_IPIXR_WDT_TOUT)
/* Global Interrupt Enable mask */
#define XPS2_GIER_GIE_MASK 0x80000000
/* Per-device state for one XPS PS/2 port. */
struct xps2data {
	int irq;			/* interrupt line of the port */
	spinlock_t lock;		/* serializes TX register access */
	void __iomem *base_address;	/* virt. address of control registers */
	unsigned int flags;		/* SERIO_* error flags for next byte */
	struct serio serio;		/* serio */
};
/************************************/
/* XPS PS/2 data transmission calls */
/************************************/
/**
* xps2_recv() - attempts to receive a byte from the PS/2 port.
* @drvdata: pointer to ps2 device private data structure
* @byte: address where the read data will be copied
*
* If there is any data available in the PS/2 receiver, this functions reads
* the data, otherwise it returns error.
*/
static int xps2_recv(struct xps2data *drvdata, u8 *byte)
{
	u32 sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET);

	/* Nothing pending in the PS/2 receiver */
	if (!(sr & XPS2_STATUS_RX_FULL))
		return -1;

	*byte = in_be32(drvdata->base_address + XPS2_RX_DATA_OFFSET);
	return 0;
}
/*********************/
/* Interrupt handler */
/*********************/
/*
 * Port interrupt handler: acknowledge all pending interrupt causes,
 * record error conditions in drvdata->flags and push a received byte
 * (with those flags) up to the serio layer.
 */
static irqreturn_t xps2_interrupt(int irq, void *dev_id)
{
	struct xps2data *drvdata = dev_id;
	u32 intr_sr;
	u8 c;
	int status;

	/* Get the PS/2 interrupts and clear them */
	intr_sr = in_be32(drvdata->base_address + XPS2_IPISR_OFFSET);
	out_be32(drvdata->base_address + XPS2_IPISR_OFFSET, intr_sr);

	/* Check which interrupt is active */
	if (intr_sr & XPS2_IPIXR_RX_OVF)
		dev_warn(drvdata->serio.dev.parent, "receive overrun error\n");

	if (intr_sr & XPS2_IPIXR_RX_ERR)
		drvdata->flags |= SERIO_PARITY;

	if (intr_sr & (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_WDT_TOUT))
		drvdata->flags |= SERIO_TIMEOUT;

	if (intr_sr & XPS2_IPIXR_RX_FULL) {
		status = xps2_recv(drvdata, &c);

		/* Error, if a byte is not received */
		if (status) {
			dev_err(drvdata->serio.dev.parent,
				"wrong rcvd byte count (%d)\n", status);
		} else {
			serio_interrupt(&drvdata->serio, c, drvdata->flags);
			/* flags applied to this byte only; reset for next */
			drvdata->flags = 0;
		}
	}

	return IRQ_HANDLED;
}
/*******************/
/* serio callbacks */
/*******************/
/**
* sxps2_write() - sends a byte out through the PS/2 port.
* @pserio: pointer to the serio structure of the PS/2 port
* @c: data that needs to be written to the PS/2 port
*
* This function checks if the PS/2 transmitter is empty and sends a byte.
* Otherwise it returns error. Transmission fails only when nothing is connected
* to the PS/2 port. Thats why, we do not try to resend the data in case of a
* failure.
*/
static int sxps2_write(struct serio *pserio, unsigned char c)
{
	struct xps2data *drvdata = pserio->port_data;
	unsigned long flags;
	u32 sr;
	int status = -1;

	/* Lock keeps the status-check + data-write pair atomic */
	spin_lock_irqsave(&drvdata->lock, flags);

	/* If the PS/2 transmitter is empty send a byte of data */
	sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET);
	if (!(sr & XPS2_STATUS_TX_FULL)) {
		out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c);
		status = 0;
	}

	spin_unlock_irqrestore(&drvdata->lock, flags);

	return status;
}
/**
* sxps2_open() - called when a port is opened by the higher layer.
* @pserio: pointer to the serio structure of the PS/2 device
*
* This function requests irq and enables interrupts for the PS/2 device.
*/
static int sxps2_open(struct serio *pserio)
{
	struct xps2data *drvdata = pserio->port_data;
	int error;
	u8 c;

	error = request_irq(drvdata->irq, &xps2_interrupt, 0,
				DRIVER_NAME, drvdata);
	if (error) {
		dev_err(drvdata->serio.dev.parent,
			"Couldn't allocate interrupt %d\n", drvdata->irq);
		return error;
	}

	/* start reception by enabling the interrupts */
	out_be32(drvdata->base_address + XPS2_GIER_OFFSET, XPS2_GIER_GIE_MASK);
	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, XPS2_IPIXR_RX_ALL);
	/* Drain any byte already sitting in the receiver; result ignored */
	(void)xps2_recv(drvdata, &c);

	return 0;		/* success */
}
/**
* sxps2_close() - frees the interrupt.
* @pserio: pointer to the serio structure of the PS/2 device
*
* This function frees the irq and disables interrupts for the PS/2 device.
*/
static void sxps2_close(struct serio *pserio)
{
	struct xps2data *drvdata = pserio->port_data;

	/* Disable the PS2 interrupts before releasing the handler */
	out_be32(drvdata->base_address + XPS2_GIER_OFFSET, 0x00);
	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0x00);
	free_irq(drvdata->irq, drvdata);
}
/**
* xps2_of_probe - probe method for the PS/2 device.
* @of_dev: pointer to OF device structure
* @match: pointer to the structure used for matching a device
*
* This function probes the PS/2 device in the device tree.
* It initializes the driver data structure and the hardware.
* It returns 0, if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
static int __devinit xps2_of_probe(struct platform_device *ofdev)
{
	struct resource r_irq; /* Interrupt resources */
	struct resource r_mem; /* IO mem resources */
	struct xps2data *drvdata;
	struct serio *serio;
	struct device *dev = &ofdev->dev;
	resource_size_t remap_size, phys_addr;
	int error;

	dev_info(dev, "Device Tree Probing \'%s\'\n",
			ofdev->dev.of_node->name);

	/* Get iospace for the device */
	error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
	if (error) {
		dev_err(dev, "invalid address\n");
		return error;
	}

	/* Get IRQ for the device */
	if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
		dev_err(dev, "no IRQ found\n");
		return -ENODEV;
	}

	drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL);
	if (!drvdata) {
		dev_err(dev, "Couldn't allocate device private record\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, drvdata);

	spin_lock_init(&drvdata->lock);
	drvdata->irq = r_irq.start;

	phys_addr = r_mem.start;
	remap_size = resource_size(&r_mem);
	/* Cleanup from here on uses the goto chain at the bottom */
	if (!request_mem_region(phys_addr, remap_size, DRIVER_NAME)) {
		dev_err(dev, "Couldn't lock memory region at 0x%08llX\n",
			(unsigned long long)phys_addr);
		error = -EBUSY;
		goto failed1;
	}

	/* Fill in configuration data and add them to the list */
	drvdata->base_address = ioremap(phys_addr, remap_size);
	if (drvdata->base_address == NULL) {
		dev_err(dev, "Couldn't ioremap memory at 0x%08llX\n",
			(unsigned long long)phys_addr);
		error = -EFAULT;
		goto failed2;
	}

	/* Disable all the interrupts, just in case */
	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0);

	/* Reset the PS2 device and abort any current transaction, to make sure
	 * we have the PS2 in a good state */
	out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);

	dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n",
		 (unsigned long long)phys_addr, drvdata->base_address,
		 drvdata->irq);

	/* The embedded serio carries this port's callbacks and identity */
	serio = &drvdata->serio;
	serio->id.type = SERIO_8042;
	serio->write = sxps2_write;
	serio->open = sxps2_open;
	serio->close = sxps2_close;
	serio->port_data = drvdata;
	serio->dev.parent = dev;
	snprintf(serio->name, sizeof(serio->name),
		 "Xilinx XPS PS/2 at %08llX", (unsigned long long)phys_addr);
	snprintf(serio->phys, sizeof(serio->phys),
		 "xilinxps2/serio at %08llX", (unsigned long long)phys_addr);

	serio_register_port(serio);

	return 0;		/* success */

failed2:
	release_mem_region(phys_addr, remap_size);
failed1:
	kfree(drvdata);
	dev_set_drvdata(dev, NULL);

	return error;
}
/**
* xps2_of_remove - unbinds the driver from the PS/2 device.
* @of_dev: pointer to OF device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
static int __devexit xps2_of_remove(struct platform_device *of_dev)
{
	struct device *dev = &of_dev->dev;
	struct xps2data *drvdata = dev_get_drvdata(dev);
	struct resource r_mem; /* IO mem resources */

	/* Undo probe in reverse order: port, mapping, region, memory */
	serio_unregister_port(&drvdata->serio);

	iounmap(drvdata->base_address);

	/* Get iospace of the device */
	if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
		dev_err(dev, "invalid address\n");
	else
		release_mem_region(r_mem.start, resource_size(&r_mem));

	kfree(drvdata);

	dev_set_drvdata(dev, NULL);

	return 0;
}
/* Match table for of_platform binding */
static const struct of_device_id xps2_of_match[] __devinitconst = {
{ .compatible = "xlnx,xps-ps2-1.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xps2_of_match);
static struct platform_driver xps2_of_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = xps2_of_match,
	},
	.probe		= xps2_of_probe,
	.remove		= __devexit_p(xps2_of_remove),
};

/* Module entry/exit: register/unregister the OF platform driver. */
static int __init xps2_init(void)
{
	return platform_driver_register(&xps2_of_driver);
}

static void __exit xps2_cleanup(void)
{
	platform_driver_unregister(&xps2_of_driver);
}
module_init(xps2_init);
module_exit(xps2_cleanup);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx XPS PS/2 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
archos-sa/archos-gpl-gen9-kernel-ics | net/irda/irttp.c | 2539 | 51971 | /*********************************************************************
*
* Filename: irttp.c
* Version: 1.2
* Description: Tiny Transport Protocol (TTP) implementation
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
* Modified at: Wed Jan 5 11:31:27 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>
#include <net/irda/parameters.h>
#include <net/irda/irttp.h>
static struct irttp_cb *irttp;
static void __irttp_close_tsap(struct tsap_cb *self);
static int irttp_data_indication(void *instance, void *sap,
struct sk_buff *skb);
static int irttp_udata_indication(void *instance, void *sap,
struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 header_size, struct sk_buff *skb);
static void irttp_connect_confirm(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 header_size, struct sk_buff *skb);
static void irttp_run_tx_queue(struct tsap_cb *self);
static void irttp_run_rx_queue(struct tsap_cb *self);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
int get);
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
static void irttp_status_indication(void *instance,
LINK_STATUS link, LOCK_STATUS lock);
/* Information for parsing parameters in IrTTP */
static pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
/*
* Function irttp_init (void)
*
* Initialize the IrTTP layer. Called by module initialization code
*
*/
/*
 * Function irttp_init (void)
 *
 *    Initialize the IrTTP layer. Called by module initialization code
 *
 * Returns 0 on success, -ENOMEM when either the main structure or the
 * TSAP hashbin cannot be allocated.
 */
int __init irttp_init(void)
{
	irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
	if (irttp == NULL)
		return -ENOMEM;

	irttp->magic = TTP_MAGIC;

	irttp->tsaps = hashbin_new(HB_LOCK);
	if (!irttp->tsaps) {
		IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
			   __func__);
		kfree(irttp);
		/*
		 * Don't leave the global pointing at freed memory; other
		 * code tests irttp for validity.
		 */
		irttp = NULL;
		return -ENOMEM;
	}

	return 0;
}
/*
* Function irttp_cleanup (void)
*
* Called by module destruction/cleanup code
*
*/
/*
 * Function irttp_cleanup (void)
 *
 *    Called by module destruction/cleanup code
 *
 * Tears down every TSAP still in the hashbin, then frees the main
 * structure and resets the global pointer.
 */
void irttp_cleanup(void)
{
	/* Check for main structure */
	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);

	/*
	 *  Delete hashbin and close all TSAP instances in it
	 */
	hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);

	irttp->magic = 0;

	/* De-allocate main structure */
	kfree(irttp);

	irttp = NULL;
}
/*************************** SUBROUTINES ***************************/

/*
 * Function irttp_start_todo_timer (self, timeout)
 *
 *    Start todo timer.
 *
 * Made it more effient and unsensitive to race conditions - Jean II
 *
 * @timeout is in jiffies. mod_timer() either arms the timer or moves an
 * already-pending expiry, so callers may invoke this repeatedly.
 */
static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
{
	/* Set new value for timer */
	mod_timer(&self->todo_timer, jiffies + timeout);
}
/*
 * Function irttp_todo_expired (data)
 *
 *    Todo timer has expired!
 *
 * One of the restriction of the timer is that it is run only on the timer
 * interrupt which run every 10ms. This mean that even if you set the timer
 * with a delay of 0, it may take up to 10ms before it's run.
 * So, to minimise latency and keep cache fresh, we try to avoid using
 * it as much as possible.
 * Note : we can't use tasklets, because they can't be asynchronously
 * killed (need user context), and we can't guarantee that here...
 * Jean II
 *
 * @data is the tsap_cb pointer cast to unsigned long (set in
 * irttp_open_tsap()). Handles deferred work: flushing queues, pending
 * disconnects and pending close.
 */
static void irttp_todo_expired(unsigned long data)
{
	struct tsap_cb *self = (struct tsap_cb *) data;

	/* Check that we still exist */
	if (!self || self->magic != TTP_TSAP_MAGIC)
		return;

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* Try to make some progress, especially on Tx side - Jean II */
	irttp_run_rx_queue(self);
	irttp_run_tx_queue(self);

	/* Check if time for disconnect */
	if (test_bit(0, &self->disconnect_pend)) {
		/* Check if it's possible to disconnect yet */
		if (skb_queue_empty(&self->tx_queue)) {
			/* Make sure disconnect is not pending anymore */
			clear_bit(0, &self->disconnect_pend); /* FALSE */

			/* Note : self->disconnect_skb may be NULL */
			irttp_disconnect_request(self, self->disconnect_skb,
						 P_NORMAL);
			self->disconnect_skb = NULL;
		} else {
			/* Try again later */
			irttp_start_todo_timer(self, HZ/10);

			/* No reason to try and close now */
			return;
		}
	}

	/* Check if it's closing time */
	if (self->close_pend)
		/* Finish cleanup */
		irttp_close_tsap(self);
}
/*
 * Function irttp_flush_queues (self)
 *
 *     Drop every frame still held by this TSAP: frames waiting to be
 *     transmitted, frames received but not yet delivered, and fragments
 *     of a partially reassembled SDU.
 */
static void irttp_flush_queues(struct tsap_cb *self)
{
	struct sk_buff *skb;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Drop frames waiting to be sent */
	for (;;) {
		skb = skb_dequeue(&self->tx_queue);
		if (skb == NULL)
			break;
		dev_kfree_skb(skb);
	}

	/* Drop frames received but not yet delivered */
	for (;;) {
		skb = skb_dequeue(&self->rx_queue);
		if (skb == NULL)
			break;
		dev_kfree_skb(skb);
	}

	/* Drop fragments of a partially reassembled SDU */
	for (;;) {
		skb = skb_dequeue(&self->rx_fragments);
		if (skb == NULL)
			break;
		dev_kfree_skb(skb);
	}
}
/*
 * Function irttp_reassemble (self)
 *
 *    Makes a new (continuous) skb of all the fragments in the fragment
 *    queue
 *
 * Returns the rebuilt skb, or NULL on allocation failure — in which case
 * the fragments are left queued on self->rx_fragments for a later retry.
 */
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
	struct sk_buff *skb, *frag;
	int n = 0;  /* Fragment index */

	IRDA_ASSERT(self != NULL, return NULL;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);

	IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
		   self->rx_sdu_size);

	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
	if (!skb)
		return NULL;

	/*
	 * Need to reserve space for TTP header in case this skb needs to
	 * be requeued in case delivery failes
	 */
	skb_reserve(skb, TTP_HEADER);
	skb_put(skb, self->rx_sdu_size);

	/*
	 * Copy all fragments to a new buffer
	 */
	while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
		skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
		n += frag->len;
		dev_kfree_skb(frag);
	}

	IRDA_DEBUG(2,
		   "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
		   __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
	/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
	 * by summing the size of all fragments, so we should always
	 * have n == self->rx_sdu_size, except in cases where we
	 * droped the last fragment (when self->rx_sdu_size exceed
	 * self->rx_max_sdu_size), where n < self->rx_sdu_size.
	 * Jean II */
	IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);

	/* Set the new length */
	skb_trim(skb, n);

	/* Reassembly complete: reset the running SDU size for the next SDU */
	self->rx_sdu_size = 0;

	return skb;
}
/*
 * Function irttp_fragment_skb (skb)
 *
 *    Fragments a frame and queues all the fragments for transmission
 *
 * Ownership of @skb passes to this function: its data either ends up on
 * self->tx_queue (split across one or more fragments) or, on allocation
 * failure, the unsent remainder is freed.
 */
static inline void irttp_fragment_skb(struct tsap_cb *self,
				      struct sk_buff *skb)
{
	struct sk_buff *frag;
	__u8 *frame;

	IRDA_DEBUG(2, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/*
	 * Split frame into a number of segments
	 */
	while (skb->len > self->max_seg_size) {
		IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);

		/* Make new segment */
		frag = alloc_skb(self->max_seg_size+self->max_header_size,
				 GFP_ATOMIC);
		if (!frag) {
			/* Fix: we own @skb here; the old code returned
			 * without queueing or freeing it, leaking the
			 * unsent remainder. Free it instead. */
			dev_kfree_skb(skb);
			return;
		}

		skb_reserve(frag, self->max_header_size);

		/* Copy data from the original skb into this fragment. */
		skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
					  self->max_seg_size);

		/* Insert TTP header, with the more bit set */
		frame = skb_push(frag, TTP_HEADER);
		frame[0] = TTP_MORE;

		/* Hide the copied data from the original skb */
		skb_pull(skb, self->max_seg_size);

		/* Queue fragment */
		skb_queue_tail(&self->tx_queue, frag);
	}
	/* Queue what is left of the original skb */
	IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);

	frame = skb_push(skb, TTP_HEADER);
	frame[0] = 0x00; /* Clear more bit */

	/* Queue fragment */
	skb_queue_tail(&self->tx_queue, skb);
}
/*
 * Function irttp_param_max_sdu_size (self, param)
 *
 *    Handle the MaxSduSize parameter in the connect frames. Invoked by
 *    the IrDA parameter machinery both when this parameter must be
 *    inserted into a connect frame (@get true) and when it is extracted
 *    from one (@get false).
 */
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
				    int get)
{
	struct tsap_cb *self = (struct tsap_cb *) instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Copy in the requested direction */
	if (get)
		param->pv.i = self->tx_max_sdu_size;
	else
		self->tx_max_sdu_size = param->pv.i;

	IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);

	return 0;
}
/*************************** CLIENT CALLS ***************************/
/************************** LMP CALLBACKS **************************/
/* Everything is happily mixed up. Waiting for next clean up - Jean II */

/*
 * Initialization, that has to be done on new tsap
 * instance allocation and on duplication
 *
 * NOTE(review): this initializes the lock, the timer bookkeeping and the
 * three skb queues only; it does NOT set todo_timer.function/.data —
 * the caller is expected to bind those (see irttp_open_tsap()).
 */
static void irttp_init_tsap(struct tsap_cb *tsap)
{
	spin_lock_init(&tsap->lock);
	init_timer(&tsap->todo_timer);

	skb_queue_head_init(&tsap->rx_queue);
	skb_queue_head_init(&tsap->tx_queue);
	skb_queue_head_init(&tsap->rx_fragments);
}
/*
 * Function irttp_open_tsap (stsap, notify)
 *
 *    Create TSAP connection endpoint,
 *
 * @stsap_sel: requested source TSAP selector (0x01-0x6F) or LSAP_ANY.
 * @credit:    initial receive credit to advertise (capped at
 *             TTP_RX_MAX_CREDIT).
 * @notify:    client callbacks; copied into the new TSAP.
 *
 * Returns the new TSAP, or NULL on invalid selector / allocation failure.
 */
struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	notify_t ttp_notify;

	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);

	/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
	 * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
	 * JeanII */
	if((stsap_sel != LSAP_ANY) &&
	   ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
		IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
		return NULL;
	}

	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
	if (self == NULL) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
		return NULL;
	}

	/* Initialize internal objects */
	irttp_init_tsap(self);

	/* Initialise todo timer */
	self->todo_timer.data     = (unsigned long) self;
	self->todo_timer.function = &irttp_todo_expired;

	/* Initialize callbacks for IrLMP to use */
	irda_notify_init(&ttp_notify);
	ttp_notify.connect_confirm = irttp_connect_confirm;
	ttp_notify.connect_indication = irttp_connect_indication;
	ttp_notify.disconnect_indication = irttp_disconnect_indication;
	ttp_notify.data_indication = irttp_data_indication;
	ttp_notify.udata_indication = irttp_udata_indication;
	ttp_notify.flow_indication = irttp_flow_indication;
	if(notify->status_indication != NULL)
		ttp_notify.status_indication = irttp_status_indication;
	ttp_notify.instance = self;
	strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);

	self->magic = TTP_TSAP_MAGIC;
	self->connected = FALSE;

	/*
	 * Create LSAP at IrLMP layer
	 */
	lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
	if (lsap == NULL) {
		IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
		/* Fix: don't leak the tsap_cb allocated above. It is not
		 * yet in the hashbin and its timer was never started, so
		 * a plain kfree() is sufficient. */
		kfree(self);
		return NULL;
	}

	/*
	 * If user specified LSAP_ANY as source TSAP selector, then IrLMP
	 * will replace it with whatever source selector which is free, so
	 * the stsap_sel we have might not be valid anymore
	 */
	self->stsap_sel = lsap->slsap_sel;
	IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);

	self->notify = *notify;
	self->lsap = lsap;

	hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);

	/* Cap the advertised receive credit at the protocol maximum */
	if (credit > TTP_RX_MAX_CREDIT)
		self->initial_credit = TTP_RX_MAX_CREDIT;
	else
		self->initial_credit = credit;

	return self;
}
EXPORT_SYMBOL(irttp_open_tsap);
/*
 * Function irttp_close (handle)
 *
 *    Remove an instance of a TSAP. This function should only deal with the
 *    deallocation of the TSAP, and resetting of the TSAPs values;
 *
 * Caller must guarantee the TSAP is no longer reachable (already removed
 * from the hashbin, or being destroyed through hashbin_delete()).
 */
static void __irttp_close_tsap(struct tsap_cb *self)
{
	/* First make sure we're connected. */
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	irttp_flush_queues(self);

	del_timer(&self->todo_timer);

	/* This one won't be cleaned up if we are disconnect_pend + close_pend
	 * and we receive a disconnect_indication */
	if (self->disconnect_skb)
		dev_kfree_skb(self->disconnect_skb);

	self->connected = FALSE;
	/* Poison the magic so stale pointers fail the TTP_TSAP_MAGIC checks */
	self->magic = ~TTP_TSAP_MAGIC;

	kfree(self);
}
/*
 * Function irttp_close (self)
 *
 *    Remove TSAP from list of all TSAPs and then deallocate all resources
 *    associated with this TSAP
 *
 * Note : because we *free* the tsap structure, it is the responsibility
 * of the caller to make sure we are called only once and to deal with
 * possible race conditions. - Jean II
 *
 * If the TSAP is still connected, the close is deferred: a disconnect is
 * requested (if not already pending) and the todo timer finishes the
 * cleanup later. Returns 0, or -1 on failed assertion.
 */
int irttp_close_tsap(struct tsap_cb *self)
{
	struct tsap_cb *tsap;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Make sure tsap has been disconnected */
	if (self->connected) {
		/* Check if disconnect is not pending */
		if (!test_bit(0, &self->disconnect_pend)) {
			IRDA_WARNING("%s: TSAP still connected!\n",
				     __func__);
			irttp_disconnect_request(self, NULL, P_NORMAL);
		}
		self->close_pend = TRUE;
		irttp_start_todo_timer(self, HZ/10);

		return 0; /* Will be back! */
	}

	tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);

	IRDA_ASSERT(tsap == self, return -1;);

	/* Close corresponding LSAP */
	if (self->lsap) {
		irlmp_close_lsap(self->lsap);
		self->lsap = NULL;
	}

	__irttp_close_tsap(self);

	return 0;
}
EXPORT_SYMBOL(irttp_close_tsap);
/*
 * Function irttp_udata_request (self, skb)
 *
 *    Send unreliable data on this TSAP
 *
 * Consumes @skb in every case: it is either handed to IrLMP or freed.
 * Returns 0 on success (or for zero-length packets, which are silently
 * dropped), a negative errno otherwise.
 */
int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
{
	int rc = 0;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(4, "%s()\n", __func__);

	if (skb->len == 0) {
		/* Shortcut: nothing to transmit, just drop the skb */
		rc = 0;
	} else if (!self->connected) {
		IRDA_WARNING("%s(), Not connected\n", __func__);
		rc = -ENOTCONN;
	} else if (skb->len > self->max_seg_size) {
		/* Unreliable data is never fragmented */
		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
		rc = -EMSGSIZE;
	} else {
		/* All good: hand the frame straight to IrLMP */
		irlmp_udata_request(self->lsap, skb);
		self->stats.tx_packets++;
		return 0;
	}

	dev_kfree_skb(skb);
	return rc;
}
EXPORT_SYMBOL(irttp_udata_request);
/*
 * Function irttp_data_request (handle, skb)
 *
 *    Queue frame for transmission. If SAR is enabled, fragement the frame
 *    and queue the fragments for transmission
 *
 * Consumes @skb in every case (queued, fragmented, or freed on error).
 * Returns 0 on success, a negative errno otherwise; -ENOBUFS tells the
 * caller to retry later.
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
		   skb_queue_len(&self->tx_queue));

	/* Take shortcut on zero byte packets */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Check that nothing bad happens */
	if (!self->connected) {
		IRDA_WARNING("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 * Check if SAR is disabled, and the frame is larger than what fits
	 * inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 * Check if SAR is enabled, and the frame is larger than the
	 * TxMaxSduSize
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size))
	{
		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}
	/*
	 * Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 * Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop packet. This error code should trigger the caller
		 * to resend the data in the client code - Jean II */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 * Fragment the frame, this function will also queue the
		 * fragments, we don't care about the fact the transmit
		 * queue may be overfilled by all the segments for a little
		 * while
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client. */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* self->tx_sdu_busy is the state of the client.
		 * Update state after notifying client to avoid
		 * race condition with irttp_flow_indication().
		 * If the queue empty itself after our test but before
		 * we set the flag, we will fix ourselves below in
		 * irttp_run_tx_queue().
		 * Jean II */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);
/*
 * Function irttp_run_tx_queue (self)
 *
 *    Transmit packets queued for transmission (if possible)
 *
 * May be called from several contexts (client request, LAP flow
 * indication, todo timer); self->tx_queue_lock ensures only one instance
 * drains the queue at a time.
 */
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;

	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
		   __func__,
		   self->send_credit, skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Try to send out frames as long as we have credits
	 * and as long as LAP is not full. If LAP is full, it will
	 * poll us through irttp_flow_indication() - Jean II */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue)))
	{
		/*
		 * Since we can transmit and receive frames concurrently,
		 * the code below is a critical region and we must assure that
		 * nobody messes with the credits while we update them.
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 * More bit must be set by the data_request() or fragment()
		 * functions
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from socket.
		 * The current skb has a reference to the socket that sent
		 * it (skb->sk). When we pass it to IrLMP, the skb will be
		 * stored in in IrLAP (self->wx_list). When we are within
		 * IrLAP, we lose the notion of socket, so we should not
		 * have a reference to a socket. So, we drop it here.
		 *
		 * Why does it matter ?
		 * When the skb is freed (kfree_skb), if it is associated
		 * with a socket, it release buffer space on the socket
		 * (through sock_wfree() and sock_def_write_space()).
		 * If the socket no longer exist, we may crash. Hard.
		 * When we close a socket, we make sure that associated packets
		 * in IrTTP are freed. However, we have no way to cancel
		 * the packet that we have passed to IrLAP. So, if a packet
		 * remains in IrLAP (retry on the link or else) after we
		 * close the socket, we are dead !
		 * Jean II */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}
		/* IrCOMM over IrTTP, IrLAN, ... */

		/* Pass the skb to IrLMP - done */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from client.
	 * We don't want to wait until the todo timer to do that, and we
	 * can't use tasklets (grr...), so we are obliged to give control
	 * to client. That's ok, this test will be true not too often
	 * (max once per LAP window) and we are called from places
	 * where we can spend a bit of time doing stuff. - Jean II */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend))
	{
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* self->tx_sdu_busy is the state of the client.
		 * We don't really have a race here, but it's always safer
		 * to update our state after the client - Jean II */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}
/*
 * Function irttp_give_credit (self)
 *
 *    Send a dataless flowdata TTP-PDU and give available credit to peer
 *    TSAP
 *
 * Best effort: if the skb allocation fails we simply return; the credits
 * remain in self->avail_credit and will be advertised later.
 */
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb = NULL;
	unsigned long flags;
	int n;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
		   __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Give credit to peer */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (!tx_skb)
		return;

	/* Reserve space for LMP, and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/*
	 * Since we can transmit and receive frames concurrently,
	 * the code below is a critical region and we must assure that
	 * nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);

	n = self->avail_credit;
	self->avail_credit = 0;

	/* Only space for 127 credits in frame */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	/* Dataless PDU: a single TTP byte carrying just the credit count */
	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}
/*
 * Function irttp_udata_indication (instance, sap, skb)
 *
 *    Received some unit-data (unreliable)
 *
 * Delivers @skb to the client's udata_indication handler if one is
 * registered; on rejection (non-zero return) or missing handler the skb
 * is freed here. Always returns 0.
 */
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb)
{
	struct tsap_cb *self = (struct tsap_cb *) instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	self->stats.rx_packets++;

	/* Just pass data to layer above; if it accepted the skb
	 * (returned 0) we are done. Same comment as in
	 * irttp_do_data_indication(). */
	if (self->notify.udata_indication &&
	    self->notify.udata_indication(self->notify.instance,
					  self,skb) == 0)
		return 0;

	/* Either no handler, or handler returns an error */
	dev_kfree_skb(skb);

	return 0;
}
/*
 * Function irttp_data_indication (instance, sap, skb)
 *
 *    Receive segment from IrLMP.
 *
 * Extracts the credit count from the TTP header, queues data frames for
 * reassembly/delivery, and restarts transmission if the peer just
 * unblocked us. Always returns 0.
 */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;

	self = (struct tsap_cb *) instance;

	n = skb->data[0] & 0x7f;     /* Extract the credits */

	self->stats.rx_packets++;

	/* Deal with inbound credit
	 * Since we can transmit and receive frames concurrently,
	 * the code below is a critical region and we must assure that
	 * nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	if (skb->len > 1)
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 * Data or dataless packet? Dataless frames contains only the
	 * TTP_HEADER.
	 */
	if (skb->len > 1) {
		/*
		 * We don't remove the TTP header, since we must preserve the
		 * more bit, so the defragment routing knows what to do
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}


	/* Push data to the higher layer.
	 * We do it synchronously because running the todo timer for each
	 * receive packet would be too much overhead and latency.
	 * By passing control to the higher layer, we run the risk that
	 * it may take time or grab a lock. Most often, the higher layer
	 * will only put packet in a queue.
	 * Anyway, packets are only dripping through the IrDA, so we can
	 * have time before the next packet.
	 * Further, we are run from NET_BH, so the worse that can happen is
	 * us missing the optimal time to send back the PF bit in LAP.
	 * Jean II */
	irttp_run_rx_queue(self);

	/* We now give credits to peer in irttp_run_rx_queue().
	 * We need to send credit *NOW*, otherwise we are going
	 * to miss the next Tx window. The todo timer may take
	 * a while before it's run... - Jean II */

	/*
	 * If the peer device has given us some credits and we didn't have
	 * anyone from before, then we need to shedule the tx queue.
	 * We need to do that because our Tx have stopped (so we may not
	 * get any LAP flow indication) and the user may be stopped as
	 * well. - Jean II
	 */
	if (self->send_credit == n) {
		/* Restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
		/* Note : we don't want to schedule the todo timer
		 * because it has horrible latency. No tasklets
		 * because the tasklet API is broken. - Jean II */
	}

	return 0;
}
/*
 * Function irttp_status_indication (self, reason)
 *
 *    Status_indication, just pass to the higher layer...
 *
 */
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock)
{
	struct tsap_cb *self = (struct tsap_cb *) instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Nothing to do if the client has already closed the TSAP
	 * and gone away */
	if (self->close_pend)
		return;

	/* Forward only if the service user has requested it */
	if (self->notify.status_indication == NULL) {
		IRDA_DEBUG(2, "%s(), no handler\n", __func__);
		return;
	}

	self->notify.status_indication(self->notify.instance,
				       link, lock);
}
/*
 * Function irttp_flow_indication (self, reason)
 *
 *    Flow_indication : IrLAP tells us to send more data.
 *
 */
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct tsap_cb *self = (struct tsap_cb *) instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* We are "polled" directly from LAP, which wants to fill its Tx
	 * window. Push data to maximise that window, but keep the amount
	 * of work here bounded so LAP doesn't hang waiting for packets.
	 * LAP currently calls us once per free slot, so typically a single
	 * packet goes out here, which also lets the scheduler do its
	 * round robin - Jean II */
	irttp_run_tx_queue(self);

	/* Note regarding the interraction with higher layer.
	 * irttp_run_tx_queue() may call the client when its queue
	 * start to empty, via notify.flow_indication(). Initially.
	 * I wanted this to happen in a tasklet, to avoid client
	 * grabbing the CPU, but we can't use tasklets safely. And timer
	 * is definitely too slow.
	 * This will happen only once per LAP window, and usually at
	 * the third packet (unless window is smaller). LAP is still
	 * doing mtt and sending first packet so it's sort of OK
	 * to do that. Jean II */

	/* If a disconnect is pending, try to make it happen now */
	if(self->disconnect_pend)
		irttp_start_todo_timer(self, 0);
}
/*
 * Function irttp_flow_request (self, command)
 *
 *    This function could be used by the upper layers to tell IrTTP to stop
 *    delivering frames if the receive queues are starting to get full, or
 *    to tell IrTTP to start delivering frames again.
 */
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	if (flow == FLOW_STOP) {
		IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
		self->rx_sdu_busy = TRUE;
	} else if (flow == FLOW_START) {
		IRDA_DEBUG(1, "%s(), flow start\n", __func__);
		self->rx_sdu_busy = FALSE;

		/* Client say he can accept more data, try to free our
		 * queues ASAP - Jean II */
		irttp_run_rx_queue(self);
	} else {
		IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
	}
}
EXPORT_SYMBOL(irttp_flow_request);
/*
 * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
 *
 *    Try to connect to remote destination TSAP selector
 *
 * @max_sdu_size: 0 disables SAR; >0 advertises MaxSduSize in the connect
 *                frame (SAR enabled).
 * @userdata:     optional payload; if supplied, the caller must have
 *                reserved TTP_MAX_HEADER of headroom. Consumed on success
 *                and on most error paths.
 *
 * Returns the IrLMP result, or a negative errno on early failure.
 */
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;

	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	if (self->connected) {
		if(userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 * Check that the client has reserved enough space for
		 * headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 * Give away max 127 credits for now
	 */
	if (n > 127) {
		self->avail_credit=n-127;
		n = 127;
	}

	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP. No QoS parameters for now */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);
/*
 * Function irttp_connect_confirm (handle, qos, skb)
 *
 *    Service user confirms TSAP connection with peer.
 *
 * IrLMP callback on connection establishment: records the negotiated
 * segment/header sizes and initial credit, extracts the optional
 * MaxSduSize parameter, then notifies the client.
 */
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_seg_size,
				  __u8 max_header_size, struct sk_buff *skb)
{
	struct tsap_cb *self;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = (struct tsap_cb *) instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* TTP consumes one header byte of every segment */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size + TTP_HEADER;

	/*
	 * Check if we have got some QoS parameters back! This should be the
	 * negotiated QoS for the link.
	 */
	if (qos) {
		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
		       qos->baud_rate.bits);

		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
		       qos->baud_rate.value);
	}

	n = skb->data[0] & 0x7f;

	IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);

	self->send_credit = n;
	self->tx_max_sdu_size = 0;
	self->connected = TRUE;

	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		/* Fix: restore "&param_info" — the source had been mangled
		 * into an HTML entity ("&para" + "m_info"), which does not
		 * compile. param_info is the parser table defined above. */
		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}
		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
	      self->send_credit, self->avail_credit, self->remote_credit);

	IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
		   self->tx_max_sdu_size);

	if (self->notify.connect_confirm) {
		self->notify.connect_confirm(self->notify.instance, self, qos,
					     self->tx_max_sdu_size,
					     self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
 * Function irttp_connect_indication (handle, skb)
 *
 *    Some other device is connecting to this TSAP
 *
 * IrLMP callback for an incoming connection: records peer selector,
 * negotiated sizes and initial credit, extracts the optional MaxSduSize
 * parameter, then notifies the client (who must respond with
 * irttp_connect_response()).
 */
static void irttp_connect_indication(void *instance, void *sap,
		struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
		struct sk_buff *skb)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = (struct tsap_cb *) instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	lsap = (struct lsap_cb *) sap;

	/* TTP consumes one header byte of every segment */
	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size+TTP_HEADER;

	IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);

	/* Need to update dtsap_sel if its equal to LSAP_ANY */
	self->dtsap_sel = lsap->dlsap_sel;

	n = skb->data[0] & 0x7f;

	self->send_credit = n;
	self->tx_max_sdu_size = 0;

	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		/* Fix: restore "&param_info" — the source had been mangled
		 * into an HTML entity ("&para" + "m_info"), which does not
		 * compile. param_info is the parser table defined above. */
		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}

		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	if (self->notify.connect_indication) {
		self->notify.connect_indication(self->notify.instance, self,
						qos, self->tx_max_sdu_size,
						self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}
/*
 * Function irttp_connect_response (handle, userdata)
 *
 *    Service user is accepting the connection, just pass it down to
 *    IrLMP!
 *
 * @max_sdu_size: 0 disables SAR; >0 advertises MaxSduSize (SAR enabled).
 * @userdata:     optional payload; caller must have reserved
 *                TTP_MAX_HEADER of headroom if supplied.
 */
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
		   self->stsap_sel);

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 * Check that the client has reserved enough space for
		 * headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	n = self->initial_credit;

	/* Frame has only space for max 127 credits (7 bits) */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}

	self->remote_credit = n;
	self->connected = TRUE;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert TTP header with SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */

		/* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1, */
		/*		  TTP_SAR_HEADER, &param_info) */

		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);
/*
* Function irttp_dup (self, instance)
*
* Duplicate TSAP, can be used by servers to confirm a connection on a
* new TSAP so it can keep listening on the old one.
*/
/*
 * irttp_dup - clone a TSAP so a server can service a connection on a
 * copy while still listening on the original
 * @orig: TSAP to duplicate; must still be registered in irttp->tsaps
 * @instance: client cookie stored in the copy's notify block
 *
 * Returns the new TSAP (inserted into the tsap hashbin) or NULL if
 * the original vanished, the allocation failed, or the LSAP could not
 * be duplicated. Uses GFP_ATOMIC because the tsap hashbin spinlock is
 * held across the lookup and copy.
 */
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
struct tsap_cb *new;
unsigned long flags;
IRDA_DEBUG(1, "%s()\n", __func__);
/* Protect our access to the old tsap instance */
spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
/* Find the old instance */
if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
/* Allocate a new instance */
new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
/* Dup */
memcpy(new, orig, sizeof(struct tsap_cb));
spin_lock_init(&new->lock);
/* We don't need the old instance any more */
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
/* Try to dup the LSAP (may fail if we were too slow) */
new->lsap = irlmp_dup(orig->lsap, new);
if (!new->lsap) {
IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
kfree(new);
return NULL;
}
/* Not everything should be copied */
new->notify.instance = instance;
/* Initialize internal objects (queues, timers) fresh for the copy */
irttp_init_tsap(new);
/* This is locked */
hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
return new;
}
EXPORT_SYMBOL(irttp_dup);
/*
* Function irttp_disconnect_request (self)
*
* Close this connection please! If priority is high, the queued data
* segments, if any, will be deallocated first
*
*/
/*
 * irttp_disconnect_request - tear down a TSAP connection
 * @self: connected TSAP
 * @userdata: optional skb to send with the disconnect; ownership
 *	passes to us on every path (sent, queued, or freed)
 * @priority: P_HIGH drops any queued Tx data and disconnects now;
 *	P_NORMAL drains the Tx queue first (disconnect deferred to the
 *	todo timer)
 *
 * Returns the irlmp status, -ENOMEM on allocation failure, or -1 when
 * the disconnect could not be performed immediately (already
 * disconnected, already pending, or deferred until Tx drains).
 */
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
int priority)
{
int ret;
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
/* Already disconnected? */
if (!self->connected) {
IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
if (userdata)
dev_kfree_skb(userdata);
return -1;
}
/* Disconnect already pending ?
* We need to use an atomic operation to prevent reentry. This
* function may be called from various context, like user, timer
* for following a disconnect_indication() (i.e. net_bh).
* Jean II */
if(test_and_set_bit(0, &self->disconnect_pend)) {
IRDA_DEBUG(0, "%s(), disconnect already pending\n",
__func__);
if (userdata)
dev_kfree_skb(userdata);
/* Try to make some progress */
irttp_run_tx_queue(self);
return -1;
}
/*
* Check if there is still data segments in the transmit queue
*/
if (!skb_queue_empty(&self->tx_queue)) {
if (priority == P_HIGH) {
/*
* No need to send the queued data, if we are
* disconnecting right now since the data will
* not have any usable connection to be sent on
*/
IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
irttp_flush_queues(self);
} else if (priority == P_NORMAL) {
/*
* Must delay disconnect until after all data segments
* have been sent and the tx_queue is empty
*/
/* We'll reuse this one later for the disconnect */
self->disconnect_skb = userdata; /* May be NULL */
irttp_run_tx_queue(self);
/* Todo timer will retry the disconnect once Tx drains;
* note disconnect_pend stays set until then */
irttp_start_todo_timer(self, HZ/10);
return -1;
}
}
/* Note : we don't need to check if self->rx_queue is full and the
* state of self->rx_sdu_busy because the disconnect response will
* be sent at the LMP level (so even if the peer has its Tx queue
* full of data). - Jean II */
IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
self->connected = FALSE;
if (!userdata) {
struct sk_buff *tx_skb;
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
/*
* Reserve space for MUX and LAP header
*/
skb_reserve(tx_skb, LMP_MAX_HEADER);
userdata = tx_skb;
}
ret = irlmp_disconnect_request(self->lsap, userdata);
/* The disconnect is no longer pending */
clear_bit(0, &self->disconnect_pend); /* FALSE */
return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);
/*
* Function irttp_disconnect_indication (self, reason)
*
* Disconnect indication, TSAP disconnected by peer?
*
*/
/*
 * LMP callback: the peer (or the link) disconnected this TSAP.
 * Either cleans up a TSAP whose client already asked for close, or
 * forwards the indication (with the optional skb) to the client.
 */
static void irttp_disconnect_indication(void *instance, void *sap,
					LM_REASON reason, struct sk_buff *skb)
{
	struct tsap_cb *self = instance;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Stop the higher layer from sending any more data */
	self->connected = FALSE;

	if (self->close_pend) {
		/* Client already tried to close this TSAP, so the higher
		 * layer is probably gone: don't bother it, just reclaim
		 * everything ourselves. - Jean II */
		if (skb)
			dev_kfree_skb(skb);
		irttp_close_tsap(self);
		return;
	}

	/* Otherwise we assume the higher layer is still waiting for the
	 * disconnect notification and able to process it (had it tried
	 * to close, close_pend would be TRUE). Forward the indication if
	 * a handler is registered, else drop the skb here. - Jean II */
	if (self->notify.disconnect_indication)
		self->notify.disconnect_indication(self->notify.instance,
						   self, reason, skb);
	else if (skb)
		dev_kfree_skb(skb);
}
/*
* Function irttp_do_data_indication (self, skb)
*
* Try to deliver a reassembled skb to the layer above, and requeue it
* if that for some reason should fail. We mark the rx sdu as busy to
* apply back pressure if necessary.
*/
/*
 * Hand one reassembled skb up to the client. If the client refuses it
 * (non-zero return from data_indication), re-add the TTP header and
 * put the skb back at the head of the rx queue, marking the rx side
 * busy so we stop delivering until flow is resumed.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
	int rc;

	/* Client already closed the TSAP and went away: drop the data */
	if (self->close_pend) {
		dev_kfree_skb(skb);
		return;
	}

	rc = self->notify.data_indication(self->notify.instance, self, skb);
	if (!rc)
		return;

	/* Usually the layer above signals a full input queue via the
	 * flow request, but it may instead simply refuse the skb and
	 * return an error — requeue it in that case. */
	IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);
	/* Make sure we take a break */
	self->rx_sdu_busy = TRUE;
	/* Re-add the TTP header; MORE bit cleared since this skb is a
	 * complete (reassembled) SDU */
	skb_push(skb, TTP_HEADER);
	skb->data[0] = 0x00;
	skb_queue_head(&self->rx_queue, skb);
}
/*
* Function irttp_run_rx_queue (self)
*
* Process frames queued on the receive queue, reassembling SDUs as
* needed, and hand out any Rx credit we can spare to the peer.
*/
/*
 * irttp_run_rx_queue - drain the Rx queue and replenish peer credit
 * @self: TSAP whose rx_queue should be processed
 *
 * Dequeues frames while the client is accepting data, reassembling
 * SAR fragments in rx_fragments, then recomputes how much credit we
 * can advertise and sends an explicit credit frame when the peer is
 * running low. Serialized by the rx_queue_lock flag; concurrent
 * callers simply return.
 */
static void irttp_run_rx_queue(struct tsap_cb *self)
{
struct sk_buff *skb;
int more = 0;
IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
self->send_credit, self->avail_credit, self->remote_credit);
/* Get exclusive access to the rx queue, otherwise don't touch it */
if (irda_lock(&self->rx_queue_lock) == FALSE)
return;
/*
* Reassemble all frames in receive queue and deliver them
*/
while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
/* This bit will tell us if it's the last fragment or not */
more = skb->data[0] & 0x80;
/* Remove TTP header */
skb_pull(skb, TTP_HEADER);
/* Add the length of the remaining data */
self->rx_sdu_size += skb->len;
/*
* If SAR is disabled, or user has requested no reassembly
* of received fragments then we just deliver them
* immediately. This can be requested by clients that
* implements byte streams without any message boundaries
*/
if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
irttp_do_data_indication(self, skb);
self->rx_sdu_size = 0;
continue;
}
/* Check if this is a fragment, and not the last fragment */
if (more) {
/*
* Queue the fragment if we still are within the
* limits of the maximum size of the rx_sdu
*/
if (self->rx_sdu_size <= self->rx_max_sdu_size) {
IRDA_DEBUG(4, "%s(), queueing frag\n",
__func__);
skb_queue_tail(&self->rx_fragments, skb);
} else {
/* Free the part of the SDU that is too big */
dev_kfree_skb(skb);
}
continue;
}
/*
* This is the last fragment, so time to reassemble!
*/
if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
(self->rx_max_sdu_size == TTP_SAR_UNBOUND))
{
/*
* A little optimizing. Only queue the fragment if
* there are other fragments. Since if this is the
* last and only fragment, there is no need to
* reassemble :-)
*/
if (!skb_queue_empty(&self->rx_fragments)) {
skb_queue_tail(&self->rx_fragments,
skb);
skb = irttp_reassemble_skb(self);
}
/* Now we can deliver the reassembled skb */
irttp_do_data_indication(self, skb);
} else {
IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);
/* Free the part of the SDU that is too big */
dev_kfree_skb(skb);
/* Deliver only the valid but truncated part of SDU */
skb = irttp_reassemble_skb(self);
irttp_do_data_indication(self, skb);
}
self->rx_sdu_size = 0;
}
/*
* It's not trivial to keep track of how many credits are available
* by incrementing at each packet, because delivery may fail
* (irttp_do_data_indication() may requeue the frame) and because
* we need to take care of fragmentation.
* We want the other side to send up to initial_credit packets.
* We have some frames in our queues, and we have already allowed it
* to send remote_credit.
* No need to spinlock, write is atomic and self correcting...
* Jean II
*/
self->avail_credit = (self->initial_credit -
(self->remote_credit +
skb_queue_len(&self->rx_queue) +
skb_queue_len(&self->rx_fragments)));
/* Do we have too much credits to send to peer ? */
if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
(self->avail_credit > 0)) {
/* Send explicit credit frame */
irttp_give_credit(self);
/* Note : do *NOT* check if tx_queue is non-empty, that
* will produce deadlocks. I repeat : send a credit frame
* even if we have something to send in our Tx queue.
* If we have credits, it means that our Tx queue is blocked.
*
* Let's suppose the peer can't keep up with our Tx. He will
* flow control us by not sending us any credits, and we
* will stop Tx and start accumulating credits here.
* Up to the point where the peer will stop its Tx queue,
* for lack of credits.
* Let's assume the peer application is single threaded.
* It will block on Tx and never consume any Rx buffer.
* Deadlock. Guaranteed. - Jean II
*/
}
/* Reset lock */
self->rx_queue_lock = 0;
}
#ifdef CONFIG_PROC_FS
/* Per-open /proc iterator state: index of the TSAP currently shown */
struct irttp_iter_state {
int id;
};
/*
 * seq_file .start: take the tsap hashbin lock (dropped in .stop) and
 * walk to the *pos'th TSAP. Returns that TSAP, or NULL past the end.
 */
static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;
	struct tsap_cb *self;

	/* Protect our access to the tsap list for the whole dump */
	spin_lock_irq(&irttp->tsaps->hb_spinlock);

	self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
	for (iter->id = 0; self != NULL && iter->id != *pos; ++iter->id)
		self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);
	return self;
}
/* seq_file .next: bump both position counters, step the hashbin cursor */
static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;

	(*pos)++;
	iter->id++;
	return (void *) hashbin_get_next(irttp->tsaps);
}
/* seq_file .stop: release the lock taken in irttp_seq_start() */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}
/*
 * seq_file .show: dump one TSAP's addressing, credit, queue and SAR
 * state. Output is grouped one seq_printf() per text line; byte
 * layout is unchanged from the original multi-call version.
 */
static int irttp_seq_show(struct seq_file *seq, void *v)
{
	const struct irttp_iter_state *iter = seq->private;
	const struct tsap_cb *self = v;

	seq_printf(seq, "TSAP %d, stsap_sel: %02x, dtsap_sel: %02x\n",
		   iter->id, self->stsap_sel, self->dtsap_sel);
	seq_printf(seq, " connected: %s, avail credit: %d, remote credit: %d, send credit: %d\n",
		   self->connected ? "TRUE" : "FALSE",
		   self->avail_credit, self->remote_credit,
		   self->send_credit);
	seq_printf(seq, " tx packets: %lu, rx packets: %lu, tx_queue len: %u rx_queue len: %u\n",
		   self->stats.tx_packets, self->stats.rx_packets,
		   skb_queue_len(&self->tx_queue),
		   skb_queue_len(&self->rx_queue));
	seq_printf(seq, " tx_sdu_busy: %s, rx_sdu_busy: %s\n",
		   self->tx_sdu_busy ? "TRUE" : "FALSE",
		   self->rx_sdu_busy ? "TRUE" : "FALSE");
	seq_printf(seq, " max_seg_size: %u, tx_max_sdu_size: %u, rx_max_sdu_size: %u\n",
		   self->max_seg_size, self->tx_max_sdu_size,
		   self->rx_max_sdu_size);
	seq_printf(seq, " Used by (%s)\n\n", self->notify.name);
	return 0;
}
/* seq_file callbacks; the tsap hashbin lock is held from start to stop */
static const struct seq_operations irttp_seq_ops = {
.start = irttp_seq_start,
.next = irttp_seq_next,
.stop = irttp_seq_stop,
.show = irttp_seq_show,
};
/* Allocate the per-open iterator state together with the seq_file */
static int irttp_seq_open(struct inode *inode, struct file *file)
{
return seq_open_private(file, &irttp_seq_ops,
sizeof(struct irttp_iter_state));
}
/* /proc file ops; seq_release_private frees the iterator state */
const struct file_operations irttp_seq_fops = {
.owner = THIS_MODULE,
.open = irttp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#endif /* PROC_FS */
| gpl-2.0 |
BigBot96/android_kernel_samsung_espressovzw | drivers/gpu/drm/radeon/radeon_acpi.c | 2795 | 1571 | #include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include "radeon.h"
#include <linux/vga_switcheroo.h>
/* Call the ATIF method
*
* Note: currently we discard the output
*/
/*
 * Evaluate the ACPI ATIF method with two zero integer arguments.
 * The method output is discarded. Returns 0 on success or when the
 * platform simply has no ATIF method (AE_NOT_FOUND); returns 1 only
 * when ATIF exists but evaluation failed.
 */
static int radeon_atif_call(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object args[2];
	struct acpi_object_list arg_list;
	acpi_status status;
	int ret = 0;

	/* ATIF takes two integer arguments; both zero for this call */
	args[0].type = ACPI_TYPE_INTEGER;
	args[0].integer.value = 0;
	args[1].type = ACPI_TYPE_INTEGER;
	args[1].integer.value = 0;
	arg_list.count = 2;
	arg_list.pointer = &args[0];

	status = acpi_evaluate_object(handle, "ATIF", &arg_list, &buffer);

	/* Fail only if calling the method fails and ATIF is supported */
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
		ret = 1;
	}
	/* Single exit: the output buffer is freed on every path */
	kfree(buffer.pointer);
	return ret;
}
/* Call all ACPI methods here */
/*
 * radeon_acpi_init - probe and call the GPU's ACPI methods (ATIF)
 * @rdev: radeon device being initialized
 *
 * Returns 0 on success or when ACPI/ATIF is not applicable; non-zero
 * if an existing ATIF method failed to evaluate.
 */
int radeon_acpi_init(struct radeon_device *rdev)
{
	acpi_handle handle;
	int ret;
	/* No need to proceed if we're sure that ATIF is not supported */
	if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
		return 0;
	/* Get the device handle */
	handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
	/* On systems without ACPI (or without a handle for this PCI
	 * device) there is nothing to evaluate. Without this check a
	 * NULL handle would be passed to acpi_evaluate_object(), which
	 * fails with an error other than AE_NOT_FOUND and makes
	 * radeon_atif_call() report a spurious ATIF failure. */
	if (!handle)
		return 0;
	/* Call the ATIF method */
	ret = radeon_atif_call(handle);
	if (ret)
		return ret;
	return 0;
}
| gpl-2.0 |
phiexz/kernel-cyanogen-gio | drivers/scsi/mac_scsi.c | 4331 | 15018 | /*
* Generic Macintosh NCR5380 driver
*
* Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
*
* derived in part from:
*/
/*
* Generic Generic NCR5380 driver
*
* Copyright 1995, Russell King
*
* ALPHA RELEASE 1.
*
* For more information, please consult
*
* NCR 5380 Family
* SCSI Protocol Controller
* Databook
*
* NCR Microelectronics
* 1635 Aeroplaza Drive
* Colorado Springs, CO 80916
* 1+ (719) 578-3400
* 1+ (800) 334-5454
*/
/*
* $Log: mac_NCR5380.c,v $
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "mac_scsi.h"
/* These control the behaviour of the generic 5380 core */
#define AUTOSENSE
#define PSEUDO_DMA
#include "NCR5380.h"
#if 0
#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
#else
#define NDEBUG (NDEBUG_ABORT)
#endif
#define RESET_BOOT
#define DRIVER_SETUP
extern void via_scsi_clear(void);
#ifdef RESET_BOOT
static void mac_scsi_reset_boot(struct Scsi_Host *instance);
#endif
/* Boot-time tunables filled in from the mac5380= command line by
 * mac_scsi_setup(); -1 means "use the compiled-in default". */
static int setup_called = 0;
static int setup_can_queue = -1;
static int setup_cmd_per_lun = -1;
static int setup_sg_tablesize = -1;
static int setup_use_pdma = -1;
#ifdef SUPPORT_TAGS
static int setup_use_tagged_queuing = -1;
#endif
static int setup_hostid = -1;
/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
* we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
* need ten times the standard value... */
#define TOSHIBA_DELAY
#ifdef TOSHIBA_DELAY
#define AFTER_RESET_DELAY (5*HZ/2)
#else
#define AFTER_RESET_DELAY (HZ/2)
#endif
/* 5380 register window and (pseudo-)DMA port addresses in VIA space,
 * set up per-model in macscsi_detect(). */
static volatile unsigned char *mac_scsi_regp = NULL;
static volatile unsigned char *mac_scsi_drq = NULL;
static volatile unsigned char *mac_scsi_nodrq = NULL;
/*
* NCR 5380 register access functions
*/
#if 0
/* Debug versions */
/* NOTE(review): compiled out. These variants toggle a control value
 * around each register access via CTRL(), which ignores its first
 * argument and writes through the hostdata ctrl pointer. */
#define CTRL(p,v) (*ctrl = (v))
static char macscsi_read(struct Scsi_Host *instance, int reg)
{
int iobase = instance->io_port;
int i;
int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
CTRL(iobase, 0);
i = in_8(iobase + (reg<<4));
CTRL(iobase, 0x40);
return i;
}
static void macscsi_write(struct Scsi_Host *instance, int reg, int value)
{
int iobase = instance->io_port;
int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
CTRL(iobase, 0);
out_8(iobase + (reg<<4), value);
CTRL(iobase, 0x40);
}
#else
/* Fast versions */
/* The 5380 registers are spaced 16 bytes apart in the register window,
 * hence the reg<<4 addressing. */
static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg)
{
return in_8(instance->io_port + (reg<<4));
}
static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value)
{
out_8(instance->io_port + (reg<<4), value);
}
#endif
/*
* Function : mac_scsi_setup(char *str)
*
* Purpose : booter command line initialization of the overrides array,
*
* Inputs : str - comma delimited list of options
*
*/
/*
 * mac_scsi_setup - parse the "mac5380=" kernel command line option
 * @str: comma-separated list of up to six integers:
 *	can_queue, cmd_per_lun, sg_tablesize, hostid, [use_tags,] use_pdma
 *	(use_tags only when SUPPORT_TAGS is compiled in)
 *
 * Stores the values in the setup_* overrides, which macscsi_detect()
 * applies later. Only the first, well-formed invocation is honoured.
 * Always returns a value indicating the option was consumed.
 */
static int __init mac_scsi_setup(char *str) {
#ifdef DRIVER_SETUP
int ints[7];
(void)get_options( str, ARRAY_SIZE(ints), ints);
if (setup_called++ || ints[0] < 1 || ints[0] > 6) {
printk(KERN_WARNING "scsi: <mac5380>"
" Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n");
printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n");
return 0;
}
if (ints[0] >= 1) {
if (ints[1] > 0)
/* no limits on this, just > 0 */
setup_can_queue = ints[1];
}
if (ints[0] >= 2) {
if (ints[2] > 0)
setup_cmd_per_lun = ints[2];
}
if (ints[0] >= 3) {
if (ints[3] >= 0) {
setup_sg_tablesize = ints[3];
/* Must be <= SG_ALL (255) */
if (setup_sg_tablesize > SG_ALL)
setup_sg_tablesize = SG_ALL;
}
}
if (ints[0] >= 4) {
/* Must be between 0 and 7 */
if (ints[4] >= 0 && ints[4] <= 7)
setup_hostid = ints[4];
else if (ints[4] > 7)
printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] );
}
#ifdef SUPPORT_TAGS
if (ints[0] >= 5) {
if (ints[5] >= 0)
setup_use_tagged_queuing = !!ints[5];
}
/* With tagged queuing support, use_pdma shifts to the sixth slot */
if (ints[0] == 6) {
if (ints[6] >= 0)
setup_use_pdma = ints[6];
}
#else
if (ints[0] == 5) {
if (ints[5] >= 0)
setup_use_pdma = ints[5];
}
#endif /* SUPPORT_TAGS */
#endif /* DRIVER_SETUP */
return 1;
}
__setup("mac5380=", mac_scsi_setup);
/*
* If you want to find the instance with (k)gdb ...
*/
#if NDEBUG
static struct Scsi_Host *default_instance;
#endif
/*
* Function : int macscsi_detect(struct scsi_host_template * tpnt)
*
* Purpose : initializes mac NCR5380 driver based on the
* command line / compile time port and irq definitions.
*
* Inputs : tpnt - template for this SCSI adapter.
*
* Returns : 1 if a host adapter was found, 0 if not.
*
*/
/*
 * macscsi_detect - probe and register the on-board NCR5380
 * @tpnt: host template, patched with the setup_* overrides
 *
 * Only matches old-style Mac SCSI hardware, and only runs once. Maps
 * the chip's register window and pseudo-DMA ports in VIA space (the
 * IIfx uses different offsets and cannot do pseudo-DMA), optionally
 * resets the bus, initializes the generic 5380 core and requests the
 * SCSI interrupt (falling back to polled operation if that fails).
 * Returns 1 if a host adapter was registered, 0 otherwise.
 */
int macscsi_detect(struct scsi_host_template * tpnt)
{
static int called = 0;
int flags = 0;
struct Scsi_Host *instance;
if (!MACH_IS_MAC || called)
return( 0 );
if (macintosh_config->scsi_type != MAC_SCSI_OLD)
return( 0 );
/* setup variables */
tpnt->can_queue =
(setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
tpnt->cmd_per_lun =
(setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
tpnt->sg_tablesize =
(setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
if (setup_hostid >= 0)
tpnt->this_id = setup_hostid;
else {
/* use 7 as default */
tpnt->this_id = 7;
}
#ifdef SUPPORT_TAGS
if (setup_use_tagged_queuing < 0)
setup_use_tagged_queuing = USE_TAGGED_QUEUING;
#endif
/* Once we support multiple 5380s (e.g. DuoDock) we'll do
something different here */
instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
#if NDEBUG
default_instance = instance;
#endif
/* Model-specific VIA offsets for the 5380 register window and the
 * DRQ/non-DRQ pseudo-DMA ports */
if (macintosh_config->ident == MAC_MODEL_IIFX) {
mac_scsi_regp = via1+0x8000;
mac_scsi_drq = via1+0xE000;
mac_scsi_nodrq = via1+0xC000;
/* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */
flags = FLAG_NO_PSEUDO_DMA;
} else {
mac_scsi_regp = via1+0x10000;
mac_scsi_drq = via1+0x6000;
mac_scsi_nodrq = via1+0x12000;
}
if (! setup_use_pdma)
flags = FLAG_NO_PSEUDO_DMA;
instance->io_port = (unsigned long) mac_scsi_regp;
instance->irq = IRQ_MAC_SCSI;
#ifdef RESET_BOOT
mac_scsi_reset_boot(instance);
#endif
NCR5380_init(instance, flags);
instance->n_io_port = 255;
((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
/* Without the IRQ the driver still works, just without interrupts */
if (instance->irq != SCSI_IRQ_NONE)
if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
"ncr5380", instance)) {
printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
}
printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port);
if (instance->irq == SCSI_IRQ_NONE)
printk (KERN_INFO "s disabled");
else
printk (KERN_INFO " %d", instance->irq);
printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE);
printk(KERN_INFO "\nscsi%d:", instance->host_no);
NCR5380_print_options(instance);
printk("\n");
called = 1;
return 1;
}
/* Undo macscsi_detect(): release the IRQ (if we got one) and let the
 * generic 5380 core tear itself down. Always returns 0. */
int macscsi_release (struct Scsi_Host *shpnt)
{
if (shpnt->irq != SCSI_IRQ_NONE)
free_irq(shpnt->irq, shpnt);
NCR5380_exit(shpnt);
return 0;
}
#ifdef RESET_BOOT
/*
* Our 'bus reset on boot' function
*/
/*
 * mac_scsi_reset_boot - pulse RST on the SCSI bus during boot
 * @instance: freshly registered host (interrupts not yet requested)
 *
 * Gets the chip in phase, asserts RST for ~50us (minimum hold time is
 * 25us), clears the pending reset interrupt, then busy-waits
 * AFTER_RESET_DELAY jiffies so slow devices can recover. The SCSI IRQ
 * is masked for the duration to avoid a stray interrupt with no IRQ
 * bit set.
 */
static void mac_scsi_reset_boot(struct Scsi_Host *instance)
{
unsigned long end;
NCR5380_local_declare();
NCR5380_setup(instance);
/*
* Do a SCSI reset to clean up the bus during initialization. No messing
* with the queues, interrupts, or locks necessary here.
*/
printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
/* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
disable_irq(IRQ_MAC_SCSI);
/* get in phase */
NCR5380_write( TARGET_COMMAND_REG,
PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
/* assert RST */
NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
/* The min. reset hold time is 25us, so 40us should be enough */
udelay( 50 );
/* reset RST and interrupt */
NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
NCR5380_read( RESET_PARITY_INTERRUPT_REG );
for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
barrier();
/* switch on SCSI IRQ again */
enable_irq(IRQ_MAC_SCSI);
printk(KERN_INFO " done\n" );
}
#endif
/* SCSI host .info callback: no extra description beyond the template name */
const char * macscsi_info (struct Scsi_Host *spnt) {
return "";
}
/*
Pseudo-DMA: (Ove Edlund)
The code attempts to catch bus errors that occur if one for example
"trips over the cable".
XXX: Since bus errors in the PDMA routines never happen on my
computer, the bus error code is untested.
If the code works as intended, a bus error results in Pseudo-DMA
beeing disabled, meaning that the driver switches to slow handshake.
If bus errors are NOT extremely rare, this has to be changed.
*/
/*
 * CP_IO_TO_MEM(s, d, len) - m68k pseudo-DMA copy from the 5380 data
 * port at *s into memory at d.
 *
 * Copies byte-wise until d is longword aligned, then 32-byte unrolled
 * longword moves, then a longword tail, then the final bytes. All
 * loads/stores are covered by __ex_table fixups: on a bus error the
 * fixup sets len to 1 and jumps past the copy (partial transfer);
 * otherwise len ends up 0. s, d and len are asm outputs and are
 * clobbered. Caller checks len to detect the bus-error case.
 */
#define CP_IO_TO_MEM(s,d,len) \
__asm__ __volatile__ \
(" cmp.w #4,%2\n" \
" bls 8f\n" \
" move.w %1,%%d0\n" \
" neg.b %%d0\n" \
" and.w #3,%%d0\n" \
" sub.w %%d0,%2\n" \
" bra 2f\n" \
" 1: move.b (%0),(%1)+\n" \
" 2: dbf %%d0,1b\n" \
" move.w %2,%%d0\n" \
" lsr.w #5,%%d0\n" \
" bra 4f\n" \
" 3: move.l (%0),(%1)+\n" \
"31: move.l (%0),(%1)+\n" \
"32: move.l (%0),(%1)+\n" \
"33: move.l (%0),(%1)+\n" \
"34: move.l (%0),(%1)+\n" \
"35: move.l (%0),(%1)+\n" \
"36: move.l (%0),(%1)+\n" \
"37: move.l (%0),(%1)+\n" \
" 4: dbf %%d0,3b\n" \
" move.w %2,%%d0\n" \
" lsr.w #2,%%d0\n" \
" and.w #7,%%d0\n" \
" bra 6f\n" \
" 5: move.l (%0),(%1)+\n" \
" 6: dbf %%d0,5b\n" \
" and.w #3,%2\n" \
" bra 8f\n" \
" 7: move.b (%0),(%1)+\n" \
" 8: dbf %2,7b\n" \
" moveq.l #0, %2\n" \
" 9: \n" \
".section .fixup,\"ax\"\n" \
" .even\n" \
"90: moveq.l #1, %2\n" \
" jra 9b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,90b\n" \
" .long 3b,90b\n" \
" .long 31b,90b\n" \
" .long 32b,90b\n" \
" .long 33b,90b\n" \
" .long 34b,90b\n" \
" .long 35b,90b\n" \
" .long 36b,90b\n" \
" .long 37b,90b\n" \
" .long 5b,90b\n" \
" .long 7b,90b\n" \
".previous" \
: "=a"(s), "=a"(d), "=d"(len) \
: "0"(s), "1"(d), "2"(len) \
: "d0")
/*
 * macscsi_pread - pseudo-DMA read from the target into dst
 * @instance: host
 * @dst: destination buffer
 * @len: number of bytes to transfer
 *
 * Waits for DRQ (or at least REQ) using the MacOS-derived handshake,
 * then block-copies from the DRQ port via CP_IO_TO_MEM. Returns 0 on
 * success, -1 on a phase mismatch or a bus error during the copy
 * (see the pseudo-DMA comment above: the core then falls back to
 * slow handshake).
 */
static int macscsi_pread (struct Scsi_Host *instance,
unsigned char *dst, int len)
{
unsigned char *d;
volatile unsigned char *s;
NCR5380_local_declare();
NCR5380_setup(instance);
s = mac_scsi_drq+0x60;
d = dst;
/* These conditions are derived from MacOS */
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
&& !(NCR5380_read(STATUS_REG) & SR_REQ))
;
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
&& (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
printk(KERN_ERR "Error in macscsi_pread\n");
return -1;
}
/* CP_IO_TO_MEM leaves len != 0 if its bus-error fixup ran */
CP_IO_TO_MEM(s, d, len);
if (len != 0) {
printk(KERN_NOTICE "Bus error in macscsi_pread\n");
return -1;
}
return 0;
}
/*
 * CP_MEM_TO_IO(s, d, len) - m68k pseudo-DMA copy from memory at s to
 * the 5380 data port at *d. Mirror image of CP_IO_TO_MEM: aligns on
 * the source, then 32-byte unrolled longword moves, longword tail,
 * final bytes. Bus-error fixups set len to 1 (partial transfer);
 * len ends up 0 on success. s, d and len are clobbered.
 */
#define CP_MEM_TO_IO(s,d,len) \
__asm__ __volatile__ \
(" cmp.w #4,%2\n" \
" bls 8f\n" \
" move.w %0,%%d0\n" \
" neg.b %%d0\n" \
" and.w #3,%%d0\n" \
" sub.w %%d0,%2\n" \
" bra 2f\n" \
" 1: move.b (%0)+,(%1)\n" \
" 2: dbf %%d0,1b\n" \
" move.w %2,%%d0\n" \
" lsr.w #5,%%d0\n" \
" bra 4f\n" \
" 3: move.l (%0)+,(%1)\n" \
"31: move.l (%0)+,(%1)\n" \
"32: move.l (%0)+,(%1)\n" \
"33: move.l (%0)+,(%1)\n" \
"34: move.l (%0)+,(%1)\n" \
"35: move.l (%0)+,(%1)\n" \
"36: move.l (%0)+,(%1)\n" \
"37: move.l (%0)+,(%1)\n" \
" 4: dbf %%d0,3b\n" \
" move.w %2,%%d0\n" \
" lsr.w #2,%%d0\n" \
" and.w #7,%%d0\n" \
" bra 6f\n" \
" 5: move.l (%0)+,(%1)\n" \
" 6: dbf %%d0,5b\n" \
" and.w #3,%2\n" \
" bra 8f\n" \
" 7: move.b (%0)+,(%1)\n" \
" 8: dbf %2,7b\n" \
" moveq.l #0, %2\n" \
" 9: \n" \
".section .fixup,\"ax\"\n" \
" .even\n" \
"90: moveq.l #1, %2\n" \
" jra 9b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,90b\n" \
" .long 3b,90b\n" \
" .long 31b,90b\n" \
" .long 32b,90b\n" \
" .long 33b,90b\n" \
" .long 34b,90b\n" \
" .long 35b,90b\n" \
" .long 36b,90b\n" \
" .long 37b,90b\n" \
" .long 5b,90b\n" \
" .long 7b,90b\n" \
".previous" \
: "=a"(s), "=a"(d), "=d"(len) \
: "0"(s), "1"(d), "2"(len) \
: "d0")
/*
 * macscsi_pwrite - pseudo-DMA write from src to the target
 * @instance: host
 * @src: source buffer
 * @len: number of bytes to transfer
 *
 * Waits for DRQ using the MacOS-derived handshake, then block-copies
 * to the DRQ port via CP_MEM_TO_IO. Returns 0 on success, -1 if DRQ
 * never asserted or a bus error occurred during the copy.
 */
static int macscsi_pwrite (struct Scsi_Host *instance,
unsigned char *src, int len)
{
unsigned char *s;
volatile unsigned char *d;
NCR5380_local_declare();
NCR5380_setup(instance);
s = src;
d = mac_scsi_drq;
/* These conditions are derived from MacOS */
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)
&& (!(NCR5380_read(STATUS_REG) & SR_REQ)
|| (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
;
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
printk(KERN_ERR "Error in macscsi_pwrite\n");
return -1;
}
/* CP_MEM_TO_IO leaves len != 0 if its bus-error fixup ran */
CP_MEM_TO_IO(s, d, len);
if (len != 0) {
printk(KERN_NOTICE "Bus error in macscsi_pwrite\n");
return -1;
}
return 0;
}
#include "NCR5380.c"
/* Host template for the Mac on-board 5380.
 * NOTE(review): macscsi_proc_info/queue_command/abort/bus_reset appear
 * to be provided by the NCR5380 core included above via name mapping
 * (presumably in mac_scsi.h) — confirm against that header. */
static struct scsi_host_template driver_template = {
.proc_name = "Mac5380",
.proc_info = macscsi_proc_info,
.name = "Macintosh NCR5380 SCSI",
.detect = macscsi_detect,
.release = macscsi_release,
.info = macscsi_info,
.queuecommand = macscsi_queue_command,
.eh_abort_handler = macscsi_abort,
.eh_bus_reset_handler = macscsi_bus_reset,
.can_queue = CAN_QUEUE,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.use_clustering = DISABLE_CLUSTERING
};
#include "scsi_module.c"
| gpl-2.0 |
Maroc-OS/android_kernel_bn_encore | arch/sh/mm/flush-sh4.c | 4587 | 2601 | #include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
* Write back the dirty D-caches, but not invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
/*
 * Write back (without invalidating) every dirty D-cache line that
 * overlaps [start, start + size). Issues one ocbwb per cache line;
 * the main loop is unrolled 8x to amortize loop overhead.
 */
static void sh4__flush_wback_region(void *start, int size)
{
	reg_size_t addr, line, lines, last;

	/* Round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES-1);
	last = (addr + size + L1_CACHE_BYTES-1)
	       & ~(L1_CACHE_BYTES-1);
	lines = (last - line) / L1_CACHE_BYTES;

	for (; lines >= 8; lines -= 8) {
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
		__ocbwb(line); line += L1_CACHE_BYTES;
	}
	/* Remaining (< 8) lines */
	for (; lines != 0; lines--) {
		__ocbwb(line);
		line += L1_CACHE_BYTES;
	}
}
/*
* Write back the dirty D-caches and invalidate them.
*
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
/*
 * Write back AND invalidate every D-cache line that overlaps
 * [start, start + size). One ocbp per line, main loop unrolled 8x.
 */
static void sh4__flush_purge_region(void *start, int size)
{
	reg_size_t addr, line, lines, last;

	/* Round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES-1);
	last = (addr + size + L1_CACHE_BYTES-1)
	       & ~(L1_CACHE_BYTES-1);
	lines = (last - line) / L1_CACHE_BYTES;

	for (; lines >= 8; lines -= 8) {
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
		__ocbp(line); line += L1_CACHE_BYTES;
	}
	/* Remaining (< 8) lines */
	for (; lines != 0; lines--) {
		__ocbp(line);
		line += L1_CACHE_BYTES;
	}
}
/*
* No write back please
*/
/*
 * Invalidate (discard, no write back) every D-cache line that
 * overlaps [start, start + size). One ocbi per line, unrolled 8x.
 */
static void sh4__flush_invalidate_region(void *start, int size)
{
	reg_size_t addr, line, lines, last;

	/* Round the region out to whole cache lines */
	addr = register_align(start);
	line = addr & ~(L1_CACHE_BYTES-1);
	last = (addr + size + L1_CACHE_BYTES-1)
	       & ~(L1_CACHE_BYTES-1);
	lines = (last - line) / L1_CACHE_BYTES;

	for (; lines >= 8; lines -= 8) {
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
		__ocbi(line); line += L1_CACHE_BYTES;
	}
	/* Remaining (< 8) lines */
	for (; lines != 0; lines--) {
		__ocbi(line);
		line += L1_CACHE_BYTES;
	}
}
/* Install the SH-4 implementations as the global cache flush ops */
void __init sh4__flush_region_init(void)
{
__flush_wback_region = sh4__flush_wback_region;
__flush_invalidate_region = sh4__flush_invalidate_region;
__flush_purge_region = sh4__flush_purge_region;
}
| gpl-2.0 |
cooldroid/android_kernel_oneplus_msm8974 | arch/arm/mach-imx/mach-mx53_evk.c | 4843 | 4614 | /*
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2010 Yong Shen. <Yong.Shen@linaro.org>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/iomux-mx53.h>
#define MX53_EVK_FEC_PHY_RST IMX_GPIO_NR(7, 6)
#define EVK_ECSPI1_CS0 IMX_GPIO_NR(2, 30)
#define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19)
#define MX53EVK_LED IMX_GPIO_NR(7, 7)
#include "devices-imx53.h"
/* Pad mux table for the MX53 EVK: UART1-3 (UART2 with RTS/CTS),
 * ECSPI1 plus its two GPIO chip selects, and the LED GPIO. */
static iomux_v3_cfg_t mx53_evk_pads[] = {
MX53_PAD_CSI0_DAT10__UART1_TXD_MUX,
MX53_PAD_CSI0_DAT11__UART1_RXD_MUX,
MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX,
MX53_PAD_PATA_DMARQ__UART2_TXD_MUX,
MX53_PAD_PATA_DIOR__UART2_RTS,
MX53_PAD_PATA_INTRQ__UART2_CTS,
MX53_PAD_PATA_CS_0__UART3_TXD_MUX,
MX53_PAD_PATA_CS_1__UART3_RXD_MUX,
MX53_PAD_EIM_D16__ECSPI1_SCLK,
MX53_PAD_EIM_D17__ECSPI1_MISO,
MX53_PAD_EIM_D18__ECSPI1_MOSI,
/* ecspi chip select lines */
MX53_PAD_EIM_EB2__GPIO2_30,
MX53_PAD_EIM_D19__GPIO3_19,
/* LED */
MX53_PAD_PATA_DA_1__GPIO7_7,
};
/* UART2 is the only port wired for hardware flow control */
static const struct imxuart_platform_data mx53_evk_uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
/* Single board LED, blinking on the heartbeat trigger by default */
static const struct gpio_led mx53evk_leds[] __initconst = {
{
.name = "green",
.default_trigger = "heartbeat",
.gpio = MX53EVK_LED,
},
};
static const struct gpio_led_platform_data mx53evk_leds_data __initconst = {
.leds = mx53evk_leds,
.num_leds = ARRAY_SIZE(mx53evk_leds),
};
/* Register UART1..UART3; only UART2 (index 1) gets flow-control pdata */
static inline void mx53_evk_init_uart(void)
{
	imx53_add_imx_uart(0, NULL);
	imx53_add_imx_uart(1, &mx53_evk_uart_pdata);
	imx53_add_imx_uart(2, NULL);
}
/* Both I2C busses run at the standard 100 kHz rate */
static const struct imxi2c_platform_data mx53_evk_i2c_data __initconst = {
	.bitrate = 100000,
};
/*
 * Pulse the FEC PHY reset line: drive it low on request, hold it for
 * at least 1 ms, then release it (high) so the PHY comes out of reset.
 * The GPIO is deliberately never freed -- the line must stay high for
 * as long as the board is up.
 */
static inline void mx53_evk_fec_reset(void)
{
	int ret;

	/* reset FEC PHY */
	ret = gpio_request_one(MX53_EVK_FEC_PHY_RST, GPIOF_OUT_INIT_LOW,
			       "fec-phy-reset");
	if (ret) {
		/* pr_err() replaces the old printk(KERN_ERR"..." with its
		 * missing space after the log-level marker */
		pr_err("failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
		return;
	}
	msleep(1);
	gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
/* The on-board Ethernet PHY is wired to the FEC in RMII mode */
static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
	.phy = PHY_INTERFACE_MODE_RMII,
};
/*
 * SPI devices on bus 0 (ECSPI1).  chip_select 1 maps to EVK_ECSPI1_CS1
 * through the GPIO chipselect table passed to the spi-imx master.
 */
static struct spi_board_info mx53_evk_spi_board_info[] __initdata = {
	{
		.modalias = "mtd_dataflash",
		.max_speed_hz = 25000000,
		.bus_num = 0,
		.chip_select = 1,
		.mode = SPI_MODE_0,
		.platform_data = NULL,
	},
};
/* GPIO chip-select lines handed to the spi-imx master driver */
static int mx53_evk_spi_cs[] = {
	EVK_ECSPI1_CS0,
	EVK_ECSPI1_CS1,
};

static const struct spi_imx_master mx53_evk_spi_data __initconst = {
	.chipselect = mx53_evk_spi_cs,
	.num_chipselect = ARRAY_SIZE(mx53_evk_spi_cs),
};
/*
 * Apply the board pad mux table.  Non-static so related board files can
 * reuse the common EVK pin setup.
 */
void __init imx53_evk_common_init(void)
{
	mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
					 ARRAY_SIZE(mx53_evk_pads));
}
/*
 * Board init (.init_machine): register every on-board device.
 * Registration order matters for the FEC -- the PHY is pulsed out of
 * reset before the FEC platform device is added.
 */
static void __init mx53_evk_board_init(void)
{
	imx53_soc_init();
	imx53_evk_common_init();

	mx53_evk_init_uart();
	mx53_evk_fec_reset();
	imx53_add_fec(&mx53_evk_fec_pdata);

	imx53_add_imx_i2c(0, &mx53_evk_i2c_data);
	imx53_add_imx_i2c(1, &mx53_evk_i2c_data);
	imx53_add_sdhci_esdhc_imx(0, NULL);
	imx53_add_sdhci_esdhc_imx(1, NULL);

	/* SPI board info must be registered before the master is added */
	spi_register_board_info(mx53_evk_spi_board_info,
				ARRAY_SIZE(mx53_evk_spi_board_info));
	imx53_add_ecspi(0, &mx53_evk_spi_data);
	imx53_add_imx2_wdt(0, NULL);
	gpio_led_register_device(-1, &mx53evk_leds_data);
}
/*
 * Clock tree init; arguments are the board's reference clock rates in Hz
 * (presumably ckil/osc/ckih1/ckih2 -- confirm against mx53_clocks_init()).
 */
static void __init mx53_evk_timer_init(void)
{
	mx53_clocks_init(32768, 24000000, 22579200, 0);
}

static struct sys_timer mx53_evk_timer = {
	.init = mx53_evk_timer_init,
};
/*
 * Machine descriptor: hooks this board's init callbacks into the generic
 * ARM boot sequence for machine type MX53_EVK.
 */
MACHINE_START(MX53_EVK, "Freescale MX53 EVK Board")
	.map_io = mx53_map_io,
	.init_early = imx53_init_early,
	.init_irq = mx53_init_irq,
	.handle_irq = imx53_handle_irq,
	.timer = &mx53_evk_timer,
	.init_machine = mx53_evk_board_init,
	.restart = mxc_restart,
MACHINE_END
| gpl-2.0 |
I8552-CM/android_kernel_delos3geur | drivers/gpu/drm/drm_dma.c | 5099 | 4131 | /**
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include "drmP.h"
/**
 * Initialize the DMA data.
 *
 * \param dev DRM device.
 * \return zero on success or a negative value on failure.
 *
 * Allocate and initialize a drm_device_dma structure.
 */
int drm_dma_setup(struct drm_device *dev)
{
	/*
	 * kzalloc() returns zeroed memory, so every dma->bufs[] entry is
	 * already cleared; the former explicit memset loop over the array
	 * (and its loop counter) was redundant and has been dropped.
	 */
	dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
	if (!dev->dma)
		return -ENOMEM;

	return 0;
}
/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void drm_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i, j;

	/* Nothing was ever set up (or already torn down) */
	if (!dma)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			/* Release each PCI-consistent segment of this order */
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					drm_pci_free(dev, dma->bufs[i].seglist[j]);
				}
			}
			kfree(dma->bufs[i].seglist);
		}
		if (dma->bufs[i].buf_count) {
			/* Free the driver-private data of every buffer,
			 * then the buffer list itself */
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				kfree(dma->bufs[i].buflist[j].dev_private);
			}
			kfree(dma->bufs[i].buflist);
		}
	}

	kfree(dma->buflist);
	kfree(dma->pagelist);
	kfree(dev->dma);
	/* NULL the pointer so a second takedown becomes a no-op */
	dev->dma = NULL;
}
/**
 * Free a buffer.
 *
 * \param dev DRM device.
 * \param buf buffer to free.
 *
 * Resets the fields of \p buf.
 */
void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
	if (buf == NULL)
		return;

	/* Mark the buffer idle and unowned. */
	buf->used = 0;
	buf->file_priv = NULL;
	buf->pending = 0;
	buf->waiting = 0;

	/* Wake anyone blocked waiting for a free buffer (queued-DMA drivers) */
	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
	    waitqueue_active(&buf->dma_wait))
		wake_up_interruptible(&buf->dma_wait);
}
/**
 * Reclaim the buffers.
 *
 * \param file_priv DRM file private.
 *
 * Frees each buffer associated with \p file_priv not already on the hardware.
 */
void drm_core_reclaim_buffers(struct drm_device *dev,
			      struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	if (!dma)
		return;

	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->file_priv == file_priv) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				/* Idle buffer: release it right away */
				drm_free_buffer(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				/* Queued but not yet on the hardware:
				 * flag it so it is freed once it drains */
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);
| gpl-2.0 |
faux123/msm8660-htc-ics | drivers/hid/hid-cypress.c | 6123 | 3943 | /*
* HID driver for some cypress "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/module.h>
#include "hid-ids.h"
/* Quirk bits kept in the hid drvdata word */
#define CP_RDESC_SWAPPED_MIN_MAX	0x01	/* descriptor has usage min/max reversed */
#define CP_2WHEEL_MOUSE_HACK		0x02	/* device supports wheel-redirect hack */
#define CP_2WHEEL_MOUSE_HACK_ON		0x04	/* runtime flag: hack currently engaged */
/*
 * Some USB barcode readers from cypress have usage min and usage max in
 * the wrong order
 *
 * Scan the report descriptor for 0x29 (Usage Maximum) appearing two bytes
 * before 0x19 (Usage Minimum) and swap both the item tags and their data
 * bytes, restoring the correct min-before-max ordering.
 */
static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
	unsigned int i;

	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
		return rdesc;

	/*
	 * *rsize is unsigned: for a descriptor shorter than 4 bytes the
	 * expression "*rsize - 4" wraps around to a huge value and the
	 * loop would run far past the end of rdesc.  Bail out early.
	 */
	if (*rsize < 4)
		return rdesc;

	for (i = 0; i < *rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			__u8 tmp;

			rdesc[i] = 0x19;
			rdesc[i + 2] = 0x29;
			tmp = rdesc[i + 3];
			rdesc[i + 3] = rdesc[i + 1];
			rdesc[i + 1] = tmp;
		}
	return rdesc;
}
/*
 * Input-mapping hook for the two-wheel mouse hack: advertise REL_HWHEEL
 * alongside the vertical wheel, and hide the mode button (HID usage
 * 0x00090005, i.e. Button page usage 5) from the normal mapping -- it is
 * handled privately in cp_event().
 */
static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (!(quirks & CP_2WHEEL_MOUSE_HACK))
		return 0;

	if (usage->type == EV_REL && usage->code == REL_WHEEL)
		set_bit(REL_HWHEEL, *bit);
	if (usage->hid == 0x00090005)
		return -1;	/* negative: tell hid core not to map this usage */

	return 0;
}
/*
 * Event hook implementing the two-wheel mouse hack: while the mode button
 * (usage 0x00090005) is held, vertical wheel movement is re-reported as
 * horizontal wheel movement.  Returns 1 when the event was consumed here.
 */
static int cp_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type || !(quirks & CP_2WHEEL_MOUSE_HACK))
		return 0;

	/* Track press/release of the mode button in the drvdata quirks word */
	if (usage->hid == 0x00090005) {
		if (value)
			quirks |= CP_2WHEEL_MOUSE_HACK_ON;
		else
			quirks &= ~CP_2WHEEL_MOUSE_HACK_ON;
		hid_set_drvdata(hdev, (void *)quirks);
		return 1;
	}

	/* Redirect vertical wheel to horizontal while the hack is engaged */
	if (usage->code == REL_WHEEL && (quirks & CP_2WHEEL_MOUSE_HACK_ON)) {
		struct input_dev *input = field->hidinput->input;

		input_event(input, usage->type, REL_HWHEEL, value);
		return 1;
	}

	return 0;
}
/*
 * Probe: stash the per-device quirk bits, then run the standard
 * parse + hw-start sequence.
 */
static int cp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	/* Make the quirk bits visible to the other callbacks. */
	hid_set_drvdata(hdev, (void *)id->driver_data);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		hid_err(hdev, "hw start failed\n");

	return ret;
}
/* Devices handled by this driver, each with the quirk bits it needs */
static const struct hid_device_id cp_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
		.driver_data = CP_2WHEEL_MOUSE_HACK },
	{ }
};
MODULE_DEVICE_TABLE(hid, cp_devices);

/* Driver callbacks wired into the HID core */
static struct hid_driver cp_driver = {
	.name = "cypress",
	.id_table = cp_devices,
	.report_fixup = cp_report_fixup,
	.input_mapped = cp_input_mapped,
	.event = cp_event,
	.probe = cp_probe,
};
/* Module plumbing: register/unregister the driver with the HID core */
static int __init cp_init(void)
{
	return hid_register_driver(&cp_driver);
}

static void __exit cp_exit(void)
{
	hid_unregister_driver(&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);

MODULE_LICENSE("GPL");
| gpl-2.0 |
blazingwolf/fireball-ics-3.0.8-2.17.605.2 | drivers/media/video/saa7164/saa7164-fw.c | 8171 | 16461 | /*
* Driver for the NXP SAA7164 PCIe bridge
*
* Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include "saa7164.h"
/*
 * Firmware blob name and expected byte size per chip revision.
 * NOTE(review): both revisions currently reference the same blob.
 */
#define SAA7164_REV2_FIRMWARE		"NXP7164-2010-03-10.1.fw"
#define SAA7164_REV2_FIRMWARE_SIZE	4019072

#define SAA7164_REV3_FIRMWARE		"NXP7164-2010-03-10.1.fw"
#define SAA7164_REV3_FIRMWARE_SIZE	4019072
/*
 * Header that prefixes each image inside the firmware file.  The size
 * fields appear to be in 16-byte units -- the loader computes byte
 * counts as (firmwaresize + bslsize) * 16.
 */
struct fw_header {
	u32 firmwaresize;
	u32 bslsize;
	u32 reserved;	/* doubles as second-loader file size (in 16-byte units) */
	u32 version;
};
/*
 * Poll 'reg' until the firmware raises the ack bit (bit 0).
 * Returns 0 on ack, -EBUSY once the timeout budget is spent.
 */
int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
{
	u32 remaining = SAA_DEVICE_TIMEOUT;

	for (;;) {
		if (saa7164_readl(reg) & 0x01)
			return 0;

		remaining -= 10;
		if (remaining == 0) {
			printk(KERN_ERR "%s() timeout (no d/l ack)\n",
				__func__);
			return -EBUSY;
		}
		msleep(100);
	}
}
/*
 * Poll 'reg' until the firmware clears the handshake bit (bit 0).
 * Returns 0 once clear, -EBUSY when the timeout budget runs out.
 */
int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
{
	u32 remaining = SAA_DEVICE_TIMEOUT;

	for (;;) {
		if (!(saa7164_readl(reg) & 0x01))
			return 0;

		remaining -= 10;
		if (remaining == 0) {
			printk(KERN_ERR "%s() timeout (no d/l clr)\n",
				__func__);
			return -EBUSY;
		}
		msleep(100);
	}
}
/* TODO: move dlflags into dev-> and change to write/readl/b */
/* TODO: Excessive levels of debug */
/*
 * Copy a firmware image into device memory in dstsize-sized chunks,
 * handshaking with the on-device loader through the five consecutive
 * 32-bit flag registers starting at 'dlflags' (download, download-ack,
 * data-ready, data-ready-ack, boot-error), then wait for the device to
 * report the image has started booting.
 * Returns 0 on success or a negative errno on failure.
 */
int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
	u32 dlflags, u8 *dst, u32 dstsize)
{
	u32 reg, timeout, offset;
	u8 *srcbuf = NULL;
	int ret;

	u32 dlflag = dlflags;
	u32 dlflag_ack = dlflag + 4;
	u32 drflag = dlflag_ack + 4;
	u32 drflag_ack = drflag + 4;
	u32 bleflag = drflag_ack + 4;

	dprintk(DBGLVL_FW,
		"%s(image=%p, size=%d, flags=0x%x, dst=%p, dstsize=0x%x)\n",
		__func__, src, srcsize, dlflags, dst, dstsize);

	if ((src == NULL) || (dst == NULL)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Reject oversized images *before* allocating the 4 MiB bounce
	 * buffer (previously the check ran only after the allocation,
	 * wasting a large kzalloc on inputs that were about to fail).
	 */
	if (srcsize > (4 * 1048576)) {
		ret = -ENOMEM;
		goto out;
	}

	srcbuf = kzalloc(4 * 1048576, GFP_KERNEL);
	if (NULL == srcbuf) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(srcbuf, src, srcsize);

	dprintk(DBGLVL_FW, "%s() dlflag = 0x%x\n", __func__, dlflag);
	dprintk(DBGLVL_FW, "%s() dlflag_ack = 0x%x\n", __func__, dlflag_ack);
	dprintk(DBGLVL_FW, "%s() drflag = 0x%x\n", __func__, drflag);
	dprintk(DBGLVL_FW, "%s() drflag_ack = 0x%x\n", __func__, drflag_ack);
	dprintk(DBGLVL_FW, "%s() bleflag = 0x%x\n", __func__, bleflag);

	reg = saa7164_readl(dlflag);
	dprintk(DBGLVL_FW, "%s() dlflag (0x%x)= 0x%x\n", __func__, dlflag, reg);
	if (reg == 1)
		dprintk(DBGLVL_FW,
			"%s() Download flag already set, please reboot\n",
			__func__);

	/* Indicate download start */
	saa7164_writel(dlflag, 1);
	ret = saa7164_dl_wait_ack(dev, dlflag_ack);
	if (ret < 0)
		goto out;

	/* Ack download start, then wait for wait */
	saa7164_writel(dlflag, 0);
	ret = saa7164_dl_wait_clr(dev, dlflag_ack);
	if (ret < 0)
		goto out;

	/* Deal with the raw firmware, in the appropriate chunk size */
	for (offset = 0; srcsize > dstsize;
		srcsize -= dstsize, offset += dstsize) {

		dprintk(DBGLVL_FW, "%s() memcpy %d\n", __func__, dstsize);
		memcpy(dst, srcbuf + offset, dstsize);

		/* Flag the data as ready */
		saa7164_writel(drflag, 1);
		ret = saa7164_dl_wait_ack(dev, drflag_ack);
		if (ret < 0)
			goto out;

		/* Wait for indication data was received */
		saa7164_writel(drflag, 0);
		ret = saa7164_dl_wait_clr(dev, drflag_ack);
		if (ret < 0)
			goto out;
	}

	dprintk(DBGLVL_FW, "%s() memcpy(l) %d\n", __func__, dstsize);
	/* Write last block to the device */
	memcpy(dst, srcbuf + offset, srcsize);

	/* Flag the data as ready */
	saa7164_writel(drflag, 1);
	ret = saa7164_dl_wait_ack(dev, drflag_ack);
	if (ret < 0)
		goto out;

	saa7164_writel(drflag, 0);

	/* Give the device ~600ms to report that the image is booting */
	timeout = 0;
	while (saa7164_readl(bleflag) != SAA_DEVICE_IMAGE_BOOTING) {
		if (saa7164_readl(bleflag) & SAA_DEVICE_IMAGE_CORRUPT) {
			printk(KERN_ERR "%s() image corrupt\n", __func__);
			ret = -EBUSY;
			goto out;
		}

		if (saa7164_readl(bleflag) & SAA_DEVICE_MEMORY_CORRUPT) {
			printk(KERN_ERR "%s() device memory corrupt\n",
				__func__);
			ret = -EBUSY;
			goto out;
		}

		msleep(10); /* Checkpatch throws a < 20ms warning */
		if (timeout++ > 60)
			break;
	}

	printk(KERN_INFO "%s() Image downloaded, booting...\n", __func__);

	ret = saa7164_dl_wait_clr(dev, drflag_ack);
	if (ret < 0)
		goto out;

	printk(KERN_INFO "%s() Image booted successfully.\n", __func__);
	ret = 0;

out:
	kfree(srcbuf);
	return ret;
}
/* TODO: Excessive debug */
/* Load the firmware. Optionally it can be in ROM or newer versions
 * can be on disk, saving the expense of the ROM hardware. */
/*
 * Overall flow: wait for the on-board first- and second-stage loaders to
 * report SAA_DEVICE_IMAGE_BOOTING, then read the running firmware version.
 * If no firmware is running (version == 0), fetch the blob from disk with
 * request_firmware(), optionally push a newer second-stage bootloader, and
 * finally push the firmware image itself via saa7164_downloadimage().
 * Returns 0 on success or a negative errno on failure.
 */
int saa7164_downloadfirmware(struct saa7164_dev *dev)
{
	/* u32 second_timeout = 60 * SAA_DEVICE_TIMEOUT; */
	u32 tmp, filesize, version, err_flags, first_timeout, fwlength;
	u32 second_timeout, updatebootloader = 1, bootloadersize = 0;
	const struct firmware *fw = NULL;
	struct fw_header *hdr, *boothdr = NULL, *fwhdr;
	u32 bootloaderversion = 0, fwloadersize;
	u8 *bootloaderoffset = NULL, *fwloaderoffset;
	char *fwname;
	int ret;

	dprintk(DBGLVL_FW, "%s()\n", __func__);

	/* Pick the blob matching the chip revision */
	if (saa7164_boards[dev->board].chiprev == SAA7164_CHIP_REV2) {
		fwname = SAA7164_REV2_FIRMWARE;
		fwlength = SAA7164_REV2_FIRMWARE_SIZE;
	} else {
		fwname = SAA7164_REV3_FIRMWARE;
		fwlength = SAA7164_REV3_FIRMWARE_SIZE;
	}

	version = saa7164_getcurrentfirmwareversion(dev);

	if (version == 0x00) {
		/* No firmware running: wait for the first-stage loader */
		second_timeout = 100;
		first_timeout = 100;
		err_flags = saa7164_readl(SAA_BOOTLOADERERROR_FLAGS);
		dprintk(DBGLVL_FW, "%s() err_flags = %x\n",
			__func__, err_flags);

		while (err_flags != SAA_DEVICE_IMAGE_BOOTING) {
			dprintk(DBGLVL_FW, "%s() err_flags = %x\n",
				__func__, err_flags);
			msleep(10); /* Checkpatch throws a < 20ms warning */

			if (err_flags & SAA_DEVICE_IMAGE_CORRUPT) {
				printk(KERN_ERR "%s() firmware corrupt\n",
					__func__);
				break;
			}
			if (err_flags & SAA_DEVICE_MEMORY_CORRUPT) {
				printk(KERN_ERR "%s() device memory corrupt\n",
					__func__);
				break;
			}
			if (err_flags & SAA_DEVICE_NO_IMAGE) {
				printk(KERN_ERR "%s() no first image\n",
					__func__);
				break;
			}
			if (err_flags & SAA_DEVICE_IMAGE_SEARCHING) {
				first_timeout -= 10;
				if (first_timeout == 0) {
					printk(KERN_ERR
						"%s() no first image\n",
						__func__);
					break;
				}
			} else if (err_flags & SAA_DEVICE_IMAGE_LOADING) {
				second_timeout -= 10;
				if (second_timeout == 0) {
					printk(KERN_ERR
						"%s() FW load time exceeded\n",
						__func__);
					break;
				}
			} else {
				second_timeout -= 10;
				if (second_timeout == 0) {
					printk(KERN_ERR
						"%s() Unknown bootloader flags 0x%x\n",
						__func__, err_flags);
					break;
				}
			}

			err_flags = saa7164_readl(SAA_BOOTLOADERERROR_FLAGS);
		} /* While != Booting */

		if (err_flags == SAA_DEVICE_IMAGE_BOOTING) {
			dprintk(DBGLVL_FW, "%s() Loader 1 has loaded.\n",
				__func__);
			/* Now wait for the second-stage loader */
			first_timeout = SAA_DEVICE_TIMEOUT;
			/* NOTE(review): the 60*TIMEOUT value is immediately
			 * overwritten with 100 -- presumably deliberate,
			 * matching the first-stage budget above */
			second_timeout = 60 * SAA_DEVICE_TIMEOUT;
			second_timeout = 100;

			err_flags = saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS);
			dprintk(DBGLVL_FW, "%s() err_flags2 = %x\n",
				__func__, err_flags);
			while (err_flags != SAA_DEVICE_IMAGE_BOOTING) {
				dprintk(DBGLVL_FW, "%s() err_flags2 = %x\n",
					__func__, err_flags);
				msleep(10); /* Checkpatch throws a < 20ms warning */

				if (err_flags & SAA_DEVICE_IMAGE_CORRUPT) {
					printk(KERN_ERR
						"%s() firmware corrupt\n",
						__func__);
					break;
				}
				if (err_flags & SAA_DEVICE_MEMORY_CORRUPT) {
					printk(KERN_ERR
						"%s() device memory corrupt\n",
						__func__);
					break;
				}
				if (err_flags & SAA_DEVICE_NO_IMAGE) {
					printk(KERN_ERR "%s() no first image\n",
						__func__);
					break;
				}
				if (err_flags & SAA_DEVICE_IMAGE_SEARCHING) {
					first_timeout -= 10;
					if (first_timeout == 0) {
						printk(KERN_ERR
							"%s() no second image\n",
							__func__);
						break;
					}
				} else if (err_flags &
					SAA_DEVICE_IMAGE_LOADING) {
					second_timeout -= 10;
					if (second_timeout == 0) {
						printk(KERN_ERR
							"%s() FW load time exceeded\n",
							__func__);
						break;
					}
				} else {
					second_timeout -= 10;
					if (second_timeout == 0) {
						printk(KERN_ERR
							"%s() Unknown bootloader flags 0x%x\n",
							__func__, err_flags);
						break;
					}
				}

				err_flags =
					saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS);
			} /* err_flags != SAA_DEVICE_IMAGE_BOOTING */

			dprintk(DBGLVL_FW, "%s() Loader flags 1:0x%x 2:0x%x.\n",
				__func__,
				saa7164_readl(SAA_BOOTLOADERERROR_FLAGS),
				saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS));

		} /* err_flags == SAA_DEVICE_IMAGE_BOOTING */

		/* It's possible for both firmwares to have booted,
		 * but that doesn't mean they've finished booting yet.
		 */
		if ((saa7164_readl(SAA_BOOTLOADERERROR_FLAGS) ==
			SAA_DEVICE_IMAGE_BOOTING) &&
			(saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS) ==
			SAA_DEVICE_IMAGE_BOOTING)) {
			dprintk(DBGLVL_FW, "%s() Loader 2 has loaded.\n",
				__func__);

			/* Poll until the firmware reports a version number */
			first_timeout = SAA_DEVICE_TIMEOUT;
			while (first_timeout) {
				msleep(10); /* Checkpatch throws a < 20ms warning */

				version =
					saa7164_getcurrentfirmwareversion(dev);
				if (version) {
					dprintk(DBGLVL_FW,
						"%s() All f/w loaded successfully\n",
						__func__);
					break;
				} else {
					first_timeout -= 10;
					if (first_timeout == 0) {
						printk(KERN_ERR
							"%s() FW did not boot\n",
							__func__);
						break;
					}
				}
			}
		}
		version = saa7164_getcurrentfirmwareversion(dev);
	} /* version == 0 */

	/* Has the firmware really booted? */
	if ((saa7164_readl(SAA_BOOTLOADERERROR_FLAGS) ==
		SAA_DEVICE_IMAGE_BOOTING) &&
		(saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS) ==
		SAA_DEVICE_IMAGE_BOOTING) && (version == 0)) {

		printk(KERN_ERR
			"%s() The firmware hung, probably bad firmware\n",
			__func__);

		/* Tell the second stage loader we have a deadlock */
		saa7164_writel(SAA_DEVICE_DEADLOCK_DETECTED_OFFSET,
			SAA_DEVICE_DEADLOCK_DETECTED);

		saa7164_getfirmwarestatus(dev);

		return -ENOMEM;
	}

	/* Version word layout used throughout: major/minor/patch/build */
	dprintk(DBGLVL_FW, "Device has Firmware Version %d.%d.%d.%d\n",
		(version & 0x0000fc00) >> 10,
		(version & 0x000003e0) >> 5,
		(version & 0x0000001f),
		(version & 0xffff0000) >> 16);

	/* Load the firmwware from the disk if required */
	if (version == 0) {

		printk(KERN_INFO "%s() Waiting for firmware upload (%s)\n",
			__func__, fwname);

		ret = request_firmware(&fw, fwname, &dev->pci->dev);
		if (ret) {
			printk(KERN_ERR "%s() Upload failed. "
				"(file not found?)\n", __func__);
			return -ENOMEM;
		}

		printk(KERN_INFO "%s() firmware read %Zu bytes.\n",
			__func__, fw->size);

		if (fw->size != fwlength) {
			printk(KERN_ERR "xc5000: firmware incorrect size\n");
			ret = -ENOMEM;
			goto out;
		}

		printk(KERN_INFO "%s() firmware loaded.\n", __func__);

		hdr = (struct fw_header *)fw->data;
		printk(KERN_INFO "Firmware file header part 1:\n");
		printk(KERN_INFO " .FirmwareSize = 0x%x\n", hdr->firmwaresize);
		printk(KERN_INFO " .BSLSize = 0x%x\n", hdr->bslsize);
		printk(KERN_INFO " .Reserved = 0x%x\n", hdr->reserved);
		printk(KERN_INFO " .Version = 0x%x\n", hdr->version);

		/* Retrieve bootloader if reqd */
		if ((hdr->firmwaresize == 0) && (hdr->bslsize == 0))
			/* Second bootloader in the firmware file */
			filesize = hdr->reserved * 16;
		else
			filesize = (hdr->firmwaresize + hdr->bslsize) *
				16 + sizeof(struct fw_header);

		printk(KERN_INFO "%s() SecBootLoader.FileSize = %d\n",
			__func__, filesize);

		/* Get bootloader (if reqd) and firmware header */
		if ((hdr->firmwaresize == 0) && (hdr->bslsize == 0)) {
			/* Second boot loader is required */

			/* Get the loader header */
			boothdr = (struct fw_header *)(fw->data +
				sizeof(struct fw_header));

			bootloaderversion =
				saa7164_readl(SAA_DEVICE_2ND_VERSION);
			dprintk(DBGLVL_FW, "Onboard BootLoader:\n");
			dprintk(DBGLVL_FW, "->Flag 0x%x\n",
				saa7164_readl(SAA_BOOTLOADERERROR_FLAGS));
			dprintk(DBGLVL_FW, "->Ack 0x%x\n",
				saa7164_readl(SAA_DATAREADY_FLAG_ACK));
			dprintk(DBGLVL_FW, "->FW Version 0x%x\n", version);
			dprintk(DBGLVL_FW, "->Loader Version 0x%x\n",
				bootloaderversion);

			/* Skip the bootloader update when the on-board
			 * loader version already matches the file's */
			if ((saa7164_readl(SAA_BOOTLOADERERROR_FLAGS) ==
				0x03) && (saa7164_readl(SAA_DATAREADY_FLAG_ACK)
				== 0x00) && (version == 0x00)) {

				dprintk(DBGLVL_FW, "BootLoader version in "
					"rom %d.%d.%d.%d\n",
					(bootloaderversion & 0x0000fc00) >> 10,
					(bootloaderversion & 0x000003e0) >> 5,
					(bootloaderversion & 0x0000001f),
					(bootloaderversion & 0xffff0000) >> 16
					);
				dprintk(DBGLVL_FW, "BootLoader version "
					"in file %d.%d.%d.%d\n",
					(boothdr->version & 0x0000fc00) >> 10,
					(boothdr->version & 0x000003e0) >> 5,
					(boothdr->version & 0x0000001f),
					(boothdr->version & 0xffff0000) >> 16
					);

				if (bootloaderversion == boothdr->version)
					updatebootloader = 0;
			}

			/* Calculate offset to firmware header */
			tmp = (boothdr->firmwaresize + boothdr->bslsize) * 16 +
				(sizeof(struct fw_header) +
				sizeof(struct fw_header));

			fwhdr = (struct fw_header *)(fw->data+tmp);
		} else {
			/* No second boot loader */
			fwhdr = hdr;
		}

		dprintk(DBGLVL_FW, "Firmware version in file %d.%d.%d.%d\n",
			(fwhdr->version & 0x0000fc00) >> 10,
			(fwhdr->version & 0x000003e0) >> 5,
			(fwhdr->version & 0x0000001f),
			(fwhdr->version & 0xffff0000) >> 16
			);

		if (version == fwhdr->version) {
			/* No download, firmware already on board */
			ret = 0;
			goto out;
		}

		if ((hdr->firmwaresize == 0) && (hdr->bslsize == 0)) {
			if (updatebootloader) {
				/* Get ready to upload the bootloader */
				bootloadersize = (boothdr->firmwaresize +
					boothdr->bslsize) * 16 +
					sizeof(struct fw_header);

				bootloaderoffset = (u8 *)(fw->data +
					sizeof(struct fw_header));

				dprintk(DBGLVL_FW, "bootloader d/l starts.\n");
				printk(KERN_INFO "%s() FirmwareSize = 0x%x\n",
					__func__, boothdr->firmwaresize);
				printk(KERN_INFO "%s() BSLSize = 0x%x\n",
					__func__, boothdr->bslsize);
				printk(KERN_INFO "%s() Reserved = 0x%x\n",
					__func__, boothdr->reserved);
				printk(KERN_INFO "%s() Version = 0x%x\n",
					__func__, boothdr->version);
				ret = saa7164_downloadimage(
					dev,
					bootloaderoffset,
					bootloadersize,
					SAA_DOWNLOAD_FLAGS,
					dev->bmmio + SAA_DEVICE_DOWNLOAD_OFFSET,
					SAA_DEVICE_BUFFERBLOCKSIZE);
				if (ret < 0) {
					printk(KERN_ERR
						"bootloader d/l has failed\n");
					goto out;
				}
				dprintk(DBGLVL_FW,
					"bootloader download complete.\n");

			}

			/* Then the firmware image that follows the loader */
			printk(KERN_ERR "starting firmware download(2)\n");
			bootloadersize = (boothdr->firmwaresize +
				boothdr->bslsize) * 16 +
				sizeof(struct fw_header);

			bootloaderoffset =
				(u8 *)(fw->data + sizeof(struct fw_header));

			fwloaderoffset = bootloaderoffset + bootloadersize;

			/* TODO: fix this bounds overrun here with old f/ws */
			fwloadersize = (fwhdr->firmwaresize + fwhdr->bslsize) *
				16 + sizeof(struct fw_header);

			ret = saa7164_downloadimage(
				dev,
				fwloaderoffset,
				fwloadersize,
				SAA_DEVICE_2ND_DOWNLOADFLAG_OFFSET,
				dev->bmmio + SAA_DEVICE_2ND_DOWNLOAD_OFFSET,
				SAA_DEVICE_2ND_BUFFERBLOCKSIZE);
			if (ret < 0) {
				printk(KERN_ERR "firmware download failed\n");
				goto out;
			}
			printk(KERN_ERR "firmware download complete.\n");

		} else {

			/* No bootloader update reqd, download firmware only */
			printk(KERN_ERR "starting firmware download(3)\n");

			ret = saa7164_downloadimage(
				dev,
				(u8 *)fw->data,
				fw->size,
				SAA_DOWNLOAD_FLAGS,
				dev->bmmio + SAA_DEVICE_DOWNLOAD_OFFSET,
				SAA_DEVICE_BUFFERBLOCKSIZE);
			if (ret < 0) {
				printk(KERN_ERR "firmware download failed\n");
				goto out;
			}
			printk(KERN_ERR "firmware download complete.\n");
		}
	}

	dev->firmwareloaded = 1;
	ret = 0;

out:
	/* release_firmware(NULL) is a no-op on the already-running path */
	release_firmware(fw);
	return ret;
}
| gpl-2.0 |
bgamari/linux | drivers/isdn/hardware/eicon/message.c | 8171 | 446143 | /*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "capi20.h"
#include "divacapi.h"
#include "mdm_msg.h"
#include "divasync.h"
#define FILE_ "MESSAGE.C"
#define dprintf
/*------------------------------------------------------------------*/
/* This is options supported for all adapters that are server by */
/* XDI driver. Allo it is not necessary to ask it from every adapter*/
/* and it is not necessary to save it separate for every adapter */
/* Macrose defined here have only local meaning */
/*------------------------------------------------------------------*/
static dword diva_xdi_extended_features = 0;
#define DIVA_CAPI_USE_CMA 0x00000001
#define DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR 0x00000002
#define DIVA_CAPI_XDI_PROVIDES_NO_CANCEL 0x00000004
#define DIVA_CAPI_XDI_PROVIDES_RX_DMA 0x00000008
/*
CAPI can request to process all return codes self only if:
protocol code supports this && xdi supports this
*/
#define DIVA_CAPI_SUPPORTS_NO_CANCEL(__a__) (((__a__)->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) && ((__a__)->manufacturer_features & MANUFACTURER_FEATURE_OK_FC_LABEL) && (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_NO_CANCEL))
/*------------------------------------------------------------------*/
/* local function prototypes */
/*------------------------------------------------------------------*/
static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci);
static void set_group_ind_mask(PLCI *plci);
static void clear_group_ind_mask_bit(PLCI *plci, word b);
static byte test_group_ind_mask_bit(PLCI *plci, word b);
void AutomaticLaw(DIVA_CAPI_ADAPTER *);
word CapiRelease(word);
word CapiRegister(word);
word api_put(APPL *, CAPI_MSG *);
static word api_parse(byte *, word, byte *, API_PARSE *);
static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out);
static void api_load_msg(API_SAVE *in, API_PARSE *out);
word api_remove_start(void);
void api_remove_complete(void);
static void plci_remove(PLCI *);
static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a);
static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *, IDI_SYNC_REQ *);
void callback(ENTITY *);
static void control_rc(PLCI *, byte, byte, byte, byte, byte);
static void data_rc(PLCI *, byte);
static void data_ack(PLCI *, byte);
static void sig_ind(PLCI *);
static void SendInfo(PLCI *, dword, byte **, byte);
static void SendSetupInfo(APPL *, PLCI *, dword, byte **, byte);
static void SendSSExtInd(APPL *, PLCI *plci, dword Id, byte **parms);
static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms);
static void nl_ind(PLCI *);
static byte connect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte listen_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte info_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte info_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte alert_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte facility_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte facility_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte disconnect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte data_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte data_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte reset_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte reset_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte connect_b3_t90_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte select_b_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte manufacturer_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static byte manufacturer_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
static word get_plci(DIVA_CAPI_ADAPTER *);
static void add_p(PLCI *, byte, byte *);
static void add_s(PLCI *plci, byte code, API_PARSE *p);
static void add_ss(PLCI *plci, byte code, API_PARSE *p);
static void add_ie(PLCI *plci, byte code, byte *p, word p_length);
static void add_d(PLCI *, word, byte *);
static void add_ai(PLCI *, API_PARSE *);
static word add_b1(PLCI *, API_PARSE *, word, word);
static word add_b23(PLCI *, API_PARSE *);
static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms);
static void sig_req(PLCI *, byte, byte);
static void nl_req_ncci(PLCI *, byte, byte);
static void send_req(PLCI *);
static void send_data(PLCI *);
static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
static void IndParse(PLCI *, word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
/*
XON protocol helpers
*/
static void channel_flow_control_remove(PLCI *plci);
static void channel_x_off(PLCI *plci, byte ch, byte flag);
static void channel_x_on(PLCI *plci, byte ch);
static void channel_request_xon(PLCI *plci, byte ch);
static void channel_xmit_xon(PLCI *plci);
static int channel_can_xon(PLCI *plci, byte ch);
static void channel_xmit_extended_xon(PLCI *plci);
static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, dword info_mask, byte setupParse);
static word AdvCodecSupport(DIVA_CAPI_ADAPTER *, PLCI *, APPL *, byte);
static void CodecIdCheck(DIVA_CAPI_ADAPTER *, PLCI *);
static void SetVoiceChannel(PLCI *, byte *, DIVA_CAPI_ADAPTER *);
static void VoiceChannelOff(PLCI *plci);
static void adv_voice_write_coefs(PLCI *plci, word write_command);
static void adv_voice_clear_config(PLCI *plci);
static word get_b1_facilities(PLCI *plci, byte b1_resource);
static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities);
static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities);
static word adjust_b_process(dword Id, PLCI *plci, byte Rc);
static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command);
static void adjust_b_restore(dword Id, PLCI *plci, byte Rc);
static void reset_b3_command(dword Id, PLCI *plci, byte Rc);
static void select_b_command(dword Id, PLCI *plci, byte Rc);
static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc);
static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc);
static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc);
static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc);
static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc);
static void hold_save_command(dword Id, PLCI *plci, byte Rc);
static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc);
static void init_b1_config(PLCI *plci);
static void clear_b1_config(PLCI *plci);
static void dtmf_command(dword Id, PLCI *plci, byte Rc);
static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void dtmf_confirmation(dword Id, PLCI *plci);
static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length);
static void dtmf_parameter_write(PLCI *plci);
static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id);
static void mixer_set_bchannel_id(PLCI *plci, byte *chi);
static void mixer_clear_config(PLCI *plci);
static void mixer_notify_update(PLCI *plci, byte others);
static void mixer_command(dword Id, PLCI *plci, byte Rc);
static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void mixer_indication_coefs_set(dword Id, PLCI *plci);
static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length);
static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length);
static void mixer_remove(PLCI *plci);
static void ec_command(dword Id, PLCI *plci, byte Rc);
static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
static void ec_indication(dword Id, PLCI *plci, byte *msg, word length);
static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc);
static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc);
static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic);
static void diva_free_dma_descriptor(PLCI *plci, int nr);
/*------------------------------------------------------------------*/
/* external function prototypes */
/*------------------------------------------------------------------*/
extern byte MapController(byte);
extern byte UnMapController(byte);
#define MapId(Id)(((Id) & 0xffffff00L) | MapController((byte)(Id)))
#define UnMapId(Id)(((Id) & 0xffffff00L) | UnMapController((byte)(Id)))
void sendf(APPL *, word, dword, word, byte *, ...);
void *TransmitBufferSet(APPL *appl, dword ref);
void *TransmitBufferGet(APPL *appl, void *p);
void TransmitBufferFree(APPL *appl, void *p);
void *ReceiveBufferGet(APPL *appl, int Num);
int fax_head_line_time(char *buffer);
/*------------------------------------------------------------------*/
/* Global data definitions */
/*------------------------------------------------------------------*/
extern byte max_adapter;
extern byte max_appl;
extern DIVA_CAPI_ADAPTER *adapter;
extern APPL *application;
static byte remove_started = false;
static PLCI dummy_plci;
/*
 * Dispatch table mapping each incoming CAPI message command code to its
 * wire-format string and handler.  The format string is consumed by
 * api_parse(): 'b' = byte, 'w' = word, 'd' = dword, 's' = length-prefixed
 * structure.  Some commands appear twice with different formats; api_put()
 * scans the table in order and uses the first entry whose format parses
 * (e.g. CONNECT_B3_T90_ACTIVE_RES with and without its NCPI struct).
 */
static struct _ftable {
	word command;                 /* CAPI command | (RESPONSE for _RES msgs) */
	byte *format;                 /* api_parse() format string */
	byte (*function)(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
} ftable[] = {
	{_DATA_B3_R, "dwww", data_b3_req},
	{_DATA_B3_I | RESPONSE, "w", data_b3_res},
	{_INFO_R, "ss", info_req},
	{_INFO_I | RESPONSE, "", info_res},
	{_CONNECT_R, "wsssssssss", connect_req},
	{_CONNECT_I | RESPONSE, "wsssss", connect_res},
	{_CONNECT_ACTIVE_I | RESPONSE, "", connect_a_res},
	{_DISCONNECT_R, "s", disconnect_req},
	{_DISCONNECT_I | RESPONSE, "", disconnect_res},
	{_LISTEN_R, "dddss", listen_req},
	{_ALERT_R, "s", alert_req},
	{_FACILITY_R, "ws", facility_req},
	{_FACILITY_I | RESPONSE, "ws", facility_res},
	{_CONNECT_B3_R, "s", connect_b3_req},
	{_CONNECT_B3_I | RESPONSE, "ws", connect_b3_res},
	{_CONNECT_B3_ACTIVE_I | RESPONSE, "", connect_b3_a_res},
	{_DISCONNECT_B3_R, "s", disconnect_b3_req},
	{_DISCONNECT_B3_I | RESPONSE, "", disconnect_b3_res},
	{_RESET_B3_R, "s", reset_b3_req},
	{_RESET_B3_I | RESPONSE, "", reset_b3_res},
	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "ws", connect_b3_t90_a_res},
	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "", connect_b3_t90_a_res},
	{_SELECT_B_REQ, "s", select_b_req},
	{_MANUFACTURER_R, "dws", manufacturer_req},
	{_MANUFACTURER_I | RESPONSE, "dws", manufacturer_res},
	{_MANUFACTURER_I | RESPONSE, "", manufacturer_res}
};
/*
 * CIP value -> Bearer Capability information element, as a length-prefixed
 * byte string (first byte = IE length).  Indexed [CIP][a->u_law]; column 1
 * is selected when the adapter's u_law flag is set (see connect_req(), which
 * does add_p(plci, BC, cip_bc[cip][a->u_law])).  Empty strings mean no BC
 * is added for that CIP value.
 */
static byte *cip_bc[29][2] = {
	{ "", "" }, /* 0 */
	{ "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 1 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 2 */
	{ "\x02\x89\x90", "\x02\x89\x90" }, /* 3 */
	{ "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 4 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 5 */
	{ "\x02\x98\x90", "\x02\x98\x90" }, /* 6 */
	{ "\x04\x88\xc0\xc6\xe6", "\x04\x88\xc0\xc6\xe6" }, /* 7 */
	{ "\x04\x88\x90\x21\x8f", "\x04\x88\x90\x21\x8f" }, /* 8 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 9 */
	{ "", "" }, /* 10 */
	{ "", "" }, /* 11 */
	{ "", "" }, /* 12 */
	{ "", "" }, /* 13 */
	{ "", "" }, /* 14 */
	{ "", "" }, /* 15 */
	{ "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 16 */
	{ "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 17 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 18 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 19 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 20 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 21 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 22 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 23 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 24 */
	{ "\x02\x88\x90", "\x02\x88\x90" }, /* 25 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 26 */
	{ "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 27 */
	{ "\x02\x88\x90", "\x02\x88\x90" } /* 28 */
};
/*
 * CIP value -> High Layer Compatibility information element, as a
 * length-prefixed byte string.  Used together with cip_bc in connect_req()
 * (add_p(plci, HLC, cip_hlc[cip])).  CIP 0..15 carry no HLC.
 */
static byte *cip_hlc[29] = {
	"", /* 0 */
	"", /* 1 */
	"", /* 2 */
	"", /* 3 */
	"", /* 4 */
	"", /* 5 */
	"", /* 6 */
	"", /* 7 */
	"", /* 8 */
	"", /* 9 */
	"", /* 10 */
	"", /* 11 */
	"", /* 12 */
	"", /* 13 */
	"", /* 14 */
	"", /* 15 */
	"\x02\x91\x81", /* 16 */
	"\x02\x91\x84", /* 17 */
	"\x02\x91\xa1", /* 18 */
	"\x02\x91\xa4", /* 19 */
	"\x02\x91\xa8", /* 20 */
	"\x02\x91\xb1", /* 21 */
	"\x02\x91\xb2", /* 22 */
	"\x02\x91\xb5", /* 23 */
	"\x02\x91\xb8", /* 24 */
	"\x02\x91\xc1", /* 25 */
	"\x02\x91\x81", /* 26 */
	"\x03\x91\xe0\x01", /* 27 */
	"\x03\x91\xe0\x02" /* 28 */
};
/*------------------------------------------------------------------*/
/* V.120 terminal-adaption header: bit layout of the single header octet. */
#define V120_HEADER_LENGTH 1
#define V120_HEADER_EXTEND_BIT 0x80
#define V120_HEADER_BREAK_BIT 0x40
#define V120_HEADER_C1_BIT 0x04
#define V120_HEADER_C2_BIT 0x08
#define V120_HEADER_FLUSH_COND (V120_HEADER_BREAK_BIT | V120_HEADER_C1_BIT | V120_HEADER_C2_BIT)
/* Default V.120 header octet prepended to outgoing frames. */
static byte v120_default_header[] =
{
	0x83 /* Ext, BR , res, res, C2 , C1 , B , F */
};
/* Header octet used to signal a break condition (0xc3 already has the
 * break bit set; the OR merely makes the intent explicit). */
static byte v120_break_header[] =
{
	0xc3 | V120_HEADER_BREAK_BIT /* Ext, BR , res, res, C2 , C1 , B , F */
};
/*------------------------------------------------------------------*/
/* API_PUT function */
/*------------------------------------------------------------------*/
/*
 * api_put - entry point for every CAPI message coming from an application.
 *
 * Validates the message header, resolves controller/PLCI/NCCI addressing,
 * and either dispatches the message directly through ftable[] or enqueues
 * it into the PLCI's wrap-around msg_in_queue when the PLCI is busy
 * (a request in flight or an internal command pending).
 *
 * Returns 0 on success, or _BAD_MSG / _QUEUE_FULL.
 */
word api_put(APPL *appl, CAPI_MSG *msg)
{
	word i, j, k, l, n;
	word ret;
	byte c;
	byte controller;
	DIVA_CAPI_ADAPTER *a;
	PLCI *plci;
	NCCI *ncci_ptr;
	word ncci;
	CAPI_MSG *m;
	API_PARSE msg_parms[MAX_MSG_PARMS + 1];

	/* Reject impossible header lengths up front. */
	if (msg->header.length < sizeof(msg->header) ||
	    msg->header.length > MAX_MSG_SIZE) {
		dbug(1, dprintf("bad len"));
		return _BAD_MSG;
	}
	controller = (byte)((msg->header.controller & 0x7f) - 1);
	/* controller starts with 0 up to (max_adapter - 1) */
	if (controller >= max_adapter)
	{
		dbug(1, dprintf("invalid ctrl"));
		return _BAD_MSG;
	}
	a = &adapter[controller];
	plci = NULL;
	if ((msg->header.plci != 0) && (msg->header.plci <= a->max_plci) && !a->adapter_disabled)
	{
		dbug(1, dprintf("plci=%x", msg->header.plci));
		plci = &a->plci[msg->header.plci - 1];
		ncci = GET_WORD(&msg->header.ncci);
		/* Only accept the message if the PLCI is live and the NCCI (if
		 * any) really belongs to this PLCI; DISCONNECT responses are
		 * allowed through regardless so teardown can always complete. */
		if (plci->Id
		    && (plci->appl
			|| (plci->State == INC_CON_PENDING)
			|| (plci->State == INC_CON_ALERT)
			|| (msg->header.command == (_DISCONNECT_I | RESPONSE)))
		    && ((ncci == 0)
			|| (msg->header.command == (_DISCONNECT_B3_I | RESPONSE))
			|| ((ncci < MAX_NCCI + 1) && (a->ncci_plci[ncci] == plci->Id))))
		{
			/* Compute free space 'i' in the circular msg_in_queue.
			 * When write >= read the free region wraps around the
			 * end of the buffer; otherwise it is the gap between
			 * write and read (adjusted for the first queued
			 * message's rounded size). */
			i = plci->msg_in_read_pos;
			j = plci->msg_in_write_pos;
			if (j >= i)
			{
				if (j + msg->header.length + MSG_IN_OVERHEAD <= MSG_IN_QUEUE_SIZE)
					i += MSG_IN_QUEUE_SIZE - j;
				else
					j = 0;
			}
			else
			{
				n = (((CAPI_MSG *)(plci->msg_in_queue))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc;
				if (i > MSG_IN_QUEUE_SIZE - n)
					i = MSG_IN_QUEUE_SIZE - n + 1;
				i -= j;
			}
			if (i <= ((msg->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc))
			{
				dbug(0, dprintf("Q-FULL1(msg) - len=%d write=%d read=%d wrap=%d free=%d",
						msg->header.length, plci->msg_in_write_pos,
						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
				return _QUEUE_FULL;
			}
			c = false;
			/* If the message is NOT already inside the queue buffer
			 * (i.e. this is a fresh message, not a requeue from the
			 * queue itself) and the queue is non-empty, it must be
			 * enqueued to preserve ordering. */
			if ((((byte *) msg) < ((byte *)(plci->msg_in_queue)))
			    || (((byte *) msg) >= ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
			{
				if (plci->msg_in_write_pos != plci->msg_in_read_pos)
					c = true;
			}
			if (msg->header.command == _DATA_B3_R)
			{
				if (msg->header.length < 20)
				{
					dbug(1, dprintf("DATA_B3 REQ wrong length %d", msg->header.length));
					return _BAD_MSG;
				}
				/* Count pending data / data-ack for this NCCI,
				 * including DATA_B3_R messages still sitting in
				 * the queue, to enforce the flow-control caps. */
				ncci_ptr = &(a->ncci[ncci]);
				n = ncci_ptr->data_pending;
				l = ncci_ptr->data_ack_pending;
				k = plci->msg_in_read_pos;
				while (k != plci->msg_in_write_pos)
				{
					if (k == plci->msg_in_wrap_pos)
						k = 0;
					if ((((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.command == _DATA_B3_R)
					    && (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.ncci == ncci))
					{
						n++;
						if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->info.data_b3_req.Flags & 0x0004)
							l++;
					}
					k += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.length +
					      MSG_IN_OVERHEAD + 3) & 0xfffc;
				}
				if ((n >= MAX_DATA_B3) || (l >= MAX_DATA_ACK))
				{
					dbug(0, dprintf("Q-FULL2(data) - pending=%d/%d ack_pending=%d/%d",
							ncci_ptr->data_pending, n, ncci_ptr->data_ack_pending, l));
					return _QUEUE_FULL;
				}
				if (plci->req_in || plci->internal_command)
				{
					/* A message that already lives inside the
					 * queue must not be re-enqueued on itself. */
					if ((((byte *) msg) >= ((byte *)(plci->msg_in_queue)))
					    && (((byte *) msg) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
					{
						dbug(0, dprintf("Q-FULL3(requeue)"));
						return _QUEUE_FULL;
					}
					c = true;
				}
			}
			else
			{
				if (plci->req_in || plci->internal_command)
					c = true;
				else
				{
					plci->command = msg->header.command;
					plci->number = msg->header.number;
				}
			}
			if (c)
			{
				/* Enqueue: copy the message into the ring, patch
				 * the Data pointer of DATA_B3_R to the driver's
				 * transmit buffer, and store the APPL pointer in
				 * the (dword-aligned) trailer slot. */
				dbug(1, dprintf("enqueue msg(0x%04x,0x%x,0x%x) - len=%d write=%d read=%d wrap=%d free=%d",
						msg->header.command, plci->req_in, plci->internal_command,
						msg->header.length, plci->msg_in_write_pos,
						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
				if (j == 0)
					plci->msg_in_wrap_pos = plci->msg_in_write_pos;
				m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
				for (i = 0; i < msg->header.length; i++)
					((byte *)(plci->msg_in_queue))[j++] = ((byte *) msg)[i];
				if (m->header.command == _DATA_B3_R)
				{
					m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet(appl, m->info.data_b3_req.Data));
				}
				j = (j + 3) & 0xfffc;
				*((APPL **)(&((byte *)(plci->msg_in_queue))[j])) = appl;
				plci->msg_in_write_pos = j + MSG_IN_OVERHEAD;
				return 0;
			}
		}
		else
		{
			plci = NULL;
		}
	}
	dbug(1, dprintf("com=%x", msg->header.command));
	/* Direct dispatch: find the matching ftable entry whose format parses. */
	for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
	for (i = 0, ret = _BAD_MSG; i < ARRAY_SIZE(ftable); i++) {
		if (ftable[i].command == msg->header.command) {
			/* break loop if the message is correct, otherwise continue scan */
			/* (for example: CONNECT_B3_T90_ACT_RES has two specifications) */
			if (!api_parse(msg->info.b, (word)(msg->header.length - 12), ftable[i].format, msg_parms)) {
				ret = 0;
				break;
			}
			for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
		}
	}
	if (ret) {
		dbug(1, dprintf("BAD_MSG"));
		if (plci) plci->command = 0;
		return ret;
	}
	/* Handler return codes: 1 = flush pending requests via send_req(),
	 * 2 = reset the PLCI's request ring positions. */
	c = ftable[i].function(GET_DWORD(&msg->header.controller),
			       msg->header.number,
			       a,
			       plci,
			       appl,
			       msg_parms);
	channel_xmit_extended_xon(plci);
	if (c == 1) send_req(plci);
	if (c == 2 && plci) plci->req_in = plci->req_in_start = plci->req_out = 0;
	if (plci && !plci->req_in) plci->command = 0;
	return 0;
}
/*------------------------------------------------------------------*/
/* api_parse function, check the format of api messages */
/*------------------------------------------------------------------*/
/*
 * api_parse - validate a CAPI message body against a format string and
 * split it into its parameters.
 *
 * msg:    raw parameter area of the message
 * length: number of valid bytes in msg
 * format: one character per parameter: 'b' byte, 'w' word, 'd' dword,
 *         's' length-prefixed structure (first byte 0xff escapes to a
 *         16-bit little-endian length)
 * parms:  output array, or NULL to only check the format
 *
 * Returns false on success, true if the message is shorter than the
 * format requires.
 *
 * Fix: the 's' branch used to dereference parms unconditionally, while
 * every other path null-checked it — passing parms == NULL with a format
 * containing 's' crashed.  The length is now read into a local first and
 * parms is only written when non-NULL.
 */
static word api_parse(byte *msg, word length, byte *format, API_PARSE *parms)
{
	word i;
	word p;
	word len;

	for (i = 0, p = 0; format[i]; i++) {
		if (parms)
		{
			parms[i].info = &msg[p];
		}
		switch (format[i]) {
		case 'b':
			p += 1;
			break;
		case 'w':
			p += 2;
			break;
		case 'd':
			p += 4;
			break;
		case 's':
			if (msg[p] == 0xff) {
				/* escaped struct: 16-bit length follows the 0xff */
				len = msg[p + 1] + (msg[p + 2] << 8);
				if (parms) {
					parms[i].info += 2;
					parms[i].length = len;
				}
				p += (len + 3);
			}
			else {
				len = msg[p];
				if (parms)
					parms[i].length = len;
				p += (len + 1);
			}
			break;
		}
		/* Ran past the supplied buffer: message too short for format. */
		if (p > length) return true;
	}
	if (parms) parms[i].info = NULL;
	return false;
}
/*
 * api_save_msg - flatten a parsed parameter list into an API_SAVE area.
 *
 * Copies each parameter's raw bytes into out->info and records its
 * position and length in out->parms[], terminating the list with a
 * NULL info pointer.  The byte count per parameter follows the same
 * format characters api_parse() uses ('s' copies the length byte plus
 * the payload).
 */
static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out)
{
	byte *dst;
	word idx, count, k;

	dst = out->info;
	count = 0;
	for (idx = 0; format[idx] != '\0'; idx++)
	{
		out->parms[idx].info = dst;
		out->parms[idx].length = in[idx].length;
		switch (format[idx])
		{
		case 'b':
			count = 1;
			break;
		case 'w':
			count = 2;
			break;
		case 'd':
			count = 4;
			break;
		case 's':
			count = in[idx].length + 1;
			break;
		}
		for (k = 0; k < count; k++)
			dst[k] = in[idx].info[k];
		dst += count;
	}
	/* NULL-terminate so api_load_msg() knows where the list ends. */
	out->parms[idx].info = NULL;
	out->parms[idx].length = 0;
}
/*
 * api_load_msg - restore a saved parameter list into an API_PARSE array.
 *
 * Copies entries from in->parms[] into out[], including the terminating
 * entry whose info pointer is NULL.
 */
static void api_load_msg(API_SAVE *in, API_PARSE *out)
{
	word idx;

	for (idx = 0; ; idx++)
	{
		out[idx].info = in->parms[idx].info;
		out[idx].length = in->parms[idx].length;
		if (out[idx].info == NULL)
			break;  /* terminator copied; list complete */
	}
}
/*------------------------------------------------------------------*/
/* CAPI remove function */
/*------------------------------------------------------------------*/
word api_remove_start(void)
{
word i;
word j;
if (!remove_started) {
remove_started = true;
for (i = 0; i < max_adapter; i++) {
if (adapter[i].request) {
for (j = 0; j < adapter[i].max_plci; j++) {
if (adapter[i].plci[j].Sig.Id) plci_remove(&adapter[i].plci[j]);
}
}
}
return 1;
}
else {
for (i = 0; i < max_adapter; i++) {
if (adapter[i].request) {
for (j = 0; j < adapter[i].max_plci; j++) {
if (adapter[i].plci[j].Sig.Id) return 1;
}
}
}
}
api_remove_complete();
return 0;
}
/*------------------------------------------------------------------*/
/* internal command queue */
/*------------------------------------------------------------------*/
/*
 * init_internal_command_queue - reset a PLCI's internal command machinery:
 * no command in progress and every queue slot empty.
 */
static void init_internal_command_queue(PLCI *plci)
{
	word slot;

	dbug(1, dprintf("%s,%d: init_internal_command_queue",
			(char *)(FILE_), __LINE__));
	plci->internal_command = 0;
	for (slot = 0; slot < MAX_INTERNAL_COMMAND_LEVELS; slot++)
		plci->internal_command_queue[slot] = NULL;
}
/*
 * start_internal_command - run an internal command immediately if the PLCI
 * is idle, otherwise append it to the internal command queue.
 *
 * Fix: the append scan had no upper bound — if all
 * MAX_INTERNAL_COMMAND_LEVELS slots were occupied, the loop walked and
 * wrote past the end of internal_command_queue[] (buffer overflow).  The
 * scan is now bounded and an overflow is logged instead of corrupting
 * memory.
 */
static void start_internal_command(dword Id, PLCI *plci, t_std_internal_command command_function)
{
	word i;

	dbug(1, dprintf("[%06lx] %s,%d: start_internal_command",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	if (plci->internal_command == 0)
	{
		/* Idle: execute right away from queue slot 0. */
		plci->internal_command_queue[0] = command_function;
		(*command_function)(Id, plci, OK);
	}
	else
	{
		/* Busy: append to the first free slot, bounds-checked. */
		i = 1;
		while ((i < MAX_INTERNAL_COMMAND_LEVELS)
		       && (plci->internal_command_queue[i] != NULL))
			i++;
		if (i < MAX_INTERNAL_COMMAND_LEVELS)
			plci->internal_command_queue[i] = command_function;
		else
			dbug(0, dprintf("[%06lx] %s,%d: internal command queue overflow",
					UnMapId(Id), (char *)(FILE_), __LINE__));
	}
}
/*
 * next_internal_command - advance the internal command queue after the
 * current command has completed.
 *
 * Shifts the queue down one slot and runs the new head.  If that command
 * sets plci->internal_command again it is still in progress and we return;
 * otherwise it finished synchronously and the loop drains the next entry.
 */
static void next_internal_command(dword Id, PLCI *plci)
{
	word i;

	dbug(1, dprintf("[%06lx] %s,%d: next_internal_command",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	plci->internal_command = 0;
	plci->internal_command_queue[0] = NULL;
	while (plci->internal_command_queue[1] != NULL)
	{
		/* Shift every queued command one level down. */
		for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
			plci->internal_command_queue[i] = plci->internal_command_queue[i + 1];
		plci->internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS - 1] = NULL;
		(*(plci->internal_command_queue[0]))(Id, plci, OK);
		/* Command is now pending asynchronously; stop draining. */
		if (plci->internal_command != 0)
			return;
		plci->internal_command_queue[0] = NULL;
	}
}
/*------------------------------------------------------------------*/
/* NCCI allocate/remove function */
/*------------------------------------------------------------------*/
static dword ncci_mapping_bug = 0;
/*
 * get_ncci - allocate an NCCI for network-layer channel 'ch' on this PLCI
 * and record the ch<->ncci mapping in the adapter tables.
 *
 * force_ncci, when non-zero, requests a specific NCCI value.  Otherwise
 * the channel number itself is used if free, or the first free NCCI is
 * searched.  Inconsistencies are counted in ncci_mapping_bug and logged.
 * Returns the chosen NCCI.
 */
static word get_ncci(PLCI *plci, byte ch, word force_ncci)
{
	DIVA_CAPI_ADAPTER *a;
	word ncci, i, j, k;

	a = plci->adapter;
	if (!ch || a->ch_ncci[ch])
	{
		/* Channel 0 or channel already mapped: log and reuse ch. */
		ncci_mapping_bug++;
		dbug(1, dprintf("NCCI mapping exists %ld %02x %02x %02x-%02x",
				ncci_mapping_bug, ch, force_ncci, a->ncci_ch[a->ch_ncci[ch]], a->ch_ncci[ch]));
		ncci = ch;
	}
	else
	{
		if (force_ncci)
			ncci = force_ncci;
		else
		{
			if ((ch < MAX_NCCI + 1) && !a->ncci_ch[ch])
				ncci = ch;
			else
			{
				/* Linear search for the first unused NCCI. */
				ncci = 1;
				while ((ncci < MAX_NCCI + 1) && a->ncci_ch[ncci])
					ncci++;
				if (ncci == MAX_NCCI + 1)
				{
					/* Table exhausted: scan for a channel that
					 * is mapped more than once, for diagnostics
					 * only, then fall back to ncci = ch. */
					ncci_mapping_bug++;
					i = 1;
					do
					{
						j = 1;
						while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i))
							j++;
						k = j;
						if (j < MAX_NCCI + 1)
						{
							do
							{
								j++;
							} while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i));
						}
					} while ((i < MAX_NL_CHANNEL + 1) && (j < MAX_NCCI + 1));
					if (i < MAX_NL_CHANNEL + 1)
					{
						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x %02x-%02x-%02x",
								ncci_mapping_bug, ch, force_ncci, i, k, j));
					}
					else
					{
						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x",
								ncci_mapping_bug, ch, force_ncci));
					}
					ncci = ch;
				}
			}
			/* Register the NCCI on this PLCI and link it into the
			 * PLCI's ring list of NCCIs. */
			a->ncci_plci[ncci] = plci->Id;
			a->ncci_state[ncci] = IDLE;
			if (!plci->ncci_ring_list)
				plci->ncci_ring_list = ncci;
			else
				a->ncci_next[ncci] = a->ncci_next[plci->ncci_ring_list];
			a->ncci_next[plci->ncci_ring_list] = (byte) ncci;
		}
		a->ncci_ch[ncci] = ch;
		a->ch_ncci[ch] = (byte) ncci;
		dbug(1, dprintf("NCCI mapping established %ld %02x %02x %02x-%02x",
				ncci_mapping_bug, ch, force_ncci, ch, ncci));
	}
	return (ncci);
}
/*
 * ncci_clear_appl_data - clear every application data-buffer slot that is
 * tagged with the given NCCI on this PLCI.  Helper for
 * ncci_free_receive_buffers(); requires plci->appl != NULL.
 */
static void ncci_clear_appl_data(PLCI *plci, word ncci)
{
	APPL *appl;
	word i, ncci_code;

	appl = plci->appl;
	ncci_code = ncci | (((word) plci->adapter->Id) << 8);
	for (i = 0; i < appl->MaxBuffer; i++)
	{
		if ((appl->DataNCCI[i] == ncci_code)
		    && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
		{
			appl->DataNCCI[i] = 0;
		}
	}
}

/*
 * ncci_free_receive_buffers - release the application's receive-buffer
 * bookkeeping for one NCCI, or for all NCCIs of the PLCI when ncci == 0.
 *
 * (The previously duplicated clearing loop for the single-NCCI and
 * all-NCCI branches now lives in ncci_clear_appl_data().)
 */
static void ncci_free_receive_buffers(PLCI *plci, word ncci)
{
	DIVA_CAPI_ADAPTER *a;
	dword Id;

	a = plci->adapter;
	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
	if (ncci)
	{
		if (a->ncci_plci[ncci] == plci->Id)
		{
			if (!plci->appl)
			{
				ncci_mapping_bug++;
				dbug(1, dprintf("NCCI mapping appl expected %ld %08lx",
						ncci_mapping_bug, Id));
			}
			else
			{
				ncci_clear_appl_data(plci, ncci);
			}
		}
	}
	else
	{
		/* ncci == 0: walk every NCCI owned by this PLCI. */
		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
		{
			if (a->ncci_plci[ncci] == plci->Id)
			{
				if (!plci->appl)
				{
					ncci_mapping_bug++;
					dbug(1, dprintf("NCCI mapping no appl %ld %08lx",
							ncci_mapping_bug, Id));
				}
				else
				{
					ncci_clear_appl_data(plci, ncci);
				}
			}
		}
	}
}
/*
 * cleanup_ncci_data - drop all pending transmit data of one NCCI.
 *
 * Frees every queued transmit buffer (except the one currently handed to
 * the hardware, tracked by plci->data_sent_ptr) and resets the NCCI's
 * data and data-ack ring counters.  No-op unless the NCCI belongs to
 * this PLCI.
 */
static void cleanup_ncci_data(PLCI *plci, word ncci)
{
	NCCI *q;

	if (!ncci || (plci->adapter->ncci_plci[ncci] != plci->Id))
		return;
	q = &(plci->adapter->ncci[ncci]);
	if (plci->appl)
	{
		for (; q->data_pending != 0; (q->data_pending)--)
		{
			/* Skip the buffer that is still owned by the card. */
			if (!plci->data_sent || (q->DBuffer[q->data_out].P != plci->data_sent_ptr))
				TransmitBufferFree(plci->appl, q->DBuffer[q->data_out].P);
			(q->data_out)++;
			if (q->data_out == MAX_DATA_B3)
				q->data_out = 0;
		}
	}
	q->data_out = 0;
	q->data_pending = 0;
	q->data_ack_out = 0;
	q->data_ack_pending = 0;
}
/*
 * ncci_remove - tear down one NCCI (or all NCCIs of the PLCI when
 * ncci == 0), releasing data buffers and the ch<->ncci mapping tables.
 *
 * preserve_ncci keeps the NCCI number itself allocated (only the channel
 * binding and pending data are dropped), which callers use when the NCCI
 * must survive a channel re-assignment.
 */
static void ncci_remove(PLCI *plci, word ncci, byte preserve_ncci)
{
	DIVA_CAPI_ADAPTER *a;
	dword Id;
	word i;

	a = plci->adapter;
	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
	if (!preserve_ncci)
		ncci_free_receive_buffers(plci, ncci);
	if (ncci)
	{
		if (a->ncci_plci[ncci] != plci->Id)
		{
			ncci_mapping_bug++;
			dbug(1, dprintf("NCCI mapping doesn't exist %ld %08lx %02x",
					ncci_mapping_bug, Id, preserve_ncci));
		}
		else
		{
			cleanup_ncci_data(plci, ncci);
			dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
					ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
			a->ch_ncci[a->ncci_ch[ncci]] = 0;
			if (!preserve_ncci)
			{
				a->ncci_ch[ncci] = 0;
				a->ncci_plci[ncci] = 0;
				a->ncci_state[ncci] = IDLE;
				/* Unlink the NCCI from the PLCI's circular ring
				 * list: find its predecessor, then splice it out. */
				i = plci->ncci_ring_list;
				while ((i != 0) && (a->ncci_next[i] != plci->ncci_ring_list) && (a->ncci_next[i] != ncci))
					i = a->ncci_next[i];
				if ((i != 0) && (a->ncci_next[i] == ncci))
				{
					if (i == ncci)
						plci->ncci_ring_list = 0;
					else if (plci->ncci_ring_list == ncci)
						plci->ncci_ring_list = i;
					a->ncci_next[i] = a->ncci_next[ncci];
				}
				a->ncci_next[ncci] = 0;
			}
		}
	}
	else
	{
		/* ncci == 0: release every NCCI owned by this PLCI. */
		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
		{
			if (a->ncci_plci[ncci] == plci->Id)
			{
				cleanup_ncci_data(plci, ncci);
				dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
						ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
				a->ch_ncci[a->ncci_ch[ncci]] = 0;
				if (!preserve_ncci)
				{
					a->ncci_ch[ncci] = 0;
					a->ncci_plci[ncci] = 0;
					a->ncci_state[ncci] = IDLE;
					a->ncci_next[ncci] = 0;
				}
			}
		}
		if (!preserve_ncci)
			plci->ncci_ring_list = 0;
	}
}
/*------------------------------------------------------------------*/
/* PLCI remove function */
/*------------------------------------------------------------------*/
/*
 * plci_free_msg_in_queue - discard everything still sitting in the PLCI's
 * incoming message queue, returning the transmit buffers of any queued
 * DATA_B3_R messages to the application, then reset the queue positions
 * to their empty state (MSG_IN_QUEUE_SIZE).
 */
static void plci_free_msg_in_queue(PLCI *plci)
{
	word i;

	if (plci->appl)
	{
		i = plci->msg_in_read_pos;
		while (i != plci->msg_in_write_pos)
		{
			/* Wrap back to the start of the circular buffer. */
			if (i == plci->msg_in_wrap_pos)
				i = 0;
			if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.command == _DATA_B3_R)
			{
				/* Data was re-pointed at a driver transmit buffer
				 * on enqueue (see api_put); give it back. */
				TransmitBufferFree(plci->appl,
						   (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
			}
			/* Advance by the message's dword-rounded queue footprint. */
			i += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.length +
			      MSG_IN_OVERHEAD + 3) & 0xfffc;
		}
	}
	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
}
/*
 * plci_remove - initiate teardown of a PLCI.
 *
 * Sends the appropriate removal request (REMOVE on the network layer for
 * D-channel X.25 PLCIs, signaled by Sig.Id == 0xff; HANGUP on the
 * signaling entity otherwise), releases all NCCIs and queued messages,
 * and detaches the application.
 */
static void plci_remove(PLCI *plci)
{

	if (!plci) {
		dbug(1, dprintf("plci_remove(no plci)"));
		return;
	}
	init_internal_command_queue(plci);
	dbug(1, dprintf("plci_remove(%x,tel=%x)", plci->Id, plci->tel));
	/* Removal already in progress elsewhere; nothing more to do here. */
	if (plci_remove_check(plci))
	{
		return;
	}
	if (plci->Sig.Id == 0xff)
	{
		/* D-channel X.25 PLCI: only the network layer needs removing. */
		dbug(1, dprintf("D-channel X.25 plci->NL.Id:%0x", plci->NL.Id));
		if (plci->NL.Id && !plci->nl_remove_id)
		{
			nl_req_ncci(plci, REMOVE, 0);
			send_req(plci);
		}
	}
	else
	{
		/* Regular PLCI: hang up the signaling side if it is still
		 * active or has requests in flight. */
		if (!plci->sig_remove_id
		    && (plci->Sig.Id
			|| (plci->req_in != plci->req_out)
			|| (plci->nl_req || plci->sig_req)))
		{
			sig_req(plci, HANGUP, 0);
			send_req(plci);
		}
	}
	ncci_remove(plci, 0, false);
	plci_free_msg_in_queue(plci);
	plci->channels = 0;
	plci->appl = NULL;
	if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT))
		plci->State = OUTG_DIS_PENDING;
}
/*------------------------------------------------------------------*/
/* Application Group function helpers */
/*------------------------------------------------------------------*/
/* Set every bit of the group-optimization mask (all applications eligible). */
static void set_group_ind_mask(PLCI *plci)
{
	word idx;

	for (idx = 0; idx < C_IND_MASK_DWORDS; idx++)
		plci->group_optimization_mask_table[idx] = 0xffffffffL;
}
/* Clear bit b of the group-optimization mask (dword b/32, bit b%32). */
static void clear_group_ind_mask_bit(PLCI *plci, word b)
{
	dword bit;

	bit = 1L << (b & 0x1f);
	plci->group_optimization_mask_table[b >> 5] &= ~bit;
}
/* Return non-zero if bit b of the group-optimization mask is set. */
static byte test_group_ind_mask_bit(PLCI *plci, word b)
{
	dword w;

	w = plci->group_optimization_mask_table[b >> 5];
	return (w & (1L << (b & 0x1f))) ? true : false;
}
/*------------------------------------------------------------------*/
/* c_ind_mask operations for arbitrary MAX_APPL */
/*------------------------------------------------------------------*/
/* Clear the whole CONNECT_IND application mask. */
static void clear_c_ind_mask(PLCI *plci)
{
	word idx;

	for (idx = 0; idx < C_IND_MASK_DWORDS; idx++)
		plci->c_ind_mask_table[idx] = 0;
}
/* Return true when no bit of the CONNECT_IND mask is set. */
static byte c_ind_mask_empty(PLCI *plci)
{
	word idx;

	for (idx = 0; idx < C_IND_MASK_DWORDS; idx++)
	{
		if (plci->c_ind_mask_table[idx] != 0)
			return false;
	}
	return true;
}
/* Set bit b of the CONNECT_IND mask (dword b/32, bit b%32). */
static void set_c_ind_mask_bit(PLCI *plci, word b)
{
	dword bit;

	bit = 1L << (b & 0x1f);
	plci->c_ind_mask_table[b >> 5] |= bit;
}
/* Clear bit b of the CONNECT_IND mask. */
static void clear_c_ind_mask_bit(PLCI *plci, word b)
{
	dword bit;

	bit = 1L << (b & 0x1f);
	plci->c_ind_mask_table[b >> 5] &= ~bit;
}
/* Return non-zero if bit b of the CONNECT_IND mask is set. */
static byte test_c_ind_mask_bit(PLCI *plci, word b)
{
	dword w;

	w = plci->c_ind_mask_table[b >> 5];
	return (w & (1L << (b & 0x1f))) ? true : false;
}
/*
 * dump_c_ind_mask - debug-print the CONNECT_IND mask, four dwords per
 * output line, each rendered as 8 hex digits.  The line buffer is filled
 * right-to-left; buf[40] leaves room for 4*(8 digits + 1 space) + NUL.
 */
static void dump_c_ind_mask(PLCI *plci)
{
	static char hex_digit_table[0x10] =
		{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
	word i, j, k;
	dword d;
	char *p;
	char buf[40];

	for (i = 0; i < C_IND_MASK_DWORDS; i += 4)
	{
		p = buf + 36;
		*p = '\0';
		for (j = 0; j < 4; j++)
		{
			if (i + j < C_IND_MASK_DWORDS)
			{
				/* Emit the dword least-significant digit last
				 * (buffer is built backwards). */
				d = plci->c_ind_mask_table[i + j];
				for (k = 0; k < 8; k++)
				{
					*(--p) = hex_digit_table[d & 0xf];
					d >>= 4;
				}
			}
			else if (i != 0)
			{
				/* Pad missing dwords with blanks, but only on
				 * continuation lines (i != 0). */
				for (k = 0; k < 8; k++)
					*(--p) = ' ';
			}
			*(--p) = ' ';
		}
		dbug(1, dprintf("c_ind_mask =%s", (char *) p));
	}
}
#define dump_plcis(a)
/*------------------------------------------------------------------*/
/* translation function for each message */
/*------------------------------------------------------------------*/
/*
 * connect_req - handle CONNECT_REQ: set up an outgoing call.
 *
 * Allocates a PLCI, parses the Additional Info channel selection
 * (ch 0 = default B-channel, 1 = D-channel/X.31, 2 = no B-channel,
 * 3 = permanent connection with channel mask, 4 = explicit CHI),
 * assembles the signaling information elements (BC/LLC/HLC from the CIP
 * tables, addresses, channel id) and issues ASSIGN/CALL_REQ.
 *
 * Returns false when requests were queued (caller must not send a
 * confirm), 2 when a CONNECT_CONFIRM with 'Info' has been sent here.
 */
static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ch;
	word i;
	word Info;
	byte LinkLayer;
	API_PARSE *ai;
	API_PARSE *bp;
	API_PARSE ai_parms[5];
	word channel = 0;
	dword ch_mask;
	byte m;
	static byte esc_chi[35] = {0x02, 0x18, 0x01};
	static byte lli[2] = {0x01, 0x00};
	byte noCh = 0;
	word dir = 0;
	byte *p_chi = "";

	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
	dbug(1, dprintf("connect_req(%d)", parms->length));
	Info = _WRONG_IDENTIFIER;
	if (a)
	{
		if (a->adapter_disabled)
		{
			/* Adapter down: confirm OK but immediately disconnect. */
			dbug(1, dprintf("adapter disabled"));
			Id = ((word)1 << 8) | a->Id;
			sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
			sendf(appl, _DISCONNECT_I, Id, 0, "w", _L1_ERROR);
			return false;
		}
		Info = _OUT_OF_PLCI;
		if ((i = get_plci(a)))
		{
			Info = 0;
			plci = &a->plci[i - 1];
			plci->appl = appl;
			plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
			/* check 'external controller' bit for codec support */
			if (Id & EXT_CONTROLLER)
			{
				if (AdvCodecSupport(a, plci, appl, 0))
				{
					plci->Id = 0;
					sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
					return 2;
				}
			}
			ai = &parms[9];
			bp = &parms[5];
			ch = 0;
			if (bp->length)LinkLayer = bp->info[3];
			else LinkLayer = 0;
			if (ai->length)
			{
				/* Parse Additional Info; the first sub-struct
				 * carries the channel selection. */
				ch = 0xffff;
				if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
				{
					ch = 0;
					if (ai_parms[0].length)
					{
						ch = GET_WORD(ai_parms[0].info + 1);
						if (ch > 4) ch = 0; /* safety -> ignore ChannelID */
						if (ch == 4) /* explizit CHI in message */
						{
							/* check length of B-CH struct */
							if ((ai_parms[0].info)[3] >= 1)
							{
								if ((ai_parms[0].info)[4] == CHI)
								{
									p_chi = &((ai_parms[0].info)[5]);
								}
								else
								{
									p_chi = &((ai_parms[0].info)[3]);
								}
								if (p_chi[0] > 35) /* check length of channel ID */
								{
									Info = _WRONG_MESSAGE_FORMAT;
								}
							}
							else Info = _WRONG_MESSAGE_FORMAT;
						}
						if (ch == 3 && ai_parms[0].length >= 7 && ai_parms[0].length <= 36)
						{
							/* Permanent connection: validate the
							 * channel bitmap and build an escape
							 * CHI element from it. */
							dir = GET_WORD(ai_parms[0].info + 3);
							ch_mask = 0;
							m = 0x3f;
							for (i = 0; i + 5 <= ai_parms[0].length; i++)
							{
								if (ai_parms[0].info[i + 5] != 0)
								{
									if ((ai_parms[0].info[i + 5] | m) != 0xff)
										Info = _WRONG_MESSAGE_FORMAT;
									else
									{
										if (ch_mask == 0)
											channel = i;
										ch_mask |= 1L << i;
									}
								}
								m = 0;
							}
							if (ch_mask == 0)
								Info = _WRONG_MESSAGE_FORMAT;
							if (!Info)
							{
								if ((ai_parms[0].length == 36) || (ch_mask != ((dword)(1L << channel))))
								{
									esc_chi[0] = (byte)(ai_parms[0].length - 2);
									for (i = 0; i + 5 <= ai_parms[0].length; i++)
										esc_chi[i + 3] = ai_parms[0].info[i + 5];
								}
								else
									esc_chi[0] = 2;
								esc_chi[2] = (byte)channel;
								plci->b_channel = (byte)channel; /* not correct for ETSI ch 17..31 */
								add_p(plci, LLI, lli);
								add_p(plci, ESC, esc_chi);
								plci->State = LOCAL_CONNECT;
								if (!dir) plci->call_dir |= CALL_DIR_FORCE_OUTG_NL; /* dir 0=DTE, 1=DCE */
							}
						}
					}
				}
				else Info = _WRONG_MESSAGE_FORMAT;
			}
			dbug(1, dprintf("ch=%x,dir=%x,p_ch=%d", ch, dir, channel));
			plci->command = _CONNECT_R;
			plci->number = Number;
			/* x.31 or D-ch free SAPI in LinkLayer? */
			if (ch == 1 && LinkLayer != 3 && LinkLayer != 12) noCh = true;
			if ((ch == 0 || ch == 2 || noCh || ch == 3 || ch == 4) && !Info)
			{
				/* B-channel used for B3 connections (ch==0), or no B channel */
				/* is used (ch==2) or perm. connection (3) is used do a CALL */
				if (noCh) Info = add_b1(plci, &parms[5], 2, 0); /* no resource */
				else Info = add_b1(plci, &parms[5], ch, 0);
				add_s(plci, OAD, &parms[2]);
				add_s(plci, OSA, &parms[4]);
				add_s(plci, BC, &parms[6]);
				add_s(plci, LLC, &parms[7]);
				add_s(plci, HLC, &parms[8]);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(plci, LLI, "\x01\x01");
				}
				if (GET_WORD(parms[0].info) < 29) {
					add_p(plci, BC, cip_bc[GET_WORD(parms[0].info)][a->u_law]);
					add_p(plci, HLC, cip_hlc[GET_WORD(parms[0].info)]);
				}
				add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				sig_req(plci, ASSIGN, DSIG_ID);
			}
			else if (ch == 1) {
				/* D-Channel used for B3 connections */
				plci->Sig.Id = 0xff;
				Info = 0;
			}
			if (!Info && ch != 2 && !noCh) {
				Info = add_b23(plci, &parms[5]);
				if (!Info) {
					if (!(plci->tel && !plci->adv_nl))nl_req_ncci(plci, ASSIGN, 0);
				}
			}
			if (!Info)
			{
				if (ch == 0 || ch == 2 || ch == 3 || noCh || ch == 4)
				{
					if (plci->spoofed_msg == SPOOFING_REQUIRED)
					{
						/* Codec assignment is still outstanding:
						 * save the message and replay it later. */
						api_save_msg(parms, "wsssssssss", &plci->saved_msg);
						plci->spoofed_msg = CALL_REQ;
						plci->internal_command = BLOCK_PLCI;
						plci->command = 0;
						dbug(1, dprintf("Spoof"));
						send_req(plci);
						return false;
					}
					if (ch == 4)add_p(plci, CHI, p_chi);
					add_s(plci, CPN, &parms[1]);
					add_s(plci, DSA, &parms[3]);
					if (noCh) add_p(plci, ESC, "\x02\x18\xfd"); /* D-channel, no B-L3 */
					add_ai(plci, &parms[9]);
					if (!dir)sig_req(plci, CALL_REQ, 0);
					else
					{
						/* DCE side of a permanent connection
						 * listens instead of calling. */
						plci->command = PERM_LIST_REQ;
						plci->appl = appl;
						sig_req(plci, LISTEN_REQ, 0);
						send_req(plci);
						return false;
					}
				}
				send_req(plci);
				return false;
			}
			plci->Id = 0;
		}
	}
	sendf(appl,
	      _CONNECT_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return 2;
}
/*
 * connect_res - handle CONNECT_RES: the application accepts or rejects
 * an incoming call.
 *
 * Reject == 0 accepts the call (CALL_RES is issued, the B-protocol is
 * configured, and every other application that was offered the call gets
 * a DISCONNECT_IND).  A non-zero Reject hangs up or sends REJECT with a
 * cause derived from cau_t[]/esc_t once all offered applications have
 * answered (tracked via the c_ind_mask).  Returns 1 when signaling
 * requests were queued, 0 otherwise.
 */
static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word i, Info;
	word Reject;
	static byte cau_t[] = {0, 0, 0x90, 0x91, 0xac, 0x9d, 0x86, 0xd8, 0x9b};
	static byte esc_t[] = {0x03, 0x08, 0x00, 0x00};
	API_PARSE *ai;
	API_PARSE ai_parms[5];
	word ch = 0;

	if (!plci) {
		dbug(1, dprintf("connect_res(no plci)"));
		return 0; /* no plci, no send */
	}
	dbug(1, dprintf("connect_res(State=0x%x)", plci->State));
	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
	ai = &parms[5];
	dbug(1, dprintf("ai->length=%d", ai->length));
	if (ai->length)
	{
		/* Extract the channel selection from Additional Info. */
		if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		{
			dbug(1, dprintf("ai_parms[0].length=%d/0x%x", ai_parms[0].length, GET_WORD(ai_parms[0].info + 1)));
			ch = 0;
			if (ai_parms[0].length)
			{
				ch = GET_WORD(ai_parms[0].info + 1);
				dbug(1, dprintf("BCH-I=0x%x", ch));
			}
		}
	}
	if (plci->State == INC_CON_CONNECTED_ALERT)
	{
		/* Call was already connected during alerting: just confirm. */
		dbug(1, dprintf("Connected Alert Call_Res"));
		if (a->Info_Mask[appl->Id - 1] & 0x200)
		{
			/* early B3 connect (CIP mask bit 9) no release after a disc */
			add_p(plci, LLI, "\x01\x01");
		}
		add_s(plci, CONN_NR, &parms[2]);
		add_s(plci, LLC, &parms[4]);
		add_ai(plci, &parms[5]);
		plci->State = INC_CON_ACCEPT;
		sig_req(plci, CALL_RES, 0);
		return 1;
	}
	else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) {
		/* This application has now answered; drop it from the mask. */
		clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
		dump_c_ind_mask(plci);
		Reject = GET_WORD(parms[0].info);
		dbug(1, dprintf("Reject=0x%x", Reject));
		if (Reject)
		{
			/* Only act on the reject once no other application can
			 * still accept the call. */
			if (c_ind_mask_empty(plci))
			{
				if ((Reject & 0xff00) == 0x3400)
				{
					/* 0x34xx: application-supplied cause value. */
					esc_t[2] = ((byte)(Reject & 0x00ff)) | 0x80;
					add_p(plci, ESC, esc_t);
					add_ai(plci, &parms[5]);
					sig_req(plci, REJECT, 0);
				}
				else if (Reject == 1 || Reject > 9)
				{
					add_ai(plci, &parms[5]);
					sig_req(plci, HANGUP, 0);
				}
				else
				{
					/* Map CAPI reject codes 2..9 to causes. */
					esc_t[2] = cau_t[(Reject&0x000f)];
					add_p(plci, ESC, esc_t);
					add_ai(plci, &parms[5]);
					sig_req(plci, REJECT, 0);
				}
				plci->appl = appl;
			}
			else
			{
				sendf(appl, _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
			}
		}
		else {
			/* Accept: bind the PLCI to this application. */
			plci->appl = appl;
			if (Id & EXT_CONTROLLER) {
				if (AdvCodecSupport(a, plci, appl, 0)) {
					dbug(1, dprintf("connect_res(error from AdvCodecSupport)"));
					sig_req(plci, HANGUP, 0);
					return 1;
				}
				if (plci->tel == ADV_VOICE && a->AdvCodecPLCI)
				{
					Info = add_b23(plci, &parms[1]);
					if (Info)
					{
						dbug(1, dprintf("connect_res(error from add_b23)"));
						sig_req(plci, HANGUP, 0);
						return 1;
					}
					if (plci->adv_nl)
					{
						nl_req_ncci(plci, ASSIGN, 0);
					}
				}
			}
			else
			{
				plci->tel = 0;
				if (ch != 2)
				{
					Info = add_b23(plci, &parms[1]);
					if (Info)
					{
						dbug(1, dprintf("connect_res(error from add_b23 2)"));
						sig_req(plci, HANGUP, 0);
						return 1;
					}
				}
				nl_req_ncci(plci, ASSIGN, 0);
			}
			if (plci->spoofed_msg == SPOOFING_REQUIRED)
			{
				/* Codec assignment pending: save and replay later. */
				api_save_msg(parms, "wsssss", &plci->saved_msg);
				plci->spoofed_msg = CALL_RES;
				plci->internal_command = BLOCK_PLCI;
				plci->command = 0;
				dbug(1, dprintf("Spoof"));
			}
			else
			{
				add_b1(plci, &parms[1], ch, plci->B1_facilities);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(plci, LLI, "\x01\x01");
				}
				add_s(plci, CONN_NR, &parms[2]);
				add_s(plci, LLC, &parms[4]);
				add_ai(plci, &parms[5]);
				plci->State = INC_CON_ACCEPT;
				sig_req(plci, CALL_RES, 0);
			}
			/* Tell every other offered application the call is taken. */
			for (i = 0; i < max_appl; i++) {
				if (test_c_ind_mask_bit(plci, i)) {
					sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
				}
			}
		}
	}
	return 1;
}
/*
 * CONNECT_ACTIVE_RES handler: no processing required beyond tracing;
 * the response merely acknowledges the CONNECT_ACTIVE_IND.
 */
static byte connect_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("connect_a_res"));
	return false;
}
/*
 * DISCONNECT_REQ handler: tear down the call bound to this PLCI.
 *
 * Returns 1 when a HANGUP/REMOVE was issued and the DISCONNECT confirm
 * is deferred to the signaling completion; returns false after sending
 * the confirm directly (error paths and the pseudo-id path, which
 * confirms inline).
 */
static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i;

	dbug(1, dprintf("disconnect_req"));

	Info = _WRONG_IDENTIFIER;

	if (plci)
	{
		if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
		{
			/* Incoming call not yet answered: this application takes
			   ownership, every other application still listed in the
			   indication mask gets a DISCONNECT_IND. */
			clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
			plci->appl = appl;
			for (i = 0; i < max_appl; i++)
			{
				if (test_c_ind_mask_bit(plci, i))
					sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
			}
			plci->State = OUTG_DIS_PENDING;
		}
		if (plci->Sig.Id && plci->appl)
		{
			Info = 0;
			if (plci->Sig.Id != 0xff)
			{
				/* Regular signaling id: send HANGUP unless a
				   disconnect is already pending. */
				if (plci->State != INC_DIS_PENDING)
				{
					add_ai(plci, &msg[0]);
					sig_req(plci, HANGUP, 0);
					plci->State = OUTG_DIS_PENDING;
					return 1;
				}
			}
			else
			{
				/* Pseudo signaling id 0xff: no real signaling entity,
				   remove the network layer directly and confirm plus
				   indicate to the application right here. */
				if (plci->NL.Id && !plci->nl_remove_id)
				{
					mixer_remove(plci);
					nl_req_ncci(plci, REMOVE, 0);
					sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
					sendf(appl, _DISCONNECT_I, Id, 0, "w", 0);
					plci->State = INC_DIS_PENDING;
				}
				return 1;
			}
		}
	}

	if (!appl) return false;
	sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", Info);
	return false;
}
/*
 * DISCONNECT_RES handler: final acknowledge of a DISCONNECT_IND.
 * Releases receive buffers and, once every application has responded,
 * returns the PLCI to IDLE (unless suspending) and frees it when no
 * B-channels remain.  Always returns 0.
 */
static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("disconnect_res"));
	if (!plci)
		return 0;

	/* Clear this application's indication mask bit in case a
	   DISCONNECT_IND collided with its CONNECT_RES. */
	clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
	ncci_free_receive_buffers(plci, 0);

	if (plci_remove_check(plci))
		return 0;

	if ((plci->State == INC_DIS_PENDING || plci->State == SUSPENDING)
	    && c_ind_mask_empty(plci))
	{
		if (plci->State != SUSPENDING)
			plci->State = IDLE;
		dbug(1, dprintf("chs=%d", plci->channels));
		if (!plci->channels)
			plci_remove(plci);
	}
	return 0;
}
/*
 * LISTEN_REQ handler: stores the application's Info and CIP masks on
 * the adapter, optionally arms/clears the on-board codec listen
 * (external-controller bit), then confirms and re-evaluates the global
 * listen state.  Always returns false (confirm sent here).
 */
static byte listen_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word Info = _WRONG_IDENTIFIER;
	byte i;

	dbug(1, dprintf("listen_req(Appl=0x%x)", appl->Id));
	if (a) {
		Info = 0;
		a->Info_Mask[appl->Id - 1] = GET_DWORD(parms[0].info);
		a->CIP_Mask[appl->Id - 1] = GET_DWORD(parms[1].info);
		dbug(1, dprintf("CIP_MASK=0x%lx", GET_DWORD(parms[1].info)));
		/* early B3 connect (bit 9) additionally needs call
		   progression information (bit 4) */
		if (a->Info_Mask[appl->Id - 1] & 0x200)
			a->Info_Mask[appl->Id - 1] |= 0x10;

		/* external controller bit set and a non-empty CIP mask:
		   switch the codec listen on, otherwise clear it */
		if (Id & EXT_CONTROLLER && GET_DWORD(parms[1].info)) {
			if (a->profile.Global_Options & ON_BOARD_CODEC) {
				dummy_plci.State = IDLE;
				a->codec_listen[appl->Id - 1] = &dummy_plci;
				/* copy calling party number / subaddress,
				   capped at 21 digits plus length byte */
				a->TelOAD[0] = (byte)(parms[3].length);
				for (i = 1; i < 22 && i <= parms[3].length; i++)
					a->TelOAD[i] = parms[3].info[i];
				a->TelOAD[i] = 0;
				a->TelOSA[0] = (byte)(parms[4].length);
				for (i = 1; i < 22 && i <= parms[4].length; i++)
					a->TelOSA[i] = parms[4].info[i];
				a->TelOSA[i] = 0;
			} else {
				Info = 0x2002; /* wrong controller, codec not supported */
			}
		} else {
			/* clear listen */
			a->codec_listen[appl->Id - 1] = (PLCI *)0;
		}
	}
	sendf(appl,
	      _LISTEN_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	if (a)
		listen_check(a);
	return false;
}
/*
 * INFO_REQ handler: sends additional call information.  Depending on
 * the parsed additional-info parts this becomes overlap sending
 * (CPN/KEY), user-to-user data (UUI), a facility request on an
 * existing PLCI, or a null-call-reference facility on a freshly
 * assigned PLCI.  Returns false in all paths; the confirm is either
 * sent here (error/local case) or generated later by the signaling
 * completion.
 */
static byte info_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word i;
	API_PARSE *ai;
	PLCI *rc_plci = NULL;
	API_PARSE ai_parms[5];
	word Info = 0;

	dbug(1, dprintf("info_req"));
	for (i = 0; i < 5; i++) ai_parms[i].length = 0;

	ai = &msg[1];

	if (ai->length)
	{
		if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		{
			dbug(1, dprintf("AddInfo wrong"));
			Info = _WRONG_MESSAGE_FORMAT;
		}
	}
	if (!a) Info = _WRONG_STATE;

	if (!Info && plci)
	{ /* no fac, with CPN, or KEY */
		rc_plci = plci;
		if (!ai_parms[3].length && plci->State && (msg[0].length || ai_parms[1].length))
		{
			/* overlap sending option */
			dbug(1, dprintf("OvlSnd"));
			add_s(plci, CPN, &msg[0]);
			add_s(plci, KEY, &ai_parms[1]);
			sig_req(plci, INFO_REQ, 0);
			send_req(plci);
			return false;
		}

		if (plci->State && ai_parms[2].length)
		{
			/* User_Info option */
			dbug(1, dprintf("UUI"));
			add_s(plci, UUI, &ai_parms[2]);
			sig_req(plci, USER_DATA, 0);
		}
		else if (plci->State && ai_parms[3].length)
		{
			/* Facility option */
			dbug(1, dprintf("FAC"));
			add_s(plci, CPN, &msg[0]);
			add_ai(plci, &msg[1]);
			sig_req(plci, FACILITY_REQ, 0);
		}
		else
		{
			Info = _WRONG_STATE;
		}
	}
	else if ((ai_parms[1].length || ai_parms[2].length || ai_parms[3].length) && !Info)
	{
		/* NCR_Facility option -> send UUI and Keypad too */
		dbug(1, dprintf("NCR_FAC"));
		if ((i = get_plci(a)))
		{
			/* assign a temporary PLCI for the null call reference */
			rc_plci = &a->plci[i - 1];
			appl->NullCREnable = true;
			rc_plci->internal_command = C_NCR_FAC_REQ;
			rc_plci->appl = appl;
			add_p(rc_plci, CAI, "\x01\x80");
			add_p(rc_plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
			sig_req(rc_plci, ASSIGN, DSIG_ID);
			send_req(rc_plci);
		}
		else
		{
			Info = _OUT_OF_PLCI;
		}

		if (!Info)
		{
			add_s(rc_plci, CPN, &msg[0]);
			add_ai(rc_plci, &msg[1]);
			sig_req(rc_plci, NCR_FACILITY, 0);
			send_req(rc_plci);
			return false;
			/* for application controlled supplementary services */
		}
	}

	if (!rc_plci)
	{
		Info = _WRONG_MESSAGE_FORMAT;
	}

	if (!Info)
	{
		send_req(rc_plci);
	}
	else
	{ /* appl is not assigned to a PLCI or error condition */
		dbug(1, dprintf("localInfoCon"));
		sendf(appl,
		      _INFO_R | CONFIRM,
		      Id,
		      Number,
		      "w", Info);
	}
	return false;
}
/*
 * INFO_RES handler: no processing required; trace only.
 */
static byte info_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("info_res"));
	return false;
}
/*
 * ALERT_REQ handler: signals CALL_ALERT on a pending incoming call.
 * A repeated alert (state already INC_CON_ALERT) is confirmed with
 * _ALERT_IGNORED; any other state yields _WRONG_STATE.  Returns 1 when
 * the alert was actually forwarded to signaling, false otherwise.
 */
static byte alert_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info = _WRONG_IDENTIFIER;
	byte ret = false;

	dbug(1, dprintf("alert_req"));
	if (plci) {
		if (plci->State == INC_CON_PENDING) {
			/* first alert on this incoming call: forward it */
			Info = 0;
			plci->State = INC_CON_ALERT;
			add_ai(plci, &msg[0]);
			sig_req(plci, CALL_ALERT, 0);
			ret = 1;
		} else if (plci->State == INC_CON_ALERT) {
			Info = _ALERT_IGNORED;
		} else {
			Info = _WRONG_STATE;
		}
	}
	sendf(appl,
	      _ALERT_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return ret;
}
/*
 * FACILITY_REQ handler: dispatches on the facility selector (handset,
 * supplementary services, DTMF, line interconnect, echo canceller).
 * For SELECTOR_SU_SERV a second dispatch on the supplementary-service
 * request code (SSreq) follows; most SS cases either operate on the
 * given PLCI or assign a fresh PLCI (get_plci + ASSIGN) and defer the
 * confirm to an internal-command completion (the "return false" exits
 * after sig_req/send_req).  Falling out of the switch sends the
 * FACILITY confirm with the accumulated Info code.
 */
static byte facility_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info = 0;
	word i = 0;

	word selector;
	word SSreq;
	long relatedPLCIvalue;
	DIVA_CAPI_ADAPTER *relatedadapter;
	byte *SSparms = "";
	byte RCparms[] = "\x05\x00\x00\x02\x00\x00";
	byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
	API_PARSE *parms;
	API_PARSE ss_parms[11];
	PLCI *rplci;
	byte cai[15];
	dword d;
	API_PARSE dummy;

	dbug(1, dprintf("facility_req"));
	/* NOTE(review): only 9 of the 11 ss_parms entries are cleared here;
	   entries 9 and 10 appear to be read only after a successful
	   api_parse of the longer formats - confirm before relying on it. */
	for (i = 0; i < 9; i++) ss_parms[i].length = 0;

	parms = &msg[1];

	if (!a)
	{
		dbug(1, dprintf("wrong Ctrl"));
		Info = _WRONG_IDENTIFIER;
	}

	selector = GET_WORD(msg[0].info);

	if (!Info)
	{
		switch (selector)
		{
		case SELECTOR_HANDSET:
			Info = AdvCodecSupport(a, plci, appl, HOOK_SUPPORT);
			break;

		case SELECTOR_SU_SERV:
			if (!msg[1].length)
			{
				Info = _WRONG_MESSAGE_FORMAT;
				break;
			}
			SSreq = GET_WORD(&(msg[1].info[1]));
			PUT_WORD(&RCparms[1], SSreq);
			SSparms = RCparms;
			switch (SSreq)
			{
			case S_GET_SUPPORTED_SERVICES:
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					rplci->appl = appl;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					/* no PLCI available: answer locally with the
					   statically known service mask */
					PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
					SSparms = (byte *)SSstruct;
					break;
				}
				rplci->internal_command = GETSERV_REQ_PEND;
				rplci->number = Number;
				rplci->appl = appl;
				sig_req(rplci, S_SUPPORTED, 0);
				send_req(rplci);
				return false;
				break;

			case S_LISTEN:
				if (parms->length == 7)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				a->Notification_Mask[appl->Id - 1] = GET_DWORD(ss_parms[2].info);
				if (a->Notification_Mask[appl->Id - 1] & SMASK_MWI) /* MWI active? */
				{
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						break;
					}
					rplci->internal_command = GET_MWI_STATE;
					rplci->number = Number;
					sig_req(rplci, MWI_POLL, 0);
					send_req(rplci);
				}
				break;

			case S_HOLD:
				/* NOTE(review): api_parse result is ignored here;
				   ss_parms[1] may be unparsed on a malformed message */
				api_parse(&parms->info[1], (word)parms->length, "ws", ss_parms);
				if (plci && plci->State && plci->SuppState == IDLE)
				{
					plci->SuppState = HOLD_REQUEST;
					plci->command = C_HOLD_REQ;
					add_s(plci, CAI, &ss_parms[1]);
					sig_req(plci, CALL_HOLD, 0);
					send_req(plci);
					return false;
				}
				else Info = 0x3010; /* wrong state */
				break;

			case S_RETRIEVE:
				if (plci && plci->State && plci->SuppState == CALL_HELD)
				{
					if (Id & EXT_CONTROLLER)
					{
						if (AdvCodecSupport(a, plci, appl, 0))
						{
							Info = 0x3010; /* wrong state */
							break;
						}
					}
					else plci->tel = 0;

					plci->SuppState = RETRIEVE_REQUEST;
					plci->command = C_RETRIEVE_REQ;
					if (plci->spoofed_msg == SPOOFING_REQUIRED)
					{
						/* defer the retrieve until the codec PLCI is ready */
						plci->spoofed_msg = CALL_RETRIEVE;
						plci->internal_command = BLOCK_PLCI;
						plci->command = 0;
						dbug(1, dprintf("Spoof"));
						return false;
					}
					else
					{
						sig_req(plci, CALL_RETRIEVE, 0);
						send_req(plci);
						return false;
					}
				}
				else Info = 0x3010; /* wrong state */
				break;

			case S_SUSPEND:
				if (parms->length)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				if (plci && plci->State)
				{
					add_s(plci, CAI, &ss_parms[2]);
					plci->command = SUSPEND_REQ;
					sig_req(plci, SUSPEND, 0);
					plci->State = SUSPENDING;
					send_req(plci);
				}
				else Info = 0x3010; /* wrong state */
				break;

			case S_RESUME:
				if (!(i = get_plci(a)))
				{
					Info = _OUT_OF_PLCI;
					break;
				}
				rplci = &a->plci[i - 1];
				rplci->appl = appl;
				rplci->number = Number;
				rplci->tel = 0;
				rplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
				/* check 'external controller' bit for codec support */
				if (Id & EXT_CONTROLLER)
				{
					if (AdvCodecSupport(a, rplci, appl, 0))
					{
						rplci->Id = 0;
						Info = 0x300A;
						break;
					}
				}
				if (parms->length)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						rplci->Id = 0;
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				dummy.length = 0;
				dummy.info = "\x00";
				add_b1(rplci, &dummy, 0, 0);
				if (a->Info_Mask[appl->Id - 1] & 0x200)
				{
					/* early B3 connect (CIP mask bit 9) no release after a disc */
					add_p(rplci, LLI, "\x01\x01");
				}
				add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				sig_req(rplci, ASSIGN, DSIG_ID);
				send_req(rplci);
				add_s(rplci, CAI, &ss_parms[2]);
				rplci->command = RESUME_REQ;
				sig_req(rplci, RESUME, 0);
				rplci->State = RESUMING;
				send_req(rplci);
				break;

			case S_CONF_BEGIN: /* Request */
			case S_CONF_DROP:
			case S_CONF_ISOLATE:
			case S_CONF_REATTACH:
				if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (plci && plci->State && ((plci->SuppState == IDLE) || (plci->SuppState == CALL_HELD)))
				{
					d = GET_DWORD(ss_parms[2].info);
					if (d >= 0x80)
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
					plci->ptyState = (byte)SSreq;
					plci->command = 0;
					cai[0] = 2;
					switch (SSreq)
					{
					case S_CONF_BEGIN:
						cai[1] = CONF_BEGIN;
						plci->internal_command = CONF_BEGIN_REQ_PEND;
						break;
					case S_CONF_DROP:
						cai[1] = CONF_DROP;
						plci->internal_command = CONF_DROP_REQ_PEND;
						break;
					case S_CONF_ISOLATE:
						cai[1] = CONF_ISOLATE;
						plci->internal_command = CONF_ISOLATE_REQ_PEND;
						break;
					case S_CONF_REATTACH:
						cai[1] = CONF_REATTACH;
						plci->internal_command = CONF_REATTACH_REQ_PEND;
						break;
					}
					cai[2] = (byte)d; /* Conference Size resp. PartyId */
					add_p(plci, CAI, cai);
					sig_req(plci, S_SERVICE, 0);
					send_req(plci);
					return false;
				}
				else Info = 0x3010; /* wrong state */
				break;

			case S_ECT:
			case S_3PTY_BEGIN:
			case S_3PTY_END:
			case S_CONF_ADD:
				if (parms->length == 7)
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else if (parms->length == 8) /* workaround for the T-View-S */
				{
					if (api_parse(&parms->info[1], (word)parms->length, "wbdb", ss_parms))
					{
						dbug(1, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
				}
				else
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!msg[1].length)
				{
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					Info = _WRONG_IDENTIFIER;
					break;
				}
				/* locate the second ("related") PLCI named in the request */
				relatedPLCIvalue = GET_DWORD(ss_parms[2].info);
				relatedPLCIvalue &= 0x0000FFFF;
				dbug(1, dprintf("PTY/ECT/addCONF,relPLCI=%lx", relatedPLCIvalue));
				/* controller starts with 0 up to (max_adapter - 1) */
				if (((relatedPLCIvalue & 0x7f) == 0)
				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) == 0)
				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) > max_adapter))
				{
					if (SSreq == S_3PTY_END)
					{
						dbug(1, dprintf("wrong Controller use 2nd PLCI=PLCI"));
						rplci = plci;
					}
					else
					{
						Info = 0x3010; /* wrong state */
						break;
					}
				}
				else
				{
					relatedadapter = &adapter[MapController((byte)(relatedPLCIvalue & 0x7f)) - 1];
					relatedPLCIvalue >>= 8;
					/* find PLCI PTR*/
					for (i = 0, rplci = NULL; i < relatedadapter->max_plci; i++)
					{
						if (relatedadapter->plci[i].Id == (byte)relatedPLCIvalue)
						{
							rplci = &relatedadapter->plci[i];
						}
					}
					if (!rplci || !relatedPLCIvalue)
					{
						if (SSreq == S_3PTY_END)
						{
							dbug(1, dprintf("use 2nd PLCI=PLCI"));
							rplci = plci;
						}
						else
						{
							Info = 0x3010; /* wrong state */
							break;
						}
					}
				}
/*
  dbug(1, dprintf("rplci:%x", rplci));
  dbug(1, dprintf("plci:%x", plci));
  dbug(1, dprintf("rplci->ptyState:%x", rplci->ptyState));
  dbug(1, dprintf("plci->ptyState:%x", plci->ptyState));
  dbug(1, dprintf("SSreq:%x", SSreq));
  dbug(1, dprintf("rplci->internal_command:%x", rplci->internal_command));
  dbug(1, dprintf("rplci->appl:%x", rplci->appl));
  dbug(1, dprintf("rplci->Id:%x", rplci->Id));
*/
				/* send PTY/ECT req, cannot check all states because of US stuff */
				if (!rplci->internal_command && rplci->appl)
				{
					plci->command = 0;
					rplci->relatedPTYPLCI = plci;
					plci->relatedPTYPLCI = rplci;
					rplci->ptyState = (byte)SSreq;
					if (SSreq == S_ECT)
					{
						rplci->internal_command = ECT_REQ_PEND;
						cai[1] = ECT_EXECUTE;

						rplci->vswitchstate = 0;
						rplci->vsprot = 0;
						rplci->vsprotdialect = 0;
						plci->vswitchstate = 0;
						plci->vsprot = 0;
						plci->vsprotdialect = 0;

					}
					else if (SSreq == S_CONF_ADD)
					{
						rplci->internal_command = CONF_ADD_REQ_PEND;
						cai[1] = CONF_ADD;
					}
					else
					{
						rplci->internal_command = PTY_REQ_PEND;
						cai[1] = (byte)(SSreq - 3);
					}
					rplci->number = Number;
					if (plci != rplci) /* explicit invocation */
					{
						cai[0] = 2;
						cai[2] = plci->Sig.Id;
						dbug(1, dprintf("explicit invocation"));
					}
					else
					{
						dbug(1, dprintf("implicit invocation"));
						cai[0] = 1;
					}
					add_p(rplci, CAI, cai);
					sig_req(rplci, S_SERVICE, 0);
					send_req(rplci);
					return false;
				}
				else
				{
					dbug(0, dprintf("Wrong line"));
					Info = 0x3010; /* wrong state */
					break;
				}
				break;

			case S_CALL_DEFLECTION:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					Info = _WRONG_IDENTIFIER;
					break;
				}
				/* reuse unused screening indicator */
				ss_parms[3].info[3] = (byte)GET_WORD(&(ss_parms[2].info[0]));
				plci->command = 0;
				plci->internal_command = CD_REQ_PEND;
				appl->CDEnable = true;
				cai[0] = 1;
				cai[1] = CALL_DEFLECTION;
				add_p(plci, CAI, cai);
				add_p(plci, CPN, ss_parms[3].info);
				sig_req(plci, S_SERVICE, 0);
				send_req(plci);
				return false;
				break;

			case S_CALL_FORWARDING_START:
				if (api_parse(&parms->info[1], (word)parms->length, "wbdwwsss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					rplci->appl = appl;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					Info = _OUT_OF_PLCI;
					break;
				}
				/* reuse unused screening indicator */
				rplci->internal_command = CF_START_PEND;
				rplci->appl = appl;
				rplci->number = Number;
				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
				cai[0] = 2;
				cai[1] = 0x70 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
				cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
				add_p(rplci, CAI, cai);
				add_p(rplci, OAD, ss_parms[5].info);
				add_p(rplci, CPN, ss_parms[6].info);
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
				break;

			case S_INTERROGATE_DIVERSION:
			case S_INTERROGATE_NUMBERS:
			case S_CALL_FORWARDING_STOP:
			case S_CCBS_REQUEST:
			case S_CCBS_DEACTIVATE:
			case S_CCBS_INTERROGATE:
				/* each request variant carries a different parameter layout */
				switch (SSreq)
				{
				case S_INTERROGATE_NUMBERS:
					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				case S_CCBS_REQUEST:
				case S_CCBS_DEACTIVATE:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdw", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				case S_CCBS_INTERROGATE:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdws", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
					}
					break;
				default:
					if (api_parse(&parms->info[1], (word)parms->length, "wbdwws", ss_parms))
					{
						dbug(0, dprintf("format wrong"));
						Info = _WRONG_MESSAGE_FORMAT;
						break;
					}
					break;
				}

				if (Info) break;
				if ((i = get_plci(a)))
				{
					rplci = &a->plci[i - 1];
					switch (SSreq)
					{
					case S_INTERROGATE_DIVERSION: /* use cai with S_SERVICE below */
						cai[1] = 0x60 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
						rplci->internal_command = INTERR_DIVERSION_REQ_PEND; /* move to rplci if assigned */
						break;
					case S_INTERROGATE_NUMBERS: /* use cai with S_SERVICE below */
						cai[1] = DIVERSION_INTERROGATE_NUM; /* Function */
						rplci->internal_command = INTERR_NUMBERS_REQ_PEND; /* move to rplci if assigned */
						break;
					case S_CALL_FORWARDING_STOP:
						rplci->internal_command = CF_STOP_PEND;
						cai[1] = 0x80 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
						break;
					case S_CCBS_REQUEST:
						cai[1] = CCBS_REQUEST;
						rplci->internal_command = CCBS_REQUEST_REQ_PEND;
						break;
					case S_CCBS_DEACTIVATE:
						cai[1] = CCBS_DEACTIVATE;
						rplci->internal_command = CCBS_DEACTIVATE_REQ_PEND;
						break;
					case S_CCBS_INTERROGATE:
						cai[1] = CCBS_INTERROGATE;
						rplci->internal_command = CCBS_INTERROGATE_REQ_PEND;
						break;
					default:
						cai[1] = 0;
						break;
					}
					rplci->appl = appl;
					rplci->number = Number;
					add_p(rplci, CAI, "\x01\x80");
					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
					sig_req(rplci, ASSIGN, DSIG_ID);
					send_req(rplci);
				}
				else
				{
					Info = _OUT_OF_PLCI;
					break;
				}

				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
				switch (SSreq)
				{
				case S_INTERROGATE_NUMBERS:
					cai[0] = 1;
					add_p(rplci, CAI, cai);
					break;
				case S_CCBS_REQUEST:
				case S_CCBS_DEACTIVATE:
					cai[0] = 3;
					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
					add_p(rplci, CAI, cai);
					break;
				case S_CCBS_INTERROGATE:
					cai[0] = 3;
					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
					add_p(rplci, CAI, cai);
					add_p(rplci, OAD, ss_parms[4].info);
					break;
				default:
					cai[0] = 2;
					cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
					add_p(rplci, CAI, cai);
					add_p(rplci, OAD, ss_parms[5].info);
					break;
				}

				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;
				break;

			case S_MWI_ACTIVATE:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwdwwwssss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					/* no call context: assign a PLCI for the enquiry */
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						rplci->cr_enquiry = true;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						Info = _OUT_OF_PLCI;
						break;
					}
				}
				else
				{
					rplci = plci;
					rplci->cr_enquiry = false;
				}

				rplci->command = 0;
				rplci->internal_command = MWI_ACTIVATE_REQ_PEND;
				rplci->appl = appl;
				rplci->number = Number;

				cai[0] = 13;
				cai[1] = ACTIVATION_MWI; /* Function */
				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
				PUT_DWORD(&cai[4], GET_DWORD(&(ss_parms[3].info[0]))); /* Number of Messages */
				PUT_WORD(&cai[8], GET_WORD(&(ss_parms[4].info[0]))); /* Message Status */
				PUT_WORD(&cai[10], GET_WORD(&(ss_parms[5].info[0]))); /* Message Reference */
				PUT_WORD(&cai[12], GET_WORD(&(ss_parms[6].info[0]))); /* Invocation Mode */
				add_p(rplci, CAI, cai);
				add_p(rplci, CPN, ss_parms[7].info); /* Receiving User Number */
				add_p(rplci, OAD, ss_parms[8].info); /* Controlling User Number */
				add_p(rplci, OSA, ss_parms[9].info); /* Controlling User Provided Number */
				add_p(rplci, UID, ss_parms[10].info); /* Time */
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;

			case S_MWI_DEACTIVATE:
				if (api_parse(&parms->info[1], (word)parms->length, "wbwwss", ss_parms))
				{
					dbug(1, dprintf("format wrong"));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (!plci)
				{
					/* no call context: assign a PLCI for the enquiry */
					if ((i = get_plci(a)))
					{
						rplci = &a->plci[i - 1];
						rplci->appl = appl;
						rplci->cr_enquiry = true;
						add_p(rplci, CAI, "\x01\x80");
						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						sig_req(rplci, ASSIGN, DSIG_ID);
						send_req(rplci);
					}
					else
					{
						Info = _OUT_OF_PLCI;
						break;
					}
				}
				else
				{
					rplci = plci;
					rplci->cr_enquiry = false;
				}

				rplci->command = 0;
				rplci->internal_command = MWI_DEACTIVATE_REQ_PEND;
				rplci->appl = appl;
				rplci->number = Number;

				cai[0] = 5;
				cai[1] = DEACTIVATION_MWI; /* Function */
				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
				PUT_WORD(&cai[4], GET_WORD(&(ss_parms[3].info[0]))); /* Invocation Mode */
				add_p(rplci, CAI, cai);
				add_p(rplci, CPN, ss_parms[4].info); /* Receiving User Number */
				add_p(rplci, OAD, ss_parms[5].info); /* Controlling User Number */
				sig_req(rplci, S_SERVICE, 0);
				send_req(rplci);
				return false;

			default:
				Info = 0x300E;  /* not supported */
				break;
			}
			break; /* case SELECTOR_SU_SERV: end */

		case SELECTOR_DTMF:
			return (dtmf_request(Id, Number, a, plci, appl, msg));

		case SELECTOR_LINE_INTERCONNECT:
			return (mixer_request(Id, Number, a, plci, appl, msg));

		case PRIV_SELECTOR_ECHO_CANCELLER:
			appl->appl_flags |= APPL_FLAG_PRIV_EC_SPEC;
			return (ec_request(Id, Number, a, plci, appl, msg));

		case SELECTOR_ECHO_CANCELLER:
			appl->appl_flags &= ~APPL_FLAG_PRIV_EC_SPEC;
			return (ec_request(Id, Number, a, plci, appl, msg));

		case SELECTOR_V42BIS:
		default:
			Info = _FACILITY_NOT_SUPPORTED;
			break;
		} /* end of switch (selector) */
	}

	dbug(1, dprintf("SendFacRc"));
	sendf(appl,
	      _FACILITY_R | CONFIRM,
	      Id,
	      Number,
	      "wws", Info, selector, SSparms);
	return false;
}
/*
 * FACILITY_RES handler: no processing required; trace only.
 */
static byte facility_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *msg)
{
	dbug(1, dprintf("facility_res"));
	return false;
}
/*
 * CONNECT_B3_REQ handler: establishes the layer-3 (B3) connection on an
 * already connected PLCI.  For X.25-style protocols (B3_prot 2/3) the
 * NCPI may select a PVC (N_RESET) or carry call user data; for fax
 * (B3_prot 5 / T.30) the NCPI updates the negotiated T30_INFO image and
 * may defer to an internal fax command; for RTP the NCPI is forwarded
 * as a reconfigure UDATA.  Returns 1 when N_CONNECT was issued, false
 * when the confirm was sent here or the work was handed to an internal
 * command.
 */
static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word Info = 0;
	byte req;
	byte len;
	word w;
	word fax_control_bits, fax_feature_bits, fax_info_change;
	API_PARSE *ncpi;
	byte pvc[2];

	API_PARSE fax_parms[9];
	word i;

	dbug(1, dprintf("connect_b3_req"));
	if (plci)
	{
		if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING)
		    || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE))
		{
			Info = _WRONG_STATE;
		}
		else
		{
			/* local reply if assign unsuccessful
			   or B3 protocol allows only one layer 3 connection
			   and already connected
			   or B2 protocol not any LAPD
			   and connect_b3_req contradicts originate/answer direction */
			if (!plci->NL.Id
			    || (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
				&& ((plci->channels != 0)
				    || (((plci->B2_prot != B2_SDLC) && (plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL))
					&& ((plci->call_dir & CALL_DIR_ANSWER) && !(plci->call_dir & CALL_DIR_FORCE_OUTG_NL))))))
			{
				dbug(1, dprintf("B3 already connected=%d or no NL.Id=0x%x, dir=%d sstate=0x%x",
						plci->channels, plci->NL.Id, plci->call_dir, plci->SuppState));
				Info = _WRONG_STATE;
				sendf(appl,
				      _CONNECT_B3_R | CONFIRM,
				      Id,
				      Number,
				      "w", Info);
				return false;
			}
			plci->requested_options_conn = 0;

			req = N_CONNECT;
			ncpi = &parms[0];
			if (plci->B3_prot == 2 || plci->B3_prot == 3)
			{
				if (ncpi->length > 2)
				{
					/* check for PVC */
					if (ncpi->info[2] || ncpi->info[3])
					{
						/* non-zero logical channel: reset the PVC
						   instead of placing an SVC call */
						pvc[0] = ncpi->info[3];
						pvc[1] = ncpi->info[2];
						add_d(plci, 2, pvc);
						req = N_RESET;
					}
					else
					{
						if (ncpi->info[1] & 1) req = N_CONNECT | N_D_BIT;
						add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
					}
				}
			}
			else if (plci->B3_prot == 5)
			{
				if (plci->NL.Id && !plci->nl_remove_id)
				{
					/* current negotiated T.30 state */
					fax_control_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low);
					fax_feature_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low);
					if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
					    || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
					{
						len = offsetof(T30_INFO, universal_6);
						fax_info_change = false;
						if (ncpi->length >= 4)
						{
							w = GET_WORD(&ncpi->info[3]);
							/* NCPI word 1 bit 0: resolution */
							if ((w & 0x0001) != ((word)(((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & 0x0001)))
							{
								((T30_INFO *)(plci->fax_connect_info_buffer))->resolution =
									(byte)((((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & ~T30_RESOLUTION_R8_0770_OR_200) |
									       ((w & 0x0001) ? T30_RESOLUTION_R8_0770_OR_200 : 0));
								fax_info_change = true;
							}
							fax_control_bits &= ~(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
							if (w & 0x0002)  /* Fax-polling request */
								fax_control_bits |= T30_CONTROL_BIT_REQUEST_POLLING;
							if ((w & 0x0004) /* Request to send / poll another document */
							    && (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS))
							{
								fax_control_bits |= T30_CONTROL_BIT_MORE_DOCUMENTS;
							}
							if (ncpi->length >= 6)
							{
								w = GET_WORD(&ncpi->info[5]);
								/* NCPI word 2: data format */
								if (((byte) w) != ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format)
								{
									((T30_INFO *)(plci->fax_connect_info_buffer))->data_format = (byte) w;
									fax_info_change = true;
								}

								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
								    && (GET_WORD(&ncpi->info[5]) & 0x8000)) /* Private SEP/SUB/PWD enable */
								{
									plci->requested_options_conn |= (1L << PRIVATE_FAX_SUB_SEP_PWD);
								}
								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
								    && (GET_WORD(&ncpi->info[5]) & 0x4000)) /* Private non-standard facilities enable */
								{
									plci->requested_options_conn |= (1L << PRIVATE_FAX_NONSTANDARD);
								}
								fax_control_bits &= ~(T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_SEL_POLLING |
						      T30_CONTROL_BIT_ACCEPT_PASSWORD);
								if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
								    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
								{
									/* parse SUB/SEP/PWD and optional NSF strings
									   out of the extended NCPI */
									if (api_parse(&ncpi->info[1], ncpi->length, "wwwwsss", fax_parms))
										Info = _WRONG_MESSAGE_FORMAT;
									else
									{
										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
										    & (1L << PRIVATE_FAX_SUB_SEP_PWD))
										{
											fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
											if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
												fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
										}
										w = fax_parms[4].length;
										if (w > 20)
											w = 20;
										((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = (byte) w;
										for (i = 0; i < w; i++)
											((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1 + i];
										((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
										len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
										w = fax_parms[5].length;
										if (w > 20)
											w = 20;
										plci->fax_connect_info_buffer[len++] = (byte) w;
										for (i = 0; i < w; i++)
											plci->fax_connect_info_buffer[len++] = fax_parms[5].info[1 + i];
										w = fax_parms[6].length;
										if (w > 20)
											w = 20;
										plci->fax_connect_info_buffer[len++] = (byte) w;
										for (i = 0; i < w; i++)
											plci->fax_connect_info_buffer[len++] = fax_parms[6].info[1 + i];
										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
										    & (1L << PRIVATE_FAX_NONSTANDARD))
										{
											if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
											{
												dbug(1, dprintf("non-standard facilities info missing or wrong format"));
												plci->fax_connect_info_buffer[len++] = 0;
											}
											else
											{
												if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
													plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
												plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
												for (i = 0; i < fax_parms[7].length; i++)
													plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
											}
										}
									}
								}
								else
								{
									len = offsetof(T30_INFO, universal_6);
								}
								fax_info_change = true;

							}
							if (fax_control_bits != GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low))
							{
								PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low, fax_control_bits);
								fax_info_change = true;
							}
						}
						if (Info == GOOD)
						{
							plci->fax_connect_info_length = len;
							if (fax_info_change)
							{
								/* renegotiation needed: hand over to the
								   appropriate internal fax command */
								if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
								{
									start_internal_command(Id, plci, fax_connect_info_command);
									return false;
								}
								else
								{
									start_internal_command(Id, plci, fax_adjust_b23_command);
									return false;
								}
							}
						}
					}
					else  Info = _WRONG_STATE;
				}
				else  Info = _WRONG_STATE;
			}

			else if (plci->B3_prot == B3_RTP)
			{
				/* forward the NCPI as an RTP reconfigure request */
				plci->internal_req_buffer[0] = ncpi->length + 1;
				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
				for (w = 0; w < ncpi->length; w++)
					plci->internal_req_buffer[2 + w] = ncpi->info[1 + w];
				start_internal_command(Id, plci, rtp_connect_b3_req_command);
				return false;
			}

			if (!Info)
			{
				nl_req_ncci(plci, req, 0);
				return 1;
			}
		}
	}
	else Info = _WRONG_IDENTIFIER;

	sendf(appl,
	      _CONNECT_B3_R | CONFIRM,
	      Id,
	      Number,
	      "w", Info);
	return false;
}
/*
 * CONNECT_B3_RES handler: answers an incoming B3 connect indication.
 * A non-zero reject word disconnects the NCCI (N_DISC); otherwise the
 * connect is acknowledged (N_CONNECT_ACK), with special handling for
 * fax NSF response negotiation and RTP reconfigure.  Returns 1 when an
 * N-request was issued directly, false when deferred to an internal
 * command or when the PLCI/NCCI is invalid.
 */
static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
	PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci;
	API_PARSE *ncpi;
	byte req;

	word w;

	API_PARSE fax_parms[9];
	word i;
	byte len;

	dbug(1, dprintf("connect_b3_res"));

	ncci = (word)(Id >> 16);
	if (plci && ncci) {
		if (a->ncci_state[ncci] == INC_CON_PENDING) {
			/* reject word != 0: refuse the connection */
			if (GET_WORD(&parms[0].info[0]) != 0)
			{
				a->ncci_state[ncci] = OUTG_REJ_PENDING;
				channel_request_xon(plci, a->ncci_ch[ncci]);
				channel_xmit_xon(plci);
				cleanup_ncci_data(plci, ncci);
				nl_req_ncci(plci, N_DISC, (byte)ncci);
				return 1;
			}
			a->ncci_state[ncci] = INC_ACT_PENDING;

			req = N_CONNECT_ACK;
			ncpi = &parms[1];
			if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
			{
				/* fax protocols: possibly append an NSF response
				   before acknowledging */
				if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
				    & (1L << PRIVATE_FAX_NONSTANDARD))
				{
					if (((plci->B3_prot == 4) || (plci->B3_prot == 5))
					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
					{
						len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
						if (plci->fax_connect_info_length < len)
						{
							((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
							((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
						}
						if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
						{
							dbug(1, dprintf("non-standard facilities info missing or wrong format"));
						}
						else
						{
							/* skip over (or zero-create) the SUB and PWD
							   strings already stored behind station_id */
							if (plci->fax_connect_info_length <= len)
								plci->fax_connect_info_buffer[len] = 0;
							len += 1 + plci->fax_connect_info_buffer[len];
							if (plci->fax_connect_info_length <= len)
								plci->fax_connect_info_buffer[len] = 0;
							len += 1 + plci->fax_connect_info_buffer[len];
							if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
								plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
							plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
							for (i = 0; i < fax_parms[7].length; i++)
								plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
						}
						plci->fax_connect_info_length = len;
						((T30_INFO *)(plci->fax_connect_info_buffer))->code = 0;
						start_internal_command(Id, plci, fax_connect_ack_command);
						return false;
					}
				}

				nl_req_ncci(plci, req, (byte)ncci);
				if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
				    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
				{
					if (plci->B3_prot == 4)
						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
					else
						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
					plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
				}
			}

			else if (plci->B3_prot == B3_RTP)
			{
				plci->internal_req_buffer[0] = ncpi->length + 1;
				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
				for (w = 0; w < ncpi->length; w++)
					plci->internal_req_buffer[2 + w] = ncpi->info[1+w];
				start_internal_command(Id, plci, rtp_connect_b3_res_command);
				return false;
			}

			else
			{
				if (ncpi->length > 2) {
					if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
					add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
				}
				nl_req_ncci(plci, req, (byte)ncci);
				sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
				if (plci->adjust_b_restore)
				{
					plci->adjust_b_restore = false;
					start_internal_command(Id, plci, adjust_b_restore);
				}
			}
			return 1;
		}
	}
	return false;
}
/*
 * CONNECT_B3_ACTIVE response handler.
 *
 * The application has acknowledged an active B3 connection.  If the
 * NCCI was waiting in INC_ACT_PENDING, promote it (and, unless the PLCI
 * is in the connected-alert state, the PLCI itself) to CONNECTED and
 * re-enable transmit flow on the NCCI's channel.
 *
 * Always returns false: no confirmation message is generated for this
 * response.
 */
static byte connect_b3_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			     PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci = (word)(Id >> 16);

	dbug(1, dprintf("connect_b3_a_res(ncci=0x%x)", ncci));

	if (!plci || !ncci)
		return false;
	if ((plci->State == IDLE) || (plci->State == INC_DIS_PENDING)
	    || (plci->State == OUTG_DIS_PENDING))
		return false;
	if (a->ncci_state[ncci] != INC_ACT_PENDING)
		return false;

	a->ncci_state[ncci] = CONNECTED;
	if (plci->State != INC_CON_CONNECTED_ALERT)
		plci->State = CONNECTED;
	channel_request_xon(plci, a->ncci_ch[ncci]);
	channel_xmit_xon(plci);
	return false;
}
/*
 * DISCONNECT_B3_REQ: tear down the logical (B3) connection on an NCCI.
 *
 * For transparent/T.30 protocols with transmit data still pending, the
 * actual N_DISC is deferred (recorded in plci->send_disc) until the
 * queue drains; otherwise queued NCCI data is cleaned up and N_DISC is
 * issued immediately, optionally carrying NCPI cause data for
 * X.25-like protocols.
 *
 * Returns 1 when the disconnect was initiated (confirmation follows
 * later), false after sending an error DISCONNECT_B3_CONF.
 */
static byte disconnect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word Info;
word ncci;
API_PARSE *ncpi;
dbug(1, dprintf("disconnect_b3_req"));
Info = _WRONG_IDENTIFIER;
/* the NCCI is carried in the upper 16 bits of the CAPI Id */
ncci = (word)(Id >> 16);
if (plci && ncci)
{
Info = _WRONG_STATE;
if ((a->ncci_state[ncci] == CONNECTED)
|| (a->ncci_state[ncci] == OUTG_CON_PENDING)
|| (a->ncci_state[ncci] == INC_CON_PENDING)
|| (a->ncci_state[ncci] == INC_ACT_PENDING))
{
a->ncci_state[ncci] = OUTG_DIS_PENDING;
channel_request_xon(plci, a->ncci_ch[ncci]);
channel_xmit_xon(plci);
/* transparent/T.30: let pending transmit data drain first;
the disconnect is completed later via plci->send_disc */
if (a->ncci[ncci].data_pending
&& ((plci->B3_prot == B3_TRANSPARENT)
|| (plci->B3_prot == B3_T30)
|| (plci->B3_prot == B3_T30_WITH_EXTENSIONS)))
{
plci->send_disc = (byte)ncci;
plci->command = 0;
return false;
}
else
{
cleanup_ncci_data(plci, ncci);
/* B3 protocols 2/3 may carry cause data in the NCPI */
if (plci->B3_prot == 2 || plci->B3_prot == 3)
{
ncpi = &parms[0];
if (ncpi->length > 3)
{
add_d(plci, (word)(ncpi->length - 3), (byte *)&(ncpi->info[4]));
}
}
nl_req_ncci(plci, N_DISC, (byte)ncci);
}
return 1;
}
}
/* error path: confirm with _WRONG_IDENTIFIER or _WRONG_STATE */
sendf(appl,
_DISCONNECT_B3_R | CONFIRM,
Id,
Number,
"w", Info);
return false;
}
/*
 * DISCONNECT_B3_RES: the application acknowledges a DISCONNECT_B3_IND.
 *
 * Frees the NCCI's receive buffers, removes the NCCI from the PLCI's
 * inc_dis_ncci_table and, when this was the last open channel of an
 * idle or suspending PLCI, removes the PLCI itself (for SUSPENDING,
 * a suspend FACILITY_IND and DISCONNECT_IND are delivered first).
 * If the NCCI was not in the table, fax protocols (4/5) with paper-
 * format support instead start a final N_EDATA/fax_disconnect_command
 * sequence.
 *
 * Returns 1 when the fax internal command was started, false otherwise.
 *
 * Fix: the debug format string was missing its closing parenthesis
 * ("disconnect_b3_res(ncci=0x%x"), producing unbalanced trace output.
 */
static byte disconnect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word ncci;
word i;
ncci = (word)(Id >> 16);
dbug(1, dprintf("disconnect_b3_res(ncci=0x%x)", ncci));
if (plci && ncci) {
plci->requested_options_conn = 0;
plci->fax_connect_info_length = 0;
plci->ncpi_state = 0x00;
/* for non-X.25 B3 and non-LAPD B2 protocols, force the network
layer of a subsequent connect into outgoing direction */
if (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
&& ((plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)))
{
plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
}
/* locate this NCCI in the pending-disconnect table */
for (i = 0; i < MAX_CHANNELS_PER_PLCI && plci->inc_dis_ncci_table[i] != (byte)ncci; i++);
if (i < MAX_CHANNELS_PER_PLCI) {
if (plci->channels)plci->channels--;
/* compact the table over the removed entry */
for (; i < MAX_CHANNELS_PER_PLCI - 1; i++) plci->inc_dis_ncci_table[i] = plci->inc_dis_ncci_table[i + 1];
plci->inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI - 1] = 0;
ncci_free_receive_buffers(plci, ncci);
if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
if (plci->State == SUSPENDING) {
sendf(plci->appl,
_FACILITY_I,
Id & 0xffffL,
0,
"ws", (word)3, "\x03\x04\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
}
plci_remove(plci);
plci->State = IDLE;
}
}
else
{
/* NCCI not in the table: fax final-page handling */
if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
&& ((plci->B3_prot == 4) || (plci->B3_prot == 5))
&& (a->ncci_state[ncci] == INC_DIS_PENDING))
{
ncci_free_receive_buffers(plci, ncci);
nl_req_ncci(plci, N_EDATA, (byte)ncci);
plci->adapter->ncci_state[ncci] = IDLE;
start_internal_command(Id, plci, fax_disconnect_command);
return 1;
}
}
}
return false;
}
/*
 * DATA_B3_REQ: queue one outgoing data block on the NCCI's circular
 * transmit ring (DBuffer) and trigger transmission.
 *
 * If the delivery-confirmation flag (0x0004) is set, the Number/Handle
 * pair is additionally queued in the DataAck ring so a DATA_B3_CONF
 * can be matched later.  On error an immediate DATA_B3_CONF with a
 * non-GOOD Info is sent and any buffer reference held through the
 * internal message queue is released.
 *
 * Returns false in all cases (confirmations are asynchronous).
 */
static byte data_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
NCCI *ncci_ptr;
DATA_B3_DESC *data;
word Info;
word ncci;
word i;
dbug(1, dprintf("data_b3_req"));
Info = _WRONG_IDENTIFIER;
ncci = (word)(Id >> 16);
/* NOTE(review): plci is a pointer formatted with %x (debug only);
verify against dprintf's argument handling */
dbug(1, dprintf("ncci=0x%x, plci=0x%x", ncci, plci));
if (plci && ncci)
{
Info = _WRONG_STATE;
if ((a->ncci_state[ncci] == CONNECTED)
|| (a->ncci_state[ncci] == INC_ACT_PENDING))
{
/* queue data */
ncci_ptr = &(a->ncci[ncci]);
/* next free slot in the circular descriptor ring */
i = ncci_ptr->data_out + ncci_ptr->data_pending;
if (i >= MAX_DATA_B3)
i -= MAX_DATA_B3;
data = &(ncci_ptr->DBuffer[i]);
data->Number = Number;
/* if the message was deferred inside plci->msg_in_queue the data
pointer was already stored there as a dword; otherwise take a
reference on the application's transmit buffer */
if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
data->P = (byte *)(long)(*((dword *)(parms[0].info)));
}
else
data->P = TransmitBufferSet(appl, *(dword *)parms[0].info);
data->Length = GET_WORD(parms[1].info);
data->Handle = GET_WORD(parms[2].info);
data->Flags = GET_WORD(parms[3].info);
(ncci_ptr->data_pending)++;
/* check for delivery confirmation */
if (data->Flags & 0x0004)
{
i = ncci_ptr->data_ack_out + ncci_ptr->data_ack_pending;
if (i >= MAX_DATA_ACK)
i -= MAX_DATA_ACK;
ncci_ptr->DataAck[i].Number = data->Number;
ncci_ptr->DataAck[i].Handle = data->Handle;
(ncci_ptr->data_ack_pending)++;
}
send_data(plci);
return false;
}
}
/* error path: release a queued buffer reference, then confirm with
the error Info and the caller's data handle */
if (appl)
{
if (plci)
{
if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
TransmitBufferFree(appl, (byte *)(long)(*((dword *)(parms[0].info))));
}
}
sendf(appl,
_DATA_B3_R | CONFIRM,
Id,
Number,
"ww", GET_WORD(parms[2].info), Info);
}
return false;
}
/*
 * DATA_B3_RES: the application has consumed received data block n.
 *
 * Validates that slot n in the application's receive bookkeeping still
 * refers to this NCCI and PLCI, releases the slot, re-opens receive
 * flow on the channel if possible, and sends N_DATA_ACK when the
 * original indication carried the delivery-confirmation flag (bit 2).
 *
 * Returns 1 when N_DATA_ACK was queued, false otherwise.
 */
static byte data_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word n;
word ncci;
word NCCIcode;
dbug(1, dprintf("data_b3_res"));
ncci = (word)(Id >> 16);
if (plci && ncci) {
n = GET_WORD(parms[0].info);
dbug(1, dprintf("free(%d)", n));
/* controller-qualified NCCI as stored in appl->DataNCCI */
NCCIcode = ncci | (((word) a->Id) << 8);
if (n < appl->MaxBuffer &&
appl->DataNCCI[n] == NCCIcode &&
(byte)(appl->DataFlags[n] >> 8) == plci->Id) {
dbug(1, dprintf("found"));
appl->DataNCCI[n] = 0;
/* receive slot freed: allow more inbound data on the channel */
if (channel_can_xon(plci, a->ncci_ch[ncci])) {
channel_request_xon(plci, a->ncci_ch[ncci]);
}
channel_xmit_xon(plci);
if (appl->DataFlags[n] & 4) {
nl_req_ncci(plci, N_DATA_ACK, (byte)ncci);
return 1;
}
}
}
return false;
}
/*
 * RESET_B3_REQ: reset the logical connection on an NCCI.
 *
 * For X.25-like protocols (ISO 8208 / X.25 DCE) an N_RESET is sent to
 * the network layer; for transparent B3 an internal reset_b3_command
 * sequence is started.  In either case this only works on a CONNECTED
 * NCCI; otherwise _WRONG_STATE (or _WRONG_IDENTIFIER for a bad Id) is
 * reported.  A RESET_B3_CONF is always sent; returns false.
 */
static byte reset_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word result = _WRONG_IDENTIFIER;
	word ncci = (word)(Id >> 16);

	dbug(1, dprintf("reset_b3_req"));

	if (plci && ncci) {
		result = _WRONG_STATE;
		if (a->ncci_state[ncci] == CONNECTED) {
			switch (plci->B3_prot) {
			case B3_ISO8208:
			case B3_X25_DCE:
				nl_req_ncci(plci, N_RESET, (byte)ncci);
				send_req(plci);
				result = GOOD;
				break;
			case B3_TRANSPARENT:
				start_internal_command(Id, plci, reset_b3_command);
				result = GOOD;
				break;
			}
		}
	}
	/* reset_b3 must result in a reset_b3_con & reset_b3_Ind */
	sendf(appl,
	      _RESET_B3_R | CONFIRM,
	      Id,
	      Number,
	      "w", result);
	return false;
}
/*
 * RESET_B3_RES: the application acknowledges a RESET_B3_IND.
 *
 * Only meaningful for X.25-like protocols: an NCCI waiting in
 * INC_RES_PENDING is put back to CONNECTED and an N_RESET_ACK is sent.
 * Returns true when the acknowledgement was issued, false otherwise.
 */
static byte reset_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
			 PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci = (word)(Id >> 16);

	dbug(1, dprintf("reset_b3_res"));

	if (!plci || !ncci)
		return false;

	if ((plci->B3_prot == B3_ISO8208) || (plci->B3_prot == B3_X25_DCE)) {
		if (a->ncci_state[ncci] == INC_RES_PENDING) {
			a->ncci_state[ncci] = CONNECTED;
			nl_req_ncci(plci, N_RESET_ACK, (byte)ncci);
			return true;
		}
	}
	return false;
}
/*
 * CONNECT_B3_T90_ACTIVE response handler.
 *
 * An NCCI in INC_ACT_PENDING simply becomes CONNECTED.  An NCCI in
 * INC_CON_PENDING becomes CONNECTED and is acknowledged to the network
 * layer with N_CONNECT_ACK; when the (optional, non-original-CAPI)
 * NCPI is present, its D-bit request and cause data are honoured.
 * Returns 1 when N_CONNECT_ACK was queued, false otherwise.
 */
static byte connect_b3_t90_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
				 PLCI *plci, APPL *appl, API_PARSE *parms)
{
	word ncci = (word)(Id >> 16);
	API_PARSE *ncpi;
	byte req;

	dbug(1, dprintf("connect_b3_t90_a_res"));

	if (!plci || !ncci)
		return false;

	if (a->ncci_state[ncci] == INC_ACT_PENDING) {
		a->ncci_state[ncci] = CONNECTED;
		return false;
	}
	if (a->ncci_state[ncci] != INC_CON_PENDING)
		return false;

	a->ncci_state[ncci] = CONNECTED;
	req = N_CONNECT_ACK;
	/* parms[0]==0 for CAPI original message definition! */
	if (parms[0].info) {
		ncpi = &parms[1];
		if (ncpi->length > 2) {
			if (ncpi->info[1] & 1)
				req = N_CONNECT_ACK | N_D_BIT;
			add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
		}
	}
	nl_req_ncci(plci, req, (byte)ncci);
	return 1;
}
/*
 * SELECT_B_REQ: switch the B-channel protocol stack of an existing
 * PLCI (CAPI supplementary "Select B protocol").
 *
 * Only allowed when no B channel is open and no B3 is connected.
 * Handles the special inband-alerting case (answering an incoming call
 * by selecting a protocol), external-controller (codec) arbitration in
 * both directions, and finally defers the actual switch to
 * select_b_command.  A SELECT_B_CONF with the resulting Info is sent
 * on every synchronous error path; returns false (or 0 for the
 * wrong-controller case).
 */
static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *msg)
{
word Info = 0;
word i;
byte tel;
API_PARSE bp_parms[7];
if (!plci || !msg)
{
Info = _WRONG_IDENTIFIER;
}
else
{
dbug(1, dprintf("select_b_req[%d],PLCI=0x%x,Tel=0x%x,NL=0x%x,appl=0x%x,sstate=0x%x",
msg->length, plci->Id, plci->tel, plci->NL.Id, plci->appl, plci->SuppState));
dbug(1, dprintf("PlciState=0x%x", plci->State));
for (i = 0; i < 7; i++) bp_parms[i].length = 0;
/* check if no channel is open, no B3 connected only */
if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING)
|| (plci->SuppState != IDLE) || plci->channels || plci->nl_remove_id)
{
Info = _WRONG_STATE;
}
/* check message format and fill bp_parms pointer */
else if (msg->length && api_parse(&msg->info[1], (word)msg->length, "wwwsss", bp_parms))
{
Info = _WRONG_MESSAGE_FORMAT;
}
else
{
if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT)) /* send alert tone inband to the network, */
{ /* e.g. Qsig or RBS or Cornet-N or xess PRI */
if (Id & EXT_CONTROLLER)
{
sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", 0x2002); /* wrong controller */
return 0;
}
/* selecting a protocol on an incoming call acts as a quasi
connect: this appl wins, all others get DISCONNECT_I */
plci->State = INC_CON_CONNECTED_ALERT;
plci->appl = appl;
clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
dump_c_ind_mask(plci);
for (i = 0; i < max_appl; i++) /* disconnect the other appls */
{ /* its quasi a connect */
if (test_c_ind_mask_bit(plci, i))
sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
}
}
api_save_msg(msg, "s", &plci->saved_msg);
tel = plci->tel;
if (Id & EXT_CONTROLLER)
{
if (tel) /* external controller in use by this PLCI */
{
if (a->AdvSignalAppl && a->AdvSignalAppl != appl)
{
dbug(1, dprintf("Ext_Ctrl in use 1"));
Info = _WRONG_STATE;
}
}
else /* external controller NOT in use by this PLCI ? */
{
if (a->AdvSignalPLCI)
{
dbug(1, dprintf("Ext_Ctrl in use 2"));
Info = _WRONG_STATE;
}
else /* activate the codec */
{
dbug(1, dprintf("Ext_Ctrl start"));
if (AdvCodecSupport(a, plci, appl, 0))
{
dbug(1, dprintf("Error in codec procedures"));
Info = _WRONG_STATE;
}
else if (plci->spoofed_msg == SPOOFING_REQUIRED) /* wait until codec is active */
{
plci->spoofed_msg = AWAITING_SELECT_B;
plci->internal_command = BLOCK_PLCI; /* lock other commands */
plci->command = 0;
dbug(1, dprintf("continue if codec loaded"));
return false;
}
}
}
}
else /* external controller bit is OFF */
{
if (tel) /* external controller in use, need to switch off */
{
if (a->AdvSignalAppl == appl)
{
CodecIdCheck(a, plci);
plci->tel = 0;
plci->adv_nl = 0;
dbug(1, dprintf("Ext_Ctrl disable"));
}
else
{
dbug(1, dprintf("Ext_Ctrl not requested"));
}
}
}
if (!Info)
{
/* normalize the call direction before the protocol switch */
if (plci->call_dir & CALL_DIR_OUT)
plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
else if (plci->call_dir & CALL_DIR_IN)
plci->call_dir = CALL_DIR_IN | CALL_DIR_ANSWER;
start_internal_command(Id, plci, select_b_command);
return false;
}
}
}
sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", Info);
return false;
}
/*
 * MANUFACTURER_REQ: dispatcher for Eicon/Diva manufacturer-specific
 * CAPI commands (_DI_MANU_ID).
 *
 * Supported sub-commands:
 *   _DI_ASSIGN_PLCI      - allocate a PLCI with explicit B channel,
 *                          direction and codec mode (0 = B1 from
 *                          message, 1 = codec CAI, 2 = local codec)
 *   _DI_IDI_CTRL         - pass a raw signalling request (CALL_REQ,
 *                          LAW_REQ, HANGUP, ...) to the SIG entity
 *   _DI_SIG_CTRL         - loop-activation signalling control
 *   _DI_RXT_CTRL         - receiver/transmitter activation control
 *   _DI_ADV_CODEC /      - TEL_CTRL adjustments (ring, volume, mixer
 *   _DI_DSP_CTRL           coefficients, DTMF parameters)
 *   _DI_OPTIONS_REQUEST  - set per-application private options
 *
 * Always sends a MANUFACTURER_CONF with the resulting Info; returns
 * false (or 2/false early on some _DI_ASSIGN_PLCI paths).
 *
 * NOTE(review): chi/lli/codec_cai are static and mutated per call -
 * presumably the CAPI message dispatch is serialized; confirm before
 * assuming reentrancy.
 */
static byte manufacturer_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *parms)
{
word command;
word i;
word ncci;
API_PARSE *m;
API_PARSE m_parms[5];
word codec;
byte req;
byte ch;
byte dir;
static byte chi[2] = {0x01, 0x00};
static byte lli[2] = {0x01, 0x00};
static byte codec_cai[2] = {0x01, 0x01};
static byte null_msg = {0};
static API_PARSE null_parms = { 0, &null_msg };
PLCI *v_plci;
word Info = 0;
dbug(1, dprintf("manufacturer_req"));
for (i = 0; i < 5; i++) m_parms[i].length = 0;
/* only messages carrying the Diva manufacturer id are accepted */
if (GET_DWORD(parms[0].info) != _DI_MANU_ID) {
Info = _WRONG_MESSAGE_FORMAT;
}
command = GET_WORD(parms[1].info);
m = &parms[2];
if (!Info)
{
switch (command) {
case _DI_ASSIGN_PLCI:
if (api_parse(&m->info[1], (word)m->length, "wbbs", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
codec = GET_WORD(m_parms[0].info);
ch = m_parms[1].info[0];
dir = m_parms[2].info[0];
if ((i = get_plci(a))) {
plci = &a->plci[i - 1];
plci->appl = appl;
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
plci->State = LOCAL_CONNECT;
Id = (((word)plci->Id << 8) | plci->adapter->Id | 0x80);
dbug(1, dprintf("ManCMD,plci=0x%x", Id));
if ((ch == 1 || ch == 2) && (dir <= 2)) {
/* fixed B channel, exclusive */
chi[1] = (byte)(0x80 | ch);
lli[1] = 0;
plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
switch (codec)
{
case 0:
Info = add_b1(plci, &m_parms[3], 0, 0);
break;
case 1:
add_p(plci, CAI, codec_cai);
break;
/* manual 'swich on' to the codec support without signalling */
/* first 'assign plci' with this function, then use */
case 2:
if (AdvCodecSupport(a, plci, appl, 0)) {
Info = _RESOURCE_ERROR;
}
else {
Info = add_b1(plci, &null_parms, 0, B1_FACILITY_LOCAL);
lli[1] = 0x10; /* local call codec stream */
}
break;
}
plci->State = LOCAL_CONNECT;
plci->manufacturer = true;
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
if (!Info)
{
add_p(plci, LLI, lli);
add_p(plci, CHI, chi);
add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
sig_req(plci, ASSIGN, DSIG_ID);
if (!codec)
{
Info = add_b23(plci, &m_parms[3]);
if (!Info)
{
nl_req_ncci(plci, ASSIGN, 0);
send_req(plci);
}
}
if (!Info)
{
dbug(1, dprintf("dir=0x%x,spoof=0x%x", dir, plci->spoofed_msg));
if (plci->spoofed_msg == SPOOFING_REQUIRED)
{
/* adapter not ready yet: save the message and replay
it once the codec/protocol is loaded */
api_save_msg(m_parms, "wbbs", &plci->saved_msg);
plci->spoofed_msg = AWAITING_MANUF_CON;
plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
plci->command = 0;
send_req(plci);
return false;
}
if (dir == 1) {
sig_req(plci, CALL_REQ, 0);
}
else if (!dir) {
sig_req(plci, LISTEN_REQ, 0);
}
send_req(plci);
}
else
{
sendf(appl,
_MANUFACTURER_R | CONFIRM,
Id,
Number,
"dww", _DI_MANU_ID, command, Info);
return 2;
}
}
}
}
else Info = _OUT_OF_PLCI;
break;
case _DI_IDI_CTRL:
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (api_parse(&m->info[1], (word)m->length, "bs", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
req = m_parms[0].info[0];
plci->command = _MANUFACTURER_R;
plci->m_command = command;
plci->number = Number;
if (req == CALL_REQ)
{
plci->b_channel = getChannel(&m_parms[1]);
mixer_set_bchannel_id_esc(plci, plci->b_channel);
if (plci->spoofed_msg == SPOOFING_REQUIRED)
{
plci->spoofed_msg = CALL_REQ | AWAITING_MANUF_CON;
plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
plci->command = 0;
break;
}
}
else if (req == LAW_REQ)
{
plci->cr_enquiry = true;
}
add_ss(plci, FTY, &m_parms[1]);
sig_req(plci, req, 0);
send_req(plci);
if (req == HANGUP)
{
/* hangup: disconnect all connected NCCIs of this PLCI and
remove its network-layer entity */
if (plci->NL.Id && !plci->nl_remove_id)
{
if (plci->channels)
{
for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
{
if ((a->ncci_plci[ncci] == plci->Id) && (a->ncci_state[ncci] == CONNECTED))
{
a->ncci_state[ncci] = OUTG_DIS_PENDING;
cleanup_ncci_data(plci, ncci);
nl_req_ncci(plci, N_DISC, (byte)ncci);
}
}
}
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
send_req(plci);
}
}
break;
case _DI_SIG_CTRL:
/* signalling control for loop activation B-channel */
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
plci->command = _MANUFACTURER_R;
plci->number = Number;
add_ss(plci, FTY, m);
sig_req(plci, SIG_CTRL, 0);
send_req(plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
case _DI_RXT_CTRL:
/* activation control for receiver/transmitter B-channel */
if (!plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
plci->command = _MANUFACTURER_R;
plci->number = Number;
add_ss(plci, FTY, m);
sig_req(plci, DSP_CTRL, 0);
send_req(plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
case _DI_ADV_CODEC:
case _DI_DSP_CTRL:
/* TEL_CTRL commands to support non standard adjustments: */
/* Ring on/off, Handset micro volume, external micro vol. */
/* handset+external speaker volume, receiver+transm. gain,*/
/* handsfree on (hookinfo off), set mixer command */
if (command == _DI_ADV_CODEC)
{
if (!a->AdvCodecPLCI) {
Info = _WRONG_STATE;
break;
}
v_plci = a->AdvCodecPLCI;
}
else
{
if (plci
&& (m->length >= 3)
&& (m->info[1] == 0x1c)
&& (m->info[2] >= 1))
{
if (m->info[3] == DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS)
{
/* mixer coefficients: only valid on the advanced-voice
signalling PLCI; copy the (clamped) table and apply
it if the voice facility is up */
if ((plci->tel != ADV_VOICE) || (plci != a->AdvSignalPLCI))
{
Info = _WRONG_STATE;
break;
}
a->adv_voice_coef_length = m->info[2] - 1;
if (a->adv_voice_coef_length > m->length - 3)
a->adv_voice_coef_length = (byte)(m->length - 3);
if (a->adv_voice_coef_length > ADV_VOICE_COEF_BUFFER_SIZE)
a->adv_voice_coef_length = ADV_VOICE_COEF_BUFFER_SIZE;
for (i = 0; i < a->adv_voice_coef_length; i++)
a->adv_voice_coef_buffer[i] = m->info[4 + i];
if (plci->B1_facilities & B1_FACILITY_VOICE)
adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
break;
}
else if (m->info[3] == DSP_CTRL_SET_DTMF_PARAMETERS)
{
/* DTMF detector parameters: copy the (clamped) table
and apply it if the DTMF receiver is active */
if (!(a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_PARAMETERS))
{
Info = _FACILITY_NOT_SUPPORTED;
break;
}
plci->dtmf_parameter_length = m->info[2] - 1;
if (plci->dtmf_parameter_length > m->length - 3)
plci->dtmf_parameter_length = (byte)(m->length - 3);
if (plci->dtmf_parameter_length > DTMF_PARAMETER_BUFFER_SIZE)
plci->dtmf_parameter_length = DTMF_PARAMETER_BUFFER_SIZE;
for (i = 0; i < plci->dtmf_parameter_length; i++)
plci->dtmf_parameter_buffer[i] = m->info[4 + i];
if (plci->B1_facilities & B1_FACILITY_DTMFR)
dtmf_parameter_write(plci);
break;
}
}
v_plci = plci;
}
if (!v_plci)
{
Info = _WRONG_IDENTIFIER;
break;
}
if (m->length) {
add_ss(v_plci, FTY, m);
sig_req(v_plci, TEL_CTRL, 0);
send_req(v_plci);
}
else Info = _WRONG_MESSAGE_FORMAT;
break;
case _DI_OPTIONS_REQUEST:
if (api_parse(&m->info[1], (word)m->length, "d", m_parms)) {
Info = _WRONG_MESSAGE_FORMAT;
break;
}
/* only options the adapter actually offers may be requested */
if (GET_DWORD(m_parms[0].info) & ~a->man_profile.private_options)
{
Info = _FACILITY_NOT_SUPPORTED;
break;
}
a->requested_options_table[appl->Id - 1] = GET_DWORD(m_parms[0].info);
break;
default:
Info = _WRONG_MESSAGE_FORMAT;
break;
}
}
sendf(appl,
_MANUFACTURER_R | CONFIRM,
Id,
Number,
"dww", _DI_MANU_ID, command, Info);
return false;
}
/*
 * MANUFACTURER_RES: response to a Diva manufacturer indication.
 *
 * Currently only _DI_NEGOTIATE_B3 is handled: during a fax B3
 * negotiation (B3_prot 4/5, NEGOTIATE_B3 previously sent) the
 * application returns updated T.30 parameters.  The non-standard
 * facilities (NSF) element from the NCPI is spliced into
 * fax_connect_info_buffer after the variable-length station id and
 * head line fields, and a fax_edata_ack_command is started to deliver
 * the result.  Always returns false.
 */
static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
PLCI *plci, APPL *appl, API_PARSE *msg)
{
word indication;
API_PARSE m_parms[3];
API_PARSE *ncpi;
API_PARSE fax_parms[9];
word i;
byte len;
dbug(1, dprintf("manufacturer_res"));
if ((msg[0].length == 0)
|| (msg[1].length == 0)
|| (GET_DWORD(msg[0].info) != _DI_MANU_ID))
{
return false;
}
indication = GET_WORD(msg[1].info);
switch (indication)
{
case _DI_NEGOTIATE_B3:
if (!plci)
break;
if (((plci->B3_prot != 4) && (plci->B3_prot != 5))
|| !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
{
dbug(1, dprintf("wrong state for NEGOTIATE_B3 parameters"));
break;
}
if (api_parse(&msg[2].info[1], msg[2].length, "ws", m_parms))
{
dbug(1, dprintf("wrong format in NEGOTIATE_B3 parameters"));
break;
}
ncpi = &m_parms[1];
/* len = offset of the first variable-length field (station id)
within the T.30 info block */
len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
if (plci->fax_connect_info_length < len)
{
((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
}
if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
{
dbug(1, dprintf("non-standard facilities info missing or wrong format"));
}
else
{
/* skip the two length-prefixed strings (station id, head line)
already stored in the buffer, zero-padding if absent, then
append the NSF element (fax_parms[7]) behind them */
if (plci->fax_connect_info_length <= len)
plci->fax_connect_info_buffer[len] = 0;
len += 1 + plci->fax_connect_info_buffer[len];
if (plci->fax_connect_info_length <= len)
plci->fax_connect_info_buffer[len] = 0;
len += 1 + plci->fax_connect_info_buffer[len];
if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
for (i = 0; i < fax_parms[7].length; i++)
plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
}
plci->fax_connect_info_length = len;
plci->fax_edata_ack_length = plci->fax_connect_info_length;
start_internal_command(Id, plci, fax_edata_ack_command);
break;
}
return false;
}
/*------------------------------------------------------------------*/
/* IDI callback function */
/*------------------------------------------------------------------*/
/*
 * IDI entity callback: single entry point for all return codes (Rc)
 * and indications (Ind) delivered by the adapter for one entity.
 *
 * e->user[0] selects the adapter (bit 15 set = network-layer entity,
 * clear = signalling entity); e->user[1] selects the PLCI.  Return
 * codes are routed to control_rc()/data_rc(), indications to
 * nl_ind()/sig_ind().  Afterwards any CAPI messages deferred in
 * plci->msg_in_queue are replayed, and pending data/requests are
 * flushed.
 */
void callback(ENTITY *e)
{
DIVA_CAPI_ADAPTER *a;
APPL *appl;
PLCI *plci;
CAPI_MSG *m;
word i, j;
byte rc;
byte ch;
byte req;
byte global_req;
int no_cancel_rc;
dbug(1, dprintf("%x:CB(%x:Req=%x,Rc=%x,Ind=%x)",
(e->user[0] + 1) & 0x7fff, e->Id, e->Req, e->Rc, e->Ind));
a = &(adapter[(byte)e->user[0]]);
plci = &(a->plci[e->user[1]]);
no_cancel_rc = DIVA_CAPI_SUPPORTS_NO_CANCEL(a);
/*
If new protocol code and new XDI are used then CAPI should work
fully in accordance with the IDI spec and look at the callback
(complete) field instead of the Rc field for return codes.
*/
if (((e->complete == 0xff) && no_cancel_rc) ||
(e->Rc && !no_cancel_rc)) {
rc = e->Rc;
ch = e->RcCh;
req = e->Req;
e->Rc = 0;
if (e->user[0] & 0x8000)
{
/* network-layer entity return code */
/*
If REMOVE request was sent then we have to wait until
return code with Id set to zero arrives.
All other return codes should be ignored.
*/
if (req == REMOVE)
{
if (e->Id)
{
dbug(1, dprintf("cancel RC in REMOVE state"));
return;
}
channel_flow_control_remove(plci);
for (i = 0; i < 256; i++)
{
if (a->FlowControlIdTable[i] == plci->nl_remove_id)
a->FlowControlIdTable[i] = 0;
}
plci->nl_remove_id = 0;
if (plci->rx_dma_descriptor > 0) {
diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
plci->rx_dma_descriptor = 0;
}
}
if (rc == OK_FC)
{
/* flow-controlled OK: remember the entity per channel so a
later real OK for the same request can be cancelled */
a->FlowControlIdTable[ch] = e->Id;
a->FlowControlSkipTable[ch] = 0;
a->ch_flow_control[ch] |= N_OK_FC_PENDING;
a->ch_flow_plci[ch] = plci->Id;
plci->nl_req = 0;
}
else
{
/*
Cancel return codes self, if feature was requested
*/
if (no_cancel_rc && (a->FlowControlIdTable[ch] == e->Id) && e->Id) {
a->FlowControlIdTable[ch] = 0;
if ((rc == OK) && a->FlowControlSkipTable[ch]) {
/* NOTE(review): "Id:0x02" in this trace string looks like a
typo for "Id:%02x" - debug output only */
dbug(3, dprintf("XDI CAPI: RC cancelled Id:0x02, Ch:%02x", e->Id, ch));
return;
}
}
if (a->ch_flow_control[ch] & N_OK_FC_PENDING)
{
a->ch_flow_control[ch] &= ~N_OK_FC_PENDING;
if (ch == e->ReqCh)
plci->nl_req = 0;
}
else
plci->nl_req = 0;
}
if (plci->nl_req)
control_rc(plci, 0, rc, ch, 0, true);
else
{
if (req == N_XON)
{
channel_x_on(plci, ch);
if (plci->internal_command)
control_rc(plci, req, rc, ch, 0, true);
}
else
{
if (plci->nl_global_req)
{
/* ASSIGN/REMOVE style global request completed */
global_req = plci->nl_global_req;
plci->nl_global_req = 0;
if (rc != ASSIGN_OK) {
e->Id = 0;
if (plci->rx_dma_descriptor > 0) {
diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
plci->rx_dma_descriptor = 0;
}
}
channel_xmit_xon(plci);
control_rc(plci, 0, rc, ch, global_req, true);
}
else if (plci->data_sent)
{
/* completion of an N_DATA transmission */
channel_xmit_xon(plci);
plci->data_sent = false;
plci->NL.XNum = 1;
data_rc(plci, ch);
if (plci->internal_command)
control_rc(plci, req, rc, ch, 0, true);
}
else
{
channel_xmit_xon(plci);
control_rc(plci, req, rc, ch, 0, true);
}
}
}
}
else
{
/* signalling entity return code */
/*
If REMOVE request was sent then we have to wait until
return code with Id set to zero arrives.
All other return codes should be ignored.
*/
if (req == REMOVE)
{
if (e->Id)
{
dbug(1, dprintf("cancel RC in REMOVE state"));
return;
}
plci->sig_remove_id = 0;
}
plci->sig_req = 0;
if (plci->sig_global_req)
{
global_req = plci->sig_global_req;
plci->sig_global_req = 0;
if (rc != ASSIGN_OK)
e->Id = 0;
channel_xmit_xon(plci);
control_rc(plci, 0, rc, ch, global_req, false);
}
else
{
channel_xmit_xon(plci);
control_rc(plci, req, rc, ch, 0, false);
}
}
/*
Again: in accordance with the IDI spec, Rc and Ind can't be
delivered in the same callback.  Also, if new XDI and protocol
code are used, jump directly to the finish.
*/
if (no_cancel_rc) {
channel_xmit_xon(plci);
goto capi_callback_suffix;
}
}
channel_xmit_xon(plci);
if (e->Ind) {
if (e->user[0] & 0x8000) {
/* network-layer indication; clear faked flow control on
disconnect before and after handling */
byte Ind = e->Ind & 0x0f;
byte Ch = e->IndCh;
if (((Ind == N_DISC) || (Ind == N_DISC_ACK)) &&
(a->ch_flow_plci[Ch] == plci->Id)) {
if (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK) {
dbug(3, dprintf("XDI CAPI: I: pending N-XON Ch:%02x", Ch));
}
a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
}
nl_ind(plci);
if ((e->RNR != 1) &&
(a->ch_flow_plci[Ch] == plci->Id) &&
(a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK)) {
a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
dbug(3, dprintf("XDI CAPI: I: remove faked N-XON Ch:%02x", Ch));
}
} else {
sig_ind(plci);
}
e->Ind = 0;
}
capi_callback_suffix:
/* replay CAPI messages that were deferred in msg_in_queue while the
PLCI was busy; the queue is a ring with explicit wrap position */
while (!plci->req_in
&& !plci->internal_command
&& (plci->msg_in_write_pos != plci->msg_in_read_pos))
{
j = (plci->msg_in_read_pos == plci->msg_in_wrap_pos) ? 0 : plci->msg_in_read_pos;
i = (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]))->header.length + 3) & 0xfffc;
m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
/* the APPL pointer is stored directly behind the message */
appl = *((APPL **)(&((byte *)(plci->msg_in_queue))[j + i]));
dbug(1, dprintf("dequeue msg(0x%04x) - write=%d read=%d wrap=%d",
m->header.command, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos));
if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
{
plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
plci->msg_in_read_pos = i + MSG_IN_OVERHEAD;
}
else
{
plci->msg_in_read_pos = j + i + MSG_IN_OVERHEAD;
}
if (plci->msg_in_read_pos == plci->msg_in_write_pos)
{
plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
}
else if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
{
plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
}
i = api_put(appl, m);
if (i != 0)
{
/* replay failed: release the data buffer of a queued
DATA_B3_REQ and stop draining */
if (m->header.command == _DATA_B3_R)
TransmitBufferFree(appl, (byte *)(long)(m->info.data_b3_req.Data));
dbug(1, dprintf("Error 0x%04x from msg(0x%04x)", i, m->header.command));
break;
}
if (plci->li_notify_update)
{
plci->li_notify_update = false;
mixer_notify_update(plci, false);
}
}
send_data(plci);
send_req(plci);
}
static void control_rc(PLCI *plci, byte req, byte rc, byte ch, byte global_req,
byte nl_rc)
{
dword Id;
dword rId;
word Number;
word Info = 0;
word i;
word ncci;
DIVA_CAPI_ADAPTER *a;
APPL *appl;
PLCI *rplci;
byte SSparms[] = "\x05\x00\x00\x02\x00\x00";
byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
if (!plci) {
dbug(0, dprintf("A: control_rc, no plci %02x:%02x:%02x:%02x:%02x", req, rc, ch, global_req, nl_rc));
return;
}
dbug(1, dprintf("req0_in/out=%d/%d", plci->req_in, plci->req_out));
if (plci->req_in != plci->req_out)
{
if (nl_rc || (global_req != ASSIGN) || (rc == ASSIGN_OK))
{
dbug(1, dprintf("req_1return"));
return;
}
/* cancel outstanding request on the PLCI after SIG ASSIGN failure */
}
plci->req_in = plci->req_in_start = plci->req_out = 0;
dbug(1, dprintf("control_rc"));
appl = plci->appl;
a = plci->adapter;
ncci = a->ch_ncci[ch];
if (appl)
{
Id = (((dword)(ncci ? ncci : ch)) << 16) | ((word)plci->Id << 8) | a->Id;
if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
Number = plci->number;
dbug(1, dprintf("Contr_RC-Id=%08lx,plci=%x,tel=%x, entity=0x%x, command=0x%x, int_command=0x%x", Id, plci->Id, plci->tel, plci->Sig.Id, plci->command, plci->internal_command));
dbug(1, dprintf("channels=0x%x", plci->channels));
if (plci_remove_check(plci))
return;
if (req == REMOVE && rc == ASSIGN_OK)
{
sig_req(plci, HANGUP, 0);
sig_req(plci, REMOVE, 0);
send_req(plci);
}
if (plci->command)
{
switch (plci->command)
{
case C_HOLD_REQ:
dbug(1, dprintf("HoldRC=0x%x", rc));
SSparms[1] = (byte)S_HOLD;
if (rc != OK)
{
plci->SuppState = IDLE;
Info = 0x2001;
}
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
break;
case C_RETRIEVE_REQ:
dbug(1, dprintf("RetrieveRC=0x%x", rc));
SSparms[1] = (byte)S_RETRIEVE;
if (rc != OK)
{
plci->SuppState = CALL_HELD;
Info = 0x2001;
}
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
break;
case _INFO_R:
dbug(1, dprintf("InfoRC=0x%x", rc));
if (rc != OK) Info = _WRONG_STATE;
sendf(appl, _INFO_R | CONFIRM, Id, Number, "w", Info);
break;
case _CONNECT_R:
dbug(1, dprintf("Connect_R=0x%x/0x%x/0x%x/0x%x", req, rc, global_req, nl_rc));
if (plci->State == INC_DIS_PENDING)
break;
if (plci->Sig.Id != 0xff)
{
if (((global_req == ASSIGN) && (rc != ASSIGN_OK))
|| (!nl_rc && (req == CALL_REQ) && (rc != OK)))
{
dbug(1, dprintf("No more IDs/Call_Req failed"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci);
plci->State = IDLE;
break;
}
if (plci->State != LOCAL_CONNECT) plci->State = OUTG_CON_PENDING;
sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
}
else /* D-ch activation */
{
if (rc != ASSIGN_OK)
{
dbug(1, dprintf("No more IDs/X.25 Call_Req failed"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci);
plci->State = IDLE;
break;
}
sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "sss", "", "", "");
plci->State = INC_ACT_PENDING;
}
break;
case _CONNECT_I | RESPONSE:
if (plci->State != INC_DIS_PENDING)
plci->State = INC_CON_ACCEPT;
break;
case _DISCONNECT_R:
if (plci->State == INC_DIS_PENDING)
break;
if (plci->Sig.Id != 0xff)
{
plci->State = OUTG_DIS_PENDING;
sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
}
break;
case SUSPEND_REQ:
break;
case RESUME_REQ:
break;
case _CONNECT_B3_R:
if (rc != OK)
{
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
break;
}
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
plci->channels++;
if (req == N_RESET)
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
}
else
{
a->ncci_state[ncci] = OUTG_CON_PENDING;
sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
}
break;
case _CONNECT_B3_I | RESPONSE:
break;
case _RESET_B3_R:
/* sendf(appl, _RESET_B3_R | CONFIRM, Id, Number, "w", 0);*/
break;
case _DISCONNECT_B3_R:
sendf(appl, _DISCONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
break;
case _MANUFACTURER_R:
break;
case PERM_LIST_REQ:
if (rc != OK)
{
Info = _WRONG_IDENTIFIER;
sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
plci_remove(plci);
}
else
sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
break;
default:
break;
}
plci->command = 0;
}
else if (plci->internal_command)
{
switch (plci->internal_command)
{
case BLOCK_PLCI:
return;
case GET_MWI_STATE:
if (rc == OK) /* command supported, wait for indication */
{
return;
}
plci_remove(plci);
break;
/* Get Supported Services */
case GETSERV_REQ_PEND:
if (rc == OK) /* command supported, wait for indication */
{
break;
}
PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", 0, 3, SSstruct);
plci_remove(plci);
break;
case INTERR_DIVERSION_REQ_PEND: /* Interrogate Parameters */
case INTERR_NUMBERS_REQ_PEND:
case CF_START_PEND: /* Call Forwarding Start pending */
case CF_STOP_PEND: /* Call Forwarding Stop pending */
case CCBS_REQUEST_REQ_PEND:
case CCBS_DEACTIVATE_REQ_PEND:
case CCBS_INTERROGATE_REQ_PEND:
switch (plci->internal_command)
{
case INTERR_DIVERSION_REQ_PEND:
SSparms[1] = S_INTERROGATE_DIVERSION;
break;
case INTERR_NUMBERS_REQ_PEND:
SSparms[1] = S_INTERROGATE_NUMBERS;
break;
case CF_START_PEND:
SSparms[1] = S_CALL_FORWARDING_START;
break;
case CF_STOP_PEND:
SSparms[1] = S_CALL_FORWARDING_STOP;
break;
case CCBS_REQUEST_REQ_PEND:
SSparms[1] = S_CCBS_REQUEST;
break;
case CCBS_DEACTIVATE_REQ_PEND:
SSparms[1] = S_CCBS_DEACTIVATE;
break;
case CCBS_INTERROGATE_REQ_PEND:
SSparms[1] = S_CCBS_INTERROGATE;
break;
}
if (global_req == ASSIGN)
{
dbug(1, dprintf("AssignDiversion_RC=0x%x/0x%x", req, rc));
return;
}
if (!plci->appl) break;
if (rc == ISDN_GUARD_REJ)
{
Info = _CAPI_GUARD_ERROR;
}
else if (rc != OK)
{
Info = _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED;
}
sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7,
plci->number, "wws", Info, (word)3, SSparms);
if (Info) plci_remove(plci);
break;
/* 3pty conference pending */
case PTY_REQ_PEND:
if (!plci->relatedPTYPLCI) break;
rplci = plci->relatedPTYPLCI;
SSparms[1] = plci->ptyState;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
if (rplci->tel) rId |= EXT_CONTROLLER;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
/* Explicit Call Transfer pending */
case ECT_REQ_PEND:
dbug(1, dprintf("ECT_RC=0x%x/0x%x", req, rc));
if (!plci->relatedPTYPLCI) break;
rplci = plci->relatedPTYPLCI;
SSparms[1] = S_ECT;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
if (rplci->tel) rId |= EXT_CONTROLLER;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
case _MANUFACTURER_R:
dbug(1, dprintf("_Manufacturer_R=0x%x/0x%x", req, rc));
if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
{
dbug(1, dprintf("No more IDs"));
sendf(appl, _MANUFACTURER_R | CONFIRM, Id, Number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
plci_remove(plci); /* after codec init, internal codec commands pending */
}
break;
case _CONNECT_R:
dbug(1, dprintf("_Connect_R=0x%x/0x%x", req, rc));
if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
{
dbug(1, dprintf("No more IDs"));
sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
plci_remove(plci); /* after codec init, internal codec commands pending */
}
break;
case PERM_COD_HOOK: /* finished with Hook_Ind */
return;
case PERM_COD_CALL:
dbug(1, dprintf("***Codec Connect_Pending A, Rc = 0x%x", rc));
plci->internal_command = PERM_COD_CONN_PEND;
return;
case PERM_COD_ASSIGN:
dbug(1, dprintf("***Codec Assign A, Rc = 0x%x", rc));
if (rc != ASSIGN_OK) break;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
plci->internal_command = PERM_COD_CALL;
return;
/* Null Call Reference Request pending */
case C_NCR_FAC_REQ:
dbug(1, dprintf("NCR_FAC=0x%x/0x%x", req, rc));
if (global_req == ASSIGN)
{
if (rc == ASSIGN_OK)
{
return;
}
else
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
appl->NullCREnable = false;
plci_remove(plci);
}
}
else if (req == NCR_FACILITY)
{
if (rc == OK)
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", 0);
}
else
{
sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
appl->NullCREnable = false;
}
plci_remove(plci);
}
break;
case HOOK_ON_REQ:
if (plci->channels)
{
if (a->ncci_state[ncci] == CONNECTED)
{
a->ncci_state[ncci] = OUTG_DIS_PENDING;
cleanup_ncci_data(plci, ncci);
nl_req_ncci(plci, N_DISC, (byte)ncci);
}
break;
}
break;
case HOOK_OFF_REQ:
if (plci->State == INC_DIS_PENDING)
break;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
plci->State = OUTG_CON_PENDING;
break;
case MWI_ACTIVATE_REQ_PEND:
case MWI_DEACTIVATE_REQ_PEND:
if (global_req == ASSIGN && rc == ASSIGN_OK)
{
dbug(1, dprintf("MWI_REQ assigned"));
return;
}
else if (rc != OK)
{
if (rc == WRONG_IE)
{
Info = 0x2007; /* Illegal message parameter coding */
dbug(1, dprintf("MWI_REQ invalid parameter"));
}
else
{
Info = 0x300B; /* not supported */
dbug(1, dprintf("MWI_REQ not supported"));
}
/* 0x3010: Request not allowed in this state */
PUT_WORD(&SSparms[4], 0x300E); /* SS not supported */
}
if (plci->internal_command == MWI_ACTIVATE_REQ_PEND)
{
PUT_WORD(&SSparms[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SSparms[1], S_MWI_DEACTIVATE);
if (plci->cr_enquiry)
{
sendf(plci->appl,
_FACILITY_R | CONFIRM,
Id & 0xf,
plci->number,
"wws", Info, (word)3, SSparms);
if (rc != OK) plci_remove(plci);
}
else
{
sendf(plci->appl,
_FACILITY_R | CONFIRM,
Id,
plci->number,
"wws", Info, (word)3, SSparms);
}
break;
case CONF_BEGIN_REQ_PEND:
case CONF_ADD_REQ_PEND:
case CONF_SPLIT_REQ_PEND:
case CONF_DROP_REQ_PEND:
case CONF_ISOLATE_REQ_PEND:
case CONF_REATTACH_REQ_PEND:
dbug(1, dprintf("CONF_RC=0x%x/0x%x", req, rc));
if ((plci->internal_command == CONF_ADD_REQ_PEND) && (!plci->relatedPTYPLCI)) break;
rplci = plci;
rId = Id;
switch (plci->internal_command)
{
case CONF_BEGIN_REQ_PEND:
SSparms[1] = S_CONF_BEGIN;
break;
case CONF_ADD_REQ_PEND:
SSparms[1] = S_CONF_ADD;
rplci = plci->relatedPTYPLCI;
rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
break;
case CONF_SPLIT_REQ_PEND:
SSparms[1] = S_CONF_SPLIT;
break;
case CONF_DROP_REQ_PEND:
SSparms[1] = S_CONF_DROP;
break;
case CONF_ISOLATE_REQ_PEND:
SSparms[1] = S_CONF_ISOLATE;
break;
case CONF_REATTACH_REQ_PEND:
SSparms[1] = S_CONF_REATTACH;
break;
}
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
sendf(rplci->appl,
_FACILITY_R | CONFIRM,
rId,
plci->number,
"wws", Info, (word)3, SSparms);
break;
case VSWITCH_REQ_PEND:
if (rc != OK)
{
if (plci->relatedPTYPLCI)
{
plci->relatedPTYPLCI->vswitchstate = 0;
plci->relatedPTYPLCI->vsprot = 0;
plci->relatedPTYPLCI->vsprotdialect = 0;
}
plci->vswitchstate = 0;
plci->vsprot = 0;
plci->vsprotdialect = 0;
}
else
{
if (plci->relatedPTYPLCI &&
plci->vswitchstate == 1 &&
plci->relatedPTYPLCI->vswitchstate == 3) /* join complete */
plci->vswitchstate = 3;
}
break;
/* Call Deflection Request pending (SSCT) */
case CD_REQ_PEND:
SSparms[1] = S_CALL_DEFLECTION;
if (rc != OK)
{
Info = 0x300E; /* not supported */
plci->appl->CDEnable = 0;
}
sendf(plci->appl, _FACILITY_R | CONFIRM, Id,
plci->number, "wws", Info, (word)3, SSparms);
break;
case RTP_CONNECT_B3_REQ_COMMAND_2:
if (rc == OK)
{
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
plci->channels++;
a->ncci_state[ncci] = OUTG_CON_PENDING;
}
default:
if (plci->internal_command_queue[0])
{
(*(plci->internal_command_queue[0]))(Id, plci, rc);
if (plci->internal_command)
return;
}
break;
}
next_internal_command(Id, plci);
}
}
else /* appl==0 */
{
Id = ((word)plci->Id << 8) | plci->adapter->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
switch (plci->internal_command)
{
case BLOCK_PLCI:
return;
case START_L1_SIG_ASSIGN_PEND:
case REM_L1_SIG_ASSIGN_PEND:
if (global_req == ASSIGN)
{
break;
}
else
{
dbug(1, dprintf("***L1 Req rem PLCI"));
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
}
break;
/* Call Deflection Request pending, just no appl ptr assigned */
case CD_REQ_PEND:
SSparms[1] = S_CALL_DEFLECTION;
if (rc != OK)
{
Info = 0x300E; /* not supported */
}
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (!application[i].Id) application[i].CDEnable = 0;
else
{
sendf(&application[i], _FACILITY_R | CONFIRM, Id,
plci->number, "wws", Info, (word)3, SSparms);
if (Info) application[i].CDEnable = 0;
}
}
}
plci->internal_command = 0;
break;
case PERM_COD_HOOK: /* finished with Hook_Ind */
return;
case PERM_COD_CALL:
plci->internal_command = PERM_COD_CONN_PEND;
dbug(1, dprintf("***Codec Connect_Pending, Rc = 0x%x", rc));
return;
case PERM_COD_ASSIGN:
dbug(1, dprintf("***Codec Assign, Rc = 0x%x", rc));
plci->internal_command = 0;
if (rc != ASSIGN_OK) break;
plci->internal_command = PERM_COD_CALL;
sig_req(plci, CALL_REQ, 0);
send_req(plci);
return;
case LISTEN_SIG_ASSIGN_PEND:
if (rc == ASSIGN_OK)
{
plci->internal_command = 0;
dbug(1, dprintf("ListenCheck, new SIG_ID = 0x%x", plci->Sig.Id));
add_p(plci, ESC, "\x02\x18\x00"); /* support call waiting */
sig_req(plci, INDICATE_REQ, 0);
send_req(plci);
}
else
{
dbug(1, dprintf("ListenCheck failed (assignRc=0x%x)", rc));
a->listen_active--;
plci_remove(plci);
plci->State = IDLE;
}
break;
case USELAW_REQ:
if (global_req == ASSIGN)
{
if (rc == ASSIGN_OK)
{
sig_req(plci, LAW_REQ, 0);
send_req(plci);
dbug(1, dprintf("Auto-Law assigned"));
}
else
{
dbug(1, dprintf("Auto-Law assign failed"));
a->automatic_law = 3;
plci->internal_command = 0;
a->automatic_lawPLCI = NULL;
}
break;
}
else if (req == LAW_REQ && rc == OK)
{
dbug(1, dprintf("Auto-Law initiated"));
a->automatic_law = 2;
plci->internal_command = 0;
}
else
{
dbug(1, dprintf("Auto-Law not supported"));
a->automatic_law = 3;
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
a->automatic_lawPLCI = NULL;
}
break;
}
plci_remove_check(plci);
}
}
/*
 * data_rc - confirm completion of an outgoing DATA_B3 request
 *
 * Called when the card reports the return code for a transmitted data
 * block on logical channel @ch.  Releases the transmit buffer and, if
 * the channel still maps to a live NCCI owned by this PLCI, emits the
 * CAPI DATA_B3 confirmation for the oldest pending descriptor, then
 * advances the DBuffer ring.
 */
static void data_rc(PLCI *plci, byte ch)
{
	DIVA_CAPI_ADAPTER *a;
	NCCI *ncci_ptr;
	DATA_B3_DESC *data;
	word ncci;
	dword Id;

	/* no owning application -> nothing to free or confirm */
	if (!plci->appl)
		return;

	TransmitBufferFree(plci->appl, plci->data_sent_ptr);
	a = plci->adapter;
	ncci = a->ch_ncci[ch];
	/* the channel must still resolve to an NCCI belonging to this PLCI */
	if (!ncci || (a->ncci_plci[ncci] != plci->Id))
		return;

	ncci_ptr = &(a->ncci[ncci]);
	dbug(1, dprintf("data_out=%d, data_pending=%d", ncci_ptr->data_out, ncci_ptr->data_pending));
	if (!ncci_ptr->data_pending)
		return;

	data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
	/* Flags bit 2 suppresses the confirmation; a torn-down NCCI gets none */
	if (!(data->Flags & 4) && a->ncci_state[ncci]) {
		Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
		if (plci->tel)
			Id |= EXT_CONTROLLER;
		sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, data->Number,
		      "ww", data->Handle, 0);
	}
	/* advance the ring read index and consume one pending entry */
	ncci_ptr->data_out++;
	if (ncci_ptr->data_out == MAX_DATA_B3)
		ncci_ptr->data_out = 0;
	ncci_ptr->data_pending--;
}
/*
 * data_ack - deliver a delayed DATA_B3 confirmation after link-layer ack
 *
 * Invoked when the peer acknowledges a data block on channel @ch for
 * which the confirmation was deferred (delivery-confirmation mode).
 * Sends the DATA_B3 confirm for the oldest entry of the DataAck ring
 * if the NCCI is still active and owned by this PLCI, then advances
 * the ring regardless, keeping it in step with the card.
 */
static void data_ack(PLCI *plci, byte ch)
{
	DIVA_CAPI_ADAPTER *a = plci->adapter;
	word ncci = a->ch_ncci[ch];
	NCCI *ncci_ptr = &(a->ncci[ncci]);
	dword Id;

	if (!ncci_ptr->data_ack_pending)
		return;

	/* confirm only while the NCCI is alive and still maps to this PLCI */
	if (a->ncci_state[ncci] && (a->ncci_plci[ncci] == plci->Id)) {
		Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
		if (plci->tel)
			Id |= EXT_CONTROLLER;
		/* NOTE(review): plci->appl is not checked here, unlike data_rc
		 * — presumably an acked NCCI always has an appl; confirm. */
		sendf(plci->appl, _DATA_B3_R | CONFIRM, Id,
		      ncci_ptr->DataAck[ncci_ptr->data_ack_out].Number,
		      "ww", ncci_ptr->DataAck[ncci_ptr->data_ack_out].Handle, 0);
	}
	/* advance the ack ring read index and consume one pending entry */
	ncci_ptr->data_ack_out++;
	if (ncci_ptr->data_ack_out == MAX_DATA_ACK)
		ncci_ptr->data_ack_out = 0;
	ncci_ptr->data_ack_pending--;
}
static void sig_ind(PLCI *plci)
{
dword x_Id;
dword Id;
dword rId;
word i;
word cip;
dword cip_mask;
byte *ie;
DIVA_CAPI_ADAPTER *a;
API_PARSE saved_parms[MAX_MSG_PARMS + 1];
#define MAXPARMSIDS 31
byte *parms[MAXPARMSIDS];
byte *add_i[4];
byte *multi_fac_parms[MAX_MULTI_IE];
byte *multi_pi_parms[MAX_MULTI_IE];
byte *multi_ssext_parms[MAX_MULTI_IE];
byte *multi_CiPN_parms[MAX_MULTI_IE];
byte *multi_vswitch_parms[MAX_MULTI_IE];
byte ai_len;
byte *esc_chi = "";
byte *esc_law = "";
byte *pty_cai = "";
byte *esc_cr = "";
byte *esc_profile = "";
byte facility[256];
PLCI *tplci = NULL;
byte chi[] = "\x02\x18\x01";
byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
byte resume_cau[] = "\x05\x05\x00\x02\x00\x00";
/* ESC_MSGTYPE must be the last but one message, a new IE has to be */
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
CST, ESC_PROFILE, 0xff, ESC_MSGTYPE, SMSG};
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
word multi_fac_id[] = {1, FTY};
word multi_pi_id[] = {1, PI};
word multi_CiPN_id[] = {1, OAD};
word multi_ssext_id[] = {1, ESC_SSEXT};
word multi_vswitch_id[] = {1, ESC_VSWITCH};
byte *cau;
word ncci;
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
byte force_mt_info = false;
byte dir;
dword d;
word w;
a = plci->adapter;
Id = ((word)plci->Id << 8) | a->Id;
PUT_WORD(&SS_Ind[4], 0x0000);
if (plci->sig_remove_id)
{
plci->Sig.RNR = 2; /* discard */
dbug(1, dprintf("SIG discard while remove pending"));
return;
}
if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
dbug(1, dprintf("SigInd-Id=%08lx,plci=%x,tel=%x,state=0x%x,channels=%d,Discflowcl=%d",
Id, plci->Id, plci->tel, plci->State, plci->channels, plci->hangup_flow_ctrl_timer));
if (plci->Sig.Ind == CALL_HOLD_ACK && plci->channels)
{
plci->Sig.RNR = 1;
return;
}
if (plci->Sig.Ind == HANGUP && plci->channels)
{
plci->Sig.RNR = 1;
plci->hangup_flow_ctrl_timer++;
/* recover the network layer after timeout */
if (plci->hangup_flow_ctrl_timer == 100)
{
dbug(1, dprintf("Exceptional disc"));
plci->Sig.RNR = 0;
plci->hangup_flow_ctrl_timer = 0;
for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
{
if (a->ncci_plci[ncci] == plci->Id)
{
cleanup_ncci_data(plci, ncci);
if (plci->channels)plci->channels--;
if (plci->appl)
sendf(plci->appl, _DISCONNECT_B3_I, (((dword) ncci) << 16) | Id, 0, "ws", 0, "");
}
}
if (plci->appl)
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
plci_remove(plci);
plci->State = IDLE;
}
return;
}
/* do first parse the info with no OAD in, because OAD will be converted */
/* first the multiple facility IE, then mult. progress ind. */
/* then the parameters for the info_ind + conn_ind */
IndParse(plci, multi_fac_id, multi_fac_parms, MAX_MULTI_IE);
IndParse(plci, multi_pi_id, multi_pi_parms, MAX_MULTI_IE);
IndParse(plci, multi_ssext_id, multi_ssext_parms, MAX_MULTI_IE);
IndParse(plci, multi_vswitch_id, multi_vswitch_parms, MAX_MULTI_IE);
IndParse(plci, parms_id, parms, 0);
IndParse(plci, multi_CiPN_id, multi_CiPN_parms, MAX_MULTI_IE);
esc_chi = parms[14];
esc_law = parms[18];
pty_cai = parms[24];
esc_cr = parms[25];
esc_profile = parms[27];
if (esc_cr[0] && plci)
{
if (plci->cr_enquiry && plci->appl)
{
plci->cr_enquiry = false;
/* d = MANU_ID */
/* w = m_command */
/* b = total length */
/* b = indication type */
/* b = length of all IEs */
/* b = IE1 */
/* S = IE1 length + cont. */
/* b = IE2 */
/* S = IE2 length + cont. */
sendf(plci->appl,
_MANUFACTURER_I,
Id,
0,
"dwbbbbSbS", _DI_MANU_ID, plci->m_command,
2 + 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], plci->Sig.Ind, 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], ESC, esc_cr, ESC, esc_law);
}
}
/* create the additional info structure */
add_i[1] = parms[15]; /* KEY of additional info */
add_i[2] = parms[11]; /* UUI of additional info */
ai_len = AddInfo(add_i, multi_fac_parms, esc_chi, facility);
/* the ESC_LAW indicates if u-Law or a-Law is actually used by the card */
/* indication returns by the card if requested by the function */
/* AutomaticLaw() after driver init */
if (a->automatic_law < 4)
{
if (esc_law[0]) {
if (esc_law[2]) {
dbug(0, dprintf("u-Law selected"));
a->u_law = 1;
}
else {
dbug(0, dprintf("a-Law selected"));
a->u_law = 0;
}
a->automatic_law = 4;
if (plci == a->automatic_lawPLCI) {
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
send_req(plci);
a->automatic_lawPLCI = NULL;
}
}
if (esc_profile[0])
{
dbug(1, dprintf("[%06x] CardProfile: %lx %lx %lx %lx %lx",
UnMapController(a->Id), GET_DWORD(&esc_profile[6]),
GET_DWORD(&esc_profile[10]), GET_DWORD(&esc_profile[14]),
GET_DWORD(&esc_profile[18]), GET_DWORD(&esc_profile[46])));
a->profile.Global_Options &= 0x000000ffL;
a->profile.B1_Protocols &= 0x000003ffL;
a->profile.B2_Protocols &= 0x00001fdfL;
a->profile.B3_Protocols &= 0x000000b7L;
a->profile.Global_Options &= GET_DWORD(&esc_profile[6]) |
GL_BCHANNEL_OPERATION_SUPPORTED;
a->profile.B1_Protocols &= GET_DWORD(&esc_profile[10]);
a->profile.B2_Protocols &= GET_DWORD(&esc_profile[14]);
a->profile.B3_Protocols &= GET_DWORD(&esc_profile[18]);
a->manufacturer_features = GET_DWORD(&esc_profile[46]);
a->man_profile.private_options = 0;
if (a->manufacturer_features & MANUFACTURER_FEATURE_ECHO_CANCELLER)
{
a->man_profile.private_options |= 1L << PRIVATE_ECHO_CANCELLER;
a->profile.Global_Options |= GL_ECHO_CANCELLER_SUPPORTED;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_RTP)
a->man_profile.private_options |= 1L << PRIVATE_RTP;
a->man_profile.rtp_primary_payloads = GET_DWORD(&esc_profile[50]);
a->man_profile.rtp_additional_payloads = GET_DWORD(&esc_profile[54]);
if (a->manufacturer_features & MANUFACTURER_FEATURE_T38)
a->man_profile.private_options |= 1L << PRIVATE_T38;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD)
a->man_profile.private_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
if (a->manufacturer_features & MANUFACTURER_FEATURE_V18)
a->man_profile.private_options |= 1L << PRIVATE_V18;
if (a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_TONE)
a->man_profile.private_options |= 1L << PRIVATE_DTMF_TONE;
if (a->manufacturer_features & MANUFACTURER_FEATURE_PIAFS)
a->man_profile.private_options |= 1L << PRIVATE_PIAFS;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
a->man_profile.private_options |= 1L << PRIVATE_FAX_PAPER_FORMATS;
if (a->manufacturer_features & MANUFACTURER_FEATURE_VOWN)
a->man_profile.private_options |= 1L << PRIVATE_VOWN;
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_NONSTANDARD)
a->man_profile.private_options |= 1L << PRIVATE_FAX_NONSTANDARD;
}
else
{
a->profile.Global_Options &= 0x0000007fL;
a->profile.B1_Protocols &= 0x000003dfL;
a->profile.B2_Protocols &= 0x00001adfL;
a->profile.B3_Protocols &= 0x000000b7L;
a->manufacturer_features &= MANUFACTURER_FEATURE_HARDDTMF;
}
if (a->manufacturer_features & (MANUFACTURER_FEATURE_HARDDTMF |
MANUFACTURER_FEATURE_SOFTDTMF_SEND | MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
{
a->profile.Global_Options |= GL_DTMF_SUPPORTED;
}
a->manufacturer_features &= ~MANUFACTURER_FEATURE_OOB_CHANNEL;
dbug(1, dprintf("[%06x] Profile: %lx %lx %lx %lx %lx",
UnMapController(a->Id), a->profile.Global_Options,
a->profile.B1_Protocols, a->profile.B2_Protocols,
a->profile.B3_Protocols, a->manufacturer_features));
}
/* codec plci for the handset/hook state support is just an internal id */
if (plci != a->AdvCodecPLCI)
{
force_mt_info = SendMultiIE(plci, Id, multi_fac_parms, FTY, 0x20, 0);
force_mt_info |= SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, 0);
SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
SendInfo(plci, Id, parms, force_mt_info);
VSwitchReqInd(plci, Id, multi_vswitch_parms);
}
/* switch the codec to the b-channel */
if (esc_chi[0] && plci && !plci->SuppState) {
plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
mixer_set_bchannel_id_esc(plci, plci->b_channel);
dbug(1, dprintf("storeChannel=0x%x", plci->b_channel));
if (plci->tel == ADV_VOICE && plci->appl) {
SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
}
}
if (plci->appl) plci->appl->Number++;
switch (plci->Sig.Ind) {
/* Response to Get_Supported_Services request */
case S_SUPPORTED:
dbug(1, dprintf("S_Supported"));
if (!plci->appl) break;
if (pty_cai[0] == 4)
{
PUT_DWORD(&CF_Ind[6], GET_DWORD(&pty_cai[1]));
}
else
{
PUT_DWORD(&CF_Ind[6], MASK_TERMINAL_PORTABILITY | MASK_HOLD_RETRIEVE);
}
PUT_WORD(&CF_Ind[1], 0);
PUT_WORD(&CF_Ind[4], 0);
sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7, plci->number, "wws", 0, 3, CF_Ind);
plci_remove(plci);
break;
/* Supplementary Service rejected */
case S_SERVICE_REJ:
dbug(1, dprintf("S_Reject=0x%x", pty_cai[5]));
if (!pty_cai[0]) break;
switch (pty_cai[5])
{
case ECT_EXECUTE:
case THREE_PTY_END:
case THREE_PTY_BEGIN:
if (!plci->relatedPTYPLCI) break;
tplci = plci->relatedPTYPLCI;
rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
if (tplci->tel) rId |= EXT_CONTROLLER;
if (pty_cai[5] == ECT_EXECUTE)
{
PUT_WORD(&SS_Ind[1], S_ECT);
plci->vswitchstate = 0;
plci->relatedPTYPLCI->vswitchstate = 0;
}
else
{
PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
}
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
break;
case CALL_DEFLECTION:
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
PUT_WORD(&SS_Ind[1], pty_cai[5]);
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
application[i].CDEnable = false;
}
}
break;
case DEACTIVATION_DIVERSION:
case ACTIVATION_DIVERSION:
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
case DIVERSION_INTERROGATE_NUM:
case CCBS_REQUEST:
case CCBS_DEACTIVATE:
case CCBS_INTERROGATE:
if (!plci->appl) break;
if (pty_cai[2] != 0xff)
{
PUT_WORD(&Interr_Err_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&Interr_Err_Ind[4], 0x300E);
}
switch (pty_cai[5])
{
case DEACTIVATION_DIVERSION:
dbug(1, dprintf("Deact_Div"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_STOP);
break;
case ACTIVATION_DIVERSION:
dbug(1, dprintf("Act_Div"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_START);
break;
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
dbug(1, dprintf("Interr_Div"));
Interr_Err_Ind[0] = 0xa;
Interr_Err_Ind[3] = 0x7;
PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_DIVERSION);
break;
case DIVERSION_INTERROGATE_NUM:
dbug(1, dprintf("Interr_Num"));
Interr_Err_Ind[0] = 0xa;
Interr_Err_Ind[3] = 0x7;
PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_NUMBERS);
break;
case CCBS_REQUEST:
dbug(1, dprintf("CCBS Request"));
Interr_Err_Ind[0] = 0xd;
Interr_Err_Ind[3] = 0xa;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_REQUEST);
break;
case CCBS_DEACTIVATE:
dbug(1, dprintf("CCBS Deactivate"));
Interr_Err_Ind[0] = 0x9;
Interr_Err_Ind[3] = 0x6;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_DEACTIVATE);
break;
case CCBS_INTERROGATE:
dbug(1, dprintf("CCBS Interrogate"));
Interr_Err_Ind[0] = 0xb;
Interr_Err_Ind[3] = 0x8;
PUT_WORD(&Interr_Err_Ind[1], S_CCBS_INTERROGATE);
break;
}
PUT_DWORD(&Interr_Err_Ind[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, Interr_Err_Ind);
plci_remove(plci);
break;
case ACTIVATION_MWI:
case DEACTIVATION_MWI:
if (pty_cai[5] == ACTIVATION_MWI)
{
PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
if (pty_cai[2] != 0xff)
{
PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&SS_Ind[4], 0x300E);
}
if (plci->cr_enquiry)
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
plci_remove(plci);
}
else
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CONF_ADD: /* ERROR */
case CONF_BEGIN:
case CONF_DROP:
case CONF_ISOLATE:
case CONF_REATTACH:
CONF_Ind[0] = 9;
CONF_Ind[3] = 6;
switch (pty_cai[5])
{
case CONF_BEGIN:
PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
plci->ptyState = 0;
break;
case CONF_DROP:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
plci->ptyState = CONNECTED;
break;
case CONF_ISOLATE:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
plci->ptyState = CONNECTED;
break;
case CONF_REATTACH:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
plci->ptyState = CONNECTED;
break;
case CONF_ADD:
PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
plci->relatedPTYPLCI = NULL;
tplci = plci->relatedPTYPLCI;
if (tplci) tplci->ptyState = CONNECTED;
plci->ptyState = CONNECTED;
break;
}
if (pty_cai[2] != 0xff)
{
PUT_WORD(&CONF_Ind[4], 0x3600 | (word)pty_cai[2]);
}
else
{
PUT_WORD(&CONF_Ind[4], 0x3303); /* Time-out: network did not respond
within the required time */
}
PUT_DWORD(&CONF_Ind[6], 0x0);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
break;
}
break;
/* Supplementary Service indicates success */
case S_SERVICE:
dbug(1, dprintf("Service_Ind"));
PUT_WORD(&CF_Ind[4], 0);
switch (pty_cai[5])
{
case THREE_PTY_END:
case THREE_PTY_BEGIN:
case ECT_EXECUTE:
if (!plci->relatedPTYPLCI) break;
tplci = plci->relatedPTYPLCI;
rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
if (tplci->tel) rId |= EXT_CONTROLLER;
if (pty_cai[5] == ECT_EXECUTE)
{
PUT_WORD(&SS_Ind[1], S_ECT);
if (plci->vswitchstate != 3)
{
plci->ptyState = IDLE;
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
}
dbug(1, dprintf("ECT OK"));
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
}
else
{
switch (plci->ptyState)
{
case S_3PTY_BEGIN:
plci->ptyState = CONNECTED;
dbug(1, dprintf("3PTY ON"));
break;
case S_3PTY_END:
plci->ptyState = IDLE;
plci->relatedPTYPLCI = NULL;
plci->ptyState = 0;
dbug(1, dprintf("3PTY OFF"));
break;
}
PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
}
break;
case CALL_DEFLECTION:
PUT_WORD(&SS_Ind[1], pty_cai[5]);
for (i = 0; i < max_appl; i++)
{
if (application[i].CDEnable)
{
if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
application[i].CDEnable = false;
}
}
break;
case DEACTIVATION_DIVERSION:
case ACTIVATION_DIVERSION:
if (!plci->appl) break;
PUT_WORD(&CF_Ind[1], pty_cai[5] + 2);
PUT_DWORD(&CF_Ind[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, CF_Ind);
plci_remove(plci);
break;
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
case DIVERSION_INTERROGATE_NUM:
case CCBS_REQUEST:
case CCBS_DEACTIVATE:
case CCBS_INTERROGATE:
if (!plci->appl) break;
switch (pty_cai[5])
{
case DIVERSION_INTERROGATE_CFU:
case DIVERSION_INTERROGATE_CFB:
case DIVERSION_INTERROGATE_CFNR:
dbug(1, dprintf("Interr_Div"));
PUT_WORD(&pty_cai[1], S_INTERROGATE_DIVERSION);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case DIVERSION_INTERROGATE_NUM:
dbug(1, dprintf("Interr_Num"));
PUT_WORD(&pty_cai[1], S_INTERROGATE_NUMBERS);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_REQUEST:
dbug(1, dprintf("CCBS Request"));
PUT_WORD(&pty_cai[1], S_CCBS_REQUEST);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_DEACTIVATE:
dbug(1, dprintf("CCBS Deactivate"));
PUT_WORD(&pty_cai[1], S_CCBS_DEACTIVATE);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
case CCBS_INTERROGATE:
dbug(1, dprintf("CCBS Interrogate"));
PUT_WORD(&pty_cai[1], S_CCBS_INTERROGATE);
pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
break;
}
PUT_WORD(&pty_cai[4], 0); /* Supplementary Service Reason */
PUT_DWORD(&pty_cai[6], plci->appl->S_Handle);
sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "wS", 3, pty_cai);
plci_remove(plci);
break;
case ACTIVATION_MWI:
case DEACTIVATION_MWI:
if (pty_cai[5] == ACTIVATION_MWI)
{
PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
}
else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
if (plci->cr_enquiry)
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
plci_remove(plci);
}
else
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case MWI_INDICATION:
if (pty_cai[0] >= 0x12)
{
PUT_WORD(&pty_cai[3], S_MWI_INDICATE);
pty_cai[2] = pty_cai[0] - 2; /* len Parameter */
pty_cai[5] = pty_cai[0] - 5; /* Supplementary Service-specific parameter len */
if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_MWI))
{
if (plci->internal_command == GET_MWI_STATE) /* result on Message Waiting Listen */
{
sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "wS", 3, &pty_cai[2]);
plci_remove(plci);
return;
}
else sendf(plci->appl, _FACILITY_I, Id, 0, "wS", 3, &pty_cai[2]);
pty_cai[0] = 0;
}
else
{
for (i = 0; i < max_appl; i++)
{
if (a->Notification_Mask[i]&SMASK_MWI)
{
sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "wS", 3, &pty_cai[2]);
pty_cai[0] = 0;
}
}
}
if (!pty_cai[0])
{ /* acknowledge */
facility[2] = 0; /* returncode */
}
else facility[2] = 0xff;
}
else
{
/* reject */
facility[2] = 0xff; /* returncode */
}
facility[0] = 2;
facility[1] = MWI_RESPONSE; /* Function */
add_p(plci, CAI, facility);
add_p(plci, ESC, multi_ssext_parms[0]); /* remembered parameter -> only one possible */
sig_req(plci, S_SERVICE, 0);
send_req(plci);
plci->command = 0;
next_internal_command(Id, plci);
break;
case CONF_ADD: /* OK */
case CONF_BEGIN:
case CONF_DROP:
case CONF_ISOLATE:
case CONF_REATTACH:
case CONF_PARTYDISC:
CONF_Ind[0] = 9;
CONF_Ind[3] = 6;
switch (pty_cai[5])
{
case CONF_BEGIN:
PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
if (pty_cai[0] == 6)
{
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
}
else
{
PUT_DWORD(&CONF_Ind[6], 0x0);
}
break;
case CONF_ISOLATE:
PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_REATTACH:
PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_DROP:
PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
break;
case CONF_ADD:
PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
tplci = plci->relatedPTYPLCI;
if (tplci) tplci->ptyState = CONNECTED;
break;
case CONF_PARTYDISC:
CONF_Ind[0] = 7;
CONF_Ind[3] = 4;
PUT_WORD(&CONF_Ind[1], S_CONF_PARTYDISC);
d = pty_cai[6];
PUT_DWORD(&CONF_Ind[4], d); /* PartyID */
break;
}
plci->ptyState = CONNECTED;
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
break;
case CCBS_INFO_RETAIN:
case CCBS_ERASECALLLINKAGEID:
case CCBS_STOP_ALERTING:
CONF_Ind[0] = 5;
CONF_Ind[3] = 2;
switch (pty_cai[5])
{
case CCBS_INFO_RETAIN:
PUT_WORD(&CONF_Ind[1], S_CCBS_INFO_RETAIN);
break;
case CCBS_STOP_ALERTING:
PUT_WORD(&CONF_Ind[1], S_CCBS_STOP_ALERTING);
break;
case CCBS_ERASECALLLINKAGEID:
PUT_WORD(&CONF_Ind[1], S_CCBS_ERASECALLLINKAGEID);
CONF_Ind[0] = 7;
CONF_Ind[3] = 4;
CONF_Ind[6] = 0;
CONF_Ind[7] = 0;
break;
}
w = pty_cai[6];
PUT_WORD(&CONF_Ind[4], w); /* PartyID */
if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_CCBS))
{
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
}
else
{
for (i = 0; i < max_appl; i++)
if (a->Notification_Mask[i] & SMASK_CCBS)
sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "ws", 3, CONF_Ind);
}
break;
}
break;
case CALL_HOLD_REJ:
cau = parms[7];
if (cau)
{
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0x3603;
}
else
{
i = 0x3603;
}
PUT_WORD(&SS_Ind[1], S_HOLD);
PUT_WORD(&SS_Ind[4], i);
if (plci->SuppState == HOLD_REQUEST)
{
plci->SuppState = IDLE;
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CALL_HOLD_ACK:
if (plci->SuppState == HOLD_REQUEST)
{
plci->SuppState = CALL_HELD;
CodecIdCheck(a, plci);
start_internal_command(Id, plci, hold_save_command);
}
break;
case CALL_RETRIEVE_REJ:
cau = parms[7];
if (cau)
{
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0x3603;
}
else
{
i = 0x3603;
}
PUT_WORD(&SS_Ind[1], S_RETRIEVE);
PUT_WORD(&SS_Ind[4], i);
if (plci->SuppState == RETRIEVE_REQUEST)
{
plci->SuppState = CALL_HELD;
CodecIdCheck(a, plci);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
break;
case CALL_RETRIEVE_ACK:
PUT_WORD(&SS_Ind[1], S_RETRIEVE);
if (plci->SuppState == RETRIEVE_REQUEST)
{
plci->SuppState = IDLE;
plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
if (plci->tel)
{
mixer_set_bchannel_id_esc(plci, plci->b_channel);
dbug(1, dprintf("RetrChannel=0x%x", plci->b_channel));
SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
if (plci->B2_prot == B2_TRANSPARENT && plci->B3_prot == B3_TRANSPARENT)
{
dbug(1, dprintf("Get B-ch"));
start_internal_command(Id, plci, retrieve_restore_command);
}
else
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
}
else
start_internal_command(Id, plci, retrieve_restore_command);
}
break;
case INDICATE_IND:
if (plci->State != LISTENING) {
sig_req(plci, HANGUP, 0);
send_req(plci);
break;
}
cip = find_cip(a, parms[4], parms[6]);
cip_mask = 1L << cip;
dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask));
clear_c_ind_mask(plci);
if (!remove_started && !a->adapter_disabled)
{
set_c_ind_mask_bit(plci, MAX_APPL);
group_optimization(a, plci);
for (i = 0; i < max_appl; i++) {
if (application[i].Id
&& (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask)
&& CPN_filter_ok(parms[0], a, i)
&& test_group_ind_mask_bit(plci, i)) {
dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i]));
set_c_ind_mask_bit(plci, i);
dump_c_ind_mask(plci);
plci->State = INC_CON_PENDING;
plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) |
CALL_DIR_IN | CALL_DIR_ANSWER;
if (esc_chi[0]) {
plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
mixer_set_bchannel_id_esc(plci, plci->b_channel);
}
/* if a listen on the ext controller is done, check if hook states */
/* are supported or if just a on board codec must be activated */
if (a->codec_listen[i] && !a->AdvSignalPLCI) {
if (a->profile.Global_Options & HANDSET)
plci->tel = ADV_VOICE;
else if (a->profile.Global_Options & ON_BOARD_CODEC)
plci->tel = CODEC;
if (plci->tel) Id |= EXT_CONTROLLER;
a->codec_listen[i] = plci;
}
sendf(&application[i], _CONNECT_I, Id, 0,
"wSSSSSSSbSSSSS", cip, /* CIP */
parms[0], /* CalledPartyNumber */
multi_CiPN_parms[0], /* CallingPartyNumber */
parms[2], /* CalledPartySubad */
parms[3], /* CallingPartySubad */
parms[4], /* BearerCapability */
parms[5], /* LowLC */
parms[6], /* HighLC */
ai_len, /* nested struct add_i */
add_i[0], /* B channel info */
add_i[1], /* keypad facility */
add_i[2], /* user user data */
add_i[3], /* nested facility */
multi_CiPN_parms[1] /* second CiPN(SCR) */
);
SendSSExtInd(&application[i],
plci,
Id,
multi_ssext_parms);
SendSetupInfo(&application[i],
plci,
Id,
parms,
SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true));
}
}
clear_c_ind_mask_bit(plci, MAX_APPL);
dump_c_ind_mask(plci);
}
if (c_ind_mask_empty(plci)) {
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = IDLE;
}
plci->notifiedcall = 0;
a->listen_active--;
listen_check(a);
break;
case CALL_PEND_NOTIFY:
plci->notifiedcall = 1;
listen_check(a);
break;
case CALL_IND:
case CALL_CON:
if (plci->State == ADVANCED_VOICE_SIG || plci->State == ADVANCED_VOICE_NOSIG)
{
if (plci->internal_command == PERM_COD_CONN_PEND)
{
if (plci->State == ADVANCED_VOICE_NOSIG)
{
dbug(1, dprintf("***Codec OK"));
if (a->AdvSignalPLCI)
{
tplci = a->AdvSignalPLCI;
if (tplci->spoofed_msg)
{
dbug(1, dprintf("***Spoofed Msg(0x%x)", tplci->spoofed_msg));
tplci->command = 0;
tplci->internal_command = 0;
x_Id = ((word)tplci->Id << 8) | tplci->adapter->Id | 0x80;
switch (tplci->spoofed_msg)
{
case CALL_RES:
tplci->command = _CONNECT_I | RESPONSE;
api_load_msg(&tplci->saved_msg, saved_parms);
add_b1(tplci, &saved_parms[1], 0, tplci->B1_facilities);
if (tplci->adapter->Info_Mask[tplci->appl->Id - 1] & 0x200)
{
/* early B3 connect (CIP mask bit 9) no release after a disc */
add_p(tplci, LLI, "\x01\x01");
}
add_s(tplci, CONN_NR, &saved_parms[2]);
add_s(tplci, LLC, &saved_parms[4]);
add_ai(tplci, &saved_parms[5]);
tplci->State = INC_CON_ACCEPT;
sig_req(tplci, CALL_RES, 0);
send_req(tplci);
break;
case AWAITING_SELECT_B:
dbug(1, dprintf("Select_B continue"));
start_internal_command(x_Id, tplci, select_b_command);
break;
case AWAITING_MANUF_CON: /* Get_Plci per Manufacturer_Req to ext controller */
if (!tplci->Sig.Id)
{
dbug(1, dprintf("No SigID!"));
sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
plci_remove(tplci);
break;
}
tplci->command = _MANUFACTURER_R;
api_load_msg(&tplci->saved_msg, saved_parms);
dir = saved_parms[2].info[0];
if (dir == 1) {
sig_req(tplci, CALL_REQ, 0);
}
else if (!dir) {
sig_req(tplci, LISTEN_REQ, 0);
}
send_req(tplci);
sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, 0);
break;
case (CALL_REQ | AWAITING_MANUF_CON):
sig_req(tplci, CALL_REQ, 0);
send_req(tplci);
break;
case CALL_REQ:
if (!tplci->Sig.Id)
{
dbug(1, dprintf("No SigID!"));
sendf(tplci->appl, _CONNECT_R | CONFIRM, tplci->adapter->Id, 0, "w", _OUT_OF_PLCI);
plci_remove(tplci);
break;
}
tplci->command = _CONNECT_R;
api_load_msg(&tplci->saved_msg, saved_parms);
add_s(tplci, CPN, &saved_parms[1]);
add_s(tplci, DSA, &saved_parms[3]);
add_ai(tplci, &saved_parms[9]);
sig_req(tplci, CALL_REQ, 0);
send_req(tplci);
break;
case CALL_RETRIEVE:
tplci->command = C_RETRIEVE_REQ;
sig_req(tplci, CALL_RETRIEVE, 0);
send_req(tplci);
break;
}
tplci->spoofed_msg = 0;
if (tplci->internal_command == 0)
next_internal_command(x_Id, tplci);
}
}
next_internal_command(Id, plci);
break;
}
dbug(1, dprintf("***Codec Hook Init Req"));
plci->internal_command = PERM_COD_HOOK;
add_p(plci, FTY, "\x01\x09"); /* Get Hook State*/
sig_req(plci, TEL_CTRL, 0);
send_req(plci);
}
}
else if (plci->command != _MANUFACTURER_R /* old style permanent connect */
&& plci->State != INC_ACT_PENDING)
{
mixer_set_bchannel_id_esc(plci, plci->b_channel);
if (plci->tel == ADV_VOICE && plci->SuppState == IDLE) /* with permanent codec switch on immediately */
{
chi[2] = plci->b_channel;
SetVoiceChannel(a->AdvCodecPLCI, chi, a);
}
sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "Sss", parms[21], "", "");
plci->State = INC_ACT_PENDING;
}
break;
case TEL_CTRL:
ie = multi_fac_parms[0]; /* inspect the facility hook indications */
if (plci->State == ADVANCED_VOICE_SIG && ie[0]) {
switch (ie[1] & 0x91) {
case 0x80: /* hook off */
case 0x81:
if (plci->internal_command == PERM_COD_HOOK)
{
dbug(1, dprintf("init:hook_off"));
plci->hook_state = ie[1];
next_internal_command(Id, plci);
break;
}
else /* ignore doubled hook indications */
{
if (((plci->hook_state) & 0xf0) == 0x80)
{
dbug(1, dprintf("ignore hook"));
break;
}
plci->hook_state = ie[1]&0x91;
}
/* check for incoming call pending */
/* and signal '+'.Appl must decide */
/* with connect_res if call must */
/* accepted or not */
for (i = 0, tplci = NULL; i < max_appl; i++) {
if (a->codec_listen[i]
&& (a->codec_listen[i]->State == INC_CON_PENDING
|| a->codec_listen[i]->State == INC_CON_ALERT)) {
tplci = a->codec_listen[i];
tplci->appl = &application[i];
}
}
/* no incoming call, do outgoing call */
/* and signal '+' if outg. setup */
if (!a->AdvSignalPLCI && !tplci) {
if ((i = get_plci(a))) {
a->AdvSignalPLCI = &a->plci[i - 1];
tplci = a->AdvSignalPLCI;
tplci->tel = ADV_VOICE;
PUT_WORD(&voice_cai[5], a->AdvSignalAppl->MaxDataLength);
if (a->Info_Mask[a->AdvSignalAppl->Id - 1] & 0x200) {
/* early B3 connect (CIP mask bit 9) no release after a disc */
add_p(tplci, LLI, "\x01\x01");
}
add_p(tplci, CAI, voice_cai);
add_p(tplci, OAD, a->TelOAD);
add_p(tplci, OSA, a->TelOSA);
add_p(tplci, SHIFT | 6, NULL);
add_p(tplci, SIN, "\x02\x01\x00");
add_p(tplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
sig_req(tplci, ASSIGN, DSIG_ID);
a->AdvSignalPLCI->internal_command = HOOK_OFF_REQ;
a->AdvSignalPLCI->command = 0;
tplci->appl = a->AdvSignalAppl;
tplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
send_req(tplci);
}
}
if (!tplci) break;
Id = ((word)tplci->Id << 8) | a->Id;
Id |= EXT_CONTROLLER;
sendf(tplci->appl,
_FACILITY_I,
Id,
0,
"ws", (word)0, "\x01+");
break;
case 0x90: /* hook on */
case 0x91:
if (plci->internal_command == PERM_COD_HOOK)
{
dbug(1, dprintf("init:hook_on"));
plci->hook_state = ie[1] & 0x91;
next_internal_command(Id, plci);
break;
}
else /* ignore doubled hook indications */
{
if (((plci->hook_state) & 0xf0) == 0x90) break;
plci->hook_state = ie[1] & 0x91;
}
/* hangup the adv. voice call and signal '-' to the appl */
if (a->AdvSignalPLCI) {
Id = ((word)a->AdvSignalPLCI->Id << 8) | a->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
sendf(a->AdvSignalAppl,
_FACILITY_I,
Id,
0,
"ws", (word)0, "\x01-");
a->AdvSignalPLCI->internal_command = HOOK_ON_REQ;
a->AdvSignalPLCI->command = 0;
sig_req(a->AdvSignalPLCI, HANGUP, 0);
send_req(a->AdvSignalPLCI);
}
break;
}
}
break;
case RESUME:
clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1));
PUT_WORD(&resume_cau[4], GOOD);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
break;
case SUSPEND:
clear_c_ind_mask(plci);
if (plci->NL.Id && !plci->nl_remove_id) {
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
}
if (!plci->sig_remove_id) {
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
}
send_req(plci);
if (!plci->channels) {
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, "\x05\x04\x00\x02\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
}
break;
case SUSPEND_REJ:
break;
case HANGUP:
plci->hangup_flow_ctrl_timer = 0;
if (plci->manufacturer && plci->State == LOCAL_CONNECT) break;
cau = parms[7];
if (cau) {
i = _L3_CAUSE | cau[2];
if (cau[2] == 0) i = 0;
else if (cau[2] == 8) i = _L1_ERROR;
else if (cau[2] == 9 || cau[2] == 10) i = _L2_ERROR;
else if (cau[2] == 5) i = _CAPI_GUARD_ERROR;
}
else {
i = _L3_ERROR;
}
if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
{
for (i = 0; i < max_appl; i++)
{
if (test_c_ind_mask_bit(plci, i))
sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
}
}
else
{
clear_c_ind_mask(plci);
}
if (!plci->appl)
{
if (plci->State == LISTENING)
{
plci->notifiedcall = 0;
a->listen_active--;
}
plci->State = INC_DIS_PENDING;
if (c_ind_mask_empty(plci))
{
plci->State = IDLE;
if (plci->NL.Id && !plci->nl_remove_id)
{
mixer_remove(plci);
nl_req_ncci(plci, REMOVE, 0);
}
if (!plci->sig_remove_id)
{
plci->internal_command = 0;
sig_req(plci, REMOVE, 0);
}
send_req(plci);
}
}
else
{
/* collision of DISCONNECT or CONNECT_RES with HANGUP can */
/* result in a second HANGUP! Don't generate another */
/* DISCONNECT */
if (plci->State != IDLE && plci->State != INC_DIS_PENDING)
{
if (plci->State == RESUMING)
{
PUT_WORD(&resume_cau[4], i);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
}
plci->State = INC_DIS_PENDING;
sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", i);
}
}
break;
case SSEXT_IND:
SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
break;
case VSWITCH_REQ:
VSwitchReqInd(plci, Id, multi_vswitch_parms);
break;
case VSWITCH_IND:
if (plci->relatedPTYPLCI &&
plci->vswitchstate == 3 &&
plci->relatedPTYPLCI->vswitchstate == 3 &&
parms[MAXPARMSIDS - 1][0])
{
add_p(plci->relatedPTYPLCI, SMSG, parms[MAXPARMSIDS - 1]);
sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
send_req(plci->relatedPTYPLCI);
}
else VSwitchReqInd(plci, Id, multi_vswitch_parms);
break;
}
}
/*
 * Walk the parsed SETUP parameters and forward each information element
 * the application subscribed to (via its adapter Info_Mask) as an
 * _INFO_I message.  Info_Sent_Flag latches once any element of interest
 * has been seen (or was preset by the caller) and gates all sends.
 * The IE numbers are Q.931 information element identifiers.
 */
static void SendSetupInfo(APPL *appl, PLCI *plci, dword Id, byte **parms, byte Info_Sent_Flag)
{
	word idx;
	word info_number;
	word info_mask = 0;
	byte *info_element;

	dbug(1, dprintf("SetupInfo"));
	for (idx = 0; idx < MAXPARMSIDS; idx++) {
		byte *param = parms[idx];

		info_number = 0;
		info_element = param;
		if (param[0]) {
			switch (idx) {
			case 0:		/* Called party number */
				dbug(1, dprintf("CPN "));
				info_number = 0x0070;
				info_mask = 0x80;
				Info_Sent_Flag = true;
				break;
			case 8: /* display */
				dbug(1, dprintf("display(%d)", idx));
				info_number = 0x0028;
				info_mask = 0x04;
				Info_Sent_Flag = true;
				break;
			case 16: /* Channel Id */
				dbug(1, dprintf("CHI"));
				info_number = 0x0018;
				info_mask = 0x100;
				Info_Sent_Flag = true;
				/* remember the B-channel for the mixer as a side effect */
				mixer_set_bchannel_id(plci, info_element);
				break;
			case 19: /* Redirected Number */
				dbug(1, dprintf("RDN"));
				info_number = 0x0074;
				info_mask = 0x400;
				Info_Sent_Flag = true;
				break;
			case 20: /* Redirected Number extended */
				dbug(1, dprintf("RDX"));
				info_number = 0x0073;
				info_mask = 0x400;
				Info_Sent_Flag = true;
				break;
			case 22: /* Redirecting Number */
				dbug(1, dprintf("RIN"));
				info_number = 0x0076;
				info_mask = 0x400;
				Info_Sent_Flag = true;
				break;
			default:
				info_number = 0;
				break;
			}
		}
		/* the second-to-last slot indicates the message type "Setup" */
		if (idx == MAXPARMSIDS - 2) {
			info_number = 0x8000 | 5;
			info_mask = 0x10;
			info_element = "";
		}
		if (!Info_Sent_Flag || !info_number)
			continue;
		if (plci->adapter->Info_Mask[appl->Id - 1] & info_mask)
			sendf(appl, _INFO_I, Id, 0, "wS", info_number, info_element);
	}
}
/*
 * Parse the information elements of a signalling indication and forward
 * each element the listening application subscribed to (via the adapter
 * Info_Mask) as an _INFO_I message.  Depending on the PLCI state the
 * element is delivered to the bound application, broadcast to all call
 * candidates (overlap receiving), or broadcast controller-wide
 * (NCR_FACILITY).  'iesent' tells whether an IE was already delivered
 * by a preceding SendMultiIE/SendSetupInfo pass for this indication.
 *
 * NOTE(review): 'charges' and 'cause' are static buffers, so this
 * function is not reentrant -- presumably all callers run serialized
 * under the adapter lock; verify before calling from another context.
 */
static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
{
word i;
word j;
word k;
byte *ie;
word Info_Number;
byte *Info_Element;
word Info_Mask = 0;
static byte charges[5] = {4, 0, 0, 0, 0};
static byte cause[] = {0x02, 0x80, 0x00};
APPL *appl;
dbug(1, dprintf("InfoParse "));
/* nothing to parse if no application is bound, the PLCI is idle
   and this is not a controller (NCR_FACILITY) broadcast */
if (
!plci->appl
&& !plci->State
&& plci->Sig.Ind != NCR_FACILITY
)
{
dbug(1, dprintf("NoParse "));
return;
}
cause[2] = 0;
for (i = 0; i < MAXPARMSIDS; i++) {
ie = parms[i];
Info_Number = 0;
Info_Element = ie;
if (ie[0]) {
switch (i) {
case 0: /* Called party number */
dbug(1, dprintf("CPN "));
Info_Number = 0x0070;
Info_Mask = 0x80;
break;
case 7: /* ESC_CAU */
dbug(1, dprintf("cau(0x%x)", ie[2]));
Info_Number = 0x0008;
Info_Mask = 0x00;
cause[2] = ie[2]; /* remembered for the Q.931 cause handling in case 17 */
Info_Element = NULL;
break;
case 8: /* display */
dbug(1, dprintf("display(%d)", i));
Info_Number = 0x0028;
Info_Mask = 0x04;
break;
case 9: /* Date display */
dbug(1, dprintf("date(%d)", i));
Info_Number = 0x0029;
Info_Mask = 0x02;
break;
case 10: /* charges */
for (j = 0; j < 4; j++) charges[1 + j] = 0;
/* skip octets until the first one with the high bit set */
for (j = 0; j < ie[0] && !(ie[1 + j] & 0x80); j++);
/* copy up to four charge octets that follow into charges[1..4] */
for (k = 1, j++; j < ie[0] && k <= 4; j++, k++) charges[k] = ie[1 + j];
Info_Number = 0x4000;
Info_Mask = 0x40;
Info_Element = charges;
break;
case 11: /* user user info */
dbug(1, dprintf("uui"));
Info_Number = 0x007E;
Info_Mask = 0x08;
break;
case 12: /* congestion receiver ready */
dbug(1, dprintf("clRDY"));
Info_Number = 0x00B0;
Info_Mask = 0x08;
Info_Element = "";
break;
case 13: /* congestion receiver not ready */
dbug(1, dprintf("clNRDY"));
Info_Number = 0x00BF;
Info_Mask = 0x08;
Info_Element = "";
break;
case 15: /* Keypad Facility */
dbug(1, dprintf("KEY"));
Info_Number = 0x002C;
Info_Mask = 0x20;
break;
case 16: /* Channel Id */
dbug(1, dprintf("CHI"));
Info_Number = 0x0018;
Info_Mask = 0x100;
/* side effect: remember the B-channel for the mixer */
mixer_set_bchannel_id(plci, Info_Element);
break;
case 17: /* if no 1tr6 cause, send full cause, else esc_cause */
dbug(1, dprintf("q9cau(0x%x)", ie[2]));
if (!cause[2] || cause[2] < 0x80) break; /* eg. layer 1 error */
Info_Number = 0x0008;
Info_Mask = 0x01;
if (cause[2] != ie[2]) Info_Element = cause; /* prefer the escape cause stored in case 7 */
break;
case 19: /* Redirected Number */
dbug(1, dprintf("RDN"));
Info_Number = 0x0074;
Info_Mask = 0x400;
break;
case 22: /* Redirecing Number */
dbug(1, dprintf("RIN"));
Info_Number = 0x0076;
Info_Mask = 0x400;
break;
case 23: /* Notification Indicator */
dbug(1, dprintf("NI"));
Info_Number = (word)NI;
Info_Mask = 0x210;
break;
case 26: /* Call State */
dbug(1, dprintf("CST"));
Info_Number = (word)CST;
Info_Mask = 0x01; /* do with cause i.e. for now */
break;
case MAXPARMSIDS - 2: /* Escape Message Type, must be the last indication */
dbug(1, dprintf("ESC/MT[0x%x]", ie[3]));
Info_Number = 0x8000 | ie[3];
if (iesent) Info_Mask = 0xffff;
else Info_Mask = 0x10;
Info_Element = "";
break;
default:
Info_Number = 0;
Info_Mask = 0;
Info_Element = "";
break;
}
}
if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
{
/* deliver to every registered application that subscribed */
for (j = 0; j < max_appl; j++)
{
appl = &application[j];
if (Info_Number
&& appl->Id
&& plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
{
dbug(1, dprintf("NCR_Ind"));
iesent = true;
sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
}
}
}
else if (!plci->appl)
{ /* overlap receiving broadcast */
/* only this subset of elements goes to all call candidates */
if (Info_Number == CPN
|| Info_Number == KEY
|| Info_Number == NI
|| Info_Number == DSP
|| Info_Number == UUI)
{
for (j = 0; j < max_appl; j++)
{
if (test_c_ind_mask_bit(plci, j))
{
dbug(1, dprintf("Ovl_Ind"));
iesent = true;
sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
}
}
}
} /* all other signalling states */
else if (Info_Number
&& plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
{
dbug(1, dprintf("Std_Ind"));
iesent = true;
sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
}
}
}
/*
 * Forward every instance of a repeatable information element (ie_type)
 * found in 'parms' as an _INFO_I message, subject to the application's
 * Info_Mask subscription (info_mask).  Depending on the PLCI state the
 * element goes to the bound application, to all call candidates
 * (overlap receiving) or controller-wide (NCR_FACILITY broadcast).
 * 'setupParse' forces parsing even without a bound application/state.
 * Returns nonzero if at least one indication was sent.
 */
static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type,
			dword info_mask, byte setupParse)
{
	word ie_idx, appl_idx;
	word send_number;
	word send_mask = 0;
	byte *send_element;
	byte any_sent = 0;

	if (!plci->appl
	    && !plci->State
	    && plci->Sig.Ind != NCR_FACILITY
	    && !setupParse)
	{
		dbug(1, dprintf("NoM-IEParse "));
		return 0;
	}
	dbug(1, dprintf("M-IEParse "));
	for (ie_idx = 0; ie_idx < MAX_MULTI_IE; ie_idx++)
	{
		byte *ie = parms[ie_idx];

		send_number = 0;
		send_element = ie;
		if (ie[0])
		{
			dbug(1, dprintf("[Ind0x%x]:IE=0x%x", plci->Sig.Ind, ie_type));
			send_number = (word)ie_type;
			send_mask = (word)info_mask;
		}
		if (!send_number)
			continue;	/* empty slot, nothing to deliver */
		if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
		{
			for (appl_idx = 0; appl_idx < max_appl; appl_idx++)
			{
				APPL *appl = &application[appl_idx];
				if (appl->Id
				    && (plci->adapter->Info_Mask[appl->Id - 1] & send_mask))
				{
					any_sent = true;
					dbug(1, dprintf("Mlt_NCR_Ind"));
					sendf(appl, _INFO_I, Id & 0x0f, 0, "wS", send_number, send_element);
				}
			}
		}
		else if (!plci->appl)
		{ /* overlap receiving broadcast */
			for (appl_idx = 0; appl_idx < max_appl; appl_idx++)
			{
				if (test_c_ind_mask_bit(plci, appl_idx))
				{
					any_sent = true;
					dbug(1, dprintf("Mlt_Ovl_Ind"));
					sendf(&application[appl_idx], _INFO_I, Id, 0, "wS", send_number, send_element);
				}
			}
		} /* all other signalling states */
		else if (plci->adapter->Info_Mask[plci->appl->Id - 1] & send_mask)
		{
			any_sent = true;
			dbug(1, dprintf("Mlt_Std_Ind"));
			sendf(plci->appl, _INFO_I, Id, 0, "wS", send_number, send_element);
		}
	}
	return any_sent;
}
/*
 * Forward SSEXT indications contained in the multi-IE parameter set as
 * manufacturer specific indications (_MANUFACTURER_I / _DI_SSEXT_CTRL).
 *
 * Format of multi_ssext_parms[i][]:
 *   0   byte  length
 *   1   byte  SSEXTIE
 *   2   byte  SSEXT_REQ/SSEXT_IND
 *   3   byte  length
 *   4   word  SSExtCommand
 *   6...      Params
 *
 * If 'appl' is given the indication is sent there, otherwise to the
 * application bound to the PLCI (if any).  Consumed entries are
 * invalidated (length set to 0) so they are not delivered twice.
 *
 * Fix: removed the stray semicolon that followed the function's closing
 * brace (an extraneous file-scope token); unified the two duplicated
 * send branches.
 */
static void SendSSExtInd(APPL *appl, PLCI *plci, dword Id, byte **parms)
{
	word i;
	APPL *dest;

	if (!plci || !plci->State || plci->Sig.Ind == NCR_FACILITY)
		return;
	for (i = 0; i < MAX_MULTI_IE; i++)
	{
		if (parms[i][0] < 6)
			continue;	/* too short to carry an SSExt command */
		if (parms[i][2] == SSEXT_REQ)
			continue;	/* requests are not forwarded, only indications */
		dest = appl ? appl : plci->appl;
		if (dest)
		{
			parms[i][0] = 0; /* kill it */
			sendf(dest, _MANUFACTURER_I,
			      Id,
			      0,
			      "dwS",
			      _DI_MANU_ID,
			      _DI_SSEXT_CTRL,
			      &parms[i][3]);
		}
	}
}
static void nl_ind(PLCI *plci)
{
byte ch;
word ncci;
dword Id;
DIVA_CAPI_ADAPTER *a;
word NCCIcode;
APPL *APPLptr;
word count;
word Num;
word i, ncpi_state;
byte len, ncci_state;
word msg;
word info = 0;
word fax_feature_bits;
byte fax_send_edata_ack;
static byte v120_header_buffer[2 + 3];
static word fax_info[] = {
0, /* T30_SUCCESS */
_FAX_NO_CONNECTION, /* T30_ERR_NO_DIS_RECEIVED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_RESPONSE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_RESPONSE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TOO_MANY_REPEATS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_UNEXPECTED_MESSAGE */
_FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DCN */
_FAX_LOCAL_ABORT, /* T30_ERR_DTC_UNSUPPORTED */
_FAX_TRAINING_ERROR, /* T30_ERR_ALL_RATES_FAILED */
_FAX_TRAINING_ERROR, /* T30_ERR_TOO_MANY_TRAINS */
_FAX_PARAMETER_ERROR, /* T30_ERR_RECEIVE_CORRUPTED */
_FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DISC */
_FAX_LOCAL_ABORT, /* T30_ERR_APPLICATION_DISC */
_FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_DIS */
_FAX_LOCAL_ABORT, /* T30_ERR_INCOMPATIBLE_DCS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_COMMAND */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_COMMAND */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_COMMAND_TOO_LONG */
_FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_RESPONSE_TOO_LONG */
_FAX_NO_CONNECTION, /* T30_ERR_NOT_IDENTIFIED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_SUPERVISORY_TIMEOUT */
_FAX_PARAMETER_ERROR, /* T30_ERR_TOO_LONG_SCAN_LINE */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_MPS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_CFR */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_FTT */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_EOM */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_MPS */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_MCF */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_RTN */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_CFR */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOP */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOM */
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_MPS */
0x331d, /* T30_ERR_SUB_SEP_UNSUPPORTED */
0x331e, /* T30_ERR_PWD_UNSUPPORTED */
0x331f, /* T30_ERR_SUB_SEP_PWD_UNSUPPORTED */
_FAX_PROTOCOL_ERROR, /* T30_ERR_INVALID_COMMAND_FRAME */
_FAX_PARAMETER_ERROR, /* T30_ERR_UNSUPPORTED_PAGE_CODING */
_FAX_PARAMETER_ERROR, /* T30_ERR_INVALID_PAGE_CODING */
_FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_PAGE_CONFIG */
_FAX_LOCAL_ABORT, /* T30_ERR_TIMEOUT_FROM_APPLICATION */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_NO_REACTION_ON_MARK */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_TRAINING_TIMEOUT */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_UNEXPECTED_V21 */
_FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_PRIMARY_CTS_ON */
_FAX_LOCAL_ABORT, /* T30_ERR_V34FAX_TURNAROUND_POLLING */
_FAX_LOCAL_ABORT /* T30_ERR_V34FAX_V8_INCOMPATIBILITY */
};
byte dtmf_code_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE + 1];
static word rtp_info[] = {
GOOD, /* RTP_SUCCESS */
0x3600 /* RTP_ERR_SSRC_OR_PAYLOAD_CHANGE */
};
static dword udata_forwarding_table[0x100 / sizeof(dword)] =
{
0x0020301e, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000
};
ch = plci->NL.IndCh;
a = plci->adapter;
ncci = a->ch_ncci[ch];
Id = (((dword)(ncci ? ncci : ch)) << 16) | (((word) plci->Id) << 8) | a->Id;
if (plci->tel) Id |= EXT_CONTROLLER;
APPLptr = plci->appl;
dbug(1, dprintf("NL_IND-Id(NL:0x%x)=0x%08lx,plci=%x,tel=%x,state=0x%x,ch=0x%x,chs=%d,Ind=%x",
plci->NL.Id, Id, plci->Id, plci->tel, plci->State, ch, plci->channels, plci->NL.Ind & 0x0f));
/* in the case if no connect_active_Ind was sent to the appl we wait for */
if (plci->nl_remove_id)
{
plci->NL.RNR = 2; /* discard */
dbug(1, dprintf("NL discard while remove pending"));
return;
}
if ((plci->NL.Ind & 0x0f) == N_CONNECT)
{
if (plci->State == INC_DIS_PENDING
|| plci->State == OUTG_DIS_PENDING
|| plci->State == IDLE)
{
plci->NL.RNR = 2; /* discard */
dbug(1, dprintf("discard n_connect"));
return;
}
if (plci->State < INC_ACT_PENDING)
{
plci->NL.RNR = 1; /* flow control */
channel_x_off(plci, ch, N_XON_CONNECT_IND);
return;
}
}
if (!APPLptr) /* no application or invalid data */
{ /* while reloading the DSP */
dbug(1, dprintf("discard1"));
plci->NL.RNR = 2;
return;
}
if (((plci->NL.Ind & 0x0f) == N_UDATA)
&& (((plci->B2_prot != B2_SDLC) && ((plci->B1_resource == 17) || (plci->B1_resource == 18)))
|| (plci->B2_prot == 7)
|| (plci->B3_prot == 7)))
{
plci->ncpi_buffer[0] = 0;
ncpi_state = plci->ncpi_state;
if (plci->NL.complete == 1)
{
byte *data = &plci->NL.RBuffer->P[0];
if ((plci->NL.RBuffer->length >= 12)
&& ((*data == DSP_UDATA_INDICATION_DCD_ON)
|| (*data == DSP_UDATA_INDICATION_CTS_ON)))
{
word conn_opt, ncpi_opt = 0x00;
/* HexDump ("MDM N_UDATA:", plci->NL.RBuffer->length, data); */
if (*data == DSP_UDATA_INDICATION_DCD_ON)
plci->ncpi_state |= NCPI_MDM_DCD_ON_RECEIVED;
if (*data == DSP_UDATA_INDICATION_CTS_ON)
plci->ncpi_state |= NCPI_MDM_CTS_ON_RECEIVED;
data++; /* indication code */
data += 2; /* timestamp */
if ((*data == DSP_CONNECTED_NORM_V18) || (*data == DSP_CONNECTED_NORM_VOWN))
ncpi_state &= ~(NCPI_MDM_DCD_ON_RECEIVED | NCPI_MDM_CTS_ON_RECEIVED);
data++; /* connected norm */
conn_opt = GET_WORD(data);
data += 2; /* connected options */
PUT_WORD(&(plci->ncpi_buffer[1]), (word)(GET_DWORD(data) & 0x0000FFFF));
if (conn_opt & DSP_CONNECTED_OPTION_MASK_V42)
{
ncpi_opt |= MDM_NCPI_ECM_V42;
}
else if (conn_opt & DSP_CONNECTED_OPTION_MASK_MNP)
{
ncpi_opt |= MDM_NCPI_ECM_MNP;
}
else
{
ncpi_opt |= MDM_NCPI_TRANSPARENT;
}
if (conn_opt & DSP_CONNECTED_OPTION_MASK_COMPRESSION)
{
ncpi_opt |= MDM_NCPI_COMPRESSED;
}
PUT_WORD(&(plci->ncpi_buffer[3]), ncpi_opt);
plci->ncpi_buffer[0] = 4;
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND | NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
}
}
if (plci->B3_prot == 7)
{
if (((a->ncci_state[ncci] == INC_ACT_PENDING) || (a->ncci_state[ncci] == OUTG_CON_PENDING))
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
}
if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
|| !(ncpi_state & NCPI_MDM_DCD_ON_RECEIVED)
|| !(ncpi_state & NCPI_MDM_CTS_ON_RECEIVED))
{
plci->NL.RNR = 2;
return;
}
}
if (plci->NL.complete == 2)
{
if (((plci->NL.Ind & 0x0f) == N_UDATA)
&& !(udata_forwarding_table[plci->RData[0].P[0] >> 5] & (1L << (plci->RData[0].P[0] & 0x1f))))
{
switch (plci->RData[0].P[0])
{
case DTMF_UDATA_INDICATION_FAX_CALLING_TONE:
if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01X");
break;
case DTMF_UDATA_INDICATION_ANSWER_TONE:
if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01Y");
break;
case DTMF_UDATA_INDICATION_DIGITS_RECEIVED:
dtmf_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case DTMF_UDATA_INDICATION_DIGITS_SENT:
dtmf_confirmation(Id, plci);
break;
case UDATA_INDICATION_MIXER_TAP_DATA:
capidtmf_recv_process_block(&(plci->capidtmf_state), plci->RData[0].P + 1, (word)(plci->RData[0].PLength - 1));
i = capidtmf_indication(&(plci->capidtmf_state), dtmf_code_buffer + 1);
if (i != 0)
{
dtmf_code_buffer[0] = DTMF_UDATA_INDICATION_DIGITS_RECEIVED;
dtmf_indication(Id, plci, dtmf_code_buffer, (word)(i + 1));
}
break;
case UDATA_INDICATION_MIXER_COEFS_SET:
mixer_indication_coefs_set(Id, plci);
break;
case UDATA_INDICATION_XCONNECT_FROM:
mixer_indication_xconnect_from(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case UDATA_INDICATION_XCONNECT_TO:
mixer_indication_xconnect_to(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
case LEC_UDATA_INDICATION_DISABLE_DETECT:
ec_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
break;
default:
break;
}
}
else
{
if ((plci->RData[0].PLength != 0)
&& ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
{
sendf(plci->appl, _DATA_B3_I, Id, 0,
"dwww",
plci->RData[1].P,
(plci->NL.RNum < 2) ? 0 : plci->RData[1].PLength,
plci->RNum,
plci->RFlags);
}
else
{
sendf(plci->appl, _DATA_B3_I, Id, 0,
"dwww",
plci->RData[0].P,
plci->RData[0].PLength,
plci->RNum,
plci->RFlags);
}
}
return;
}
fax_feature_bits = 0;
if ((plci->NL.Ind & 0x0f) == N_CONNECT ||
(plci->NL.Ind & 0x0f) == N_CONNECT_ACK ||
(plci->NL.Ind & 0x0f) == N_DISC ||
(plci->NL.Ind & 0x0f) == N_EDATA ||
(plci->NL.Ind & 0x0f) == N_DISC_ACK)
{
info = 0;
plci->ncpi_buffer[0] = 0;
switch (plci->B3_prot) {
case 0: /*XPARENT*/
case 1: /*T.90 NL*/
break; /* no network control protocol info - jfr */
case 2: /*ISO8202*/
case 3: /*X25 DCE*/
for (i = 0; i < plci->NL.RLength; i++) plci->ncpi_buffer[4 + i] = plci->NL.RBuffer->P[i];
plci->ncpi_buffer[0] = (byte)(i + 3);
plci->ncpi_buffer[1] = (byte)(plci->NL.Ind & N_D_BIT ? 1 : 0);
plci->ncpi_buffer[2] = 0;
plci->ncpi_buffer[3] = 0;
break;
case 4: /*T.30 - FAX*/
case 5: /*T.30 - FAX*/
if (plci->NL.RLength >= sizeof(T30_INFO))
{
dbug(1, dprintf("FaxStatus %04x", ((T30_INFO *)plci->NL.RBuffer->P)->code));
len = 9;
PUT_WORD(&(plci->ncpi_buffer[1]), ((T30_INFO *)plci->NL.RBuffer->P)->rate_div_2400 * 2400);
fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
i = (((T30_INFO *)plci->NL.RBuffer->P)->resolution & T30_RESOLUTION_R8_0770_OR_200) ? 0x0001 : 0x0000;
if (plci->B3_prot == 5)
{
if (!(fax_feature_bits & T30_FEATURE_BIT_ECM))
i |= 0x8000; /* This is not an ECM connection */
if (fax_feature_bits & T30_FEATURE_BIT_T6_CODING)
i |= 0x4000; /* This is a connection with MMR compression */
if (fax_feature_bits & T30_FEATURE_BIT_2D_CODING)
i |= 0x2000; /* This is a connection with MR compression */
if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
i |= 0x0004; /* More documents */
if (fax_feature_bits & T30_FEATURE_BIT_POLLING)
i |= 0x0002; /* Fax-polling indication */
}
dbug(1, dprintf("FAX Options %04x %04x", fax_feature_bits, i));
PUT_WORD(&(plci->ncpi_buffer[3]), i);
PUT_WORD(&(plci->ncpi_buffer[5]), ((T30_INFO *)plci->NL.RBuffer->P)->data_format);
plci->ncpi_buffer[7] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_low;
plci->ncpi_buffer[8] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_high;
plci->ncpi_buffer[len] = 0;
if (((T30_INFO *)plci->NL.RBuffer->P)->station_id_len)
{
plci->ncpi_buffer[len] = 20;
for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i];
}
if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
{
if (((T30_INFO *)plci->NL.RBuffer->P)->code < ARRAY_SIZE(fax_info))
info = fax_info[((T30_INFO *)plci->NL.RBuffer->P)->code];
else
info = _FAX_PROTOCOL_ERROR;
}
if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len;
while (i < plci->NL.RBuffer->length)
plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
}
plci->ncpi_buffer[0] = len;
fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low, fax_feature_bits);
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND;
if (((plci->NL.Ind & 0x0f) == N_CONNECT_ACK)
|| (((plci->NL.Ind & 0x0f) == N_CONNECT)
&& (fax_feature_bits & T30_FEATURE_BIT_POLLING))
|| (((plci->NL.Ind & 0x0f) == N_EDATA)
&& ((((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_TRAIN_OK)
|| (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
|| (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DTC))))
{
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT;
}
if (((plci->NL.Ind & 0x0f) == N_DISC)
|| ((plci->NL.Ind & 0x0f) == N_DISC_ACK)
|| (((plci->NL.Ind & 0x0f) == N_EDATA)
&& (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_EOP_CAPI)))
{
plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
}
}
break;
case B3_RTP:
if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
{
if (plci->NL.RLength != 0)
{
info = rtp_info[plci->NL.RBuffer->P[0]];
plci->ncpi_buffer[0] = plci->NL.RLength - 1;
for (i = 1; i < plci->NL.RLength; i++)
plci->ncpi_buffer[i] = plci->NL.RBuffer->P[i];
}
}
break;
}
plci->NL.RNR = 2;
}
switch (plci->NL.Ind & 0x0f) {
case N_EDATA:
if ((plci->B3_prot == 4) || (plci->B3_prot == 5))
{
dbug(1, dprintf("EDATA ncci=0x%x state=%d code=%02x", ncci, a->ncci_state[ncci],
((T30_INFO *)plci->NL.RBuffer->P)->code));
fax_send_edata_ack = (((T30_INFO *)(plci->fax_connect_info_buffer))->operating_mode == T30_OPERATING_MODE_CAPI_NEG);
if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
&& (plci->nsf_control_bits & (T30_NSF_CONTROL_BIT_NEGOTIATE_IND | T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
&& (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
&& (a->ncci_state[ncci] == OUTG_CON_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
{
((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
sendf(plci->appl, _MANUFACTURER_I, Id, 0, "dwbS", _DI_MANU_ID, _DI_NEGOTIATE_B3,
(byte)(plci->ncpi_buffer[0] + 1), plci->ncpi_buffer);
plci->ncpi_state |= NCPI_NEGOTIATE_B3_SENT;
if (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)
fax_send_edata_ack = false;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
{
switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
{
case EDATA_T30_DIS:
if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
&& !(GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & T30_CONTROL_BIT_REQUEST_POLLING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
case EDATA_T30_TRAIN_OK:
if ((a->ncci_state[ncci] == INC_ACT_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
case EDATA_T30_EOP_CAPI:
if (a->ncci_state[ncci] == CONNECTED)
{
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", GOOD, plci->ncpi_buffer);
a->ncci_state[ncci] = INC_DIS_PENDING;
plci->ncpi_state = 0;
fax_send_edata_ack = false;
}
break;
}
}
else
{
switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
{
case EDATA_T30_TRAIN_OK:
if ((a->ncci_state[ncci] == INC_ACT_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
else
sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
break;
}
}
if (fax_send_edata_ack)
{
((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
plci->fax_edata_ack_length = 1;
start_internal_command(Id, plci, fax_edata_ack_command);
}
}
else
{
dbug(1, dprintf("EDATA ncci=0x%x state=%d", ncci, a->ncci_state[ncci]));
}
break;
case N_CONNECT:
if (!a->ch_ncci[ch])
{
ncci = get_ncci(plci, ch, 0);
Id = (Id & 0xffff) | (((dword) ncci) << 16);
}
dbug(1, dprintf("N_CONNECT: ch=%d state=%d plci=%lx plci_Id=%lx plci_State=%d",
ch, a->ncci_state[ncci], a->ncci_plci[ncci], plci->Id, plci->State));
msg = _CONNECT_B3_I;
if (a->ncci_state[ncci] == IDLE)
plci->channels++;
else if (plci->B3_prot == 1)
msg = _CONNECT_B3_T90_ACTIVE_I;
a->ncci_state[ncci] = INC_CON_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, msg, Id, 0, "s", "");
else
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
break;
case N_CONNECT_ACK:
dbug(1, dprintf("N_connect_Ack"));
if (plci->internal_command_queue[0]
&& ((plci->adjust_b_state == ADJUST_B_CONNECT_2)
|| (plci->adjust_b_state == ADJUST_B_CONNECT_3)
|| (plci->adjust_b_state == ADJUST_B_CONNECT_4)))
{
(*(plci->internal_command_queue[0]))(Id, plci, 0);
if (!plci->internal_command)
next_internal_command(Id, plci);
break;
}
msg = _CONNECT_B3_ACTIVE_I;
if (plci->B3_prot == 1)
{
if (a->ncci_state[ncci] != OUTG_CON_PENDING)
msg = _CONNECT_B3_T90_ACTIVE_I;
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
}
else if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
{
if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
&& (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
&& !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
{
a->ncci_state[ncci] = INC_ACT_PENDING;
if (plci->B3_prot == 4)
sendf(plci->appl, msg, Id, 0, "s", "");
else
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
}
}
else
{
a->ncci_state[ncci] = INC_ACT_PENDING;
sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
}
if (plci->adjust_b_restore)
{
plci->adjust_b_restore = false;
start_internal_command(Id, plci, adjust_b_restore);
}
break;
case N_DISC:
case N_DISC_ACK:
if (plci->internal_command_queue[0]
&& ((plci->internal_command == FAX_DISCONNECT_COMMAND_1)
|| (plci->internal_command == FAX_DISCONNECT_COMMAND_2)
|| (plci->internal_command == FAX_DISCONNECT_COMMAND_3)))
{
(*(plci->internal_command_queue[0]))(Id, plci, 0);
if (!plci->internal_command)
next_internal_command(Id, plci);
}
ncci_state = a->ncci_state[ncci];
ncci_remove(plci, ncci, false);
/* with N_DISC or N_DISC_ACK the IDI frees the respective */
/* channel, so we cannot store the state in ncci_state! The */
/* information which channel we received a N_DISC is thus */
/* stored in the inc_dis_ncci_table buffer. */
for (i = 0; plci->inc_dis_ncci_table[i]; i++);
plci->inc_dis_ncci_table[i] = (byte) ncci;
/* need a connect_b3_ind before a disconnect_b3_ind with FAX */
if (!plci->channels
&& (plci->B1_resource == 16)
&& (plci->State <= CONNECTED))
{
len = 9;
i = ((T30_INFO *)plci->fax_connect_info_buffer)->rate_div_2400 * 2400;
PUT_WORD(&plci->ncpi_buffer[1], i);
PUT_WORD(&plci->ncpi_buffer[3], 0);
i = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
PUT_WORD(&plci->ncpi_buffer[5], i);
PUT_WORD(&plci->ncpi_buffer[7], 0);
plci->ncpi_buffer[len] = 0;
plci->ncpi_buffer[0] = len;
if (plci->B3_prot == 4)
sendf(plci->appl, _CONNECT_B3_I, Id, 0, "s", "");
else
{
if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[++len] = 0;
plci->ncpi_buffer[0] = len;
}
sendf(plci->appl, _CONNECT_B3_I, Id, 0, "S", plci->ncpi_buffer);
}
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
plci->ncpi_state = 0;
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = OUTG_DIS_PENDING;
/* disc here */
}
else if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
&& ((plci->B3_prot == 4) || (plci->B3_prot == 5))
&& ((ncci_state == INC_DIS_PENDING) || (ncci_state == IDLE)))
{
if (ncci_state == IDLE)
{
if (plci->channels)
plci->channels--;
if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
if (plci->State == SUSPENDING) {
sendf(plci->appl,
_FACILITY_I,
Id & 0xffffL,
0,
"ws", (word)3, "\x03\x04\x00\x00");
sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
}
plci_remove(plci);
plci->State = IDLE;
}
}
}
else if (plci->channels)
{
sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
plci->ncpi_state = 0;
if ((ncci_state == OUTG_REJ_PENDING)
&& ((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE)))
{
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = OUTG_DIS_PENDING;
}
}
break;
case N_RESET:
a->ncci_state[ncci] = INC_RES_PENDING;
sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
break;
case N_RESET_ACK:
a->ncci_state[ncci] = CONNECTED;
sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
break;
case N_UDATA:
if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f))))
{
plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3);
plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE;
plci->NL.R = plci->RData;
plci->NL.RNum = 1;
return;
}
case N_BDATA:
case N_DATA:
if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */
|| (a->ncci_state[ncci] == IDLE)
|| (a->ncci_state[ncci] == INC_DIS_PENDING))
{
plci->NL.RNR = 2;
break;
}
if ((a->ncci_state[ncci] != CONNECTED)
&& (a->ncci_state[ncci] != OUTG_DIS_PENDING)
&& (a->ncci_state[ncci] != OUTG_REJ_PENDING))
{
dbug(1, dprintf("flow control"));
plci->NL.RNR = 1; /* flow control */
channel_x_off(plci, ch, 0);
break;
}
NCCIcode = ncci | (((word)a->Id) << 8);
/* count all buffers within the Application pool */
/* belonging to the same NCCI. If this is below the */
/* number of buffers available per NCCI we accept */
/* this packet, otherwise we reject it */
count = 0;
Num = 0xffff;
for (i = 0; i < APPLptr->MaxBuffer; i++) {
if (NCCIcode == APPLptr->DataNCCI[i]) count++;
if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
}
if (count >= APPLptr->MaxNCCIData || Num == 0xffff)
{
dbug(3, dprintf("Flow-Control"));
plci->NL.RNR = 1;
if (++(APPLptr->NCCIDataFlowCtrlTimer) >=
(word)((a->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) ? 40 : 2000))
{
plci->NL.RNR = 2;
dbug(3, dprintf("DiscardData"));
} else {
channel_x_off(plci, ch, 0);
}
break;
}
else
{
APPLptr->NCCIDataFlowCtrlTimer = 0;
}
plci->RData[0].P = ReceiveBufferGet(APPLptr, Num);
if (!plci->RData[0].P) {
plci->NL.RNR = 1;
channel_x_off(plci, ch, 0);
break;
}
APPLptr->DataNCCI[Num] = NCCIcode;
APPLptr->DataFlags[Num] = (plci->Id << 8) | (plci->NL.Ind >> 4);
dbug(3, dprintf("Buffer(%d), Max = %d", Num, APPLptr->MaxBuffer));
plci->RNum = Num;
plci->RFlags = plci->NL.Ind >> 4;
plci->RData[0].PLength = APPLptr->MaxDataLength;
plci->NL.R = plci->RData;
if ((plci->NL.RLength != 0)
&& ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
{
plci->RData[1].P = plci->RData[0].P;
plci->RData[1].PLength = plci->RData[0].PLength;
plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
plci->RData[0].PLength = 1;
else
plci->RData[0].PLength = 2;
if (plci->NL.RBuffer->P[0] & V120_HEADER_BREAK_BIT)
plci->RFlags |= 0x0010;
if (plci->NL.RBuffer->P[0] & (V120_HEADER_C1_BIT | V120_HEADER_C2_BIT))
plci->RFlags |= 0x8000;
plci->NL.RNum = 2;
}
else
{
if ((plci->NL.Ind & 0x0f) == N_UDATA)
plci->RFlags |= 0x0010;
else if ((plci->B3_prot == B3_RTP) && ((plci->NL.Ind & 0x0f) == N_BDATA))
plci->RFlags |= 0x0001;
plci->NL.RNum = 1;
}
break;
case N_DATA_ACK:
data_ack(plci, ch);
break;
default:
plci->NL.RNR = 2;
break;
}
}
/*------------------------------------------------------------------*/
/* find a free PLCI */
/*------------------------------------------------------------------*/
/*
 * Allocate a free PLCI slot on the adapter and reset it to a defined
 * idle state.
 *
 * Returns the 1-based PLCI index (also stored in plci->Id), or 0 if
 * every PLCI of the adapter is already in use.
 */
static word get_plci(DIVA_CAPI_ADAPTER *a)
{
	word i, j;
	PLCI *plci;

	dump_plcis(a);
	/* find the first unused slot; Id == 0 marks a free PLCI */
	for (i = 0; i < a->max_plci && a->plci[i].Id; i++);
	if (i == a->max_plci) {
		dbug(1, dprintf("get_plci: out of PLCIs"));
		return 0;
	}
	plci = &a->plci[i];
	plci->Id = (byte)(i + 1); /* Ids are 1-based because 0 means "free" */

	/* reset signalling and network-layer entity state */
	plci->Sig.Id = 0;
	plci->NL.Id = 0;
	plci->sig_req = 0;
	plci->nl_req = 0;

	plci->appl = NULL;
	plci->relatedPTYPLCI = NULL;
	plci->State = IDLE;
	plci->SuppState = IDLE;
	plci->channels = 0;
	plci->tel = 0;
	plci->B1_resource = 0;
	plci->B2_prot = 0;
	plci->B3_prot = 0;

	plci->command = 0;
	plci->m_command = 0;
	init_internal_command_queue(plci);
	plci->number = 0;

	/* request buffer positions and incoming message queue markers */
	plci->req_in_start = 0;
	plci->req_in = 0;
	plci->req_out = 0;
	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;

	plci->data_sent = false;
	plci->send_disc = 0;
	plci->sig_global_req = 0;
	plci->sig_remove_id = 0;
	plci->nl_global_req = 0;
	plci->nl_remove_id = 0;
	plci->adv_nl = 0;
	plci->manufacturer = false;
	plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
	plci->spoofed_msg = 0;
	plci->ptyState = 0;
	plci->cr_enquiry = false;
	plci->hangup_flow_ctrl_timer = 0;

	/* clear NCCI bookkeeping and indication masks */
	plci->ncci_ring_list = 0;
	for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0;
	clear_c_ind_mask(plci);
	set_group_ind_mask(plci);
	plci->fax_connect_info_length = 0;
	plci->nsf_control_bits = 0;
	plci->ncpi_state = 0x00;
	plci->ncpi_buffer[0] = 0;

	plci->requested_options_conn = 0;
	plci->requested_options = 0;
	plci->notifiedcall = 0;
	plci->vswitchstate = 0;
	plci->vsprot = 0;
	plci->vsprotdialect = 0;
	init_b1_config(plci);
	dbug(1, dprintf("get_plci(%x)", plci->Id));
	return i + 1;
}
/*------------------------------------------------------------------*/
/* put a parameter in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Append one length-prefixed parameter (p[0] = length, data follows)
 * to the request buffer.  A NULL pointer is treated as an empty
 * parameter.
 */
static void add_p(PLCI *plci, byte code, byte *p)
{
	add_ie(plci, code, p, (word)(p ? p[0] : 0));
}
/*------------------------------------------------------------------*/
/* put a structure in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Append one parsed API structure to the request buffer as an
 * information element; NULL structures are ignored.
 */
static void add_s(PLCI *plci, byte code, API_PARSE *p)
{
	if (!p)
		return;
	add_ie(plci, code, p->info, (word)p->length);
}
/*------------------------------------------------------------------*/
/* put multiple structures in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Append multiple sub-structures contained in one API structure.
 * The payload is a sequence of [code][length][data...] elements;
 * each one is forwarded individually to add_ie().
 */
static void add_ss(PLCI *plci, byte code, API_PARSE *p)
{
	byte pos;

	if (!p)
		return;
	dbug(1, dprintf("add_ss(%x,len=%d)", code, p->length));
	pos = 2;
	while (pos < (byte)p->length) {
		dbug(1, dprintf("add_ss_ie(%x,len=%d)", p->info[pos - 1], p->info[pos]));
		add_ie(plci, p->info[pos - 1], (byte *)&(p->info[pos]), (word)p->info[pos]);
		pos += p->info[pos] + 2;
	}
}
/*------------------------------------------------------------------*/
/* return the channel number sent by the application in a esc_chi */
/*------------------------------------------------------------------*/
/*
 * Extract the channel number from an ESC/CHI element supplied by the
 * application.  Returns 0 when no such element is present.
 */
static byte getChannel(API_PARSE *p)
{
	byte pos;

	if (!p)
		return 0;
	/* scan the [code][length][data...] elements for ESC with CHI */
	for (pos = 2; pos < (byte)p->length; pos += p->info[pos] + 2) {
		if (p->info[pos] == 2
		    && p->info[pos - 1] == ESC
		    && p->info[pos + 1] == CHI)
			return p->info[pos + 2];
	}
	return 0;
}
/*------------------------------------------------------------------*/
/* put an information element in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Append one information element to the PLCI request buffer as
 * [code][length][data...] followed by a terminating zero byte.
 *
 * code      IE identifier; only codes with bit 7 set may be empty
 * p         length-prefixed source (data at p[1..]); may be NULL
 * p_length  number of data bytes copied from p[1..]
 */
static void add_ie(PLCI *plci, byte code, byte *p, word p_length)
{
	word i;

	/* empty IEs are dropped unless bit 7 marks the code as a flag element */
	if (!(code & 0x80) && !p_length) return;

	if (plci->req_in == plci->req_in_start) {
		/* first IE of this request: skip two bytes — presumably
		 * reserved for the request header (TODO confirm) */
		plci->req_in += 2;
	}
	else {
		/* overwrite the zero terminator left by the previous IE */
		plci->req_in--;
	}
	plci->RBuffer[plci->req_in++] = code;
	if (p) {
		plci->RBuffer[plci->req_in++] = (byte)p_length;
		for (i = 0; i < p_length; i++) plci->RBuffer[plci->req_in++] = p[1 + i];
	}
	/* terminator; removed again if a further IE follows */
	plci->RBuffer[plci->req_in++] = 0;
}
/*------------------------------------------------------------------*/
/* put unstructured data into the buffer */
/*------------------------------------------------------------------*/
/*
 * Append raw (unstructured) bytes to the request buffer, using the
 * same header-reserve / terminator-overwrite scheme as add_ie().
 */
static void add_d(PLCI *plci, word length, byte *p)
{
	word n;

	if (plci->req_in == plci->req_in_start)
		plci->req_in += 2;	/* first element: skip header bytes */
	else
		plci->req_in--;		/* step back over previous terminator */
	for (n = 0; n < length; n++)
		plci->RBuffer[plci->req_in++] = p[n];
}
/*------------------------------------------------------------------*/
/* put parameters from the Additional Info parameter in the */
/* parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Unpack the CAPI Additional Info parameter and append its KEY, UUI
 * and FTY sub-parameters to the request buffer.  Empty or malformed
 * Additional Info is silently ignored.
 */
static void add_ai(PLCI *plci, API_PARSE *ai)
{
	word n;
	API_PARSE ai_parms[5];

	for (n = 0; n < 5; n++)
		ai_parms[n].length = 0;
	/* nothing to do when empty; api_parse() != 0 means format error */
	if (!ai->length
	    || api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
		return;
	add_s(plci, KEY, &ai_parms[1]);
	add_s(plci, UUI, &ai_parms[2]);
	add_ss(plci, FTY, &ai_parms[3]);
}
/*------------------------------------------------------------------*/
/* put parameter for b1 protocol in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * Build the B1 (physical layer) configuration from the application's
 * B-protocol parameter and append it to the request buffer as a CAI
 * information element.
 *
 * bp              parsed B-protocol parameter from the application
 * b_channel_info  2 = no B-channel resource is required
 * b1_facilities   facility bits merged into the resource selection
 *
 * Returns 0 on success, or a CAPI info value (_WRONG_MESSAGE_FORMAT,
 * _B1_NOT_SUPPORTED, _B1_PARM_NOT_SUPPORTED) on failure.
 */
static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
		   word b1_facilities)
{
	API_PARSE bp_parms[8];
	API_PARSE mdm_cfg[9];
	API_PARSE global_config[2];
	byte cai[256];
	/* B1 protocol number -> hardware resource id */
	byte resource[] = {5, 9, 13, 12, 16, 39, 9, 17, 17, 18};
	byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
	word i;
	API_PARSE mdm_cfg_v18[4];
	word j, n, w;
	dword d;

	for (i = 0; i < 8; i++) bp_parms[i].length = 0;
	for (i = 0; i < 2; i++) global_config[i].length = 0;
	dbug(1, dprintf("add_b1"));
	api_save_msg(bp, "s", &plci->B_protocol);

	/* no B-channel resource requested at all */
	if (b_channel_info == 2) {
		plci->B1_resource = 0;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x00");
		dbug(1, dprintf("Cai=1,0 (no resource)"));
		return 0;
	}

	/* codec / advanced-voice short cuts */
	if (plci->tel == CODEC_PERMANENT) return 0;
	else if (plci->tel == CODEC) {
		plci->B1_resource = 1;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x01");
		dbug(1, dprintf("Cai=1,1 (Codec)"));
		return 0;
	}
	else if (plci->tel == ADV_VOICE) {
		plci->B1_resource = add_b1_facilities(plci, 9, (word)(b1_facilities | B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities | B1_FACILITY_VOICE));
		voice_cai[1] = plci->B1_resource;
		PUT_WORD(&voice_cai[5], plci->appl->MaxDataLength);
		add_p(plci, CAI, voice_cai);
		dbug(1, dprintf("Cai=1,0x%x (AdvVoice)", voice_cai[1]));
		return 0;
	}

	/* derive originate/answer from the call direction */
	plci->call_dir &= ~(CALL_DIR_ORIGINATE | CALL_DIR_ANSWER);
	if (plci->call_dir & CALL_DIR_OUT)
		plci->call_dir |= CALL_DIR_ORIGINATE;
	else if (plci->call_dir & CALL_DIR_IN)
		plci->call_dir |= CALL_DIR_ANSWER;

	/* empty B-protocol parameter: default resource 5 */
	if (!bp->length) {
		plci->B1_resource = 0x5;
		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
		add_p(plci, CAI, "\x01\x05");
		return 0;
	}

	dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
	if (bp->length > 256) return _WRONG_MESSAGE_FORMAT;
	/* accept the three B-protocol layouts: with trailing byte,
	 * plain, or with a fourth structure */
	if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
	{
		bp_parms[6].length = 0;
		if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
		{
			dbug(1, dprintf("b-form.!"));
			return _WRONG_MESSAGE_FORMAT;
		}
	}
	else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
	{
		dbug(1, dprintf("b-form.!"));
		return _WRONG_MESSAGE_FORMAT;
	}

	/* global configuration may override the call direction */
	if (bp_parms[6].length)
	{
		if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
		{
			return _WRONG_MESSAGE_FORMAT;
		}
		switch (GET_WORD(global_config[0].info))
		{
		case 1:
			plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
			break;
		case 2:
			plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
			break;
		}
	}
	dbug(1, dprintf("call_dir=%04x", plci->call_dir));

	/* RTP: pass the B1 configuration through verbatim */
	if ((GET_WORD(bp_parms[0].info) == B1_RTP)
	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
	{
		plci->B1_resource = add_b1_facilities(plci, 31, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		cai[1] = plci->B1_resource;
		cai[2] = 0;
		cai[3] = 0;
		cai[4] = 0;
		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
		for (i = 0; i < bp_parms[3].length; i++)
			cai[7 + i] = bp_parms[3].info[1 + i];
		cai[0] = 6 + bp_parms[3].length;
		add_p(plci, CAI, cai);
		return 0;
	}

	/* PIAFS hardware resource */
	if ((GET_WORD(bp_parms[0].info) == B1_PIAFS)
	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))
	{
		plci->B1_resource = add_b1_facilities(plci, 35/* PIAFS HARDWARE FACILITY */, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
		cai[1] = plci->B1_resource;
		cai[2] = 0;
		cai[3] = 0;
		cai[4] = 0;
		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
		cai[0] = 6;
		add_p(plci, CAI, cai);
		return 0;
	}

	/* reject protocols the adapter profile does not advertise;
	 * protocol 3 (transparent) with rate 0 or 56000 falls back to
	 * B1_HDLC if that is supported */
	if ((GET_WORD(bp_parms[0].info) >= 32)
	    || (!((1L << GET_WORD(bp_parms[0].info)) & plci->adapter->profile.B1_Protocols)
		&& ((GET_WORD(bp_parms[0].info) != 3)
		    || !((1L << B1_HDLC) & plci->adapter->profile.B1_Protocols)
		    || ((bp_parms[3].length != 0) && (GET_WORD(&bp_parms[3].info[1]) != 0) && (GET_WORD(&bp_parms[3].info[1]) != 56000)))))
	{
		return _B1_NOT_SUPPORTED;
	}
	plci->B1_resource = add_b1_facilities(plci, resource[GET_WORD(bp_parms[0].info)],
					      (word)(b1_facilities & ~B1_FACILITY_VOICE));
	adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
	cai[0] = 6;
	cai[1] = plci->B1_resource;
	for (i = 2; i < sizeof(cai); i++) cai[i] = 0;

	if ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC))
	{ /* B1 - modem */
		for (i = 0; i < 7; i++) mdm_cfg[i].length = 0;

		if (bp_parms[3].length)
		{
			if (api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwww", mdm_cfg))
			{
				return (_WRONG_MESSAGE_FORMAT);
			}

			cai[2] = 0; /* Bit rate for adaptation */

			dbug(1, dprintf("MDM Max Bit Rate:<%d>", GET_WORD(mdm_cfg[0].info)));

			PUT_WORD(&cai[13], 0);                         /* Min Tx speed */
			PUT_WORD(&cai[15], GET_WORD(mdm_cfg[0].info)); /* Max Tx speed */
			PUT_WORD(&cai[17], 0);                         /* Min Rx speed */
			PUT_WORD(&cai[19], GET_WORD(mdm_cfg[0].info)); /* Max Rx speed */

			cai[3] = 0; /* Async framing parameters */
			switch (GET_WORD(mdm_cfg[2].info))
			{       /* Parity */
			case 1: /* odd parity */
				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
				dbug(1, dprintf("MDM: odd parity"));
				break;
			case 2: /* even parity */
				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
				dbug(1, dprintf("MDM: even parity"));
				break;
			default:
				dbug(1, dprintf("MDM: no parity"));
				break;
			}

			switch (GET_WORD(mdm_cfg[3].info))
			{       /* stop bits */
			case 1: /* 2 stop bits */
				cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
				dbug(1, dprintf("MDM: 2 stop bits"));
				break;
			default:
				dbug(1, dprintf("MDM: 1 stop bit"));
				break;
			}

			switch (GET_WORD(mdm_cfg[1].info))
			{       /* char length */
			case 5:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
				dbug(1, dprintf("MDM: 5 bits"));
				break;
			case 6:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
				dbug(1, dprintf("MDM: 6 bits"));
				break;
			case 7:
				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
				dbug(1, dprintf("MDM: 7 bits"));
				break;
			default:
				dbug(1, dprintf("MDM: 8 bits"));
				break;
			}

			cai[7] = 0; /* Line taking options */
			cai[8] = 0; /* Modulation negotiation options */
			cai[9] = 0; /* Modulation options */

			/* reversed modem direction when originate/answer was
			 * overridden relative to the physical call direction */
			if (((plci->call_dir & CALL_DIR_ORIGINATE) != 0) ^ ((plci->call_dir & CALL_DIR_OUT) != 0))
			{
				cai[9] |= DSP_CAI_MODEM_REVERSE_DIRECTION;
				dbug(1, dprintf("MDM: Reverse direction"));
			}

			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RETRAIN)
			{
				cai[9] |= DSP_CAI_MODEM_DISABLE_RETRAIN;
				dbug(1, dprintf("MDM: Disable retrain"));
			}

			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RING_TONE)
			{
				cai[7] |= DSP_CAI_MODEM_DISABLE_CALLING_TONE | DSP_CAI_MODEM_DISABLE_ANSWER_TONE;
				dbug(1, dprintf("MDM: Disable ring tone"));
			}

			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_1800)
			{
				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_1800HZ;
				dbug(1, dprintf("MDM: 1800 guard tone"));
			}
			else if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_550)
			{
				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_550HZ;
				dbug(1, dprintf("MDM: 550 guard tone"));
			}

			if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_V100)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_V100;
				dbug(1, dprintf("MDM: V100"));
			}
			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_MOD_CLASS)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_IN_CLASS;
				dbug(1, dprintf("MDM: IN CLASS"));
			}
			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_DISABLED)
			{
				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_DISABLED;
				dbug(1, dprintf("MDM: DISABLED"));
			}
			cai[0] = 20;

			if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_V18))
			    && (GET_WORD(mdm_cfg[5].info) & 0x8000)) /* Private V.18 enable */
			{
				plci->requested_options |= 1L << PRIVATE_V18;
			}
			if (GET_WORD(mdm_cfg[5].info) & 0x4000) /* Private VOWN enable */
				plci->requested_options |= 1L << PRIVATE_VOWN;

			/* extended (private V.18 / VOWN) modem configuration:
			 * the optional 7th structure carries nested option
			 * dwords; each level is only read if the structure is
			 * long enough, hence the nested length checks */
			if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
			    & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
			{
				if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwws", mdm_cfg))
				{
					i = 27;
					if (mdm_cfg[6].length >= 4)
					{
						d = GET_DWORD(&mdm_cfg[6].info[1]);
						cai[7] |= (byte) d;          /* line taking options */
						cai[9] |= (byte)(d >> 8);    /* modulation options */
						cai[++i] = (byte)(d >> 16);  /* vown modulation options */
						cai[++i] = (byte)(d >> 24);
						if (mdm_cfg[6].length >= 8)
						{
							d = GET_DWORD(&mdm_cfg[6].info[5]);
							cai[10] |= (byte) d;        /* disabled modulations mask */
							cai[11] |= (byte)(d >> 8);
							if (mdm_cfg[6].length >= 12)
							{
								d = GET_DWORD(&mdm_cfg[6].info[9]);
								cai[12] = (byte) d;         /* enabled modulations mask */
								cai[++i] = (byte)(d >> 8);  /* vown enabled modulations */
								cai[++i] = (byte)(d >> 16);
								cai[++i] = (byte)(d >> 24);
								cai[++i] = 0;
								if (mdm_cfg[6].length >= 14)
								{
									w = GET_WORD(&mdm_cfg[6].info[13]);
									if (w != 0)
										PUT_WORD(&cai[13], w); /* min tx speed */
									if (mdm_cfg[6].length >= 16)
									{
										w = GET_WORD(&mdm_cfg[6].info[15]);
										if (w != 0)
											PUT_WORD(&cai[15], w); /* max tx speed */
										if (mdm_cfg[6].length >= 18)
										{
											w = GET_WORD(&mdm_cfg[6].info[17]);
											if (w != 0)
												PUT_WORD(&cai[17], w); /* min rx speed */
											if (mdm_cfg[6].length >= 20)
											{
												w = GET_WORD(&mdm_cfg[6].info[19]);
												if (w != 0)
													PUT_WORD(&cai[19], w); /* max rx speed */
												if (mdm_cfg[6].length >= 22)
												{
													w = GET_WORD(&mdm_cfg[6].info[21]);
													cai[23] = (byte)(-((short) w)); /* transmit level */
													if (mdm_cfg[6].length >= 24)
													{
														w = GET_WORD(&mdm_cfg[6].info[23]);
														cai[22] |= (byte) w;       /* info options mask */
														cai[21] |= (byte)(w >> 8); /* disabled symbol rates */
													}
												}
											}
										}
									}
								}
							}
						}
					}
					cai[27] = i - 27; /* length of the vown option block */
					i++;
					/* optional V.18 sub-structures are appended verbatim */
					if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwwss", mdm_cfg))
					{
						if (!api_parse(&mdm_cfg[7].info[1], (word)mdm_cfg[7].length, "sss", mdm_cfg_v18))
						{
							for (n = 0; n < 3; n++)
							{
								cai[i] = (byte)(mdm_cfg_v18[n].length);
								for (j = 1; j < ((word)(cai[i] + 1)); j++)
									cai[i + j] = mdm_cfg_v18[n].info[j];
								i += cai[i] + 1;
							}
						}
					}
					cai[0] = (byte)(i - 1);
				}
			}
		}
	}
	/* V.110 rate adaption: map the CAPI rate to the firmware rate code */
	if (GET_WORD(bp_parms[0].info) == 2 || /* V.110 async */
	    GET_WORD(bp_parms[0].info) == 3)   /* V.110 sync */
	{
		if (bp_parms[3].length) {
			dbug(1, dprintf("V.110,%d", GET_WORD(&bp_parms[3].info[1])));
			switch (GET_WORD(&bp_parms[3].info[1])) { /* Rate */
			case 0:
			case 56000:
				if (GET_WORD(bp_parms[0].info) == 3) { /* V.110 sync 56k */
					dbug(1, dprintf("56k sync HSCX"));
					cai[1] = 8;
					cai[2] = 0;
					cai[3] = 0;
				}
				else if (GET_WORD(bp_parms[0].info) == 2) {
					dbug(1, dprintf("56k async DSP"));
					cai[2] = 9;
				}
				break;
			case 50: cai[2] = 1; break;
			case 75: cai[2] = 1; break;
			case 110: cai[2] = 1; break;
			case 150: cai[2] = 1; break;
			case 200: cai[2] = 1; break;
			case 300: cai[2] = 1; break;
			case 600: cai[2] = 1; break;
			case 1200: cai[2] = 2; break;
			case 2400: cai[2] = 3; break;
			case 4800: cai[2] = 4; break;
			case 7200: cai[2] = 10; break;
			case 9600: cai[2] = 5; break;
			case 12000: cai[2] = 13; break;
			case 24000: cai[2] = 0; break;
			case 14400: cai[2] = 11; break;
			case 19200: cai[2] = 6; break;
			case 28800: cai[2] = 12; break;
			case 38400: cai[2] = 7; break;
			case 48000: cai[2] = 8; break;
			case 76: cai[2] = 15; break;  /* 75/1200     */
			case 1201: cai[2] = 14; break;  /* 1200/75     */
			case 56001: cai[2] = 9; break;  /* V.110 56000 */
			default:
				return _B1_PARM_NOT_SUPPORTED;
			}
			cai[3] = 0;
			if (cai[1] == 13) /* v.110 async */
			{
				/* optional async framing: char length, parity, stop bits */
				if (bp_parms[3].length >= 8)
				{
					switch (GET_WORD(&bp_parms[3].info[3]))
					{       /* char length */
					case 5:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
						break;
					case 6:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
						break;
					case 7:
						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
						break;
					}
					switch (GET_WORD(&bp_parms[3].info[5]))
					{       /* Parity */
					case 1: /* odd parity */
						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
						break;
					case 2: /* even parity */
						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
						break;
					}
					switch (GET_WORD(&bp_parms[3].info[7]))
					{       /* stop bits */
					case 1: /* 2 stop bits */
						cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
						break;
					}
				}
			}
		}
		else if (cai[1] == 8 || GET_WORD(bp_parms[0].info) == 3) {
			dbug(1, dprintf("V.110 default 56k sync"));
			cai[1] = 8;
			cai[2] = 0;
			cai[3] = 0;
		}
		else {
			dbug(1, dprintf("V.110 default 9600 async"));
			cai[2] = 5;
		}
	}
	PUT_WORD(&cai[5], plci->appl->MaxDataLength);
	dbug(1, dprintf("CAI[%d]=%x,%x,%x,%x,%x,%x", cai[0], cai[1], cai[2], cai[3], cai[4], cai[5], cai[6]));
/* HexDump ("CAI", sizeof(cai), &cai[0]); */

	add_p(plci, CAI, cai);
	return 0;
}
/*------------------------------------------------------------------*/
/* put parameter for b2 and B3 protocol in the parameter buffer */
/*------------------------------------------------------------------*/
/*
 * add_b23 - translate the CAPI B-protocol parameter (bp) into the
 * low-level LLI/LLC/DLC/NLC parameters for the B2 and B3 layers and
 * append them to the PLCI's parameter buffer via add_p().
 *
 * bp is parsed as "wwwsssb"/"wwwsss"/"wwwssss" (B1, B2, B3 protocol
 * words followed by the B1/B2/B3 configuration structs and optional
 * global configuration). Returns 0 on success or a CAPI info value
 * (_WRONG_MESSAGE_FORMAT, _B2_NOT_SUPPORTED, _B3_NOT_SUPPORTED,
 * _B_STACK_NOT_SUPPORTED, _B2_PARM_NOT_SUPPORTED, ...) on failure.
 *
 * NOTE(review): llc/dlc/nlc/lli are static scratch buffers, so this
 * function is not reentrant — presumably serialized by the CAPI
 * message queue; confirm against callers.
 */
static word add_b23(PLCI *plci, API_PARSE *bp)
{
word i, fax_control_bits;
byte pos, len;
byte SAPI = 0x40; /* default SAPI 16 for x.31 */
API_PARSE bp_parms[8];
API_PARSE *b1_config;
API_PARSE *b2_config;
API_PARSE b2_config_parms[8];
API_PARSE *b3_config;
API_PARSE b3_config_parms[6];
API_PARSE global_config[2];
static byte llc[3] = {2,0,0};
static byte dlc[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
static byte nlc[256];
static byte lli[12] = {1,1};
/* B2 protocol word -> LLC layer-2 id, split by call direction */
const byte llc2_out[] = {1,2,4,6,2,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
const byte llc2_in[] = {1,3,4,6,3,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
/* B3 protocol word -> LLC layer-3 id and per-frame header overhead */
const byte llc3[] = {4,3,2,2,6,6,0};
const byte header[] = {0,2,3,3,0,0,0};
for (i = 0; i < 8; i++) bp_parms[i].length = 0;
for (i = 0; i < 6; i++) b2_config_parms[i].length = 0;
for (i = 0; i < 5; i++) b3_config_parms[i].length = 0;
/* build the LLI: XON/XOFF flow control, OOB channel, optional DMA */
lli[0] = 1;
lli[1] = 1;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
lli[1] |= 2;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
lli[1] |= 4;
if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
lli[1] |= 0x10;
if (plci->rx_dma_descriptor <= 0) {
plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
if (plci->rx_dma_descriptor >= 0)
plci->rx_dma_descriptor++;
}
if (plci->rx_dma_descriptor > 0) {
/* extended LLI carries the DMA descriptor and its magic cookie */
lli[0] = 6;
lli[1] |= 0x40;
lli[2] = (byte)(plci->rx_dma_descriptor - 1);
lli[3] = (byte)plci->rx_dma_magic;
lli[4] = (byte)(plci->rx_dma_magic >> 8);
lli[5] = (byte)(plci->rx_dma_magic >> 16);
lli[6] = (byte)(plci->rx_dma_magic >> 24);
}
}
if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
lli[1] |= 0x20;
}
dbug(1, dprintf("add_b23"));
api_save_msg(bp, "s", &plci->B_protocol);
/* empty bp on a telephony PLCI: default to advanced-voice transparent */
if (!bp->length && plci->tel)
{
plci->adv_nl = true;
dbug(1, dprintf("Default adv.Nl"));
add_p(plci, LLI, lli);
plci->B2_prot = 1 /*XPARENT*/;
plci->B3_prot = 0 /*XPARENT*/;
llc[1] = 2;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
add_p(plci, DLC, dlc);
return 0;
}
/* empty bp otherwise: default to X.75 / transparent */
if (!bp->length) /*default*/
{
dbug(1, dprintf("ret default"));
add_p(plci, LLI, lli);
plci->B2_prot = 0 /*X.75 */;
plci->B3_prot = 0 /*XPARENT*/;
llc[1] = 1;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
add_p(plci, DLC, dlc);
return 0;
}
dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
if ((word)bp->length > 256) return _WRONG_MESSAGE_FORMAT;
/* accept the three legal B-protocol layouts (with/without B-channel
   info and global configuration) */
if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
{
bp_parms[6].length = 0;
if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
{
dbug(1, dprintf("b-form.!"));
return _WRONG_MESSAGE_FORMAT;
}
}
else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
{
dbug(1, dprintf("b-form.!"));
return _WRONG_MESSAGE_FORMAT;
}
if (plci->tel == ADV_VOICE) /* transparent B on advanced voice */
{
if (GET_WORD(bp_parms[1].info) != 1
|| GET_WORD(bp_parms[2].info) != 0) return _B2_NOT_SUPPORTED;
plci->adv_nl = true;
}
else if (plci->tel) return _B2_NOT_SUPPORTED;
/* RTP over RTP: only if the adapter's private RTP option is enabled */
if ((GET_WORD(bp_parms[1].info) == B2_RTP)
&& (GET_WORD(bp_parms[2].info) == B3_RTP)
&& (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
{
add_p(plci, LLI, lli);
plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? 14 : 13;
llc[2] = 4;
add_p(plci, LLC, llc);
dlc[0] = 2;
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
dlc[3] = 3; /* Addr A */
dlc[4] = 1; /* Addr B */
dlc[5] = 7; /* modulo mode */
dlc[6] = 7; /* window size */
dlc[7] = 0; /* XID len Lo */
dlc[8] = 0; /* XID len Hi */
/* the raw B2 configuration is appended behind the XID length */
for (i = 0; i < bp_parms[4].length; i++)
dlc[9 + i] = bp_parms[4].info[1 + i];
dlc[0] = (byte)(8 + bp_parms[4].length);
add_p(plci, DLC, dlc);
/* the B3 configuration goes out verbatim as NLC */
for (i = 0; i < bp_parms[5].length; i++)
nlc[1 + i] = bp_parms[5].info[1 + i];
nlc[0] = (byte)(bp_parms[5].length);
add_p(plci, NLC, nlc);
return 0;
}
/* reject B2/B3 protocols the adapter profile does not advertise
   (PIAFS is additionally gated by a private option) */
if ((GET_WORD(bp_parms[1].info) >= 32)
|| (!((1L << GET_WORD(bp_parms[1].info)) & plci->adapter->profile.B2_Protocols)
&& ((GET_WORD(bp_parms[1].info) != B2_PIAFS)
|| !(plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))))
{
return _B2_NOT_SUPPORTED;
}
if ((GET_WORD(bp_parms[2].info) >= 32)
|| !((1L << GET_WORD(bp_parms[2].info)) & plci->adapter->profile.B3_Protocols))
{
return _B3_NOT_SUPPORTED;
}
/* modem B1 protocols (except over SDLC) are handled separately */
if ((GET_WORD(bp_parms[1].info) != B2_SDLC)
&& ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
|| (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
|| (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC)))
{
return (add_modem_b23(plci, bp_parms));
}
add_p(plci, LLI, lli);
plci->B2_prot = (byte)GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte)GET_WORD(bp_parms[2].info);
if (plci->B2_prot == 12) SAPI = 0; /* default SAPI D-channel */
/* optional global configuration may override the call direction */
if (bp_parms[6].length)
{
if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
{
return _WRONG_MESSAGE_FORMAT;
}
switch (GET_WORD(global_config[0].info))
{
case 1:
plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
break;
case 2:
plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
break;
}
}
dbug(1, dprintf("call_dir=%04x", plci->call_dir));
if (plci->B2_prot == B2_PIAFS)
llc[1] = PIAFS_CRC;
else
/* IMPLEMENT_PIAFS */
{
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
llc2_out[GET_WORD(bp_parms[1].info)] : llc2_in[GET_WORD(bp_parms[1].info)];
}
llc[2] = llc3[GET_WORD(bp_parms[2].info)];
add_p(plci, LLC, llc);
dlc[0] = 2;
/* allow for the per-frame B3 header on top of MaxDataLength */
PUT_WORD(&dlc[1], plci->appl->MaxDataLength +
header[GET_WORD(bp_parms[2].info)]);
b1_config = &bp_parms[3];
nlc[0] = 0;
/* B3 protocol 4/5 = T.30 fax: pre-initialize the T30_INFO in nlc */
if (plci->B3_prot == 4
|| plci->B3_prot == 5)
{
for (i = 0; i < sizeof(T30_INFO); i++) nlc[i] = 0;
nlc[0] = sizeof(T30_INFO);
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI;
((T30_INFO *)&nlc[1])->rate_div_2400 = 0xff;
if (b1_config->length >= 2)
{
((T30_INFO *)&nlc[1])->rate_div_2400 = (byte)(GET_WORD(&b1_config->info[1]) / 2400);
}
}
b2_config = &bp_parms[4];
/* --- PIAFS DLC --- */
if (llc[1] == PIAFS_CRC)
{
if (plci->B3_prot != B3_TRANSPARENT)
{
return _B_STACK_NOT_SUPPORTED;
}
if (b2_config->length && api_parse(&b2_config->info[1], (word)b2_config->length, "bwww", b2_config_parms)) {
return _WRONG_MESSAGE_FORMAT;
}
PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
dlc[3] = 0; /* Addr A */
dlc[4] = 0; /* Addr B */
dlc[5] = 0; /* modulo mode */
dlc[6] = 0; /* window size */
if (b2_config->length >= 7) {
dlc[7] = 7;
dlc[8] = 0;
dlc[9] = b2_config_parms[0].info[0]; /* PIAFS protocol Speed configuration */
dlc[10] = b2_config_parms[1].info[0]; /* V.42bis P0 */
dlc[11] = b2_config_parms[1].info[1]; /* V.42bis P0 */
dlc[12] = b2_config_parms[2].info[0]; /* V.42bis P1 */
dlc[13] = b2_config_parms[2].info[1]; /* V.42bis P1 */
dlc[14] = b2_config_parms[3].info[0]; /* V.42bis P2 */
dlc[15] = b2_config_parms[3].info[1]; /* V.42bis P2 */
dlc[0] = 15;
if (b2_config->length >= 8) { /* PIAFS control abilities */
dlc[7] = 10;
dlc[16] = 2; /* Length of PIAFS extension */
dlc[17] = PIAFS_UDATA_ABILITIES; /* control (UDATA) ability */
dlc[18] = b2_config_parms[4].info[0]; /* value */
dlc[0] = 18;
}
}
else /* default values, 64K, variable, no compression */
{
dlc[7] = 7;
dlc[8] = 0;
dlc[9] = 0x03; /* PIAFS protocol Speed configuration */
dlc[10] = 0x03; /* V.42bis P0 */
dlc[11] = 0; /* V.42bis P0 */
dlc[12] = 0; /* V.42bis P1 */
dlc[13] = 0; /* V.42bis P1 */
dlc[14] = 0; /* V.42bis P2 */
dlc[15] = 0; /* V.42bis P2 */
dlc[0] = 15;
}
add_p(plci, DLC, dlc);
}
/* --- V.120 DLC --- */
else
if ((llc[1] == V120_L2) || (llc[1] == V120_V42BIS))
{
if (plci->B3_prot != B3_TRANSPARENT)
return _B_STACK_NOT_SUPPORTED;
dlc[0] = 6;
/* two extra bytes for the V.120 terminal adaption header */
PUT_WORD(&dlc[1], GET_WORD(&dlc[1]) + 2);
dlc[3] = 0x08;
dlc[4] = 0x01;
dlc[5] = 127;
dlc[6] = 7;
if (b2_config->length != 0)
{
if ((llc[1] == V120_V42BIS) && api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) {
return _WRONG_MESSAGE_FORMAT;
}
dlc[3] = (byte)((b2_config->info[2] << 3) | ((b2_config->info[1] >> 5) & 0x04));
dlc[4] = (byte)((b2_config->info[1] << 1) | 0x01);
/* only modulo 128 is supported here */
if (b2_config->info[3] != 128)
{
dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
return _B2_PARM_NOT_SUPPORTED;
}
dlc[5] = (byte)(b2_config->info[3] - 1);
dlc[6] = b2_config->info[4];
if (llc[1] == V120_V42BIS) {
if (b2_config->length >= 10) {
/* append the V.42bis P0/P1/P2 negotiation parameters */
dlc[7] = 6;
dlc[8] = 0;
dlc[9] = b2_config_parms[4].info[0];
dlc[10] = b2_config_parms[4].info[1];
dlc[11] = b2_config_parms[5].info[0];
dlc[12] = b2_config_parms[5].info[1];
dlc[13] = b2_config_parms[6].info[0];
dlc[14] = b2_config_parms[6].info[1];
dlc[0] = 14;
dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
}
else {
dlc[6] = 14;
}
}
}
}
/* --- all other layer-2 protocols (X.75, LAPD, SDLC, ...) --- */
else
{
if (b2_config->length)
{
dbug(1, dprintf("B2-Config"));
if (llc[1] == X75_V42BIS) {
if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
}
else {
if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbs", b2_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
}
/* if B2 Protocol is LAPD, b2_config structure is different */
if (llc[1] == 6)
{
dlc[0] = 4;
if (b2_config->length >= 1) dlc[2] = b2_config->info[1]; /* TEI */
else dlc[2] = 0x01;
if ((b2_config->length >= 2) && (plci->B2_prot == 12))
{
SAPI = b2_config->info[2]; /* SAPI */
}
dlc[1] = SAPI;
if ((b2_config->length >= 3) && (b2_config->info[3] == 128))
{
dlc[3] = 127; /* Mode */
}
else
{
dlc[3] = 7; /* Mode */
}
if (b2_config->length >= 4) dlc[4] = b2_config->info[4]; /* Window */
else dlc[4] = 1;
dbug(1, dprintf("D-dlc[%d]=%x,%x,%x,%x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
if (b2_config->length > 5) return _B2_PARM_NOT_SUPPORTED;
}
else
{
dlc[0] = (byte)(b2_config_parms[4].length + 6);
dlc[3] = b2_config->info[1];
dlc[4] = b2_config->info[2];
/* modulo must be 8 or 128 and window must fit the modulo */
if (b2_config->info[3] != 8 && b2_config->info[3] != 128) {
dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
return _B2_PARM_NOT_SUPPORTED;
}
dlc[5] = (byte)(b2_config->info[3] - 1);
dlc[6] = b2_config->info[4];
if (dlc[6] > dlc[5]) {
dbug(1, dprintf("2D-dlc= %x %x %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4], dlc[5], dlc[6]));
return _B2_PARM_NOT_SUPPORTED;
}
if (llc[1] == X75_V42BIS) {
if (b2_config->length >= 10) {
dlc[7] = 6;
dlc[8] = 0;
dlc[9] = b2_config_parms[4].info[0];
dlc[10] = b2_config_parms[4].info[1];
dlc[11] = b2_config_parms[5].info[0];
dlc[12] = b2_config_parms[5].info[1];
dlc[13] = b2_config_parms[6].info[0];
dlc[14] = b2_config_parms[6].info[1];
dlc[0] = 14;
dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
}
else {
dlc[6] = 14;
}
}
else {
/* XID data from the B2 configuration */
PUT_WORD(&dlc[7], (word)b2_config_parms[4].length);
for (i = 0; i < b2_config_parms[4].length; i++)
dlc[11 + i] = b2_config_parms[4].info[1 + i];
}
}
}
}
add_p(plci, DLC, dlc);
b3_config = &bp_parms[5];
if (b3_config->length)
{
/* T.30 fax: fill the T30_INFO structure (resolution, data format,
   control bits, station id, head line, SUB/SEP/PWD, NSF) */
if (plci->B3_prot == 4
|| plci->B3_prot == 5)
{
if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwss", b3_config_parms))
{
return _WRONG_MESSAGE_FORMAT;
}
i = GET_WORD((byte *)(b3_config_parms[0].info));
((T30_INFO *)&nlc[1])->resolution = (byte)(((i & 0x0001) ||
((plci->B3_prot == 4) && (((byte)(GET_WORD((byte *)b3_config_parms[1].info))) != 5))) ? T30_RESOLUTION_R8_0770_OR_200 : 0);
((T30_INFO *)&nlc[1])->data_format = (byte)(GET_WORD((byte *)b3_config_parms[1].info));
fax_control_bits = T30_CONTROL_BIT_ALL_FEATURES;
/* rates up to 14400 bit/s (divisor 1..6) exclude V.34 fax */
if ((((T30_INFO *)&nlc[1])->rate_div_2400 != 0) && (((T30_INFO *)&nlc[1])->rate_div_2400 <= 6))
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_V34FAX;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
{
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_PAPER_FORMATS))
{
((T30_INFO *)&nlc[1])->resolution |= T30_RESOLUTION_R8_1540 |
T30_RESOLUTION_R16_1540_OR_400 | T30_RESOLUTION_300_300 |
T30_RESOLUTION_INCH_BASED | T30_RESOLUTION_METRIC_BASED;
}
((T30_INFO *)&nlc[1])->recording_properties =
T30_RECORDING_WIDTH_ISO_A3 |
(T30_RECORDING_LENGTH_UNLIMITED << 2) |
(T30_MIN_SCANLINE_TIME_00_00_00 << 4);
}
if (plci->B3_prot == 5)
{
if (i & 0x0002) /* Accept incoming fax-polling requests */
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_POLLING;
if (i & 0x2000) /* Do not use MR compression */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_2D_CODING;
if (i & 0x4000) /* Do not use MMR compression */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_T6_CODING;
if (i & 0x8000) /* Do not use ECM */
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_ECM;
if (plci->fax_connect_info_length != 0)
{
((T30_INFO *)&nlc[1])->resolution = ((T30_INFO *)plci->fax_connect_info_buffer)->resolution;
((T30_INFO *)&nlc[1])->data_format = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
((T30_INFO *)&nlc[1])->recording_properties = ((T30_INFO *)plci->fax_connect_info_buffer)->recording_properties;
fax_control_bits |= GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) &
(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
}
}
/* copy station id to NLC */
for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
{
if (i < b3_config_parms[2].length)
{
((T30_INFO *)&nlc[1])->station_id[i] = ((byte *)b3_config_parms[2].info)[1 + i];
}
else
{
((T30_INFO *)&nlc[1])->station_id[i] = ' ';
}
}
((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH;
/* copy head line to NLC */
if (b3_config_parms[3].length)
{
/* head line = "<date/time>  <station id>  <user head line>",
   truncated to CAPI_MAX_HEAD_LINE_SPACE */
pos = (byte)(fax_head_line_time(&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH])));
if (pos != 0)
{
if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE)
pos = 0;
else
{
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
len = (byte)b3_config_parms[2].length;
if (len > 20)
len = 20;
if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE)
{
for (i = 0; i < len; i++)
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1 + i];
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
}
}
}
len = (byte)b3_config_parms[3].length;
if (len > CAPI_MAX_HEAD_LINE_SPACE - pos)
len = (byte)(CAPI_MAX_HEAD_LINE_SPACE - pos);
((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len);
nlc[0] += (byte)(pos + len);
for (i = 0; i < len; i++)
nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[3].info)[1 + i];
} else
((T30_INFO *)&nlc[1])->head_line_len = 0;
plci->nsf_control_bits = 0;
if (plci->B3_prot == 5)
{
if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
&& (GET_WORD((byte *)b3_config_parms[1].info) & 0x8000)) /* Private SUB/SEP/PWD enable */
{
plci->requested_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
}
if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
&& (GET_WORD((byte *)b3_config_parms[1].info) & 0x4000)) /* Private non-standard facilities enable */
{
plci->requested_options |= 1L << PRIVATE_FAX_NONSTANDARD;
}
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
{
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_SUB_SEP_PWD))
{
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
}
/* append SUB/SEP, PWD and NSF strings behind the T30_INFO,
   either from the saved connect info or from the message */
len = nlc[0];
pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
if (pos < plci->fax_connect_info_length)
{
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
nlc[++len] = 0;
if (pos < plci->fax_connect_info_length)
{
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
nlc[++len] = 0;
if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
& (1L << PRIVATE_FAX_NONSTANDARD))
{
if ((pos < plci->fax_connect_info_length) && (plci->fax_connect_info_buffer[pos] != 0))
{
if ((plci->fax_connect_info_buffer[pos] >= 3) && (plci->fax_connect_info_buffer[pos + 1] >= 2))
plci->nsf_control_bits = GET_WORD(&plci->fax_connect_info_buffer[pos + 2]);
for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
nlc[++len] = plci->fax_connect_info_buffer[pos++];
}
else
{
if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwsss", b3_config_parms))
{
dbug(1, dprintf("non-standard facilities info missing or wrong format"));
nlc[++len] = 0;
}
else
{
if ((b3_config_parms[4].length >= 3) && (b3_config_parms[4].info[1] >= 2))
plci->nsf_control_bits = GET_WORD(&b3_config_parms[4].info[2]);
nlc[++len] = (byte)(b3_config_parms[4].length);
for (i = 0; i < b3_config_parms[4].length; i++)
nlc[++len] = b3_config_parms[4].info[1 + i];
}
}
}
nlc[0] = len;
if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
&& (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
{
((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI_NEG;
}
}
}
PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
/* keep a copy (without head line) in fax_connect_info_buffer for
   later negotiation */
len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
for (i = 0; i < len; i++)
plci->fax_connect_info_buffer[i] = nlc[1 + i];
((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
i += ((T30_INFO *)&nlc[1])->head_line_len;
while (i < nlc[0])
plci->fax_connect_info_buffer[len++] = nlc[++i];
plci->fax_connect_info_length = len;
}
else
{
/* non-fax B3 configuration: fixed 16-byte layout */
nlc[0] = 14;
if (b3_config->length != 16)
return _B3_PARM_NOT_SUPPORTED;
for (i = 0; i < 12; i++) nlc[1 + i] = b3_config->info[1 + i];
if (GET_WORD(&b3_config->info[13]) != 8 && GET_WORD(&b3_config->info[13]) != 128)
return _B3_PARM_NOT_SUPPORTED;
nlc[13] = b3_config->info[13];
if (GET_WORD(&b3_config->info[15]) >= nlc[13])
return _B3_PARM_NOT_SUPPORTED;
nlc[14] = b3_config->info[15];
}
}
else
{
/* fax requires a B3 configuration */
if (plci->B3_prot == 4
|| plci->B3_prot == 5 /*T.30 - FAX*/) return _B3_PARM_NOT_SUPPORTED;
}
add_p(plci, NLC, nlc);
return 0;
}
/*----------------------------------------------------------------*/
/* Does the same as add_b23, but only for the modem-related */
/* L2 and L3 B-channel protocols. */
/* */
/* Enabled L2 and L3 Configurations: */
/* If L1 == Modem all negotiation */
/* only L2 == Modem with full negotiation is allowed */
/* If L1 == Modem async or sync */
/* only L2 == Transparent is allowed */
/* L3 == Modem or L3 == Transparent are allowed */
/* B2 Configuration for modem: */
/* word : enable/disable compression, bitoptions */
/* B3 Configuration for modem: */
/* empty */
/*----------------------------------------------------------------*/
/*
 * add_modem_b23 - build LLI/LLC/DLC parameters for the modem B2/B3
 * protocol combinations (called from add_b23 for the modem B1
 * protocols). bp_parms is the already-parsed B-protocol parameter.
 *
 * Allowed combinations (see header comment above):
 *   B1 == all-negotiate  -> B2 must be B2_MODEM_EC_COMPRESSION
 *   B1 == async/sync     -> B2 must be B2_TRANSPARENT
 *   B3 must be B3_MODEM or B3_TRANSPARENT
 *
 * Returns 0 on success or _B_STACK_NOT_SUPPORTED /
 * _WRONG_MESSAGE_FORMAT. Uses static scratch buffers (not reentrant).
 */
static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms)
{
static byte lli[12] = {1,1};
static byte llc[3] = {2,0,0};
static byte dlc[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
API_PARSE mdm_config[2];
word i;
word b2_config = 0;
for (i = 0; i < 2; i++) mdm_config[i].length = 0;
for (i = 0; i < sizeof(dlc); i++) dlc[i] = 0;
/* validate the B1/B2 combination */
if (((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
&& (GET_WORD(bp_parms[1].info) != B2_MODEM_EC_COMPRESSION))
|| ((GET_WORD(bp_parms[0].info) != B1_MODEM_ALL_NEGOTIATE)
&& (GET_WORD(bp_parms[1].info) != B2_TRANSPARENT)))
{
return (_B_STACK_NOT_SUPPORTED);
}
if ((GET_WORD(bp_parms[2].info) != B3_MODEM)
&& (GET_WORD(bp_parms[2].info) != B3_TRANSPARENT))
{
return (_B_STACK_NOT_SUPPORTED);
}
plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
/* optional B2 configuration: one word of compression bit options */
if ((GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) && bp_parms[4].length)
{
if (api_parse(&bp_parms[4].info[1],
(word)bp_parms[4].length, "w",
mdm_config))
{
return (_WRONG_MESSAGE_FORMAT);
}
b2_config = GET_WORD(mdm_config[0].info);
}
/* OK, L2 is modem */
/* build the LLI exactly like add_b23 does (flow control, OOB, DMA) */
lli[0] = 1;
lli[1] = 1;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
lli[1] |= 2;
if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
lli[1] |= 4;
if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
lli[1] |= 0x10;
if (plci->rx_dma_descriptor <= 0) {
plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
if (plci->rx_dma_descriptor >= 0)
plci->rx_dma_descriptor++;
}
if (plci->rx_dma_descriptor > 0) {
lli[1] |= 0x40;
lli[0] = 6;
lli[2] = (byte)(plci->rx_dma_descriptor - 1);
lli[3] = (byte)plci->rx_dma_magic;
lli[4] = (byte)(plci->rx_dma_magic >> 8);
lli[5] = (byte)(plci->rx_dma_magic >> 16);
lli[6] = (byte)(plci->rx_dma_magic >> 24);
}
}
if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
lli[1] |= 0x20;
}
/* layer 2 id: V.42 (outgoing) or V.42 incoming */
llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
/*V42*/ 10 : /*V42_IN*/ 9;
llc[2] = 4; /* pass L3 always transparent */
add_p(plci, LLI, lli);
add_p(plci, LLC, llc);
i = 1;
PUT_WORD(&dlc[i], plci->appl->MaxDataLength);
i += 2;
if (GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION)
{
if (bp_parms[4].length)
{
/* map the CAPI MDM_B2_DISABLE_* bit options to DLC protocol
   control flags */
dbug(1, dprintf("MDM b2_config=%02x", b2_config));
dlc[i++] = 3; /* Addr A */
dlc[i++] = 1; /* Addr B */
dlc[i++] = 7; /* modulo mode */
dlc[i++] = 7; /* window size */
dlc[i++] = 0; /* XID len Lo */
dlc[i++] = 0; /* XID len Hi */
if (b2_config & MDM_B2_DISABLE_V42bis)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_V42_V42BIS;
}
if (b2_config & MDM_B2_DISABLE_MNP)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_MNP_MNP5;
}
if (b2_config & MDM_B2_DISABLE_TRANS)
{
dlc[i] |= DLC_MODEMPROT_REQUIRE_PROTOCOL;
}
if (b2_config & MDM_B2_DISABLE_V42)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_V42_DETECT;
}
if (b2_config & MDM_B2_DISABLE_COMP)
{
dlc[i] |= DLC_MODEMPROT_DISABLE_COMPRESSION;
}
i++;
}
}
else
{
/* B2 transparent: disable all error correction and compression */
dlc[i++] = 3; /* Addr A */
dlc[i++] = 1; /* Addr B */
dlc[i++] = 7; /* modulo mode */
dlc[i++] = 7; /* window size */
dlc[i++] = 0; /* XID len Lo */
dlc[i++] = 0; /* XID len Hi */
dlc[i++] = DLC_MODEMPROT_DISABLE_V42_V42BIS |
DLC_MODEMPROT_DISABLE_MNP_MNP5 |
DLC_MODEMPROT_DISABLE_V42_DETECT |
DLC_MODEMPROT_DISABLE_COMPRESSION;
}
dlc[0] = (byte)(i - 1);
/* HexDump ("DLC", sizeof(dlc), &dlc[0]); */
add_p(plci, DLC, dlc);
return (0);
}
/*------------------------------------------------------------------*/
/* send a request for the signaling entity */
/*------------------------------------------------------------------*/
/*
 * sig_req - append a request for the signaling entity to the PLCI's
 * request buffer. Finalizes the pending parameter area (writes its
 * length word) and appends the sig/nl flag byte, the request code and
 * a zero channel byte, then starts a new request record.
 */
static void sig_req(PLCI *plci, byte req, byte Id)
{
	word cursor;

	if (plci == NULL)
		return;
	if (plci->adapter->adapter_disabled)
		return;
	dbug(1, dprintf("sig_req(%x)", req));
	if (req == REMOVE)
		plci->sig_remove_id = plci->Sig.Id;
	cursor = plci->req_in;
	if (cursor == plci->req_in_start) {
		/* no parameters were added: reserve the length word and
		   write an empty parameter area terminator */
		cursor += 2;
		plci->RBuffer[cursor++] = 0;
	}
	/* parameter-area length, excluding the length word itself */
	PUT_WORD(&plci->RBuffer[plci->req_in_start], cursor - plci->req_in_start - 2);
	plci->RBuffer[cursor++] = Id;  /* sig/nl flag */
	plci->RBuffer[cursor++] = req; /* request */
	plci->RBuffer[cursor++] = 0;   /* channel */
	plci->req_in = cursor;
	plci->req_in_start = cursor;
}
/*------------------------------------------------------------------*/
/* send a request for the network layer entity */
/*------------------------------------------------------------------*/
/*
 * nl_req_ncci - append a request for the network-layer entity to the
 * PLCI's request buffer, addressed to the channel mapped from the
 * given NCCI. A REMOVE request additionally tears down the PLCI's
 * NCCI bookkeeping and is sent on channel 0.
 */
static void nl_req_ncci(PLCI *plci, byte req, byte ncci)
{
	word cursor;

	if (plci == NULL)
		return;
	if (plci->adapter->adapter_disabled)
		return;
	dbug(1, dprintf("nl_req %02x %02x %02x", plci->Id, req, ncci));
	if (req == REMOVE) {
		plci->nl_remove_id = plci->NL.Id;
		ncci_remove(plci, 0, (byte)(ncci != 0));
		ncci = 0;
	}
	cursor = plci->req_in;
	if (cursor == plci->req_in_start) {
		/* no parameters were added: reserve the length word and
		   write an empty parameter area terminator */
		cursor += 2;
		plci->RBuffer[cursor++] = 0;
	}
	/* parameter-area length, excluding the length word itself */
	PUT_WORD(&plci->RBuffer[plci->req_in_start], cursor - plci->req_in_start - 2);
	plci->RBuffer[cursor++] = 1;   /* sig/nl flag */
	plci->RBuffer[cursor++] = req; /* request */
	plci->RBuffer[cursor++] = plci->adapter->ncci_ch[ncci]; /* channel */
	plci->req_in = cursor;
	plci->req_in_start = cursor;
}
/*
 * send_req - take the next queued request record from the PLCI's
 * request ring buffer (RBuffer) and hand it to the adapter.
 *
 * Record layout (written by sig_req/nl_req_ncci): a length word,
 * the parameter bytes, then flag byte (1 = NL, otherwise Sig entity
 * id), request code and channel byte. Does nothing while a previous
 * signaling or network-layer request is still pending.
 */
static void send_req(PLCI *plci)
{
ENTITY *e;
word l;
/* word i; */
if (!plci) return;
if (plci->adapter->adapter_disabled) return;
channel_xmit_xon(plci);
/* if nothing to do, return */
if (plci->req_in == plci->req_out) return;
dbug(1, dprintf("send_req(in=%d,out=%d)", plci->req_in, plci->req_out));
/* only one outstanding request per entity */
if (plci->nl_req || plci->sig_req) return;
l = GET_WORD(&plci->RBuffer[plci->req_out]);
plci->req_out += 2;
plci->XData[0].P = &plci->RBuffer[plci->req_out];
plci->req_out += l;
if (plci->RBuffer[plci->req_out] == 1)
{
e = &plci->NL;
plci->req_out++;
e->Req = plci->nl_req = plci->RBuffer[plci->req_out++];
e->ReqCh = plci->RBuffer[plci->req_out++];
if (!(e->Id & 0x1f))
{
/* NL entity not assigned yet: turn the trailing flag/req/ch
   bytes into a CAI parameter carrying the Sig entity id and
   extend the data length accordingly (global NL request) */
e->Id = NL_ID;
plci->RBuffer[plci->req_out - 4] = CAI;
plci->RBuffer[plci->req_out - 3] = 1;
plci->RBuffer[plci->req_out - 2] = (plci->Sig.Id == 0xff) ? 0 : plci->Sig.Id;
plci->RBuffer[plci->req_out - 1] = 0;
l += 3;
plci->nl_global_req = plci->nl_req;
}
dbug(1, dprintf("%x:NLREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
}
else
{
e = &plci->Sig;
/* a non-zero flag byte carries the signaling entity id */
if (plci->RBuffer[plci->req_out])
e->Id = plci->RBuffer[plci->req_out];
plci->req_out++;
e->Req = plci->sig_req = plci->RBuffer[plci->req_out++];
e->ReqCh = plci->RBuffer[plci->req_out++];
if (!(e->Id & 0x1f))
plci->sig_global_req = plci->sig_req;
dbug(1, dprintf("%x:SIGREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
}
plci->XData[0].PLength = l;
e->X = plci->XData;
plci->adapter->request(e);
dbug(1, dprintf("send_ok"));
}
/*
 * send_data - walk the PLCI's ring list of NCCIs and submit the next
 * pending DATA_B3 buffer (or a deferred N_DISC) to the network layer.
 *
 * Stops as soon as one NL request has been issued (plci->nl_req set)
 * or the ring has been traversed once; the ring head is advanced so
 * NCCIs are served round-robin. Channels flagged N_OK_FC_PENDING
 * (flow controlled) are skipped.
 */
static void send_data(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
DATA_B3_DESC *data;
NCCI *ncci_ptr;
word ncci;
if (!plci->nl_req && plci->ncci_ring_list)
{
a = plci->adapter;
ncci = plci->ncci_ring_list;
do
{
ncci = a->ncci_next[ncci];
ncci_ptr = &(a->ncci[ncci]);
if (!(a->ncci_ch[ncci]
&& (a->ch_flow_control[a->ncci_ch[ncci]] & N_OK_FC_PENDING)))
{
if (ncci_ptr->data_pending)
{
if ((a->ncci_state[ncci] == CONNECTED)
|| (a->ncci_state[ncci] == INC_ACT_PENDING)
|| (plci->send_disc == ncci))
{
data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
/* V.120 data is sent as two descriptors: a one-byte
   terminal adaption header followed by the payload */
if ((plci->B2_prot == B2_V120_ASYNC)
|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT))
{
plci->NData[1].P = TransmitBufferGet(plci->appl, data->P);
plci->NData[1].PLength = data->Length;
if (data->Flags & 0x10)
plci->NData[0].P = v120_break_header;
else
plci->NData[0].P = v120_default_header;
plci->NData[0].PLength = 1;
plci->NL.XNum = 2;
plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
}
else
{
plci->NData[0].P = TransmitBufferGet(plci->appl, data->P);
plci->NData[0].PLength = data->Length;
/* DATA_B3 flags select the NL request: 0x10 -> UDATA,
   RTP with flag 0x01 -> BDATA, otherwise N_DATA with
   the qualifier bits in the high nibble */
if (data->Flags & 0x10)
plci->NL.Req = plci->nl_req = (byte)N_UDATA;
else if ((plci->B3_prot == B3_RTP) && (data->Flags & 0x01))
plci->NL.Req = plci->nl_req = (byte)N_BDATA;
else
plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
}
plci->NL.X = plci->NData;
plci->NL.ReqCh = a->ncci_ch[ncci];
dbug(1, dprintf("%x:DREQ(%x:%x)", a->Id, plci->NL.Id, plci->NL.Req));
plci->data_sent = true;
plci->data_sent_ptr = data->P;
a->request(&plci->NL);
}
else {
/* NCCI no longer in a sendable state: drop queued data */
cleanup_ncci_data(plci, ncci);
}
}
else if (plci->send_disc == ncci)
{
/* dprintf("N_DISC"); */
/* queued data fully sent: issue the deferred disconnect */
plci->NData[0].PLength = 0;
plci->NL.ReqCh = a->ncci_ch[ncci];
plci->NL.Req = plci->nl_req = N_DISC;
a->request(&plci->NL);
plci->command = _DISCONNECT_B3_R;
plci->send_disc = 0;
}
}
} while (!plci->nl_req && (ncci != plci->ncci_ring_list));
plci->ncci_ring_list = ncci;
}
}
/*
 * listen_check - make sure the adapter has enough listening PLCIs.
 * Allocates new PLCIs and sends ASSIGN/indicate requests until
 * listen_active reaches max_listen plus the number of PLCIs that
 * currently carry a call notification. No-op while a remove is in
 * progress or the adapter is disabled.
 */
static void listen_check(DIVA_CAPI_ADAPTER *a)
{
	word idx, plci_id;
	PLCI *plci;
	byte active_notified = 0;

	dbug(1, dprintf("listen_check(%d,%d)", a->listen_active, a->max_listen));
	if (remove_started || a->adapter_disabled)
		return;
	/* count PLCIs that currently have a notified call */
	for (idx = 0; idx < a->max_plci; idx++) {
		if (a->plci[idx].notifiedcall)
			active_notified++;
	}
	dbug(1, dprintf("listen_check(%d)", active_notified));
	idx = a->listen_active;
	while (idx < (word)(a->max_listen + active_notified)) {
		plci_id = get_plci(a);
		if (plci_id != 0) {
			a->listen_active++;
			plci = &a->plci[plci_id - 1];
			plci->State = LISTENING;
			add_p(plci, OAD, "\x01\xfd");
			add_p(plci, KEY, "\x04\x43\x41\x32\x30");
			add_p(plci, CAI, "\x01\xc0");
			add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
			add_p(plci, LLI, "\x01\xc4"); /* support Dummy CR FAC + MWI + SpoofNotify */
			add_p(plci, SHIFT | 6, NULL);
			add_p(plci, SIN, "\x02\x00\x00");
			plci->internal_command = LISTEN_SIG_ASSIGN_PEND; /* do indicate_req if OK */
			sig_req(plci, ASSIGN, DSIG_ID);
			send_req(plci);
		}
		idx++;
	}
}
/*------------------------------------------------------------------*/
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/
/*
 * IndParse - scan the Q.931 information elements in the signaling
 * indication buffer (plci->Sig.RBuffer) and fill parms[] with
 * pointers to the elements listed in parms_id.
 *
 * parms_id[0] is the count of requested IE codes, parms_id[1..] the
 * codes themselves (codeset in the high byte, ESC-coded IEs with bit
 * 0x800 set). With multiIEsize != 0 the same IE may occur multiple
 * times and parms[] is filled sequentially; otherwise parms[] is
 * indexed parallel to parms_id. Unmatched entries point to "".
 *
 * NOTE: OAD/CONN_NR/CAD elements with an extension bit in the second
 * octet are rewritten in place (in the receive buffer) so the parm
 * pointer covers the type-of-number octet separately.
 */
static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
{
word ploc; /* points to current location within packet */
byte w;
byte wlen;
byte codeset, lock;
byte *in;
word i;
word code;
word mIEindex = 0;
ploc = 0;
codeset = 0;
lock = 0;
in = plci->Sig.RBuffer->P;
for (i = 0; i < parms_id[0]; i++) /* multiIE parms_id contains just the 1st */
{ /* element but parms array is larger */
parms[i] = (byte *)"";
}
for (i = 0; i < multiIEsize; i++)
{
parms[i] = (byte *)"";
}
while (ploc < plci->Sig.RBuffer->length - 1) {
/* read information element id and length */
w = in[ploc];
if (w & 0x80) {
/* w &=0xf0; removed, cannot detect congestion levels */
/* upper 4 bit masked with w==SHIFT now */
/* single-octet IE: no length byte follows */
wlen = 0;
}
else {
wlen = (byte)(in[ploc + 1] + 1);
}
/* check if length valid (not exceeding end of packet) */
if ((ploc + wlen) > 270) return;
/* a non-locking SHIFT (bit 0x80 in 'lock') applies to one IE only */
if (lock & 0x80) lock &= 0x7f;
else codeset = lock;
if ((w & 0xf0) == SHIFT) {
codeset = in[ploc];
if (!(codeset & 0x08)) lock = (byte)(codeset & 7);
codeset &= 7;
lock |= 0x80;
}
else {
/* ESC-coded IEs use the escaped code plus 0x800 */
if (w == ESC && wlen >= 3) code = in[ploc + 2] | 0x800;
else code = w;
code |= (codeset << 8);
/* linear search of the requested IE codes */
for (i = 1; i < parms_id[0] + 1 && parms_id[i] != code; i++);
if (i < parms_id[0] + 1) {
if (!multiIEsize) { /* with multiIEs use next field index, */
mIEindex = i - 1; /* with normal IEs use same index like parms_id */
}
parms[mIEindex] = &in[ploc + 1];
dbug(1, dprintf("mIE[%d]=0x%x", *parms[mIEindex], in[ploc]));
if (parms_id[i] == OAD
|| parms_id[i] == CONN_NR
|| parms_id[i] == CAD) {
/* split off the type-of-number octet in place (see above) */
if (in[ploc + 2] & 0x80) {
in[ploc + 0] = (byte)(in[ploc + 1] + 1);
in[ploc + 1] = (byte)(in[ploc + 2] & 0x7f);
in[ploc + 2] = 0x80;
parms[mIEindex] = &in[ploc];
}
}
mIEindex++; /* effects multiIEs only */
}
}
ploc += (wlen + 1);
}
return;
}
/*------------------------------------------------------------------*/
/* try to match a cip from received BC and HLC */
/*------------------------------------------------------------------*/
/*
 * ie_compare - compare two information elements, including the
 * leading length octet. Returns true only if both pointers are
 * valid, ie1 is non-empty and all ie1[0]+1 octets match.
 */
static byte ie_compare(byte *ie1, byte *ie2)
{
	word idx;

	if (ie1 == NULL || ie2 == NULL)
		return false;
	if (ie1[0] == 0)
		return false;
	idx = 0;
	while (idx <= ie1[0]) {
		if (ie1[idx] != ie2[idx])
			return false;
		idx++;
	}
	return true;
}
/*
 * find_cip - map received bearer capability (bc) and high layer
 * compatibility (hlc) elements to a CAPI CIP value. Extended CIPs
 * (16..28, matching both BC and HLC tables) take precedence over the
 * plain BC-only CIPs (1..9); 0 means no match at all.
 */
static word find_cip(DIVA_CAPI_ADAPTER *a, byte *bc, byte *hlc)
{
	word bc_cip, ext_cip;

	/* search the BC-only table downwards; 0 if nothing matches */
	bc_cip = 9;
	while (bc_cip != 0 && !ie_compare(bc, cip_bc[bc_cip][a->u_law]))
		bc_cip--;
	/* search the extended table (needs BC and HLC to match) */
	ext_cip = 16;
	while (ext_cip < 29
	       && (!ie_compare(bc, cip_bc[ext_cip][a->u_law])
		   || !ie_compare(hlc, cip_hlc[ext_cip])))
		ext_cip++;
	return (ext_cip == 29) ? bc_cip : ext_cip;
}
/*
 * AddInfo - assemble the CAPI "additional info" parameter set.
 *
 * add_i[0] is set to a fixed "use neither B nor D channel" element
 * when esc_chi indicates no channel, otherwise to "". All received
 * FTY (facility) elements from fty_i are concatenated into the
 * caller-supplied 'facility' buffer (each prefixed with the 0x1c
 * facility IE tag) and add_i[3] points at that buffer.
 *
 * Returns the summed lengths of add_i[0..3] plus 4.
 *
 * NOTE(review): the copy loop bounds 'i' by MAX_MULTI_IE but never
 * checks 'j' against the size of 'facility' — presumably callers size
 * the buffer for the worst case; confirm before reuse.
 */
static byte AddInfo(byte **add_i,
byte **fty_i,
byte *esc_chi,
byte *facility)
{
byte i;
byte j;
byte k;
byte flen;
byte len = 0;
/* facility is a nested structure */
/* FTY can be more than once */
if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
{
add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
}
else
{
add_i[0] = (byte *)"";
}
if (!fty_i[0][0])
{
add_i[3] = (byte *)"";
}
else
{ /* facility array found */
for (i = 0, j = 1; i < MAX_MULTI_IE && fty_i[i][0]; i++)
{
dbug(1, dprintf("AddIFac[%d]", fty_i[i][0]));
/* each element adds its payload plus IE tag and length octet */
len += fty_i[i][0];
len += 2;
flen = fty_i[i][0];
facility[j++] = 0x1c; /* copy fac IE */
for (k = 0; k <= flen; k++, j++)
{
facility[j] = fty_i[i][k];
/* dbug(1, dprintf("%x ",facility[j])); */
}
}
facility[0] = len;
add_i[3] = facility;
}
/* dbug(1, dprintf("FacArrLen=%d ",len)); */
len = add_i[0][0] + add_i[1][0] + add_i[2][0] + add_i[3][0];
len += 4; /* calculate length of all */
return (len);
}
/*------------------------------------------------------------------*/
/* voice and codec features */
/*------------------------------------------------------------------*/
/*
 * Switch the external voice device on for the B-channel found in the
 * CHI element and, if an advanced-voice PLCI exists on this adapter,
 * reload its activation coefficients.
 */
static void SetVoiceChannel(PLCI *plci, byte *chi, DIVA_CAPI_ADAPTER *a)
{
	byte esc_data[] = "\x02\x18\x01";
	byte b_chan;

	b_chan = chi[chi[0]] & 0x3;
	dbug(1, dprintf("ExtDevON(Ch=0x%x)", b_chan));
	/* channel 0 falls back to B1 */
	esc_data[2] = b_chan ? b_chan : 1;
	add_p(plci, FTY, "\x02\x01\x07"); /* B On, default on 1 */
	add_p(plci, ESC, esc_data); /* Channel */
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
	if (a->AdvSignalPLCI)
		adv_voice_write_coefs(a->AdvSignalPLCI, ADV_VOICE_WRITE_ACTIVATION);
}
/*
 * Switch the external voice device off and clear any advanced-voice
 * coefficient configuration on this adapter.
 */
static void VoiceChannelOff(PLCI *plci)
{
	dbug(1, dprintf("ExtDevOFF"));
	add_p(plci, FTY, "\x02\x01\x08"); /* B Off */
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
	if (plci->adapter->AdvSignalPLCI)
		adv_voice_clear_config(plci->adapter->AdvSignalPLCI);
}
/*
 * AdvCodecSupport - reserve the adapter's codec for a call.
 * For handset-capable hardware (HANDSET) an internal "codec PLCI" is
 * assigned that permanently owns the codec; for plain on-board codecs
 * (ON_BOARD_CODEC) the card's internal handset application is disabled
 * once.  hook_listen != 0 means the application asked for hook state
 * signalling (handset support), otherwise hook info is discarded.
 * Returns 0 on success, 0x2001 (codec busy / no free PLCI) or
 * 0x300B (facility not supported).
 */
static word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl,
			    byte hook_listen)
{
	word j;
	PLCI *splci;

	/* check if hardware supports handset with hook states (adv.codec) */
	/* or if just a on board codec is supported */
	/* the advanced codec plci is just for internal use */

	/* diva Pro with on-board codec: */
	if (a->profile.Global_Options & HANDSET)
	{
		/* new call, but hook states are already signalled */
		if (a->AdvCodecFLAG)
		{
			if (a->AdvSignalAppl != appl || a->AdvSignalPLCI)
			{
				dbug(1, dprintf("AdvSigPlci=0x%x", a->AdvSignalPLCI));
				return 0x2001; /* codec in use by another application */
			}
			if (plci != NULL)
			{
				a->AdvSignalPLCI = plci;
				plci->tel = ADV_VOICE;
			}
			return 0; /* adv codec still used */
		}
		if ((j = get_plci(a)))
		{
			/* allocate the internal PLCI that permanently owns the codec */
			splci = &a->plci[j - 1];
			splci->tel = CODEC_PERMANENT;
			/* hook_listen indicates if a facility_req with handset/hook support */
			/* was sent. Otherwise if just a call on an external device was made */
			/* the codec will be used but the hook info will be discarded (just */
			/* the external controller is in use */
			if (hook_listen) splci->State = ADVANCED_VOICE_SIG;
			else
			{
				splci->State = ADVANCED_VOICE_NOSIG;
				if (plci)
				{
					/* delay the application's message until the codec
					   assignment has completed */
					plci->spoofed_msg = SPOOFING_REQUIRED;
				}
				/* indicate D-ch connect if */
			} /* codec is connected OK */
			if (plci != NULL)
			{
				a->AdvSignalPLCI = plci;
				plci->tel = ADV_VOICE;
			}
			a->AdvSignalAppl = appl;
			a->AdvCodecFLAG = true;
			a->AdvCodecPLCI = splci;
			/* assign the codec signalling entity */
			add_p(splci, CAI, "\x01\x15");
			add_p(splci, LLI, "\x01\x00");
			add_p(splci, ESC, "\x02\x18\x00");
			add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
			splci->internal_command = PERM_COD_ASSIGN;
			dbug(1, dprintf("Codec Assign"));
			sig_req(splci, ASSIGN, DSIG_ID);
			send_req(splci);
		}
		else
		{
			return 0x2001; /* wrong state, no more plcis */
		}
	}
	else if (a->profile.Global_Options & ON_BOARD_CODEC)
	{
		if (hook_listen) return 0x300B; /* Facility not supported */
		/* no hook with SCOM */
		if (plci != NULL) plci->tel = CODEC;
		dbug(1, dprintf("S/SCOM codec"));
		/* first time we use the scom-s codec we must shut down the internal */
		/* handset application of the card. This can be done by an assign with */
		/* a cai with the 0x80 bit set. Assign return code is 'out of resource'*/
		if (!a->scom_appl_disable) {
			if ((j = get_plci(a))) {
				splci = &a->plci[j - 1];
				add_p(splci, CAI, "\x01\x80");
				add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
				sig_req(splci, ASSIGN, 0xC0); /* 0xc0 is the TEL_ID */
				send_req(splci);
				a->scom_appl_disable = true;
			}
			else{
				return 0x2001; /* wrong state, no more plcis */
			}
		}
	}
	else return 0x300B; /* Facility not supported */
	return 0;
}
/*
 * Release the codec if this PLCI is its current owner.  A temporary
 * (NOSIG) codec PLCI is removed completely; otherwise only the owner
 * reference is cleared.
 */
static void CodecIdCheck(DIVA_CAPI_ADAPTER *a, PLCI *plci)
{
	dbug(1, dprintf("CodecIdCheck"));
	if (a->AdvSignalPLCI != plci)
		return;
	dbug(1, dprintf("PLCI owns codec"));
	VoiceChannelOff(a->AdvCodecPLCI);
	if (a->AdvCodecPLCI->State == ADVANCED_VOICE_NOSIG)
	{
		dbug(1, dprintf("remove temp codec PLCI"));
		plci_remove(a->AdvCodecPLCI);
		a->AdvCodecFLAG = 0;
		a->AdvCodecPLCI = NULL;
		a->AdvSignalAppl = NULL;
	}
	a->AdvSignalPLCI = NULL;
}
/* -------------------------------------------------------------------
Ask for physical address of card on PCI bus
------------------------------------------------------------------- */
/*
 * Query XDI for the physical SDRAM BAR of the adapter on the PCI bus.
 * Leaves a->sdram_bar at 0 when the feature is not provided.
 */
static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *a,
				       IDI_SYNC_REQ *preq) {
	ENTITY *e;

	a->sdram_bar = 0;
	if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR))
		return;
	e = (ENTITY *)preq;
	e->user[0] = a->Id - 1;
	preq->xdi_sdram_bar.info.bar = 0;
	preq->xdi_sdram_bar.Req = 0;
	preq->xdi_sdram_bar.Rc = IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR;
	(*(a->request))(e);
	a->sdram_bar = preq->xdi_sdram_bar.info.bar;
	dbug(3, dprintf("A(%d) SDRAM BAR = %08x", a->Id, a->sdram_bar));
}
/* -------------------------------------------------------------------
Ask XDI about extended features
------------------------------------------------------------------- */
/*
 * Query XDI once for its extended feature set and cache the result in
 * the global diva_xdi_extended_features; afterwards ask for the SDRAM
 * BAR (which depends on one of those feature bits).
 */
static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a) {
	IDI_SYNC_REQ *preq;
	/* the buffer must be large enough for the request structure but
	   never smaller than an ENTITY, since it is cast to both */
	char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
	char features[4];
	preq = (IDI_SYNC_REQ *)&buffer[0];

	if (!diva_xdi_extended_features) {
		ENTITY *e = (ENTITY *)preq;
		/* bit 31 marks "query already performed" so this runs once */
		diva_xdi_extended_features |= 0x80000000;
		e->user[0] = a->Id - 1;
		preq->xdi_extended_features.Req = 0;
		preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
		preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
		preq->xdi_extended_features.info.features = &features[0];
		(*(a->request))(e);
		if (features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) {
			/*
			  Check features located in the byte '0'
			*/
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_CMA) {
				diva_xdi_extended_features |= DIVA_CAPI_USE_CMA;
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA) {
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_RX_DMA;
				dbug(1, dprintf("XDI provides RxDMA"));
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR) {
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR;
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC) {
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_NO_CANCEL;
				dbug(3, dprintf("XDI provides NO_CANCEL_RC feature"));
			}
		}
	}
	diva_ask_for_xdi_sdram_bar(a, preq);
}
/*------------------------------------------------------------------*/
/* automatic law */
/*------------------------------------------------------------------*/
/* called from OS specific part after init time to get the Law */
/* a-law (Euro) and u-law (us,japan) use different BCs in the Setup message */
/*
 * Called once per adapter from the OS specific part after init to
 * determine the voice coding law (a-law vs u-law differ in the Setup
 * message BCs).  Assigns an internal PLCI running the USELAW_REQ
 * internal command; does nothing if the query already ran or no PLCI
 * is free.
 */
void AutomaticLaw(DIVA_CAPI_ADAPTER *a)
{
	word idx;
	PLCI *splci;

	if (a->automatic_law)
		return;
	idx = get_plci(a);
	if (!idx)
		return;
	diva_get_extended_adapter_features(a);
	splci = &a->plci[idx - 1];
	a->automatic_lawPLCI = splci;
	a->automatic_law = 1;
	add_p(splci, CAI, "\x01\x80");
	add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
	splci->internal_command = USELAW_REQ;
	splci->command = 0;
	splci->number = 0;
	sig_req(splci, ASSIGN, DSIG_ID);
	send_req(splci);
}
/* called from OS specific part if an application sends an Capi20Release */
/*
 * CapiRelease - called from the OS specific part when an application
 * issues CAPI_RELEASE.  Clears all per-application masks on every
 * adapter, hangs up or removes every PLCI the application owns or is
 * still offered, optionally takes layer 1 down when the last
 * application leaves, and releases the advanced-voice codec if owned.
 * Returns GOOD, or _WRONG_APPL_ID when Id is 0.
 */
word CapiRelease(word Id)
{
	word i, j, appls_found;
	PLCI *plci;
	APPL *this;
	DIVA_CAPI_ADAPTER *a;

	if (!Id)
	{
		dbug(0, dprintf("A: CapiRelease(Id==0)"));
		return (_WRONG_APPL_ID);
	}
	this = &application[Id - 1]; /* get application pointer */
	/* count registered applications to detect the last one leaving */
	for (i = 0, appls_found = 0; i < max_appl; i++)
	{
		if (application[i].Id) /* an application has been found */
		{
			appls_found++;
		}
	}
	for (i = 0; i < max_adapter; i++) /* scan all adapters... */
	{
		a = &adapter[i];
		if (a->request)
		{
			/* wipe the per-application state on this adapter */
			a->Info_Mask[Id - 1] = 0;
			a->CIP_Mask[Id - 1] = 0;
			a->Notification_Mask[Id - 1] = 0;
			a->codec_listen[Id - 1] = NULL;
			a->requested_options_table[Id - 1] = 0;
			for (j = 0; j < a->max_plci; j++) /* and all PLCIs connected */
			{ /* with this application */
				plci = &a->plci[j];
				if (plci->Id) /* the plci may own no application */
				{ /* as it may be not yet connected */
					if (plci->State == INC_CON_PENDING
					    || plci->State == INC_CON_ALERT)
					{
						/* withdraw a pending incoming call from this appl;
						   hang up once no other appl is offered it */
						if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
						{
							clear_c_ind_mask_bit(plci, (word)(Id - 1));
							if (c_ind_mask_empty(plci))
							{
								sig_req(plci, HANGUP, 0);
								send_req(plci);
								plci->State = OUTG_DIS_PENDING;
							}
						}
					}
					if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
					{
						clear_c_ind_mask_bit(plci, (word)(Id - 1));
						if (c_ind_mask_empty(plci))
						{
							if (!plci->appl)
							{
								/* unowned and no longer offered: free it */
								plci_remove(plci);
								plci->State = IDLE;
							}
						}
					}
					if (plci->appl == this)
					{
						/* PLCI owned by the releasing application */
						plci->appl = NULL;
						plci_remove(plci);
						plci->State = IDLE;
					}
				}
			}
			listen_check(a);
			if (a->flag_dynamic_l1_down)
			{
				if (appls_found == 1) /* last application does a capi release */
				{
					if ((j = get_plci(a)))
					{
						/* assign a helper PLCI and request layer 1 down */
						plci = &a->plci[j - 1];
						plci->command = 0;
						add_p(plci, OAD, "\x01\xfd");
						add_p(plci, CAI, "\x01\x80");
						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						add_p(plci, SHIFT | 6, NULL);
						add_p(plci, SIN, "\x02\x00\x00");
						plci->internal_command = REM_L1_SIG_ASSIGN_PEND;
						sig_req(plci, ASSIGN, DSIG_ID);
						add_p(plci, FTY, "\x02\xff\x06"); /* l1 down */
						sig_req(plci, SIG_CTRL, 0);
						send_req(plci);
					}
				}
			}
			if (a->AdvSignalAppl == this)
			{
				/* release the advanced-voice codec owned by this appl */
				this->NullCREnable = false;
				if (a->AdvCodecPLCI)
				{
					plci_remove(a->AdvCodecPLCI);
					a->AdvCodecPLCI->tel = 0;
					a->AdvCodecPLCI->adv_nl = 0;
				}
				a->AdvSignalAppl = NULL;
				a->AdvSignalPLCI = NULL;
				a->AdvCodecFLAG = 0;
				a->AdvCodecPLCI = NULL;
			}
		}
	}
	this->Id = 0; /* mark the application slot free */
	return GOOD;
}
/*
 * Check whether a PLCI has completely shut down (no network layer id,
 * no pending call indications, no signalling id) and, if so, release
 * all of its remaining resources.  Returns true when the PLCI is gone.
 */
static word plci_remove_check(PLCI *plci)
{
	if (!plci)
		return true;
	if (plci->NL.Id || !c_ind_mask_empty(plci))
		return false;
	/* 0xff marks a signalling id pending removal - treat as gone */
	if (plci->Sig.Id == 0xff)
		plci->Sig.Id = 0;
	if (plci->Sig.Id)
		return false;
	dbug(1, dprintf("plci_remove_complete(%x)", plci->Id));
	dbug(1, dprintf("tel=0x%x,Sig=0x%x", plci->tel, plci->Sig.Id));
	if (plci->Id)
	{
		CodecIdCheck(plci->adapter, plci);
		clear_b1_config(plci);
		ncci_remove(plci, 0, false);
		plci_free_msg_in_queue(plci);
		channel_flow_control_remove(plci);
		plci->Id = 0;
		plci->State = IDLE;
		plci->channels = 0;
		plci->appl = NULL;
		plci->notifiedcall = 0;
	}
	listen_check(plci->adapter);
	return true;
}
/*------------------------------------------------------------------*/
/*
 * Return nonzero while the network layer cannot accept another request:
 * either an NL request is outstanding or flow control is pending on the
 * channel at the head of the NCCI ring list.
 * Only applicable for non-multiplexed protocols.
 */
static byte plci_nl_busy(PLCI *plci)
{
	byte ch;

	if (plci->nl_req)
		return true;
	if (!plci->ncci_ring_list)
		return false;
	ch = plci->adapter->ncci_ch[plci->ncci_ring_list];
	if (!ch)
		return false;
	return (plci->adapter->ch_flow_control[ch] & N_OK_FC_PENDING) != 0;
}
/*------------------------------------------------------------------*/
/* DTMF facilities */
/*------------------------------------------------------------------*/
/*
 * Translation table between the CAPI character coding of DTMF digits,
 * MF digits and call-progress tones and the DSP tone codes.
 * The mask bits select the facility mode an entry belongs to:
 * 0x01 = DTMF digits, 0x02 = MF digits, 0x04 = tone signals
 * (matching the mask built by the shifts in dtmf_command/dtmf_request).
 */
static struct
{
	byte send_mask;   /* modes in which this entry may be sent */
	byte listen_mask; /* modes in which this entry may be reported */
	byte character;   /* CAPI coding of the digit/tone */
	byte code;        /* DSP coding of the digit/tone */
} dtmf_digit_map[] =
{
	{ 0x01, 0x01, 0x23, DTMF_DIGIT_TONE_CODE_HASHMARK },
	{ 0x01, 0x01, 0x2a, DTMF_DIGIT_TONE_CODE_STAR },
	{ 0x01, 0x01, 0x30, DTMF_DIGIT_TONE_CODE_0 },
	{ 0x01, 0x01, 0x31, DTMF_DIGIT_TONE_CODE_1 },
	{ 0x01, 0x01, 0x32, DTMF_DIGIT_TONE_CODE_2 },
	{ 0x01, 0x01, 0x33, DTMF_DIGIT_TONE_CODE_3 },
	{ 0x01, 0x01, 0x34, DTMF_DIGIT_TONE_CODE_4 },
	{ 0x01, 0x01, 0x35, DTMF_DIGIT_TONE_CODE_5 },
	{ 0x01, 0x01, 0x36, DTMF_DIGIT_TONE_CODE_6 },
	{ 0x01, 0x01, 0x37, DTMF_DIGIT_TONE_CODE_7 },
	{ 0x01, 0x01, 0x38, DTMF_DIGIT_TONE_CODE_8 },
	{ 0x01, 0x01, 0x39, DTMF_DIGIT_TONE_CODE_9 },
	{ 0x01, 0x01, 0x41, DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x01, 0x42, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x01, 0x43, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x01, 0x44, DTMF_DIGIT_TONE_CODE_D },
	/* lowercase a-d may be sent but are never reported */
	{ 0x01, 0x00, 0x61, DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x00, 0x62, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x00, 0x63, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x00, 0x64, DTMF_DIGIT_TONE_CODE_D },
	{ 0x04, 0x04, 0x80, DTMF_SIGNAL_NO_TONE },
	{ 0x00, 0x04, 0x81, DTMF_SIGNAL_UNIDENTIFIED_TONE },
	{ 0x04, 0x04, 0x82, DTMF_SIGNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x83, DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x84, DTMF_SIGNAL_SPECIAL_DIAL_TONE },
	{ 0x04, 0x04, 0x85, DTMF_SIGNAL_SECOND_DIAL_TONE },
	{ 0x04, 0x04, 0x86, DTMF_SIGNAL_RINGING_TONE },
	{ 0x04, 0x04, 0x87, DTMF_SIGNAL_SPECIAL_RINGING_TONE },
	{ 0x04, 0x04, 0x88, DTMF_SIGNAL_BUSY_TONE },
	{ 0x04, 0x04, 0x89, DTMF_SIGNAL_CONGESTION_TONE },
	{ 0x04, 0x04, 0x8a, DTMF_SIGNAL_SPECIAL_INFORMATION_TONE },
	{ 0x04, 0x04, 0x8b, DTMF_SIGNAL_COMFORT_TONE },
	{ 0x04, 0x04, 0x8c, DTMF_SIGNAL_HOLD_TONE },
	{ 0x04, 0x04, 0x8d, DTMF_SIGNAL_RECORD_TONE },
	{ 0x04, 0x04, 0x8e, DTMF_SIGNAL_CALLER_WAITING_TONE },
	{ 0x04, 0x04, 0x8f, DTMF_SIGNAL_CALL_WAITING_TONE },
	{ 0x04, 0x04, 0x90, DTMF_SIGNAL_PAY_TONE },
	{ 0x04, 0x04, 0x91, DTMF_SIGNAL_POSITIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x92, DTMF_SIGNAL_NEGATIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x93, DTMF_SIGNAL_WARNING_TONE },
	{ 0x04, 0x04, 0x94, DTMF_SIGNAL_INTRUSION_TONE },
	{ 0x04, 0x04, 0x95, DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE },
	{ 0x04, 0x04, 0x96, DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE },
	{ 0x04, 0x04, 0x97, DTMF_SIGNAL_CPE_ALERTING_SIGNAL },
	{ 0x04, 0x04, 0x98, DTMF_SIGNAL_OFF_HOOK_WARNING_TONE },
	{ 0x04, 0x04, 0xbf, DTMF_SIGNAL_INTERCEPT_TONE },
	{ 0x04, 0x04, 0xc0, DTMF_SIGNAL_MODEM_CALLING_TONE },
	{ 0x04, 0x04, 0xc1, DTMF_SIGNAL_FAX_CALLING_TONE },
	{ 0x04, 0x04, 0xc2, DTMF_SIGNAL_ANSWER_TONE },
	{ 0x04, 0x04, 0xc3, DTMF_SIGNAL_REVERSED_ANSWER_TONE },
	{ 0x04, 0x04, 0xc4, DTMF_SIGNAL_ANSAM_TONE },
	{ 0x04, 0x04, 0xc5, DTMF_SIGNAL_REVERSED_ANSAM_TONE },
	{ 0x04, 0x04, 0xc6, DTMF_SIGNAL_BELL103_ANSWER_TONE },
	{ 0x04, 0x04, 0xc7, DTMF_SIGNAL_FAX_FLAGS },
	{ 0x04, 0x04, 0xc8, DTMF_SIGNAL_G2_FAX_GROUP_ID },
	/* listen-only entries (cannot be sent) */
	{ 0x00, 0x04, 0xc9, DTMF_SIGNAL_HUMAN_SPEECH },
	{ 0x04, 0x04, 0xca, DTMF_SIGNAL_ANSWERING_MACHINE_390 },
	{ 0x02, 0x02, 0xf1, DTMF_MF_DIGIT_TONE_CODE_1 },
	{ 0x02, 0x02, 0xf2, DTMF_MF_DIGIT_TONE_CODE_2 },
	{ 0x02, 0x02, 0xf3, DTMF_MF_DIGIT_TONE_CODE_3 },
	{ 0x02, 0x02, 0xf4, DTMF_MF_DIGIT_TONE_CODE_4 },
	{ 0x02, 0x02, 0xf5, DTMF_MF_DIGIT_TONE_CODE_5 },
	{ 0x02, 0x02, 0xf6, DTMF_MF_DIGIT_TONE_CODE_6 },
	{ 0x02, 0x02, 0xf7, DTMF_MF_DIGIT_TONE_CODE_7 },
	{ 0x02, 0x02, 0xf8, DTMF_MF_DIGIT_TONE_CODE_8 },
	{ 0x02, 0x02, 0xf9, DTMF_MF_DIGIT_TONE_CODE_9 },
	{ 0x02, 0x02, 0xfa, DTMF_MF_DIGIT_TONE_CODE_0 },
	{ 0x02, 0x02, 0xfb, DTMF_MF_DIGIT_TONE_CODE_K1 },
	{ 0x02, 0x02, 0xfc, DTMF_MF_DIGIT_TONE_CODE_K2 },
	{ 0x02, 0x02, 0xfd, DTMF_MF_DIGIT_TONE_CODE_KP },
	{ 0x02, 0x02, 0xfe, DTMF_MF_DIGIT_TONE_CODE_S1 },
	{ 0x02, 0x02, 0xff, DTMF_MF_DIGIT_TONE_CODE_ST },
};

#define DTMF_DIGIT_MAP_ENTRIES ARRAY_SIZE(dtmf_digit_map)
/*
 * dtmf_enable_receiver - send an N_UDATA request that enables (mask
 * nonzero) or disables the card's DTMF receiver, and keep the software
 * detector (capidtmf) in sync with that state.
 */
static void dtmf_enable_receiver(PLCI *plci, byte enable_mask)
{
	word min_digit_duration, min_gap_duration;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_enable_receiver %02x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, enable_mask));
	if (enable_mask != 0)
	{
		/* 0 means "use default": 40 ms minimum pulse/pause */
		min_digit_duration = (plci->dtmf_rec_pulse_ms == 0) ? 40 : plci->dtmf_rec_pulse_ms;
		min_gap_duration = (plci->dtmf_rec_pause_ms == 0) ? 40 : plci->dtmf_rec_pause_ms;
		/* request layout: opcode, digit duration, gap duration, buffer size */
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_ENABLE_RECEIVER;
		PUT_WORD(&plci->internal_req_buffer[1], min_digit_duration);
		PUT_WORD(&plci->internal_req_buffer[3], min_gap_duration);
		plci->NData[0].PLength = 5;

		PUT_WORD(&plci->internal_req_buffer[5], INTERNAL_IND_BUFFER_SIZE);
		plci->NData[0].PLength += 2;
		capidtmf_recv_enable(&(plci->capidtmf_state), min_digit_duration, min_gap_duration);
	}
	else
	{
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_DISABLE_RECEIVER;
		plci->NData[0].PLength = 1;
		capidtmf_recv_disable(&(plci->capidtmf_state));
	}
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * dtmf_send_digits - send an N_UDATA request asking the card to play
 * the given digits.  Each CAPI character is translated to its DSP tone
 * code via dtmf_digit_map; unknown characters fall back to '*'.
 */
static void dtmf_send_digits(PLCI *plci, byte *digit_buffer, word digit_count)
{
	word val, idx, map_idx;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_digits %d",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, digit_count));
	plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_SEND_DIGITS;
	/* 0 means "use default": 40 ms pulse/pause */
	val = plci->dtmf_send_pulse_ms ? plci->dtmf_send_pulse_ms : 40;
	PUT_WORD(&plci->internal_req_buffer[1], val);
	val = plci->dtmf_send_pause_ms ? plci->dtmf_send_pause_ms : 40;
	PUT_WORD(&plci->internal_req_buffer[3], val);
	for (idx = 0; idx < digit_count; idx++)
	{
		for (map_idx = 0; map_idx < DTMF_DIGIT_MAP_ENTRIES; map_idx++)
		{
			if (digit_buffer[idx] == dtmf_digit_map[map_idx].character)
				break;
		}
		plci->internal_req_buffer[5 + idx] = (map_idx < DTMF_DIGIT_MAP_ENTRIES) ?
			dtmf_digit_map[map_idx].code : DTMF_DIGIT_TONE_CODE_STAR;
	}
	plci->NData[0].PLength = 5 + digit_count;
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * Reset the DTMF receive configuration of a PLCI and reinitialize the
 * software detector for the adapter's coding law.
 */
static void dtmf_rec_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_rec_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_rec_pulse_ms = 0;
	plci->dtmf_rec_pause_ms = 0;
	plci->dtmf_rec_active = 0;
	capidtmf_init(&(plci->capidtmf_state), plci->adapter->u_law);
}
/*
 * Reset the DTMF send configuration of a PLCI (queued requests and
 * timing overrides).
 */
static void dtmf_send_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_send_pulse_ms = 0;
	plci->dtmf_send_pause_ms = 0;
	plci->dtmf_send_requests = 0;
}
/*
 * Flush all pending DTMF send confirmations before a B-channel switch.
 */
static void dtmf_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	for (; plci->dtmf_send_requests != 0;)
		dtmf_confirmation(Id, plci);
}
/*
 * dtmf_save_config - adjust-B hook; DTMF keeps no state that needs
 * saving, so this only traces and reports success.
 */
static word dtmf_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * dtmf_restore_config - adjust-B hook: re-enable the DTMF receiver
 * after a B-channel reconfiguration if the B1 facility is active.
 * Returns GOOD or _WRONG_STATE when the re-enable failed.
 */
static word dtmf_restore_config(dword Id, PLCI *plci, byte Rc)
{
	word Info;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if (plci->B1_facilities & B1_FACILITY_DTMFR)
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_DTMF_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci_nl_busy(plci))
			{
				/* network layer busy: stay in this state and retry */
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
				break;
			}
			dtmf_enable_receiver(plci, plci->dtmf_rec_active);
			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_2;
			break;
		case ADJUST_B_RESTORE_DTMF_2:
			/* evaluate the return code of the enable request */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Reenable DTMF receiver failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*
 * dtmf_command - internal-command state machine for DTMF facility
 * requests.  Invoked via start_internal_command() and re-entered with
 * the return code (Rc) of each issued request.  Depending on
 * plci->dtmf_cmd it loads/unloads the B1 DTMF resource, enables or
 * disables the receiver, or queues digits to send.  Sends the FACILITY
 * confirm when the command completes or fails; returns early while
 * more steps are pending.
 */
static void dtmf_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command, Info;
	byte mask;
	byte result[4];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_command %02x %04x %04x %d %d %d %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
			plci->dtmf_cmd, plci->dtmf_rec_pulse_ms, plci->dtmf_rec_pause_ms,
			plci->dtmf_send_pulse_ms, plci->dtmf_send_pause_ms));
	Info = GOOD;
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	/* mask selects the mode: 0x01 DTMF, 0x02 MF, 0x04 tones -
	   built up by the deliberate case fallthroughs below */
	mask = 0x01;
	switch (plci->dtmf_cmd)
	{

	case DTMF_LISTEN_TONE_START:
		mask <<= 1; /* fallthrough */
	case DTMF_LISTEN_MF_START:
		mask <<= 1; /* fallthrough */
	case DTMF_LISTEN_START:
		switch (internal_command)
		{
		default:
			/* step 1: make sure the B1 resource has the receiver loaded */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
								  B1_FACILITY_DTMFR), DTMF_COMMAND_1);
			/* fallthrough */
		case DTMF_COMMAND_1:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			/* fallthrough */
		case DTMF_COMMAND_2:
			/* step 2: wait for the network layer, then enable */
			if (plci_nl_busy(plci))
			{
				plci->internal_command = DTMF_COMMAND_2;
				return;
			}
			plci->internal_command = DTMF_COMMAND_3;
			dtmf_enable_receiver(plci, (byte)(plci->dtmf_rec_active | mask));
			return;
		case DTMF_COMMAND_3:
			/* step 3: evaluate the enable-receiver return code */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Enable DTMF receiver failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			plci->tone_last_indication_code = DTMF_SIGNAL_NO_TONE;
			plci->dtmf_rec_active |= mask;
			break;
		}
		break;

	case DTMF_LISTEN_TONE_STOP:
		mask <<= 1; /* fallthrough */
	case DTMF_LISTEN_MF_STOP:
		mask <<= 1; /* fallthrough */
	case DTMF_LISTEN_STOP:
		switch (internal_command)
		{
		default:
			plci->dtmf_rec_active &= ~mask;
			if (plci->dtmf_rec_active)
				break;
/*
  case DTMF_COMMAND_1:
  if (plci->dtmf_rec_active)
  {
  if (plci_nl_busy (plci))
  {
  plci->internal_command = DTMF_COMMAND_1;
  return;
  }
  plci->dtmf_rec_active &= ~mask;
  plci->internal_command = DTMF_COMMAND_2;
  dtmf_enable_receiver (plci, false);
  return;
  }
  Rc = OK;
  case DTMF_COMMAND_2:
  if ((Rc != OK) && (Rc != OK_FC))
  {
  dbug (1, dprintf("[%06lx] %s,%d: Disable DTMF receiver failed %02x",
  UnMapId (Id), (char far *)(FILE_), __LINE__, Rc));
  Info = _FACILITY_NOT_SUPPORTED;
  break;
  }
*/
			/* no listen mode left: unload the DTMF resources */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
								  ~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3);
			/* fallthrough */
		case DTMF_COMMAND_3:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Unload DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			break;
		}
		break;

	case DTMF_SEND_TONE:
		mask <<= 1; /* fallthrough */
	case DTMF_SEND_MF:
		mask <<= 1; /* fallthrough */
	case DTMF_DIGITS_SEND:
		switch (internal_command)
		{
		default:
			/* sending needs DTMFX; with parameters also DTMFR */
			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
								  ((plci->dtmf_parameter_length != 0) ? B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)),
					   DTMF_COMMAND_1);
			/* fallthrough */
		case DTMF_COMMAND_1:
			if (adjust_b_process(Id, plci, Rc) != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				return;
			/* fallthrough */
		case DTMF_COMMAND_2:
			if (plci_nl_busy(plci))
			{
				plci->internal_command = DTMF_COMMAND_2;
				return;
			}
			/* remember the message number for the deferred confirm */
			plci->dtmf_msg_number_queue[(plci->dtmf_send_requests)++] = plci->number;
			plci->internal_command = DTMF_COMMAND_3;
			dtmf_send_digits(plci, &plci->saved_msg.parms[3].info[1], plci->saved_msg.parms[3].length);
			return;
		case DTMF_COMMAND_3:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Send DTMF digits failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				if (plci->dtmf_send_requests != 0)
					(plci->dtmf_send_requests)--;
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			/* success: the confirm is sent from dtmf_confirmation() */
			return;
		}
		break;
	}
	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
	      "wws", Info, SELECTOR_DTMF, result);
}
/*
 * dtmf_request - handle a FACILITY_REQ with the DTMF selector.
 * Answers capability queries directly; for listen/send commands it
 * validates options and parameters, stores them on the PLCI and starts
 * the dtmf_command() state machine (which sends the confirm later).
 * All error paths confirm immediately.  Always returns false (no
 * pending application message).
 */
static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i, j;
	byte mask;
	API_PARSE dtmf_parms[5];
	byte result[40];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_request",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	Info = GOOD;
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	if (!(a->profile.Global_Options & GL_DTMF_SUPPORTED))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _FACILITY_NOT_SUPPORTED;
	}
	else if (api_parse(&msg[1].info[1], msg[1].length, "w", dtmf_parms))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_MESSAGE_FORMAT;
	}
	else if ((GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
		 || (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_SEND_CODES))
	{
		/* capability query: requires the private DTMF-tone option */
		if (!((a->requested_options_table[appl->Id - 1])
		      & (1L << PRIVATE_DTMF_TONE)))
		{
			dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
			PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
		}
		else
		{
			/* build a 256-bit map of the supported characters */
			for (i = 0; i < 32; i++)
				result[4 + i] = 0;
			if (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
			{
				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
				{
					if (dtmf_digit_map[i].listen_mask != 0)
						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
				}
			}
			else
			{
				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
				{
					if (dtmf_digit_map[i].send_mask != 0)
						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
				}
			}
			result[0] = 3 + 32;
			result[3] = 32;
		}
	}
	else if (plci == NULL)
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_IDENTIFIER;
	}
	else
	{
		if (!plci->State
		    || !plci->NL.Id || plci->nl_remove_id)
		{
			dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			Info = _WRONG_STATE;
		}
		else
		{
			plci->command = 0;
			plci->dtmf_cmd = GET_WORD(dtmf_parms[0].info);
			/* mask: 0x01 DTMF, 0x02 MF, 0x04 tones (case fallthroughs) */
			mask = 0x01;
			switch (plci->dtmf_cmd)
			{

			case DTMF_LISTEN_TONE_START:
			case DTMF_LISTEN_TONE_STOP:
				mask <<= 1; /* fallthrough */
			case DTMF_LISTEN_MF_START:
			case DTMF_LISTEN_MF_STOP:
				mask <<= 1;
				/* MF/tone listening requires the private option */
				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
				      & (1L << PRIVATE_DTMF_TONE)))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
					break;
				}
				/* fallthrough */
			case DTMF_LISTEN_START:
			case DTMF_LISTEN_STOP:
				if (!(a->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
				    && !(a->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _FACILITY_NOT_SUPPORTED;
					break;
				}
				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
				{
					/* optional pulse/pause parameters; 0 selects defaults */
					if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
					{
						plci->dtmf_rec_pulse_ms = 0;
						plci->dtmf_rec_pause_ms = 0;
					}
					else
					{
						plci->dtmf_rec_pulse_ms = GET_WORD(dtmf_parms[1].info);
						plci->dtmf_rec_pause_ms = GET_WORD(dtmf_parms[2].info);
					}
				}
				start_internal_command(Id, plci, dtmf_command);
				return (false);

			case DTMF_SEND_TONE:
				mask <<= 1; /* fallthrough */
			case DTMF_SEND_MF:
				mask <<= 1;
				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
				      & (1L << PRIVATE_DTMF_TONE)))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
					break;
				}
				/* fallthrough */
			case DTMF_DIGITS_SEND:
				if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
				{
					plci->dtmf_send_pulse_ms = GET_WORD(dtmf_parms[1].info);
					plci->dtmf_send_pause_ms = GET_WORD(dtmf_parms[2].info);
				}
				/* verify every digit is sendable in the selected mode */
				i = 0;
				j = 0;
				while ((i < dtmf_parms[3].length) && (j < DTMF_DIGIT_MAP_ENTRIES))
				{
					j = 0;
					while ((j < DTMF_DIGIT_MAP_ENTRIES)
					       && ((dtmf_parms[3].info[i + 1] != dtmf_digit_map[j].character)
						   || ((dtmf_digit_map[j].send_mask & mask) == 0)))
					{
						j++;
					}
					i++;
				}
				if (j == DTMF_DIGIT_MAP_ENTRIES)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Incorrect DTMF digit %02x",
							UnMapId(Id), (char *)(FILE_), __LINE__, dtmf_parms[3].info[i]));
					PUT_WORD(&result[1], DTMF_INCORRECT_DIGIT);
					break;
				}
				if (plci->dtmf_send_requests >= ARRAY_SIZE(plci->dtmf_msg_number_queue))
				{
					dbug(1, dprintf("[%06lx] %s,%d: DTMF request overrun",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_STATE;
					break;
				}
				api_save_msg(dtmf_parms, "wwws", &plci->saved_msg);
				start_internal_command(Id, plci, dtmf_command);
				return (false);

			default:
				dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__, plci->dtmf_cmd));
				PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
			}
		}
	}
	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
	      "wws", Info, SELECTOR_DTMF, result);
	return (false);
}
/*
 * Confirm the oldest queued DTMF send request to the application and
 * shift the remaining message numbers down by one slot.
 */
static void dtmf_confirmation(dword Id, PLCI *plci)
{
	word idx;
	byte result[4];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_confirmation",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	result[0] = 2;
	PUT_WORD(&result[1], DTMF_SUCCESS);
	if (plci->dtmf_send_requests == 0)
		return;
	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->dtmf_msg_number_queue[0],
	      "wws", GOOD, SELECTOR_DTMF, result);
	(plci->dtmf_send_requests)--;
	for (idx = 0; idx < plci->dtmf_send_requests; idx++)
		plci->dtmf_msg_number_queue[idx] = plci->dtmf_msg_number_queue[idx + 1];
}
/*
 * dtmf_indication - translate the DSP tone codes in msg[1..length-1]
 * into CAPI characters (filtered by the active listen modes) and
 * forward them to the application.  The translation is done in place;
 * when tone listening is active, an "unidentified tone" marker is
 * inserted before the first tone after silence, which may require
 * shifting the buffer tail up by one byte.
 */
static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length)
{
	word i, j, n;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_indication",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	n = 0; /* number of translated output characters so far */
	for (i = 1; i < length; i++)
	{
		/* find a map entry for this code matching an active listen mode */
		j = 0;
		while ((j < DTMF_DIGIT_MAP_ENTRIES)
		       && ((msg[i] != dtmf_digit_map[j].code)
			   || ((dtmf_digit_map[j].listen_mask & plci->dtmf_rec_active) == 0)))
		{
			j++;
		}
		if (j < DTMF_DIGIT_MAP_ENTRIES)
		{
			if ((dtmf_digit_map[j].listen_mask & DTMF_TONE_LISTEN_ACTIVE_FLAG)
			    && (plci->tone_last_indication_code == DTMF_SIGNAL_NO_TONE)
			    && (dtmf_digit_map[j].character != DTMF_SIGNAL_UNIDENTIFIED_TONE))
			{
				/* no free byte in front of this character: shift the
				   tail up one position to make room for the marker */
				if (n + 1 == i)
				{
					for (i = length; i > n + 1; i--)
						msg[i] = msg[i - 1];
					length++;
					i++;
				}
				msg[++n] = DTMF_SIGNAL_UNIDENTIFIED_TONE;
			}
			plci->tone_last_indication_code = dtmf_digit_map[j].character;
			msg[++n] = dtmf_digit_map[j].character;
		}
	}
	if (n != 0)
	{
		/* msg[0] becomes the structure length of the translated data */
		msg[0] = (byte) n;
		sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "wS", SELECTOR_DTMF, msg);
	}
}
/*------------------------------------------------------------------*/
/* DTMF parameters */
/*------------------------------------------------------------------*/
/*
 * Download the stored DTMF detection parameters to the DSP via a
 * TEL_CTRL signalling request.
 */
static void dtmf_parameter_write(PLCI *plci)
{
	word idx;
	byte parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE + 2];

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_write",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	parameter_buffer[0] = plci->dtmf_parameter_length + 1;
	parameter_buffer[1] = DSP_CTRL_SET_DTMF_PARAMETERS;
	idx = 0;
	while (idx < plci->dtmf_parameter_length)
	{
		parameter_buffer[2 + idx] = plci->dtmf_parameter_buffer[idx];
		idx++;
	}
	add_p(plci, FTY, parameter_buffer);
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
}
/*
 * Discard any stored DTMF detection parameters for this PLCI.
 */
static void dtmf_parameter_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_clear_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->dtmf_parameter_length = 0;
}
/*
 * dtmf_parameter_prepare_switch - B-channel switch hook; nothing to
 * flush for DTMF parameters, only traces the call.
 */
static void dtmf_parameter_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * dtmf_parameter_save_config - adjust-B hook; the parameters already
 * live in plci->dtmf_parameter_buffer, so only trace and succeed.
 */
static word dtmf_parameter_save_config(dword Id, PLCI *plci, byte Rc)
{
	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}
/*
 * dtmf_parameter_restore_config - adjust-B hook: re-download the stored
 * DTMF parameters after a B-channel reconfiguration when the DTMF
 * receiver facility is active and parameters exist.
 * Returns GOOD or _WRONG_STATE when the download failed.
 */
static word dtmf_parameter_restore_config(dword Id, PLCI *plci, byte Rc)
{
	word Info;

	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if ((plci->B1_facilities & B1_FACILITY_DTMFR)
	    && (plci->dtmf_parameter_length != 0))
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci->sig_req)
			{
				/* signalling entity busy: stay here and retry */
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
				break;
			}
			dtmf_parameter_write(plci);
			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_2;
			break;
		case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
			/* evaluate the return code of the download request */
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Restore DTMF parameters failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			break;
		}
	}
	return (Info);
}
/*------------------------------------------------------------------*/
/* Line interconnect facilities */
/*------------------------------------------------------------------*/
/* line-interconnect configuration: one LI_CONFIG entry per channel,
   with li_total_channels giving the number of entries
   (allocated/initialized elsewhere -- table starts out NULL) */
LI_CONFIG *li_config_table;
word li_total_channels;
/*------------------------------------------------------------------*/
/* translate a CHI information element to a channel number */
/* returns 0xff - any channel */
/* 0xfe - chi wrong coding */
/* 0xfd - D-channel */
/* 0x00 - no channel */
/* else channel number / PRI: timeslot */
/* if channels is provided we accept more than one channel. */
/*------------------------------------------------------------------*/
/*
 * chi_to_channel - decode a Q.931 Channel Identification IE (chi[0] is
 * the IE length, content follows).  See the return-code table in the
 * comment block above; on PRI a channel map can also be delivered via
 * *pchannelmap.  Bit 0x40 in the result marks exclusive channel use.
 */
static byte chi_to_channel(byte *chi, dword *pchannelmap)
{
	int p;
	int i;
	dword map;
	byte excl;
	byte ofs;
	byte ch;

	if (pchannelmap) *pchannelmap = 0;
	if (!chi[0]) return 0xff; /* empty IE: any channel */
	excl = 0;
	if (chi[1] & 0x20) {
		/* PRI interface */
		if (chi[0] == 1 && chi[1] == 0xac) return 0xfd; /* exclusive d-channel */
		/* advance to the end of the first octet group (ext bit 0x80) */
		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if ((chi[1] | 0xc8) != 0xe9) return 0xfe;
		if (chi[1] & 0x08) excl = 0x40; /* exclusive channel */
		/* int. id present */
		if (chi[1] & 0x40) {
			/* skip the interface identifier octet group */
			p = i + 1;
			for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
			if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		}
		/* coding standard, Number/Map, Channel Type */
		p = i + 1;
		for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if ((chi[p] | 0xd0) != 0xd3) return 0xfe;
		/* Number/Map */
		if (chi[p] & 0x10) {
			/* map: 3 or 4 slot-map octets; ofs adjusts for 24 channels */
			if ((chi[0] - p) == 4) ofs = 0;
			else if ((chi[0] - p) == 3) ofs = 1;
			else return 0xfe;
			ch = 0;
			map = 0;
			for (i = 0; i < 4 && p < chi[0]; i++) {
				p++;
				ch += 8;
				map <<= 8;
				if (chi[p]) {
					/* ch := position of the lowest set bit in this octet */
					for (ch = 0; !(chi[p] & (1 << ch)); ch++);
					map |= chi[p];
				}
			}
			ch += ofs;
			map <<= ofs;
		}
		else {
			/* number */
			p = i + 1;
			ch = chi[p] & 0x3f;
			if (pchannelmap) {
				/* caller accepts multiple channels: collect them all */
				if ((byte)(chi[0] - p) > 30) return 0xfe;
				map = 0;
				for (i = p; i <= chi[0]; i++) {
					if ((chi[i] & 0x7f) > 31) return 0xfe;
					map |= (1L << (chi[i] & 0x7f));
				}
			}
			else {
				/* exactly one channel number expected */
				if (p != chi[0]) return 0xfe;
				if (ch > 31) return 0xfe;
				map = (1L << ch);
			}
			if (chi[p] & 0x40) return 0xfe;
		}
		if (pchannelmap) *pchannelmap = map;
		else if (map != ((dword)(1L << ch))) return 0xfe; /* multiple channels but no map accepted */
		return (byte)(excl | ch);
	}
	else { /* not PRI */
		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
		if (i != chi[0] || !(chi[i] & 0x80)) return 0xfe;
		if (chi[1] & 0x08) excl = 0x40;

		/* info channel selection bits of octet 3: B1/B2/any/D */
		switch (chi[1] | 0x98) {
		case 0x98: return 0;
		case 0x99:
			if (pchannelmap) *pchannelmap = 2;
			return excl | 1;
		case 0x9a:
			if (pchannelmap) *pchannelmap = 4;
			return excl | 2;
		case 0x9b: return 0xff;
		case 0x9c: return 0xfd; /* d-ch */
		default: return 0xfe;
		}
	}
}
/* Assign a B-channel id (taken from an escape element) to this PLCI
   and update ownership of the corresponding li_config_table entries.
   On BRI, an active advanced-voice PLCI is moved to the other
   B-channel so both PLCIs keep distinct table entries. */
static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id)
{
DIVA_CAPI_ADAPTER *a;
PLCI *splci;
byte old_id;
a = plci->adapter;
old_id = plci->li_bchannel_id;
if (a->li_pri)
{
/* release the entry owned under the previous id */
if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
li_config_table[a->li_base + (old_id - 1)].plci = NULL;
plci->li_bchannel_id = (bchannel_id & 0x1f) + 1;
if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
}
else
{
/* BRI: only channel ids 1 and 2 are valid */
if (((bchannel_id & 0x03) == 1) || ((bchannel_id & 0x03) == 2))
{
if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
li_config_table[a->li_base + (old_id - 1)].plci = NULL;
plci->li_bchannel_id = bchannel_id & 0x03;
/* move the advanced-voice PLCI to the other B-channel if needed */
if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
{
splci = a->AdvSignalPLCI;
if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
{
if ((splci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
{
li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
}
splci->li_bchannel_id = 3 - plci->li_bchannel_id;
li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id_esc %d",
(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
(char *)(FILE_), __LINE__, splci->li_bchannel_id));
}
}
if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
}
}
/* id assigned for the first time: start with a clean mixer state */
if ((old_id == 0) && (plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
mixer_clear_config(plci);
}
dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id_esc %d %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, bchannel_id, plci->li_bchannel_id));
}
/* Assign a B-channel id decoded from a CHI information element to this
   PLCI and update li_config_table ownership. Mirrors
   mixer_set_bchannel_id_esc() but takes the raw CHI element; error
   codes from chi_to_channel() (0x80 bit set) are ignored. */
static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
{
DIVA_CAPI_ADAPTER *a;
PLCI *splci;
byte ch, old_id;
a = plci->adapter;
old_id = plci->li_bchannel_id;
ch = chi_to_channel(chi, NULL);
if (!(ch & 0x80))
{
if (a->li_pri)
{
/* release the entry owned under the previous id */
if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
li_config_table[a->li_base + (old_id - 1)].plci = NULL;
plci->li_bchannel_id = (ch & 0x1f) + 1;
if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
}
else
{
/* BRI: only channels 1 and 2 are valid */
if (((ch & 0x1f) == 1) || ((ch & 0x1f) == 2))
{
if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
li_config_table[a->li_base + (old_id - 1)].plci = NULL;
plci->li_bchannel_id = ch & 0x1f;
/* move the advanced-voice PLCI to the other B-channel if needed */
if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
{
splci = a->AdvSignalPLCI;
if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
{
if ((splci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
{
li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
}
splci->li_bchannel_id = 3 - plci->li_bchannel_id;
li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
(char *)(FILE_), __LINE__, splci->li_bchannel_id));
}
}
if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
}
}
}
/* id assigned for the first time: start with a clean mixer state */
if ((old_id == 0) && (plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
mixer_clear_config(plci);
}
dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id %02x %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, ch, plci->li_bchannel_id));
}
#define MIXER_MAX_DUMP_CHANNELS 34
/* Recompute the complete mixer coefficient matrix from the per-channel
   flag tables (flag_table) and channel flags (chflags). Works in
   several passes over li_config_table; the hex dump at the end only
   goes to the debug trace (at most MIXER_MAX_DUMP_CHANNELS channels). */
static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
word n, i, j;
char *p;
char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
dbug(1, dprintf("[%06lx] %s,%d: mixer_calculate_coefs",
(dword)(UnMapController(a->Id)), (char *)(FILE_), __LINE__));
/* Pass 1: mark channels that take part in any interconnect
   (LI_CHANNEL_INVOLVED) or in a conference (LI_CHANNEL_CONFERENCE). */
for (i = 0; i < li_total_channels; i++)
{
li_config_table[i].channel &= LI_CHANNEL_ADDRESSES_SET;
if (li_config_table[i].chflags != 0)
li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
else
{
for (j = 0; j < li_total_channels; j++)
{
if (((li_config_table[i].flag_table[j]) != 0)
|| ((li_config_table[j].flag_table[i]) != 0))
{
li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
}
if (((li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) != 0)
|| ((li_config_table[j].flag_table[i] & LI_FLAG_CONFERENCE) != 0))
{
li_config_table[i].channel |= LI_CHANNEL_CONFERENCE;
}
}
}
}
/* Pass 2: reset the coefficient direction bits and seed CH_CH for
   direct conference connections. */
for (i = 0; i < li_total_channels; i++)
{
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC);
if (li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE)
li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
}
}
/* Pass 3: transitive closure over conference members, so every member
   hears every other member. */
for (n = 0; n < li_total_channels; n++)
{
if (li_config_table[n].channel & LI_CHANNEL_CONFERENCE)
{
for (i = 0; i < li_total_channels; i++)
{
if (li_config_table[i].channel & LI_CHANNEL_CONFERENCE)
{
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].coef_table[j] |=
li_config_table[i].coef_table[n] & li_config_table[n].coef_table[j];
}
}
}
}
}
/* Pass 4: remove the self-loop created by the closure, but record the
   conference membership in flag_table; an explicit self-conference
   keeps its CH_CH coefficient. */
for (i = 0; i < li_total_channels; i++)
{
if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
{
li_config_table[i].coef_table[i] &= ~LI_COEF_CH_CH;
for (j = 0; j < li_total_channels; j++)
{
if (li_config_table[i].coef_table[j] & LI_COEF_CH_CH)
li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE;
}
if (li_config_table[i].flag_table[i] & LI_FLAG_CONFERENCE)
li_config_table[i].coef_table[i] |= LI_COEF_CH_CH;
}
}
/* Pass 5: translate the individual interconnect flags and the channel
   flags (monitor / mix / loop) into coefficient bits. */
for (i = 0; i < li_total_channels; i++)
{
if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
{
for (j = 0; j < li_total_channels; j++)
{
if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
if (li_config_table[i].flag_table[j] & LI_FLAG_MONITOR)
li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
if (li_config_table[i].flag_table[j] & LI_FLAG_MIX)
li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
if (li_config_table[i].flag_table[j] & LI_FLAG_PCCONNECT)
li_config_table[i].coef_table[j] |= LI_COEF_PC_PC;
}
if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
{
for (j = 0; j < li_total_channels; j++)
{
if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
{
li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
if (li_config_table[j].chflags & LI_CHFLAG_MIX)
li_config_table[i].coef_table[j] |= LI_COEF_PC_CH | LI_COEF_PC_PC;
}
}
}
if (li_config_table[i].chflags & LI_CHFLAG_MIX)
{
for (j = 0; j < li_total_channels; j++)
{
if (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT)
li_config_table[j].coef_table[i] |= LI_COEF_PC_CH;
}
}
if (li_config_table[i].chflags & LI_CHFLAG_LOOP)
{
/* loop: route every source of this channel to every sink of it */
for (j = 0; j < li_total_channels; j++)
{
if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
{
for (n = 0; n < li_total_channels; n++)
{
if (li_config_table[n].flag_table[i] & LI_FLAG_INTERCONNECT)
{
li_config_table[n].coef_table[j] |= LI_COEF_CH_CH;
if (li_config_table[j].chflags & LI_CHFLAG_MIX)
{
li_config_table[n].coef_table[j] |= LI_COEF_PC_CH;
if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
li_config_table[n].coef_table[j] |= LI_COEF_CH_PC | LI_COEF_PC_PC;
}
else if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
li_config_table[n].coef_table[j] |= LI_COEF_CH_PC;
}
}
}
}
}
}
}
/* Pass 6: derive the per-channel ACTIVE / RX_DATA / TX_DATA bits; an
   involved but inactive channel gets its PC path looped back. */
for (i = 0; i < li_total_channels; i++)
{
if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
{
if (li_config_table[i].chflags & (LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP))
li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
if (li_config_table[i].chflags & LI_CHFLAG_MIX)
li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
for (j = 0; j < li_total_channels; j++)
{
if ((li_config_table[i].flag_table[j] &
(LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_MONITOR))
|| (li_config_table[j].flag_table[i] &
(LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX)))
{
li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
}
if (li_config_table[i].flag_table[j] & (LI_FLAG_PCCONNECT | LI_FLAG_MONITOR))
li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
if (li_config_table[j].flag_table[i] & (LI_FLAG_PCCONNECT | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX))
li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
}
if (!(li_config_table[i].channel & LI_CHANNEL_ACTIVE))
{
li_config_table[i].coef_table[i] |= LI_COEF_PC_CH | LI_COEF_CH_PC;
li_config_table[i].channel |= LI_CHANNEL_TX_DATA | LI_CHANNEL_RX_DATA;
}
}
}
/* Pass 7: a channel playing an announcement only receives the PC
   stream - drop all other inputs to it. */
for (i = 0; i < li_total_channels; i++)
{
if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
{
j = 0;
while ((j < li_total_channels) && !(li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT))
j++;
if (j < li_total_channels)
{
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_PC_CH);
if (li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT)
li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
}
}
}
}
/* Debug dump of the resulting tables (trace output only). */
n = li_total_channels;
if (n > MIXER_MAX_DUMP_CHANNELS)
n = MIXER_MAX_DUMP_CHANNELS;
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
*(p++) = hex_digit_table[li_config_table[j].curchnl >> 4];
*(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf];
}
*p = '\0';
dbug(1, dprintf("[%06lx] CURRENT %s",
(dword)(UnMapController(a->Id)), (char *)hex_line));
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
*(p++) = hex_digit_table[li_config_table[j].channel >> 4];
*(p++) = hex_digit_table[li_config_table[j].channel & 0xf];
}
*p = '\0';
dbug(1, dprintf("[%06lx] CHANNEL %s",
(dword)(UnMapController(a->Id)), (char *)hex_line));
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
*(p++) = hex_digit_table[li_config_table[j].chflags >> 4];
*(p++) = hex_digit_table[li_config_table[j].chflags & 0xf];
}
*p = '\0';
dbug(1, dprintf("[%06lx] CHFLAG %s",
(dword)(UnMapController(a->Id)), (char *)hex_line));
for (i = 0; i < n; i++)
{
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
*(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4];
*(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf];
}
*p = '\0';
dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
(dword)(UnMapController(a->Id)), i, (char *)hex_line));
}
for (i = 0; i < n; i++)
{
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
*(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4];
*(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf];
}
*p = '\0';
dbug(1, dprintf("[%06lx] COEF[%02x]%s",
(dword)(UnMapController(a->Id)), i, (char *)hex_line));
}
}
/* Order in which the four coefficient direction bits are programmed on
   PRI adapters, with the line flags to emit for each direction. */
static struct
{
byte mask;
byte line_flags;
} mixer_write_prog_pri[] =
{
{ LI_COEF_CH_CH, 0 },
{ LI_COEF_CH_PC, MIXER_COEF_LINE_TO_PC_FLAG },
{ LI_COEF_PC_CH, MIXER_COEF_LINE_FROM_PC_FLAG },
{ LI_COEF_PC_PC, MIXER_COEF_LINE_TO_PC_FLAG | MIXER_COEF_LINE_FROM_PC_FLAG }
};
/* Coefficient programming order for BRI adapters: one entry per mixer
   coefficient byte, giving the source/destination channel indices
   (relative to li_base), the coefficient bit tested, and a value that
   overrides the computed coefficient when non-zero. */
static struct
{
byte from_ch;
byte to_ch;
byte mask;
byte xconnect_override;
} mixer_write_prog_bri[] =
{
{ 0, 0, LI_COEF_CH_CH, 0x01 }, /* B to B */
{ 1, 0, LI_COEF_CH_CH, 0x01 }, /* Alt B to B */
{ 0, 0, LI_COEF_PC_CH, 0x80 }, /* PC to B */
{ 1, 0, LI_COEF_PC_CH, 0x01 }, /* Alt PC to B */
{ 2, 0, LI_COEF_CH_CH, 0x00 }, /* IC to B */
{ 3, 0, LI_COEF_CH_CH, 0x00 }, /* Alt IC to B */
{ 0, 0, LI_COEF_CH_PC, 0x80 }, /* B to PC */
{ 1, 0, LI_COEF_CH_PC, 0x01 }, /* Alt B to PC */
{ 0, 0, LI_COEF_PC_PC, 0x01 }, /* PC to PC */
{ 1, 0, LI_COEF_PC_PC, 0x01 }, /* Alt PC to PC */
{ 2, 0, LI_COEF_CH_PC, 0x00 }, /* IC to PC */
{ 3, 0, LI_COEF_CH_PC, 0x00 }, /* Alt IC to PC */
{ 0, 2, LI_COEF_CH_CH, 0x00 }, /* B to IC */
{ 1, 2, LI_COEF_CH_CH, 0x00 }, /* Alt B to IC */
{ 0, 2, LI_COEF_PC_CH, 0x00 }, /* PC to IC */
{ 1, 2, LI_COEF_PC_CH, 0x00 }, /* Alt PC to IC */
{ 2, 2, LI_COEF_CH_CH, 0x00 }, /* IC to IC */
{ 3, 2, LI_COEF_CH_CH, 0x00 }, /* Alt IC to IC */
{ 1, 1, LI_COEF_CH_CH, 0x01 }, /* Alt B to Alt B */
{ 0, 1, LI_COEF_CH_CH, 0x01 }, /* B to Alt B */
{ 1, 1, LI_COEF_PC_CH, 0x80 }, /* Alt PC to Alt B */
{ 0, 1, LI_COEF_PC_CH, 0x01 }, /* PC to Alt B */
{ 3, 1, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt B */
{ 2, 1, LI_COEF_CH_CH, 0x00 }, /* IC to Alt B */
{ 1, 1, LI_COEF_CH_PC, 0x80 }, /* Alt B to Alt PC */
{ 0, 1, LI_COEF_CH_PC, 0x01 }, /* B to Alt PC */
{ 1, 1, LI_COEF_PC_PC, 0x01 }, /* Alt PC to Alt PC */
{ 0, 1, LI_COEF_PC_PC, 0x01 }, /* PC to Alt PC */
{ 3, 1, LI_COEF_CH_PC, 0x00 }, /* Alt IC to Alt PC */
{ 2, 1, LI_COEF_CH_PC, 0x00 }, /* IC to Alt PC */
{ 1, 3, LI_COEF_CH_CH, 0x00 }, /* Alt B to Alt IC */
{ 0, 3, LI_COEF_CH_CH, 0x00 }, /* B to Alt IC */
{ 1, 3, LI_COEF_PC_CH, 0x00 }, /* Alt PC to Alt IC */
{ 0, 3, LI_COEF_PC_CH, 0x00 }, /* PC to Alt IC */
{ 3, 3, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt IC */
{ 2, 3, LI_COEF_CH_CH, 0x00 } /* IC to Alt IC */
};
/* Index permutation for mixer_write_prog_bri entries used when the
   PLCI sits on the second B-channel (channels swapped): maps each
   entry to the one with B and Alt B exchanged. */
static byte mixer_swapped_index_bri[] =
{
18, /* B to B */
19, /* Alt B to B */
20, /* PC to B */
21, /* Alt PC to B */
22, /* IC to B */
23, /* Alt IC to B */
24, /* B to PC */
25, /* Alt B to PC */
26, /* PC to PC */
27, /* Alt PC to PC */
28, /* IC to PC */
29, /* Alt IC to PC */
30, /* B to IC */
31, /* Alt B to IC */
32, /* PC to IC */
33, /* Alt PC to IC */
34, /* IC to IC */
35, /* Alt IC to IC */
0, /* Alt B to Alt B */
1, /* B to Alt B */
2, /* Alt PC to Alt B */
3, /* PC to Alt B */
4, /* Alt IC to Alt B */
5, /* IC to Alt B */
6, /* Alt B to Alt PC */
7, /* B to Alt PC */
8, /* Alt PC to Alt PC */
9, /* PC to Alt PC */
10, /* Alt IC to Alt PC */
11, /* IC to Alt PC */
12, /* Alt B to Alt IC */
13, /* B to Alt IC */
14, /* Alt PC to Alt IC */
15, /* PC to Alt IC */
16, /* Alt IC to Alt IC */
17 /* IC to Alt IC */
};
/* Order in which the four coefficient directions are written for
   cross-adapter connections (xconnect); from_pc / to_pc select the PC
   or line side transfer address for each direction. */
static struct
{
byte mask;
byte from_pc;
byte to_pc;
} xconnect_write_prog[] =
{
{ LI_COEF_CH_CH, false, false },
{ LI_COEF_CH_PC, false, true },
{ LI_COEF_PC_CH, true, false },
{ LI_COEF_PC_PC, true, true }
};
/*
 * Query the transfer addresses of this PLCI's B-channel: build an
 * XCONNECT_FROM UDATA request covering the line side and the PC side
 * of the channel and pass it to the network layer.
 */
static void xconnect_query_addresses(PLCI *plci)
{
	DIVA_CAPI_ADAPTER *adapter;
	word channel, value;
	byte *dst;

	dbug(1, dprintf("[%06lx] %s,%d: xconnect_query_addresses",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	adapter = plci->adapter;
	/* On PRI the channel id must still be owned by this PLCI. */
	if (adapter->li_pri
	    && ((plci->li_bchannel_id == 0)
		|| (li_config_table[adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci)))
	{
		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
				(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
				(char *)(FILE_), __LINE__));
		return;
	}
	channel = adapter->li_pri ? plci->li_bchannel_id - 1 : 0;
	dst = plci->internal_req_buffer;
	*(dst++) = UDATA_REQUEST_XCONNECT_FROM;
	/* channel number, line side (little endian word) */
	value = channel;
	*(dst++) = (byte) value;
	*(dst++) = (byte)(value >> 8);
	/* channel number, PC side */
	value = channel | XCONNECT_CHANNEL_PORT_PC;
	*(dst++) = (byte) value;
	*(dst++) = (byte)(value >> 8);
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NData[0].PLength = dst - plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request(&plci->NL);
}
/*
 * Arm the coefficient write state machine: remember which internal
 * command to resume and restart the channel scan at zero. The actual
 * programming is performed by xconnect_write_coefs_process().
 */
static void xconnect_write_coefs(PLCI *plci, word internal_command)
{
	dbug(1, dprintf("[%06lx] %s,%d: xconnect_write_coefs %04x",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__, internal_command));
	plci->li_write_channel = 0;
	plci->li_write_command = internal_command;
}
/* Program one chunk of interconnect coefficients for this PLCI.
   Called repeatedly from the internal command state machine;
   plci->li_write_channel holds the scan position between calls
   (j == li_total_channels means "write the local mixer coefficients",
   j > li_total_channels means "done"). The changes still to be written
   are tracked by XOR-ing the low (requested) and high (written)
   nibbles of each coef_table entry. Returns false if the adapter
   reported an error for the previous write, true otherwise. */
static byte xconnect_write_coefs_process(dword Id, PLCI *plci, byte Rc)
{
DIVA_CAPI_ADAPTER *a;
word w, n, i, j, r, s, to_ch;
dword d;
byte *p;
struct xconnect_transfer_address_s *transfer_address;
byte ch_map[MIXER_CHANNELS_BRI];
dbug(1, dprintf("[%06x] %s,%d: xconnect_write_coefs_process %02x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->li_write_channel));
a = plci->adapter;
if ((plci->li_bchannel_id == 0)
|| (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
{
dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
UnMapId(Id), (char *)(FILE_), __LINE__));
return (true);
}
i = a->li_base + (plci->li_bchannel_id - 1);
j = plci->li_write_channel;
p = plci->internal_req_buffer;
if (j != 0)
{
/* check the result of the previous write request */
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: LI write coefs failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
return (false);
}
}
if (li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
{
/* adapter supports cross connects: write XCONNECT_TO entries first */
r = 0;
s = 0;
if (j < li_total_channels)
{
if (li_config_table[i].channel & LI_CHANNEL_ADDRESSES_SET)
{
/* s: directions usable with the known transfer addresses */
s = ((li_config_table[i].send_b.card_address.low | li_config_table[i].send_b.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_PC | LI_COEF_PC_PC)) &
((li_config_table[i].send_pc.card_address.low | li_config_table[i].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_PC_CH));
}
/* r: directions whose requested and written state differ */
r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
/* skip channels with nothing to write or unusable addresses */
while ((j < li_total_channels)
&& ((r == 0)
|| (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
|| (!li_config_table[j].adapter->li_pri
&& (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
|| (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
|| (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
&& (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
|| !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
|| ((li_config_table[j].adapter->li_base != a->li_base)
&& !(r & s &
((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))))
{
j++;
if (j < li_total_channels)
r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
}
}
if (j < li_total_channels)
{
plci->internal_command = plci->li_write_command;
if (plci_nl_busy(plci))
return (true);
to_ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
*(p++) = UDATA_REQUEST_XCONNECT_TO;
do
{
if (li_config_table[j].adapter->li_base != a->li_base)
{
/* cross-adapter: restrict to directions both sides support */
r &= s &
((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC));
}
n = 0;
do
{
if (r & xconnect_write_prog[n].mask)
{
/* append one transfer descriptor:
   source address, channel, coefficient value */
if (xconnect_write_prog[n].from_pc)
transfer_address = &(li_config_table[j].send_pc);
else
transfer_address = &(li_config_table[j].send_b);
d = transfer_address->card_address.low;
*(p++) = (byte) d;
*(p++) = (byte)(d >> 8);
*(p++) = (byte)(d >> 16);
*(p++) = (byte)(d >> 24);
d = transfer_address->card_address.high;
*(p++) = (byte) d;
*(p++) = (byte)(d >> 8);
*(p++) = (byte)(d >> 16);
*(p++) = (byte)(d >> 24);
d = transfer_address->offset;
*(p++) = (byte) d;
*(p++) = (byte)(d >> 8);
*(p++) = (byte)(d >> 16);
*(p++) = (byte)(d >> 24);
w = xconnect_write_prog[n].to_pc ? to_ch | XCONNECT_CHANNEL_PORT_PC : to_ch;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
w = ((li_config_table[i].coef_table[j] & xconnect_write_prog[n].mask) == 0) ? 0x01 :
(li_config_table[i].adapter->u_law ?
(li_config_table[j].adapter->u_law ? 0x80 : 0x86) :
(li_config_table[j].adapter->u_law ? 0x7a : 0x80));
*(p++) = (byte) w;
*(p++) = (byte) 0;
/* mark this direction as written (high nibble) */
li_config_table[i].coef_table[j] ^= xconnect_write_prog[n].mask << 4;
}
n++;
} while ((n < ARRAY_SIZE(xconnect_write_prog))
&& ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
if (n == ARRAY_SIZE(xconnect_write_prog))
{
/* this channel done: advance to the next channel with work */
do
{
j++;
if (j < li_total_channels)
r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
} while ((j < li_total_channels)
&& ((r == 0)
|| (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
|| (!li_config_table[j].adapter->li_pri
&& (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
|| (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
|| (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
&& (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
|| !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
|| ((li_config_table[j].adapter->li_base != a->li_base)
&& !(r & s &
((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))));
}
} while ((j < li_total_channels)
&& ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
}
else if (j == li_total_channels)
{
/* all xconnects written: now set the local mixer coefficients */
plci->internal_command = plci->li_write_command;
if (plci_nl_busy(plci))
return (true);
if (a->li_pri)
{
*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
w = 0;
if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
w |= MIXER_FEATURE_ENABLE_TX_DATA;
if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
w |= MIXER_FEATURE_ENABLE_RX_DATA;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
}
else
{
*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
w = 0;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
&& (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
{
w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
}
if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
w |= MIXER_FEATURE_ENABLE_TX_DATA;
if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
w |= MIXER_FEATURE_ENABLE_RX_DATA;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
/* build the channel map; on the second B-channel the pairs
   are swapped */
for (j = 0; j < sizeof(ch_map); j += 2)
{
if (plci->li_bchannel_id == 2)
{
ch_map[j] = (byte)(j + 1);
ch_map[j + 1] = (byte) j;
}
else
{
ch_map[j] = (byte) j;
ch_map[j + 1] = (byte)(j + 1);
}
}
for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
{
i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
{
*p = (mixer_write_prog_bri[n].xconnect_override != 0) ?
mixer_write_prog_bri[n].xconnect_override :
((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
if ((i >= a->li_base + MIXER_BCHANNELS_BRI) || (j >= a->li_base + MIXER_BCHANNELS_BRI))
{
w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
}
}
else
{
/* channel not involved: use the advanced voice
   coefficients if available */
*p = 0x00;
if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
{
w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
}
}
p++;
}
}
/* mark the whole sequence as finished */
j = li_total_channels + 1;
}
}
else
{
/* no xconnect support: program the plain mixer coefficients only */
if (j <= li_total_channels)
{
plci->internal_command = plci->li_write_command;
if (plci_nl_busy(plci))
return (true);
if (j < a->li_base)
j = a->li_base;
if (a->li_pri)
{
*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
w = 0;
if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
w |= MIXER_FEATURE_ENABLE_TX_DATA;
if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
w |= MIXER_FEATURE_ENABLE_RX_DATA;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
for (n = 0; n < ARRAY_SIZE(mixer_write_prog_pri); n++)
{
/* column of the coefficient matrix for this line */
*(p++) = (byte)((plci->li_bchannel_id - 1) | mixer_write_prog_pri[n].line_flags);
for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
{
w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
if (w & mixer_write_prog_pri[n].mask)
{
*(p++) = (li_config_table[i].coef_table[j] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
li_config_table[i].coef_table[j] ^= mixer_write_prog_pri[n].mask << 4;
}
else
*(p++) = 0x00;
}
/* row of the coefficient matrix for this line */
*(p++) = (byte)((plci->li_bchannel_id - 1) | MIXER_COEF_LINE_ROW_FLAG | mixer_write_prog_pri[n].line_flags);
for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
{
w = ((li_config_table[j].coef_table[i] & 0xf) ^ (li_config_table[j].coef_table[i] >> 4));
if (w & mixer_write_prog_pri[n].mask)
{
*(p++) = (li_config_table[j].coef_table[i] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
li_config_table[j].coef_table[i] ^= mixer_write_prog_pri[n].mask << 4;
}
else
*(p++) = 0x00;
}
}
}
else
{
*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
w = 0;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
&& (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
{
w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
}
if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
w |= MIXER_FEATURE_ENABLE_TX_DATA;
if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
w |= MIXER_FEATURE_ENABLE_RX_DATA;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
/* build the channel map; on the second B-channel the pairs
   are swapped */
for (j = 0; j < sizeof(ch_map); j += 2)
{
if (plci->li_bchannel_id == 2)
{
ch_map[j] = (byte)(j + 1);
ch_map[j + 1] = (byte) j;
}
else
{
ch_map[j] = (byte) j;
ch_map[j + 1] = (byte)(j + 1);
}
}
for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
{
i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
{
*p = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
}
else
{
/* channel not involved: use the advanced voice
   coefficients if available */
*p = 0x00;
if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
{
w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
}
}
p++;
}
}
/* mark the whole sequence as finished */
j = li_total_channels + 1;
}
}
plci->li_write_channel = j;
/* hand the assembled request to the network layer, if any was built */
if (p != plci->internal_req_buffer)
{
plci->NData[0].P = plci->internal_req_buffer;
plci->NData[0].PLength = p - plci->internal_req_buffer;
plci->NL.X = plci->NData;
plci->NL.ReqCh = 0;
plci->NL.Req = plci->nl_req = (byte) N_UDATA;
plci->adapter->request(&plci->NL);
}
return (true);
}
/* Notify PLCIs about a changed interconnect configuration by queueing
   a silent-update FACILITY_REQ (LI_REQ_SILENT_UPDATE) to each affected
   application. With others != 0 every PLCI registered in
   li_config_table is notified, otherwise only this PLCI.
   li_notify_update suppresses duplicate notifications; it is left set
   when api_put() reports a full queue so the update is retried. */
static void mixer_notify_update(PLCI *plci, byte others)
{
DIVA_CAPI_ADAPTER *a;
word i, w;
PLCI *notify_plci;
byte msg[sizeof(CAPI_MSG_HEADER) + 6];
dbug(1, dprintf("[%06lx] %s,%d: mixer_notify_update %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, others));
a = plci->adapter;
if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
{
/* guard against notifying ourselves while walking the table */
if (others)
plci->li_notify_update = true;
i = 0;
do
{
notify_plci = NULL;
if (others)
{
/* next table entry that has a PLCI attached */
while ((i < li_total_channels) && (li_config_table[i].plci == NULL))
i++;
if (i < li_total_channels)
notify_plci = li_config_table[i++].plci;
}
else
{
if ((plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
notify_plci = plci;
}
}
if ((notify_plci != NULL)
&& !notify_plci->li_notify_update
&& (notify_plci->appl != NULL)
&& (notify_plci->State)
&& notify_plci->NL.Id && !notify_plci->nl_remove_id)
{
notify_plci->li_notify_update = true;
((CAPI_MSG *) msg)->header.length = 18;
((CAPI_MSG *) msg)->header.appl_id = notify_plci->appl->Id;
((CAPI_MSG *) msg)->header.command = _FACILITY_R;
((CAPI_MSG *) msg)->header.number = 0;
((CAPI_MSG *) msg)->header.controller = notify_plci->adapter->Id;
((CAPI_MSG *) msg)->header.plci = notify_plci->Id;
((CAPI_MSG *) msg)->header.ncci = 0;
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
if (w != _QUEUE_FULL)
{
if (w != 0)
{
dbug(1, dprintf("[%06lx] %s,%d: Interconnect notify failed %06x %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__,
(dword)((notify_plci->Id << 8) | UnMapController(notify_plci->adapter->Id)), w));
}
notify_plci->li_notify_update = false;
}
}
} while (others && (notify_plci != NULL));
if (others)
plci->li_notify_update = false;
}
}
static void mixer_clear_config(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
word i, j;
dbug(1, dprintf("[%06lx] %s,%d: mixer_clear_config",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
plci->li_notify_update = false;
plci->li_plci_b_write_pos = 0;
plci->li_plci_b_read_pos = 0;
plci->li_plci_b_req_pos = 0;
a = plci->adapter;
if ((plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[j].flag_table[i] = 0;
li_config_table[i].flag_table[j] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (!a->li_pri)
{
li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
}
}
}
}
}
/*
 * Prepare for a B-channel switch: flush every pending coefficient-set
 * indication from the queue.  At least one pass is always made, matching
 * the original do/while behaviour.
 */
static void mixer_prepare_switch(dword Id, PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: mixer_prepare_switch",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	mixer_indication_coefs_set(Id, plci);
	while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
		mixer_indication_coefs_set(Id, plci);
}
/*
 * Adjust-B helper: save the mixer configuration before a B1 resource change.
 * Masks every coefficient entry of this PLCI's channel down to its low
 * nibble (the requested-coefficient bits) and, on BRI, keeps the default PC
 * connection bits on the diagonal.  Always returns GOOD.
 */
static word mixer_save_config(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word ch, n;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_save_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	a = plci->adapter;
	if ((plci->li_bchannel_id == 0)
	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
		return (GOOD);
	ch = a->li_base + (plci->li_bchannel_id - 1);
	for (n = 0; n < li_total_channels; n++)
	{
		li_config_table[ch].coef_table[n] &= 0xf;
		li_config_table[n].coef_table[ch] &= 0xf;
	}
	if (!a->li_pri)
		li_config_table[ch].coef_table[ch] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
	return (GOOD);
}
/*
 * Adjust-B state machine step: restore the mixer configuration after a
 * B1 resource change.  On XCONNECT-capable adapters the transfer addresses
 * are re-queried first, then the coefficient tables are rewritten.
 * Returns GOOD while progressing, or a CAPI error code on failure.
 * NOTE: the switch cases intentionally fall through as the state advances.
 */
static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word Info;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_restore_config %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	a = plci->adapter;
	if ((plci->B1_facilities & B1_FACILITY_MIXER)
	    && (plci->li_bchannel_id != 0)
	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_MIXER_1:
			if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
			{
				plci->internal_command = plci->adjust_b_command;
				if (plci_nl_busy(plci))
				{
					/* network layer busy: stay in this state and retry */
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
					break;
				}
				xconnect_query_addresses(plci);
				plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_2;
				break;
			}
			/* no XCONNECT: skip the address query states entirely */
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			Rc = OK;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_2:
		case ADJUST_B_RESTORE_MIXER_3:
		case ADJUST_B_RESTORE_MIXER_4:
			if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B query addresses failed %02x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
				Info = _WRONG_STATE;
				break;
			}
			/* states 2..4 wait for both the request completion (Rc == OK)
			   and the address indication (Rc == 0), in either order, before
			   advancing to state 5 */
			if (Rc == OK)
			{
				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_3;
				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			}
			else if (Rc == 0)
			{
				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_4;
				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
			}
			if (plci->adjust_b_state != ADJUST_B_RESTORE_MIXER_5)
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_5:
			xconnect_write_coefs(plci, plci->adjust_b_command);
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6;
			Rc = OK;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_6:
			if (!xconnect_write_coefs_process(Id, plci, Rc))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				Info = _FACILITY_NOT_SUPPORTED;
				break;
			}
			if (plci->internal_command)
				break;
			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_7;
			/* fall through */
		case ADJUST_B_RESTORE_MIXER_7:
			break;
		}
	}
	return (Info);
}
/*
 * Internal command handler for the line-interconnect requests queued by
 * mixer_request(): loads/unloads the B1 mixer facility as needed and writes
 * the coefficient tables, then refreshes the curchnl entries of the PLCI's
 * channel(s) in li_config_table.  The inner switch cases intentionally fall
 * through as the command progresses.
 *
 * Fix: the "Channel id wiped out" dbug used "[%06x]" while every other dbug
 * in this file prints UnMapId(Id) with "[%06lx]" — a printf format/argument
 * mismatch; the specifier is now consistent.
 */
static void mixer_command(dword Id, PLCI *plci, byte Rc)
{
	DIVA_CAPI_ADAPTER *a;
	word i, internal_command;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_command %02x %04x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
			plci->li_cmd));
	a = plci->adapter;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (plci->li_cmd)
	{
	case LI_REQ_CONNECT:
	case LI_REQ_DISCONNECT:
	case LI_REQ_SILENT_UPDATE:
		switch (internal_command)
		{
		default:
			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			{
				/* channel takes part in an interconnect: load the mixer */
				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
									  B1_FACILITY_MIXER), MIXER_COMMAND_1);
			}
			/* fall through */
		case MIXER_COMMAND_1:
			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			{
				if (adjust_b_process(Id, plci, Rc) != GOOD)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Load mixer failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					break;
				}
				if (plci->internal_command)
					return;
			}
			plci->li_plci_b_req_pos = plci->li_plci_b_write_pos;
			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
									      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
			{
				xconnect_write_coefs(plci, MIXER_COMMAND_2);
			}
			else
			{
				/* mixer not loaded: just deliver the queued indications */
				do
				{
					mixer_indication_coefs_set(Id, plci);
				} while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
			}
			/* fall through */
		case MIXER_COMMAND_2:
			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
									      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
			{
				if (!xconnect_write_coefs_process(Id, plci, Rc))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					if (plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
					{
						/* roll the write position back to the last
						   completely queued request */
						do
						{
							plci->li_plci_b_write_pos = (plci->li_plci_b_write_pos == 0) ?
								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
							i = (plci->li_plci_b_write_pos == 0) ?
								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
						} while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
							 && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG));
					}
					break;
				}
				if (plci->internal_command)
					return;
			}
			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
			{
				/* channel no longer involved: unload the mixer */
				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
									  ~B1_FACILITY_MIXER), MIXER_COMMAND_3);
			}
			/* fall through */
		case MIXER_COMMAND_3:
			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
			{
				if (adjust_b_process(Id, plci, Rc) != GOOD)
				{
					dbug(1, dprintf("[%06lx] %s,%d: Unload mixer failed",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					break;
				}
				if (plci->internal_command)
					return;
			}
			break;
		}
		break;
	}
	if ((plci->li_bchannel_id == 0)
	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Channel id wiped out %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, (int)(plci->li_bchannel_id)));
	}
	else
	{
		i = a->li_base + (plci->li_bchannel_id - 1);
		li_config_table[i].curchnl = plci->li_channel_bits;
		if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
		{
			i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
			li_config_table[i].curchnl = plci->li_channel_bits;
			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
			{
				i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
				li_config_table[i].curchnl = plci->li_channel_bits;
			}
		}
	}
}
/*
 * Update the li_config_table flag matrix for an interconnect (old LI spec)
 * between 'plci' (side a) and the PLCI addressed by 'plci_b_id' (side b)
 * according to 'li_flags'.  With 'connect' set, the stale monitor/
 * announcement/mix flags of side a are cleared first.
 *
 * Channel index naming:
 *   ch_x   - the plain B-channel entry
 *   ch_x_v - the IC (voice) channel entry on a BRI adapter with an advanced
 *            voice codec, otherwise identical to ch_x
 *   ch_x_s - the slave codec IC entry if present, otherwise ch_x_v
 */
static void li_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
			      dword plci_b_id, byte connect, dword li_flags)
{
	word i, ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
	PLCI *plci_b;
	DIVA_CAPI_ADAPTER *a_b;

	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
	/* resolve the three channel indices for side a */
	ch_a = a->li_base + (plci->li_bchannel_id - 1);
	if (!a->li_pri && (plci->tel == ADV_VOICE)
	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
	{
		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
	}
	else
	{
		ch_a_v = ch_a;
		ch_a_s = ch_a;
	}
	/* resolve the three channel indices for side b */
	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
	{
		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
	}
	else
	{
		ch_b_v = ch_b;
		ch_b_s = ch_b;
	}
	/* clear flags that will be re-established from li_flags below */
	if (connect)
	{
		li_config_table[ch_a].flag_table[ch_a_v] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_a_s] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_a_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
		li_config_table[ch_a_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	}
	li_config_table[ch_a].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
	li_config_table[ch_a].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
	li_config_table[ch_b_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	li_config_table[ch_b_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
	/* tear down conference membership between a and b before rebuilding */
	if (ch_a_v == ch_b_v)
	{
		li_config_table[ch_a_v].flag_table[ch_b_v] &= ~LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_s] &= ~LI_FLAG_CONFERENCE;
	}
	else
	{
		if (li_config_table[ch_a_v].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_v)
					li_config_table[ch_a_v].flag_table[i] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_a_s].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_s)
					li_config_table[ch_a_s].flag_table[i] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_b_v].flag_table[ch_a_v] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_v)
					li_config_table[i].flag_table[ch_a_v] &= ~LI_FLAG_CONFERENCE;
			}
		}
		if (li_config_table[ch_b_v].flag_table[ch_a_s] & LI_FLAG_CONFERENCE)
		{
			for (i = 0; i < li_total_channels; i++)
			{
				if (i != ch_a_s)
					li_config_table[i].flag_table[ch_a_s] &= ~LI_FLAG_CONFERENCE;
			}
		}
	}
	/* apply the requested interconnect flags */
	if (li_flags & LI_FLAG_CONFERENCE_A_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
	}
	if (li_flags & LI_FLAG_CONFERENCE_B_A)
	{
		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
	}
	if (li_flags & LI_FLAG_MONITOR_A)
	{
		li_config_table[ch_a].flag_table[ch_a_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_a_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI_FLAG_MONITOR_B)
	{
		li_config_table[ch_a].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_a].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI_FLAG_ANNOUNCEMENT_A)
	{
		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
	}
	if (li_flags & LI_FLAG_ANNOUNCEMENT_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
	}
	if (li_flags & LI_FLAG_MIX_A)
	{
		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_MIX;
		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_MIX;
	}
	if (li_flags & LI_FLAG_MIX_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_MIX;
	}
	/* keep voice and slave codec channels of the same side conferenced */
	if (ch_a_v != ch_a_s)
	{
		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
	}
	if (ch_b_v != ch_b_s)
	{
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
	}
}
/*
 * Update the li_config_table flag matrix for an interconnect (new LI2 spec)
 * between 'plci' (side a) and the participant addressed by 'plci_b_id'
 * (side b) according to 'li_flags'.  With 'connect' set, side b's own
 * monitor/mix/loop state is cleared first.  Channel index naming as in
 * li_update_connect(): ch_x plain, ch_x_v voice IC, ch_x_s slave codec IC.
 */
static void li2_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
			       dword plci_b_id, byte connect, dword li_flags)
{
	word ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
	PLCI *plci_b;
	DIVA_CAPI_ADAPTER *a_b;

	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
	/* resolve the three channel indices for side a */
	ch_a = a->li_base + (plci->li_bchannel_id - 1);
	if (!a->li_pri && (plci->tel == ADV_VOICE)
	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
	{
		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
	}
	else
	{
		ch_a_v = ch_a;
		ch_a_s = ch_a;
	}
	/* resolve the three channel indices for side b */
	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
	{
		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
	}
	else
	{
		ch_b_v = ch_b;
		ch_b_s = ch_b;
	}
	/* on connect, reset side b's own monitor/mix/loop configuration */
	if (connect)
	{
		li_config_table[ch_b].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_b].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
		li_config_table[ch_b_v].flag_table[ch_b] &= ~LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_b] &= ~LI_FLAG_MIX;
		li_config_table[ch_b].flag_table[ch_b] &= ~LI_FLAG_PCCONNECT;
		li_config_table[ch_b].chflags &= ~(LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP);
	}
	/* drop existing interconnect/conference links between a and b */
	li_config_table[ch_b_v].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_s].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_v].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_b_s].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_v].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_v].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_s].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	li_config_table[ch_a_s].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
	/* apply the requested LI2 flags */
	if (li_flags & LI2_FLAG_INTERCONNECT_A_B)
	{
		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_INTERCONNECT_B_A)
	{
		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_MONITOR_B)
	{
		li_config_table[ch_b].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
		li_config_table[ch_b].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
	}
	if (li_flags & LI2_FLAG_MIX_B)
	{
		li_config_table[ch_b_v].flag_table[ch_b] |= LI_FLAG_MIX;
		li_config_table[ch_b_s].flag_table[ch_b] |= LI_FLAG_MIX;
	}
	if (li_flags & LI2_FLAG_MONITOR_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_MONITOR;
	if (li_flags & LI2_FLAG_MIX_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_MIX;
	if (li_flags & LI2_FLAG_LOOP_B)
	{
		li_config_table[ch_b_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
		li_config_table[ch_b_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
	}
	if (li_flags & LI2_FLAG_LOOP_PC)
		li_config_table[ch_b].flag_table[ch_b] |= LI_FLAG_PCCONNECT;
	if (li_flags & LI2_FLAG_LOOP_X)
		li_config_table[ch_b].chflags |= LI_CHFLAG_LOOP;
	if (li_flags & LI2_FLAG_PCCONNECT_A_B)
		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_PCCONNECT;
	if (li_flags & LI2_FLAG_PCCONNECT_B_A)
		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_PCCONNECT;
	/* keep voice and slave codec channels of the same side conferenced */
	if (ch_a_v != ch_a_s)
	{
		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
	}
	if (ch_b_v != ch_b_s)
	{
		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
	}
}
/*
 * Validate the main PLCI of a line-interconnect request and register it as
 * owner of its li_config_table slot.  Returns GOOD on success,
 * _WRONG_IDENTIFIER for a missing PLCI or _WRONG_STATE when it is not in a
 * usable state.
 */
static word li_check_main_plci(dword Id, PLCI *plci)
{
	int usable;

	if (plci == NULL)
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		return (_WRONG_IDENTIFIER);
	}
	usable = (plci->State != 0) && (plci->NL.Id != 0)
		&& !plci->nl_remove_id && (plci->li_bchannel_id != 0);
	if (!usable)
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		return (_WRONG_STATE);
	}
	li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = plci;
	return (GOOD);
}
/*
 * Validate the second (peer) PLCI of an old-spec LI request: checks queue
 * space, controller/PLCI addressing, peer state, cross-controller capability
 * and mixer support, and registers the peer in li_config_table.  On failure
 * a CAPI error code is stored via PUT_WORD at p_result and NULL is returned;
 * on success the peer PLCI is returned.
 */
static PLCI *li_check_plci_b(dword Id, PLCI *plci,
			     dword plci_b_id, word plci_b_write_pos, byte *p_result)
{
	byte ctrl;
	word queue_space, plci_idx;
	PLCI *peer;

	/* free entries left in the circular li_plci_b queue */
	queue_space = (word)((((plci->li_plci_b_read_pos > plci_b_write_pos) ?
			       plci->li_plci_b_read_pos :
			       LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) -
			      plci_b_write_pos) - 1);
	if (queue_space < 2)
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	ctrl = 0;
	if ((plci_b_id & 0x7f) != 0)
	{
		ctrl = MapController((byte)(plci_b_id & 0x7f));
		if ((ctrl > max_adapter) || ((ctrl != 0) && (adapter[ctrl - 1].request == NULL)))
			ctrl = 0;
	}
	plci_idx = (word)((plci_b_id >> 8) & 0xff);
	if ((ctrl == 0) || (plci_idx == 0) || (plci_idx > adapter[ctrl - 1].max_plci))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	peer = &(adapter[ctrl - 1].plci[plci_idx - 1]);
	if (!peer->State
	    || !peer->NL.Id || peer->nl_remove_id
	    || (peer->li_bchannel_id == 0))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	/* register the peer as owner of its li_config_table slot */
	li_config_table[peer->adapter->li_base + (peer->li_bchannel_id - 1)].plci = peer;
	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
		|| !(peer->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	if (!(get_b1_facilities(peer, add_b1_facilities(peer, peer->B1_resource,
							(word)(peer->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, peer->B1_resource));
		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
		return (NULL);
	}
	return (peer);
}
/*
 * Validate the second (peer) PLCI of a new-spec LI2 request.  Same checks as
 * li_check_plci_b() but with the LI2 error codes (_WRONG_STATE instead of
 * _REQUEST_NOT_ALLOWED_IN_THIS_STATE), an additional check that the peer
 * still owns its li_config_table slot, and no slot registration here.
 * Returns the peer PLCI, or NULL with an error stored at p_result.
 */
static PLCI *li2_check_plci_b(dword Id, PLCI *plci,
			      dword plci_b_id, word plci_b_write_pos, byte *p_result)
{
	byte ctrl;
	word queue_space, plci_idx;
	PLCI *peer;

	/* free entries left in the circular li_plci_b queue */
	queue_space = (word)((((plci->li_plci_b_read_pos > plci_b_write_pos) ?
			       plci->li_plci_b_read_pos :
			       LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) -
			      plci_b_write_pos) - 1);
	if (queue_space < 2)
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	ctrl = 0;
	if ((plci_b_id & 0x7f) != 0)
	{
		ctrl = MapController((byte)(plci_b_id & 0x7f));
		if ((ctrl > max_adapter) || ((ctrl != 0) && (adapter[ctrl - 1].request == NULL)))
			ctrl = 0;
	}
	plci_idx = (word)((plci_b_id >> 8) & 0xff);
	if ((ctrl == 0) || (plci_idx == 0) || (plci_idx > adapter[ctrl - 1].max_plci))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	peer = &(adapter[ctrl - 1].plci[plci_idx - 1]);
	if (!peer->State
	    || !peer->NL.Id || peer->nl_remove_id
	    || (peer->li_bchannel_id == 0)
	    || (li_config_table[peer->adapter->li_base + (peer->li_bchannel_id - 1)].plci != peer))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
		|| !(peer->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
	{
		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
		PUT_WORD(p_result, _WRONG_IDENTIFIER);
		return (NULL);
	}
	if (!(get_b1_facilities(peer, add_b1_facilities(peer, peer->B1_resource,
							(word)(peer->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
				UnMapId(Id), (char *)(FILE_), __LINE__, peer->B1_resource));
		PUT_WORD(p_result, _WRONG_STATE);
		return (NULL);
	}
	return (peer);
}
/*
 * CAPI FACILITY_REQ handler for SELECTOR_LINE_INTERCONNECT.  Parses the
 * request (both the old LI and the new LI2 message layouts, distinguished
 * by the parameter length), updates the interconnect flag tables, queues
 * the per-peer actions into plci->li_plci_b_queue and kicks mixer_command()
 * as internal command.  Returns false (message consumed).
 */
static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
	word Info;
	word i;
	dword d, li_flags, plci_b_id;
	PLCI *plci_b;
	API_PARSE li_parms[3];
	API_PARSE li_req_parms[3];
	API_PARSE li_participant_struct[2];
	API_PARSE li_participant_parms[3];
	word participant_parms_pos;
	byte result_buffer[32];
	byte *result;
	word result_pos;
	word plci_b_write_pos;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_request",
			UnMapId(Id), (char *)(FILE_), __LINE__));
	Info = GOOD;
	result = result_buffer;
	result_buffer[0] = 0;
	if (!(a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _FACILITY_NOT_SUPPORTED;
	}
	else if (api_parse(&msg[1].info[1], msg[1].length, "ws", li_parms))
	{
		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		Info = _WRONG_MESSAGE_FORMAT;
	}
	else
	{
		result_buffer[0] = 3;
		PUT_WORD(&result_buffer[1], GET_WORD(li_parms[0].info));
		result_buffer[3] = 0;
		switch (GET_WORD(li_parms[0].info))
		{
		case LI_GET_SUPPORTED_SERVICES:
			/* report capability bits and channel counts; layout differs
			   between the old LI spec and the LI2 spec */
			if (appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
			{
				result_buffer[0] = 17;
				result_buffer[3] = 14;
				PUT_WORD(&result_buffer[4], GOOD);
				d = 0;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_CH)
					d |= LI_CONFERENCING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
					d |= LI_MONITORING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
					d |= LI_ANNOUNCEMENTS_SUPPORTED | LI_MIXING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
					d |= LI_CROSS_CONTROLLER_SUPPORTED;
				PUT_DWORD(&result_buffer[6], d);
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
				{
					/* count XCONNECT-capable channels over all adapters */
					d = 0;
					for (i = 0; i < li_total_channels; i++)
					{
						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
						    && (li_config_table[i].adapter->li_pri
							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
						{
							d++;
						}
					}
				}
				else
				{
					d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
				}
				PUT_DWORD(&result_buffer[10], d / 2);
				PUT_DWORD(&result_buffer[14], d);
			}
			else
			{
				result_buffer[0] = 25;
				result_buffer[3] = 22;
				PUT_WORD(&result_buffer[4], GOOD);
				d = LI2_ASYMMETRIC_SUPPORTED | LI2_B_LOOPING_SUPPORTED | LI2_X_LOOPING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
					d |= LI2_MONITORING_SUPPORTED | LI2_REMOTE_MONITORING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
					d |= LI2_MIXING_SUPPORTED | LI2_REMOTE_MIXING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_PC)
					d |= LI2_PC_LOOPING_SUPPORTED;
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
					d |= LI2_CROSS_CONTROLLER_SUPPORTED;
				PUT_DWORD(&result_buffer[6], d);
				d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
				PUT_DWORD(&result_buffer[10], d / 2);
				PUT_DWORD(&result_buffer[14], d - 1);
				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
				{
					/* count XCONNECT-capable channels over all adapters */
					d = 0;
					for (i = 0; i < li_total_channels; i++)
					{
						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
						    && (li_config_table[i].adapter->li_pri
							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
						{
							d++;
						}
					}
				}
				PUT_DWORD(&result_buffer[18], d / 2);
				PUT_DWORD(&result_buffer[22], d - 1);
			}
			break;
		case LI_REQ_CONNECT:
			if (li_parms[1].length == 8)
			{
				/* old LI spec: single peer PLCI + flags */
				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "dd", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
				li_flags = GET_DWORD(li_req_parms[1].info);
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 9;
				result_buffer[3] = 6;
				PUT_DWORD(&result_buffer[4], plci_b_id);
				PUT_WORD(&result_buffer[8], GOOD);
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
				if (plci_b == NULL)
					break;
				li_update_connect(Id, a, plci, plci_b_id, true, li_flags);
				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			else
			{
				/* LI2 spec: flags + a structure with a list of participants */
				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "ds", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				li_flags = GET_DWORD(li_req_parms[0].info) & ~(LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A);
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 7;
				result_buffer[3] = 4;
				PUT_WORD(&result_buffer[4], Info);
				result_buffer[6] = 0;
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				participant_parms_pos = 0;
				result_pos = 7;
				li2_update_connect(Id, a, plci, UnMapId(Id), true, li_flags);
				while (participant_parms_pos < li_req_parms[1].length)
				{
					/* append a 7-byte result entry per participant */
					result[result_pos] = 6;
					result_pos += 7;
					PUT_DWORD(&result[result_pos - 6], 0);
					PUT_WORD(&result[result_pos - 2], GOOD);
					if (api_parse(&li_req_parms[1].info[1 + participant_parms_pos],
						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					if (api_parse(&li_participant_struct[0].info[1],
						      li_participant_struct[0].length, "dd", li_participant_parms))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
					li_flags = GET_DWORD(li_participant_parms[1].info);
					PUT_DWORD(&result[result_pos - 6], plci_b_id);
					/* NOTE(review): 'result' is a byte pointer here, so
					   sizeof(result) is the pointer size, not the capacity of
					   plci->saved_msg.info — this overrun check is effectively
					   dead.  Should compare against the saved_msg.info buffer
					   size; confirm that size before fixing. */
					if (sizeof(result) - result_pos < 7)
					{
						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
						break;
					}
					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
					if (plci_b != NULL)
					{
						li2_update_connect(Id, a, plci, plci_b_id, true, li_flags);
						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id |
							((li_flags & (LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A |
								      LI2_FLAG_PCCONNECT_A_B | LI2_FLAG_PCCONNECT_B_A)) ? 0 : LI_PLCI_B_DISC_FLAG);
						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
					}
					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
								       (&li_req_parms[1].info[1]));
				}
				result[0] = (byte)(result_pos - 1);
				result[3] = (byte)(result_pos - 4);
				result[6] = (byte)(result_pos - 7);
				/* terminate the queued request with a LAST flag (or a SKIP
				   entry when nothing was queued) */
				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
				{
					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				}
				else
					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			mixer_calculate_coefs(a);
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			mixer_notify_update(plci, true);
			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		case LI_REQ_DISCONNECT:
			if (li_parms[1].length == 4)
			{
				/* old LI spec: single peer PLCI */
				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "d", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 9;
				result_buffer[3] = 6;
				PUT_DWORD(&result_buffer[4], GET_DWORD(li_req_parms[0].info));
				PUT_WORD(&result_buffer[8], GOOD);
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
				if (plci_b == NULL)
					break;
				li_update_connect(Id, a, plci, plci_b_id, false, 0);
				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			else
			{
				/* LI2 spec: structure with a list of participants */
				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "s", li_req_parms))
				{
					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
							UnMapId(Id), (char *)(FILE_), __LINE__));
					Info = _WRONG_MESSAGE_FORMAT;
					break;
				}
				Info = li_check_main_plci(Id, plci);
				result_buffer[0] = 7;
				result_buffer[3] = 4;
				PUT_WORD(&result_buffer[4], Info);
				result_buffer[6] = 0;
				if (Info != GOOD)
					break;
				result = plci->saved_msg.info;
				for (i = 0; i <= result_buffer[0]; i++)
					result[i] = result_buffer[i];
				plci_b_write_pos = plci->li_plci_b_write_pos;
				participant_parms_pos = 0;
				result_pos = 7;
				while (participant_parms_pos < li_req_parms[0].length)
				{
					/* append a 7-byte result entry per participant */
					result[result_pos] = 6;
					result_pos += 7;
					PUT_DWORD(&result[result_pos - 6], 0);
					PUT_WORD(&result[result_pos - 2], GOOD);
					if (api_parse(&li_req_parms[0].info[1 + participant_parms_pos],
						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					if (api_parse(&li_participant_struct[0].info[1],
						      li_participant_struct[0].length, "d", li_participant_parms))
					{
						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
						break;
					}
					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
					PUT_DWORD(&result[result_pos - 6], plci_b_id);
					/* NOTE(review): same dead check as in LI_REQ_CONNECT —
					   sizeof(result) is the size of the pointer, not of the
					   destination buffer; confirm the saved_msg.info capacity
					   before fixing. */
					if (sizeof(result) - result_pos < 7)
					{
						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
								UnMapId(Id), (char *)(FILE_), __LINE__));
						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
						break;
					}
					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
					if (plci_b != NULL)
					{
						li2_update_connect(Id, a, plci, plci_b_id, false, 0);
						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
					}
					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
								       (&li_req_parms[0].info[1]));
				}
				result[0] = (byte)(result_pos - 1);
				result[3] = (byte)(result_pos - 4);
				result[6] = (byte)(result_pos - 7);
				/* terminate the queued request with a LAST flag (or a SKIP
				   entry when nothing was queued) */
				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
				{
					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
				}
				else
					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
				plci->li_plci_b_write_pos = plci_b_write_pos;
			}
			mixer_calculate_coefs(a);
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			mixer_notify_update(plci, true);
			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		case LI_REQ_SILENT_UPDATE:
			/* internally generated request (see mixer_notify_update);
			   no confirmation is sent back to the application */
			if (!plci || !plci->State
			    || !plci->NL.Id || plci->nl_remove_id
			    || (plci->li_bchannel_id == 0)
			    || (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				return (false);
			}
			plci_b_write_pos = plci->li_plci_b_write_pos;
			if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
			     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
			{
				dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
						UnMapId(Id), (char *)(FILE_), __LINE__));
				return (false);
			}
			i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
			if ((plci_b_write_pos == plci->li_plci_b_read_pos)
			    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
			{
				plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
			}
			else
				plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
			plci->li_plci_b_write_pos = plci_b_write_pos;
			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
			plci->command = 0;
			plci->li_cmd = GET_WORD(li_parms[0].info);
			start_internal_command(Id, plci, mixer_command);
			return (false);
		default:
			dbug(1, dprintf("[%06lx] %s,%d: LI unknown request %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(li_parms[0].info)));
			Info = _FACILITY_NOT_SUPPORTED;
		}
	}
	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
	      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
	return (false);
}
/*
 * mixer_indication_coefs_set
 *
 * Drain the PLCI's line-interconnect notification queue (li_plci_b_queue,
 * a circular buffer indexed by li_plci_b_read_pos/li_plci_b_req_pos) and
 * send a CAPI facility indication for each queued entry.  Entries marked
 * LI_PLCI_B_SKIP_FLAG are consumed silently.  The indication layout
 * depends on whether the application uses the old LI spec
 * (APPL_FLAG_OLD_LI_SPEC) and on whether the entry is a disconnect
 * (LI_PLCI_B_DISC_FLAG) or a connect-active notification.  Draining stops
 * at an entry carrying LI_PLCI_B_LAST_FLAG or when the queue is empty.
 */
static void mixer_indication_coefs_set(dword Id, PLCI *plci)
{
dword d;
byte result[12];
dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_coefs_set",
UnMapId(Id), (char *)(FILE_), __LINE__));
if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
{
do
{
d = plci->li_plci_b_queue[plci->li_plci_b_read_pos];
if (!(d & LI_PLCI_B_SKIP_FLAG))
{
if (plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
{
/* old LI spec: disconnect carries only the cause word */
if (d & LI_PLCI_B_DISC_FLAG)
{
result[0] = 5;
PUT_WORD(&result[1], LI_IND_DISCONNECT);
result[3] = 2;
PUT_WORD(&result[4], _LI_USER_INITIATED);
}
else
{
result[0] = 7;
PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
result[3] = 4;
PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
}
}
else
{
/* new LI spec: disconnect carries the peer PLCI id and the cause */
if (d & LI_PLCI_B_DISC_FLAG)
{
result[0] = 9;
PUT_WORD(&result[1], LI_IND_DISCONNECT);
result[3] = 6;
PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
PUT_WORD(&result[8], _LI_USER_INITIATED);
}
else
{
result[0] = 7;
PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
result[3] = 4;
PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
}
}
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,
"ws", SELECTOR_LINE_INTERCONNECT, result);
}
/* advance the circular read position */
plci->li_plci_b_read_pos = (plci->li_plci_b_read_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ?
0 : plci->li_plci_b_read_pos + 1;
} while (!(d & LI_PLCI_B_LAST_FLAG) && (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos));
}
}
/*
 * mixer_indication_xconnect_from
 *
 * Process an xconnect transfer-address indication from the card.  The
 * message body is a sequence of 16-byte entries starting at offset 1;
 * each entry holds a 64-bit card address, a 32-bit offset and a
 * channel/port word.  The decoded address is stored in the matching
 * li_config_table slot (PC side or B side, selected by
 * XCONNECT_CHANNEL_PORT_PC) and the channel is flagged with
 * LI_CHANNEL_ADDRESSES_SET.  If an internal command is waiting in one of
 * the ADJUST_B_RESTORE_MIXER states it is resumed, and a mixer update
 * notification is scheduled at the end.
 *
 * Fix vs. original: removed the dead store "i = 1;" that immediately
 * preceded the for loop re-initializing i to the same value.
 */
static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length)
{
	word i, j, ch;
	struct xconnect_transfer_address_s s, *p;
	DIVA_CAPI_ADAPTER *a;

	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_from %d",
		UnMapId(Id), (char *)(FILE_), __LINE__, (int)length));
	a = plci->adapter;
	for (i = 1; i < length; i += 16)
	{
		/* little-endian decode of one transfer address entry */
		s.card_address.low = msg[i] | (msg[i + 1] << 8) | (((dword)(msg[i + 2])) << 16) | (((dword)(msg[i + 3])) << 24);
		s.card_address.high = msg[i + 4] | (msg[i + 5] << 8) | (((dword)(msg[i + 6])) << 16) | (((dword)(msg[i + 7])) << 24);
		s.offset = msg[i + 8] | (msg[i + 9] << 8) | (((dword)(msg[i + 10])) << 16) | (((dword)(msg[i + 11])) << 24);
		ch = msg[i + 12] | (msg[i + 13] << 8);
		j = ch & XCONNECT_CHANNEL_NUMBER_MASK;
		/* on BRI the channel numbering is mirrored when seen from B channel 2 */
		if (!a->li_pri && (plci->li_bchannel_id == 2))
			j = 1 - j;
		j += a->li_base;
		if (ch & XCONNECT_CHANNEL_PORT_PC)
			p = &(li_config_table[j].send_pc);
		else
			p = &(li_config_table[j].send_b);
		p->card_address.low = s.card_address.low;
		p->card_address.high = s.card_address.high;
		p->offset = s.offset;
		li_config_table[j].channel |= LI_CHANNEL_ADDRESSES_SET;
	}
	/* resume a pending B-channel mixer restore sequence, if one is waiting */
	if (plci->internal_command_queue[0]
	    && ((plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)))
	{
		(*(plci->internal_command_queue[0]))(Id, plci, 0);
		if (!plci->internal_command)
			next_internal_command(Id, plci);
	}
	mixer_notify_update(plci, true);
}
/*
 * mixer_indication_xconnect_to
 *
 * Handler for xconnect "to" indications.  Currently only logs the
 * indication; the message content is not processed.
 */
static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length)
{
dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_to %d",
UnMapId(Id), (char *)(FILE_), __LINE__, (int) length));
}
/*
 * mixer_notify_source_removed
 *
 * Queue a disconnect notification (plci_b_id | LI_PLCI_B_DISC_FLAG) into
 * the given PLCI's line-interconnect notification queue so that the
 * application is later informed that an interconnect source went away.
 * Returns false if the circular queue has no free entry, true otherwise.
 */
static byte mixer_notify_source_removed(PLCI *plci, dword plci_b_id)
{
word plci_b_write_pos;
plci_b_write_pos = plci->li_plci_b_write_pos;
/* circular-buffer free-space check: need at least one free slot */
if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 1)
{
dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
return (false);
}
plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
plci->li_plci_b_write_pos = plci_b_write_pos;
return (true);
}
/*
 * mixer_remove
 *
 * Tear down the line-interconnect state of a PLCI that is being removed.
 * If the PLCI owns a B channel entry in li_config_table and that channel
 * is involved in an interconnect, every other still-active PLCI that is
 * interconnected with it (new-LI-spec applications only) is notified via
 * mixer_notify_source_removed(), the mixer configuration is cleared and
 * the coefficients recalculated.  Finally the li_config_table slot is
 * released and the PLCI's B channel id reset.
 */
static void mixer_remove(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
PLCI *notify_plci;
dword plci_b_id;
word i, j;
dbug(1, dprintf("[%06lx] %s,%d: mixer_remove",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
a = plci->adapter;
plci_b_id = (plci->Id << 8) | UnMapController(plci->adapter->Id);
if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
{
if ((plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
if ((li_config_table[i].curchnl | li_config_table[i].channel) & LI_CHANNEL_INVOLVED)
{
/* notify every peer channel interconnected with this one */
for (j = 0; j < li_total_channels; j++)
{
if ((li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
|| (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT))
{
notify_plci = li_config_table[j].plci;
if ((notify_plci != NULL)
&& (notify_plci != plci)
&& (notify_plci->appl != NULL)
&& !(notify_plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
&& (notify_plci->State)
&& notify_plci->NL.Id && !notify_plci->nl_remove_id)
{
mixer_notify_source_removed(notify_plci, plci_b_id);
}
}
}
mixer_clear_config(plci);
mixer_calculate_coefs(a);
mixer_notify_update(plci, true);
}
/* release the channel table slot owned by this PLCI */
li_config_table[i].plci = NULL;
plci->li_bchannel_id = 0;
}
}
}
/*------------------------------------------------------------------*/
/* Echo canceller facilities */
/*------------------------------------------------------------------*/
/*
 * ec_write_parameters
 *
 * Send the current echo canceller (LEC) configuration to the card:
 * builds a DSP_CTRL_SET_LEC_PARAMETERS facility structure holding the
 * option bits (plci->ec_idi_options) and the tail length (0 selects the
 * default value 128) and issues it via a TEL_CTRL signalling request.
 * LEC_RESET_COEFFICIENTS is a one-shot flag and is cleared once written.
 *
 * Fix vs. original: the source had been corrupted by HTML-entity
 * decoding — "&parameter_buffer[...]" had turned into
 * "¶meter_buffer[...]" (the "&para" prefix became the '¶' character),
 * which does not compile.  The address-of expressions are restored.
 */
static void ec_write_parameters(PLCI *plci)
{
	word w;
	byte parameter_buffer[6];

	dbug(1, dprintf("[%06lx] %s,%d: ec_write_parameters",
		(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
		(char *)(FILE_), __LINE__));
	parameter_buffer[0] = 5;	/* length of the data that follows */
	parameter_buffer[1] = DSP_CTRL_SET_LEC_PARAMETERS;
	PUT_WORD(&parameter_buffer[2], plci->ec_idi_options);
	plci->ec_idi_options &= ~LEC_RESET_COEFFICIENTS;	/* one-shot flag */
	w = (plci->ec_tail_length == 0) ? 128 : plci->ec_tail_length;
	PUT_WORD(&parameter_buffer[4], w);
	add_p(plci, FTY, parameter_buffer);
	sig_req(plci, TEL_CTRL, 0);
	send_req(plci);
}
/*
 * ec_clear_config
 *
 * Reset the PLCI's echo canceller configuration to its defaults:
 * canceller enabled but manually disabled, non-linear processing on,
 * and the default tail length (0).
 */
static void ec_clear_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: ec_clear_config",
		(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
		(char *)(FILE_), __LINE__));
	plci->ec_tail_length = 0;
	plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER;
	plci->ec_idi_options |= LEC_MANUAL_DISABLE;
	plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
}
/*
 * ec_prepare_switch
 *
 * Part of the B1 resource switching sequence.  The echo canceller needs
 * no preparation before a switch, so this only logs the call.
 */
static void ec_prepare_switch(dword Id, PLCI *plci)
{
dbug(1, dprintf("[%06lx] %s,%d: ec_prepare_switch",
UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * ec_save_config
 *
 * Part of the B1 resource switching sequence.  The echo canceller has no
 * state that must be saved before a switch, so this always reports GOOD.
 */
static word ec_save_config(dword Id, PLCI *plci, byte Rc)
{
dbug(1, dprintf("[%06lx] %s,%d: ec_save_config %02x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
return (GOOD);
}
/*
 * ec_restore_config
 *
 * Part of the B1 resource switching sequence: re-send the echo canceller
 * parameters after a resource switch when the new resource provides
 * B1_FACILITY_EC.  Driven by plci->adjust_b_state:
 *   ADJUST_B_RESTORE_EC_1 - issue ec_write_parameters() once the
 *     signalling channel is free (otherwise stay in this state);
 *   ADJUST_B_RESTORE_EC_2 - check the result code of that request.
 * Returns GOOD on success, _WRONG_STATE if the restore failed.
 */
static word ec_restore_config(dword Id, PLCI *plci, byte Rc)
{
word Info;
dbug(1, dprintf("[%06lx] %s,%d: ec_restore_config %02x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
Info = GOOD;
if (plci->B1_facilities & B1_FACILITY_EC)
{
switch (plci->adjust_b_state)
{
case ADJUST_B_RESTORE_EC_1:
plci->internal_command = plci->adjust_b_command;
if (plci->sig_req)
{
/* signalling channel busy - retry in the same state */
plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
break;
}
ec_write_parameters(plci);
plci->adjust_b_state = ADJUST_B_RESTORE_EC_2;
break;
case ADJUST_B_RESTORE_EC_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: Restore EC failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _WRONG_STATE;
break;
}
break;
}
}
return (Info);
}
/*
 * ec_command
 *
 * Internal-command state machine that executes a queued echo canceller
 * request (plci->ec_cmd).  For enable/freeze/resume/reset the sequence is:
 * add B1_FACILITY_EC to the B1 resource, then write the LEC parameters,
 * then check the result.  For disable: write the parameters first, then
 * remove B1_FACILITY_EC.  The switch cases deliberately fall through so
 * that each step continues into the next when it completes synchronously.
 * On completion (or error) a facility confirm is sent to the application,
 * using the private or standard echo canceller selector depending on
 * APPL_FLAG_PRIV_EC_SPEC.
 */
static void ec_command(dword Id, PLCI *plci, byte Rc)
{
word internal_command, Info;
byte result[8];
dbug(1, dprintf("[%06lx] %s,%d: ec_command %02x %04x %04x %04x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
plci->ec_cmd, plci->ec_idi_options, plci->ec_tail_length));
Info = GOOD;
/* pre-build the success confirm; overwritten below only via Info on error */
if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
{
result[0] = 2;
PUT_WORD(&result[1], EC_SUCCESS);
}
else
{
result[0] = 5;
PUT_WORD(&result[1], plci->ec_cmd);
result[3] = 2;
PUT_WORD(&result[4], GOOD);
}
internal_command = plci->internal_command;
plci->internal_command = 0;
switch (plci->ec_cmd)
{
case EC_ENABLE_OPERATION:
case EC_FREEZE_COEFFICIENTS:
case EC_RESUME_COEFFICIENT_UPDATE:
case EC_RESET_COEFFICIENTS:
switch (internal_command)
{
default:
/* step 0: request a B1 resource that includes the EC facility */
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
B1_FACILITY_EC), EC_COMMAND_1);
/* fall through */
case EC_COMMAND_1:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: Load EC failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _FACILITY_NOT_SUPPORTED;
break;
}
if (plci->internal_command)
return;
/* fall through */
case EC_COMMAND_2:
if (plci->sig_req)
{
/* signalling channel busy - retry this step later */
plci->internal_command = EC_COMMAND_2;
return;
}
plci->internal_command = EC_COMMAND_3;
ec_write_parameters(plci);
return;
case EC_COMMAND_3:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: Enable EC failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _FACILITY_NOT_SUPPORTED;
break;
}
break;
}
break;
case EC_DISABLE_OPERATION:
switch (internal_command)
{
default:
case EC_COMMAND_1:
if (plci->B1_facilities & B1_FACILITY_EC)
{
if (plci->sig_req)
{
/* signalling channel busy - retry this step later */
plci->internal_command = EC_COMMAND_1;
return;
}
plci->internal_command = EC_COMMAND_2;
ec_write_parameters(plci);
return;
}
/* EC facility not loaded - skip the write step */
Rc = OK;
/* fall through */
case EC_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: Disable EC failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _FACILITY_NOT_SUPPORTED;
break;
}
/* step: drop the EC facility from the B1 resource */
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
~B1_FACILITY_EC), EC_COMMAND_3);
/* fall through */
case EC_COMMAND_3:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
dbug(1, dprintf("[%06lx] %s,%d: Unload EC failed",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _FACILITY_NOT_SUPPORTED;
break;
}
if (plci->internal_command)
return;
break;
}
break;
}
sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
"wws", Info, (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
}
/*
 * ec_request
 *
 * Handle an echo canceller facility request from an application.  Two
 * message formats are supported, selected by APPL_FLAG_PRIV_EC_SPEC:
 * the private spec parses "w" (command word, optionally followed by
 * option and tail-length words inside the same parameter), the standard
 * spec parses "ws" (command word plus a parameter struct).  After
 * validating adapter support, the PLCI and its state, the requested
 * command is stored in plci->ec_cmd, the LEC option bits are derived from
 * the parameters, and ec_command() is started as internal command; in
 * that case the confirm is sent later and this function returns false
 * immediately.  Otherwise a confirm (success, capability report, or
 * error) is sent here.  Always returns false.
 */
static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
word Info;
word opt;
API_PARSE ec_parms[3];
byte result[16];
dbug(1, dprintf("[%06lx] %s,%d: ec_request",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = GOOD;
result[0] = 0;
if (!(a->man_profile.private_options & (1L << PRIVATE_ECHO_CANCELLER)))
{
dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _FACILITY_NOT_SUPPORTED;
}
else
{
if (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
{
/* private EC spec: single parameter starting with the command word */
if (api_parse(&msg[1].info[1], msg[1].length, "w", ec_parms))
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_MESSAGE_FORMAT;
}
else
{
if (plci == NULL)
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_IDENTIFIER;
}
else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_STATE;
}
else
{
plci->command = 0;
plci->ec_cmd = GET_WORD(ec_parms[0].info);
plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
result[0] = 2;
PUT_WORD(&result[1], EC_SUCCESS);
/* optional option word and tail length follow the command word */
if (msg[1].length >= 4)
{
opt = GET_WORD(&ec_parms[0].info[2]);
plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
if (!(opt & EC_DISABLE_NON_LINEAR_PROCESSING))
plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
if (opt & EC_DETECT_DISABLE_TONE)
plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
if (msg[1].length >= 6)
{
plci->ec_tail_length = GET_WORD(&ec_parms[0].info[4]);
}
}
switch (plci->ec_cmd)
{
case EC_ENABLE_OPERATION:
plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
case EC_DISABLE_OPERATION:
plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
LEC_RESET_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
case EC_FREEZE_COEFFICIENTS:
plci->ec_idi_options |= LEC_FREEZE_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
case EC_RESUME_COEFFICIENT_UPDATE:
plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
case EC_RESET_COEFFICIENTS:
plci->ec_idi_options |= LEC_RESET_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
default:
dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
PUT_WORD(&result[1], EC_UNSUPPORTED_OPERATION);
}
}
}
}
else
{
/* standard EC spec: command word plus parameter struct */
if (api_parse(&msg[1].info[1], msg[1].length, "ws", ec_parms))
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_MESSAGE_FORMAT;
}
else
{
if (GET_WORD(ec_parms[0].info) == EC_GET_SUPPORTED_SERVICES)
{
/* capability query - answered without a PLCI */
result[0] = 11;
PUT_WORD(&result[1], EC_GET_SUPPORTED_SERVICES);
result[3] = 8;
PUT_WORD(&result[4], GOOD);
PUT_WORD(&result[6], 0x0007);
PUT_WORD(&result[8], LEC_MAX_SUPPORTED_TAIL_LENGTH);
PUT_WORD(&result[10], 0);
}
else if (plci == NULL)
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_IDENTIFIER;
}
else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
{
dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
UnMapId(Id), (char *)(FILE_), __LINE__));
Info = _WRONG_STATE;
}
else
{
plci->command = 0;
plci->ec_cmd = GET_WORD(ec_parms[0].info);
plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
result[0] = 5;
PUT_WORD(&result[1], plci->ec_cmd);
result[3] = 2;
PUT_WORD(&result[4], GOOD);
plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
plci->ec_tail_length = 0;
if (ec_parms[1].length >= 2)
{
opt = GET_WORD(&ec_parms[1].info[1]);
if (opt & EC_ENABLE_NON_LINEAR_PROCESSING)
plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
if (opt & EC_DETECT_DISABLE_TONE)
plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
if (ec_parms[1].length >= 4)
{
plci->ec_tail_length = GET_WORD(&ec_parms[1].info[3]);
}
}
switch (plci->ec_cmd)
{
case EC_ENABLE_OPERATION:
plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
case EC_DISABLE_OPERATION:
plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
LEC_RESET_COEFFICIENTS;
start_internal_command(Id, plci, ec_command);
return (false);
default:
dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
PUT_WORD(&result[4], _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP);
}
}
}
}
}
sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
"wws", Info, (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
return (false);
}
/*
 * ec_indication
 *
 * Forward an echo canceller bypass indication from the card to the
 * application, unless the canceller was manually disabled.  The DSP
 * disable reason in msg[1] is translated to the corresponding EC_BYPASS_*
 * code; the indication layout differs between private-spec applications
 * (bare word) and standard-spec applications (EC_BYPASS_INDICATION struct).
 */
static void ec_indication(dword Id, PLCI *plci, byte *msg, word length)
{
byte result[8];
dbug(1, dprintf("[%06lx] %s,%d: ec_indication",
UnMapId(Id), (char *)(FILE_), __LINE__));
if (!(plci->ec_idi_options & LEC_MANUAL_DISABLE))
{
if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
{
result[0] = 2;
PUT_WORD(&result[1], 0);
switch (msg[1])
{
case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
PUT_WORD(&result[1], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
break;
case LEC_DISABLE_TYPE_REVERSED_2100HZ:
PUT_WORD(&result[1], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
break;
case LEC_DISABLE_RELEASED:
PUT_WORD(&result[1], EC_BYPASS_RELEASED);
break;
}
}
else
{
result[0] = 5;
PUT_WORD(&result[1], EC_BYPASS_INDICATION);
result[3] = 2;
PUT_WORD(&result[4], 0);
switch (msg[1])
{
case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
PUT_WORD(&result[4], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
break;
case LEC_DISABLE_TYPE_REVERSED_2100HZ:
PUT_WORD(&result[4], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
break;
case LEC_DISABLE_RELEASED:
PUT_WORD(&result[4], EC_BYPASS_RELEASED);
break;
}
}
sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
}
}
/*------------------------------------------------------------------*/
/* Advanced voice */
/*------------------------------------------------------------------*/
/*
 * adv_voice_write_coefs
 *
 * Build and send the advanced-voice mixer coefficient structure
 * (DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS) to the card via a TEL_CTRL
 * signalling request.  The buffer starts with the adapter's stored
 * coefficient words (padded with 0x8000 up to ADV_VOICE_OLD_COEF_COUNT);
 * on BRI adapters the function then claims a free B-channel slot in
 * li_config_table if needed and, depending on write_command, updates the
 * line-interconnect flag tables for activation or deactivation before
 * recalculating the mixer coefficients.  When the B1 resource includes
 * the mixer facility, per-channel coefficient bytes are appended
 * according to mixer_write_prog_bri; otherwise the remaining raw
 * coefficient bytes from the adapter buffer are copied.
 */
static void adv_voice_write_coefs(PLCI *plci, word write_command)
{
DIVA_CAPI_ADAPTER *a;
word i;
byte *p;
word w, n, j, k;
byte ch_map[MIXER_CHANNELS_BRI];
byte coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE + 2];
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_write_coefs %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, write_command));
a = plci->adapter;
p = coef_buffer + 1;
*(p++) = DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS;
/* copy the stored coefficient words ... */
i = 0;
while (i + sizeof(word) <= a->adv_voice_coef_length)
{
PUT_WORD(p, GET_WORD(a->adv_voice_coef_buffer + i));
p += 2;
i += 2;
}
/* ... and pad with 0x8000 up to the old coefficient count */
while (i < ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
{
PUT_WORD(p, 0x8000);
p += 2;
i += 2;
}
/* BRI only: claim the remaining free B channel slot if this PLCI has none */
if (!a->li_pri && (plci->li_bchannel_id == 0))
{
if ((li_config_table[a->li_base].plci == NULL) && (li_config_table[a->li_base + 1].plci != NULL))
{
plci->li_bchannel_id = 1;
li_config_table[a->li_base].plci = plci;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, plci->li_bchannel_id));
}
else if ((li_config_table[a->li_base].plci != NULL) && (li_config_table[a->li_base + 1].plci == NULL))
{
plci->li_bchannel_id = 2;
li_config_table[a->li_base + 1].plci = plci;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, plci->li_bchannel_id));
}
}
if (!a->li_pri && (plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
switch (write_command)
{
case ADV_VOICE_WRITE_ACTIVATION:
/* j = own IC channel, k = the other B channel's IC channel */
j = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
if (!(plci->B1_facilities & B1_FACILITY_MIXER))
{
li_config_table[j].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
li_config_table[k].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
li_config_table[i].flag_table[k] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
li_config_table[k].flag_table[j] |= LI_FLAG_CONFERENCE;
li_config_table[j].flag_table[k] |= LI_FLAG_CONFERENCE;
}
mixer_calculate_coefs(a);
li_config_table[i].curchnl = li_config_table[i].channel;
li_config_table[j].curchnl = li_config_table[j].channel;
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
li_config_table[k].curchnl = li_config_table[k].channel;
break;
case ADV_VOICE_WRITE_DEACTIVATION:
/* clear all interconnect flags of the B channel and its IC channel(s) */
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
}
k = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
for (j = 0; j < li_total_channels; j++)
{
li_config_table[k].flag_table[j] = 0;
li_config_table[j].flag_table[k] = 0;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
for (j = 0; j < li_total_channels; j++)
{
li_config_table[k].flag_table[j] = 0;
li_config_table[j].flag_table[k] = 0;
}
}
mixer_calculate_coefs(a);
break;
}
if (plci->B1_facilities & B1_FACILITY_MIXER)
{
/* mixer loaded: append feature word and per-connection coefficient bytes */
w = 0;
if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)
w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
w |= MIXER_FEATURE_ENABLE_TX_DATA;
if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
w |= MIXER_FEATURE_ENABLE_RX_DATA;
*(p++) = (byte) w;
*(p++) = (byte)(w >> 8);
/* map program channel numbers to this PLCI's B channel ordering */
for (j = 0; j < sizeof(ch_map); j += 2)
{
ch_map[j] = (byte)(j + (plci->li_bchannel_id - 1));
ch_map[j + 1] = (byte)(j + (2 - plci->li_bchannel_id));
}
for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
{
i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
{
*(p++) = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
/* mark the coefficient as written (low nibble copied to high nibble) */
w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
}
else
{
*(p++) = (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n < a->adv_voice_coef_length) ?
a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n] : 0x00;
}
}
}
else
{
/* no mixer facility: copy the raw stored bytes */
for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
*(p++) = a->adv_voice_coef_buffer[i];
}
}
else
{
/* PRI or no owned B channel: copy the raw stored bytes */
for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
*(p++) = a->adv_voice_coef_buffer[i];
}
coef_buffer[0] = (p - coef_buffer) - 1;
add_p(plci, FTY, coef_buffer);
sig_req(plci, TEL_CTRL, 0);
send_req(plci);
}
static void adv_voice_clear_config(PLCI *plci)
{
DIVA_CAPI_ADAPTER *a;
word i, j;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_clear_config",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
a = plci->adapter;
if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
{
a->adv_voice_coef_length = 0;
if (!a->li_pri && (plci->li_bchannel_id != 0)
&& (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
{
i = a->li_base + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
{
i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
li_config_table[i].curchnl = 0;
li_config_table[i].channel = 0;
li_config_table[i].chflags = 0;
for (j = 0; j < li_total_channels; j++)
{
li_config_table[i].flag_table[j] = 0;
li_config_table[j].flag_table[i] = 0;
li_config_table[i].coef_table[j] = 0;
li_config_table[j].coef_table[i] = 0;
}
}
}
}
}
/*
 * adv_voice_prepare_switch
 *
 * Part of the B1 resource switching sequence.  Advanced voice needs no
 * preparation before a switch, so this only logs the call.
 */
static void adv_voice_prepare_switch(dword Id, PLCI *plci)
{
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_prepare_switch",
UnMapId(Id), (char *)(FILE_), __LINE__));
}
/*
 * adv_voice_save_config
 *
 * Part of the B1 resource switching sequence.  Advanced voice has no
 * state that must be saved before a switch, so this always reports GOOD.
 */
static word adv_voice_save_config(dword Id, PLCI *plci, byte Rc)
{
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_save_config %02x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
return (GOOD);
}
/*
 * adv_voice_restore_config
 *
 * Part of the B1 resource switching sequence: rewrite the advanced-voice
 * mixer coefficients after a resource switch, but only for the adapter's
 * signalling PLCI and only when the new resource includes
 * B1_FACILITY_VOICE.  Driven by plci->adjust_b_state:
 *   ADJUST_B_RESTORE_VOICE_1 - issue adv_voice_write_coefs() once the
 *     signalling channel is free (otherwise stay in this state);
 *   ADJUST_B_RESTORE_VOICE_2 - check the result code of that request.
 * Returns GOOD on success, _WRONG_STATE if the restore failed.
 */
static word adv_voice_restore_config(dword Id, PLCI *plci, byte Rc)
{
DIVA_CAPI_ADAPTER *a;
word Info;
dbug(1, dprintf("[%06lx] %s,%d: adv_voice_restore_config %02x %d",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
Info = GOOD;
a = plci->adapter;
if ((plci->B1_facilities & B1_FACILITY_VOICE)
&& (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
{
switch (plci->adjust_b_state)
{
case ADJUST_B_RESTORE_VOICE_1:
plci->internal_command = plci->adjust_b_command;
if (plci->sig_req)
{
/* signalling channel busy - retry in the same state */
plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
break;
}
adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_2;
break;
case ADJUST_B_RESTORE_VOICE_2:
if ((Rc != OK) && (Rc != OK_FC))
{
dbug(1, dprintf("[%06lx] %s,%d: Restore voice config failed %02x",
UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
Info = _WRONG_STATE;
break;
}
break;
}
}
return (Info);
}
/*------------------------------------------------------------------*/
/* B1 resource switching */
/*------------------------------------------------------------------*/
/*
 * b1_facilities_table
 *
 * Maps a B1 resource number (the table index) to the bit mask of
 * B1_FACILITY_* capabilities that resource provides; consumed by
 * get_b1_facilities(), which may add soft-DTMF bits for some resources.
 * The comment on each entry names the resource.  (Bit meanings follow
 * the B1_FACILITY_* definitions elsewhere in this file - e.g. 0x12 for
 * "Trans,mixer" combines the voice and mixer bits; verify against the
 * header when changing values.)
 */
static byte b1_facilities_table[] =
{
0x00, /* 0 No bchannel resources */
0x00, /* 1 Codec (automatic law) */
0x00, /* 2 Codec (A-law) */
0x00, /* 3 Codec (y-law) */
0x00, /* 4 HDLC for X.21 */
0x00, /* 5 HDLC */
0x00, /* 6 External Device 0 */
0x00, /* 7 External Device 1 */
0x00, /* 8 HDLC 56k */
0x00, /* 9 Transparent */
0x00, /* 10 Loopback to network */
0x00, /* 11 Test pattern to net */
0x00, /* 12 Rate adaptation sync */
0x00, /* 13 Rate adaptation async */
0x00, /* 14 R-Interface */
0x00, /* 15 HDLC 128k leased line */
0x00, /* 16 FAX */
0x00, /* 17 Modem async */
0x00, /* 18 Modem sync HDLC */
0x00, /* 19 V.110 async HDLC */
0x12, /* 20 Adv voice (Trans,mixer) */
0x00, /* 21 Codec connected to IC */
0x0c, /* 22 Trans,DTMF */
0x1e, /* 23 Trans,DTMF+mixer */
0x1f, /* 24 Trans,DTMF+mixer+local */
0x13, /* 25 Trans,mixer+local */
0x12, /* 26 HDLC,mixer */
0x12, /* 27 HDLC 56k,mixer */
0x2c, /* 28 Trans,LEC+DTMF */
0x3e, /* 29 Trans,LEC+DTMF+mixer */
0x3f, /* 30 Trans,LEC+DTMF+mixer+local */
0x2c, /* 31 RTP,LEC+DTMF */
0x3e, /* 32 RTP,LEC+DTMF+mixer */
0x3f, /* 33 RTP,LEC+DTMF+mixer+local */
0x00, /* 34 Signaling task */
0x00, /* 35 PIAFS */
0x0c, /* 36 Trans,DTMF+TONE */
0x1e, /* 37 Trans,DTMF+TONE+mixer */
0x1f /* 38 Trans,DTMF+TONE+mixer+local*/
};
/*
 * get_b1_facilities
 *
 * Return the B1_FACILITY_* bit mask supported by the given B1 resource
 * number.  The base mask comes from b1_facilities_table and is augmented:
 * for the transparent/advanced-voice resources (9, 20, 25) soft-DTMF
 * send/receive bits are added when the adapter provides them and neither
 * the connection nor the application requested the private DTMF-tone
 * option; for the modem resources (17, 18) DTMF send and receive are
 * added when the adapter supports V.18 or VOWN.
 *
 * Cleanup vs. original: removed a commented-out debug statement (dead
 * code, which also referenced a misspelled variable "b1_facilites").
 */
static word get_b1_facilities(PLCI *plci, byte b1_resource)
{
	word b1_facilities;

	b1_facilities = b1_facilities_table[b1_resource];
	if ((b1_resource == 9) || (b1_resource == 20) || (b1_resource == 25))
	{
		if (!(((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
		      || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
		{
			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND)
				b1_facilities |= B1_FACILITY_DTMFX;
			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)
				b1_facilities |= B1_FACILITY_DTMFR;
		}
	}
	if ((b1_resource == 17) || (b1_resource == 18))
	{
		if (plci->adapter->manufacturer_features & (MANUFACTURER_FEATURE_V18 | MANUFACTURER_FEATURE_VOWN))
			b1_facilities |= B1_FACILITY_DTMFX | B1_FACILITY_DTMFR;
	}
	return (b1_facilities);
}
/*
 * add_b1_facilities
 *
 * Given a current B1 resource number and a desired facility mask, pick
 * the B1 resource that provides those facilities while staying in the
 * same resource family (HDLC, HDLC 56k, transparent/voice, RTP).
 * Resources outside the known families are returned unchanged.  The
 * transparent family additionally chooses between the EC, DTMF+TONE,
 * hardware/soft DTMF and plain variants based on the requested facility
 * bits, the application's private DTMF-tone option and the adapter's
 * manufacturer features.
 */
static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities)
{
byte b;
switch (b1_resource)
{
case 5:
case 26:
/* HDLC family: 26 adds the mixer */
if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 26;
else
b = 5;
break;
case 8:
case 27:
/* HDLC 56k family: 27 adds the mixer */
if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 27;
else
b = 8;
break;
case 9:
case 20:
case 22:
case 23:
case 24:
case 25:
case 28:
case 29:
case 30:
case 36:
case 37:
case 38:
/* transparent/voice family */
if (b1_facilities & B1_FACILITY_EC)
{
/* echo canceller variants (28..30) */
if (b1_facilities & B1_FACILITY_LOCAL)
b = 30;
else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 29;
else
b = 28;
}
else if ((b1_facilities & (B1_FACILITY_DTMFX | B1_FACILITY_DTMFR | B1_FACILITY_MIXER))
&& (((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
|| (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
{
/* DTMF+TONE variants (36..38) when the private tone option is requested */
if (b1_facilities & B1_FACILITY_LOCAL)
b = 38;
else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 37;
else
b = 36;
}
else if (((plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
&& !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
|| ((b1_facilities & B1_FACILITY_DTMFR)
&& ((b1_facilities & B1_FACILITY_MIXER)
|| !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)))
|| ((b1_facilities & B1_FACILITY_DTMFX)
&& ((b1_facilities & B1_FACILITY_MIXER)
|| !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND))))
{
/* DTMF variants (22..24) when hard DTMF is needed or soft DTMF is unavailable */
if (b1_facilities & B1_FACILITY_LOCAL)
b = 24;
else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 23;
else
b = 22;
}
else
{
/* plain transparent variants (9, 20, 25) */
if (b1_facilities & B1_FACILITY_LOCAL)
b = 25;
else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 20;
else
b = 9;
}
break;
case 31:
case 32:
case 33:
/* RTP family */
if (b1_facilities & B1_FACILITY_LOCAL)
b = 33;
else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
b = 32;
else
b = 31;
break;
default:
b = b1_resource;
}
dbug(1, dprintf("[%06lx] %s,%d: add_b1_facilities %d %04x %d %04x",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__,
b1_resource, b1_facilities, b, get_b1_facilities(plci, b)));
return (b);
}
/*
 * adjust_b1_facilities
 *
 * Commit a new B1 facility mask on the PLCI: the requested mask is first
 * limited to what the new B1 resource actually supports, then the
 * configuration of every facility that is being dropped (EC, DTMF
 * receive/send, mixer, advanced voice) is cleared before the new mask is
 * stored in plci->B1_facilities.
 */
static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities)
{
word removed_facilities;
dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_facilities %d %04x %04x",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__, new_b1_resource, new_b1_facilities,
new_b1_facilities & get_b1_facilities(plci, new_b1_resource)));
new_b1_facilities &= get_b1_facilities(plci, new_b1_resource);
removed_facilities = plci->B1_facilities & ~new_b1_facilities;
if (removed_facilities & B1_FACILITY_EC)
ec_clear_config(plci);
if (removed_facilities & B1_FACILITY_DTMFR)
{
dtmf_rec_clear_config(plci);
dtmf_parameter_clear_config(plci);
}
if (removed_facilities & B1_FACILITY_DTMFX)
dtmf_send_clear_config(plci);
if (removed_facilities & B1_FACILITY_MIXER)
mixer_clear_config(plci);
if (removed_facilities & B1_FACILITY_VOICE)
adv_voice_clear_config(plci);
plci->B1_facilities = new_b1_facilities;
}
/*
 * adjust_b_clear
 *
 * Reset the B-channel adjustment state: clear the flag that would make a
 * later adjust_b sequence restore the previous configuration.
 */
static void adjust_b_clear(PLCI *plci)
{
dbug(1, dprintf("[%06lx] %s,%d: adjust_b_clear",
(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
(char *)(FILE_), __LINE__));
plci->adjust_b_restore = false;
}
/*
 * Multi-step state machine that adapts the B-channel configuration of a
 * PLCI: optionally save the per-feature settings, tear down L2/L3,
 * switch the L1 resource, reassign/reconnect L2/L3 and restore the saved
 * settings.  Which phases run is selected by the ADJUST_B_MODE_* bits in
 * plci->adjust_b_mode; progress is tracked in plci->adjust_b_state.
 * The function is re-entered with the return code (Rc) of the previously
 * issued request until ADJUST_B_END is reached; every case fall-through
 * below is intentional.  Returns GOOD on success or _WRONG_STATE when a
 * phase fails.
 */
static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	byte b1_resource;
	NCCI *ncci_ptr;
	API_PARSE bp[2];
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_process %02x %d",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	switch (plci->adjust_b_state)
	{
	case ADJUST_B_START:
		/* Fast path: pure L1 switch with unchanged B-protocol parameters. */
		if ((plci->adjust_b_parms_msg == NULL)
		    && (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
		    && ((plci->adjust_b_mode & ~(ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 |
						 ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_RESTORE)) == 0))
		{
			b1_resource = (plci->adjust_b_mode == ADJUST_B_MODE_NO_RESOURCE) ?
				0 : add_b1_facilities(plci, plci->B1_resource, plci->adjust_b_facilities);
			if (b1_resource == plci->B1_resource)
			{
				/* Resource unchanged: adjusting the facilities is enough. */
				adjust_b1_facilities(plci, b1_resource, plci->adjust_b_facilities);
				break;
			}
			if (plci->adjust_b_facilities & ~get_b1_facilities(plci, b1_resource))
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B nonsupported facilities %d %d %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__,
						plci->B1_resource, b1_resource, plci->adjust_b_facilities));
				Info = _WRONG_STATE;
				break;
			}
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			/* Announce the upcoming switch to every feature module. */
			mixer_prepare_switch(Id, plci);
			dtmf_prepare_switch(Id, plci);
			dtmf_parameter_prepare_switch(Id, plci);
			ec_prepare_switch(Id, plci);
			adv_voice_prepare_switch(Id, plci);
		}
		plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_MIXER_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = mixer_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_DTMF_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = dtmf_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
		/* fall through */
	case ADJUST_B_REMOVE_L23_1:
		if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
		    && plci->NL.Id && !plci->nl_remove_id)
		{
			plci->internal_command = plci->adjust_b_command;
			if (plci->adjust_b_ncci != 0)
			{
				/* Drain outstanding data and data acknowledges of the NCCI
				   before the network layer entity is removed. */
				ncci_ptr = &(plci->adapter->ncci[plci->adjust_b_ncci]);
				while (ncci_ptr->data_pending)
				{
					plci->data_sent_ptr = ncci_ptr->DBuffer[ncci_ptr->data_out].P;
					data_rc(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
				}
				while (ncci_ptr->data_ack_pending)
					data_ack(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
			}
			nl_req_ncci(plci, REMOVE,
				    (byte)((plci->adjust_b_mode & ADJUST_B_MODE_CONNECT) ? plci->adjust_b_ncci : 0));
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
		Rc = OK;
		/* fall through */
	case ADJUST_B_REMOVE_L23_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B remove failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
		{
			/* Wait until the network layer is idle again. */
			if (plci_nl_busy(plci))
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
		}
		plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_EC_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = ec_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_DTMF_PARAMETER_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = dtmf_parameter_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SAVE_VOICE_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
		{
			Info = adv_voice_save_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
		/* fall through */
	case ADJUST_B_SWITCH_L1_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
		{
			/* A signaling request may still be outstanding; retry later. */
			if (plci->sig_req)
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			if (plci->adjust_b_parms_msg != NULL)
				api_load_msg(plci->adjust_b_parms_msg, bp);
			else
				api_load_msg(&plci->B_protocol, bp);
			Info = add_b1(plci, bp,
				      (word)((plci->adjust_b_mode & ADJUST_B_MODE_NO_RESOURCE) ? 2 : 0),
				      plci->adjust_b_facilities);
			if (Info != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L1 parameters %d %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__,
						plci->B1_resource, plci->adjust_b_facilities));
				break;
			}
			plci->internal_command = plci->adjust_b_command;
			sig_req(plci, RESOURCES, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
		Rc = OK;
		/* fall through */
	case ADJUST_B_SWITCH_L1_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B switch failed %02x %d %04x",
					UnMapId(Id), (char *)(FILE_), __LINE__,
					Rc, plci->B1_resource, plci->adjust_b_facilities));
			Info = _WRONG_STATE;
			break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_VOICE_1:
	case ADJUST_B_RESTORE_VOICE_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = adv_voice_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
	case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = dtmf_parameter_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_EC_1:
	case ADJUST_B_RESTORE_EC_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = ec_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
		/* fall through */
	case ADJUST_B_ASSIGN_L23_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
		{
			if (plci_nl_busy(plci))
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
				plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
			if (plci->adjust_b_parms_msg != NULL)
				api_load_msg(plci->adjust_b_parms_msg, bp);
			else
				api_load_msg(&plci->B_protocol, bp);
			Info = add_b23(plci, bp);
			if (Info != GOOD)
			{
				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L23 parameters %04x",
						UnMapId(Id), (char *)(FILE_), __LINE__, Info));
				break;
			}
			plci->internal_command = plci->adjust_b_command;
			nl_req_ncci(plci, ASSIGN, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
		Rc = ASSIGN_OK;
		/* fall through */
	case ADJUST_B_ASSIGN_L23_2:
		if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B assign failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
		{
			/* OK/OK_FC only acknowledge the request; wait for ASSIGN_OK. */
			if (Rc != ASSIGN_OK)
			{
				plci->internal_command = plci->adjust_b_command;
				break;
			}
		}
		if (plci->adjust_b_mode & ADJUST_B_MODE_USER_CONNECT)
		{
			/* The application will issue the connect; remember that the
			   saved configuration still has to be restored then. */
			plci->adjust_b_restore = true;
			break;
		}
		plci->adjust_b_state = ADJUST_B_CONNECT_1;
		/* fall through */
	case ADJUST_B_CONNECT_1:
		if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
		{
			plci->internal_command = plci->adjust_b_command;
			if (plci_nl_busy(plci))
				break;
			nl_req_ncci(plci, N_CONNECT, 0);
			send_req(plci);
			plci->adjust_b_state = ADJUST_B_CONNECT_2;
			break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_CONNECT_2:
	case ADJUST_B_CONNECT_3:
	case ADJUST_B_CONNECT_4:
		if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B connect failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		/* Both the request return code (OK) and the connect indication
		   (Rc == 0) must arrive, in either order, before continuing. */
		if (Rc == OK)
		{
			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
			{
				get_ncci(plci, (byte)(Id >> 16), plci->adjust_b_ncci);
				Id = (Id & 0xffff) | (((dword)(plci->adjust_b_ncci)) << 16);
			}
			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
				plci->adjust_b_state = ADJUST_B_CONNECT_3;
			else if (plci->adjust_b_state == ADJUST_B_CONNECT_4)
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		}
		else if (Rc == 0)
		{
			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
				plci->adjust_b_state = ADJUST_B_CONNECT_4;
			else if (plci->adjust_b_state == ADJUST_B_CONNECT_3)
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
		}
		if (plci->adjust_b_state != ADJUST_B_RESTORE_DTMF_1)
		{
			plci->internal_command = plci->adjust_b_command;
			break;
		}
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_DTMF_1:
	case ADJUST_B_RESTORE_DTMF_2:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = dtmf_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_MIXER_1:
	case ADJUST_B_RESTORE_MIXER_2:
	case ADJUST_B_RESTORE_MIXER_3:
	case ADJUST_B_RESTORE_MIXER_4:
	case ADJUST_B_RESTORE_MIXER_5:
	case ADJUST_B_RESTORE_MIXER_6:
	case ADJUST_B_RESTORE_MIXER_7:
		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
		{
			Info = mixer_restore_config(Id, plci, Rc);
			if ((Info != GOOD) || plci->internal_command)
				break;
		}
		plci->adjust_b_state = ADJUST_B_END;
		/* fall through */
	case ADJUST_B_END:
		break;
	}
	return (Info);
}
static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command)
{
	/* Queue an adjust-B run that switches the B1 resource of this PLCI.
	   With bp_msg == NULL and no resource currently allocated the channel
	   only has to be saved and released (no restore phase afterwards). */
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_resource %d %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__,
			plci->B1_resource, b1_facilities));
	plci->adjust_b_ncci = (word)(Id >> 16);
	plci->adjust_b_command = internal_command;
	plci->adjust_b_facilities = b1_facilities;
	plci->adjust_b_parms_msg = bp_msg;
	if ((bp_msg != NULL) || (plci->B1_resource != 0))
		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_RESTORE;
	else
		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_SWITCH_L1;
	plci->adjust_b_state = ADJUST_B_START;
	dbug(1, dprintf("[%06lx] %s,%d: Adjust B1 resource %d %04x...",
			UnMapId(Id), (char *)(FILE_), __LINE__,
			plci->B1_resource, b1_facilities));
}
/*
 * Restore the previously saved B-channel configuration of a PLCI.
 * First entry (default case) waits until no request is being assembled,
 * then starts an adjust_b_process() run in ADJUST_B_MODE_RESTORE mode
 * and drives it to completion on subsequent entries.  Case fall-through
 * is intentional.
 */
static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
{
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_restore %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		if (plci->req_in != 0)
		{
			/* A request is still queued: re-enter when it completed. */
			plci->internal_command = ADJUST_B_RESTORE_1;
			break;
		}
		Rc = OK;
		/* fall through */
	case ADJUST_B_RESTORE_1:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B enqueued failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
		}
		/* Set up the restore-only adjust-B run. */
		plci->adjust_b_parms_msg = NULL;
		plci->adjust_b_facilities = plci->B1_facilities;
		plci->adjust_b_command = ADJUST_B_RESTORE_2;
		plci->adjust_b_ncci = (word)(Id >> 16);
		plci->adjust_b_mode = ADJUST_B_MODE_RESTORE;
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case ADJUST_B_RESTORE_2:
		if (adjust_b_process(Id, plci, Rc) != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
		}
		if (plci->internal_command)
			break;
		break;
	}
}
/*
 * Handle a RESET_B3 request: remove and reassign/reconnect the L2/L3
 * entity via the adjust-B state machine, then send a RESET_B3 indication
 * to the application.  Case fall-through is intentional.
 */
static void reset_b3_command(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: reset_b3_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* Start an adjust-B run that tears down and rebuilds L2/L3. */
		plci->adjust_b_parms_msg = NULL;
		plci->adjust_b_facilities = plci->B1_facilities;
		plci->adjust_b_command = RESET_B3_COMMAND_1;
		plci->adjust_b_ncci = (word)(Id >> 16);
		plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_CONNECT;
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: Reset B3...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case RESET_B3_COMMAND_1:
		Info = adjust_b_process(Id, plci, Rc);
		if (Info != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: Reset failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			break;
		}
		if (plci->internal_command)
			return;
		break;
	}
	/* sendf (plci->appl, _RESET_B3_R | CONFIRM, Id, plci->number, "w", Info);*/
	sendf(plci->appl, _RESET_B3_I, Id, 0, "s", "");
}
/*
 * Handle a SELECT_B_PROTOCOL request: save the current configuration,
 * remove L2/L3, switch L1 and - if a B protocol was supplied - reassign
 * L2/L3 and restore, via the adjust-B state machine.  Finishes by
 * re-binding the advanced-voice codec channel (if applicable) and sending
 * the SELECT_B confirmation.  Case fall-through is intentional.
 */
static void select_b_command(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	word internal_command;
	byte esc_chi[3];
	dbug(1, dprintf("[%06lx] %s,%d: select_b_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		plci->adjust_b_parms_msg = &plci->saved_msg;
		/* Advanced voice only applies to the adapter's signaling PLCI. */
		if ((plci->tel == ADV_VOICE) && (plci == plci->adapter->AdvSignalPLCI))
			plci->adjust_b_facilities = plci->B1_facilities | B1_FACILITY_VOICE;
		else
			plci->adjust_b_facilities = plci->B1_facilities & ~B1_FACILITY_VOICE;
		plci->adjust_b_command = SELECT_B_COMMAND_1;
		plci->adjust_b_ncci = (word)(Id >> 16);
		/* An empty B-protocol parameter releases the resource instead of
		   reassigning a new protocol stack. */
		if (plci->saved_msg.parms[0].length == 0)
		{
			plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
				ADJUST_B_MODE_NO_RESOURCE;
		}
		else
		{
			plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
				ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
		}
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: Select B protocol...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case SELECT_B_COMMAND_1:
		Info = adjust_b_process(Id, plci, Rc);
		if (Info != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: Select B protocol failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			break;
		}
		if (plci->internal_command)
			return;
		if (plci->tel == ADV_VOICE)
		{
			/* Channel identification IE: B-channel of this PLCI. */
			esc_chi[0] = 0x02;
			esc_chi[1] = 0x18;
			esc_chi[2] = plci->b_channel;
			SetVoiceChannel(plci->adapter->AdvCodecPLCI, esc_chi, plci->adapter);
		}
		break;
	}
	sendf(plci->appl, _SELECT_B_REQ | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * Send an N_CONNECT_ACK carrying the stored FAX connect info to the
 * network layer and, on success, deliver a pending CONNECT_B3_ACTIVE
 * indication to the application.  Case fall-through is intentional.
 */
static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* fall through */
	case FAX_CONNECT_ACK_COMMAND_1:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = FAX_CONNECT_ACK_COMMAND_1;
			return;
		}
		plci->internal_command = FAX_CONNECT_ACK_COMMAND_2;
		plci->NData[0].P = plci->fax_connect_info_buffer;
		plci->NData[0].PLength = plci->fax_connect_info_length;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_CONNECT_ACK;
		plci->adapter->request(&plci->NL);
		return;
	case FAX_CONNECT_ACK_COMMAND_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: FAX issue CONNECT ACK failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			break;
		}
	}
	/* Forward the CONNECT_B3_ACTIVE indication once valid NCPI data is
	   available and it was not already sent. */
	if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
	    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
	{
		if (plci->B3_prot == 4)
			sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
		else
			sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
		plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
	}
}
/*
 * Acknowledge received FAX extended data by sending an N_EDATA request
 * built from the stored connect info buffer.  Case fall-through is
 * intentional.
 */
static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* fall through */
	case FAX_EDATA_ACK_COMMAND_1:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = FAX_EDATA_ACK_COMMAND_1;
			return;
		}
		plci->internal_command = FAX_EDATA_ACK_COMMAND_2;
		plci->NData[0].P = plci->fax_connect_info_buffer;
		plci->NData[0].PLength = plci->fax_edata_ack_length;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_EDATA;
		plci->adapter->request(&plci->NL);
		return;
	case FAX_EDATA_ACK_COMMAND_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: FAX issue EDATA ACK failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			break;
		}
	}
}
/*
 * Transmit the FAX connect info via N_EDATA and then issue the
 * N_CONNECT for the B3 connection; on failure a CONNECT_B3 confirmation
 * with _WRONG_STATE is sent.  Case fall-through is intentional.
 */
static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: fax_connect_info_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* fall through */
	case FAX_CONNECT_INFO_COMMAND_1:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = FAX_CONNECT_INFO_COMMAND_1;
			return;
		}
		plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
		plci->NData[0].P = plci->fax_connect_info_buffer;
		plci->NData[0].PLength = plci->fax_connect_info_length;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_EDATA;
		plci->adapter->request(&plci->NL);
		return;
	case FAX_CONNECT_INFO_COMMAND_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: FAX setting connect info failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci_nl_busy(plci))
		{
			plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
			return;
		}
		/* EDATA accepted: now request the actual B3 connection. */
		plci->command = _CONNECT_B3_R;
		nl_req_ncci(plci, N_CONNECT, 0);
		send_req(plci);
		return;
	}
	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * Rebuild the L2/L3 stack for a FAX connection (remove + assign via the
 * adjust-B state machine), then issue the N_CONNECT; on failure a
 * CONNECT_B3 confirmation with the error is sent.  Case fall-through is
 * intentional.
 */
static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: fax_adjust_b23_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		plci->adjust_b_parms_msg = NULL;
		plci->adjust_b_facilities = plci->B1_facilities;
		plci->adjust_b_command = FAX_ADJUST_B23_COMMAND_1;
		plci->adjust_b_ncci = (word)(Id >> 16);
		plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23;
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: FAX adjust B23...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case FAX_ADJUST_B23_COMMAND_1:
		Info = adjust_b_process(Id, plci, Rc);
		if (Info != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: FAX adjust failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			break;
		}
		if (plci->internal_command)
			return;
		/* fall through */
	case FAX_ADJUST_B23_COMMAND_2:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = FAX_ADJUST_B23_COMMAND_2;
			return;
		}
		plci->command = _CONNECT_B3_R;
		nl_req_ncci(plci, N_CONNECT, 0);
		send_req(plci);
		return;
	}
	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * Track the completion of a FAX disconnect: wait for both the request
 * return code (Rc == OK) and the indication (Rc == 0), in either order,
 * before the internal command is cleared.  Case fall-through is
 * intentional.
 */
static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: fax_disconnect_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		plci->internal_command = FAX_DISCONNECT_COMMAND_1;
		return;
	case FAX_DISCONNECT_COMMAND_1:
	case FAX_DISCONNECT_COMMAND_2:
	case FAX_DISCONNECT_COMMAND_3:
		if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
		{
			dbug(1, dprintf("[%06lx] %s,%d: FAX disconnect EDATA failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			break;
		}
		if (Rc == OK)
		{
			/* Request confirmed; still waiting for the indication. */
			if ((internal_command == FAX_DISCONNECT_COMMAND_1)
			    || (internal_command == FAX_DISCONNECT_COMMAND_2))
			{
				plci->internal_command = FAX_DISCONNECT_COMMAND_2;
			}
		}
		else if (Rc == 0)
		{
			/* Indication arrived; still waiting for the confirmation. */
			if (internal_command == FAX_DISCONNECT_COMMAND_1)
				plci->internal_command = FAX_DISCONNECT_COMMAND_3;
		}
		return;
	}
}
/*
 * Establish an RTP B3 connection: issue N_CONNECT, then transmit the
 * RTP parameters stored in internal_req_buffer as N_UDATA.  Sends a
 * CONNECT_B3 confirmation on failure or after the UDATA request was
 * submitted.  Case fall-through is intentional.
 */
static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc)
{
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_req_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* fall through */
	case RTP_CONNECT_B3_REQ_COMMAND_1:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_1;
			return;
		}
		plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
		nl_req_ncci(plci, N_CONNECT, 0);
		send_req(plci);
		return;
	case RTP_CONNECT_B3_REQ_COMMAND_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect info failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			Info = _WRONG_STATE;
			break;
		}
		if (plci_nl_busy(plci))
		{
			plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
			return;
		}
		/* internal_req_buffer: first byte is the length, data follows. */
		plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_3;
		plci->NData[0].PLength = plci->internal_req_buffer[0];
		plci->NData[0].P = plci->internal_req_buffer + 1;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
		plci->adapter->request(&plci->NL);
		break;
	case RTP_CONNECT_B3_REQ_COMMAND_3:
		return;
	}
	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}
/*
 * Answer an incoming RTP B3 connection: issue N_CONNECT_ACK, report
 * CONNECT_B3_ACTIVE to the application and transmit the RTP parameters
 * stored in internal_req_buffer as N_UDATA.  Case fall-through is
 * intentional.
 */
static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc)
{
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		/* fall through */
	case RTP_CONNECT_B3_RES_COMMAND_1:
		if (plci_nl_busy(plci))
		{
			/* Network layer busy: retry on the next entry. */
			plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_1;
			return;
		}
		plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
		nl_req_ncci(plci, N_CONNECT_ACK, (byte)(Id >> 16));
		send_req(plci);
		return;
	case RTP_CONNECT_B3_RES_COMMAND_2:
		if ((Rc != OK) && (Rc != OK_FC))
		{
			dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
			break;
		}
		if (plci_nl_busy(plci))
		{
			plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
			return;
		}
		sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
		/* internal_req_buffer: first byte is the length, data follows. */
		plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_3;
		plci->NData[0].PLength = plci->internal_req_buffer[0];
		plci->NData[0].P = plci->internal_req_buffer + 1;
		plci->NL.X = plci->NData;
		plci->NL.ReqCh = 0;
		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
		plci->adapter->request(&plci->NL);
		return;
	case RTP_CONNECT_B3_RES_COMMAND_3:
		return;
	}
}
/*
 * Put a call on hold: save the B-channel configuration and remove L2/L3
 * via the adjust-B state machine, then send the Hold indication
 * (supplementary service) to the application.  Case fall-through is
 * intentional.
 */
static void hold_save_command(dword Id, PLCI *plci, byte Rc)
{
	byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct */
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: hold_save_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		/* No network layer entity: nothing to save, report directly. */
		if (!plci->NL.Id)
			break;
		plci->command = 0;
		plci->adjust_b_parms_msg = NULL;
		plci->adjust_b_facilities = plci->B1_facilities;
		plci->adjust_b_command = HOLD_SAVE_COMMAND_1;
		plci->adjust_b_ncci = (word)(Id >> 16);
		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23;
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: HOLD save...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case HOLD_SAVE_COMMAND_1:
		Info = adjust_b_process(Id, plci, Rc);
		if (Info != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: HOLD save failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			break;
		}
		if (plci->internal_command)
			return;
	}
	sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}
/*
 * Retrieve a held call: reassign L2/L3 and restore the saved B-channel
 * configuration via the adjust-B state machine, then send the Retrieve
 * indication (supplementary service) to the application.  Case
 * fall-through is intentional.
 */
static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc)
{
	byte SS_Ind[] = "\x05\x03\x00\x02\x00\x00"; /* Retrieve_Ind struct */
	word Info;
	word internal_command;
	dbug(1, dprintf("[%06lx] %s,%d: retrieve_restore_command %02x %04x",
			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
	Info = GOOD;
	internal_command = plci->internal_command;
	plci->internal_command = 0;
	switch (internal_command)
	{
	default:
		plci->command = 0;
		plci->adjust_b_parms_msg = NULL;
		plci->adjust_b_facilities = plci->B1_facilities;
		plci->adjust_b_command = RETRIEVE_RESTORE_COMMAND_1;
		plci->adjust_b_ncci = (word)(Id >> 16);
		plci->adjust_b_mode = ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
		plci->adjust_b_state = ADJUST_B_START;
		dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore...",
				UnMapId(Id), (char *)(FILE_), __LINE__));
		/* fall through */
	case RETRIEVE_RESTORE_COMMAND_1:
		Info = adjust_b_process(Id, plci, Rc);
		if (Info != GOOD)
		{
			dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore failed",
					UnMapId(Id), (char *)(FILE_), __LINE__));
			break;
		}
		if (plci->internal_command)
			return;
	}
	sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}
static void init_b1_config(PLCI *plci)
{
	/* Bring all B1 (layer 1) related state of a PLCI into its default
	   "no resource allocated" condition. */
	dbug(1, dprintf("[%06lx] %s,%d: init_b1_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	plci->li_bchannel_id = 0;
	plci->B1_facilities = 0;
	plci->B1_resource = 0;
	/* Reset every per-feature configuration block. */
	mixer_clear_config(plci);
	ec_clear_config(plci);
	dtmf_rec_clear_config(plci);
	dtmf_send_clear_config(plci);
	dtmf_parameter_clear_config(plci);
	adv_voice_clear_config(plci);
	adjust_b_clear(plci);
}
/*
 * Release all B1 (layer 1) related state of a PLCI, including its slot
 * in the line-interconnect configuration table if this PLCI still owns
 * its B-channel entry there.
 */
static void clear_b1_config(PLCI *plci)
{
	dbug(1, dprintf("[%06lx] %s,%d: clear_b1_config",
			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
			(char *)(FILE_), __LINE__));
	adv_voice_clear_config(plci);
	adjust_b_clear(plci);
	ec_clear_config(plci);
	dtmf_rec_clear_config(plci);
	dtmf_send_clear_config(plci);
	dtmf_parameter_clear_config(plci);
	/* Only release the interconnect table entry if it still points back
	   to this PLCI (another PLCI may have taken over the channel). */
	if ((plci->li_bchannel_id != 0)
	    && (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci == plci))
	{
		mixer_clear_config(plci);
		li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = NULL;
		plci->li_bchannel_id = 0;
	}
	plci->B1_resource = 0;
	plci->B1_facilities = 0;
}
/* -----------------------------------------------------------------
XON protocol local helpers
----------------------------------------------------------------- */
static void channel_flow_control_remove(PLCI *plci) {
	/* Release every flow-control slot still owned by this PLCI. */
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	word ch;
	for (ch = 1; ch <= MAX_NL_CHANNEL; ch++) {
		if (adapter->ch_flow_plci[ch] != plci->Id)
			continue;
		adapter->ch_flow_plci[ch] = 0;
		adapter->ch_flow_control[ch] = 0;
	}
}
static void channel_x_on(PLCI *plci, byte ch) {
	/* An X-ON for this channel went out: drop the "sent" marker. */
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	if ((adapter->ch_flow_control[ch] & N_XON_SENT) != 0)
		adapter->ch_flow_control[ch] &= ~N_XON_SENT;
}
static void channel_x_off(PLCI *plci, byte ch, byte flag) {
	/* Mark channel ch as flow-stopped.  Only the first X-OFF records the
	   owning PLCI and bumps the pending counter; repeats are ignored. */
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	if ((adapter->ch_flow_control[ch] & N_RX_FLOW_CONTROL_MASK) != 0)
		return;
	adapter->ch_flow_control[ch] |= (N_CH_XOFF | flag);
	adapter->ch_flow_plci[ch] = plci->Id;
	adapter->ch_flow_control_pending++;
}
static void channel_request_xon(PLCI *plci, byte ch) {
	/* Turn a flow-stopped channel into an X-ON candidate: set the request
	   bit and clear the X-OFF / connect-indication markers. */
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	if (!(adapter->ch_flow_control[ch] & N_CH_XOFF))
		return;
	adapter->ch_flow_control[ch] |= N_XON_REQ;
	adapter->ch_flow_control[ch] &= ~(N_CH_XOFF | N_XON_CONNECT_IND);
}
static void channel_xmit_extended_xon(PLCI *plci) {
	/* Request X-ON for every flow-stopped channel of this PLCI that was
	   marked for handling on connect indication, then try to transmit
	   one pending X-ON. */
	DIVA_CAPI_ADAPTER *adapter;
	int ch, requested;
	if (plci == NULL || plci->Id == 0)
		return;
	adapter = plci->adapter;
	if (adapter == NULL)
		return;
	requested = 0;
	for (ch = 0; ch < (int)ARRAY_SIZE(adapter->ch_flow_control); ch++) {
		if ((adapter->ch_flow_control[ch] & N_CH_XOFF) &&
		    (adapter->ch_flow_control[ch] & N_XON_CONNECT_IND) &&
		    (adapter->ch_flow_plci[ch] == plci->Id)) {
			channel_request_xon(plci, (byte)ch);
			requested = 1;
		}
	}
	if (requested)
		channel_xmit_xon(plci);
}
/*
Try to xmit next X_ON
*/
static int find_channel_with_pending_x_on(DIVA_CAPI_ADAPTER *a, PLCI *plci) {
	/* Round-robin search for the next channel of this PLCI with a pending
	   X-ON request, resuming at a->last_flow_control_ch.  Returns the
	   channel number, or 0 if none is pending (or the adapter does not
	   support X-ON/X-OFF flow control). */
	int limit = ARRAY_SIZE(a->ch_flow_control);
	int ch;
	if (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)) {
		return (0);
	}
	if (a->last_flow_control_ch >= limit) {
		a->last_flow_control_ch = 1;
	}
	/* First pass: from the resume point to the end of the table. */
	for (ch = a->last_flow_control_ch; ch < limit; ch++) {
		if ((a->ch_flow_control[ch] & N_XON_REQ) &&
		    (a->ch_flow_plci[ch] == plci->Id)) {
			a->last_flow_control_ch = ch + 1;
			return (ch);
		}
	}
	/* Second pass: wrap around from channel 1. */
	for (ch = 1; ch < a->last_flow_control_ch; ch++) {
		if ((a->ch_flow_control[ch] & N_XON_REQ) &&
		    (a->ch_flow_plci[ch] == plci->Id)) {
			a->last_flow_control_ch = ch + 1;
			return (ch);
		}
	}
	return (0);
}
static void channel_xmit_xon(PLCI *plci) {
	/* Transmit one pending X-ON (zero-length N_XON request) for the next
	   eligible channel of this PLCI, if the network layer is available. */
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	byte ch;
	if (plci->nl_req || !plci->NL.Id || plci->nl_remove_id) {
		return;
	}
	ch = (byte)find_channel_with_pending_x_on(adapter, plci);
	if (ch == 0) {
		return;
	}
	adapter->ch_flow_control[ch] &= ~N_XON_REQ;
	adapter->ch_flow_control[ch] |= N_XON_SENT;
	/* Build an empty N_XON request for the selected channel. */
	plci->NL.Req = plci->nl_req = (byte)N_XON;
	plci->NL.ReqCh = ch;
	plci->NL.X = plci->NData;
	plci->NL.XNum = 1;
	plci->NData[0].P = &plci->RBuffer[0];
	plci->NData[0].PLength = 0;
	plci->adapter->request(&plci->NL);
}
static int channel_can_xon(PLCI *plci, byte ch) {
	/* Decide whether an X-ON may be issued for channel ch: count the
	   buffers in the application pool that already belong to the same
	   NCCI and locate a free slot.  X-ON is allowed only while at most
	   two buffers are queued and a free slot exists. */
	APPL *appl = plci->appl;
	DIVA_CAPI_ADAPTER *adapter = plci->adapter;
	word ncci_code;
	dword used;
	word free_slot;
	word i;
	if (appl == NULL)
		return (0);
	ncci_code = adapter->ch_ncci[ch] | (((word) adapter->Id) << 8);
	used = 0;
	free_slot = 0xffff;
	for (i = 0; i < appl->MaxBuffer; i++) {
		if (appl->DataNCCI[i] == ncci_code)
			used++;
		if (appl->DataNCCI[i] == 0 && free_slot == 0xffff)
			free_slot = i;
	}
	if (used > 2 || free_slot == 0xffff)
		return (0);
	return (1);
}
/*------------------------------------------------------------------*/
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *a, word offset)
{
	/* Placeholder hook: no CPN (calling party number) filtering is
	   performed, every number is accepted. */
	(void) cpn;
	(void) a;
	(void) offset;
	return (1);
}
/**********************************************************************************/
/* function groups the listening applications according to the CIP mask and the */
/* Info_Mask. Each group gets just one Connect_Ind. Some application manufacturer */
/* are not multi-instance capable, so they start e.g. 30 applications which */
/* causes big problems on application level (one call, 30 Connect_Ind, etc). The */
/* function must be enabled by setting "a->group_optimization_enabled" from the */
/* OS specific part (per adapter). */
/**********************************************************************************/
/*
 * Group all listening applications that share the same (CIP mask,
 * Info_Mask) pair so that each group receives only one Connect_Ind per
 * incoming call (see block comment above).  Busy applications and those
 * with a call already pending are not grouped; if any multi-instance
 * capable application is listening (mode 1), no filtering is done at all.
 * Group members beyond the first get their bit cleared in the PLCI's
 * group indication mask.
 */
static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
{
	word i, j, k, busy, group_found;
	dword info_mask_group[MAX_CIP_TYPES];
	dword cip_mask_group[MAX_CIP_TYPES];
	word appl_number_group_type[MAX_APPL];
	PLCI *auxplci;

	set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */

	if (!a->group_optimization_enabled)
	{
		dbug(1, dprintf("No group optimization"));
		return;
	}

	dbug(1, dprintf("Group optimization = 0x%x...", a->group_optimization_enabled));

	for (i = 0; i < MAX_CIP_TYPES; i++)
	{
		info_mask_group[i] = 0;
		cip_mask_group[i] = 0;
	}
	for (i = 0; i < MAX_APPL; i++)
	{
		appl_number_group_type[i] = 0;
	}
	for (i = 0; i < max_appl; i++) /* check if any multi instance capable application is present */
	{  /* group_optimization set to 1 means not to optimize multi-instance capable applications (default) */
		if (application[i].Id && (application[i].MaxNCCI) > 1 && (a->CIP_Mask[i]) && (a->group_optimization_enabled == 1))
		{
			dbug(1, dprintf("Multi-Instance capable, no optimization required"));
			return; /* allow good application unfiltered access */
		}
	}
	for (i = 0; i < max_appl; i++) /* Build CIP Groups */
	{
		if (application[i].Id && a->CIP_Mask[i])
		{
			/* an application is "busy" if it owns a PLCI or has an
			   incoming call pending on any PLCI of this adapter */
			for (k = 0, busy = false; k < a->max_plci; k++)
			{
				if (a->plci[k].Id)
				{
					auxplci = &a->plci[k];
					if (auxplci->appl == &application[i]) /* application has a busy PLCI */
					{
						busy = true;
						dbug(1, dprintf("Appl 0x%x is busy", i + 1));
					}
					else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */
					{
						busy = true;
						dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1));
					}
				}
			}
			/* NOTE(review): the two group arrays look name-swapped --
			   info_mask_group[] is compared against CIP_Mask[] and
			   cip_mask_group[] against Info_Mask[].  The pairing is used
			   consistently below, so behavior is correct; only the
			   naming is odd. */
			for (j = 0, group_found = 0; j <= (MAX_CIP_TYPES) && !busy && !group_found; j++) /* build groups with free applications only */
			{
				if (j == MAX_CIP_TYPES) /* all groups are in use but group still not found */
				{ /* the MAX_CIP_TYPES group enables all calls because of field overflow */
					appl_number_group_type[i] = MAX_CIP_TYPES;
					group_found = true;
					dbug(1, dprintf("Field overflow appl 0x%x", i + 1));
				}
				else if ((info_mask_group[j] == a->CIP_Mask[i]) && (cip_mask_group[j] == a->Info_Mask[i]))
				{ /* is group already present ? */
					appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
					group_found = true;
					dbug(1, dprintf("Group 0x%x found with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
				}
				else if (!info_mask_group[j])
				{ /* establish a new group */
					appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
					info_mask_group[j] = a->CIP_Mask[i]; /* store the new CIP mask for the new group */
					cip_mask_group[j] = a->Info_Mask[i]; /* store the new Info_Mask for this new group */
					group_found = true;
					dbug(1, dprintf("New Group 0x%x established with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
				}
			}
		}
	}

	for (i = 0; i < max_appl; i++) /* Build group_optimization_mask_table */
	{
		if (appl_number_group_type[i]) /* application is free, has listens and is member of a group */
		{
			if (appl_number_group_type[i] == MAX_CIP_TYPES)
			{ /* overflow group: every member keeps its call enabled */
				dbug(1, dprintf("OverflowGroup 0x%x, valid appl = 0x%x, call enabled", appl_number_group_type[i], i + 1));
			}
			else
			{
				dbug(1, dprintf("Group 0x%x, valid appl = 0x%x", appl_number_group_type[i], i + 1));
				for (j = i + 1; j < max_appl; j++) /* search other group members and mark them as busy */
				{
					if (appl_number_group_type[i] == appl_number_group_type[j])
					{
						dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j]));
						clear_group_ind_mask_bit(plci, j); /* disable call on other group members */
						appl_number_group_type[j] = 0; /* remove disabled group member from group list */
					}
				}
			}
		}
		else /* application should not get a call */
		{
			clear_group_ind_mask_bit(plci, i);
		}
	}
}
/* OS notifies the driver about a application Capi_Register */
/*
 * OS notifies the driver about an application Capi_Register.
 *
 * Returns true when at least one other application (a different Id) is
 * already registered -- nothing to activate in that case.  For the very
 * first registration, L1 is re-activated on every adapter that was put
 * into the dynamic-L1-down tristate (hunt group handling) by assigning a
 * signalling entity and issuing an l1-start FTY request.
 */
word CapiRegister(word id)
{
	word i, j, appls_found;

	PLCI *plci;
	DIVA_CAPI_ADAPTER *a;

	for (i = 0, appls_found = 0; i < max_appl; i++)
	{
		if (application[i].Id && (application[i].Id != id)) /* some other application is registered */
		{
			appls_found++; /* an application has been found */
		}
	}

	if (appls_found) return true;
	for (i = 0; i < max_adapter; i++) /* scan all adapters... */
	{
		a = &adapter[i];
		if (a->request)
		{
			if (a->flag_dynamic_l1_down) /* remove adapter from L1 tristate (Huntgroup) */
			{
				if (!appls_found) /* first application does a capi register */
				{ /* NOTE(review): always true here -- the appls_found != 0
				     case already returned above */
					if ((j = get_plci(a))) /* activate L1 of all adapters */
					{
						plci = &a->plci[j - 1];
						plci->command = 0;
						add_p(plci, OAD, "\x01\xfd");
						add_p(plci, CAI, "\x01\x80");
						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
						add_p(plci, SHIFT | 6, NULL);
						add_p(plci, SIN, "\x02\x00\x00");
						plci->internal_command = START_L1_SIG_ASSIGN_PEND;
						sig_req(plci, ASSIGN, DSIG_ID);
						add_p(plci, FTY, "\x02\xff\x07"); /* l1 start */
						sig_req(plci, SIG_CTRL, 0);
						send_req(plci);
					}
				}
			}
		}
	}
	return false;
}
/*------------------------------------------------------------------*/
/* Functions for virtual Switching e.g. Transfer by join, Conference */
/*
 * Handle virtual-switch (VSWITCH) information elements used for transfer
 * by join and conference.  Walks all ESC/VSWITCH IEs in 'parms', advances
 * the join handshake state kept on this PLCI and forwards the IE to the
 * related (partner) PLCI.  Each IE is zeroed ("killed") once consumed or
 * found invalid.
 */
static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms)
{
	word i;
	/* Format of vswitch_t:
	   0 byte length
	   1 byte VSWITCHIE
	   2 byte VSWITCH_REQ/VSWITCH_IND
	   3 byte reserved
	   4 word VSwitchcommand
	   6 word returnerror
	   8... Params
	*/
	if (!plci ||
	    !plci->appl ||
	    !plci->State ||
	    plci->Sig.Ind == NCR_FACILITY
		)
		return;

	for (i = 0; i < MAX_MULTI_IE; i++)
	{
		if (!parms[i][0]) continue;
		if (parms[i][0] < 7)
		{
			parms[i][0] = 0; /* kill it */
			continue;
		}
		dbug(1, dprintf("VSwitchReqInd(%d)", parms[i][4]));
		switch (parms[i][4])
		{
		case VSJOIN:
			if (!plci->relatedPTYPLCI ||
			    (plci->ptyState != S_ECT && plci->relatedPTYPLCI->ptyState != S_ECT))
			{ /* Error */
				break;
			}
			/* remember all necessary informations */
			if (parms[i][0] != 11 || parms[i][8] != 3) /* Length Test */
			{
				break;
			}
			if (parms[i][2] == VSWITCH_IND && parms[i][9] == 1)
			{ /* first indication after ECT-Request on Consultation Call */
				plci->vswitchstate = parms[i][9];
				parms[i][9] = 2; /* State */
				/* now ask first Call to join */
			}
			else if (parms[i][2] == VSWITCH_REQ && parms[i][9] == 3)
			{ /* Answer of VSWITCH_REQ from first Call */
				plci->vswitchstate = parms[i][9];
				/* tell consultation call to join
				   and the protocol capabilities of the first call */
			}
			else
			{ /* Error */
				break;
			}
			plci->vsprot = parms[i][10]; /* protocol */
			plci->vsprotdialect = parms[i][11]; /* protocoldialect */
			/* send join request to related PLCI */
			parms[i][1] = VSWITCHIE;
			parms[i][2] = VSWITCH_REQ;
			plci->relatedPTYPLCI->command = 0;
			plci->relatedPTYPLCI->internal_command = VSWITCH_REQ_PEND;
			add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
			sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
			send_req(plci->relatedPTYPLCI);
			break;
		case VSTRANSPORT:
		default:
			/* pass transport IEs through only after both sides
			   completed the join handshake (state 3 on each) */
			if (plci->relatedPTYPLCI &&
			    plci->vswitchstate == 3 &&
			    plci->relatedPTYPLCI->vswitchstate == 3)
			{
				add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
				sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
				send_req(plci->relatedPTYPLCI);
			}
			break;
		}
		parms[i][0] = 0; /* kill it */
	}
}
/*------------------------------------------------------------------*/
/*
 * Ask the XDI layer to allocate an RX DMA descriptor for this PLCI's
 * adapter.  On success the descriptor magic is stored in *dma_magic and
 * the descriptor number (>= 0) is returned; -1 means the adapter provides
 * no RX DMA or the allocation failed.
 */
static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic) {
	ENTITY e;
	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;

	if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_RX_DMA)) {
		return (-1);
	}

	/* build the synchronous descriptor-alloc request in place */
	pReq->xdi_dma_descriptor_operation.Req = 0;
	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
	pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
	pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1;
	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
	pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
	e.user[0] = plci->adapter->Id - 1; /* adapter Ids are 1-based */
	plci->adapter->request((ENTITY *)pReq);

	/* the XDI layer clears info.operation to signal success */
	if (!pReq->xdi_dma_descriptor_operation.info.operation &&
	    (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
	    pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
		*dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
		dbug(3, dprintf("dma_alloc, a:%d (%d-%08x)",
				plci->adapter->Id,
				pReq->xdi_dma_descriptor_operation.info.descriptor_number,
				*dma_magic));
		return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
	} else {
		dbug(1, dprintf("dma_alloc failed"));
		return (-1);
	}
}
/*
 * Return DMA descriptor 'nr' (obtained from diva_get_dma_descriptor()) to
 * the XDI layer.  Negative numbers mean "nothing allocated" and are
 * silently ignored.
 */
static void diva_free_dma_descriptor(PLCI *plci, int nr) {
	ENTITY e;
	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;

	if (nr < 0) {
		return;
	}

	pReq->xdi_dma_descriptor_operation.Req = 0;
	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
	pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
	pReq->xdi_dma_descriptor_operation.info.descriptor_number = nr;
	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
	pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
	e.user[0] = plci->adapter->Id - 1; /* adapter Ids are 1-based */
	plci->adapter->request((ENTITY *)pReq);

	/* the XDI layer clears info.operation to signal success */
	if (!pReq->xdi_dma_descriptor_operation.info.operation) {
		dbug(1, dprintf("dma_free(%d)", nr));
	} else {
		dbug(1, dprintf("dma_free failed (%d)", nr));
	}
}
/*------------------------------------------------------------------*/
| gpl-2.0 |
Team-Exhibit/android_kernel_samsung_u8500 | fs/ocfs2/reservations.c | 11499 | 20845 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* reservations.c
*
* Allocation reservations implementation
*
* Some code borrowed from fs/ext3/balloc.c and is:
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* The rest is copyright (C) 2010 Novell. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_trace.h"
#ifdef CONFIG_OCFS2_DEBUG_FS
#define OCFS2_CHECK_RESERVATIONS
#endif
DEFINE_SPINLOCK(resv_lock);
#define OCFS2_MIN_RESV_WINDOW_BITS 8
#define OCFS2_MAX_RESV_WINDOW_BITS 1024
/* Directory reservations are active only when both the general and the
 * directory-specific reservation levels are non-zero. */
int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
{
	if (!osb->osb_resv_level)
		return 0;
	return osb->osb_dir_resv_level != 0;
}
/*
 * Default window size in bits for a reservation.  Directory windows scale
 * with the dir-specific level, everything else with the general level:
 * 8, 16, 32, 64, 128, 256, 512, 1024.
 */
static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap,
					   struct ocfs2_alloc_reservation *resv)
{
	struct ocfs2_super *osb = resmap->m_osb;
	unsigned int level;

	level = (resv->r_flags & OCFS2_RESV_FLAG_DIR) ?
		osb->osb_dir_resv_level : osb->osb_resv_level;
	return 4 << level;
}
/* Last bit covered by the reservation; equals r_start for an empty window. */
static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
{
	return resv->r_len ? (resv->r_start + resv->r_len - 1) : resv->r_start;
}
/*
 * A truncated/unused reservation has zero length.  Returns 1 when the
 * window covers no bits, 0 otherwise.
 */
static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
{
	/* '==' already yields 0/1, so the former '!!' double negation
	 * was redundant. */
	return resv->r_len == 0;
}
/* The whole reservation subsystem is off when osb_resv_level is zero. */
static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap)
{
	return resmap->m_osb->osb_resv_level == 0;
}
/*
 * Debugging aid: log every reservation in tree (bit) order, followed by
 * the LRU list from least- to most-recently used.
 */
static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap)
{
	struct ocfs2_super *osb = resmap->m_osb;
	struct rb_node *node;
	struct ocfs2_alloc_reservation *resv;
	int i = 0;

	mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n",
	     osb->dev_str, resmap->m_bitmap_len);

	node = rb_first(&resmap->m_reservations);
	while (node) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u"
		     "\tlast_len: %u\n", resv->r_start,
		     ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
		     resv->r_last_len);

		node = rb_next(node);
		i++;
	}

	mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i);

	i = 0;
	list_for_each_entry(resv, &resmap->m_lru, r_lru) {
		mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t"
		     "last_start: %u\tlast_len: %u\n", i, resv->r_start,
		     ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
		     resv->r_last_len);

		i++;
	}
}
#ifdef OCFS2_CHECK_RESERVATIONS
/*
 * Verify that every bit inside reservation 'i' is still free in the
 * on-disk bitmap.  Logs and returns 1 on the first allocated bit found,
 * 0 when the whole window is clear.
 */
static int ocfs2_validate_resmap_bits(struct ocfs2_reservation_map *resmap,
				      int i,
				      struct ocfs2_alloc_reservation *resv)
{
	char *disk_bitmap = resmap->m_disk_bitmap;
	unsigned int bit;
	unsigned int end = ocfs2_resv_end(resv);

	for (bit = resv->r_start; bit <= end; bit++) {
		if (!ocfs2_test_bit(bit, disk_bitmap))
			continue;
		mlog(ML_ERROR,
		     "reservation %d covers an allocated area "
		     "starting at bit %u!\n", i, bit);
		return 1;
	}
	return 0;
}
/*
 * Debug-only (OCFS2_CHECK_RESERVATIONS) validation of the whole map.
 * Walks the rbtree in order and BUGs if any window is empty, overlaps
 * its left neighbor, runs past the bitmap, or covers bits already set
 * in the on-disk bitmap.
 */
static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{
	unsigned int off = 0;
	int i = 0;
	struct rb_node *node;
	struct ocfs2_alloc_reservation *resv;

	node = rb_first(&resmap->m_reservations);
	while (node) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		if (i > 0 && resv->r_start <= off) {
			mlog(ML_ERROR, "reservation %d has bad start off!\n",
			     i);
			goto bad;
		}

		if (resv->r_len == 0) {
			mlog(ML_ERROR, "reservation %d has no length!\n",
			     i);
			goto bad;
		}

		if (resv->r_start > ocfs2_resv_end(resv)) {
			mlog(ML_ERROR, "reservation %d has invalid range!\n",
			     i);
			goto bad;
		}

		if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) {
			mlog(ML_ERROR, "reservation %d extends past bitmap!\n",
			     i);
			goto bad;
		}

		if (ocfs2_validate_resmap_bits(resmap, i, resv))
			goto bad;

		/* remember our right edge for the next node's overlap check */
		off = ocfs2_resv_end(resv);
		node = rb_next(node);
		i++;
	}
	return;

bad:
	ocfs2_dump_resv(resmap);
	BUG();
}
#else
/* Reservation checking compiled out (no OCFS2_CHECK_RESERVATIONS): no-op. */
static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{
}
#endif
/*
 * One-time constructor for a reservation object: zero all fields and make
 * the LRU link self-referencing so later list_empty()/list_del_init()
 * calls are safe before the first insertion.
 */
void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv)
{
	memset(resv, 0, sizeof(*resv));
	INIT_LIST_HEAD(&resv->r_lru);
}
/*
 * Tag a reservation with one of the OCFS2_RESV_TYPES flags (directory or
 * temporary window).  Passing any other bit is a caller bug.
 */
void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
			 unsigned int flags)
{
	BUG_ON(flags & ~OCFS2_RESV_TYPES);

	resv->r_flags |= flags;
}
/*
 * Initialize a per-super reservation map: empty rbtree, empty LRU list.
 * Always succeeds (returns 0); the int return is kept for API symmetry.
 */
int ocfs2_resmap_init(struct ocfs2_super *osb,
		      struct ocfs2_reservation_map *resmap)
{
	memset(resmap, 0, sizeof(*resmap));

	resmap->m_osb = osb;
	resmap->m_reservations = RB_ROOT;
	/* m_bitmap_len is initialized to zero by the above memset. */
	INIT_LIST_HEAD(&resmap->m_lru);

	return 0;
}
/*
 * Move 'resv' to the tail (most-recently-used end) of the map's LRU list.
 * Caller must hold resv_lock.
 */
static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
				struct ocfs2_alloc_reservation *resv)
{
	assert_spin_locked(&resv_lock);

	/* list_move_tail() covers both the "already queued" and the
	 * "self-linked via INIT_LIST_HEAD()/list_del_init()" cases,
	 * replacing the old explicit list_empty()/list_del_init()/
	 * list_add_tail() sequence. */
	list_move_tail(&resv->r_lru, &resmap->m_lru);
}
/* Reset a reservation window to the empty state (no start, no length). */
static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv)
{
	resv->r_start = 0;
	resv->r_len = 0;
}
/*
 * Unlink 'resv' from the rbtree and the LRU list if it is currently
 * live (INUSE).  A reservation that was never inserted is left alone.
 */
static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap,
			      struct ocfs2_alloc_reservation *resv)
{
	if (!(resv->r_flags & OCFS2_RESV_FLAG_INUSE))
		return;

	list_del_init(&resv->r_lru);
	rb_erase(&resv->r_node, &resmap->m_reservations);
	resv->r_flags &= ~OCFS2_RESV_FLAG_INUSE;
}
/*
 * Empty a reservation, forget its allocation history and unlink it from
 * the map.  Caller must hold resv_lock.
 */
static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
				 struct ocfs2_alloc_reservation *resv)
{
	assert_spin_locked(&resv_lock);

	__ocfs2_resv_trunc(resv);
	/*
	 * last_len and last_start no longer make sense if
	 * we're changing the range of our allocations.
	 */
	resv->r_last_len = resv->r_last_start = 0;

	ocfs2_resv_remove(resmap, resv);
}
/* does nothing if 'resv' is null */
/* Locked wrapper around __ocfs2_resv_discard(); does nothing if 'resv' is NULL. */
void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
			struct ocfs2_alloc_reservation *resv)
{
	if (!resv)
		return;

	spin_lock(&resv_lock);
	__ocfs2_resv_discard(resmap, resv);
	spin_unlock(&resv_lock);
}
/*
 * Discard every reservation in the map, shrinking the rbtree from its
 * rightmost node.  Caller must hold resv_lock.
 */
static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap)
{
	struct rb_node *node;

	assert_spin_locked(&resv_lock);

	for (;;) {
		struct ocfs2_alloc_reservation *resv;

		node = rb_last(&resmap->m_reservations);
		if (!node)
			break;

		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
		__ocfs2_resv_discard(resmap, resv);
	}
}
/*
 * Point the map at a new bitmap window and drop all existing
 * reservations, which are meaningless against the new bitmap.
 * No-op when reservations are disabled.
 */
void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
			  unsigned int clen, char *disk_bitmap)
{
	if (ocfs2_resmap_disabled(resmap))
		return;

	spin_lock(&resv_lock);

	ocfs2_resmap_clear_all_resv(resmap);
	resmap->m_disk_bitmap = disk_bitmap;
	resmap->m_bitmap_len = clen;

	spin_unlock(&resv_lock);
}
/* Tear-down counterpart of ocfs2_resmap_init(). */
void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap)
{
	/* Does nothing for now. Keep this around for API symmetry */
}
/*
 * Link a fully-formed reservation (r_start/r_len already set) into the
 * map's rbtree, keyed by r_start.  Overlap with an existing window is a
 * bug.  The new window becomes most-recently-used and the map is
 * validated (debug builds).  Caller must hold resv_lock.
 */
static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
			      struct ocfs2_alloc_reservation *new)
{
	struct rb_root *root = &resmap->m_reservations;
	struct rb_node *parent = NULL;
	struct rb_node **p = &root->rb_node;
	struct ocfs2_alloc_reservation *tmp;

	assert_spin_locked(&resv_lock);

	trace_ocfs2_resv_insert(new->r_start, new->r_len);

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);

		if (new->r_start < tmp->r_start) {
			p = &(*p)->rb_left;

			/*
			 * This is a good place to check for
			 * overlapping reservations.
			 */
			BUG_ON(ocfs2_resv_end(new) >= tmp->r_start);
		} else if (new->r_start > ocfs2_resv_end(tmp)) {
			p = &(*p)->rb_right;
		} else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate reservation window!\n");
			BUG();
		}
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, root);
	new->r_flags |= OCFS2_RESV_FLAG_INUSE;

	ocfs2_resv_mark_lru(resmap, new);
	ocfs2_check_resmap(resmap);
}
/**
* ocfs2_find_resv_lhs() - find the window which contains goal
* @resmap: reservation map to search
* @goal: which bit to search for
*
* If a window containing that goal is not found, we return the window
* which comes before goal. Returns NULL on empty rbtree or no window
* before goal.
*/
static struct ocfs2_alloc_reservation *
ocfs2_find_resv_lhs(struct ocfs2_reservation_map *resmap, unsigned int goal)
{
	struct ocfs2_alloc_reservation *resv = NULL;
	struct ocfs2_alloc_reservation *prev_resv = NULL;
	struct rb_node *node = resmap->m_reservations.rb_node;

	assert_spin_locked(&resv_lock);

	if (!node)
		return NULL;

	/* Simple in-order linear walk; the number of windows is small. */
	node = rb_first(&resmap->m_reservations);
	while (node) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		/* found the window containing goal */
		if (resv->r_start <= goal && ocfs2_resv_end(resv) >= goal)
			break;

		/* Check if we overshot the reservation just before goal? */
		if (resv->r_start > goal) {
			/* may be NULL: no window starts before goal */
			resv = prev_resv;
			break;
		}

		prev_resv = resv;
		node = rb_next(node);
	}

	return resv;
}
/*
* We are given a range within the bitmap, which corresponds to a gap
* inside the reservations tree (search_start, search_len). The range
* can be anything from the whole bitmap, to a gap between
* reservations.
*
* The start value of *rstart is insignificant.
*
* This function searches the bitmap range starting at search_start
* with length search_len for a set of contiguous free bits. We try
* to find up to 'wanted' bits, but can sometimes return less.
*
* Returns the length of allocation, 0 if no free bits are found.
*
* *cstart and *clen will also be populated with the result.
*/
static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
				       unsigned int wanted,
				       unsigned int search_start,
				       unsigned int search_len,
				       unsigned int *rstart,
				       unsigned int *rlen)
{
	void *bitmap = resmap->m_disk_bitmap;
	unsigned int best_start, best_len = 0;
	int offset, start, found;

	trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
						wanted, resmap->m_bitmap_len);

	found = best_start = best_len = 0;

	start = search_start;
	while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
						  start)) != -1) {
		/* Search reached end of the region */
		if (offset >= (search_start + search_len))
			break;

		if (offset == start) {
			/* we found a zero */
			found++;
			/* move start to the next bit to test */
			start++;
		} else {
			/* got a zero after some ones */
			found = 1;
			start = offset + 1;
		}
		/* keep track of the longest free run seen so far */
		if (found > best_len) {
			best_len = found;
			best_start = start - found;
		}

		if (found >= wanted)
			break;
	}

	if (best_len == 0)
		return 0;

	/* never hand back more bits than were asked for */
	if (best_len >= wanted)
		best_len = wanted;

	*rlen = best_len;
	*rstart = best_start;

	trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);

	return *rlen;
}
/*
 * Core window search: try to give the (empty) reservation 'resv' up to
 * 'wanted' contiguous free bits as close to 'goal' as possible.  First
 * checks the gap between goal and the neighboring window, then walks
 * the gaps between all windows keeping the largest free run found.  On
 * success the window is inserted into the tree; if no free bits exist
 * anywhere, resv is left empty and the caller falls back to
 * cannibalizing an existing window.  Caller must hold resv_lock.
 */
static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
				     struct ocfs2_alloc_reservation *resv,
				     unsigned int goal, unsigned int wanted)
{
	struct rb_root *root = &resmap->m_reservations;
	unsigned int gap_start, gap_end, gap_len;
	struct ocfs2_alloc_reservation *prev_resv, *next_resv;
	struct rb_node *prev, *next;
	unsigned int cstart, clen;
	unsigned int best_start = 0, best_len = 0;

	/*
	 * Nasty cases to consider:
	 *
	 * - rbtree is empty
	 * - our window should be first in all reservations
	 * - our window should be last in all reservations
	 * - need to make sure we don't go past end of bitmap
	 */
	trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
					   goal, wanted, RB_EMPTY_ROOT(root));

	assert_spin_locked(&resv_lock);

	if (RB_EMPTY_ROOT(root)) {
		/*
		 * Easiest case - empty tree. We can just take
		 * whatever window of free bits we want.
		 */
		clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
						   resmap->m_bitmap_len - goal,
						   &cstart, &clen);

		/*
		 * This should never happen - the local alloc window
		 * will always have free bits when we're called.
		 */
		BUG_ON(goal == 0 && clen == 0);

		if (clen == 0)
			return;

		resv->r_start = cstart;
		resv->r_len = clen;
		ocfs2_resv_insert(resmap, resv);
		return;
	}

	prev_resv = ocfs2_find_resv_lhs(resmap, goal);

	if (prev_resv == NULL) {
		/*
		 * A NULL here means that the search code couldn't
		 * find a window that starts before goal.
		 *
		 * However, we can take the first window after goal,
		 * which is also by definition, the leftmost window in
		 * the entire tree. If we can find free bits in the
		 * gap between goal and the LHS window, then the
		 * reservation can safely be placed there.
		 *
		 * Otherwise we fall back to a linear search, checking
		 * the gaps in between windows for a place to
		 * allocate.
		 */
		next = rb_first(root);
		next_resv = rb_entry(next, struct ocfs2_alloc_reservation,
				     r_node);

		/*
		 * The search should never return such a window. (see
		 * comment above
		 */
		if (next_resv->r_start <= goal) {
			mlog(ML_ERROR, "goal: %u next_resv: start %u len %u\n",
			     goal, next_resv->r_start, next_resv->r_len);
			ocfs2_dump_resv(resmap);
			BUG();
		}

		clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
						   next_resv->r_start - goal,
						   &cstart, &clen);
		if (clen) {
			best_len = clen;
			best_start = cstart;
			if (best_len == wanted)
				goto out_insert;
		}

		prev_resv = next_resv;
		next_resv = NULL;
	}

	trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
					  ocfs2_resv_end(prev_resv));

	prev = &prev_resv->r_node;

	/* Now we do a linear search for a window, starting at 'prev_rsv' */
	while (1) {
		next = rb_next(prev);
		if (next) {
			/* gap between the current window and the next one */
			next_resv = rb_entry(next,
					     struct ocfs2_alloc_reservation,
					     r_node);

			gap_start = ocfs2_resv_end(prev_resv) + 1;
			gap_end = next_resv->r_start - 1;
			gap_len = gap_end - gap_start + 1;
		} else {
			/*
			 * We're at the rightmost edge of the
			 * tree. See if a reservation between this
			 * window and the end of the bitmap will work.
			 */
			gap_start = ocfs2_resv_end(prev_resv) + 1;
			gap_len = resmap->m_bitmap_len - gap_start;
			gap_end = resmap->m_bitmap_len - 1;
		}

		trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1,
						  next ? ocfs2_resv_end(next_resv) : -1);
		/*
		 * No need to check this gap if we have already found
		 * a larger region of free bits.
		 */
		if (gap_len <= best_len)
			goto next_resv;

		clen = ocfs2_resmap_find_free_bits(resmap, wanted, gap_start,
						   gap_len, &cstart, &clen);
		if (clen == wanted) {
			/* exact fit: stop searching */
			best_len = clen;
			best_start = cstart;
			goto out_insert;
		} else if (clen > best_len) {
			best_len = clen;
			best_start = cstart;
		}

next_resv:
		if (!next)
			break;

		prev = next;
		prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,
				     r_node);
	}

out_insert:
	if (best_len) {
		resv->r_start = best_start;
		resv->r_len = best_len;
		ocfs2_resv_insert(resmap, resv);
	}
}
/*
 * No free window could be found: steal space from the least-recently-used
 * reservation instead.  Small victims (at most half a default window, or
 * anything when a temporary window is wanted) are discarded wholesale and
 * their range taken over; larger ones are shrunk and the freed tail
 * becomes the new window.  Caller must hold resv_lock.
 */
static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
				   struct ocfs2_alloc_reservation *resv,
				   unsigned int wanted)
{
	struct ocfs2_alloc_reservation *lru_resv;
	int tmpwindow = !!(resv->r_flags & OCFS2_RESV_FLAG_TMP);
	unsigned int min_bits;

	if (!tmpwindow)
		min_bits = ocfs2_resv_window_bits(resmap, resv) >> 1;
	else
		min_bits = wanted; /* We at know the temp window will use all
				    * of these bits */

	/*
	 * Take the first reservation off the LRU as our 'target'. We
	 * don't try to be smart about it. There might be a case for
	 * searching based on size but I don't have enough data to be
	 * sure. --Mark (3/16/2010)
	 */
	lru_resv = list_first_entry(&resmap->m_lru,
				    struct ocfs2_alloc_reservation, r_lru);

	trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
					   lru_resv->r_len,
					   ocfs2_resv_end(lru_resv));

	/*
	 * Cannibalize (some or all) of the target reservation and
	 * feed it to the current window.
	 */
	if (lru_resv->r_len <= min_bits) {
		/*
		 * Discard completely if size is less than or equal to a
		 * reasonable threshold - 50% of window bits for non temporary
		 * windows.
		 */
		resv->r_start = lru_resv->r_start;
		resv->r_len = lru_resv->r_len;

		__ocfs2_resv_discard(resmap, lru_resv);
	} else {
		unsigned int shrink;
		if (tmpwindow)
			shrink = min_bits;
		else
			shrink = lru_resv->r_len / 2;

		/* shrink the victim and take over the freed tail */
		lru_resv->r_len -= shrink;

		resv->r_start = ocfs2_resv_end(lru_resv) + 1;
		resv->r_len = shrink;
	}

	trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
					 resv->r_len, resv->r_last_start,
					 resv->r_last_len);

	ocfs2_resv_insert(resmap, resv);
}
/*
 * Give the empty reservation 'resv' a new window of up to 'wanted' bits.
 * Search order: near the previous allocation, then from the bitmap
 * start, finally by cannibalizing the LRU reservation.  Always succeeds
 * (BUG otherwise).  Caller must hold resv_lock.
 */
static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
				   struct ocfs2_alloc_reservation *resv,
				   unsigned int wanted)
{
	unsigned int goal = 0;

	BUG_ON(!ocfs2_resv_empty(resv));

	/*
	 * Begin by trying to get a window as close to the previous
	 * one as possible. Using the most recent allocation as a
	 * start goal makes sense.
	 */
	if (resv->r_last_len) {
		goal = resv->r_last_start + resv->r_last_len;
		if (goal >= resmap->m_bitmap_len)
			goal = 0;
	}

	__ocfs2_resv_find_window(resmap, resv, goal, wanted);

	/* Search from last alloc didn't work, try once more from beginning. */
	if (ocfs2_resv_empty(resv) && goal != 0)
		__ocfs2_resv_find_window(resmap, resv, 0, wanted);

	if (ocfs2_resv_empty(resv)) {
		/*
		 * Still empty? Pull oldest one off the LRU, remove it from
		 * tree, put this one in it's place.
		 */
		ocfs2_cannibalize_resv(resmap, resv, wanted);
	}

	BUG_ON(ocfs2_resv_empty(resv));
}
/*
 * Reserve bits for an allocation.  On entry *clen holds the number of
 * bits the caller wants; on success *cstart/*clen describe the window
 * the caller should search first in the bitmap.  A new window is found
 * when 'resv' is currently empty.  Returns -ENOSPC when reservations
 * are disabled or resv is NULL.
 */
int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
			   struct ocfs2_alloc_reservation *resv,
			   int *cstart, int *clen)
{
	if (resv == NULL || ocfs2_resmap_disabled(resmap))
		return -ENOSPC;

	spin_lock(&resv_lock);

	if (ocfs2_resv_empty(resv)) {
		/*
		 * We don't want to over-allocate for temporary
		 * windows. Otherwise, we run the risk of fragmenting the
		 * allocation space.
		 */
		unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);

		if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
			wanted = *clen;

		/*
		 * Try to get a window here. If it works, we must fall
		 * through and test the bitmap . This avoids some
		 * ping-ponging of windows due to non-reserved space
		 * being allocation before we initialize a window for
		 * that inode.
		 */
		ocfs2_resv_find_window(resmap, resv, wanted);
		trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
	}

	BUG_ON(ocfs2_resv_empty(resv));

	*cstart = resv->r_start;
	*clen = resv->r_len;

	spin_unlock(&resv_lock);
	return 0;
}
/*
 * Shrink 'resv' after the caller allocated bits [start, end] from its
 * head.  The allocated range always begins at r_start (asserted).  A
 * fully consumed window is discarded; otherwise the window is advanced
 * past the allocated range.
 */
static void
ocfs2_adjust_resv_from_alloc(struct ocfs2_reservation_map *resmap,
			     struct ocfs2_alloc_reservation *resv,
			     unsigned int start, unsigned int end)
{
	unsigned int rhs;
	unsigned int old_end = ocfs2_resv_end(resv);

	BUG_ON(start != resv->r_start || old_end < end);

	/*
	 * Completely used? We can remove it then.
	 */
	if (old_end == end) {
		__ocfs2_resv_discard(resmap, resv);
		return;
	}

	/* Bits remaining to the right of the allocated range. */
	rhs = old_end - end;

	/*
	 * This should have been trapped above.
	 */
	BUG_ON(rhs == 0);

	resv->r_start = end + 1;
	/* same value as the old "old_end - resv->r_start + 1", stated
	 * directly: the leftover right-hand side becomes the window */
	resv->r_len = rhs;
}
/*
 * Record that the caller allocated [cstart, cstart+clen) out of its
 * reservation.  The window is shrunk (or discarded when fully used) and
 * the last-allocation hint is updated so the next window search starts
 * nearby.  No-op when reservations are disabled or resv is NULL.
 */
void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
			       struct ocfs2_alloc_reservation *resv,
			       u32 cstart, u32 clen)
{
	unsigned int cend = cstart + clen - 1;

	if (resmap == NULL || ocfs2_resmap_disabled(resmap))
		return;

	if (resv == NULL)
		return;

	/* allocations must always start at the head of the window.
	 * NOTE(review): this check runs before resv_lock is taken,
	 * unlike the equivalent BUG_ONs below -- confirm callers
	 * serialize access to resv. */
	BUG_ON(cstart != resv->r_start);

	spin_lock(&resv_lock);

	trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
					      ocfs2_resv_end(resv), resv->r_len,
					      resv->r_last_start,
					      resv->r_last_len);

	BUG_ON(cstart < resv->r_start);
	BUG_ON(cstart > ocfs2_resv_end(resv));
	BUG_ON(cend > ocfs2_resv_end(resv));

	ocfs2_adjust_resv_from_alloc(resmap, resv, cstart, cend);
	resv->r_last_start = cstart;
	resv->r_last_len = clen;

	/*
	 * May have been discarded above from
	 * ocfs2_adjust_resv_from_alloc().
	 */
	if (!ocfs2_resv_empty(resv))
		ocfs2_resv_mark_lru(resmap, resv);

	trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
					    resv->r_len, resv->r_last_start,
					    resv->r_last_len);

	ocfs2_check_resmap(resmap);

	spin_unlock(&resv_lock);
}
| gpl-2.0 |
sub77-bkp/android_kernel_samsung_golden | fs/ocfs2/reservations.c | 11499 | 20845 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* reservations.c
*
* Allocation reservations implementation
*
* Some code borrowed from fs/ext3/balloc.c and is:
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* The rest is copyright (C) 2010 Novell. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_trace.h"
#ifdef CONFIG_OCFS2_DEBUG_FS
#define OCFS2_CHECK_RESERVATIONS
#endif
DEFINE_SPINLOCK(resv_lock);
#define OCFS2_MIN_RESV_WINDOW_BITS 8
#define OCFS2_MAX_RESV_WINDOW_BITS 1024
/* Directory reservations are active only when both the general and the
 * directory-specific reservation levels are non-zero. */
int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
{
	if (!osb->osb_resv_level)
		return 0;
	return osb->osb_dir_resv_level != 0;
}
/*
 * Default window size in bits for a reservation.  Directory windows scale
 * with the dir-specific level, everything else with the general level:
 * 8, 16, 32, 64, 128, 256, 512, 1024.
 */
static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap,
					   struct ocfs2_alloc_reservation *resv)
{
	struct ocfs2_super *osb = resmap->m_osb;
	unsigned int level;

	level = (resv->r_flags & OCFS2_RESV_FLAG_DIR) ?
		osb->osb_dir_resv_level : osb->osb_resv_level;
	return 4 << level;
}
/* Last bit covered by the reservation; equals r_start for an empty window. */
static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
{
	return resv->r_len ? (resv->r_start + resv->r_len - 1) : resv->r_start;
}
/*
 * A truncated/unused reservation has zero length.  Returns 1 when the
 * window covers no bits, 0 otherwise.
 */
static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
{
	/* '==' already yields 0/1, so the former '!!' double negation
	 * was redundant. */
	return resv->r_len == 0;
}
/* The whole reservation subsystem is off when osb_resv_level is zero. */
static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap)
{
	return resmap->m_osb->osb_resv_level == 0;
}
/*
 * Debugging aid: log every reservation in tree (bit) order, followed by
 * the LRU list from least- to most-recently used.
 */
static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap)
{
	struct ocfs2_super *osb = resmap->m_osb;
	struct rb_node *node;
	struct ocfs2_alloc_reservation *resv;
	int i = 0;

	mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n",
	     osb->dev_str, resmap->m_bitmap_len);

	node = rb_first(&resmap->m_reservations);
	while (node) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u"
		     "\tlast_len: %u\n", resv->r_start,
		     ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
		     resv->r_last_len);

		node = rb_next(node);
		i++;
	}

	mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i);

	i = 0;
	list_for_each_entry(resv, &resmap->m_lru, r_lru) {
		mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t"
		     "last_start: %u\tlast_len: %u\n", i, resv->r_start,
		     ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
		     resv->r_last_len);

		i++;
	}
}
#ifdef OCFS2_CHECK_RESERVATIONS
/*
 * Verify that every bit inside reservation 'i' is still free in the
 * on-disk bitmap.  Logs and returns 1 on the first allocated bit found,
 * 0 when the whole window is clear.
 */
static int ocfs2_validate_resmap_bits(struct ocfs2_reservation_map *resmap,
				      int i,
				      struct ocfs2_alloc_reservation *resv)
{
	char *disk_bitmap = resmap->m_disk_bitmap;
	unsigned int bit;
	unsigned int end = ocfs2_resv_end(resv);

	for (bit = resv->r_start; bit <= end; bit++) {
		if (!ocfs2_test_bit(bit, disk_bitmap))
			continue;
		mlog(ML_ERROR,
		     "reservation %d covers an allocated area "
		     "starting at bit %u!\n", i, bit);
		return 1;
	}
	return 0;
}
/*
 * Walk the reservation tree in order and verify its invariants:
 * windows are strictly ordered, non-empty, within the bitmap, and
 * cover only bits that are free on disk. Any violation dumps the
 * whole map and BUGs. Compiled only under OCFS2_CHECK_RESERVATIONS.
 */
static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{
	unsigned int off = 0;	/* end bit of the previous window */
	int i = 0;
	struct rb_node *node;
	struct ocfs2_alloc_reservation *resv;

	node = rb_first(&resmap->m_reservations);
	while (node) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		/* Each window must start after the previous one ended. */
		if (i > 0 && resv->r_start <= off) {
			mlog(ML_ERROR, "reservation %d has bad start off!\n",
			     i);
			goto bad;
		}

		if (resv->r_len == 0) {
			mlog(ML_ERROR, "reservation %d has no length!\n",
			     i);
			goto bad;
		}

		if (resv->r_start > ocfs2_resv_end(resv)) {
			mlog(ML_ERROR, "reservation %d has invalid range!\n",
			     i);
			goto bad;
		}

		if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) {
			mlog(ML_ERROR, "reservation %d extends past bitmap!\n",
			     i);
			goto bad;
		}

		/* Reserved bits must still be free in the disk bitmap. */
		if (ocfs2_validate_resmap_bits(resmap, i, resv))
			goto bad;

		off = ocfs2_resv_end(resv);
		node = rb_next(node);

		i++;
	}
	return;

bad:
	ocfs2_dump_resv(resmap);
	BUG();
}
#else
/* No-op stub: checking is compiled out without OCFS2_CHECK_RESERVATIONS. */
static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
{

}
#endif
/*
 * One-time initialization of a reservation: zero all fields and set up
 * the LRU list head so later list operations are always safe.
 */
void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv)
{
	memset(resv, 0, sizeof(*resv));
	INIT_LIST_HEAD(&resv->r_lru);
}
/*
 * OR type flags onto a reservation. Only bits within OCFS2_RESV_TYPES
 * are accepted; anything else is a caller bug.
 */
void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
			 unsigned int flags)
{
	BUG_ON(flags & ~OCFS2_RESV_TYPES);

	resv->r_flags |= flags;
}
/*
 * Initialize a per-superblock reservation map: empty tree, empty LRU,
 * no bitmap attached yet. Always returns 0; the int return exists for
 * API symmetry.
 */
int ocfs2_resmap_init(struct ocfs2_super *osb,
		      struct ocfs2_reservation_map *resmap)
{
	memset(resmap, 0, sizeof(*resmap));

	resmap->m_osb = osb;
	resmap->m_reservations = RB_ROOT;
	/* m_bitmap_len is initialized to zero by the above memset. */
	INIT_LIST_HEAD(&resmap->m_lru);

	return 0;
}
/*
 * Move @resv to the tail (most-recently-used end) of the map's LRU.
 *
 * Every reservation gets INIT_LIST_HEAD() in ocfs2_resv_init_once(),
 * so list_move_tail() is safe whether or not the entry is currently
 * linked; it replaces the open-coded "del if linked, then add_tail"
 * sequence (list_del_init on an empty node is a no-op anyway).
 *
 * Caller must hold resv_lock.
 */
static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
				struct ocfs2_alloc_reservation *resv)
{
	assert_spin_locked(&resv_lock);

	list_move_tail(&resv->r_lru, &resmap->m_lru);
}
/* Empty the window: zero both its start and its length. */
static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv)
{
	resv->r_start = 0;
	resv->r_len = 0;
}
/*
 * Unlink an in-use reservation from the map (LRU and rbtree) and clear
 * its INUSE flag. Reservations that were never inserted are ignored.
 */
static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap,
			      struct ocfs2_alloc_reservation *resv)
{
	if (!(resv->r_flags & OCFS2_RESV_FLAG_INUSE))
		return;

	list_del_init(&resv->r_lru);
	rb_erase(&resv->r_node, &resmap->m_reservations);
	resv->r_flags &= ~OCFS2_RESV_FLAG_INUSE;
}
/*
 * Empty a reservation, forget its allocation history, and unlink it
 * from the map. Caller must hold resv_lock.
 */
static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
				 struct ocfs2_alloc_reservation *resv)
{
	assert_spin_locked(&resv_lock);

	__ocfs2_resv_trunc(resv);
	/*
	 * last_len and last_start no longer make sense if
	 * we're changing the range of our allocations.
	 */
	resv->r_last_len = resv->r_last_start = 0;

	ocfs2_resv_remove(resmap, resv);
}
/*
 * Locked wrapper around __ocfs2_resv_discard(). A NULL 'resv' is
 * silently ignored.
 */
void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
			struct ocfs2_alloc_reservation *resv)
{
	if (!resv)
		return;

	spin_lock(&resv_lock);
	__ocfs2_resv_discard(resmap, resv);
	spin_unlock(&resv_lock);
}
/*
 * Discard every reservation in the map. We re-fetch rb_last() each
 * pass because __ocfs2_resv_discard() erases the node from the tree.
 * Caller must hold resv_lock.
 */
static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap)
{
	struct rb_node *node;
	struct ocfs2_alloc_reservation *resv;

	assert_spin_locked(&resv_lock);

	while ((node = rb_last(&resmap->m_reservations)) != NULL) {
		resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		__ocfs2_resv_discard(resmap, resv);
	}
}
/*
 * Re-seed the map against a new bitmap: drop all existing reservations
 * and point the map at @disk_bitmap of @clen bits. No-op when
 * reservations are disabled for this mount.
 */
void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
			  unsigned int clen, char *disk_bitmap)
{
	if (ocfs2_resmap_disabled(resmap))
		return;

	spin_lock(&resv_lock);

	ocfs2_resmap_clear_all_resv(resmap);
	resmap->m_bitmap_len = clen;
	resmap->m_disk_bitmap = disk_bitmap;

	spin_unlock(&resv_lock);
}
/* Teardown counterpart of ocfs2_resmap_init(). */
void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap)
{
	/* Does nothing for now. Keep this around for API symmetry */
}
/*
 * Insert a freshly-sized reservation into the map: rbtree keyed by
 * r_start, LRU tail, INUSE flag set. Windows must never overlap;
 * an overlapping insert is a bug and triggers BUG().
 * Caller must hold resv_lock.
 */
static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
			      struct ocfs2_alloc_reservation *new)
{
	struct rb_root *root = &resmap->m_reservations;
	struct rb_node *parent = NULL;
	struct rb_node **p = &root->rb_node;
	struct ocfs2_alloc_reservation *tmp;

	assert_spin_locked(&resv_lock);

	trace_ocfs2_resv_insert(new->r_start, new->r_len);

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);

		if (new->r_start < tmp->r_start) {
			p = &(*p)->rb_left;

			/*
			 * This is a good place to check for
			 * overlapping reservations.
			 */
			BUG_ON(ocfs2_resv_end(new) >= tmp->r_start);
		} else if (new->r_start > ocfs2_resv_end(tmp)) {
			p = &(*p)->rb_right;
		} else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate reservation window!\n");
			BUG();
		}
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, root);
	new->r_flags |= OCFS2_RESV_FLAG_INUSE;

	ocfs2_resv_mark_lru(resmap, new);

	ocfs2_check_resmap(resmap);
}
/**
* ocfs2_find_resv_lhs() - find the window which contains goal
* @resmap: reservation map to search
* @goal: which bit to search for
*
* If a window containing that goal is not found, we return the window
* which comes before goal. Returns NULL on empty rbtree or no window
* before goal.
*/
static struct ocfs2_alloc_reservation *
ocfs2_find_resv_lhs(struct ocfs2_reservation_map *resmap, unsigned int goal)
{
	struct ocfs2_alloc_reservation *prev = NULL;
	struct rb_node *node;

	assert_spin_locked(&resv_lock);

	/* In-order scan; rb_first() on an empty tree yields NULL. */
	for (node = rb_first(&resmap->m_reservations); node;
	     node = rb_next(node)) {
		struct ocfs2_alloc_reservation *cur;

		cur = rb_entry(node, struct ocfs2_alloc_reservation, r_node);

		/* Window containing goal? */
		if (cur->r_start <= goal && ocfs2_resv_end(cur) >= goal)
			return cur;

		/*
		 * Overshot goal: the previous window (NULL if none)
		 * is the one just before it.
		 */
		if (cur->r_start > goal)
			return prev;

		prev = cur;
	}

	/* Ran off the right edge; the last window lies before goal. */
	return prev;
}
/*
* We are given a range within the bitmap, which corresponds to a gap
* inside the reservations tree (search_start, search_len). The range
* can be anything from the whole bitmap, to a gap between
* reservations.
*
* The start value of *rstart is insignificant.
*
* This function searches the bitmap range starting at search_start
* with length search_len for a set of contiguous free bits. We try
* to find up to 'wanted' bits, but can sometimes return less.
*
* Returns the length of allocation, 0 if no free bits are found.
*
* *rstart and *rlen will also be populated with the result.
*/
static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
				       unsigned int wanted,
				       unsigned int search_start,
				       unsigned int search_len,
				       unsigned int *rstart,
				       unsigned int *rlen)
{
	void *bitmap = resmap->m_disk_bitmap;
	unsigned int best_start, best_len = 0;
	int offset, start, found;

	trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
						wanted, resmap->m_bitmap_len);

	found = best_start = best_len = 0;
	start = search_start;

	/*
	 * Scan zero (free) bits, keeping track of the longest contiguous
	 * run seen so far. 'found' counts the length of the current run.
	 */
	while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
						  start)) != -1) {
		/* Search reached end of the region */
		if (offset >= (search_start + search_len))
			break;

		if (offset == start) {
			/* we found a zero */
			found++;
			/* move start to the next bit to test */
			start++;
		} else {
			/* got a zero after some ones */
			found = 1;
			start = offset + 1;
		}
		/* Remember the largest run seen so far. */
		if (found > best_len) {
			best_len = found;
			best_start = start - found;
		}

		/* Found a full-sized run - no point scanning further. */
		if (found >= wanted)
			break;
	}

	if (best_len == 0)
		return 0;

	/* Never hand back more bits than the caller asked for. */
	if (best_len >= wanted)
		best_len = wanted;

	*rlen = best_len;
	*rstart = best_start;

	trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);

	return *rlen;
}
/*
 * Size and place 'resv' near 'goal', preferring 'wanted' bits. On
 * success the reservation is inserted into the map; on total failure
 * it is left empty (the caller falls back to cannibalizing the LRU).
 * Caller must hold resv_lock.
 */
static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
				     struct ocfs2_alloc_reservation *resv,
				     unsigned int goal, unsigned int wanted)
{
	struct rb_root *root = &resmap->m_reservations;
	unsigned int gap_start, gap_end, gap_len;
	struct ocfs2_alloc_reservation *prev_resv, *next_resv;
	struct rb_node *prev, *next;
	unsigned int cstart, clen;
	unsigned int best_start = 0, best_len = 0;

	/*
	 * Nasty cases to consider:
	 *
	 * - rbtree is empty
	 * - our window should be first in all reservations
	 * - our window should be last in all reservations
	 * - need to make sure we don't go past end of bitmap
	 */
	trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
					   goal, wanted, RB_EMPTY_ROOT(root));

	assert_spin_locked(&resv_lock);

	if (RB_EMPTY_ROOT(root)) {
		/*
		 * Easiest case - empty tree. We can just take
		 * whatever window of free bits we want.
		 */
		clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
						   resmap->m_bitmap_len - goal,
						   &cstart, &clen);

		/*
		 * This should never happen - the local alloc window
		 * will always have free bits when we're called.
		 */
		BUG_ON(goal == 0 && clen == 0);

		if (clen == 0)
			return;

		resv->r_start = cstart;
		resv->r_len = clen;

		ocfs2_resv_insert(resmap, resv);
		return;
	}

	prev_resv = ocfs2_find_resv_lhs(resmap, goal);

	if (prev_resv == NULL) {
		/*
		 * A NULL here means that the search code couldn't
		 * find a window that starts before goal.
		 *
		 * However, we can take the first window after goal,
		 * which is also by definition, the leftmost window in
		 * the entire tree. If we can find free bits in the
		 * gap between goal and the LHS window, then the
		 * reservation can safely be placed there.
		 *
		 * Otherwise we fall back to a linear search, checking
		 * the gaps in between windows for a place to
		 * allocate.
		 */

		next = rb_first(root);
		next_resv = rb_entry(next, struct ocfs2_alloc_reservation,
				     r_node);

		/*
		 * The search should never return such a window. (see
		 * comment above
		 */
		if (next_resv->r_start <= goal) {
			mlog(ML_ERROR, "goal: %u next_resv: start %u len %u\n",
			     goal, next_resv->r_start, next_resv->r_len);
			ocfs2_dump_resv(resmap);
			BUG();
		}

		clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
						   next_resv->r_start - goal,
						   &cstart, &clen);
		if (clen) {
			best_len = clen;
			best_start = cstart;
			/* Full-sized hit in the leading gap - done. */
			if (best_len == wanted)
				goto out_insert;
		}

		prev_resv = next_resv;
		next_resv = NULL;
	}

	trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
					  ocfs2_resv_end(prev_resv));

	prev = &prev_resv->r_node;

	/* Now we do a linear search for a window, starting at 'prev_rsv' */
	while (1) {
		next = rb_next(prev);
		if (next) {
			/* Gap between this window and the next one. */
			next_resv = rb_entry(next,
					     struct ocfs2_alloc_reservation,
					     r_node);

			gap_start = ocfs2_resv_end(prev_resv) + 1;
			gap_end = next_resv->r_start - 1;
			gap_len = gap_end - gap_start + 1;
		} else {
			/*
			 * We're at the rightmost edge of the
			 * tree. See if a reservation between this
			 * window and the end of the bitmap will work.
			 */
			gap_start = ocfs2_resv_end(prev_resv) + 1;
			gap_len = resmap->m_bitmap_len - gap_start;
			gap_end = resmap->m_bitmap_len - 1;
		}

		trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1,
					next ? ocfs2_resv_end(next_resv) : -1);
		/*
		 * No need to check this gap if we have already found
		 * a larger region of free bits.
		 */
		if (gap_len <= best_len)
			goto next_resv;

		clen = ocfs2_resmap_find_free_bits(resmap, wanted, gap_start,
						   gap_len, &cstart, &clen);
		if (clen == wanted) {
			/* Exact fit - stop searching. */
			best_len = clen;
			best_start = cstart;
			goto out_insert;
		} else if (clen > best_len) {
			/* Best partial fit so far; keep looking. */
			best_len = clen;
			best_start = cstart;
		}

next_resv:
		if (!next)
			break;

		prev = next;
		prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,
				     r_node);
	}

out_insert:
	if (best_len) {
		resv->r_start = best_start;
		resv->r_len = best_len;
		ocfs2_resv_insert(resmap, resv);
	}
}
/*
 * No free window could be found, so steal space from the least
 * recently used reservation: either take it over wholesale (when it
 * is at or below our minimum threshold) or split off its right half.
 * The rebuilt 'resv' is inserted into the map before returning.
 * Caller must hold resv_lock.
 */
static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
				   struct ocfs2_alloc_reservation *resv,
				   unsigned int wanted)
{
	struct ocfs2_alloc_reservation *lru_resv;
	int tmpwindow = !!(resv->r_flags & OCFS2_RESV_FLAG_TMP);
	unsigned int min_bits;

	if (!tmpwindow)
		min_bits = ocfs2_resv_window_bits(resmap, resv) >> 1;
	else
		min_bits = wanted; /* We at know the temp window will use all
				    * of these bits */

	/*
	 * Take the first reservation off the LRU as our 'target'. We
	 * don't try to be smart about it. There might be a case for
	 * searching based on size but I don't have enough data to be
	 * sure. --Mark (3/16/2010)
	 */
	lru_resv = list_first_entry(&resmap->m_lru,
				    struct ocfs2_alloc_reservation, r_lru);

	trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
					   lru_resv->r_len,
					   ocfs2_resv_end(lru_resv));

	/*
	 * Cannibalize (some or all) of the target reservation and
	 * feed it to the current window.
	 */
	if (lru_resv->r_len <= min_bits) {
		/*
		 * Discard completely if size is less than or equal to a
		 * reasonable threshold - 50% of window bits for non temporary
		 * windows.
		 */
		resv->r_start = lru_resv->r_start;
		resv->r_len = lru_resv->r_len;

		__ocfs2_resv_discard(resmap, lru_resv);
	} else {
		unsigned int shrink;
		if (tmpwindow)
			shrink = min_bits;
		else
			shrink = lru_resv->r_len / 2;

		/* Trim the LRU window and claim the freed right part. */
		lru_resv->r_len -= shrink;

		resv->r_start = ocfs2_resv_end(lru_resv) + 1;
		resv->r_len = shrink;
	}

	trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
					 resv->r_len, resv->r_last_start,
					 resv->r_last_len);

	ocfs2_resv_insert(resmap, resv);
}
/*
 * Fill an empty reservation with a window of (up to) 'wanted' bits.
 * Strategy: search near the last allocation, then from the start of
 * the bitmap, and finally cannibalize the LRU. Guaranteed to leave
 * 'resv' non-empty.
 */
static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
				   struct ocfs2_alloc_reservation *resv,
				   unsigned int wanted)
{
	unsigned int goal = 0;

	BUG_ON(!ocfs2_resv_empty(resv));

	/*
	 * Begin by trying to get a window as close to the previous
	 * one as possible. Using the most recent allocation as a
	 * start goal makes sense.
	 */
	if (resv->r_last_len) {
		goal = resv->r_last_start + resv->r_last_len;
		if (goal >= resmap->m_bitmap_len)
			goal = 0;
	}

	__ocfs2_resv_find_window(resmap, resv, goal, wanted);

	/* Search from last alloc didn't work, try once more from beginning. */
	if (ocfs2_resv_empty(resv) && goal != 0)
		__ocfs2_resv_find_window(resmap, resv, 0, wanted);

	if (ocfs2_resv_empty(resv)) {
		/*
		 * Still empty? Pull oldest one off the LRU, remove it from
		 * tree, put this one in it's place.
		 */
		ocfs2_cannibalize_resv(resmap, resv, wanted);
	}

	BUG_ON(ocfs2_resv_empty(resv));
}
/*
 * Hand the caller a window of reserved bits. On entry *clen holds the
 * number of bits the caller wants; on success *cstart/*clen describe
 * the window found. Returns -ENOSPC when reservations are disabled or
 * no reservation was supplied; 0 otherwise.
 */
int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
			   struct ocfs2_alloc_reservation *resv,
			   int *cstart, int *clen)
{
	if (resv == NULL || ocfs2_resmap_disabled(resmap))
		return -ENOSPC;

	spin_lock(&resv_lock);

	if (ocfs2_resv_empty(resv)) {
		/*
		 * We don't want to over-allocate for temporary
		 * windows. Otherwise, we run the risk of fragmenting the
		 * allocation space.
		 */
		unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);

		if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
			wanted = *clen;

		/*
		 * Try to get a window here. If it works, we must fall
		 * through and test the bitmap. This avoids some
		 * ping-ponging of windows due to non-reserved space
		 * being allocated before we initialize a window for
		 * that inode.
		 */
		ocfs2_resv_find_window(resmap, resv, wanted);
		trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
	}

	BUG_ON(ocfs2_resv_empty(resv));

	*cstart = resv->r_start;
	*clen = resv->r_len;

	spin_unlock(&resv_lock);

	return 0;
}
/*
 * Shrink a reservation after bits [start, end] were allocated from its
 * left edge. Allocation must begin at r_start; a fully-consumed window
 * is discarded, otherwise only the unused right-hand side is kept.
 */
static void
ocfs2_adjust_resv_from_alloc(struct ocfs2_reservation_map *resmap,
			     struct ocfs2_alloc_reservation *resv,
			     unsigned int start, unsigned int end)
{
	unsigned int old_end = ocfs2_resv_end(resv);
	unsigned int rhs;

	BUG_ON(start != resv->r_start || old_end < end);

	/*
	 * Completely used? We can remove it then.
	 */
	if (old_end == end) {
		__ocfs2_resv_discard(resmap, resv);
		return;
	}

	/* Bits left over past the allocated range. */
	rhs = old_end - end;

	/*
	 * This should have been trapped above.
	 */
	BUG_ON(rhs == 0);

	resv->r_start = end + 1;
	resv->r_len = rhs;	/* old_end - (end + 1) + 1 */
}
/*
 * Record that [cstart, cstart + clen) was actually allocated out of
 * 'resv': trim the window accordingly, remember the allocation for
 * future goal hints, and refresh the LRU position. Claims must start
 * exactly at r_start and stay within the window.
 */
void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
			       struct ocfs2_alloc_reservation *resv,
			       u32 cstart, u32 clen)
{
	unsigned int cend = cstart + clen - 1;

	if (resmap == NULL || ocfs2_resmap_disabled(resmap))
		return;

	if (resv == NULL)
		return;

	BUG_ON(cstart != resv->r_start);

	spin_lock(&resv_lock);

	trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
					      ocfs2_resv_end(resv), resv->r_len,
					      resv->r_last_start,
					      resv->r_last_len);

	BUG_ON(cstart < resv->r_start);
	BUG_ON(cstart > ocfs2_resv_end(resv));
	BUG_ON(cend > ocfs2_resv_end(resv));

	ocfs2_adjust_resv_from_alloc(resmap, resv, cstart, cend);
	/* Cache where we last allocated - used as the next search goal. */
	resv->r_last_start = cstart;
	resv->r_last_len = clen;

	/*
	 * May have been discarded above from
	 * ocfs2_adjust_resv_from_alloc().
	 */
	if (!ocfs2_resv_empty(resv))
		ocfs2_resv_mark_lru(resmap, resv);

	trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
					    resv->r_len, resv->r_last_start,
					    resv->r_last_len);

	ocfs2_check_resmap(resmap);

	spin_unlock(&resv_lock);
}
| gpl-2.0 |
Fred6681/android_kernel_samsung_golden | drivers/media/dvb/frontends/isl6423.c | 14571 | 6645 | /*
Intersil ISL6423 SEC and LNB Power supply controller
Copyright (C) Manu Abraham <abraham.manu@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "isl6423.h"
/*
 * Module-wide verbosity knob, adjustable at runtime via sysfs (0644).
 * NOTE(review): declared 'unsigned int' but registered with
 * module_param(..., int, ...) - types should match; verify.
 */
static unsigned int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Set Verbosity level");

/* Severity levels used as the first argument to dprintk(). */
#define FE_ERROR				0
#define FE_NOTICE				1
#define FE_INFO					2
#define FE_DEBUG				3
#define FE_DEBUGREG				4

/*
 * Conditional logging: when __z is set, route through printk with a
 * severity picked from 'verbose'; otherwise print raw when verbose
 * exceeds the message level __y.
 */
#define dprintk(__y, __z, format, arg...) do {						\
	if (__z) {								\
		if	((verbose > FE_ERROR) && (verbose > __y))		\
			printk(KERN_ERR "%s: " format "\n", __func__ , ##arg);	\
		else if	((verbose > FE_NOTICE) && (verbose > __y))		\
			printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
		else if ((verbose > FE_INFO) && (verbose > __y))		\
			printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
		else if ((verbose > FE_DEBUG) && (verbose > __y))		\
			printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
	} else {								\
		if (verbose > __y)						\
			printk(format, ##arg);					\
	}									\
} while (0)
/* Per-chip state, hung off dvb_frontend->sec_priv. */
struct isl6423_dev {
	const struct isl6423_config	*config;	/* board wiring/options */
	struct i2c_adapter		*i2c;		/* bus the chip sits on */

	u8 reg_3;	/* cached copy of system register 3 */
	u8 reg_4;	/* cached copy of system register 4 */

	unsigned int verbose;	/* snapshot of the module 'verbose' param */
};
/*
 * Write one register byte to the chip over I2C. Returns 0 on success
 * or the negative error from i2c_transfer().
 */
static int isl6423_write(struct isl6423_dev *isl6423, u8 reg)
{
	struct i2c_adapter *i2c = isl6423->i2c;
	u8 addr = isl6423->config->addr;
	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = &reg, .len = 1 };
	int err;

	dprintk(FE_DEBUG, 1, "write reg %02X", reg);

	err = i2c_transfer(i2c, &msg, 1);
	if (err < 0) {
		dprintk(FE_ERROR, 1, "I/O error <%d>", err);
		return err;
	}

	return 0;
}
/*
 * Program system register 2, selecting internal or external 22kHz tone
 * modulation according to the board config.
 */
static int isl6423_set_modulation(struct dvb_frontend *fe)
{
	struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
	const struct isl6423_config *config = isl6423->config;
	u8 reg_2 = 0x01 << 5;
	int err;

	reg_2 |= config->mod_extern ? (1 << 3) : (1 << 4);

	err = isl6423_write(isl6423, reg_2);
	if (err < 0) {
		dprintk(FE_ERROR, 1, "I/O error <%d>", err);
		return err;
	}

	return 0;
}
/*
 * enable_high_lnb_voltage callback: toggle the VBOT bit while keeping
 * the supply enabled (EN) and dynamic limiting (VSPEN) on. Register
 * shadows are committed only after both writes succeed.
 */
static int isl6423_voltage_boost(struct dvb_frontend *fe, long arg)
{
	struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
	u8 reg_3 = isl6423->reg_3;
	u8 reg_4 = isl6423->reg_4;
	int err;

	/* EN = 1 and VSPEN = 1 in both cases; only VBOT follows arg. */
	reg_4 |= (1 << 4);
	reg_3 |= (1 << 3);
	if (arg)
		reg_4 |= 0x1;
	else
		reg_4 &= ~0x1;

	err = isl6423_write(isl6423, reg_3);
	if (err < 0)
		goto exit;

	err = isl6423_write(isl6423, reg_4);
	if (err < 0)
		goto exit;

	isl6423->reg_3 = reg_3;
	isl6423->reg_4 = reg_4;

	return 0;
exit:
	dprintk(FE_ERROR, 1, "I/O error <%d>", err);
	return err;
}
/*
 * set_voltage callback: select 13V/18V LNB output or switch the supply
 * off via the VTOP/VBOT/EN bits. An unknown voltage value falls
 * through and rewrites the current shadow values unchanged. The
 * shadows are committed only after both register writes succeed.
 */
static int isl6423_set_voltage(struct dvb_frontend *fe,
			       enum fe_sec_voltage voltage)
{
	struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
	u8 reg_3 = isl6423->reg_3;
	u8 reg_4 = isl6423->reg_4;
	int err = 0;

	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		/* EN = 0 */
		reg_4 &= ~(1 << 4);
		break;

	case SEC_VOLTAGE_13:
		/* EN = 1, VSPEN = 1, VTOP = 0, VBOT = 0 */
		reg_4 |= (1 << 4);
		reg_4 &= ~0x3;
		reg_3 |= (1 << 3);
		break;

	case SEC_VOLTAGE_18:
		/* EN = 1, VSPEN = 1, VTOP = 1, VBOT = 0 */
		reg_4 |= (1 << 4);
		reg_4 |= 0x2;
		reg_4 &= ~0x1;
		reg_3 |= (1 << 3);
		break;

	default:
		break;
	}
	err = isl6423_write(isl6423, reg_3);
	if (err < 0)
		goto exit;

	err = isl6423_write(isl6423, reg_4);
	if (err < 0)
		goto exit;

	isl6423->reg_3 = reg_3;
	isl6423->reg_4 = reg_4;

	return 0;
exit:
	dprintk(FE_ERROR, 1, "I/O error <%d>", err);
	return err;
}
/*
 * Program the output current ceiling (ISELH/ISELL) and the dynamic
 * current-limit mode (DCL) from the board config. Register 3 is
 * written twice: once after selecting the current, once after
 * adjusting the limiter bit. The shadow is updated only when both
 * writes succeed.
 */
static int isl6423_set_current(struct dvb_frontend *fe)
{
	struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
	u8 reg_3 = isl6423->reg_3;
	const struct isl6423_config *config = isl6423->config;
	int err = 0;

	switch (config->current_max) {
	case SEC_CURRENT_275m:
		/* 275mA */
		/* ISELH = 0, ISELL = 0 */
		reg_3 &= ~0x3;
		break;

	case SEC_CURRENT_515m:
		/* 515mA */
		/* ISELH = 0, ISELL = 1 */
		reg_3 &= ~0x2;
		reg_3 |= 0x1;
		break;

	case SEC_CURRENT_635m:
		/* 635mA */
		/* ISELH = 1, ISELL = 0 */
		reg_3 &= ~0x1;
		reg_3 |= 0x2;
		break;

	case SEC_CURRENT_800m:
		/* 800mA */
		/* ISELH = 1, ISELL = 1 */
		reg_3 |= 0x3;
		break;
	}
	err = isl6423_write(isl6423, reg_3);
	if (err < 0)
		goto exit;

	switch (config->curlim) {
	case SEC_CURRENT_LIM_ON:
		/* DCL = 0 */
		reg_3 &= ~0x10;
		break;

	case SEC_CURRENT_LIM_OFF:
		/* DCL = 1 */
		reg_3 |= 0x10;
		break;
	}
	err = isl6423_write(isl6423, reg_3);
	if (err < 0)
		goto exit;

	isl6423->reg_3 = reg_3;

	return 0;
exit:
	dprintk(FE_ERROR, 1, "I/O error <%d>", err);
	return err;
}
/* release_sec callback: drop LNB power, then free the per-chip state. */
static void isl6423_release(struct dvb_frontend *fe)
{
	isl6423_set_voltage(fe, SEC_VOLTAGE_OFF);

	kfree(fe->sec_priv);
	fe->sec_priv = NULL;
}
/*
 * Allocate and wire up an ISL6423 behind the given frontend: seed the
 * register shadows, program current limits and modulation, and hook
 * the SEC callbacks. Returns the frontend on success or NULL on
 * allocation/I2C failure (state is freed and sec_priv cleared).
 */
struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
				    struct i2c_adapter *i2c,
				    const struct isl6423_config *config)
{
	struct isl6423_dev *isl6423;

	isl6423 = kzalloc(sizeof(struct isl6423_dev), GFP_KERNEL);
	if (!isl6423)
		return NULL;

	isl6423->config	= config;
	isl6423->i2c	= i2c;
	fe->sec_priv	= isl6423;

	/* SR3H = 0, SR3M = 1, SR3L = 0 */
	isl6423->reg_3 = 0x02 << 5;
	/* SR4H = 0, SR4M = 1, SR4L = 1 */
	isl6423->reg_4 = 0x03 << 5;

	if (isl6423_set_current(fe))
		goto exit;

	if (isl6423_set_modulation(fe))
		goto exit;

	fe->ops.release_sec		= isl6423_release;
	fe->ops.set_voltage		= isl6423_set_voltage;
	fe->ops.enable_high_lnb_voltage = isl6423_voltage_boost;
	isl6423->verbose		= verbose;

	return fe;

exit:
	/* I2C setup failed: undo the allocation before returning NULL. */
	kfree(isl6423);
	fe->sec_priv = NULL;
	return NULL;
}
EXPORT_SYMBOL(isl6423_attach);
MODULE_DESCRIPTION("ISL6423 SEC");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jderrick/linux-blkdev | fs/affs/dir.c | 236 | 3407 | /*
* linux/fs/affs/dir.c
*
* (c) 1996 Hans-Joachim Widmaier - Rewritten
*
* (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
*
* (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*
* affs directory handling functions
*
*/
#include "affs.h"
static int affs_readdir(struct file *, struct dir_context *);
/* Directory file operations; listing goes through affs_readdir(). */
const struct file_operations affs_dir_operations = {
	.read		= generic_read_dir,
	.llseek		= generic_file_llseek,
	.iterate_shared	= affs_readdir,
	.fsync		= affs_file_fsync,
};
/*
* directories can handle most operations...
*/
/* Inode operations for AFFS directories. */
const struct inode_operations affs_dir_inode_operations = {
	.create		= affs_create,
	.lookup		= affs_lookup,
	.link		= affs_link,
	.unlink		= affs_unlink,
	.symlink	= affs_symlink,
	.mkdir		= affs_mkdir,
	.rmdir		= affs_rmdir,
	.rename		= affs_rename,
	.setattr	= affs_notify_change,
};
/*
 * Iterate a directory. The file position encodes the current place in
 * the directory's hash table as ((hash_pos << 16) | chain_pos) + 2,
 * where chain_pos counts entries along one hash chain; positions 0/1
 * are "." and "..". file->private_data caches the last emitted block
 * number so an unchanged directory (guarded by i_version) can resume
 * without re-walking the chain.
 */
static int
affs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode		*inode = file_inode(file);
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*dir_bh = NULL;
	struct buffer_head	*fh_bh = NULL;
	unsigned char		*name;
	int			 namelen;
	u32			 i;
	int			 hash_pos;
	int			 chain_pos;
	u32			 ino;
	int			 error = 0;

	pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos);

	if (ctx->pos < 2) {
		/* Starting over: forget any cached resume block. */
		file->private_data = (void *)0;
		if (!dir_emit_dots(file, ctx))
			return 0;
	}

	affs_lock_dir(inode);

	/* Decode hash bucket and position within its chain from f_pos. */
	chain_pos = (ctx->pos - 2) & 0xffff;
	hash_pos  = (ctx->pos - 2) >> 16;
	if (chain_pos == 0xffff) {
		affs_warning(sb, "readdir", "More than 65535 entries in chain");
		chain_pos = 0;
		hash_pos++;
		ctx->pos = ((hash_pos << 16) | chain_pos) + 2;
	}
	dir_bh = affs_bread(sb, inode->i_ino);
	if (!dir_bh)
		goto out_unlock_dir;

	/* If the directory hasn't changed since the last call to readdir(),
	 * we can jump directly to where we left off.
	 */
	ino = (u32)(long)file->private_data;
	if (ino && file->f_version == inode->i_version) {
		pr_debug("readdir() left off=%d\n", ino);
		goto inside;	/* NB: jumps into the middle of the loop below */
	}

	/* Otherwise re-walk the chain to reach chain_pos. */
	ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
	for (i = 0; ino && i < chain_pos; i++) {
		fh_bh = affs_bread(sb, ino);
		if (!fh_bh) {
			affs_error(sb, "readdir","Cannot read block %d", i);
			error = -EIO;
			goto out_brelse_dir;
		}
		ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
		affs_brelse(fh_bh);
		fh_bh = NULL;
	}
	if (ino)
		goto inside;
	hash_pos++;

	/* Emit every remaining entry, bucket by bucket. */
	for (; hash_pos < AFFS_SB(sb)->s_hashsize; hash_pos++) {
		ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
		if (!ino)
			continue;
		ctx->pos = (hash_pos << 16) + 2;
inside:
		do {
			fh_bh = affs_bread(sb, ino);
			if (!fh_bh) {
				affs_error(sb, "readdir",
					   "Cannot read block %d", ino);
				break;
			}

			namelen = min(AFFS_TAIL(sb, fh_bh)->name[0],
				      (u8)AFFSNAMEMAX);
			name = AFFS_TAIL(sb, fh_bh)->name + 1;
			pr_debug("readdir(): dir_emit(\"%.*s\", ino=%u), hash=%d, f_pos=%llx\n",
				 namelen, name, ino, hash_pos, ctx->pos);

			if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN))
				goto done;	/* buffer full: resume here later */
			ctx->pos++;
			ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
			affs_brelse(fh_bh);
			fh_bh = NULL;
		} while (ino);
	}
done:
	/* Cache position for fast resume while i_version is unchanged. */
	file->f_version = inode->i_version;
	file->private_data = (void *)(long)ino;
	affs_brelse(fh_bh);

out_brelse_dir:
	affs_brelse(dir_bh);

out_unlock_dir:
	affs_unlock_dir(inode);
	return error;
}
| gpl-2.0 |
sohkis/android_kernel_lge_hammerhead | fs/cifs/inode.c | 492 | 61222 | /*
* fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
/*
 * Install inode/file/address-space operations on a new inode based on
 * its file type and the mount flags (directio, strict cache, no brl).
 * Note the #ifdef below deliberately splits the braces so the same
 * body compiles with and without DFS support.
 */
static void cifs_set_ops(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &cifs_file_inode_ops;
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_direct_nobrl_ops;
			else
				inode->i_fop = &cifs_file_direct_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_strict_nobrl_ops;
			else
				inode->i_fop = &cifs_file_strict_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			inode->i_fop = &cifs_file_nobrl_ops;
		else { /* not direct, send byte range locks */
			inode->i_fop = &cifs_file_ops;
		}

		/* check if server can support readpages */
		if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
		else
			inode->i_data.a_ops = &cifs_addr_ops;
		break;
	case S_IFDIR:
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (IS_AUTOMOUNT(inode)) {
			inode->i_op = &cifs_dfs_referral_inode_operations;
		} else {
#else /* NO DFS support, treat as a directory */
		{
#endif
			inode->i_op = &cifs_dir_inode_ops;
			inode->i_fop = &cifs_dir_ops;
		}
		break;
	case S_IFLNK:
		inode->i_op = &cifs_symlink_inode_ops;
		break;
	default:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
/* check inode attributes against fattr. If they don't match, tag the
* inode for cache invalidation
*/
/*
 * Compare the cached inode against fresh server attributes in 'fattr'
 * and mark the page cache for invalidation when mtime or size differ.
 * New inodes and read-oplocked inodes are trusted as-is.
 */
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);

	cFYI(1, "%s: revalidating inode %llu", __func__, cifs_i->uniqueid);

	if (inode->i_state & I_NEW) {
		cFYI(1, "%s: inode %llu is new", __func__, cifs_i->uniqueid);
		return;
	}

	/* don't bother with revalidation if we have an oplock */
	if (cifs_i->clientCanCacheRead) {
		cFYI(1, "%s: inode %llu is oplocked", __func__,
		     cifs_i->uniqueid);
		return;
	}

	/* revalidate if mtime or size have changed */
	if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) &&
	    cifs_i->server_eof == fattr->cf_eof) {
		cFYI(1, "%s: inode %llu is unchanged", __func__,
		     cifs_i->uniqueid);
		return;
	}

	cFYI(1, "%s: invalidating inode %llu mapping", __func__,
	     cifs_i->uniqueid);
	cifs_i->invalid_mapping = true;
}
/* populate an inode with info from a cifs_fattr struct */
/* populate an inode with info from a cifs_fattr struct */
void
cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	unsigned long oldtime = cifs_i->time;

	/* Flag the mapping for invalidation first if attrs changed. */
	cifs_revalidate_cache(inode, fattr);

	inode->i_atime = fattr->cf_atime;
	inode->i_mtime = fattr->cf_mtime;
	inode->i_ctime = fattr->cf_ctime;
	inode->i_rdev = fattr->cf_rdev;
	set_nlink(inode, fattr->cf_nlink);
	inode->i_uid = fattr->cf_uid;
	inode->i_gid = fattr->cf_gid;

	/* if dynperm is set, don't clobber existing mode */
	if (inode->i_state & I_NEW ||
	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
		inode->i_mode = fattr->cf_mode;

	cifs_i->cifsAttrs = fattr->cf_cifsattrs;

	/* cifs_i->time == 0 forces a revalidation on next lookup */
	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
		cifs_i->time = 0;
	else
		cifs_i->time = jiffies;

	cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
		 oldtime, cifs_i->time);

	cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;

	cifs_i->server_eof = fattr->cf_eof;
	/*
	 * Can't safely change the file size here if the client is writing to
	 * it due to potential races.
	 */
	spin_lock(&inode->i_lock);
	if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
		i_size_write(inode, fattr->cf_eof);

		/*
		 * i_blocks is not related to (i_size / i_blksize),
		 * but instead 512 byte (2**9) size is required for
		 * calculating num blocks.
		 */
		inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
	}
	spin_unlock(&inode->i_lock);

	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
		inode->i_flags |= S_AUTOMOUNT;
	if (inode->i_state & I_NEW)
		cifs_set_ops(inode);
}
/*
 * Synthesize a unique inode number for the fattr when the mount is
 * not using server-provided inode numbers; otherwise leave it alone.
 */
void
cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
		fattr->cf_uniqueid = iunique(sb, ROOT_I);
}
/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
/*
 * Translate a server FILE_UNIX_BASIC_INFO into a cifs_fattr: sizes,
 * timestamps, mode/type bits, device numbers, and ownership (with
 * forceuid/forcegid mount overrides applied).
 */
void
cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
			 struct cifs_sb_info *cifs_sb)
{
	memset(fattr, 0, sizeof(*fattr));
	fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
	fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
	fattr->cf_eof = le64_to_cpu(info->EndOfFile);

	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
	fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
	fattr->cf_mode = le64_to_cpu(info->Permissions);

	/*
	 * Since we set the inode type below we need to mask off
	 * to avoid strange results if bits set above.
	 */
	fattr->cf_mode &= ~S_IFMT;
	switch (le32_to_cpu(info->Type)) {
	case UNIX_FILE:
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		break;
	case UNIX_SYMLINK:
		fattr->cf_mode |= S_IFLNK;
		fattr->cf_dtype = DT_LNK;
		break;
	case UNIX_DIR:
		fattr->cf_mode |= S_IFDIR;
		fattr->cf_dtype = DT_DIR;
		break;
	case UNIX_CHARDEV:
		fattr->cf_mode |= S_IFCHR;
		fattr->cf_dtype = DT_CHR;
		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				       le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_BLOCKDEV:
		fattr->cf_mode |= S_IFBLK;
		fattr->cf_dtype = DT_BLK;
		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				       le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_FIFO:
		fattr->cf_mode |= S_IFIFO;
		fattr->cf_dtype = DT_FIFO;
		break;
	case UNIX_SOCKET:
		fattr->cf_mode |= S_IFSOCK;
		fattr->cf_dtype = DT_SOCK;
		break;
	default:
		/* safest to call it a file if we do not know */
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		cFYI(1, "unknown type %d", le32_to_cpu(info->Type));
		break;
	}

	/* Mount-level uid/gid overrides win over server values. */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		fattr->cf_uid = cifs_sb->mnt_uid;
	else
		fattr->cf_uid = le64_to_cpu(info->Uid);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		fattr->cf_gid = cifs_sb->mnt_gid;
	else
		fattr->cf_gid = le64_to_cpu(info->Gid);

	fattr->cf_nlink = le64_to_cpu(info->Nlinks);
}
/*
 * Fill a cifs_fattr struct with fake inode info.
 *
 * Needed to setup cifs_fattr data for the directory which is the
 * junction to the new submount (ie to setup the fake directory
 * which represents a DFS referral).
 */
static void
cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
	struct cifs_sb_info *sbi = CIFS_SB(sb);

	cFYI(1, "creating fake fattr for DFS referral");

	memset(fattr, 0, sizeof(*fattr));

	/* fake directory: owner rwx, everyone may traverse */
	fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
	fattr->cf_nlink = 2;
	fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;

	/* ownership comes from mount options; timestamps are "now" */
	fattr->cf_uid = sbi->mnt_uid;
	fattr->cf_gid = sbi->mnt_gid;
	fattr->cf_mtime = CURRENT_TIME;
	fattr->cf_atime = CURRENT_TIME;
	fattr->cf_ctime = CURRENT_TIME;
}
/*
 * Refresh the inode behind an already-open file using the CIFS unix
 * extensions (QUERY_FILE_UNIX_BASIC).
 *
 * Returns 0 on success or a negative error code. -EREMOTE (a DFS
 * referral point) is handled by faking directory attributes and
 * returning 0.
 *
 * Fix: the original code called cifs_fattr_to_inode() unconditionally,
 * so on any error other than -EREMOTE the inode was updated from an
 * UNINITIALIZED stack fattr. Skip the inode update on such errors.
 */
int cifs_get_file_info_unix(struct file *filp)
{
	int rc;
	int xid;
	FILE_UNIX_BASIC_INFO find_data;
	struct cifs_fattr fattr;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	xid = GetXid();
	rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
	if (!rc) {
		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
	} else if (rc == -EREMOTE) {
		/* DFS junction -- fake up directory attributes */
		cifs_create_dfs_fattr(&fattr, inode->i_sb);
		rc = 0;
	} else
		goto cgfiu_exit;	/* fattr not valid -- don't touch inode */

	cifs_fattr_to_inode(inode, &fattr);
cgfiu_exit:
	FreeXid(xid);
	return rc;
}
/*
 * Query the server for the attributes of @full_path using the CIFS unix
 * extensions (QUERY_PATH_UNIX_BASIC) and either instantiate a new inode
 * (*pinode == NULL) or refresh the one passed in.
 *
 * Returns 0 on success or a negative error code. -EREMOTE from the query
 * (a DFS referral point) is mapped to fake directory attributes and a
 * return of 0.
 */
int cifs_get_inode_info_unix(struct inode **pinode,
			     const unsigned char *full_path,
			     struct super_block *sb, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO find_data;
	struct cifs_fattr fattr;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	cFYI(1, "Getting info on %s", full_path);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	/* could have done a find first instead but this returns more info */
	rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
				  cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (!rc) {
		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
	} else if (rc == -EREMOTE) {
		/* DFS junction -- fake up directory attributes */
		cifs_create_dfs_fattr(&fattr, sb);
		rc = 0;
	} else {
		return rc;
	}

	/* check for Minshall+French symlinks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
		/* a failure here is not fatal -- just log it */
		int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
	}

	if (*pinode == NULL) {
		/* get new inode */
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode)
			rc = -ENOMEM;
	} else {
		/* we already have inode, update it */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

	return rc;
}
/*
 * Work out the "Services for Unix" (SFU) special-file type of @path.
 *
 * SFU emulates device nodes, FIFOs and symlinks as regular files whose
 * first bytes contain a magic tag ("IntxBLK", "IntxCHR", "IntxLNK"), so
 * we open the file and read up to 24 bytes of header to decode the type
 * and, for device nodes, the major/minor numbers. A zero-length file
 * represents a FIFO.
 *
 * On entry fattr->cf_eof must hold the file size; the S_IFMT bits of
 * cf_mode are replaced with the detected type. Returns 0 on success or
 * a negative error code (-EOPNOTSUPP when the header matches no known
 * SFU tag; the file is then treated as a regular file).
 */
static int
cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
	      struct cifs_sb_info *cifs_sb, int xid)
{
	int rc;
	int oplock = 0;
	__u16 netfid;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct cifs_io_parms io_parms;
	char buf[24];
	unsigned int bytes_read;
	char *pbuf;

	pbuf = buf;

	/* clear the type bits; they are filled in below */
	fattr->cf_mode &= ~S_IFMT;

	if (fattr->cf_eof == 0) {
		/* zero-length SFU files represent FIFOs */
		fattr->cf_mode |= S_IFIFO;
		fattr->cf_dtype = DT_FIFO;
		return 0;
	} else if (fattr->cf_eof < 8) {
		/* too short to hold any SFU magic tag */
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		return -EINVAL;	 /* EOPNOTSUPP? */
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, GENERIC_READ,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0) {
		int buf_type = CIFS_NO_BUFFER;
			/* Read header */
		io_parms.netfid = netfid;
		io_parms.pid = current->tgid;
		io_parms.tcon = tcon;
		io_parms.offset = 0;
		io_parms.length = 24;
		rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
				 &buf_type);
		if ((rc == 0) && (bytes_read >= 8)) {
			if (memcmp("IntxBLK", pbuf, 8) == 0) {
				cFYI(1, "Block device");
				fattr->cf_mode |= S_IFBLK;
				fattr->cf_dtype = DT_BLK;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					fattr->cf_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
				cFYI(1, "Char device");
				fattr->cf_mode |= S_IFCHR;
				fattr->cf_dtype = DT_CHR;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					fattr->cf_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
				cFYI(1, "Symlink");
				fattr->cf_mode |= S_IFLNK;
				fattr->cf_dtype = DT_LNK;
			} else {
				fattr->cf_mode |= S_IFREG; /* file? */
				fattr->cf_dtype = DT_REG;
				rc = -EOPNOTSUPP;
			}
		} else {
			fattr->cf_mode |= S_IFREG; /* then it is a file */
			fattr->cf_dtype = DT_REG;
			rc = -EOPNOTSUPP; /* or some unknown SFU type */
		}
		CIFSSMBClose(xid, tcon, netfid);
	}
	cifs_put_tlink(tlink);
	return rc;
}
#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */
/*
* Fetch mode bits as provided by SFU.
*
* FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ?
*/
static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
			 struct cifs_sb_info *cifs_sb, int xid)
{
#ifdef CONFIG_CIFS_XATTR
	ssize_t ea_len;
	char ea_value[4];
	__u32 mode;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	/* the special bits live in the "SETFILEBITS" extended attribute */
	ea_len = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
				ea_value, 4 /* size of buf */,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (ea_len < 0)
		return (int)ea_len;

	/* need a full 4-byte little-endian value to decode the bits */
	if (ea_len > 3) {
		mode = le32_to_cpu(*((__le32 *)ea_value));
		fattr->cf_mode &= ~SFBITS_MASK;
		cFYI(1, "special bits 0%o org mode 0%o", mode,
		     fattr->cf_mode);
		fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
		cFYI(1, "special mode bits 0%o", mode);
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
		       struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	memset(fattr, 0, sizeof(*fattr));

	/* ownership always comes from the mount options */
	fattr->cf_uid = cifs_sb->mnt_uid;
	fattr->cf_gid = cifs_sb->mnt_gid;

	fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);

	if (info->DeletePending)
		fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;

	/* a zero LastAccessTime means the server didn't supply one */
	if (info->LastAccessTime == 0)
		fattr->cf_atime = CURRENT_TIME;
	else
		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);

	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
	if (adjust_tz) {
		/* legacy query reported server-local times; shift them */
		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
	}

	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
		fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
		fattr->cf_dtype = DT_DIR;
		/*
		 * Server can return wrong NumberOfLinks value for directories
		 * when Unix extensions are disabled - fake it.
		 */
		fattr->cf_nlink = 2;
	} else {
		fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
		fattr->cf_dtype = DT_REG;

		/* clear write bits if ATTR_READONLY is set */
		if (fattr->cf_cifsattrs & ATTR_READONLY)
			fattr->cf_mode &= ~(S_IWUGO);

		fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
		if (fattr->cf_nlink < 1) {
			cFYI(1, "replacing bogus file nlink value %u\n",
			     fattr->cf_nlink);
			fattr->cf_nlink = 1;
		}
	}
}
/*
 * Refresh the inode behind an already-open file using a handle-based
 * QUERY_FILE_ALL_INFO call (no unix extensions).
 *
 * The SFU/ACL work is deliberately skipped here; the inode is flagged
 * CIFS_FATTR_NEED_REVAL so a later path-based lookup fills that in.
 * Returns 0 on success or a negative error code.
 */
int cifs_get_file_info(struct file *filp)
{
	int rc;
	int xid;
	FILE_ALL_INFO find_data;
	struct cifs_fattr fattr;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	xid = GetXid();
	rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
	switch (rc) {
	case 0:
		cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
		break;
	case -EREMOTE:
		/* DFS junction -- fake up directory attributes */
		cifs_create_dfs_fattr(&fattr, inode->i_sb);
		rc = 0;
		break;
	case -EOPNOTSUPP:
	case -EINVAL:
		/*
		 * FIXME: legacy server -- fall back to path-based call?
		 * for now, just skip revalidating and mark inode for
		 * immediate reval.
		 */
		rc = 0;
		CIFS_I(inode)->time = 0;
		/* intentional fallthrough to the exit path */
	default:
		goto cgfi_exit;
	}

	/*
	 * don't bother with SFU junk here -- just mark inode as needing
	 * revalidation.
	 */
	fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
	fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
	cifs_fattr_to_inode(inode, &fattr);
cgfi_exit:
	FreeXid(xid);
	return rc;
}
/*
 * Fetch attributes for @full_path without the unix extensions and either
 * instantiate a new inode (*pinode == NULL) or refresh the existing one.
 *
 * @pfindData may carry attribute data already obtained by the caller;
 * when NULL the server is queried (QUERY_PATH_INFO, with a fallback to
 * the legacy SMBQueryInformation for old servers). @pfid optionally
 * names an open file handle used when fetching the ACL.
 *
 * Returns 0 on success or a negative error code.
 */
int cifs_get_inode_info(struct inode **pinode,
	const unsigned char *full_path, FILE_ALL_INFO *pfindData,
	struct super_block *sb, int xid, const __u16 *pfid)
{
	int rc = 0, tmprc;
	struct cifs_tcon *pTcon;
	struct tcon_link *tlink;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *buf = NULL;
	bool adjustTZ = false;
	struct cifs_fattr fattr;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	cFYI(1, "Getting info on %s", full_path);

	if ((pfindData == NULL) && (*pinode != NULL)) {
		if (CIFS_I(*pinode)->clientCanCacheRead) {
			/* read oplock held -- cached attrs are trustworthy */
			cFYI(1, "No need to revalidate cached inode sizes");
			goto cgii_exit;
		}
	}

	/* if file info not passed in then get it from server */
	if (pfindData == NULL) {
		buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
		if (buf == NULL) {
			rc = -ENOMEM;
			goto cgii_exit;
		}
		pfindData = (FILE_ALL_INFO *)buf;

		/* could do find first instead but this returns more info */
		rc = CIFSSMBQPathInfo(xid, pTcon, full_path, pfindData,
				      0 /* not legacy */,
				      cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);

		/* BB optimize code so we do not make the above call
		   when server claims no NT SMB support and the above call
		   failed at least once - set flag in tcon or mount */
		if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
			rc = SMBQueryInformation(xid, pTcon, full_path,
						 pfindData, cifs_sb->local_nls,
						 cifs_sb->mnt_cifs_flags &
						   CIFS_MOUNT_MAP_SPECIAL_CHR);
			/* legacy call reports server-local times */
			adjustTZ = true;
		}
	}

	if (!rc) {
		cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData,
				       cifs_sb, adjustTZ);
	} else if (rc == -EREMOTE) {
		/* DFS junction -- fake up directory attributes */
		cifs_create_dfs_fattr(&fattr, sb);
		rc = 0;
	} else {
		goto cgii_exit;
	}

	/*
	 * If an inode wasn't passed in, then get the inode number
	 *
	 * Is an i_ino of zero legal? Can we use that to check if the server
	 * supports returning inode numbers?  Are there other sanity checks we
	 * can use to ensure that the server is really filling in that field?
	 *
	 * We can not use the IndexNumber field by default from Windows or
	 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
	 * CIFS spec claims that this value is unique within the scope of a
	 * share, and the windows docs hint that it's actually unique
	 * per-machine.
	 *
	 * There may be higher info levels that work but are there Windows
	 * server or network appliances for which IndexNumber field is not
	 * guaranteed unique?
	 */
	if (*pinode == NULL) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
			int rc1 = 0;

			rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
					full_path, &fattr.cf_uniqueid,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			if (rc1 || !fattr.cf_uniqueid) {
				/* server can't do it -- fall back to local */
				cFYI(1, "GetSrvInodeNum rc %d", rc1);
				fattr.cf_uniqueid = iunique(sb, ROOT_I);
				cifs_autodisable_serverino(cifs_sb);
			}
		} else {
			fattr.cf_uniqueid = iunique(sb, ROOT_I);
		}
	} else {
		fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid;
	}

	/* query for SFU type info if supported and needed */
	if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
		tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "cifs_sfu_type failed: %d", tmprc);
	}

#ifdef CONFIG_CIFS_ACL
	/* fill in 0777 bits from ACL */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
						pfid);
		if (rc) {
			cFYI(1, "%s: Getting ACL failed with error: %d",
				__func__, rc);
			goto cgii_exit;
		}
	}
#endif /* CONFIG_CIFS_ACL */

	/* fill in remaining high mode bits e.g. SUID, VTX */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);

	/* check for Minshall+French symlinks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
		tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
	}

	if (!*pinode) {
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode)
			rc = -ENOMEM;
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

cgii_exit:
	kfree(buf);
	cifs_put_tlink(tlink);
	return rc;
}
/* inode ops for the faked-up root of an IPC share: lookups only */
static const struct inode_operations cifs_ipc_inode_ops = {
	.lookup = cifs_lookup,
};
/*
 * Build the path used to reach the root of the mount: the optional DFS
 * tree name followed by the mount's prefix path, with delimiters
 * converted to the separator this mount uses.
 *
 * Returns a kmalloc'd NUL-terminated string the caller must kfree, or
 * NULL on allocation failure.
 *
 * Fix: NUL-terminate the buffer BEFORE calling convert_delimiter(),
 * which scans for the terminator -- the original order let it read past
 * the end of the freshly-allocated (uninitialized) buffer.
 */
char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
			      struct cifs_tcon *tcon)
{
	int pplen = vol->prepath ? strlen(vol->prepath) : 0;
	int dfsplen;
	char *full_path = NULL;

	/* if no prefix path, simply set path to the root of share to "" */
	if (pplen == 0) {
		full_path = kmalloc(1, GFP_KERNEL);
		if (full_path)
			full_path[0] = 0;
		return full_path;
	}

	if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
	else
		dfsplen = 0;

	full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return full_path;

	if (dfsplen)
		strncpy(full_path, tcon->treeName, dfsplen);
	strncpy(full_path + dfsplen, vol->prepath, pplen);
	full_path[dfsplen + pplen] = 0; /* add trailing null */
	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
	return full_path;
}
static int
cifs_find_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
/* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
/* use createtime like an i_generation field */
if (CIFS_I(inode)->createtime != fattr->cf_createtime)
return 0;
/* don't match inode of different type */
if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT))
return 0;
/* if it's not a directory or has no dentries, then flag it */
if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry))
fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
return 1;
}
static int
cifs_init_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
CIFS_I(inode)->createtime = fattr->cf_createtime;
return 0;
}
/*
* walk dentry list for an inode and report whether it has aliases that
* are hashed. We use this to determine if a directory inode can actually
* be used.
*/
static bool
inode_has_hashed_dentries(struct inode *inode)
{
struct dentry *dentry;
spin_lock(&inode->i_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
spin_unlock(&inode->i_lock);
return true;
}
}
spin_unlock(&inode->i_lock);
return false;
}
/*
 * Given fattrs, get a corresponding inode.
 *
 * Looks up (or allocates) the inode matching @fattr via iget5_locked().
 * If the server handed out a duplicate inode number for a live directory
 * (CIFS_FATTR_INO_COLLISION with hashed dentries), server inode numbers
 * are disabled and the lookup retried with a locally generated id.
 * Returns the inode or NULL on allocation failure.
 */
struct inode *
cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
{
	unsigned long hash;
	struct inode *inode;

retry_iget5_locked:
	cFYI(1, "looking for uniqueid=%llu", fattr->cf_uniqueid);

	/* hash down to 32-bits on 32-bit arch */
	hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
	inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
	if (inode) {
		/* was there a potentially problematic inode collision? */
		if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
			fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;

			if (inode_has_hashed_dentries(inode)) {
				/* stop trusting server inode numbers and
				   retry with a locally generated one */
				cifs_autodisable_serverino(CIFS_SB(sb));
				iput(inode);
				fattr->cf_uniqueid = iunique(sb, ROOT_I);
				goto retry_iget5_locked;
			}
		}

		cifs_fattr_to_inode(inode, fattr);
		if (sb->s_flags & MS_NOATIME)
			inode->i_flags |= S_NOATIME | S_NOCMTIME;
		if (inode->i_state & I_NEW) {
			inode->i_ino = hash;
			if (S_ISREG(inode->i_mode))
				inode->i_data.backing_dev_info = sb->s_bdi;
#ifdef CONFIG_CIFS_FSCACHE
			/* initialize per-inode cache cookie pointer */
			CIFS_I(inode)->fscache = NULL;
#endif
			unlock_new_inode(inode);
		}
	}

	return inode;
}
/*
 * gets root inode
 *
 * Queries the server for the attributes of the share root ("") and
 * returns the corresponding inode. For IPC shares, where the query is
 * expected to fail, a fake directory inode is synthesized instead.
 * Returns an ERR_PTR on failure.
 */
struct inode *cifs_root_iget(struct super_block *sb)
{
	int xid;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct inode *inode = NULL;
	long rc;
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	xid = GetXid();
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
	else
		rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);

	if (!inode) {
		inode = ERR_PTR(rc);
		goto out;
	}

#ifdef CONFIG_CIFS_FSCACHE
	/* populate tcon->resource_id */
	tcon->resource_id = CIFS_I(inode)->uniqueid;
#endif

	if (rc && tcon->ipc) {
		/* query failed on an IPC share -- fake a directory inode */
		cFYI(1, "ipc connection - fake read inode");
		inode->i_mode |= S_IFDIR;
		set_nlink(inode, 2);
		inode->i_op = &cifs_ipc_inode_ops;
		inode->i_fop = &simple_dir_operations;
		inode->i_uid = cifs_sb->mnt_uid;
		inode->i_gid = cifs_sb->mnt_gid;
	} else if (rc) {
		iget_failed(inode);
		inode = ERR_PTR(rc);
	}

out:
	/* can not call macro FreeXid here since in a void func
	 * TODO: This is no longer true
	 */
	_FreeXid(xid);
	return inode;
}
/*
 * Push timestamp and DOS-attribute changes for @full_path to the server.
 *
 * Strategy: reuse an existing writable filehandle when one is open;
 * otherwise try the path-based SET_PATH_INFO call (skipped on NT4 which
 * silently ignores it), and as a last resort open the file just to issue
 * a handle-based SET_FILE_INFO.
 *
 * Only the time fields flagged in attrs->ia_valid are sent; zeroed
 * fields tell the server "don't change". Returns 0 on success or a
 * negative error code.
 */
static int
cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
		   char *full_path, __u32 dosattr)
{
	int rc;
	int oplock = 0;
	__u16 netfid;
	__u32 netpid;
	bool set_time = false;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = NULL;
	struct cifs_tcon *pTcon;
	FILE_BASIC_INFO	info_buf;

	if (attrs == NULL)
		return -EINVAL;

	if (attrs->ia_valid & ATTR_ATIME) {
		set_time = true;
		info_buf.LastAccessTime =
			cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
	} else
		info_buf.LastAccessTime = 0;

	if (attrs->ia_valid & ATTR_MTIME) {
		set_time = true;
		info_buf.LastWriteTime =
		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
	} else
		info_buf.LastWriteTime = 0;

	/*
	 * Samba throws this field away, but windows may actually use it.
	 * Do not set ctime unless other time stamps are changed explicitly
	 * (i.e. by utimes()) since we would then have a mix of client and
	 * server times.
	 */
	if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
		cFYI(1, "CIFS - CTIME changed");
		info_buf.ChangeTime =
		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
	} else
		info_buf.ChangeTime = 0;

	info_buf.CreationTime = 0;	/* don't change */
	info_buf.Attributes = cpu_to_le32(dosattr);

	/*
	 * If the file is already open for write, just use that fileid
	 */
	open_file = find_writable_file(cifsInode, true);
	if (open_file) {
		netfid = open_file->netfid;
		netpid = open_file->pid;
		pTcon = tlink_tcon(open_file->tlink);
		goto set_via_filehandle;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		tlink = NULL;
		goto out;
	}
	pTcon = tlink_tcon(tlink);

	/*
	 * NT4 apparently returns success on this call, but it doesn't
	 * really work.
	 */
	if (!(pTcon->ses->flags & CIFS_SES_NT4)) {
		rc = CIFSSMBSetPathInfo(xid, pTcon, full_path,
					&info_buf, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc == 0) {
			cifsInode->cifsAttrs = dosattr;
			goto out;
		} else if (rc != -EOPNOTSUPP && rc != -EINVAL)
			goto out;
	}

	cFYI(1, "calling SetFileInfo since SetPathInfo for "
		 "times not supported by this server");
	/* open the file just to get a handle for the set call */
	rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
			 SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
			 CREATE_NOT_DIR, &netfid, &oplock,
			 NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc != 0) {
		if (rc == -EIO)
			rc = -EINVAL;
		goto out;
	}

	netpid = current->tgid;

set_via_filehandle:
	rc = CIFSSMBSetFileInfo(xid, pTcon, &info_buf, netfid, netpid);
	if (!rc)
		cifsInode->cifsAttrs = dosattr;

	if (open_file == NULL)
		CIFSSMBClose(xid, pTcon, netfid);	/* we opened it above */
	else
		cifsFileInfo_put(open_file);
out:
	if (tlink != NULL)
		cifs_put_tlink(tlink);
	return rc;
}
/*
 * open the given file (if it isn't already), set the DELETE_ON_CLOSE bit
 * and rename it to a random name that hopefully won't conflict with
 * anything else.
 *
 * Returns 0 on success. -ETXTBSY signals the caller that the silly-
 * rename approach also failed; other negative codes are passed through.
 *
 * Fix: the result check after CIFSSMBSetFileInfo() was inverted -- on
 * FAILURE it cached the attributes that were never set and on SUCCESS
 * it reverted dosattr (disarming both the attribute cache update and
 * the undo_setattr path). Record cifsAttrs only on success; reset
 * dosattr to origattr only on failure.
 */
static int
cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
{
	int oplock = 0;
	int rc;
	__u16 netfid;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	__u32 dosattr, origattr;
	FILE_BASIC_INFO *info_buf = NULL;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	/* need DELETE perms for the rename and WRITE_ATTRIBUTES to hide it */
	rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
			 DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
			 &netfid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc != 0)
		goto out;

	origattr = cifsInode->cifsAttrs;
	if (origattr == 0)
		origattr |= ATTR_NORMAL;

	dosattr = origattr & ~ATTR_READONLY;
	if (dosattr == 0)
		dosattr |= ATTR_NORMAL;
	dosattr |= ATTR_HIDDEN;

	/* set ATTR_HIDDEN and clear ATTR_READONLY, but only if needed */
	if (dosattr != origattr) {
		info_buf = kzalloc(sizeof(*info_buf), GFP_KERNEL);
		if (info_buf == NULL) {
			rc = -ENOMEM;
			goto out_close;
		}
		info_buf->Attributes = cpu_to_le32(dosattr);
		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
					current->tgid);
		/* although we would like to mark the file hidden
		   if that fails we will still try to rename it */
		if (rc != 0)
			dosattr = origattr; /* since not able to change them */
		else
			cifsInode->cifsAttrs = dosattr;
	}

	/* rename the file */
	rc = CIFSSMBRenameOpenFile(xid, tcon, netfid, NULL, cifs_sb->local_nls,
				   cifs_sb->mnt_cifs_flags &
					    CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc != 0) {
		rc = -ETXTBSY;
		goto undo_setattr;
	}

	/* try to set DELETE_ON_CLOSE */
	if (!cifsInode->delete_pending) {
		rc = CIFSSMBSetFileDisposition(xid, tcon, true, netfid,
					       current->tgid);
		/*
		 * some samba versions return -ENOENT when we try to set the
		 * file disposition here. Likely a samba bug, but work around
		 * it for now. This means that some cifsXXX files may hang
		 * around after they shouldn't.
		 *
		 * BB: remove this hack after more servers have the fix
		 */
		if (rc == -ENOENT)
			rc = 0;
		else if (rc != 0) {
			rc = -ETXTBSY;
			goto undo_rename;
		}
		cifsInode->delete_pending = true;
	}

out_close:
	CIFSSMBClose(xid, tcon, netfid);
out:
	kfree(info_buf);
	cifs_put_tlink(tlink);
	return rc;

	/*
	 * reset everything back to the original state. Don't bother
	 * dealing with errors here since we can't do anything about
	 * them anyway.
	 */
undo_rename:
	CIFSSMBRenameOpenFile(xid, tcon, netfid, dentry->d_name.name,
				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					    CIFS_MOUNT_MAP_SPECIAL_CHR);
undo_setattr:
	if (dosattr != origattr) {
		info_buf->Attributes = cpu_to_le32(origattr);
		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
					current->tgid))
			cifsInode->cifsAttrs = origattr;
	}

	goto out_close;
}
/*
 * If dentry->d_inode is null (usually meaning the cached dentry
 * is a negative dentry) then we would attempt a standard SMB delete, but
 * if that fails we can not attempt the fall back mechanisms on EACCESS
 * but will return the EACCESS to the caller. Note that the VFS does not call
 * unlink on negative dentries currently.
 *
 * Delete sequence: POSIX unlink (when the server supports it), then the
 * standard SMB delete; -ETXTBSY is retried via silly-rename and -EACCES
 * by clearing the read-only attribute first.
 */
int cifs_unlink(struct inode *dir, struct dentry *dentry)
{
	int rc = 0;
	int xid;
	char *full_path = NULL;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cifs_inode;
	struct super_block *sb = dir->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct iattr *attrs = NULL;
	__u32 dosattr = 0, origattr = 0;

	cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	xid = GetXid();

	/* Unlink can be called from rename so we can not take the
	 * sb->s_vfs_rename_mutex here */
	full_path = build_path_from_dentry(dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto unlink_out;
	}

	if ((tcon->ses->capabilities & CAP_UNIX) &&
		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		rc = CIFSPOSIXDelFile(xid, tcon, full_path,
			SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
		cFYI(1, "posix del rc %d", rc);
		if ((rc == 0) || (rc == -ENOENT))
			goto psx_del_no_retry;
	}

retry_std_delete:
	rc = CIFSSMBDelFile(xid, tcon, full_path, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);

psx_del_no_retry:
	if (!rc) {
		if (inode)
			drop_nlink(inode);
	} else if (rc == -ENOENT) {
		/* gone on the server -- drop the stale dentry */
		d_drop(dentry);
	} else if (rc == -ETXTBSY) {
		/* file is in use -- fall back to silly-rename */
		rc = cifs_rename_pending_delete(full_path, dentry, xid);
		if (rc == 0)
			drop_nlink(inode);
	} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
		attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
		if (attrs == NULL) {
			rc = -ENOMEM;
			goto out_reval;
		}

		/* try to reset dos attributes */
		cifs_inode = CIFS_I(inode);
		origattr = cifs_inode->cifsAttrs;
		if (origattr == 0)
			origattr |= ATTR_NORMAL;
		dosattr = origattr & ~ATTR_READONLY;
		if (dosattr == 0)
			dosattr |= ATTR_NORMAL;
		dosattr |= ATTR_HIDDEN;

		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
		if (rc != 0)
			goto out_reval;

		goto retry_std_delete;
	}

	/* undo the setattr if we errored out and it's needed */
	if (rc != 0 && dosattr != 0)
		cifs_set_file_info(inode, attrs, xid, full_path, origattr);

out_reval:
	if (inode) {
		cifs_inode = CIFS_I(inode);
		cifs_inode->time = 0;	/* will force revalidate to get info
					   when needed */
		inode->i_ctime = current_fs_time(sb);
	}
	dir->i_ctime = dir->i_mtime = current_fs_time(sb);
	cifs_inode = CIFS_I(dir);
	CIFS_I(dir)->time = 0;	/* force revalidate of dir as well */
unlink_out:
	kfree(full_path);
	kfree(attrs);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * Create directory @direntry under @inode with the requested @mode.
 *
 * When the server supports POSIX path operations the mode is applied
 * atomically via CIFSPOSIXCreate; otherwise a plain SMB mkdir is issued
 * and the mode/ownership are approximated afterwards (unix extensions,
 * read-only attribute, or local-only DYNPERM/SET_UID tweaks).
 * Returns 0 on success or a negative error code.
 */
int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
{
	int rc = 0, tmprc;
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	char *full_path = NULL;
	struct inode *newinode = NULL;
	struct cifs_fattr fattr;

	cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode);

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto mkdir_out;
	}

	/* POSIX-capable server: create with the mode in one round trip */
	if ((pTcon->ses->capabilities & CAP_UNIX) &&
		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
		u32 oplock = 0;
		FILE_UNIX_BASIC_INFO *pInfo =
			kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
		if (pInfo == NULL) {
			rc = -ENOMEM;
			goto mkdir_out;
		}

		mode &= ~current_umask();
		rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
				mode, NULL /* netfid */, pInfo, &oplock,
				full_path, cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc == -EOPNOTSUPP) {
			kfree(pInfo);
			goto mkdir_retry_old;
		} else if (rc) {
			cFYI(1, "posix mkdir returned 0x%x", rc);
			d_drop(direntry);
		} else {
			if (pInfo->Type == cpu_to_le32(-1)) {
				/* no return info, go query for it */
				kfree(pInfo);
				goto mkdir_get_info;
			}
/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
	to set uid/gid */

			cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
			cifs_fill_uniqueid(inode->i_sb, &fattr);
			newinode = cifs_iget(inode->i_sb, &fattr);
			if (!newinode) {
				kfree(pInfo);
				goto mkdir_get_info;
			}

			d_instantiate(direntry, newinode);

#ifdef CONFIG_CIFS_DEBUG2
			cFYI(1, "instantiated dentry %p %s to inode %p",
				direntry, direntry->d_name.name, newinode);

			if (newinode->i_nlink != 2)
				cFYI(1, "unexpected number of links %d",
					newinode->i_nlink);
#endif
		}
		kfree(pInfo);
		goto mkdir_out;
	}

mkdir_retry_old:
	/* BB add setting the equivalent of mode via CreateX w/ACLs */
	rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
			  cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cFYI(1, "cifs_mkdir returned 0x%x", rc);
		d_drop(direntry);
	} else {
mkdir_get_info:
		/* plain mkdir succeeded; fetch the new dir's attributes */
		if (pTcon->unix_ext)
			rc = cifs_get_inode_info_unix(&newinode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&newinode, full_path, NULL,
						 inode->i_sb, xid, NULL);

		d_instantiate(direntry, newinode);
		 /* setting nlink not necessary except in cases where we
		  * failed to get it from the server or was set bogus */
		if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2))
			set_nlink(direntry->d_inode, 2);

		mode &= ~current_umask();
		/* must turn on setgid bit if parent dir has it */
		if (inode->i_mode & S_ISGID)
			mode |= S_ISGID;

		if (pTcon->unix_ext) {
			/* apply the mode (and optionally owner) afterwards */
			struct cifs_unix_set_info_args args = {
				.mode	= mode,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
				args.uid = (__u64)current_fsuid();
				if (inode->i_mode & S_ISGID)
					args.gid = (__u64)inode->i_gid;
				else
					args.gid = (__u64)current_fsgid();
			} else {
				args.uid = NO_CHANGE_64;
				args.gid = NO_CHANGE_64;
			}
			CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
					       cifs_sb->local_nls,
					       cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		} else {
			/* no unix extensions: at best mark read-only */
			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
			    (mode & S_IWUGO) == 0) {
				FILE_BASIC_INFO pInfo;
				struct cifsInodeInfo *cifsInode;
				u32 dosattrs;

				memset(&pInfo, 0, sizeof(pInfo));
				cifsInode = CIFS_I(newinode);
				dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
				pInfo.Attributes = cpu_to_le32(dosattrs);
				tmprc = CIFSSMBSetPathInfo(xid, pTcon,
						full_path, &pInfo,
						cifs_sb->local_nls,
						cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
				if (tmprc == 0)
					cifsInode->cifsAttrs = dosattrs;
			}
			if (direntry->d_inode) {
				/* client-side-only mode/ownership fixups */
				if (cifs_sb->mnt_cifs_flags &
				     CIFS_MOUNT_DYNPERM)
					direntry->d_inode->i_mode =
						(mode | S_IFDIR);

				if (cifs_sb->mnt_cifs_flags &
				     CIFS_MOUNT_SET_UID) {
					direntry->d_inode->i_uid =
						current_fsuid();
					if (inode->i_mode & S_ISGID)
						direntry->d_inode->i_gid =
							inode->i_gid;
					else
						direntry->d_inode->i_gid =
							current_fsgid();
				}
			}
		}
	}
mkdir_out:
	/*
	 * Force revalidate to get parent dir info when needed since cached
	 * attributes are invalid now.
	 */
	CIFS_I(inode)->time = 0;
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * Remove directory @direntry from parent @inode on the server, then
 * zero the local inode's size/link count and force revalidation of
 * both the removed directory and its parent.
 * Returns 0 on success or a negative error code.
 */
int cifs_rmdir(struct inode *inode, struct dentry *direntry)
{
	int rc = 0;
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	char *full_path = NULL;
	struct cifsInodeInfo *cifsInode;

	cFYI(1, "cifs_rmdir, inode = 0x%p", inode);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto rmdir_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto rmdir_exit;
	}
	pTcon = tlink_tcon(tlink);

	rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
			  cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (!rc) {
		/* directory is gone: reflect that in the local inode */
		spin_lock(&direntry->d_inode->i_lock);
		i_size_write(direntry->d_inode, 0);
		clear_nlink(direntry->d_inode);
		spin_unlock(&direntry->d_inode->i_lock);
	}

	cifsInode = CIFS_I(direntry->d_inode);
	/* force revalidate to go get info when needed */
	cifsInode->time = 0;

	cifsInode = CIFS_I(inode);
	/*
	 * Force revalidate to get parent dir info when needed since cached
	 * attributes are invalid now.
	 */
	cifsInode->time = 0;

	direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
		current_fs_time(inode->i_sb);

rmdir_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/*
 * Ask the server to rename fromPath to toPath: first via the path-based
 * RENAME, then -- only when the source is busy and both names share a
 * parent -- via an open filehandle.
 */
static int
cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
	       struct dentry *to_dentry, const char *toPath)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	__u16 fid;
	int oplock, rc;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	/* try path-based rename first */
	rc = CIFSSMBRename(xid, tcon, fromPath, toPath, cifs_sb->local_nls,
			   cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);

	/*
	 * Rename by filehandle is only worth trying when the source is
	 * busy. Note that cross directory moves do not work with rename
	 * by filehandle to various Windows servers.
	 */
	if (rc != -ETXTBSY)
		goto do_rename_exit;
	if (to_dentry->d_parent != from_dentry->d_parent)
		goto do_rename_exit;

	/* open the file to be renamed -- we need DELETE perms */
	rc = CIFSSMBOpen(xid, tcon, fromPath, FILE_OPEN, DELETE,
			 CREATE_NOT_DIR, &fid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0) {
		rc = CIFSSMBRenameOpenFile(xid, tcon, fid,
				(const char *) to_dentry->d_name.name,
				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		CIFSSMBClose(xid, tcon, fid);
	}
do_rename_exit:
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * VFS rename.  Attempts the server-side rename, detects the
 * src-and-dst-are-hardlinks-to-the-same-inode case (a POSIX no-op, only
 * detectable with unix extensions), and retries once after unlinking an
 * existing target when the server refuses with -EACCES/-EEXIST.
 */
int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
	struct inode *target_dir, struct dentry *target_dentry)
{
	char *fromName = NULL;
	char *toName = NULL;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
	FILE_UNIX_BASIC_INFO *info_buf_target;
	int xid, rc, tmprc;

	cifs_sb = CIFS_SB(source_dir->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	xid = GetXid();

	/*
	 * we already have the rename sem so we do not need to
	 * grab it again here to protect the path integrity
	 */
	fromName = build_path_from_dentry(source_dentry);
	if (fromName == NULL) {
		rc = -ENOMEM;
		goto cifs_rename_exit;
	}

	toName = build_path_from_dentry(target_dentry);
	if (toName == NULL) {
		rc = -ENOMEM;
		goto cifs_rename_exit;
	}

	rc = cifs_do_rename(xid, source_dentry, fromName,
			    target_dentry, toName);

	if (rc == -EEXIST && tcon->unix_ext) {
		/*
		 * Are src and dst hardlinks of same inode? We can
		 * only tell with unix extensions enabled
		 */
		info_buf_source =
			kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO),
					GFP_KERNEL);
		if (info_buf_source == NULL) {
			rc = -ENOMEM;
			goto cifs_rename_exit;
		}

		/* single allocation holds both source and target buffers */
		info_buf_target = info_buf_source + 1;
		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
					info_buf_source,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (tmprc != 0)
			goto unlink_target;

		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
					info_buf_target,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);

		if (tmprc == 0 && (info_buf_source->UniqueId ==
				   info_buf_target->UniqueId)) {
			/* same file, POSIX says that this is a noop */
			rc = 0;
			goto cifs_rename_exit;
		}
	} /* else ... BB we could add the same check for Windows by
		     checking the UniqueId via FILE_INTERNAL_INFO */

unlink_target:
	/* Try unlinking the target dentry if it's not negative */
	if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
		tmprc = cifs_unlink(target_dir, target_dentry);
		if (tmprc)
			goto cifs_rename_exit;
		/* target is gone; retry the rename once */
		rc = cifs_do_rename(xid, source_dentry, fromName,
				    target_dentry, toName);
	}

cifs_rename_exit:
	kfree(info_buf_source);
	kfree(fromName);
	kfree(toName);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * Decide whether the cached inode attributes must be refreshed from the
 * server.  The checks are ordered: a read oplock always trusts the
 * cache; otherwise staleness markers and the actimeo timeout apply.
 */
static bool
cifs_inode_needs_reval(struct inode *inode)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* a read oplock means the server would tell us about changes */
	if (cifs_i->clientCanCacheRead)
		return false;

	if (!lookupCacheEnabled)
		return true;

	/* time == 0 marks the cached attributes as explicitly stale */
	if (cifs_i->time == 0)
		return true;

	/* attribute cache timeout (actimeo) expired? */
	if (!time_in_range(jiffies, cifs_i->time,
				cifs_i->time + cifs_sb->actimeo))
		return true;

	/* hardlinked files w/ noserverino get "special" treatment */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
	    S_ISREG(inode->i_mode) && inode->i_nlink != 1)
		return true;

	return false;
}
/*
* Zap the cache. Called when invalid_mapping flag is set.
*/
int
cifs_invalidate_mapping(struct inode *inode)
{
int rc = 0;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
cifs_i->invalid_mapping = false;
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = invalidate_inode_pages2(inode->i_mapping);
if (rc) {
cERROR(1, "%s: could not invalidate inode %p", __func__,
inode);
cifs_i->invalid_mapping = true;
}
}
cifs_fscache_reset_inode_cookie(inode);
return rc;
}
/* Refresh a file's cached attributes from the server if they may be stale. */
int cifs_revalidate_file_attr(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;

	if (!cifs_inode_needs_reval(inode))
		return 0;

	/* query by open handle; use the unix-extensions variant when available */
	return tlink_tcon(cfile->tlink)->unix_ext ?
		cifs_get_file_info_unix(filp) : cifs_get_file_info(filp);
}
/*
 * Refresh a dentry's inode attributes from the server when the cached
 * copy may be stale.  Returns 0 on success or when no refresh is needed,
 * -ENOENT for a negative dentry, or a negative error from the query.
 */
int cifs_revalidate_dentry_attr(struct dentry *dentry)
{
	int xid;
	int rc = 0;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dentry->d_sb;
	char *full_path = NULL;

	if (inode == NULL)
		return -ENOENT;

	if (!cifs_inode_needs_reval(inode))
		return rc;

	xid = GetXid();

	/* can not safely grab the rename sem here if rename calls revalidate
	   since that would deadlock */
	full_path = build_path_from_dentry(dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time "
		 "%ld jiffies %ld", full_path, inode, inode->i_count.counter,
		 dentry, dentry->d_time, jiffies);

	if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
					 xid, NULL);

out:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/* Revalidate a file's attributes and zap its page cache if it went stale. */
int cifs_revalidate_file(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int rc;

	rc = cifs_revalidate_file_attr(filp);
	if (rc == 0 && CIFS_I(inode)->invalid_mapping)
		rc = cifs_invalidate_mapping(inode);
	return rc;
}
/* revalidate a dentry's inode attributes */
int cifs_revalidate_dentry(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int rc;

	/* refresh attrs first; invalidate the mapping only if that flagged it */
	rc = cifs_revalidate_dentry_attr(dentry);
	if (rc == 0 && CIFS_I(inode)->invalid_mapping)
		rc = cifs_invalidate_mapping(inode);
	return rc;
}
/*
 * VFS getattr: wait for dirty data so the server has current mtime/size,
 * refresh attributes, then fill @stat with CIFS-specific adjustments
 * (blksize, server inode number, multiuser ownership fixups).
 */
int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct inode *inode = dentry->d_inode;
	int rc;

	/*
	 * We need to be sure that all dirty pages are written and the server
	 * has actual ctime, mtime and file length.
	 */
	if (!CIFS_I(inode)->clientCanCacheRead && inode->i_mapping &&
	    inode->i_mapping->nrpages != 0) {
		rc = filemap_fdatawait(inode->i_mapping);
		if (rc) {
			mapping_set_error(inode->i_mapping, rc);
			return rc;
		}
	}

	rc = cifs_revalidate_dentry_attr(dentry);
	if (rc)
		return rc;

	generic_fillattr(inode, stat);
	stat->blksize = CIFS_MAX_MSGSIZE;
	stat->ino = CIFS_I(inode)->uniqueid;	/* server-assigned inode number */

	/*
	 * If on a multiuser mount without unix extensions, and the admin hasn't
	 * overridden them, set the ownership to the fsuid/fsgid of the current
	 * process.
	 */
	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
	    !tcon->unix_ext) {
		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
			stat->uid = current_fsuid();
		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
			stat->gid = current_fsgid();
	}
	return rc;
}
/*
 * Zero the tail of the page containing @from (the new EOF) so that a
 * later extension does not expose stale data.  Returns 0 on success or
 * -ENOMEM if the page cannot be obtained.
 *
 * Fix: the old 'rc' local was set to 0 and never changed; it has been
 * removed in favor of a direct return.
 */
static int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	/* zero from the new EOF to the end of the page */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
	return 0;
}
/* Update i_size under i_lock, then shoot down pagecache beyond @offset. */
static void cifs_setsize(struct inode *inode, loff_t offset)
{
	loff_t oldsize;

	spin_lock(&inode->i_lock);
	oldsize = inode->i_size;
	i_size_write(inode, offset);
	spin_unlock(&inode->i_lock);

	truncate_pagecache(inode, oldsize, offset);
}
/*
 * Push a new file size (ATTR_SIZE) to the server.  Prefers a handle-based
 * SetFileSize on an already-open writable handle (preserves our oplock),
 * then falls back to path-based SetEOF, and finally to legacy
 * open + zero-length-write sequences for old servers.  On success the
 * local inode size and the partial tail page are updated.
 */
static int
cifs_set_file_size(struct inode *inode, struct iattr *attrs,
		   int xid, char *full_path)
{
	int rc;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = NULL;
	struct cifs_tcon *pTcon = NULL;
	struct cifs_io_parms io_parms;

	/*
	 * To avoid spurious oplock breaks from server, in the case of
	 * inodes that we already have open, avoid doing path based
	 * setting of file size if we can do it by handle.
	 * This keeps our caching token (oplock) and avoids timeouts
	 * when the local oplock break takes longer to flush
	 * writebehind data than the SMB timeout for the SetPathInfo
	 * request would allow
	 */
	open_file = find_writable_file(cifsInode, true);
	if (open_file) {
		__u16 nfid = open_file->netfid;
		__u32 npid = open_file->pid;

		pTcon = tlink_tcon(open_file->tlink);
		rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
					npid, false);
		cifsFileInfo_put(open_file);
		cFYI(1, "SetFSize for attrs rc = %d", rc);
		if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* legacy fallback: zero-length write sets EOF */
			unsigned int bytes_written;

			io_parms.netfid = nfid;
			io_parms.pid = npid;
			io_parms.tcon = pTcon;
			io_parms.offset = 0;
			io_parms.length = attrs->ia_size;
			rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
					  NULL, NULL, 1);
			cFYI(1, "Wrt seteof rc %d", rc);
		}
	} else
		rc = -EINVAL;

	if (rc != 0) {
		if (pTcon == NULL) {
			tlink = cifs_sb_tlink(cifs_sb);
			if (IS_ERR(tlink))
				return PTR_ERR(tlink);
			pTcon = tlink_tcon(tlink);
		}

		/* Set file size by pathname rather than by handle
		   either because no valid, writeable file handle for
		   it was found or because there was an error setting
		   it by handle */
		rc = CIFSSMBSetEOF(xid, pTcon, full_path, attrs->ia_size,
				   false, cifs_sb->local_nls,
				   cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		cFYI(1, "SetEOF by path (setattrs) rc = %d", rc);
		if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			__u16 netfid;
			int oplock = 0;

			rc = SMBLegacyOpen(xid, pTcon, full_path,
				FILE_OPEN, GENERIC_WRITE,
				CREATE_NOT_DIR, &netfid, &oplock, NULL,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
			if (rc == 0) {
				unsigned int bytes_written;

				io_parms.netfid = netfid;
				io_parms.pid = current->tgid;
				io_parms.tcon = pTcon;
				io_parms.offset = 0;
				io_parms.length = attrs->ia_size;
				rc = CIFSSMBWrite(xid, &io_parms,
						  &bytes_written,
						  NULL, NULL, 1);
				cFYI(1, "wrt seteof rc %d", rc);
				CIFSSMBClose(xid, pTcon, netfid);
			}
		}
		if (tlink)
			cifs_put_tlink(tlink);
	}

	if (rc == 0) {
		/* server accepted the new size: update local state */
		cifsInode->server_eof = attrs->ia_size;
		cifs_setsize(inode, attrs->ia_size);
		cifs_truncate_page(inode->i_mapping, inode->i_size);
	}

	return rc;
}
/*
 * setattr for mounts with unix extensions: build a
 * cifs_unix_set_info_args from @attrs (NO_CHANGE_64 for untouched
 * fields) and push it to the server, preferring a handle-based call on
 * an open writable file to preserve the oplock.
 */
static int
cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
{
	int rc;
	int xid;
	char *full_path = NULL;
	struct inode *inode = direntry->d_inode;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct cifs_unix_set_info_args *args = NULL;
	struct cifsFileInfo *open_file;

	cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x",
	     direntry->d_name.name, attrs->ia_valid);

	xid = GetXid();

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		attrs->ia_valid |= ATTR_FORCE;

	rc = inode_change_ok(inode, attrs);
	if (rc < 0)
		goto out;

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Attempt to flush data before changing attributes. We need to do
	 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
	 * ownership or mode then we may also need to do this. Here, we take
	 * the safe way out and just do the flush on all setattr requests. If
	 * the flush returns error, store it to report later and continue.
	 *
	 * BB: This should be smarter. Why bother flushing pages that
	 * will be truncated anyway? Also, should we error out here if
	 * the flush returns error?
	 */
	rc = filemap_write_and_wait(inode->i_mapping);
	mapping_set_error(inode->i_mapping, rc);
	rc = 0;

	if (attrs->ia_valid & ATTR_SIZE) {
		rc = cifs_set_file_size(inode, attrs, xid, full_path);
		if (rc != 0)
			goto out;
	}

	/* skip mode change if it's just for clearing setuid/setgid */
	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
		attrs->ia_valid &= ~ATTR_MODE;

	args = kmalloc(sizeof(*args), GFP_KERNEL);
	if (args == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* set up the struct; NO_CHANGE_64 leaves a field unmodified */
	if (attrs->ia_valid & ATTR_MODE)
		args->mode = attrs->ia_mode;
	else
		args->mode = NO_CHANGE_64;

	if (attrs->ia_valid & ATTR_UID)
		args->uid = attrs->ia_uid;
	else
		args->uid = NO_CHANGE_64;

	if (attrs->ia_valid & ATTR_GID)
		args->gid = attrs->ia_gid;
	else
		args->gid = NO_CHANGE_64;

	if (attrs->ia_valid & ATTR_ATIME)
		args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
	else
		args->atime = NO_CHANGE_64;

	if (attrs->ia_valid & ATTR_MTIME)
		args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
	else
		args->mtime = NO_CHANGE_64;

	if (attrs->ia_valid & ATTR_CTIME)
		args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
	else
		args->ctime = NO_CHANGE_64;

	args->device = 0;

	/* prefer setting by handle, to keep our caching token (oplock) */
	open_file = find_writable_file(cifsInode, true);
	if (open_file) {
		u16 nfid = open_file->netfid;
		u32 npid = open_file->pid;

		pTcon = tlink_tcon(open_file->tlink);
		rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
		cifsFileInfo_put(open_file);
	} else {
		tlink = cifs_sb_tlink(cifs_sb);
		if (IS_ERR(tlink)) {
			rc = PTR_ERR(tlink);
			goto out;
		}
		pTcon = tlink_tcon(tlink);
		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		cifs_put_tlink(tlink);
	}

	if (rc)
		goto out;

	if ((attrs->ia_valid & ATTR_SIZE) &&
	    attrs->ia_size != i_size_read(inode))
		truncate_setsize(inode, attrs->ia_size);

	setattr_copy(inode, attrs);
	mark_inode_dirty(inode);

	/* force revalidate when any of these times are set since some
	   of the fs types (eg ext3, fat) do not have fine enough
	   time granularity to match protocol, and we do not have a
	   a way (yet) to query the server fs's time granularity (and
	   whether it rounds times down).
	*/
	if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		cifsInode->time = 0;
out:
	kfree(args);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/*
 * setattr for mounts without unix extensions: mode/ownership changes are
 * mapped onto Windows ACLs (when CONFIG_CIFS_ACL is enabled) or the DOS
 * read-only attribute; size goes through cifs_set_file_size() and times
 * through cifs_set_file_info().
 */
static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{
	int xid;
	uid_t uid = NO_CHANGE_32;
	gid_t gid = NO_CHANGE_32;
	struct inode *inode = direntry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	char *full_path = NULL;
	int rc = -EACCES;
	__u32 dosattr = 0;
	__u64 mode = NO_CHANGE_64;

	xid = GetXid();

	cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
	     direntry->d_name.name, attrs->ia_valid);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		attrs->ia_valid |= ATTR_FORCE;

	rc = inode_change_ok(inode, attrs);
	if (rc < 0) {
		FreeXid(xid);
		return rc;
	}

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		FreeXid(xid);
		return rc;
	}

	/*
	 * Attempt to flush data before changing attributes. We need to do
	 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
	 * ownership or mode then we may also need to do this. Here, we take
	 * the safe way out and just do the flush on all setattr requests. If
	 * the flush returns error, store it to report later and continue.
	 *
	 * BB: This should be smarter. Why bother flushing pages that
	 * will be truncated anyway? Also, should we error out here if
	 * the flush returns error?
	 */
	rc = filemap_write_and_wait(inode->i_mapping);
	mapping_set_error(inode->i_mapping, rc);
	rc = 0;

	if (attrs->ia_valid & ATTR_SIZE) {
		rc = cifs_set_file_size(inode, attrs, xid, full_path);
		if (rc != 0)
			goto cifs_setattr_exit;
	}

	if (attrs->ia_valid & ATTR_UID)
		uid = attrs->ia_uid;
	if (attrs->ia_valid & ATTR_GID)
		gid = attrs->ia_gid;

#ifdef CONFIG_CIFS_ACL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
		/* push ownership change as a Windows ACL update */
		if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
			rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
							uid, gid);
			if (rc) {
				cFYI(1, "%s: Setting id failed with error: %d",
					__func__, rc);
				goto cifs_setattr_exit;
			}
		}
	} else
#endif /* CONFIG_CIFS_ACL */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
		attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);

	/* skip mode change if it's just for clearing setuid/setgid */
	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
		attrs->ia_valid &= ~ATTR_MODE;

	if (attrs->ia_valid & ATTR_MODE) {
		mode = attrs->ia_mode;
		rc = 0;
#ifdef CONFIG_CIFS_ACL
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
			rc = id_mode_to_cifs_acl(inode, full_path, mode,
						NO_CHANGE_32, NO_CHANGE_32);
			if (rc) {
				cFYI(1, "%s: Setting ACL failed with error: %d",
					__func__, rc);
				goto cifs_setattr_exit;
			}
		} else
#endif /* CONFIG_CIFS_ACL */
		if (((mode & S_IWUGO) == 0) &&
		    (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
			/* all write bits dropped: set DOS read-only bit */
			dosattr = cifsInode->cifsAttrs | ATTR_READONLY;

			/* fix up mode if we're not using dynperm */
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
				attrs->ia_mode = inode->i_mode & ~S_IWUGO;
		} else if ((mode & S_IWUGO) &&
			   (cifsInode->cifsAttrs & ATTR_READONLY)) {
			/* a write bit granted: clear DOS read-only bit */
			dosattr = cifsInode->cifsAttrs & ~ATTR_READONLY;
			/* Attributes of 0 are ignored */
			if (dosattr == 0)
				dosattr |= ATTR_NORMAL;

			/* reset local inode permissions to normal */
			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
				attrs->ia_mode &= ~(S_IALLUGO);
				if (S_ISDIR(inode->i_mode))
					attrs->ia_mode |=
						cifs_sb->mnt_dir_mode;
				else
					attrs->ia_mode |=
						cifs_sb->mnt_file_mode;
			}
		} else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
			/* ignore mode change - ATTR_READONLY hasn't changed */
			attrs->ia_valid &= ~ATTR_MODE;
		}
	}

	if (attrs->ia_valid & (ATTR_MTIME|ATTR_ATIME|ATTR_CTIME) ||
	    ((attrs->ia_valid & ATTR_MODE) && dosattr)) {
		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
		/* BB: check for rc = -EOPNOTSUPP and switch to legacy mode */

		/* Even if error on time set, no sense failing the call if
		the server would set the time to a reasonable value anyway,
		and this check ensures that we are not being called from
		sys_utimes in which case we ought to fail the call back to
		the user when the server rejects the call */
		if ((rc) && (attrs->ia_valid &
				(ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
			rc = 0;
	}

	/* do not need local check to inode_check_ok since the server does
	   that */
	if (rc)
		goto cifs_setattr_exit;

	if ((attrs->ia_valid & ATTR_SIZE) &&
	    attrs->ia_size != i_size_read(inode))
		truncate_setsize(inode, attrs->ia_size);

	setattr_copy(inode, attrs);
	mark_inode_dirty(inode);

cifs_setattr_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int
cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
	struct inode *inode = direntry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);

	/* BB: add cifs_setattr_legacy for really old servers */
	if (pTcon->unix_ext)
		return cifs_setattr_unix(direntry, attrs);
	else
		return cifs_setattr_nounix(direntry, attrs);
}
#if 0
/* Compiled out: placeholder for a future delete_inode hook. */
void cifs_delete_inode(struct inode *inode)
{
	cFYI(1, "In cifs_delete_inode, inode = 0x%p", inode);

	/* may have to add back in if and when safe distributed caching of
	   directories added e.g. via FindNotify */
}
#endif
| gpl-2.0 |
pseudonymous-foss/clydefs | arch/x86/kernel/apic/numaq_32.c | 2284 | 13488 | /*
* Written by: Patricia Gaughen, IBM Corporation
*
* Copyright (C) 2002, IBM Corp.
* Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <gone@us.ibm.com>
*/
#include <linux/nodemask.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/numaq.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/ipi.h>
/* Nonzero once the MP-table OEM check has identified a NUMA-Q box. */
int found_numaq;

/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */
struct mpc_trans {
	unsigned char mpc_type;
	unsigned char trans_len;
	unsigned char trans_type;
	unsigned char trans_quad;	/* quad (node) number */
	unsigned char trans_global;
	unsigned char trans_local;	/* local bus id within the quad */
	unsigned short trans_reserved;
};

/* index of the MPC entry currently being parsed (see numaq_mpc_record) */
static int mpc_record;

static struct mpc_trans *translation_table[MAX_MPC_ENTRY];

int mp_bus_id_to_node[MAX_MP_BUSSES];
int mp_bus_id_to_local[MAX_MP_BUSSES];
int quad_local_to_mp_bus_id[NR_CPUS/4][4];
/*
 * Record quad @node's memory range with the NUMA core.  The range is
 * derived from the quad config data; the << 20 presumably converts
 * MB-granular firmware fields to bytes — TODO confirm against the
 * sys_cfg_data definition.
 */
static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
{
	struct eachquadmem *eq = scd->eq + node;
	u64 start = (u64)(eq->hi_shrd_mem_start - eq->priv_mem_size) << 20;
	u64 end = (u64)(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size) << 20;
	int ret;

	node_set(node, numa_nodes_parsed);
	ret = numa_add_memblk(node, start, end);
	BUG_ON(ret < 0);
}
/*
* Function: smp_dump_qct()
*
* Description: gets memory layout from the quad config table. This
* function also updates numa_nodes_parsed with the nodes (quads) present.
*/
static void __init smp_dump_qct(void)
{
	struct sys_cfg_data *scd;
	int node;

	scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);

	/* quads_present31_0 is a bitmap of populated quads (nodes) */
	for_each_node(node) {
		if (scd->quads_present31_0 & (1 << node))
			numaq_register_node(node, scd);
	}
}
/* Disable the TSC on multi-quad NUMA-Q boxes (a no-op elsewhere). */
void __cpuinit numaq_tsc_disable(void)
{
	if (!found_numaq || num_online_nodes() <= 1)
		return;

	printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
	setup_clear_cpu_cap(X86_FEATURE_TSC);
}
/* timers.tsc_pre_init hook: disable the TSC before calibration runs. */
static void __init numaq_tsc_init(void)
{
	numaq_tsc_disable();
}
/*
 * Build a logical APIC id: quad number in the upper nibble, CPU bits in
 * the lower nibble (a zero physical id encodes as bit 0).
 */
static inline int generate_logical_apicid(int quad, int phys_apicid)
{
	int cpu_bits = phys_apicid ? (phys_apicid << 1) : 1;

	return (quad << 4) + cpu_bits;
}
/* x86_quirks member */
/*
 * Map an MPC processor entry to its logical APIC id using the quad from
 * the translation table entry matching the current mpc_record counter.
 */
static int mpc_apic_id(struct mpc_cpu *m)
{
	int quad = translation_table[mpc_record]->trans_quad;
	int logical_apicid = generate_logical_apicid(quad, m->apicid);

	printk(KERN_DEBUG
		"Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
		m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
		(m->cpufeature & CPU_MODEL_MASK) >> 4,
		m->apicver, quad, logical_apicid);

	return logical_apicid;
}
/* x86_quirks member */
/* Record which node/local-bus an MPC bus entry belongs to. */
static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
{
	int quad = translation_table[mpc_record]->trans_quad;
	int local = translation_table[mpc_record]->trans_local;

	mp_bus_id_to_node[m->busid] = quad;
	mp_bus_id_to_local[m->busid] = local;

	printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad);
}
/* x86_quirks member */
/* Record the reverse mapping: (quad, local bus) -> MP bus id, for PCI. */
static void mpc_oem_pci_bus(struct mpc_bus *m)
{
	int quad = translation_table[mpc_record]->trans_quad;
	int local = translation_table[mpc_record]->trans_local;

	quad_local_to_mp_bus_id[quad][local] = m->busid;
}
/*
* Called from mpparse code.
* mode = 0: prescan
* mode = 1: one mpc entry scanned
*/
static void numaq_mpc_record(unsigned int mode)
{
	/* mode 0 resets the entry counter; nonzero advances it */
	if (mode)
		mpc_record++;
	else
		mpc_record = 0;
}
/* Stash one OEM translation entry and mark its quad (node) online. */
static void __init MP_translation_info(struct mpc_trans *m)
{
	printk(KERN_INFO
	       "Translation: record %d, type %d, quad %d, global %d, local %d\n",
	       mpc_record, m->trans_type, m->trans_quad, m->trans_global,
	       m->trans_local);

	if (mpc_record >= MAX_MPC_ENTRY)
		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
	else
		translation_table[mpc_record] = m; /* stash this for later */

	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
		node_set_online(m->trans_quad);
}
/* Sum @len bytes at @mp modulo 256; a valid table sums to zero. */
static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum += mp[i];

	return sum & 0xFF;
}
/*
 * Read/parse the MPC oem tables
 */
static void __init smp_read_mpc_oem(struct mpc_table *mpc)
{
	struct mpc_oemtable *oemtable = (void *)(long)mpc->oemptr;
	int count = sizeof(*oemtable); /* the header size */
	unsigned char *oemptr = ((unsigned char *)oemtable) + count;

	mpc_record = 0;
	printk(KERN_INFO
		"Found an OEM MPC table at %8p - parsing it...\n", oemtable);

	/* validate signature and checksum before trusting the table */
	if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
		printk(KERN_WARNING
		       "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
		       oemtable->signature[0], oemtable->signature[1],
		       oemtable->signature[2], oemtable->signature[3]);
		return;
	}

	if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
		return;
	}

	/* walk the variable-length entries; bail on any unknown type */
	while (count < oemtable->length) {
		switch (*oemptr) {
		case MP_TRANSLATION:
		{
			struct mpc_trans *m = (void *)oemptr;

			MP_translation_info(m);
			oemptr += sizeof(*m);
			count += sizeof(*m);
			++mpc_record;
			break;
		}
		default:
			printk(KERN_WARNING
				"Unrecognised OEM table entry type! - %d\n",
				(int)*oemptr);
			return;
		}
	}
}
/*
 * Early-boot probe: parse the MP table and, if it identified a NUMA-Q
 * box (found_numaq set by numaq_mps_oem_check), install the NUMA-Q
 * specific x86_init hooks.
 */
static __init void early_check_numaq(void)
{
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		early_get_smp_config();

	if (found_numaq) {
		x86_init.mpparse.mpc_record = numaq_mpc_record;
		x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
		x86_init.mpparse.mpc_apic_id = mpc_apic_id;
		x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
		x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
		x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
		x86_init.timers.tsc_pre_init = numaq_tsc_init;
		x86_init.pci.init = pci_numaq_init;
	}
}
/* NUMA init entry point; returns -ENOENT when this is not a NUMA-Q box. */
int __init numaq_numa_init(void)
{
	early_check_numaq();
	if (!found_numaq)
		return -ENOENT;
	smp_dump_qct();

	return 0;
}
#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
/* Extract the 4-bit APIC id field from an APIC ID register value. */
static inline unsigned int numaq_get_apic_id(unsigned long x)
{
	unsigned long id_field = x >> 24;

	return (unsigned int)(id_field & 0x0F);
}
/* Deliver an IPI to every CPU in @mask, one logical destination at a time. */
static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_logical(mask, vector);
}

/* IPI all online CPUs except the sender. */
static inline void numaq_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
}

/* IPI every online CPU, including the sender. */
static inline void numaq_send_IPI_all(int vector)
{
	numaq_send_IPI_mask(cpu_online_mask, vector);
}
#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)

/*
 * Because we use NMIs rather than the INIT-STARTUP sequence to
 * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
 */
static inline void numaq_smp_callin_clear_local_apic(void)
{
	clear_local_APIC();
}

/* Interrupts may target any CPU in the system. */
static inline const struct cpumask *numaq_target_cpus(void)
{
	return cpu_all_mask;
}

/* Nonzero when @apicid is already marked used in @map. */
static unsigned long numaq_check_apicid_used(physid_mask_t *map, int apicid)
{
	return physid_isset(apicid, *map);
}

/* Nonzero when physical APIC id @bit is present in the system. */
static inline unsigned long numaq_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

/* APIC ids need no explicit registration on NUMA-Q; always succeeds. */
static inline int numaq_apic_id_registered(void)
{
	return 1;
}
static inline void numaq_init_apic_ldr(void)
{
	/* Already done in NUMA-Q firmware */
}

/* Announce the routing mode; no extra APIC routing setup is required. */
static inline void numaq_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n",
		nr_ioapics);
}
/*
 * Skip adding the timer int on secondary nodes, which causes
 * a small but painful rift in the time-space continuum.
 */
static inline int numaq_multi_timer_check(int apic, int irq)
{
	if (irq != 0)
		return 0;

	return apic != 0;
}
/*
 * NOTE(review): this uses "return expr;" in a void function, which is a
 * GCC extension when physids_promote() is itself void — confirm intent.
 */
static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* We don't have a good way to do this yet - hack */
	return physids_promote(0xFUL, retmap);
}
/*
* Supporting over 60 cpus on NUMA-Q requires a locality-dependent
* cpu to APIC ID relation to properly interact with the intelligent
* mode of the cluster controller.
*/
static inline int numaq_cpu_present_to_apicid(int mps_cpu)
{
	/* MPS CPU numbers 60 and above are not representable */
	if (mps_cpu >= 60)
		return BAD_APICID;

	/* quad number in the upper nibble, CPU bit within quad below */
	return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}
/* The upper nibble of a logical APIC id is the quad (node) number. */
static inline int numaq_apicid_to_node(int logical_apicid)
{
	int node = logical_apicid >> 4;

	return node;
}
/* Map @cpu to its NUMA node via the early logical APIC id, if known. */
static int numaq_numa_cpu_node(int cpu)
{
	int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

	if (logical_apicid != BAD_APICID)
		return numaq_apicid_to_node(logical_apicid);
	return NUMA_NO_NODE;
}
/* Build a physid mask with the bit for @logical_apicid's CPU set. */
static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
{
	int node = numaq_apicid_to_node(logical_apicid);
	int cpu = __ffs(logical_apicid & 0xf);	/* CPU index within the quad */

	physid_set_mask_of_physid(cpu + 4*node, retmap);
}
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

/* Physical APIC ids are always considered present on NUMA-Q. */
static inline int numaq_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}
/*
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
static int
numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			     const struct cpumask *andmask,
			     unsigned int *apicid)
{
	/* 0x0F is the physical broadcast id; the masks are ignored here */
	*apicid = 0x0F;
	return 0;
}
/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
{
	int pkg = cpuid_apic >> index_msb;

	return pkg;
}
/*
 * MP-table OEM check: detect the "IBM NUMA" OEM string and latch
 * found_numaq.  Returns nonzero when this driver should handle the box.
 */
static int
numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
{
	if (strncmp(oem, "IBM NUMA", 8))
		printk(KERN_ERR "Warning! Not a NUMA-Q system!\n");
	else
		found_numaq = 1;

	return found_numaq;
}
/* apic->probe: rely on the result of the earlier MP-table OEM check. */
static int probe_numaq(void)
{
	/* already know from get_memcfg_numaq() */
	return found_numaq;
}
/* Map the cross-quad port I/O window when more than one quad exists. */
static void numaq_setup_portio_remap(void)
{
	int num_quads = num_online_nodes();

	if (num_quads <= 1)
		return;

	printk(KERN_INFO
		"Remapping cross-quad port I/O for %d quads\n", num_quads);

	xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);

	printk(KERN_INFO
		"xquad_portio vaddr 0x%08lx, len %08lx\n",
		(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
}
/* Use __refdata to keep false positive warning calm. */
/*
 * NUMA-Q apic driver: logical destinations on the local quad, NUMA-Q
 * specific id mapping, and NMI-based secondary CPU wakeup.
 */
static struct apic __refdata apic_numaq = {

	.name				= "NUMAQ",
	.probe				= probe_numaq,
	.acpi_madt_oem_check		= NULL,
	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= numaq_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	/* physical delivery on LOCAL quad: */
	.irq_dest_mode			= 0,

	.target_cpus			= numaq_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= numaq_check_apicid_used,
	.check_apicid_present		= numaq_check_apicid_present,

	.vector_allocation_domain	= flat_vector_allocation_domain,
	.init_apic_ldr			= numaq_init_apic_ldr,

	.ioapic_phys_id_map		= numaq_ioapic_phys_id_map,
	.setup_apic_routing		= numaq_setup_apic_routing,
	.multi_timer_check		= numaq_multi_timer_check,
	.cpu_present_to_apicid		= numaq_cpu_present_to_apicid,
	.apicid_to_cpu_present		= numaq_apicid_to_cpu_present,
	.setup_portio_remap		= numaq_setup_portio_remap,
	.check_phys_apicid_present	= numaq_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= numaq_phys_pkg_id,
	.mps_oem_check			= numaq_mps_oem_check,

	.get_apic_id			= numaq_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0x0F << 24,

	.cpu_mask_to_apicid_and		= numaq_cpu_mask_to_apicid_and,

	.send_IPI_mask			= numaq_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= numaq_send_IPI_allbutself,
	.send_IPI_all			= numaq_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.wakeup_secondary_cpu		= wakeup_secondary_cpu_via_nmi,
	.trampoline_phys_low		= NUMAQ_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= NUMAQ_TRAMPOLINE_PHYS_HIGH,

	/* We don't do anything here because we use NMI's to boot instead */
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= numaq_smp_callin_clear_local_apic,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.eoi_write			= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid	= noop_x86_32_early_logical_apicid,
	.x86_32_numa_cpu_node		= numaq_numa_cpu_node,
};

/* Register this driver with the generic apic probing machinery. */
apic_driver(apic_numaq);
| gpl-2.0 |
13693100472/linux | net/unix/sysctl_net_unix.c | 2540 | 1351 | /*
* NET4: Sysctl interface to net af_unix subsystem.
*
* Authors: Mike Shaver.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/af_unix.h>
/*
 * Template for the per-namespace net.unix sysctl table; .data is
 * rewired to the namespace's counter in unix_sysctl_register().
 */
static struct ctl_table unix_table[] = {
	{
		.procname	= "max_dgram_qlen",
		.data		= &init_net.unx.sysctl_max_dgram_qlen,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};
/*
 * Register the per-namespace "net/unix" sysctl table.
 * Returns 0 on success or -ENOMEM on allocation/registration failure.
 */
int __net_init unix_sysctl_register(struct net *net)
{
	struct ctl_table *table;

	/* copy the template so each namespace gets its own .data pointers */
	table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
	if (table == NULL)
		goto err_alloc;

	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;

	table[0].data = &net->unx.sysctl_max_dgram_qlen;
	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
	if (net->unx.ctl == NULL)
		goto err_reg;

	return 0;

err_reg:
	kfree(table);
err_alloc:
	return -ENOMEM;
}
/* Tear down the per-namespace table and free the kmemdup'd copy. */
void unix_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	/* grab the table pointer before the header is unregistered */
	table = net->unx.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->unx.ctl);
	kfree(table);
}
| gpl-2.0 |
dl9pf/linux | arch/m68k/platform/coldfire/intc-5272.c | 2796 | 6006 | /*
* intc.c -- interrupt controller or ColdFire 5272 SoC
*
* (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/traps.h>
/*
* The 5272 ColdFire interrupt controller is nothing like any other
* ColdFire interrupt controller - it truly is completely different.
* Given its age it is unlikely to be used on any other ColdFire CPU.
*/
/*
* The masking and priproty setting of interrupts on the 5272 is done
* via a set of 4 "Interrupt Controller Registers" (ICR). There is a
* loose mapping of vector number to register and internal bits, but
* a table is the easiest and quickest way to map them.
*
* Note that the external interrupts are edge triggered (unlike the
* internal interrupt sources which are level triggered). Which means
* they also need acknowledging via acknowledge bits.
*/
/* Per-source routing info for the 5272 interrupt controller registers. */
struct irqmap {
	unsigned char icr;	/* which MCFSIM_ICRx register holds this source */
	unsigned char index;	/* bit offset of the source's 4-bit field in the ICR */
	unsigned char ack;	/* non-zero: external edge irq, needs explicit ack */
};
/* Vector-to-ICR lookup table, indexed by (irq - MCFINT_VECBASE).  Only the
 * external EINTx lines have .ack set (they are edge triggered). */
static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = {
	/*MCF_IRQ_SPURIOUS*/	{ .icr = 0,           .index = 0,  .ack = 0, },
	/*MCF_IRQ_EINT1*/	{ .icr = MCFSIM_ICR1, .index = 28, .ack = 1, },
	/*MCF_IRQ_EINT2*/	{ .icr = MCFSIM_ICR1, .index = 24, .ack = 1, },
	/*MCF_IRQ_EINT3*/	{ .icr = MCFSIM_ICR1, .index = 20, .ack = 1, },
	/*MCF_IRQ_EINT4*/	{ .icr = MCFSIM_ICR1, .index = 16, .ack = 1, },
	/*MCF_IRQ_TIMER1*/	{ .icr = MCFSIM_ICR1, .index = 12, .ack = 0, },
	/*MCF_IRQ_TIMER2*/	{ .icr = MCFSIM_ICR1, .index = 8,  .ack = 0, },
	/*MCF_IRQ_TIMER3*/	{ .icr = MCFSIM_ICR1, .index = 4,  .ack = 0, },
	/*MCF_IRQ_TIMER4*/	{ .icr = MCFSIM_ICR1, .index = 0,  .ack = 0, },
	/*MCF_IRQ_UART1*/	{ .icr = MCFSIM_ICR2, .index = 28, .ack = 0, },
	/*MCF_IRQ_UART2*/	{ .icr = MCFSIM_ICR2, .index = 24, .ack = 0, },
	/*MCF_IRQ_PLIP*/	{ .icr = MCFSIM_ICR2, .index = 20, .ack = 0, },
	/*MCF_IRQ_PLIA*/	{ .icr = MCFSIM_ICR2, .index = 16, .ack = 0, },
	/*MCF_IRQ_USB0*/	{ .icr = MCFSIM_ICR2, .index = 12, .ack = 0, },
	/*MCF_IRQ_USB1*/	{ .icr = MCFSIM_ICR2, .index = 8,  .ack = 0, },
	/*MCF_IRQ_USB2*/	{ .icr = MCFSIM_ICR2, .index = 4,  .ack = 0, },
	/*MCF_IRQ_USB3*/	{ .icr = MCFSIM_ICR2, .index = 0,  .ack = 0, },
	/*MCF_IRQ_USB4*/	{ .icr = MCFSIM_ICR3, .index = 28, .ack = 0, },
	/*MCF_IRQ_USB5*/	{ .icr = MCFSIM_ICR3, .index = 24, .ack = 0, },
	/*MCF_IRQ_USB6*/	{ .icr = MCFSIM_ICR3, .index = 20, .ack = 0, },
	/*MCF_IRQ_USB7*/	{ .icr = MCFSIM_ICR3, .index = 16, .ack = 0, },
	/*MCF_IRQ_DMA*/		{ .icr = MCFSIM_ICR3, .index = 12, .ack = 0, },
	/*MCF_IRQ_ERX*/		{ .icr = MCFSIM_ICR3, .index = 8,  .ack = 0, },
	/*MCF_IRQ_ETX*/		{ .icr = MCFSIM_ICR3, .index = 4,  .ack = 0, },
	/*MCF_IRQ_ENTC*/	{ .icr = MCFSIM_ICR3, .index = 0,  .ack = 0, },
	/*MCF_IRQ_QSPI*/	{ .icr = MCFSIM_ICR4, .index = 28, .ack = 0, },
	/*MCF_IRQ_EINT5*/	{ .icr = MCFSIM_ICR4, .index = 24, .ack = 1, },
	/*MCF_IRQ_EINT6*/	{ .icr = MCFSIM_ICR4, .index = 20, .ack = 1, },
	/*MCF_IRQ_SWTO*/	{ .icr = MCFSIM_ICR4, .index = 16, .ack = 0, },
};
/*
* The act of masking the interrupt also has a side effect of 'ack'ing
* an interrupt on this irq (for the external irqs). So this mask function
* is also an ack_mask function.
*/
/*
 * Mask one interrupt source.  Note the documented side effect: writing the
 * mask bit also acks a pending external interrupt, so this doubles as
 * irq_mask_ack in the chip ops.
 */
static void intc_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 val;

	/* Vectors outside the mapped range have no ICR field. */
	if (irq < MCFINT_VECBASE || irq > MCFINT_VECMAX)
		return;

	irq -= MCFINT_VECBASE;
	/* 0x8 in the source's 4-bit ICR field masks it. */
	val = 0x8 << intc_irqmap[irq].index;
	writel(val, intc_irqmap[irq].icr);
}
/* Unmask one interrupt source by writing 0xd into its 4-bit ICR field
 * (enable plus priority bits -- see the 5272 reference manual). */
static void intc_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 val;

	if (irq < MCFINT_VECBASE || irq > MCFINT_VECMAX)
		return;

	irq -= MCFINT_VECBASE;
	val = 0xd << intc_irqmap[irq].index;
	writel(val, intc_irqmap[irq].icr);
}
/*
 * Acknowledge an external (edge triggered) interrupt by setting its ack
 * bit (0x8) while keeping the source's own 4-bit field.  Internal sources
 * are level triggered and need no ack.
 */
static void intc_irq_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;

	/* Only external interrupts are acked */
	if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
		irq -= MCFINT_VECBASE;
		if (intc_irqmap[irq].ack) {
			u32 v;
			v = readl(intc_irqmap[irq].icr);
			/* Keep only this source's field, then set its ack bit.
			 * NOTE(review): this also zeroes the other three
			 * fields in the same ICR -- presumably intentional
			 * given the register's write semantics; confirm
			 * against the 5272 manual. */
			v &= (0x7 << intc_irqmap[irq].index);
			v |= (0x8 << intc_irqmap[irq].index);
			writel(v, intc_irqmap[irq].icr);
		}
	}
}
/*
 * Select the triggering edge for an external interrupt line via the PITR
 * register; internal (non-ack) sources are left untouched.  Always
 * returns 0.
 */
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;

	if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
		irq -= MCFINT_VECBASE;
		if (intc_irqmap[irq].ack) {
			u32 v;
			v = readl(MCFSIM_PITR);
			/* PITR bit (32 - irq): clear = falling edge,
			 * set = rising edge.  For EINT1..4 (irq 1..4 after
			 * rebasing) this is bits 31..28 -- the mapping for
			 * EINT5/6 relies on the same formula; confirm against
			 * the 5272 PITR layout. */
			if (type == IRQ_TYPE_EDGE_FALLING)
				v &= ~(0x1 << (32 - irq));
			else
				v |= (0x1 << (32 - irq));
			writel(v, MCFSIM_PITR);
		}
	}
	return 0;
}
/*
* Simple flow handler to deal with the external edge triggered interrupts.
* We need to be careful with the masking/acking due to the side effects
* of masking an interrupt.
*/
/*
 * Flow handler for the external edge triggered lines: ack first (masking
 * would also ack and lose the edge), then run the simple flow.
 */
static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
{
	irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
	handle_simple_irq(irq, desc);
}
/* Chip ops for the 5272 controller.  irq_mask_ack reuses intc_irq_mask
 * because masking has the side effect of acking external lines. */
static struct irq_chip intc_irq_chip = {
	.name		= "CF-INTC",
	.irq_mask	= intc_irq_mask,
	.irq_unmask	= intc_irq_unmask,
	.irq_mask_ack	= intc_irq_mask,
	.irq_ack	= intc_irq_ack,
	.irq_set_type	= intc_irq_set_type,
};
/*
 * Boot-time IRQ setup: mask every source (0x8 in each 4-bit field of all
 * four ICRs), then install the chip, defaulting to level handling and
 * switching the external EINT lines to the edge flow handler.
 */
void __init init_IRQ(void)
{
	int irq, edge;

	/* Mask all interrupt sources */
	writel(0x88888888, MCFSIM_ICR1);
	writel(0x88888888, MCFSIM_ICR2);
	writel(0x88888888, MCFSIM_ICR3);
	writel(0x88888888, MCFSIM_ICR4);

	for (irq = 0; (irq < NR_IRQS); irq++) {
		irq_set_chip(irq, &intc_irq_chip);
		/* The .ack flag doubles as "is an external edge line". */
		edge = 0;
		if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX))
			edge = intc_irqmap[irq - MCFINT_VECBASE].ack;
		if (edge) {
			irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
			irq_set_handler(irq, intc_external_irq);
		} else {
			irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
			irq_set_handler(irq, handle_level_irq);
		}
	}
}
| gpl-2.0 |
jmarcet/linux-amlogic | arch/arm/mach-davinci/pm_domain.c | 3052 | 1398 | /*
* Runtime PM support code for DaVinci
*
* Author: Kevin Hilman
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime suspend: run the generic suspend callbacks, then gate the
 * device clocks.  If clock gating fails, the generic suspend is undone
 * so the device is left in a consistent (resumed) state.
 */
static int davinci_pm_runtime_suspend(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = pm_clk_suspend(dev);
	if (!ret)
		return 0;

	/* Clock gating failed: roll back before reporting the error. */
	pm_generic_runtime_resume(dev);
	return ret;
}
/* Runtime resume: ungate the clocks first, then run the generic resume. */
static int davinci_pm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	pm_clk_resume(dev);
	return pm_generic_runtime_resume(dev);
}
#endif
/* PM domain attached to every platform device via the bus notifier below;
 * routes runtime PM through the clock-aware callbacks above. */
static struct dev_pm_domain davinci_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(davinci_pm_runtime_suspend,
				davinci_pm_runtime_resume, NULL)
		USE_PLATFORM_PM_SLEEP_OPS
	},
};
/* Hooks platform-bus device add/remove to attach the PM domain and the
 * listed clock connection IDs ("fck", "master", "slave"). */
static struct pm_clk_notifier_block platform_bus_notifier = {
	.pm_domain = &davinci_pm_domain,
	.con_ids = { "fck", "master", "slave", NULL },
};
/* Register the clock notifier early (core_initcall) so devices probed
 * later pick up the PM domain automatically. */
static int __init davinci_pm_runtime_init(void)
{
	pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
	return 0;
}
core_initcall(davinci_pm_runtime_init);
| gpl-2.0 |
barome/BlackChocolate | drivers/staging/ozwpan/ozcdev.c | 3820 | 13427 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "oztrace.h"
#include "ozappif.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozevent.h"
/*------------------------------------------------------------------------------
*/
#define OZ_RD_BUF_SZ 256
/* State for the single ozwpan character device: the chrdev number, the
 * cdev, a reader wait queue, and the currently selected ("active") PD
 * plus its MAC address, both guarded by 'lock'. */
struct oz_cdev {
	dev_t devnum;			/* from alloc_chrdev_region() */
	struct cdev cdev;
	wait_queue_head_t rdq;		/* readers sleep here; woken in oz_cdev_rx() */
	spinlock_t lock;		/* protects active_addr and active_pd */
	u8 active_addr[ETH_ALEN];	/* address user space selected */
	struct oz_pd *active_pd;	/* holds a PD reference while set */
};
/* Per PD context for the serial service stored in the PD. */
struct oz_serial_ctx {
	atomic_t ref_count;	/* freed when this drops to zero */
	u8 tx_seq_num;		/* next element sequence number to transmit */
	u8 rx_seq_num;		/* last element sequence number accepted */
	u8 rd_buf[OZ_RD_BUF_SZ];/* circular receive buffer */
	int rd_in;		/* producer index (oz_cdev_rx) */
	int rd_out;		/* consumer index (oz_cdev_read) */
};
/*------------------------------------------------------------------------------
*/
int g_taction;	/* NOTE(review): never referenced in this file; presumably
		 * shared with other ozwpan sources -- confirm before removal */
/*------------------------------------------------------------------------------
*/
static struct oz_cdev g_cdev;	/* the single character-device instance */
/*------------------------------------------------------------------------------
* Context: process and softirq
*/
/*
 * Look up the PD's serial context and take a reference on it under the
 * serial app lock, so it cannot be freed while the caller uses it.
 * Returns NULL if the service has no context on this PD.
 * Context: process and softirq
 */
static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
	struct oz_serial_ctx *sctx;

	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	sctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	if (sctx != 0)
		atomic_inc(&sctx->ref_count);
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	return sctx;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
/*
 * Drop a reference taken by oz_cdev_claim_ctx(); frees the context when
 * the last reference goes away.
 * Context: softirq or process
 */
static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
	if (!atomic_dec_and_test(&ctx->ref_count))
		return;
	oz_trace("Dealloc serial context.\n");
	kfree(ctx);
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Open: stash the device in the file's private data so the other file
 * operations can find it.  Never fails.
 * Context: process
 */
int oz_cdev_open(struct inode *inode, struct file *filp)
{
	struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);

	oz_trace("oz_cdev_open()\n");
	oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
	filp->private_data = dev;
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/* Release: nothing per-open to tear down; just trace. */
int oz_cdev_release(struct inode *inode, struct file *filp)
{
	oz_trace("oz_cdev_release()\n");
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Read buffered serial data for the active PD.  Non-blocking: returns
 * whatever is currently in the circular buffer (possibly 0 bytes).
 * The buffer may wrap, so at most two copy_to_user() calls are needed.
 */
ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
	loff_t *fpos)
{
	int n;
	int ix;
	struct oz_pd *pd;
	struct oz_serial_ctx *ctx = 0;

	/* Pin the active PD under the device lock. */
	spin_lock_bh(&g_cdev.lock);
	pd = g_cdev.active_pd;
	if (pd)
		oz_pd_get(pd);
	spin_unlock_bh(&g_cdev.lock);
	if (pd == 0)
		return -1;	/* NOTE(review): not a proper -Exxx errno */
	ctx = oz_cdev_claim_ctx(pd);
	if (ctx == 0)
		goto out2;
	/* Bytes available in the ring (producer may be behind consumer). */
	n = ctx->rd_in - ctx->rd_out;
	if (n < 0)
		n += OZ_RD_BUF_SZ;
	if (count > n)
		count = n;
	/* First span: from rd_out up to the physical end of the buffer. */
	ix = ctx->rd_out;
	n = OZ_RD_BUF_SZ - ix;
	if (n > count)
		n = count;
	if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
		count = 0;
		goto out1;
	}
	ix += n;
	if (ix == OZ_RD_BUF_SZ)
		ix = 0;
	/* Second span: the wrapped portion at the start of the buffer. */
	if (n < count) {
		if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
			count = 0;
			goto out1;
		}
		ix = count-n;
	}
	/* Publish the new consumer index only after the copies succeed. */
	ctx->rd_out = ix;
out1:
	oz_cdev_release_ctx(ctx);
out2:
	oz_pd_put(pd);
	return count;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Queue user data to the active PD as a serial-service data element.
 * Returns the number of bytes queued, 0 if the element could not be
 * allocated or queued, or -1 if there is no active PD.
 */
ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
	loff_t *fpos)
{
	struct oz_pd *pd;
	struct oz_elt_buf *eb;
	struct oz_elt_info *ei = 0;
	struct oz_elt *elt;
	struct oz_app_hdr *app_hdr;
	struct oz_serial_ctx *ctx;

	/* Pin the active PD so it cannot go away while we build the element. */
	spin_lock_bh(&g_cdev.lock);
	pd = g_cdev.active_pd;
	if (pd)
		oz_pd_get(pd);
	spin_unlock_bh(&g_cdev.lock);
	if (pd == 0)
		return -1;
	eb = &pd->elt_buff;
	ei = oz_elt_info_alloc(eb);
	if (ei == 0) {
		count = 0;
		goto out;
	}
	/* Layout inside ei->data: oz_elt header, app header, payload.
	 * NOTE(review): 'count' is not bounded against the element buffer
	 * before being folded into elt->length -- confirm large writes
	 * cannot overflow ei->data. */
	elt = (struct oz_elt *)ei->data;
	app_hdr = (struct oz_app_hdr *)(elt+1);
	elt->length = sizeof(struct oz_app_hdr) + count;
	elt->type = OZ_ELT_APP_DATA;
	ei->app_id = OZ_APPID_SERIAL;
	ei->length = elt->length + sizeof(struct oz_elt);
	app_hdr->app_id = OZ_APPID_SERIAL;
	if (copy_from_user(app_hdr+1, buf, count))
		goto out;
	/* Fix: the serial context is protected by the SERIAL app lock (see
	 * oz_cdev_claim_ctx()); the previous code wrongly took the USB app
	 * lock here, leaving tx_seq_num updates unsynchronized against
	 * context teardown in oz_cdev_stop(). */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	if (ctx) {
		/* Sequence numbers run 1..255; 0 means "unsequenced". */
		app_hdr->elt_seq_num = ctx->tx_seq_num++;
		if (ctx->tx_seq_num == 0)
			ctx->tx_seq_num = 1;
		spin_lock(&eb->lock);
		if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
			ei = 0;	/* ownership passed to the element queue */
		spin_unlock(&eb->lock);
	}
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
out:
	if (ei) {
		/* Element was never queued: free it, report nothing written. */
		count = 0;
		spin_lock_bh(&eb->lock);
		oz_elt_info_free(eb, ei);
		spin_unlock_bh(&eb->lock);
	}
	oz_pd_put(pd);
	return count;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Make the PD with the given MAC address the active one for the char
 * device.  An all-zero address deselects the current PD.  Returns 0 on
 * success, -1 if no PD with that (non-zero) address exists.
 */
static int oz_set_active_pd(u8 *addr)
{
	int rc = 0;
	struct oz_pd *pd;
	struct oz_pd *old_pd;

	pd = oz_pd_find(addr);
	if (pd) {
		spin_lock_bh(&g_cdev.lock);
		memcpy(g_cdev.active_addr, addr, ETH_ALEN);
		old_pd = g_cdev.active_pd;
		g_cdev.active_pd = pd;	/* keeps the ref from oz_pd_find() */
		spin_unlock_bh(&g_cdev.lock);
		if (old_pd)
			oz_pd_put(old_pd);
	} else {
		/* Fix: compare ETH_ALEN (6) bytes.  'addr' is a pointer
		 * parameter, so sizeof(addr) is the pointer size (4 or 8) --
		 * the old code compared the wrong number of bytes (and on
		 * 64-bit read past the 7-byte string literal). */
		if (!memcmp(addr, "\0\0\0\0\0\0", ETH_ALEN)) {
			spin_lock_bh(&g_cdev.lock);
			pd = g_cdev.active_pd;
			g_cdev.active_pd = 0;
			memset(g_cdev.active_addr, 0,
				sizeof(g_cdev.active_addr));
			spin_unlock_bh(&g_cdev.lock);
			if (pd)
				oz_pd_put(pd);
		} else {
			rc = -1;
		}
	}
	return rc;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * ioctl entry point: validates the command and the user buffer direction,
 * then dispatches.  Unknown commands in range fall through and return 0.
 */
long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rc = 0;

	if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
		return -ENOTTY;
	/* Pre-check the user pointer for the declared transfer direction. */
	if (_IOC_DIR(cmd) & _IOC_READ)
		rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
			_IOC_SIZE(cmd));
	else if (_IOC_DIR(cmd) & _IOC_WRITE)
		rc = !access_ok(VERIFY_READ, (void __user *)arg,
			_IOC_SIZE(cmd));
	if (rc)
		return -EFAULT;
	switch (cmd) {
	case OZ_IOCTL_GET_PD_LIST: {
			/* Copy out the list of currently known PD addresses. */
			struct oz_pd_list list;
			oz_trace("OZ_IOCTL_GET_PD_LIST\n");
			list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
			if (copy_to_user((void __user *)arg, &list,
				sizeof(list)))
				return -EFAULT;
		}
		break;
	case OZ_IOCTL_SET_ACTIVE_PD: {
			u8 addr[ETH_ALEN];
			oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
			if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
				return -EFAULT;
			rc = oz_set_active_pd(addr);
		}
		break;
	case OZ_IOCTL_GET_ACTIVE_PD: {
			u8 addr[ETH_ALEN];
			oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
			spin_lock_bh(&g_cdev.lock);
			memcpy(addr, g_cdev.active_addr, ETH_ALEN);
			spin_unlock_bh(&g_cdev.lock);
			if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
				return -EFAULT;
		}
		break;
#ifdef WANT_EVENT_TRACE
	case OZ_IOCTL_CLEAR_EVENTS:
		oz_events_clear();
		break;
	case OZ_IOCTL_GET_EVENTS:
		rc = oz_events_copy((void __user *)arg);
		break;
	case OZ_IOCTL_SET_EVENT_MASK:
		if (copy_from_user(&g_evt_mask, (void __user *)arg,
			sizeof(unsigned long))) {
			return -EFAULT;
		}
		break;
#endif /* WANT_EVENT_TRACE */
	case OZ_IOCTL_ADD_BINDING:
	case OZ_IOCTL_REMOVE_BINDING: {
			struct oz_binding_info b;
			if (copy_from_user(&b, (void __user *)arg,
				sizeof(struct oz_binding_info))) {
				return -EFAULT;
			}
			/* Make sure name is null terminated. */
			b.name[OZ_MAX_BINDING_LEN-1] = 0;
			if (cmd == OZ_IOCTL_ADD_BINDING)
				oz_binding_add(b.name);
			else
				oz_binding_remove(b.name);
		}
		break;
	}
	return rc;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Poll: report readability when the active PD's ring buffer holds data,
 * and register the caller on the read wait queue.
 * Context: process
 */
unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
	unsigned int events = 0;
	struct oz_cdev *dev = filp->private_data;

	oz_trace("Poll called wait = %p\n", wait);
	spin_lock_bh(&dev->lock);
	if (dev->active_pd) {
		struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);

		if (ctx) {
			/* Data is available when the ring indices differ. */
			if (ctx->rd_in != ctx->rd_out)
				events |= POLLIN | POLLRDNORM;
			oz_cdev_release_ctx(ctx);
		}
	}
	spin_unlock_bh(&dev->lock);
	if (wait)
		poll_wait(filp, &dev->rdq, wait);
	return events;
}
/*------------------------------------------------------------------------------
*/
/* File operations for the ozwpan character device. */
const struct file_operations oz_fops = {
	.owner =	THIS_MODULE,
	.open =		oz_cdev_open,
	.release =	oz_cdev_release,
	.read =		oz_cdev_read,
	.write =	oz_cdev_write,
	.unlocked_ioctl = oz_cdev_ioctl,
	.poll =		oz_cdev_poll
};
/*------------------------------------------------------------------------------
* Context: process
*/
/*
 * Allocate a char device number and register the cdev.  Returns 0 on
 * success or a negative errno; on failure nothing is left registered.
 */
int oz_cdev_register(void)
{
	int err;

	memset(&g_cdev, 0, sizeof(g_cdev));
	err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
	if (err < 0)
		return err;
	oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
		MINOR(g_cdev.devnum));
	cdev_init(&g_cdev.cdev, &oz_fops);
	g_cdev.cdev.owner = THIS_MODULE;
	g_cdev.cdev.ops = &oz_fops;
	spin_lock_init(&g_cdev.lock);
	init_waitqueue_head(&g_cdev.rdq);
	err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
	if (err < 0) {
		/* Fix: the cdev_add() result was previously ignored, which
		 * reported success with no usable device and leaked the
		 * chrdev region. */
		unregister_chrdev_region(g_cdev.devnum, 1);
		return err;
	}
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/* Tear down in reverse order of oz_cdev_register().  Always returns 0. */
int oz_cdev_deregister(void)
{
	cdev_del(&g_cdev.cdev);
	unregister_chrdev_region(g_cdev.devnum, 1);
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/* Enable the serial application service (logged as an event). */
int oz_cdev_init(void)
{
	oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
	oz_app_enable(OZ_APPID_SERIAL, 1);
	return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
/* Disable the serial application service (logged as an event). */
void oz_cdev_term(void)
{
	oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
	oz_app_enable(OZ_APPID_SERIAL, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
/*
 * Start (or resume) the serial service on a PD: allocate its serial
 * context and, if the PD's address matches the one user space selected,
 * make it the active PD.  Returns 0 or -ENOMEM.
 */
int oz_cdev_start(struct oz_pd *pd, int resume)
{
	struct oz_serial_ctx *ctx;
	struct oz_serial_ctx *old_ctx = 0;

	oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
	if (resume) {
		/* Resume needs no new context; the old one survived. */
		oz_trace("Serial service resumed.\n");
		return 0;
	}
	ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
	if (ctx == 0)
		return -ENOMEM;
	atomic_set(&ctx->ref_count, 1);
	ctx->tx_seq_num = 1;	/* 0 is reserved for "unsequenced" */
	/* Install the new context unless we lost a race to another starter. */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
	if (old_ctx) {
		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
		kfree(ctx);
	} else {
		pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	}
	/* If user space already asked for this address, activate the PD. */
	spin_lock(&g_cdev.lock);
	if ((g_cdev.active_pd == 0) &&
		(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
		oz_pd_get(pd);
		g_cdev.active_pd = pd;
		oz_trace("Active PD arrived.\n");
	}
	spin_unlock(&g_cdev.lock);
	oz_trace("Serial service started.\n");
	return 0;
}
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
/*
 * Stop (or pause) the serial service on a PD: detach and release its
 * serial context, and drop the active-PD reference if this PD held it.
 */
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
	struct oz_serial_ctx *ctx;

	oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
	if (pause) {
		/* Pause keeps the context for a later resume. */
		oz_trace("Serial service paused.\n");
		return;
	}
	/* Detach the context; concurrent claimers still hold references. */
	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
	pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
	if (ctx)
		oz_cdev_release_ctx(ctx);
	/* Reuse 'pd' as a flag: non-zero only if we must drop the
	 * active-PD reference outside the lock. */
	spin_lock(&g_cdev.lock);
	if (pd == g_cdev.active_pd)
		g_cdev.active_pd = 0;
	else
		pd = 0;
	spin_unlock(&g_cdev.lock);
	if (pd) {
		oz_pd_put(pd);
		oz_trace("Active PD departed.\n");
	}
	oz_trace("Serial service stopped.\n");
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
/*
 * Receive path: copy a serial data element's payload into the PD's
 * circular read buffer (dropping what does not fit) and wake readers.
 * Duplicate elements are detected via the sequence number.
 */
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
{
	struct oz_serial_ctx *ctx;
	struct oz_app_hdr *app_hdr;
	u8 *data;
	int len;
	int space;
	int copy_sz;
	int ix;

	ctx = oz_cdev_claim_ctx(pd);
	if (ctx == 0) {
		oz_trace("Cannot claim serial context.\n");
		return;
	}

	app_hdr = (struct oz_app_hdr *)(elt+1);
	/* If sequence number is non-zero then check it is not a duplicate.
	 */
	if (app_hdr->elt_seq_num != 0) {
		/* Signed-window comparison in 8 bits: a non-negative
		 * difference means we already saw this sequence number. */
		if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
			/* Reject duplicate element. */
			oz_trace("Duplicate element:%02x %02x\n",
				app_hdr->elt_seq_num, ctx->rx_seq_num);
			goto out;
		}
	}
	ctx->rx_seq_num = app_hdr->elt_seq_num;
	len = elt->length - sizeof(struct oz_app_hdr);
	data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
	if (len <= 0)
		goto out;
	/* Free space in the ring; one slot is kept empty to tell full
	 * from empty. */
	space = ctx->rd_out - ctx->rd_in - 1;
	if (space < 0)
		space += OZ_RD_BUF_SZ;
	if (len > space) {
		/* Overflow: silently truncate the payload to what fits. */
		oz_trace("Not enough space:%d %d\n", len, space);
		len = space;
	}
	/* First span: from rd_in to the physical end of the buffer. */
	ix = ctx->rd_in;
	copy_sz = OZ_RD_BUF_SZ - ix;
	if (copy_sz > len)
		copy_sz = len;
	memcpy(&ctx->rd_buf[ix], data, copy_sz);
	len -= copy_sz;
	ix += copy_sz;
	if (ix == OZ_RD_BUF_SZ)
		ix = 0;
	/* Second span: wrapped remainder at the start of the buffer. */
	if (len) {
		memcpy(ctx->rd_buf, data+copy_sz, len);
		ix = len;
	}
	ctx->rd_in = ix;
	wake_up(&g_cdev.rdq);
out:
	oz_cdev_release_ctx(ctx);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
/* Periodic heartbeat callback for the serial service: nothing to do. */
void oz_cdev_heartbeat(struct oz_pd *pd)
{
}
| gpl-2.0 |
weritos666/ARCHOS_50_Platinum | arch/alpha/kernel/sys_titan.c | 4588 | 9605 | /*
* linux/arch/alpha/kernel/sys_titan.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996, 1999 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
* Copyright (C) 1999, 2000 Jeff Wiedemeier
*
* Code supporting TITAN systems (EV6+TITAN), currently:
* Privateer
* Falcon
* Granite
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"
/*
* Titan generic
*/
/*
 * Titan supports up to 4 CPUs.  Per-CPU bitmask of interrupts that may be
 * delivered to that CPU; all enabled by default.
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
/*
 * Mask is set (1) if enabled.  Software copy of the enable state pushed
 * to the hardware by titan_update_irq_hw().
 */
static unsigned long titan_cached_irq_mask;
/*
 * Need SMP-safe access to interrupt CSRs
 */
DEFINE_SPINLOCK(titan_irq_lock);
/*
 * Push the cached enable mask into the per-CPU DIMx interrupt mask CSRs,
 * applying each CPU's affinity mask.  The ISA cascade bit (55) is routed
 * only to the boot CPU.  Caller must hold titan_irq_lock.
 */
static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
	cpumask_t cpm;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	cpumask_copy(&cpm, cpu_present_mask);
	/* Compute each CPU's view of the mask from its affinity. */
	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	/* Only the boot CPU sees the ISA cascade. */
	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	/* Redirect writes for absent CPUs to a dummy local. */
	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
	if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
	if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
	if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	/* Read back each CSR to ensure the writes have reached the chip. */
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	volatile unsigned long *dimB;

	/* UP: only the boot CPU's DIM register is in use. */
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}
/* Enable one irq: set its bit in the cached mask and push to hardware.
 * Device irqs start at 16 (0-15 are ISA), hence the offset. */
static inline void
titan_enable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask |= bit;
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}
/* Disable one irq: clear its bit in the cached mask and push to hardware. */
static inline void
titan_disable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask &= ~bit;
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}
/* Record, per CPU, whether the given (already rebased) irq may be
 * delivered there.  Caller holds titan_irq_lock and pushes to hardware. */
static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned long bit = 1UL << irq;

		if (cpumask_test_cpu(cpu, &affinity))
			titan_cpu_irq_affinity[cpu] |= bit;
		else
			titan_cpu_irq_affinity[cpu] &= ~bit;
	}
}
/* irq_chip affinity hook: update the per-CPU tables and reprogram the
 * DIM CSRs.  Always reports success. */
static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
		       bool force)
{
	unsigned int irq = d->irq;
	spin_lock(&titan_irq_lock);
	titan_cpu_set_irq_affinity(irq - 16, *affinity);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);

	return 0;
}
/* Non-SRM dispatch path: never implemented for Titan systems. */
static void
titan_device_interrupt(unsigned long vector)
{
	printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}
/* SRM dispatch path: SRM vectors start at 0x800 and are spaced 0x10
 * apart, so translate the vector to a kernel irq number and handle it. */
static void
titan_srm_device_interrupt(unsigned long vector)
{
	handle_irq((vector - 0x800) >> 4);
}
/* Install the given irq_chip with level-triggered handling on every irq
 * in [imin, imax]. */
static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
	long i;
	for (i = imin; i <= imax; ++i) {
		irq_set_chip_and_handler(i, ops, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}
/* Titan irq_chip: mask/unmask via the cached-mask + DIM CSR machinery;
 * no separate ack is needed (level triggered). */
static struct irq_chip titan_irq_type = {
       .name			= "TITAN",
       .irq_unmask		= titan_enable_irq,
       .irq_mask		= titan_disable_irq,
       .irq_mask_ack		= titan_disable_irq,
       .irq_set_affinity	= titan_set_irq_affinity,
};
static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
      /*
       * This is a NOP interrupt handler for the purposes of
       * event counting -- just return.
       */
	return IRQ_HANDLED;
}
/* Core Titan irq init: pick the dispatch routine (SRM if available),
 * mask everything in hardware, and register irqs 16..79. */
static void __init
titan_init_irq(void)
{
	if (alpha_using_srm && !alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_srm_device_interrupt;
	if (!alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_device_interrupt;

	titan_update_irq_hw(0);

	init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}
/* Full init for systems with legacy (ISA) support: reset the cascaded
 * DMA controllers, set up the i8259 pair, then the Titan irqs. */
static void __init
titan_legacy_init_irq(void)
{
	/* init the legacy dma controller */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* init the legacy irq controller */
	init_i8259a_irqs();

	/* init the titan irqs */
	titan_init_irq();
}
/*
 * Dispatch every pending interrupt in 'mask' that is routed to the
 * current CPU, highest bit first, via the machine vector's
 * device_interrupt hook (using SRM vector encoding).
 */
void
titan_dispatch_irqs(u64 mask)
{
	unsigned long vector;

	/*
	 * Mask down to those interrupts which are enable on this processor
	 */
	mask &= titan_cpu_irq_affinity[smp_processor_id()];

	/*
	 * Dispatch all requested interrupts
	 */
	while (mask) {
		/* convert to SRM vector... priority is <63> -> <0> */
		vector = 63 - __kernel_ctlz(mask);
		mask &= ~(1UL << vector);	/* clear it out 	 */
		vector = 0x900 + (vector << 4);	/* convert to SRM vector */

		/* dispatch it */
		alpha_mv.device_interrupt(vector);
	}
}
/*
* Titan Family
*/
/* request_irq() wrapper used at init time: failures are only logged,
 * since these are optional event-counting hooks. */
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
		  unsigned long irqflags, const char *devname,
		  void *dev_id)
{
	int err = request_irq(irq, handler, irqflags, devname, dev_id);

	if (err)
		printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
		       irq, err);
}
static void __init
titan_late_init(void)
{
	/*
	 * Enable the system error interrupts. These interrupts are
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop so it can be called to count the individual events.
	 */
	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
		    "CChip Error", NULL);
	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
		    "PChip 0 H_Error", NULL);
	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
		    "PChip 1 H_Error", NULL);
	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
		    "PChip 0 C_Error", NULL);
	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
		    "PChip 1 C_Error", NULL);

	/*
	 * Register our error handlers.
	 */
	titan_register_error_handlers();

	/*
	 * Check if the console left us any error logs.
	 */
	cdl_check_console_data_log();
}
/* Map a PCI device's interrupt pin to a kernel irq number, trusting the
 * PCI_INTERRUPT_LINE value programmed by the console firmware. */
static int __devinit
titan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	u8 intline;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	/* Values of the form 0xEx denote explicit routing through ISA;
	 * return them unchanged. */
	if ((irq & 0xF0) == 0xE0)
		return irq;

	/* Offset by 16 to make room for ISA interrupts 0 - 15. */
	return irq + 16;
}
static void __init
titan_init_pci(void)
{
 	/*
 	 * This isn't really the right place, but there's some init
 	 * that needs to be done after everything is basically up.
 	 */
 	titan_late_init();

	/* Indicate that we trust the console to configure things properly */
	pci_set_flags(PCI_PROBE_ONLY);
	common_init_pci();
	SMC669_Init(0);
	locate_and_init_vga(NULL);
}
/*
* Privateer
*/
/*
 * Privateer PCI init: hook two platform-specific error interrupts that
 * the common Titan code does not, then run the common init.
 */
static void __init
privateer_init_pci(void)
{
	/*
	 * Hook a couple of extra err interrupts that the
	 * common titan code won't.
	 */
	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
		    "NMI", NULL);
	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
		    "Temperature Warning", NULL);

	/*
	 * Finish with the common version.  (Fix: `return <void expr>;`
	 * from a void function is a C constraint violation, accepted only
	 * as a GNU extension -- call it plainly instead.)
	 */
	titan_init_pci();
}
/*
* The System Vectors.
*/
/* Machine vector for generic Titan systems (Falcon, Granite). */
struct alpha_machine_vector titan_mv __initmv = {
	.vector_name		= "TITAN",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check		= titan_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TITAN_DAC_OFFSET,

	.nr_irqs		= 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info		= titan_agp_info,

	.init_arch		= titan_init_arch,
	.init_irq		= titan_legacy_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= titan_init_pci,

	.kill_arch		= titan_kill_arch,
	.pci_map_irq		= titan_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(titan)
/* Machine vector for Privateer: same as titan_mv except for the machine
 * check handler and the PCI init hook. */
struct alpha_machine_vector privateer_mv __initmv = {
	.vector_name		= "PRIVATEER",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	.machine_check		= privateer_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= TITAN_DAC_OFFSET,

	.nr_irqs		= 80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	.agp_info		= titan_agp_info,

	.init_arch		= titan_init_arch,
	.init_irq		= titan_legacy_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= privateer_init_pci,

	.kill_arch		= titan_kill_arch,
	.pci_map_irq		= titan_map_irq,
	.pci_swizzle		= common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
in unconditionally with titan; setup_arch knows how to cope. */
| gpl-2.0 |
hyuh/kernel-k2 | arch/m68k/platform/68328/ints.c | 4588 | 4291 | /*
* linux/arch/m68knommu/platform/68328/ints.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Copyright 1996 Roman Zippel
* Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#if defined(CONFIG_M68328)
#include <asm/MC68328.h>
#elif defined(CONFIG_M68EZ328)
#include <asm/MC68EZ328.h>
#elif defined(CONFIG_M68VZ328)
#include <asm/MC68VZ328.h>
#endif
/* assembler routines */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void trap3(void);
asmlinkage void trap4(void);
asmlinkage void trap5(void);
asmlinkage void trap6(void);
asmlinkage void trap7(void);
asmlinkage void trap8(void);
asmlinkage void trap9(void);
asmlinkage void trap10(void);
asmlinkage void trap11(void);
asmlinkage void trap12(void);
asmlinkage void trap13(void);
asmlinkage void trap14(void);
asmlinkage void trap15(void);
asmlinkage void trap33(void);
asmlinkage void trap34(void);
asmlinkage void trap35(void);
asmlinkage void trap36(void);
asmlinkage void trap37(void);
asmlinkage void trap38(void);
asmlinkage void trap39(void);
asmlinkage void trap40(void);
asmlinkage void trap41(void);
asmlinkage void trap42(void);
asmlinkage void trap43(void);
asmlinkage void trap44(void);
asmlinkage void trap45(void);
asmlinkage void trap46(void);
asmlinkage void trap47(void);
asmlinkage irqreturn_t bad_interrupt(int, void *);
asmlinkage irqreturn_t inthandler(void);
asmlinkage irqreturn_t inthandler1(void);
asmlinkage irqreturn_t inthandler2(void);
asmlinkage irqreturn_t inthandler3(void);
asmlinkage irqreturn_t inthandler4(void);
asmlinkage irqreturn_t inthandler5(void);
asmlinkage irqreturn_t inthandler6(void);
asmlinkage irqreturn_t inthandler7(void);
/* The 68k family did not have a good way to determine the source
* of interrupts until later in the family. The EC000 core does
* not provide the vector number on the stack, we vector everything
* into one vector and look in the blasted mask register...
* This code is designed to be fast, almost constant time, not clean!
*/
/*
 * Dispatch all pending interrupts from the ISR (interrupt status
 * register).  The nested if-chain narrows the pending bits down to a
 * 4-bit nibble (a manual find-first-set, cheaper than looping from bit
 * 0 on this core), then the small loop finds the exact bit.  Lower bit
 * numbers are serviced first.
 */
void process_int(int vec, struct pt_regs *fp)
{
	int irq;
	int mask;

	unsigned long pend = ISR;

	while (pend) {
		/* Narrow to the lowest pending 4-bit group. */
		if (pend & 0x0000ffff) {
			if (pend & 0x000000ff) {
				if (pend & 0x0000000f) {
					mask = 0x00000001;
					irq = 0;
				} else {
					mask = 0x00000010;
					irq = 4;
				}
			} else {
				if (pend & 0x00000f00) {
					mask = 0x00000100;
					irq = 8;
				} else {
					mask = 0x00001000;
					irq = 12;
				}
			}
		} else {
			if (pend & 0x00ff0000) {
				if (pend & 0x000f0000) {
					mask = 0x00010000;
					irq = 16;
				} else {
					mask = 0x00100000;
					irq = 20;
				}
			} else {
				if (pend & 0x0f000000) {
					mask = 0x01000000;
					irq = 24;
				} else {
					mask = 0x10000000;
					irq = 28;
				}
			}
		}

		/* Walk up within the nibble to the exact pending bit. */
		while (! (mask & pend)) {
			mask <<=1;
			irq++;
		}

		/* Service it and clear it from our snapshot of ISR. */
		do_IRQ(irq, fp);
		pend &= ~mask;
	}
}
/* Enable a source by clearing its bit in the interrupt mask register. */
static void intc_irq_unmask(struct irq_data *d)
{
	IMR &= ~(1 << d->irq);
}
/* Disable a source by setting its bit in the interrupt mask register. */
static void intc_irq_mask(struct irq_data *d)
{
	IMR |= (1 << d->irq);
}
/* Minimal chip ops: the 68328 controller only supports mask/unmask. */
static struct irq_chip intc_irq_chip = {
	.name		= "M68K-INTC",
	.irq_mask	= intc_irq_mask,
	.irq_unmask	= intc_irq_unmask,
};
/*
* This function should be called during kernel startup to initialize
* the machine vector table.
*/
/*
 * This function should be called during kernel startup to initialize
 * the machine vector table.
 */
void __init trap_init(void)
{
	int i;

	/* set up the vectors */
	/* Everything from 72 up defaults to the catch-all handler. */
	for (i = 72; i < 256; ++i)
		_ramvec[i] = (e_vector) bad_interrupt;

	/* Trap #0 (vector 32) is the system call entry. */
	_ramvec[32] = system_call;

	/* Vectors 65..71: interrupt levels 1..7 relative to the IVR base
	 * of 0x40 (64) programmed in init_IRQ(). */
	_ramvec[65] = (e_vector) inthandler1;
	_ramvec[66] = (e_vector) inthandler2;
	_ramvec[67] = (e_vector) inthandler3;
	_ramvec[68] = (e_vector) inthandler4;
	_ramvec[69] = (e_vector) inthandler5;
	_ramvec[70] = (e_vector) inthandler6;
	_ramvec[71] = (e_vector) inthandler7;
}
/* Program the interrupt base, mask every source, and install the chip
 * with level handling on all irqs. */
void __init init_IRQ(void)
{
	int i;

	IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */

	/* turn off all interrupts */
	IMR = ~0;

	for (i = 0; (i < NR_IRQS); i++) {
		irq_set_chip(i, &intc_irq_chip);
		irq_set_handler(i, handle_level_irq);
	}
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.